title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
BUG: Styler `hide` compatible with `max_columns` | diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index a71dd6f33e3c8..ae4e05160e70a 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -316,7 +316,7 @@ def _translate_header(self, sparsify_cols: bool, max_cols: int):
self.columns, sparsify_cols, max_cols, self.hidden_columns
)
- clabels = self.data.columns.tolist()[:max_cols] # slice to allow trimming
+ clabels = self.data.columns.tolist()
if self.data.columns.nlevels == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
@@ -339,7 +339,9 @@ def _translate_header(self, sparsify_cols: bool, max_cols: int):
and not all(self.hide_index_)
and not self.hide_index_names
):
- index_names_row = self._generate_index_names_row(clabels, max_cols)
+ index_names_row = self._generate_index_names_row(
+ clabels, max_cols, col_lengths
+ )
head.append(index_names_row)
return head
@@ -389,9 +391,27 @@ def _generate_col_header_row(self, iter: tuple, max_cols: int, col_lengths: dict
)
]
- column_headers = []
+ column_headers, visible_col_count = [], 0
for c, value in enumerate(clabels[r]):
header_element_visible = _is_visible(c, r, col_lengths)
+ if header_element_visible:
+ visible_col_count += col_lengths.get((r, c), 0)
+ if visible_col_count > max_cols:
+ # add an extra column with `...` value to indicate trimming
+ column_headers.append(
+ _element(
+ "th",
+ (
+ f"{self.css['col_heading']} {self.css['level']}{r} "
+ f"{self.css['col_trim']}"
+ ),
+ "...",
+ True,
+ attributes="",
+ )
+ )
+ break
+
header_element = _element(
"th",
(
@@ -422,23 +442,9 @@ def _generate_col_header_row(self, iter: tuple, max_cols: int, col_lengths: dict
column_headers.append(header_element)
- if len(self.data.columns) > max_cols:
- # add an extra column with `...` value to indicate trimming
- column_headers.append(
- _element(
- "th",
- (
- f"{self.css['col_heading']} {self.css['level']}{r} "
- f"{self.css['col_trim']}"
- ),
- "...",
- True,
- attributes="",
- )
- )
return index_blanks + column_name + column_headers
- def _generate_index_names_row(self, iter: tuple, max_cols):
+ def _generate_index_names_row(self, iter: tuple, max_cols: int, col_lengths: dict):
"""
Generate the row containing index names
@@ -470,22 +476,37 @@ def _generate_index_names_row(self, iter: tuple, max_cols):
for c, name in enumerate(self.data.index.names)
]
- if not clabels:
- blank_len = 0
- elif len(self.data.columns) <= max_cols:
- blank_len = len(clabels[0])
- else:
- blank_len = len(clabels[0]) + 1 # to allow room for `...` trim col
+ column_blanks, visible_col_count = [], 0
+ if clabels:
+ last_level = self.columns.nlevels - 1 # use last level since never sparsed
+ for c, value in enumerate(clabels[last_level]):
+ header_element_visible = _is_visible(c, last_level, col_lengths)
+ if header_element_visible:
+ visible_col_count += 1
+ if visible_col_count > max_cols:
+ column_blanks.append(
+ _element(
+ "th",
+ (
+ f"{self.css['blank']} {self.css['col']}{c} "
+ f"{self.css['col_trim']}"
+ ),
+ self.css["blank_value"],
+ True,
+ attributes="",
+ )
+ )
+ break
+
+ column_blanks.append(
+ _element(
+ "th",
+ f"{self.css['blank']} {self.css['col']}{c}",
+ self.css["blank_value"],
+ c not in self.hidden_columns,
+ )
+ )
- column_blanks = [
- _element(
- "th",
- f"{self.css['blank']} {self.css['col']}{c}",
- self.css["blank_value"],
- c not in self.hidden_columns,
- )
- for c in range(blank_len)
- ]
return index_names + column_blanks
def _translate_body(self, sparsify_index: bool, max_rows: int, max_cols: int):
@@ -561,31 +582,36 @@ def _generate_trimmed_row(self, max_cols: int) -> list:
for c in range(self.data.index.nlevels)
]
- data = [
- _element(
- "td",
- f"{self.css['data']} {self.css['col']}{c} {self.css['row_trim']}",
- "...",
- (c not in self.hidden_columns),
- attributes="",
- )
- for c in range(max_cols)
- ]
+ data, visible_col_count = [], 0
+ for c, _ in enumerate(self.columns):
+ data_element_visible = c not in self.hidden_columns
+ if data_element_visible:
+ visible_col_count += 1
+ if visible_col_count > max_cols:
+ data.append(
+ _element(
+ "td",
+ (
+ f"{self.css['data']} {self.css['row_trim']} "
+ f"{self.css['col_trim']}"
+ ),
+ "...",
+ True,
+ attributes="",
+ )
+ )
+ break
- if len(self.data.columns) > max_cols:
- # columns are also trimmed so we add the final element
data.append(
_element(
"td",
- (
- f"{self.css['data']} {self.css['row_trim']} "
- f"{self.css['col_trim']}"
- ),
+ f"{self.css['data']} {self.css['col']}{c} {self.css['row_trim']}",
"...",
- True,
+ data_element_visible,
attributes="",
)
)
+
return index_headers + data
def _generate_body_row(
@@ -654,9 +680,14 @@ def _generate_body_row(
index_headers.append(header_element)
- data = []
+ data, visible_col_count = [], 0
for c, value in enumerate(row_tup[1:]):
- if c >= max_cols:
+ data_element_visible = (
+ c not in self.hidden_columns and r not in self.hidden_rows
+ )
+ if data_element_visible:
+ visible_col_count += 1
+ if visible_col_count > max_cols:
data.append(
_element(
"td",
@@ -676,9 +707,6 @@ def _generate_body_row(
if (r, c) in self.cell_context:
cls = " " + self.cell_context[r, c]
- data_element_visible = (
- c not in self.hidden_columns and r not in self.hidden_rows
- )
data_element = _element(
"td",
(
@@ -1252,15 +1280,15 @@ def _get_level_lengths(
elif j not in hidden_elements:
# then element must be part of sparsified section and is visible
visible_row_count += 1
+ if visible_row_count > max_index:
+ break # do not add a length since the render trim limit reached
if lengths[(i, last_label)] == 0:
# if previous iteration was first-of-section but hidden then offset
last_label = j
lengths[(i, last_label)] = 1
else:
- # else add to previous iteration but do not extend more than max
- lengths[(i, last_label)] = min(
- max_index, 1 + lengths[(i, last_label)]
- )
+ # else add to previous iteration
+ lengths[(i, last_label)] += 1
non_zero_lengths = {
element: length for element, length in lengths.items() if length >= 1
diff --git a/pandas/tests/io/formats/style/test_html.py b/pandas/tests/io/formats/style/test_html.py
index e15283e558479..b778d18618bf4 100644
--- a/pandas/tests/io/formats/style/test_html.py
+++ b/pandas/tests/io/formats/style/test_html.py
@@ -668,3 +668,99 @@ def test_hiding_index_columns_multiindex_alignment():
"""
)
assert result == expected
+
+
+def test_hiding_index_columns_multiindex_trimming():
+ # gh 44272
+ df = DataFrame(np.arange(64).reshape(8, 8))
+ df.columns = MultiIndex.from_product([[0, 1, 2, 3], [0, 1]])
+ df.index = MultiIndex.from_product([[0, 1, 2, 3], [0, 1]])
+ df.index.names, df.columns.names = ["a", "b"], ["c", "d"]
+ styler = Styler(df, cell_ids=False, uuid_len=0)
+ styler.hide([(0, 0), (0, 1), (1, 0)], axis=1).hide([(0, 0), (0, 1), (1, 0)], axis=0)
+ with option_context("styler.render.max_rows", 4, "styler.render.max_columns", 4):
+ result = styler.to_html()
+
+ expected = dedent(
+ """\
+ <style type="text/css">
+ </style>
+ <table id="T_">
+ <thead>
+ <tr>
+ <th class="blank" > </th>
+ <th class="index_name level0" >c</th>
+ <th class="col_heading level0 col3" >1</th>
+ <th class="col_heading level0 col4" colspan="2">2</th>
+ <th class="col_heading level0 col6" >3</th>
+ </tr>
+ <tr>
+ <th class="blank" > </th>
+ <th class="index_name level1" >d</th>
+ <th class="col_heading level1 col3" >1</th>
+ <th class="col_heading level1 col4" >0</th>
+ <th class="col_heading level1 col5" >1</th>
+ <th class="col_heading level1 col6" >0</th>
+ <th class="col_heading level1 col_trim" >...</th>
+ </tr>
+ <tr>
+ <th class="index_name level0" >a</th>
+ <th class="index_name level1" >b</th>
+ <th class="blank col3" > </th>
+ <th class="blank col4" > </th>
+ <th class="blank col5" > </th>
+ <th class="blank col6" > </th>
+ <th class="blank col7 col_trim" > </th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th class="row_heading level0 row3" >1</th>
+ <th class="row_heading level1 row3" >1</th>
+ <td class="data row3 col3" >27</td>
+ <td class="data row3 col4" >28</td>
+ <td class="data row3 col5" >29</td>
+ <td class="data row3 col6" >30</td>
+ <td class="data row3 col_trim" >...</td>
+ </tr>
+ <tr>
+ <th class="row_heading level0 row4" rowspan="2">2</th>
+ <th class="row_heading level1 row4" >0</th>
+ <td class="data row4 col3" >35</td>
+ <td class="data row4 col4" >36</td>
+ <td class="data row4 col5" >37</td>
+ <td class="data row4 col6" >38</td>
+ <td class="data row4 col_trim" >...</td>
+ </tr>
+ <tr>
+ <th class="row_heading level1 row5" >1</th>
+ <td class="data row5 col3" >43</td>
+ <td class="data row5 col4" >44</td>
+ <td class="data row5 col5" >45</td>
+ <td class="data row5 col6" >46</td>
+ <td class="data row5 col_trim" >...</td>
+ </tr>
+ <tr>
+ <th class="row_heading level0 row6" >3</th>
+ <th class="row_heading level1 row6" >0</th>
+ <td class="data row6 col3" >51</td>
+ <td class="data row6 col4" >52</td>
+ <td class="data row6 col5" >53</td>
+ <td class="data row6 col6" >54</td>
+ <td class="data row6 col_trim" >...</td>
+ </tr>
+ <tr>
+ <th class="row_heading level0 row_trim" >...</th>
+ <th class="row_heading level1 row_trim" >...</th>
+ <td class="data col3 row_trim" >...</td>
+ <td class="data col4 row_trim" >...</td>
+ <td class="data col5 row_trim" >...</td>
+ <td class="data col6 row_trim" >...</td>
+ <td class="data row_trim col_trim" >...</td>
+ </tr>
+ </tbody>
+ </table>
+ """
+ )
+
+ assert result == expected
diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py
index 8ac0dd03c9fd6..281ab0d8b8e56 100644
--- a/pandas/tests/io/formats/style/test_style.py
+++ b/pandas/tests/io/formats/style/test_style.py
@@ -216,9 +216,6 @@ def test_render_trimming_mi():
assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items()
assert len(ctx["body"]) == 3 # 2 data rows + trimming row
- assert len(ctx["head"][0]) == 5 # 2 indexes + 2 column headers + trimming col
- assert {"attributes": 'colspan="2"'}.items() <= ctx["head"][0][2].items()
-
def test_render_empty_mi():
# GH 43305
@@ -1600,3 +1597,17 @@ def test_row_trimming_hide_index_mi():
assert ctx["body"][r][1]["display_value"] == val # level 1 index headers
for r, val in enumerate(["3", "4", "..."]):
assert ctx["body"][r][2]["display_value"] == val # data values
+
+
+def test_col_trimming_hide_columns():
+ # gh 44272
+ df = DataFrame([[1, 2, 3, 4, 5]])
+ with pd.option_context("styler.render.max_columns", 2):
+ ctx = df.style.hide([0, 1], axis="columns")._translate(True, True)
+
+ assert len(ctx["head"][0]) == 6 # blank, [0, 1 (hidden)], [2 ,3 (visible)], + trim
+ for c, vals in enumerate([(1, False), (2, True), (3, True), ("...", True)]):
+ assert ctx["head"][0][c + 2]["value"] == vals[0]
+ assert ctx["head"][0][c + 2]["is_visible"] == vals[1]
+
+ assert len(ctx["body"][0]) == 6 # index + 2 hidden + 2 visible + trimming col
| closes #43704
Description of render trimming and maximums bug:
```
pd.options.styler.render.max_columns = 5
pd.options.styler.render.max_rows = 5
df = DataFrame(np.random.rand(10,10))
df.columns = pd.MultiIndex.from_product([[0,1,2,3,4], [0,1]])
df.index = pd.MultiIndex.from_product([[0,1,2,3,4], [0,1]])
df.index.names, df.columns.names = ["a", "b"], ["c", "d"]
df.style.hide([(0,0), (0,1), (1,0)], axis=1).hide([(0,0), (0,1), (1,0)], axis=0)
```
## Pre #44248

## After #44248 (merged)

## After this PR

| https://api.github.com/repos/pandas-dev/pandas/pulls/44272 | 2021-11-01T17:09:57Z | 2021-11-05T13:08:39Z | 2021-11-05T13:08:39Z | 2021-11-05T16:02:46Z |
CLN: DataFrame.__repr__ | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 565c153603b86..0e74ed0ff1769 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -988,15 +988,13 @@ def __repr__(self) -> str:
"""
Return a string representation for a particular DataFrame.
"""
- buf = StringIO("")
if self._info_repr():
+ buf = StringIO("")
self.info(buf=buf)
return buf.getvalue()
repr_params = fmt.get_dataframe_repr_params()
- self.to_string(buf=buf, **repr_params)
-
- return buf.getvalue()
+ return self.to_string(**repr_params)
def _repr_html_(self) -> str | None:
"""
| Minor clean-up. | https://api.github.com/repos/pandas-dev/pandas/pulls/44271 | 2021-11-01T16:15:09Z | 2021-11-01T17:16:10Z | 2021-11-01T17:16:10Z | 2021-11-02T10:10:26Z |
BUG: Series.replace(method='pad') with EA dtypes | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 1b3be65ee66f2..44250663ede8c 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -653,6 +653,8 @@ Other
- Bug in :meth:`RangeIndex.union` with another ``RangeIndex`` with matching (even) ``step`` and starts differing by strictly less than ``step / 2`` (:issue:`44019`)
- Bug in :meth:`RangeIndex.difference` with ``sort=None`` and ``step<0`` failing to sort (:issue:`44085`)
- Bug in :meth:`Series.to_frame` and :meth:`Index.to_frame` ignoring the ``name`` argument when ``name=None`` is explicitly passed (:issue:`44212`)
+- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` with ``value=None`` and ExtensionDtypes (:issue:`44270`)
+-
.. ***DO NOT USE THIS SECTION***
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index cbb029f62732a..8deeb44f65188 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -246,6 +246,14 @@ def __getitem__(
result = self._from_backing_data(result)
return result
+ def _fill_mask_inplace(
+ self, method: str, limit, mask: npt.NDArray[np.bool_]
+ ) -> None:
+ # (for now) when self.ndim == 2, we assume axis=0
+ func = missing.get_fill_func(method, ndim=self.ndim)
+ func(self._ndarray.T, limit=limit, mask=mask.T)
+ return
+
@doc(ExtensionArray.fillna)
def fillna(
self: NDArrayBackedExtensionArrayT, value=None, method=None, limit=None
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 46b505e7384b4..9ffcad2bd7b84 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -1436,6 +1436,24 @@ def _where(
result[~mask] = val
return result
+ def _fill_mask_inplace(
+ self, method: str, limit, mask: npt.NDArray[np.bool_]
+ ) -> None:
+ """
+ Replace values in locations specified by 'mask' using pad or backfill.
+
+ See also
+ --------
+ ExtensionArray.fillna
+ """
+ func = missing.get_fill_func(method)
+ # NB: if we don't copy mask here, it may be altered inplace, which
+ # would mess up the `self[mask] = ...` below.
+ new_values, _ = func(self.astype(object), limit=limit, mask=mask.copy())
+ new_values = self._from_sequence(new_values, dtype=self.dtype)
+ self[mask] = new_values[mask]
+ return
+
@classmethod
def _empty(cls, shape: Shape, dtype: ExtensionDtype):
"""
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b53679e2b584a..732508f9b7fb6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6518,10 +6518,13 @@ def replace(
if isinstance(to_replace, (tuple, list)):
if isinstance(self, ABCDataFrame):
- return self.apply(
+ result = self.apply(
self._constructor_sliced._replace_single,
args=(to_replace, method, inplace, limit),
)
+ if inplace:
+ return
+ return result
self = cast("Series", self)
return self._replace_single(to_replace, method, inplace, limit)
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 68ac7b4968d15..ede0878f15caa 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -97,6 +97,9 @@ def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]:
if na_mask.any():
mask |= isna(arr)
+ if not isinstance(mask, np.ndarray):
+ # e.g. if arr is IntegerArray, then mask is BooleanArray
+ mask = mask.to_numpy(dtype=bool, na_value=False)
return mask
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 391169af598c2..0b4e7fe4ee774 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4889,23 +4889,20 @@ def _replace_single(self, to_replace, method: str, inplace: bool, limit):
replacement value is given in the replace method
"""
- orig_dtype = self.dtype
result = self if inplace else self.copy()
- fill_f = missing.get_fill_func(method)
- mask = missing.mask_missing(result.values, to_replace)
- values, _ = fill_f(result.values, limit=limit, mask=mask)
+ values = result._values
+ mask = missing.mask_missing(values, to_replace)
- if values.dtype == orig_dtype and inplace:
- return
-
- result = self._constructor(values, index=self.index, dtype=self.dtype)
- result = result.__finalize__(self)
+ if isinstance(values, ExtensionArray):
+ # dispatch to the EA's _pad_mask_inplace method
+ values._fill_mask_inplace(method, limit, mask)
+ else:
+ fill_f = missing.get_fill_func(method)
+ values, _ = fill_f(values, limit=limit, mask=mask)
if inplace:
- self._update_inplace(result)
return
-
return result
# error: Cannot determine type of 'shift'
diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py
index fb9c326bdafd9..3a6cd4eb0face 100644
--- a/pandas/tests/series/methods/test_replace.py
+++ b/pandas/tests/series/methods/test_replace.py
@@ -442,6 +442,56 @@ def test_replace_extension_other(self, frame_or_series):
# should not have changed dtype
tm.assert_equal(obj, result)
+ def _check_replace_with_method(self, ser: pd.Series):
+ df = ser.to_frame()
+
+ res = ser.replace(ser[1], method="pad")
+ expected = pd.Series([ser[0], ser[0]] + list(ser[2:]), dtype=ser.dtype)
+ tm.assert_series_equal(res, expected)
+
+ res_df = df.replace(ser[1], method="pad")
+ tm.assert_frame_equal(res_df, expected.to_frame())
+
+ ser2 = ser.copy()
+ res2 = ser2.replace(ser[1], method="pad", inplace=True)
+ assert res2 is None
+ tm.assert_series_equal(ser2, expected)
+
+ res_df2 = df.replace(ser[1], method="pad", inplace=True)
+ assert res_df2 is None
+ tm.assert_frame_equal(df, expected.to_frame())
+
+ def test_replace_ea_dtype_with_method(self, any_numeric_ea_dtype):
+ arr = pd.array([1, 2, pd.NA, 4], dtype=any_numeric_ea_dtype)
+ ser = pd.Series(arr)
+
+ self._check_replace_with_method(ser)
+
+ @pytest.mark.parametrize("as_categorical", [True, False])
+ def test_replace_interval_with_method(self, as_categorical):
+ # in particular interval that can't hold NA
+
+ idx = pd.IntervalIndex.from_breaks(range(4))
+ ser = pd.Series(idx)
+ if as_categorical:
+ ser = ser.astype("category")
+
+ self._check_replace_with_method(ser)
+
+ @pytest.mark.parametrize("as_period", [True, False])
+ @pytest.mark.parametrize("as_categorical", [True, False])
+ def test_replace_datetimelike_with_method(self, as_period, as_categorical):
+ idx = pd.date_range("2016-01-01", periods=5, tz="US/Pacific")
+ if as_period:
+ idx = idx.tz_localize(None).to_period("D")
+
+ ser = pd.Series(idx)
+ ser.iloc[-2] = pd.NaT
+ if as_categorical:
+ ser = ser.astype("category")
+
+ self._check_replace_with_method(ser)
+
def test_replace_with_compiled_regex(self):
# https://github.com/pandas-dev/pandas/issues/35680
s = pd.Series(["a", "b", "c"])
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44270 | 2021-11-01T16:01:05Z | 2021-11-05T18:50:53Z | 2021-11-05T18:50:53Z | 2021-11-05T18:55:38Z |
ENH: Add DataFrameGroupBy.value_counts | diff --git a/doc/source/reference/groupby.rst b/doc/source/reference/groupby.rst
index ccf130d03418c..2bb0659264eb0 100644
--- a/doc/source/reference/groupby.rst
+++ b/doc/source/reference/groupby.rst
@@ -122,6 +122,7 @@ application to columns of a specific data type.
DataFrameGroupBy.skew
DataFrameGroupBy.take
DataFrameGroupBy.tshift
+ DataFrameGroupBy.value_counts
The following methods are available only for ``SeriesGroupBy`` objects.
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 413dbb9cd0850..8b74e485439be 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -217,6 +217,7 @@ Other enhancements
- Added :meth:`.ExponentialMovingWindow.sum` (:issue:`13297`)
- :meth:`Series.str.split` now supports a ``regex`` argument that explicitly specifies whether the pattern is a regular expression. Default is ``None`` (:issue:`43563`, :issue:`32835`, :issue:`25549`)
- :meth:`DataFrame.dropna` now accepts a single label as ``subset`` along with array-like (:issue:`41021`)
+- Added :meth:`DataFrameGroupBy.value_counts` (:issue:`43564`)
- :class:`ExcelWriter` argument ``if_sheet_exists="overlay"`` option added (:issue:`40231`)
- :meth:`read_excel` now accepts a ``decimal`` argument that allow the user to specify the decimal point when parsing string columns to numeric (:issue:`14403`)
- :meth:`.GroupBy.mean`, :meth:`.GroupBy.std`, and :meth:`.GroupBy.var` now supports `Numba <http://numba.pydata.org/>`_ execution with the ``engine`` keyword (:issue:`43731`, :issue:`44862`)
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 986aaa07a913c..48faa1fc46759 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -143,6 +143,7 @@ class OutputKey:
"take",
"transform",
"sample",
+ "value_counts",
]
)
# Valid values of `name` for `groupby.transform(name)`
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 4535010b29c3a..9b341845c7170 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -17,6 +17,7 @@
Iterable,
Mapping,
NamedTuple,
+ Sequence,
TypeVar,
Union,
cast,
@@ -76,6 +77,7 @@
_transform_template,
warn_dropping_nuisance_columns_deprecated,
)
+from pandas.core.groupby.grouper import get_grouper
from pandas.core.indexes.api import (
Index,
MultiIndex,
@@ -1569,6 +1571,193 @@ def func(df):
boxplot = boxplot_frame_groupby
+ def value_counts(
+ self,
+ subset: Sequence[Hashable] | None = None,
+ normalize: bool = False,
+ sort: bool = True,
+ ascending: bool = False,
+ dropna: bool = True,
+ ) -> DataFrame | Series:
+ """
+ Return a Series or DataFrame containing counts of unique rows.
+
+ .. versionadded:: 1.4.0
+
+ Parameters
+ ----------
+ subset : list-like, optional
+ Columns to use when counting unique combinations.
+ normalize : bool, default False
+ Return proportions rather than frequencies.
+ sort : bool, default True
+ Sort by frequencies.
+ ascending : bool, default False
+ Sort in ascending order.
+ dropna : bool, default True
+ Don’t include counts of rows that contain NA values.
+
+ Returns
+ -------
+ Series or DataFrame
+ Series if the groupby as_index is True, otherwise DataFrame.
+
+ See Also
+ --------
+ Series.value_counts: Equivalent method on Series.
+ DataFrame.value_counts: Equivalent method on DataFrame.
+ SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy.
+
+ Notes
+ -----
+ - If the groupby as_index is True then the returned Series will have a
+ MultiIndex with one level per input column.
+ - If the groupby as_index is False then the returned DataFrame will have an
+ additional column with the value_counts. The column is labelled 'count' or
+ 'proportion', depending on the ``normalize`` parameter.
+
+ By default, rows that contain any NA values are omitted from
+ the result.
+
+ By default, the result will be in descending order so that the
+ first element of each group is the most frequently-occurring row.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame({
+ ... 'gender': ['male', 'male', 'female', 'male', 'female', 'male'],
+ ... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'],
+ ... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR']
+ ... })
+
+ >>> df
+ gender education country
+ 0 male low US
+ 1 male medium FR
+ 2 female high US
+ 3 male low FR
+ 4 female high FR
+ 5 male low FR
+
+ >>> df.groupby('gender').value_counts()
+ gender education country
+ female high FR 1
+ US 1
+ male low FR 2
+ US 1
+ medium FR 1
+ dtype: int64
+
+ >>> df.groupby('gender').value_counts(ascending=True)
+ gender education country
+ female high FR 1
+ US 1
+ male low US 1
+ medium FR 1
+ low FR 2
+ dtype: int64
+
+ >>> df.groupby('gender').value_counts(normalize=True)
+ gender education country
+ female high FR 0.50
+ US 0.50
+ male low FR 0.50
+ US 0.25
+ medium FR 0.25
+ dtype: float64
+
+ >>> df.groupby('gender', as_index=False).value_counts()
+ gender education country count
+ 0 female high FR 1
+ 1 female high US 1
+ 2 male low FR 2
+ 3 male low US 1
+ 4 male medium FR 1
+
+ >>> df.groupby('gender', as_index=False).value_counts(normalize=True)
+ gender education country proportion
+ 0 female high FR 0.50
+ 1 female high US 0.50
+ 2 male low FR 0.50
+ 3 male low US 0.25
+ 4 male medium FR 0.25
+ """
+ if self.axis == 1:
+ raise NotImplementedError(
+ "DataFrameGroupBy.value_counts only handles axis=0"
+ )
+
+ with self._group_selection_context():
+ df = self.obj
+
+ in_axis_names = {
+ grouping.name for grouping in self.grouper.groupings if grouping.in_axis
+ }
+ if isinstance(self._selected_obj, Series):
+ name = self._selected_obj.name
+ keys = [] if name in in_axis_names else [self._selected_obj]
+ else:
+ keys = [
+ # Can't use .values because the column label needs to be preserved
+ self._selected_obj.iloc[:, idx]
+ for idx, name in enumerate(self._selected_obj.columns)
+ if name not in in_axis_names
+ ]
+
+ if subset is not None:
+ clashing = set(subset) & set(in_axis_names)
+ if clashing:
+ raise ValueError(
+ f"Keys {clashing} in subset cannot be in "
+ "the groupby column keys"
+ )
+
+ groupings = list(self.grouper.groupings)
+ for key in keys:
+ grouper, _, _ = get_grouper(
+ df,
+ key=key,
+ axis=self.axis,
+ sort=self.sort,
+ dropna=dropna,
+ )
+ groupings += list(grouper.groupings)
+
+ # Take the size of the overall columns
+ gb = df.groupby(
+ groupings,
+ sort=self.sort,
+ observed=self.observed,
+ dropna=self.dropna,
+ )
+ result = cast(Series, gb.size())
+
+ if normalize:
+ # Normalize the results by dividing by the original group sizes.
+ # We are guaranteed to have the first N levels be the
+ # user-requested grouping.
+ levels = list(range(len(self.grouper.groupings), result.index.nlevels))
+ indexed_group_size = result.groupby(
+ result.index.droplevel(levels),
+ sort=self.sort,
+ observed=self.observed,
+ dropna=self.dropna,
+ ).transform("sum")
+
+ result /= indexed_group_size
+
+ if sort:
+ # Sort the values and then resort by the main grouping
+ index_level = range(len(self.grouper.groupings))
+ result = result.sort_values(ascending=ascending).sort_index(
+ level=index_level, sort_remaining=False
+ )
+
+ if not self.as_index:
+ # Convert to frame
+ result = result.reset_index(name="proportion" if normalize else "count")
+ return result.__finalize__(self.obj, method="value_counts")
+
def _wrap_transform_general_frame(
obj: DataFrame, group: DataFrame, res: DataFrame | Series
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index a05f8e581d12f..1e6515084d3b7 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -800,7 +800,7 @@ def get_grouper(
# what are we after, exactly?
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
- any_groupers = any(isinstance(g, Grouper) for g in keys)
+ any_groupers = any(isinstance(g, (Grouper, Grouping)) for g in keys)
any_arraylike = any(
isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys
)
diff --git a/pandas/tests/groupby/test_allowlist.py b/pandas/tests/groupby/test_allowlist.py
index 9d9a2e39e06c7..44778aafdf75f 100644
--- a/pandas/tests/groupby/test_allowlist.py
+++ b/pandas/tests/groupby/test_allowlist.py
@@ -319,6 +319,7 @@ def test_tab_completion(mframe):
"pipe",
"sample",
"ewm",
+ "value_counts",
}
assert results == expected
diff --git a/pandas/tests/groupby/test_frame_value_counts.py b/pandas/tests/groupby/test_frame_value_counts.py
new file mode 100644
index 0000000000000..79ef46db8e95e
--- /dev/null
+++ b/pandas/tests/groupby/test_frame_value_counts.py
@@ -0,0 +1,444 @@
+import numpy as np
+import pytest
+
+from pandas import (
+ CategoricalIndex,
+ DataFrame,
+ Index,
+ MultiIndex,
+ Series,
+)
+import pandas._testing as tm
+
+
+@pytest.fixture
+def education_df():
+ return DataFrame(
+ {
+ "gender": ["male", "male", "female", "male", "female", "male"],
+ "education": ["low", "medium", "high", "low", "high", "low"],
+ "country": ["US", "FR", "US", "FR", "FR", "FR"],
+ }
+ )
+
+
+def test_axis(education_df):
+ gp = education_df.groupby("country", axis=1)
+ with pytest.raises(NotImplementedError, match="axis"):
+ gp.value_counts()
+
+
+def test_bad_subset(education_df):
+ gp = education_df.groupby("country")
+ with pytest.raises(ValueError, match="subset"):
+ gp.value_counts(subset=["country"])
+
+
+def test_basic(education_df):
+ # gh43564
+ result = education_df.groupby("country")[["gender", "education"]].value_counts(
+ normalize=True
+ )
+ expected = Series(
+ data=[0.5, 0.25, 0.25, 0.5, 0.5],
+ index=MultiIndex.from_tuples(
+ [
+ ("FR", "male", "low"),
+ ("FR", "female", "high"),
+ ("FR", "male", "medium"),
+ ("US", "female", "high"),
+ ("US", "male", "low"),
+ ],
+ names=["country", "gender", "education"],
+ ),
+ )
+ tm.assert_series_equal(result, expected)
+
+
+def _frame_value_counts(df, keys, normalize, sort, ascending):
+ return df[keys].value_counts(normalize=normalize, sort=sort, ascending=ascending)
+
+
+@pytest.mark.parametrize("groupby", ["column", "array", "function"])
+@pytest.mark.parametrize("normalize", [True, False])
+@pytest.mark.parametrize(
+ "sort, ascending",
+ [
+ (False, None),
+ (True, True),
+ (True, False),
+ ],
+)
+@pytest.mark.parametrize("as_index", [True, False])
+@pytest.mark.parametrize("frame", [True, False])
+def test_against_frame_and_seriesgroupby(
+ education_df, groupby, normalize, sort, ascending, as_index, frame
+):
+ # test all parameters:
+ # - Use column, array or function as by= parameter
+ # - Whether or not to normalize
+ # - Whether or not to sort and how
+ # - Whether or not to use the groupby as an index
+ # - 3-way compare against:
+ # - apply with :meth:`~DataFrame.value_counts`
+ # - `~SeriesGroupBy.value_counts`
+ by = {
+ "column": "country",
+ "array": education_df["country"].values,
+ "function": lambda x: education_df["country"][x] == "US",
+ }[groupby]
+
+ gp = education_df.groupby(by=by, as_index=as_index)
+ result = gp[["gender", "education"]].value_counts(
+ normalize=normalize, sort=sort, ascending=ascending
+ )
+ if frame:
+ # compare against apply with DataFrame value_counts
+ expected = gp.apply(
+ _frame_value_counts, ["gender", "education"], normalize, sort, ascending
+ )
+
+ if as_index:
+ tm.assert_series_equal(result, expected)
+ else:
+ name = "proportion" if normalize else "count"
+ expected = expected.reset_index().rename({0: name}, axis=1)
+ if groupby == "column":
+ expected = expected.rename({"level_0": "country"}, axis=1)
+ expected["country"] = np.where(expected["country"], "US", "FR")
+ elif groupby == "function":
+ expected["level_0"] = expected["level_0"] == 1
+ else:
+ expected["level_0"] = np.where(expected["level_0"], "US", "FR")
+ tm.assert_frame_equal(result, expected)
+ else:
+ # compare against SeriesGroupBy value_counts
+ education_df["both"] = education_df["gender"] + "-" + education_df["education"]
+ expected = gp["both"].value_counts(
+ normalize=normalize, sort=sort, ascending=ascending
+ )
+ expected.name = None
+ if as_index:
+ index_frame = expected.index.to_frame(index=False)
+ index_frame["gender"] = index_frame["both"].str.split("-").str.get(0)
+ index_frame["education"] = index_frame["both"].str.split("-").str.get(1)
+ del index_frame["both"]
+ index_frame = index_frame.rename({0: None}, axis=1)
+ expected.index = MultiIndex.from_frame(index_frame)
+ tm.assert_series_equal(result, expected)
+ else:
+ expected.insert(1, "gender", expected["both"].str.split("-").str.get(0))
+ expected.insert(2, "education", expected["both"].str.split("-").str.get(1))
+ del expected["both"]
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("normalize", [True, False])
+@pytest.mark.parametrize(
+ "sort, ascending, expected_rows, expected_count, expected_group_size",
+ [
+ (False, None, [0, 1, 2, 3, 4], [1, 1, 1, 2, 1], [1, 3, 1, 3, 1]),
+ (True, False, [4, 3, 1, 2, 0], [1, 2, 1, 1, 1], [1, 3, 3, 1, 1]),
+ (True, True, [4, 1, 3, 2, 0], [1, 1, 2, 1, 1], [1, 3, 3, 1, 1]),
+ ],
+)
+def test_compound(
+ education_df,
+ normalize,
+ sort,
+ ascending,
+ expected_rows,
+ expected_count,
+ expected_group_size,
+):
+ # Multiple groupby keys and as_index=False
+ gp = education_df.groupby(["country", "gender"], as_index=False, sort=False)
+ result = gp["education"].value_counts(
+ normalize=normalize, sort=sort, ascending=ascending
+ )
+ expected = DataFrame()
+ for column in ["country", "gender", "education"]:
+ expected[column] = [education_df[column][row] for row in expected_rows]
+ if normalize:
+ expected["proportion"] = expected_count
+ expected["proportion"] /= expected_group_size
+ else:
+ expected["count"] = expected_count
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.fixture
+def animals_df():
+ return DataFrame(
+ {"key": [1, 1, 1, 1], "num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
+ index=["falcon", "dog", "cat", "ant"],
+ )
+
+
+@pytest.mark.parametrize(
+ "sort, ascending, normalize, expected_data, expected_index",
+ [
+ (False, None, False, [1, 2, 1], [(1, 1, 1), (2, 4, 6), (2, 0, 0)]),
+ (True, True, False, [1, 1, 2], [(1, 1, 1), (2, 6, 4), (2, 0, 0)]),
+ (True, False, False, [2, 1, 1], [(1, 1, 1), (4, 2, 6), (0, 2, 0)]),
+ (True, False, True, [0.5, 0.25, 0.25], [(1, 1, 1), (4, 2, 6), (0, 2, 0)]),
+ ],
+)
+def test_data_frame_value_counts(
+ animals_df, sort, ascending, normalize, expected_data, expected_index
+):
+ # 3-way compare with :meth:`~DataFrame.value_counts`
+ # Tests from frame/methods/test_value_counts.py
+ result_frame = animals_df.value_counts(
+ sort=sort, ascending=ascending, normalize=normalize
+ )
+ expected = Series(
+ data=expected_data,
+ index=MultiIndex.from_arrays(
+ expected_index, names=["key", "num_legs", "num_wings"]
+ ),
+ )
+ tm.assert_series_equal(result_frame, expected)
+
+ result_frame_groupby = animals_df.groupby("key").value_counts(
+ sort=sort, ascending=ascending, normalize=normalize
+ )
+
+ tm.assert_series_equal(result_frame_groupby, expected)
+
+
+@pytest.fixture
+def nulls_df():
+ n = np.nan
+ return DataFrame(
+ {
+ "A": [1, 1, n, 4, n, 6, 6, 6, 6],
+ "B": [1, 1, 3, n, n, 6, 6, 6, 6],
+ "C": [1, 2, 3, 4, 5, 6, n, 8, n],
+ "D": [1, 2, 3, 4, 5, 6, 7, n, n],
+ }
+ )
+
+
+@pytest.mark.parametrize(
+ "group_dropna, count_dropna, expected_rows, expected_values",
+ [
+ (
+ False,
+ False,
+ [0, 1, 3, 5, 7, 6, 8, 2, 4],
+ [0.5, 0.5, 1.0, 0.25, 0.25, 0.25, 0.25, 1.0, 1.0],
+ ),
+ (False, True, [0, 1, 3, 5, 2, 4], [0.5, 0.5, 1.0, 1.0, 1.0, 1.0]),
+ (True, False, [0, 1, 5, 7, 6, 8], [0.5, 0.5, 0.25, 0.25, 0.25, 0.25]),
+ (True, True, [0, 1, 5], [0.5, 0.5, 1.0]),
+ ],
+)
+def test_dropna_combinations(
+ nulls_df, group_dropna, count_dropna, expected_rows, expected_values
+):
+ gp = nulls_df.groupby(["A", "B"], dropna=group_dropna)
+ result = gp.value_counts(normalize=True, sort=True, dropna=count_dropna)
+ columns = DataFrame()
+ for column in nulls_df.columns:
+ columns[column] = [nulls_df[column][row] for row in expected_rows]
+ index = MultiIndex.from_frame(columns)
+ expected = Series(data=expected_values, index=index)
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.fixture
+def names_with_nulls_df(nulls_fixture):
+ return DataFrame(
+ {
+ "key": [1, 1, 1, 1],
+ "first_name": ["John", "Anne", "John", "Beth"],
+ "middle_name": ["Smith", nulls_fixture, nulls_fixture, "Louise"],
+ },
+ )
+
+
+@pytest.mark.parametrize(
+ "dropna, expected_data, expected_index",
+ [
+ (
+ True,
+ [1, 1],
+ MultiIndex.from_arrays(
+ [(1, 1), ("Beth", "John"), ("Louise", "Smith")],
+ names=["key", "first_name", "middle_name"],
+ ),
+ ),
+ (
+ False,
+ [1, 1, 1, 1],
+ MultiIndex(
+ levels=[
+ Index([1]),
+ Index(["Anne", "Beth", "John"]),
+ Index(["Louise", "Smith", np.nan]),
+ ],
+ codes=[[0, 0, 0, 0], [0, 1, 2, 2], [2, 0, 1, 2]],
+ names=["key", "first_name", "middle_name"],
+ ),
+ ),
+ ],
+)
+@pytest.mark.parametrize("normalize", [False, True])
+def test_data_frame_value_counts_dropna(
+ names_with_nulls_df, dropna, normalize, expected_data, expected_index
+):
+ # GH 41334
+ # 3-way compare with :meth:`~DataFrame.value_counts`
+ # Tests with nulls from frame/methods/test_value_counts.py
+ result_frame = names_with_nulls_df.value_counts(dropna=dropna, normalize=normalize)
+ expected = Series(
+ data=expected_data,
+ index=expected_index,
+ )
+ if normalize:
+ expected /= float(len(expected_data))
+
+ tm.assert_series_equal(result_frame, expected)
+
+ result_frame_groupby = names_with_nulls_df.groupby("key").value_counts(
+ dropna=dropna, normalize=normalize
+ )
+
+ tm.assert_series_equal(result_frame_groupby, expected)
+
+
+@pytest.mark.parametrize("as_index", [False, True])
+@pytest.mark.parametrize(
+ "observed, expected_index",
+ [
+ (
+ False,
+ [
+ ("FR", "male", "low"),
+ ("FR", "female", "high"),
+ ("FR", "male", "medium"),
+ ("FR", "female", "low"),
+ ("FR", "female", "medium"),
+ ("FR", "male", "high"),
+ ("US", "female", "high"),
+ ("US", "male", "low"),
+ ("US", "female", "low"),
+ ("US", "female", "medium"),
+ ("US", "male", "high"),
+ ("US", "male", "medium"),
+ ],
+ ),
+ (
+ True,
+ [
+ ("FR", "male", "low"),
+ ("FR", "female", "high"),
+ ("FR", "male", "medium"),
+ ("US", "female", "high"),
+ ("US", "male", "low"),
+ ],
+ ),
+ ],
+)
+@pytest.mark.parametrize(
+ "normalize, expected_data",
+ [
+ (False, np.array([2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0], dtype=np.int64)),
+ (
+ True,
+ np.array([0.5, 0.25, 0.25, 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]),
+ ),
+ ],
+)
+def test_categorical(
+ education_df, as_index, observed, expected_index, normalize, expected_data
+):
+ # Test categorical data whether or not observed
+ gp = education_df.astype("category").groupby(
+ "country", as_index=as_index, observed=observed
+ )
+ result = gp.value_counts(normalize=normalize)
+
+ expected_series = Series(
+ data=expected_data[expected_data > 0.0] if observed else expected_data,
+ index=MultiIndex.from_tuples(
+ expected_index,
+ names=["country", "gender", "education"],
+ ),
+ )
+ for i in range(3):
+ expected_series.index = expected_series.index.set_levels(
+ CategoricalIndex(expected_series.index.levels[i]), level=i
+ )
+
+ if as_index:
+ tm.assert_series_equal(result, expected_series)
+ else:
+ expected = expected_series.reset_index(
+ name="proportion" if normalize else "count"
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "normalize, expected_label, expected_values",
+ [
+ (False, "count", [1, 1, 1]),
+ (True, "proportion", [0.5, 0.5, 1.0]),
+ ],
+)
+def test_mixed_groupings(normalize, expected_label, expected_values):
+ # Test multiple groupings
+ df = DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]})
+ gp = df.groupby([[4, 5, 4], "A", lambda i: 7 if i == 1 else 8], as_index=False)
+ result = gp.value_counts(sort=True, normalize=normalize)
+ expected = DataFrame(
+ {
+ "level_0": [4, 4, 5],
+ "A": [1, 1, 2],
+ "level_2": [8, 8, 7],
+ "B": [1, 3, 2],
+ expected_label: expected_values,
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "test, expected_names",
+ [
+ ("repeat", ["a", None, "d", "b", "b", "e"]),
+ ("level", ["a", None, "d", "b", "c", "level_1"]),
+ ],
+)
+@pytest.mark.parametrize("as_index", [False, True])
+def test_column_name_clashes(test, expected_names, as_index):
+ df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6], "d": [7, 8], "e": [9, 10]})
+ if test == "repeat":
+ df.columns = list("abbde")
+ else:
+ df.columns = list("abcd") + ["level_1"]
+
+ if as_index:
+ result = df.groupby(["a", [0, 1], "d"], as_index=as_index).value_counts()
+ expected = Series(
+ data=(1, 1),
+ index=MultiIndex.from_tuples(
+ [(1, 0, 7, 3, 5, 9), (2, 1, 8, 4, 6, 10)],
+ names=expected_names,
+ ),
+ )
+ tm.assert_series_equal(result, expected)
+ else:
+ with pytest.raises(ValueError, match="cannot insert"):
+ df.groupby(["a", [0, 1], "d"], as_index=as_index).value_counts()
+
+
+def test_ambiguous_grouping():
+ # Test that groupby is not confused by groupings length equal to row count
+ df = DataFrame({"a": [1, 1]})
+ gb = df.groupby([1, 1])
+ result = gb.value_counts()
+ expected = Series([2], index=MultiIndex.from_tuples([[1, 1]], names=[None, "a"]))
+ tm.assert_series_equal(result, expected)
| - [x] closes #43564
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44267 | 2021-11-01T07:34:22Z | 2021-12-19T23:25:42Z | 2021-12-19T23:25:42Z | 2021-12-20T08:05:12Z |
⬆️ UPGRADE: Autoupdate pre-commit config | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 469c4066e2387..21f325dd01913 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -9,7 +9,7 @@ repos:
- id: absolufy-imports
files: ^pandas/
- repo: https://github.com/python/black
- rev: 21.9b0
+ rev: 21.10b0
hooks:
- id: black
- repo: https://github.com/codespell-project/codespell
@@ -74,7 +74,7 @@ repos:
types: [text] # overwrite types: [rst]
types_or: [python, rst]
- repo: https://github.com/asottile/yesqa
- rev: v1.2.3
+ rev: v1.3.0
hooks:
- id: yesqa
additional_dependencies: *flake8_dependencies
| <!-- START pr-commits -->
<!-- END pr-commits -->
## Base PullRequest
default branch (https://github.com/pandas-dev/pandas/tree/master)
## Command results
<details>
<summary>Details: </summary>
<details>
<summary><em>add path</em></summary>
```Shell
/home/runner/work/_actions/technote-space/create-pr-action/v2/node_modules/npm-check-updates/bin
```
</details>
<details>
<summary><em>pip install pre-commit</em></summary>
```Shell
Collecting pre-commit
Downloading pre_commit-2.15.0-py2.py3-none-any.whl (191 kB)
Collecting toml
Downloading toml-0.10.2-py2.py3-none-any.whl (16 kB)
Collecting nodeenv>=0.11.1
Downloading nodeenv-1.6.0-py2.py3-none-any.whl (21 kB)
Collecting virtualenv>=20.0.8
Downloading virtualenv-20.9.0-py2.py3-none-any.whl (5.6 MB)
Collecting pyyaml>=5.1
Downloading PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (682 kB)
Collecting cfgv>=2.0.0
Downloading cfgv-3.3.1-py2.py3-none-any.whl (7.3 kB)
Collecting identify>=1.0.0
Downloading identify-2.3.3-py2.py3-none-any.whl (98 kB)
Collecting platformdirs<3,>=2
Downloading platformdirs-2.4.0-py3-none-any.whl (14 kB)
Collecting filelock<4,>=3.2
Downloading filelock-3.3.2-py3-none-any.whl (9.7 kB)
Collecting six<2,>=1.9.0
Downloading six-1.16.0-py2.py3-none-any.whl (11 kB)
Collecting distlib<1,>=0.3.1
Downloading distlib-0.3.3-py2.py3-none-any.whl (496 kB)
Collecting backports.entry-points-selectable>=1.0.4
Downloading backports.entry_points_selectable-1.1.0-py2.py3-none-any.whl (6.2 kB)
Installing collected packages: six, platformdirs, filelock, distlib, backports.entry-points-selectable, virtualenv, toml, pyyaml, nodeenv, identify, cfgv, pre-commit
Successfully installed backports.entry-points-selectable-1.1.0 cfgv-3.3.1 distlib-0.3.3 filelock-3.3.2 identify-2.3.3 nodeenv-1.6.0 platformdirs-2.4.0 pre-commit-2.15.0 pyyaml-6.0 six-1.16.0 toml-0.10.2 virtualenv-20.9.0
```
### stderr:
```Shell
WARNING: You are using pip version 21.3; however, version 21.3.1 is available.
You should consider upgrading via the '/opt/hostedtoolcache/Python/3.10.0/x64/bin/python -m pip install --upgrade pip' command.
```
</details>
<details>
<summary><em>pre-commit autoupdate || (exit 0);</em></summary>
```Shell
Updating https://github.com/MarcoGorelli/absolufy-imports ... [INFO] Initializing environment for https://github.com/MarcoGorelli/absolufy-imports.
already up to date.
Updating https://github.com/python/black ... [INFO] Initializing environment for https://github.com/python/black.
updating 21.9b0 -> 21.10b0.
Updating https://github.com/codespell-project/codespell ... [INFO] Initializing environment for https://github.com/codespell-project/codespell.
already up to date.
Updating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks.
already up to date.
Updating https://github.com/cpplint/cpplint ... [INFO] Initializing environment for https://github.com/cpplint/cpplint.
already up to date.
Updating https://gitlab.com/pycqa/flake8 ... [INFO] Initializing environment for https://gitlab.com/pycqa/flake8.
already up to date.
Updating https://github.com/PyCQA/isort ... [INFO] Initializing environment for https://github.com/PyCQA/isort.
already up to date.
Updating https://github.com/asottile/pyupgrade ... [INFO] Initializing environment for https://github.com/asottile/pyupgrade.
already up to date.
Updating https://github.com/pre-commit/pygrep-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pygrep-hooks.
already up to date.
Updating https://github.com/asottile/yesqa ... [INFO] Initializing environment for https://github.com/asottile/yesqa.
updating v1.2.3 -> v1.3.0.
```
</details>
<details>
<summary><em>pre-commit run -a || (exit 0);</em></summary>
```Shell
[INFO] Initializing environment for https://gitlab.com/pycqa/flake8:flake8-bugbear==21.3.2,flake8-comprehensions==3.1.0,flake8==3.9.2,pandas-dev-flaker==0.2.0.
[INFO] Initializing environment for https://github.com/asottile/yesqa:flake8-bugbear==21.3.2,flake8-comprehensions==3.1.0,flake8==3.9.2,pandas-dev-flaker==0.2.0.
[INFO] Initializing environment for local:pyright@1.1.171.
[INFO] Initializing environment for local:flake8-rst==0.7.0,flake8==3.7.9.
[INFO] Initializing environment for local:pyyaml,toml.
[INFO] Initializing environment for local:pyyaml.
[INFO] Initializing environment for local.
[INFO] Installing environment for https://github.com/MarcoGorelli/absolufy-imports.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/python/black.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/codespell-project/codespell.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/pre-commit/pre-commit-hooks.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/cpplint/cpplint.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://gitlab.com/pycqa/flake8.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://gitlab.com/pycqa/flake8.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/PyCQA/isort.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/pyupgrade.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/yesqa.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
absolufy-imports................................................................................................Passed
black...........................................................................................................Passed
codespell.......................................................................................................Passed
Debug Statements (Python).......................................................................................Passed
Fix End of Files................................................................................................Passed
Trim Trailing Whitespace........................................................................................Passed
cpplint.........................................................................................................Passed
flake8..........................................................................................................Passed
flake8 (cython).................................................................................................Failed
- hook id: flake8
- exit code: 1
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 478, in run_ast_checks
ast = self.processor.build_ast()
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/processor.py", line 225, in build_ast
return ast.parse("".join(self.lines))
File "/opt/hostedtoolcache/Python/3.10.0/x64/lib/python3.10/ast.py", line 50, in parse
return compile(source, filename, mode, flags,
File "<unknown>", line 9
cimport numpy as cnp
^^^^^^^^^^^^^
SyntaxError: invalid syntax. Perhaps you forgot a comma?
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/hostedtoolcache/Python/3.10.0/x64/lib/python3.10/multiprocessing/pool.py", line 125, in worker
result = (True, func(*args, **kwds))
File "/opt/hostedtoolcache/Python/3.10.0/x64/lib/python3.10/multiprocessing/pool.py", line 48, in mapstar
return list(map(*args))
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 676, in _run_checks
return checker.run_checks()
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 589, in run_checks
self.run_ast_checks()
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 480, in run_ast_checks
row, column = self._extract_syntax_information(e)
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 465, in _extract_syntax_information
lines = physical_line.rstrip("\n").split("\n")
AttributeError: 'int' object has no attribute 'rstrip'
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/bin/flake8", line 8, in <module>
sys.exit(main())
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/main/cli.py", line 22, in main
app.run(argv)
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 363, in run
self._run(argv)
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 351, in _run
self.run_checks()
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 264, in run_checks
self.file_checker_manager.run()
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 321, in run
self.run_parallel()
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 287, in run_parallel
for ret in pool_map:
File "/opt/hostedtoolcache/Python/3.10.0/x64/lib/python3.10/multiprocessing/pool.py", line 448, in <genexpr>
return (item for chunk in result for item in chunk)
File "/opt/hostedtoolcache/Python/3.10.0/x64/lib/python3.10/multiprocessing/pool.py", line 870, in next
raise value
AttributeError: 'int' object has no attribute 'rstrip'
flake8 (cython template)........................................................................................Failed
- hook id: flake8
- exit code: 1
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 478, in run_ast_checks
ast = self.processor.build_ast()
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/processor.py", line 225, in build_ast
return ast.parse("".join(self.lines))
File "/opt/hostedtoolcache/Python/3.10.0/x64/lib/python3.10/ast.py", line 50, in parse
return compile(source, filename, mode, flags,
File "<unknown>", line 12
def ensure_platform_int(object arr):
^^^
SyntaxError: invalid syntax
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/hostedtoolcache/Python/3.10.0/x64/lib/python3.10/multiprocessing/pool.py", line 125, in worker
result = (True, func(*args, **kwds))
File "/opt/hostedtoolcache/Python/3.10.0/x64/lib/python3.10/multiprocessing/pool.py", line 48, in mapstar
return list(map(*args))
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 676, in _run_checks
return checker.run_checks()
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 589, in run_checks
self.run_ast_checks()
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 480, in run_ast_checks
row, column = self._extract_syntax_information(e)
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 465, in _extract_syntax_information
lines = physical_line.rstrip("\n").split("\n")
AttributeError: 'int' object has no attribute 'rstrip'
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/bin/flake8", line 8, in <module>
sys.exit(main())
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/main/cli.py", line 22, in main
app.run(argv)
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 363, in run
self._run(argv)
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 351, in _run
self.run_checks()
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 264, in run_checks
self.file_checker_manager.run()
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 321, in run
self.run_parallel()
File "/home/runner/.cache/pre-commit/repovgz80w1u/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 287, in run_parallel
for ret in pool_map:
File "/opt/hostedtoolcache/Python/3.10.0/x64/lib/python3.10/multiprocessing/pool.py", line 448, in <genexpr>
return (item for chunk in result for item in chunk)
File "/opt/hostedtoolcache/Python/3.10.0/x64/lib/python3.10/multiprocessing/pool.py", line 870, in next
raise value
AttributeError: 'int' object has no attribute 'rstrip'
isort...........................................................................................................Passed
pyupgrade.......................................................................................................Passed
rst ``code`` is two backticks...................................................................................Passed
rst directives end with two colons..............................................................................Passed
rst ``inline code`` next to normal text.........................................................................Passed
Strip unnecessary `# noqa`s.....................................................................................Passed
flake8-rst......................................................................................................Failed
- hook id: flake8-rst
- exit code: 1
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 486, in run_ast_checks
ast = self.processor.build_ast()
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/processor.py", line 212, in build_ast
return compile("".join(self.lines), "", "exec", PyCF_ONLY_AST)
File "", line 86
gb.<TAB> # noqa: E225, E999
^
SyntaxError: invalid syntax
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/bin/flake8-rst", line 8, in <module>
sys.exit(main())
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8_rst/cli.py", line 16, in main
app.run(argv)
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 393, in run
self._run(argv)
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 381, in _run
self.run_checks()
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 300, in run_checks
self.file_checker_manager.run()
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 331, in run
self.run_serial()
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 315, in run_serial
checker.run_checks()
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 598, in run_checks
self.run_ast_checks()
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 488, in run_ast_checks
row, column = self._extract_syntax_information(e)
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 473, in _extract_syntax_information
lines = physical_line.rstrip("\n").split("\n")
AttributeError: 'int' object has no attribute 'rstrip'
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 486, in run_ast_checks
ast = self.processor.build_ast()
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/processor.py", line 212, in build_ast
return compile("".join(self.lines), "", "exec", PyCF_ONLY_AST)
File "", line 35
df.plot.<TAB> # noqa: E225, E999
^
SyntaxError: invalid syntax
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/bin/flake8-rst", line 8, in <module>
sys.exit(main())
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8_rst/cli.py", line 16, in main
app.run(argv)
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 393, in run
self._run(argv)
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 381, in _run
self.run_checks()
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 300, in run_checks
self.file_checker_manager.run()
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 331, in run
self.run_serial()
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 315, in run_serial
checker.run_checks()
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 598, in run_checks
self.run_ast_checks()
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 488, in run_ast_checks
row, column = self._extract_syntax_information(e)
File "/home/runner/.cache/pre-commit/repomx8iiwnk/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 473, in _extract_syntax_information
lines = physical_line.rstrip("\n").split("\n")
AttributeError: 'int' object has no attribute 'rstrip'
Unwanted patterns...............................................................................................Passed
Check Cython casting is `<type>obj`, not `<type> obj`...........................................................Passed
Check for backticks incorrectly rendering because of missing spaces.............................................Passed
Check for unnecessary random seeds in asv benchmarks............................................................Passed
Check for usage of numpy testing or array_equal.................................................................Passed
Check for invalid EA testing....................................................................................Passed
Generate pip dependency from conda..............................................................................Passed
Check flake8 version is synced across flake8, yesqa, and environment.yml........................................Passed
Validate correct capitalization among titles in documentation...................................................Passed
Import pandas.array as pd_array in core.........................................................................Passed
Use bool_t instead of bool in pandas/core/generic.py............................................................Passed
Ensure pandas errors are documented in doc/source/reference/general_utility_functions.rst.......................Passed
```
</details>
</details>
## Changed files
<details>
<summary>Changed file: </summary>
- .pre-commit-config.yaml
</details>
<hr>
[:octocat: Repo](https://github.com/technote-space/create-pr-action) | [:memo: Issues](https://github.com/technote-space/create-pr-action/issues) | [:department_store: Marketplace](https://github.com/marketplace/actions/create-pr-action) | https://api.github.com/repos/pandas-dev/pandas/pulls/44266 | 2021-11-01T07:07:30Z | 2021-11-01T08:56:55Z | 2021-11-01T08:56:54Z | 2021-11-01T08:56:58Z |
Fix formatting typo in user_guide/style.ipynb | diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 5fe619c749d42..3a991b5338c38 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -1412,7 +1412,7 @@
"source": [
"## Limitations\n",
"\n",
- "- DataFrame only `(use Series.to_frame().style)`\n",
+ "- DataFrame only (use `Series.to_frame().style`)\n",
"- The index and columns must be unique\n",
"- No large repr, and construction performance isn't great; although we have some [HTML optimizations](#Optimization)\n",
"- You can only style the *values*, not the index or columns (except with `table_styles` above)\n",
| Seems to be incorrect:
> - DataFrame only `(use Series.to_frame().style)`
Fixed:
> - DataFrame only (use `Series.to_frame().style`) | https://api.github.com/repos/pandas-dev/pandas/pulls/44264 | 2021-11-01T04:24:52Z | 2021-11-01T13:48:52Z | 2021-11-01T13:48:52Z | 2021-11-01T13:48:56Z |
ENH/PERF: RangeIndex.argsort accept kind; pd.isna(rangeindex) fastpath | diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 38553bc1be8d6..c457b52cf4b0e 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -169,13 +169,17 @@ def _isna(obj, inf_as_na: bool = False):
return False
elif isinstance(obj, (np.ndarray, ABCExtensionArray)):
return _isna_array(obj, inf_as_na=inf_as_na)
- elif isinstance(obj, (ABCSeries, ABCIndex)):
+ elif isinstance(obj, ABCIndex):
+ # Try to use cached isna, which also short-circuits for integer dtypes
+ # and avoids materializing RangeIndex._values
+ if not obj._can_hold_na:
+ return obj.isna()
+ return _isna_array(obj._values, inf_as_na=inf_as_na)
+
+ elif isinstance(obj, ABCSeries):
result = _isna_array(obj._values, inf_as_na=inf_as_na)
# box
- if isinstance(obj, ABCSeries):
- result = obj._constructor(
- result, index=obj.index, name=obj.name, copy=False
- )
+ result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)
return result
elif isinstance(obj, ABCDataFrame):
return obj.isna()
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 8aedc56d8e1cd..aed7a7a467db3 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -496,6 +496,7 @@ def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
numpy.ndarray.argsort
"""
ascending = kwargs.pop("ascending", True) # EA compat
+ kwargs.pop("kind", None) # e.g. "mergesort" is irrelevant
nv.validate_argsort(args, kwargs)
if self._range.step > 0:
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 8f37413dd53c8..e34620d4caf17 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -333,11 +333,9 @@ def test_numpy_argsort(self, index):
expected = index.argsort()
tm.assert_numpy_array_equal(result, expected)
- if not isinstance(index, RangeIndex):
- # TODO: add compatibility to RangeIndex?
- result = np.argsort(index, kind="mergesort")
- expected = index.argsort(kind="mergesort")
- tm.assert_numpy_array_equal(result, expected)
+ result = np.argsort(index, kind="mergesort")
+ expected = index.argsort(kind="mergesort")
+ tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
We could use the cached index.isna more if #44262 is addressed. | https://api.github.com/repos/pandas-dev/pandas/pulls/44263 | 2021-11-01T02:22:03Z | 2021-11-01T23:33:42Z | 2021-11-01T23:33:41Z | 2021-11-02T01:43:20Z |
BUG: Series[int8][:3] = range(3) unnecessary upcasting to int64 | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 2a718fdcf16e7..0430db0c9dda7 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -537,7 +537,8 @@ Indexing
- Bug in :meth:`Index.get_indexer_non_unique` when index contains multiple ``np.datetime64("NaT")`` and ``np.timedelta64("NaT")`` (:issue:`43869`)
- Bug in setting a scalar :class:`Interval` value into a :class:`Series` with ``IntervalDtype`` when the scalar's sides are floats and the values' sides are integers (:issue:`44201`)
- Bug when setting string-backed :class:`Categorical` values that can be parsed to datetimes into a :class:`DatetimeArray` or :class:`Series` or :class:`DataFrame` column backed by :class:`DatetimeArray` failing to parse these strings (:issue:`44236`)
-
+- Bug in :meth:`Series.__setitem__` with an integer dtype other than ``int64`` setting with a ``range`` object unnecessarily upcasting to ``int64`` (:issue:`44261`)
+-
Missing
^^^^^^^
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index c0ac9098ec7fc..8be4fc13ed991 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -2197,6 +2197,9 @@ def can_hold_element(arr: ArrayLike, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if dtype.kind in ["i", "u"]:
+ if isinstance(element, range):
+ return _dtype_can_hold_range(element, dtype)
+
if tipo is not None:
if tipo.kind not in ["i", "u"]:
if is_float(element) and element.is_integer():
@@ -2209,6 +2212,7 @@ def can_hold_element(arr: ArrayLike, element: Any) -> bool:
# i.e. nullable IntegerDtype; we can put this into an ndarray
# losslessly iff it has no NAs
return not element._mask.any()
+
return True
# We have not inferred an integer from the dtype
@@ -2249,3 +2253,14 @@ def can_hold_element(arr: ArrayLike, element: Any) -> bool:
return isinstance(element, bytes) and len(element) <= dtype.itemsize
raise NotImplementedError(dtype)
+
+
+def _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool:
+ """
+ maybe_infer_dtype_type infers to int64 (and float64 for very large endpoints),
+ but in many cases a range can be held by a smaller integer dtype.
+ Check if this is one of those cases.
+ """
+ if not len(rng):
+ return True
+ return np.can_cast(rng[0], dtype) and np.can_cast(rng[-1], dtype)
diff --git a/pandas/tests/dtypes/cast/test_can_hold_element.py b/pandas/tests/dtypes/cast/test_can_hold_element.py
new file mode 100644
index 0000000000000..c4776f2a1e143
--- /dev/null
+++ b/pandas/tests/dtypes/cast/test_can_hold_element.py
@@ -0,0 +1,42 @@
+import numpy as np
+
+from pandas.core.dtypes.cast import can_hold_element
+
+
+def test_can_hold_element_range(any_int_numpy_dtype):
+ # GH#44261
+ dtype = np.dtype(any_int_numpy_dtype)
+ arr = np.array([], dtype=dtype)
+
+ rng = range(2, 127)
+ assert can_hold_element(arr, rng)
+
+ # negatives -> can't be held by uint dtypes
+ rng = range(-2, 127)
+ if dtype.kind == "i":
+ assert can_hold_element(arr, rng)
+ else:
+ assert not can_hold_element(arr, rng)
+
+ rng = range(2, 255)
+ if dtype == "int8":
+ assert not can_hold_element(arr, rng)
+ else:
+ assert can_hold_element(arr, rng)
+
+ rng = range(-255, 65537)
+ if dtype.kind == "u":
+ assert not can_hold_element(arr, rng)
+ elif dtype.itemsize < 4:
+ assert not can_hold_element(arr, rng)
+ else:
+ assert can_hold_element(arr, rng)
+
+ # empty
+ rng = range(-(10 ** 10), -(10 ** 10))
+ assert len(rng) == 0
+ # assert can_hold_element(arr, rng)
+
+ rng = range(10 ** 10, 10 ** 10)
+ assert len(rng) == 0
+ assert can_hold_element(arr, rng)
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index 5521bee09b19b..5f0710dfbb85a 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -6,6 +6,8 @@
import numpy as np
import pytest
+from pandas.core.dtypes.common import is_list_like
+
from pandas import (
Categorical,
DataFrame,
@@ -622,6 +624,16 @@ def test_mask_key(self, obj, key, expected, val, indexer_sli):
tm.assert_series_equal(obj, expected)
def test_series_where(self, obj, key, expected, val, is_inplace):
+ if is_list_like(val) and len(val) < len(obj):
+ # Series.where is not valid here
+ if isinstance(val, range):
+ return
+
+ # FIXME: The remaining TestSetitemDT64IntoInt that go through here
+ # are relying on technically-incorrect behavior because Block.where
+ # uses np.putmask instead of expressions.where in those cases,
+ # which has different length-checking semantics.
+
mask = np.zeros(obj.shape, dtype=bool)
mask[key] = True
@@ -973,6 +985,35 @@ def expected(self, obj, val):
return Series(idx)
+class TestSetitemRangeIntoIntegerSeries(SetitemCastingEquivalents):
+ # GH#44261 Setting a range with sufficiently-small integers into
+ # small-itemsize integer dtypes should not need to upcast
+
+ @pytest.fixture
+ def obj(self, any_int_numpy_dtype):
+ dtype = np.dtype(any_int_numpy_dtype)
+ ser = Series(range(5), dtype=dtype)
+ return ser
+
+ @pytest.fixture
+ def val(self):
+ return range(2, 4)
+
+ @pytest.fixture
+ def key(self):
+ return slice(0, 2)
+
+ @pytest.fixture
+ def expected(self, any_int_numpy_dtype):
+ dtype = np.dtype(any_int_numpy_dtype)
+ exp = Series([2, 3, 2, 3, 4], dtype=dtype)
+ return exp
+
+ @pytest.fixture
+ def inplace(self):
+ return True
+
+
def test_setitem_int_as_positional_fallback_deprecation():
# GH#42215 deprecated falling back to positional on __setitem__ with an
# int not contained in the index
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44261 | 2021-11-01T01:59:06Z | 2021-11-04T00:37:12Z | 2021-11-04T00:37:12Z | 2021-11-04T01:12:03Z |
CLN: tighten flake8/cython.cfg | diff --git a/flake8/cython.cfg b/flake8/cython.cfg
index a9584ad2e0994..bf1f41647b34e 100644
--- a/flake8/cython.cfg
+++ b/flake8/cython.cfg
@@ -9,9 +9,5 @@ extend_ignore=
E226,
# missing whitespace around bitwise or shift operator
E227,
- # ambiguous variable name (# FIXME maybe this one can be fixed)
- E741,
# invalid syntax
E999,
- # invalid escape sequence (# FIXME maybe this one can be fixed)
- W605,
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 68c09f83e1cdf..3d099a53163bc 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -259,15 +259,15 @@ cdef inline numeric_t kth_smallest_c(numeric_t* arr, Py_ssize_t k, Py_ssize_t n)
in groupby.pyx
"""
cdef:
- Py_ssize_t i, j, l, m
+ Py_ssize_t i, j, left, m
numeric_t x
- l = 0
+ left = 0
m = n - 1
- while l < m:
+ while left < m:
x = arr[k]
- i = l
+ i = left
j = m
while 1:
@@ -284,7 +284,7 @@ cdef inline numeric_t kth_smallest_c(numeric_t* arr, Py_ssize_t k, Py_ssize_t n)
break
if j < k:
- l = i
+ left = i
if k < i:
m = j
return arr[k]
diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx
index 75eee4d432637..39caf04ddf2f8 100644
--- a/pandas/_libs/hashing.pyx
+++ b/pandas/_libs/hashing.pyx
@@ -52,7 +52,7 @@ def hash_object_array(
mixed array types will raise TypeError.
"""
cdef:
- Py_ssize_t i, l, n
+ Py_ssize_t i, n
uint64_t[:] result
bytes data, k
uint8_t *kb
@@ -97,8 +97,7 @@ def hash_object_array(
"must be string or null"
)
- l = len(data)
- lens[i] = l
+ lens[i] = len(data)
cdata = data
# keep the references alive through the end of the
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 957432df20395..ac423ef6c0ca2 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -179,7 +179,7 @@ cdef class BlockPlacement:
cdef BlockPlacement iadd(self, other):
cdef:
slice s = self._ensure_has_slice()
- Py_ssize_t other_int, start, stop, step, l
+ Py_ssize_t other_int, start, stop, step
if is_integer_object(other) and s is not None:
other_int = <Py_ssize_t>other
@@ -188,7 +188,7 @@ cdef class BlockPlacement:
# BlockPlacement is treated as immutable
return self
- start, stop, step, l = slice_get_indices_ex(s)
+ start, stop, step, _ = slice_get_indices_ex(s)
start += other_int
stop += other_int
@@ -226,14 +226,14 @@ cdef class BlockPlacement:
"""
cdef:
slice nv, s = self._ensure_has_slice()
- Py_ssize_t other_int, start, stop, step, l
+ Py_ssize_t other_int, start, stop, step
ndarray[intp_t, ndim=1] newarr
if s is not None:
# see if we are either all-above or all-below, each of which
# have fastpaths available.
- start, stop, step, l = slice_get_indices_ex(s)
+ start, stop, step, _ = slice_get_indices_ex(s)
if start < loc and stop <= loc:
# We are entirely below, nothing to increment
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index b8f957a4c2ea8..f2b480642e083 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -808,12 +808,12 @@ class _timelex:
# TODO: Change \s --> \s+ (this doesn't match existing behavior)
# TODO: change the punctuation block to punc+ (does not match existing)
# TODO: can we merge the two digit patterns?
- tokens = re.findall('\s|'
- '(?<![\.\d])\d+\.\d+(?![\.\d])'
- '|\d+'
- '|[a-zA-Z]+'
- '|[\./:]+'
- '|[^\da-zA-Z\./:\s]+', stream)
+ tokens = re.findall(r"\s|"
+ r"(?<![\.\d])\d+\.\d+(?![\.\d])"
+ r"|\d+"
+ r"|[a-zA-Z]+"
+ r"|[\./:]+"
+ r"|[^\da-zA-Z\./:\s]+", stream)
# Re-combine token tuples of the form ["59", ",", "456"] because
# in this context the "," is treated as a decimal
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index dcf4323bc8755..337876d610c5e 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -983,14 +983,14 @@ def periodarr_to_dt64arr(const int64_t[:] periodarr, int freq):
"""
cdef:
int64_t[:] out
- Py_ssize_t i, l
+ Py_ssize_t i, N
if freq < 6000: # i.e. FR_DAY, hard-code to avoid need to cast
- l = len(periodarr)
- out = np.empty(l, dtype="i8")
+ N = len(periodarr)
+ out = np.empty(N, dtype="i8")
# We get here with freqs that do not correspond to a datetime64 unit
- for i in range(l):
+ for i in range(N):
out[i] = period_ordinal_to_dt64(periodarr[i], freq)
return out.base # .base to access underlying np.ndarray
@@ -2248,7 +2248,7 @@ cdef class _Period(PeriodMixin):
return (Period, object_state)
def strftime(self, fmt: str) -> str:
- """
+ r"""
Returns the string representation of the :class:`Period`, depending
on the selected ``fmt``. ``fmt`` must be a string
containing one or several directives. The method recognizes the same
diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx
index 79f551c9ebf6f..46f04cf8e15b3 100644
--- a/pandas/_libs/writers.pyx
+++ b/pandas/_libs/writers.pyx
@@ -125,15 +125,15 @@ def max_len_string_array(pandas_string[:] arr) -> Py_ssize_t:
Return the maximum size of elements in a 1-dim string array.
"""
cdef:
- Py_ssize_t i, m = 0, l = 0, length = arr.shape[0]
+ Py_ssize_t i, m = 0, wlen = 0, length = arr.shape[0]
pandas_string val
for i in range(length):
val = arr[i]
- l = word_len(val)
+ wlen = word_len(val)
- if l > m:
- m = l
+ if wlen > m:
+ m = wlen
return m
@@ -143,14 +143,14 @@ cpdef inline Py_ssize_t word_len(object val):
Return the maximum length of a string or bytes value.
"""
cdef:
- Py_ssize_t l = 0
+ Py_ssize_t wlen = 0
if isinstance(val, str):
- l = PyUnicode_GET_LENGTH(val)
+ wlen = PyUnicode_GET_LENGTH(val)
elif isinstance(val, bytes):
- l = PyBytes_GET_SIZE(val)
+ wlen = PyBytes_GET_SIZE(val)
- return l
+ return wlen
# ------------------------------------------------------------------
# PyTables Helpers
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44260 | 2021-10-31T23:50:27Z | 2021-11-01T12:50:38Z | 2021-11-01T12:50:38Z | 2021-11-01T15:43:53Z |
DataFrameGroupby value_counts | diff --git a/.github/actions/build_pandas/action.yml b/.github/actions/build_pandas/action.yml
index 2e4bfea165316..d4777bcd1d079 100644
--- a/.github/actions/build_pandas/action.yml
+++ b/.github/actions/build_pandas/action.yml
@@ -13,5 +13,5 @@ runs:
- name: Build Pandas
run: |
python setup.py build_ext -j 2
- python -m pip install -e . --no-build-isolation --no-use-pep517 --no-index
+ python -m pip install -e . --no-build-isolation --no-use-pep517
shell: bash -l {0}
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml
index 7692dc522522f..2e890506073a8 100644
--- a/.github/workflows/sdist.yml
+++ b/.github/workflows/sdist.yml
@@ -23,7 +23,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- python-version: ["3.8", "3.9", "3.10"]
+ python-version: ["3.8", "3.9"]
concurrency:
group: ${{github.ref}}-${{matrix.python-version}}-sdist
cancel-in-progress: ${{github.event_name == 'pull_request'}}
@@ -53,24 +53,13 @@ jobs:
- uses: conda-incubator/setup-miniconda@v2
with:
activate-environment: pandas-sdist
- python-version: '${{ matrix.python-version }}'
+ python-version: ${{ matrix.python-version }}
- name: Install pandas from sdist
run: |
- pip list
+ conda list
python -m pip install dist/*.gz
- - name: Force oldest supported NumPy
- run: |
- case "${{matrix.python-version}}" in
- 3.8)
- pip install numpy==1.18.5 ;;
- 3.9)
- pip install numpy==1.19.3 ;;
- 3.10)
- pip install numpy==1.21.2 ;;
- esac
-
- name: Import pandas
run: |
cd ..
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 469c4066e2387..b4e50fb051b05 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -132,12 +132,6 @@ repos:
entry: 'np\.random\.seed'
files: ^asv_bench/benchmarks
exclude: ^asv_bench/benchmarks/pandas_vb_common\.py
- - id: np-testing-array-equal
- name: Check for usage of numpy testing or array_equal
- language: pygrep
- entry: '(numpy|np)(\.testing|\.array_equal)'
- files: ^pandas/tests/
- types: [python]
- id: invalid-ea-testing
name: Check for invalid EA testing
language: pygrep
diff --git a/asv_bench/benchmarks/indexing_engines.py b/asv_bench/benchmarks/indexing_engines.py
index 60e07a9d1469c..0cbc300ee2fc4 100644
--- a/asv_bench/benchmarks/indexing_engines.py
+++ b/asv_bench/benchmarks/indexing_engines.py
@@ -35,49 +35,25 @@ class NumericEngineIndexing:
params = [
_get_numeric_engines(),
["monotonic_incr", "monotonic_decr", "non_monotonic"],
- [True, False],
- [10 ** 5, 2 * 10 ** 6], # 2e6 is above SIZE_CUTOFF
]
- param_names = ["engine_and_dtype", "index_type", "unique", "N"]
+ param_names = ["engine_and_dtype", "index_type"]
- def setup(self, engine_and_dtype, index_type, unique, N):
+ def setup(self, engine_and_dtype, index_type):
engine, dtype = engine_and_dtype
-
- if index_type == "monotonic_incr":
- if unique:
- arr = np.arange(N * 3, dtype=dtype)
- else:
- values = list([1] * N + [2] * N + [3] * N)
- arr = np.array(values, dtype=dtype)
- elif index_type == "monotonic_decr":
- if unique:
- arr = np.arange(N * 3, dtype=dtype)[::-1]
- else:
- values = list([1] * N + [2] * N + [3] * N)
- arr = np.array(values, dtype=dtype)[::-1]
- else:
- assert index_type == "non_monotonic"
- if unique:
- arr = np.empty(N * 3, dtype=dtype)
- arr[:N] = np.arange(N * 2, N * 3, dtype=dtype)
- arr[N:] = np.arange(N * 2, dtype=dtype)
- else:
- arr = np.array([1, 2, 3] * N, dtype=dtype)
+ N = 10 ** 5
+ values = list([1] * N + [2] * N + [3] * N)
+ arr = {
+ "monotonic_incr": np.array(values, dtype=dtype),
+ "monotonic_decr": np.array(list(reversed(values)), dtype=dtype),
+ "non_monotonic": np.array([1, 2, 3] * N, dtype=dtype),
+ }[index_type]
self.data = engine(arr)
# code belows avoids populating the mapping etc. while timing.
self.data.get_loc(2)
- self.key_middle = arr[len(arr) // 2]
- self.key_early = arr[2]
-
- def time_get_loc(self, engine_and_dtype, index_type, unique, N):
- self.data.get_loc(self.key_early)
-
- def time_get_loc_near_middle(self, engine_and_dtype, index_type, unique, N):
- # searchsorted performance may be different near the middle of a range
- # vs near an endpoint
- self.data.get_loc(self.key_middle)
+ def time_get_loc(self, engine_and_dtype, index_type):
+ self.data.get_loc(2)
class ObjectEngineIndexing:
diff --git a/ci/deps/actions-38-db-min.yaml b/ci/deps/actions-38-db-min.yaml
index f875f2ef88949..89cd73204c3fd 100644
--- a/ci/deps/actions-38-db-min.yaml
+++ b/ci/deps/actions-38-db-min.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.8
# tools
- - cython>=0.29.24
+ - cython>=0.29.21
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-38-db.yaml b/ci/deps/actions-38-db.yaml
index 3e959f9b7e992..dcb7858948199 100644
--- a/ci/deps/actions-38-db.yaml
+++ b/ci/deps/actions-38-db.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.8
# tools
- - cython>=0.29.24
+ - cython>=0.29.21
- pytest>=6.0
- pytest-xdist>=1.31
- hypothesis>=5.5.3
diff --git a/ci/deps/actions-38-locale.yaml b/ci/deps/actions-38-locale.yaml
index 13b132109effb..46ebaadc3049a 100644
--- a/ci/deps/actions-38-locale.yaml
+++ b/ci/deps/actions-38-locale.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.8
# tools
- - cython>=0.29.24
+ - cython>=0.29.21
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-38-locale_slow.yaml b/ci/deps/actions-38-locale_slow.yaml
index 0ff5dd6c3f7f0..e7276027f2a41 100644
--- a/ci/deps/actions-38-locale_slow.yaml
+++ b/ci/deps/actions-38-locale_slow.yaml
@@ -6,7 +6,7 @@ dependencies:
- python=3.8
# tools
- - cython>=0.29.24
+ - cython>=0.29.21
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml
index cc1fd022ad24c..ab6e876573dc4 100644
--- a/ci/deps/actions-38-minimum_versions.yaml
+++ b/ci/deps/actions-38-minimum_versions.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.8.0
# tools
- - cython=0.29.24
+ - cython=0.29.21
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-38-slow.yaml b/ci/deps/actions-38-slow.yaml
index 903bd25655bd2..a4e6e0d0180d2 100644
--- a/ci/deps/actions-38-slow.yaml
+++ b/ci/deps/actions-38-slow.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.8
# tools
- - cython>=0.29.24
+ - cython>=0.29.21
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-38.yaml b/ci/deps/actions-38.yaml
index 899913d6e8c70..86b038ff7d4b6 100644
--- a/ci/deps/actions-38.yaml
+++ b/ci/deps/actions-38.yaml
@@ -6,7 +6,7 @@ dependencies:
- python=3.8
# tools
- - cython>=0.29.24
+ - cython>=0.29.21
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-39-numpydev.yaml b/ci/deps/actions-39-numpydev.yaml
index 4a6acf55e265f..03181a9d71d1d 100644
--- a/ci/deps/actions-39-numpydev.yaml
+++ b/ci/deps/actions-39-numpydev.yaml
@@ -15,7 +15,7 @@ dependencies:
- pytz
- pip
- pip:
- - cython==0.29.24 # GH#34014
+ - cython==0.29.21 # GH#34014
- "--extra-index-url https://pypi.anaconda.org/scipy-wheels-nightly/simple"
- "--pre"
- "numpy"
diff --git a/ci/deps/actions-39-slow.yaml b/ci/deps/actions-39-slow.yaml
index 2d723354935d2..e8c431c59a564 100644
--- a/ci/deps/actions-39-slow.yaml
+++ b/ci/deps/actions-39-slow.yaml
@@ -6,7 +6,7 @@ dependencies:
- python=3.9
# tools
- - cython>=0.29.24
+ - cython>=0.29.21
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml
index 8751651ece115..322b3ae6007c7 100644
--- a/ci/deps/actions-39.yaml
+++ b/ci/deps/actions-39.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.9
# tools
- - cython>=0.29.24
+ - cython>=0.29.21
- pytest>=6.0
- pytest-cov
- pytest-xdist>=1.31
diff --git a/ci/deps/azure-macos-38.yaml b/ci/deps/azure-macos-38.yaml
index fe6fa6ca37e01..fffa0ec38f889 100644
--- a/ci/deps/azure-macos-38.yaml
+++ b/ci/deps/azure-macos-38.yaml
@@ -32,6 +32,6 @@ dependencies:
- xlwt
- pip
- pip:
- - cython>=0.29.24
+ - cython>=0.29.21
- pyreadstat
- pyxlsb
diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml
index d4e2c482d1c1c..e4b555f6b7f0e 100644
--- a/ci/deps/azure-windows-38.yaml
+++ b/ci/deps/azure-windows-38.yaml
@@ -6,7 +6,7 @@ dependencies:
- python=3.8
# tools
- - cython>=0.29.24
+ - cython>=0.29.21
- pytest>=6.0
- pytest-xdist>=1.31
- hypothesis>=5.5.3
diff --git a/ci/deps/azure-windows-39.yaml b/ci/deps/azure-windows-39.yaml
index 0e352a80a6d34..f8e3332347b69 100644
--- a/ci/deps/azure-windows-39.yaml
+++ b/ci/deps/azure-windows-39.yaml
@@ -6,7 +6,7 @@ dependencies:
- python=3.9
# tools
- - cython>=0.29.24
+ - cython>=0.29.21
- pytest>=6.0
- pytest-xdist>=1.31
- hypothesis>=5.5.3
diff --git a/ci/deps/circle-38-arm64.yaml b/ci/deps/circle-38-arm64.yaml
index 6627ed5073b46..17fe5b4b7b77b 100644
--- a/ci/deps/circle-38-arm64.yaml
+++ b/ci/deps/circle-38-arm64.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.8
# tools
- - cython>=0.29.24
+ - cython>=0.29.21
- pytest>=6.0
- pytest-xdist>=1.31
- hypothesis>=5.5.3
diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst
index ce8d8d5c2ca10..4707d9503dcaa 100644
--- a/doc/source/reference/extensions.rst
+++ b/doc/source/reference/extensions.rst
@@ -61,7 +61,6 @@ objects.
api.extensions.ExtensionArray.nbytes
api.extensions.ExtensionArray.ndim
api.extensions.ExtensionArray.shape
- api.extensions.ExtensionArray.tolist
Additionally, we have some utility methods for ensuring your object
behaves correctly.
diff --git a/doc/source/reference/window.rst b/doc/source/reference/window.rst
index 0be3184a9356c..5e230a533625f 100644
--- a/doc/source/reference/window.rst
+++ b/doc/source/reference/window.rst
@@ -88,7 +88,6 @@ Exponentially-weighted window functions
:toctree: api/
ExponentialMovingWindow.mean
- ExponentialMovingWindow.sum
ExponentialMovingWindow.std
ExponentialMovingWindow.var
ExponentialMovingWindow.corr
diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index e41f938170417..584dd0f52ae28 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -997,15 +997,6 @@ a list of items you want to check for.
df.isin(values)
-To return the DataFrame of booleans where the values are *not* in the original DataFrame,
-use the ``~`` operator:
-
-.. ipython:: python
-
- values = {'ids': ['a', 'b'], 'vals': [1, 3]}
-
- ~df.isin(values)
-
Combine DataFrame's ``isin`` with the ``any()`` and ``all()`` methods to
quickly select subsets of your data that meet a given criteria.
To select a row where each column meets its own criterion:
diff --git a/doc/source/whatsnew/v1.3.4.rst b/doc/source/whatsnew/v1.3.4.rst
index b46744d51d74d..39f2cb33066c3 100644
--- a/doc/source/whatsnew/v1.3.4.rst
+++ b/doc/source/whatsnew/v1.3.4.rst
@@ -16,13 +16,13 @@ Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :meth:`DataFrame.convert_dtypes` incorrectly converts byte strings to strings (:issue:`43183`)
- Fixed regression in :meth:`.GroupBy.agg` where it was failing silently with mixed data types along ``axis=1`` and :class:`MultiIndex` (:issue:`43209`)
-- Fixed regression in :func:`merge` with integer and ``NaN`` keys failing with ``outer`` merge (:issue:`43550`)
+- Fixed regression in :meth:`merge` with integer and ``NaN`` keys failing with ``outer`` merge (:issue:`43550`)
- Fixed regression in :meth:`DataFrame.corr` raising ``ValueError`` with ``method="spearman"`` on 32-bit platforms (:issue:`43588`)
- Fixed performance regression in :meth:`MultiIndex.equals` (:issue:`43549`)
- Fixed performance regression in :meth:`.GroupBy.first` and :meth:`.GroupBy.last` with :class:`StringDtype` (:issue:`41596`)
- Fixed regression in :meth:`Series.cat.reorder_categories` failing to update the categories on the ``Series`` (:issue:`43232`)
- Fixed regression in :meth:`Series.cat.categories` setter failing to update the categories on the ``Series`` (:issue:`43334`)
-- Fixed regression in :func:`read_csv` raising ``UnicodeDecodeError`` exception when ``memory_map=True`` (:issue:`43540`)
+- Fixed regression in :meth:`pandas.read_csv` raising ``UnicodeDecodeError`` exception when ``memory_map=True`` (:issue:`43540`)
- Fixed regression in :meth:`DataFrame.explode` raising ``AssertionError`` when ``column`` is any scalar which is not a string (:issue:`43314`)
- Fixed regression in :meth:`Series.aggregate` attempting to pass ``args`` and ``kwargs`` multiple times to the user supplied ``func`` in certain cases (:issue:`43357`)
- Fixed regression when iterating over a :class:`DataFrame.groupby.rolling` object causing the resulting DataFrames to have an incorrect index if the input groupings were not sorted (:issue:`43386`)
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 5601048c409e1..6030dcf947fc3 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -181,6 +181,7 @@ Other enhancements
- The error raised when an optional dependency can't be imported now includes the original exception, for easier investigation (:issue:`43882`)
- Added :meth:`.ExponentialMovingWindow.sum` (:issue:`13297`)
- :meth:`DataFrame.dropna` now accepts a single label as ``subset`` along with array-like (:issue:`41021`)
+- Added :meth:`DataFrameGroupBy.value_counts` (:issue:`43564`)
-
.. ---------------------------------------------------------------------------
@@ -473,8 +474,6 @@ Datetimelike
- Bug in :func:`to_datetime` with ``format`` and ``pandas.NA`` was raising ``ValueError`` (:issue:`42957`)
- :func:`to_datetime` would silently swap ``MM/DD/YYYY`` and ``DD/MM/YYYY`` formats if the given ``dayfirst`` option could not be respected - now, a warning is raised in the case of delimited date strings (e.g. ``31-12-2012``) (:issue:`12585`)
- Bug in :meth:`date_range` and :meth:`bdate_range` do not return right bound when ``start`` = ``end`` and set is closed on one side (:issue:`43394`)
-- Bug in inplace addition and subtraction of :class:`DatetimeIndex` or :class:`TimedeltaIndex` with :class:`DatetimeArray` or :class:`TimedeltaArray` (:issue:`43904`)
-- Bug in in calling ``np.isnan``, ``np.isfinite``, or ``np.isinf`` on a timezone-aware :class:`DatetimeIndex` incorrectly raising ``TypeError`` (:issue:`43917`)
-
Timedelta
@@ -494,7 +493,6 @@ Numeric
- Bug in :meth:`DataFrame.rank` treating missing values and extreme values as equal (for example ``np.nan`` and ``np.inf``), causing incorrect results when ``na_option="bottom"`` or ``na_option="top`` used (:issue:`41931`)
- Bug in ``numexpr`` engine still being used when the option ``compute.use_numexpr`` is set to ``False`` (:issue:`32556`)
- Bug in :class:`DataFrame` arithmetic ops with a subclass whose :meth:`_constructor` attribute is a callable other than the subclass itself (:issue:`43201`)
-- Bug in arithmetic operations involving :class:`RangeIndex` where the result would have the incorrect ``name`` (:issue:`43962`)
-
Conversion
diff --git a/pandas/_libs/hashtable.pyi b/pandas/_libs/hashtable.pyi
index 9c1de67a7ba2a..bf7df5776896b 100644
--- a/pandas/_libs/hashtable.pyi
+++ b/pandas/_libs/hashtable.pyi
@@ -192,7 +192,6 @@ class UInt16HashTable(HashTable): ...
class UInt8HashTable(HashTable): ...
class StringHashTable(HashTable): ...
class PyObjectHashTable(HashTable): ...
-class IntpHashTable(HashTable): ...
def duplicated_int64(
values: np.ndarray, # const int64_t[:] values
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx
index 6e97c13c644cf..3eb7bcc673cd4 100644
--- a/pandas/_libs/hashtable.pyx
+++ b/pandas/_libs/hashtable.pyx
@@ -65,18 +65,6 @@ cdef Py_ssize_t _INIT_VEC_CAP = 128
include "hashtable_class_helper.pxi"
include "hashtable_func_helper.pxi"
-
-# map derived hash-map types onto basic hash-map types:
-if np.dtype(np.intp) == np.dtype(np.int64):
- IntpHashTable = Int64HashTable
- unique_label_indices = _unique_label_indices_int64
-elif np.dtype(np.intp) == np.dtype(np.int32):
- IntpHashTable = Int32HashTable
- unique_label_indices = _unique_label_indices_int32
-else:
- raise ValueError(np.dtype(np.intp))
-
-
cdef class Factorizer:
cdef readonly:
Py_ssize_t count
@@ -180,3 +168,38 @@ cdef class Int64Factorizer(Factorizer):
self.count = len(self.uniques)
return labels
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def unique_label_indices(const int64_t[:] labels) -> ndarray:
+ """
+ Indices of the first occurrences of the unique labels
+ *excluding* -1. equivalent to:
+ np.unique(labels, return_index=True)[1]
+ """
+ cdef:
+ int ret = 0
+ Py_ssize_t i, n = len(labels)
+ kh_int64_t *table = kh_init_int64()
+ Int64Vector idx = Int64Vector()
+ ndarray[int64_t, ndim=1] arr
+ Int64VectorData *ud = idx.data
+
+ kh_resize_int64(table, min(kh_needed_n_buckets(n), SIZE_HINT_LIMIT))
+
+ with nogil:
+ for i in range(n):
+ kh_put_int64(table, labels[i], &ret)
+ if ret != 0:
+ if needs_resize(ud):
+ with gil:
+ idx.resize()
+ append_data_int64(ud, i)
+
+ kh_destroy_int64(table)
+
+ arr = idx.to_array()
+ arr = arr[np.asarray(labels)[arr].argsort()]
+
+ return arr[1:] if arr.size != 0 and labels[arr[0]] == -1 else arr
diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in
index fb8ce79a924a4..ceb473a0b06af 100644
--- a/pandas/_libs/hashtable_func_helper.pxi.in
+++ b/pandas/_libs/hashtable_func_helper.pxi.in
@@ -470,51 +470,3 @@ cpdef mode(ndarray[htfunc_t] values, bint dropna):
else:
raise TypeError(values.dtype)
-
-
-{{py:
-
-# name, dtype, ttype, c_type
-dtypes = [('Int64', 'int64', 'int64', 'int64_t'),
- ('Int32', 'int32', 'int32', 'int32_t'), ]
-
-}}
-
-{{for name, dtype, ttype, c_type in dtypes}}
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def _unique_label_indices_{{dtype}}(const {{c_type}}[:] labels) -> ndarray:
- """
- Indices of the first occurrences of the unique labels
- *excluding* -1. equivalent to:
- np.unique(labels, return_index=True)[1]
- """
- cdef:
- int ret = 0
- Py_ssize_t i, n = len(labels)
- kh_{{ttype}}_t *table = kh_init_{{ttype}}()
- {{name}}Vector idx = {{name}}Vector()
- ndarray[{{c_type}}, ndim=1] arr
- {{name}}VectorData *ud = idx.data
-
- kh_resize_{{ttype}}(table, min(kh_needed_n_buckets(n), SIZE_HINT_LIMIT))
-
- with nogil:
- for i in range(n):
- kh_put_{{ttype}}(table, labels[i], &ret)
- if ret != 0:
- if needs_resize(ud):
- with gil:
- idx.resize()
- append_data_{{ttype}}(ud, i)
-
- kh_destroy_{{ttype}}(table)
-
- arr = idx.to_array()
- arr = arr[np.asarray(labels)[arr].argsort()]
-
- return arr[1:] if arr.size != 0 and labels[arr[0]] == -1 else arr
-
-{{endfor}}
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi
index dd1fa0780520c..b88a2e4c28cfb 100644
--- a/pandas/_libs/lib.pyi
+++ b/pandas/_libs/lib.pyi
@@ -55,7 +55,7 @@ def is_integer_array(values: np.ndarray, skipna: bool = ...): ...
def is_bool_array(values: np.ndarray, skipna: bool = ...): ...
def fast_multiget(mapping: dict, keys: np.ndarray, default=...) -> np.ndarray: ...
def fast_unique_multiple_list_gen(gen: Generator, sort: bool = ...) -> list: ...
-def fast_unique_multiple_list(lists: list, sort: bool | None = ...) -> list: ...
+def fast_unique_multiple_list(lists: list, sort: bool = ...) -> list: ...
def fast_unique_multiple(arrays: list, sort: bool = ...) -> list: ...
def map_infer(
arr: np.ndarray,
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index ec89e52e2eff7..2c7b052917463 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -350,7 +350,7 @@ def fast_unique_multiple(list arrays, sort: bool = True):
@cython.wraparound(False)
@cython.boundscheck(False)
-def fast_unique_multiple_list(lists: list, sort: bool | None = True) -> list:
+def fast_unique_multiple_list(lists: list, sort: bool = True) -> list:
cdef:
list buf
Py_ssize_t k = len(lists)
@@ -543,7 +543,7 @@ def has_infs(floating[:] arr) -> bool:
def maybe_indices_to_slice(ndarray[intp_t, ndim=1] indices, int max_len):
cdef:
Py_ssize_t i, n = len(indices)
- intp_t k, vstart, vlast, v
+ int k, vstart, vlast, v
if n == 0:
return slice(0, 0)
@@ -553,7 +553,7 @@ def maybe_indices_to_slice(ndarray[intp_t, ndim=1] indices, int max_len):
return indices
if n == 1:
- return slice(vstart, <intp_t>(vstart + 1))
+ return slice(vstart, vstart + 1)
vlast = indices[n - 1]
if vlast < 0 or max_len <= vlast:
@@ -569,12 +569,12 @@ def maybe_indices_to_slice(ndarray[intp_t, ndim=1] indices, int max_len):
return indices
if k > 0:
- return slice(vstart, <intp_t>(vlast + 1), k)
+ return slice(vstart, vlast + 1, k)
else:
if vlast == 0:
return slice(vstart, None, k)
else:
- return slice(vstart, <intp_t>(vlast - 1), k)
+ return slice(vstart, vlast - 1, k)
@cython.wraparound(False)
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index c9f3e1f01a55c..5fe6818ff4b0e 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -203,7 +203,7 @@ cdef extern from "parser/tokenizer.h":
int usecols
- Py_ssize_t expected_fields
+ int expected_fields
BadLineHandleMethod on_bad_lines
# floating point options
@@ -398,7 +398,7 @@ cdef class TextReader:
else:
if len(delimiter) > 1:
raise ValueError('only length-1 separators excluded right now')
- self.parser.delimiter = <char>ord(delimiter)
+ self.parser.delimiter = ord(delimiter)
# ----------------------------------------
# parser options
@@ -410,21 +410,21 @@ cdef class TextReader:
if lineterminator is not None:
if len(lineterminator) != 1:
raise ValueError('Only length-1 line terminators supported')
- self.parser.lineterminator = <char>ord(lineterminator)
+ self.parser.lineterminator = ord(lineterminator)
if len(decimal) != 1:
raise ValueError('Only length-1 decimal markers supported')
- self.parser.decimal = <char>ord(decimal)
+ self.parser.decimal = ord(decimal)
if thousands is not None:
if len(thousands) != 1:
raise ValueError('Only length-1 thousands markers supported')
- self.parser.thousands = <char>ord(thousands)
+ self.parser.thousands = ord(thousands)
if escapechar is not None:
if len(escapechar) != 1:
raise ValueError('Only length-1 escapes supported')
- self.parser.escapechar = <char>ord(escapechar)
+ self.parser.escapechar = ord(escapechar)
self._set_quoting(quotechar, quoting)
@@ -437,7 +437,7 @@ cdef class TextReader:
if comment is not None:
if len(comment) > 1:
raise ValueError('Only length-1 comment characters supported')
- self.parser.commentchar = <char>ord(comment)
+ self.parser.commentchar = ord(comment)
self.parser.on_bad_lines = on_bad_lines
@@ -591,7 +591,7 @@ cdef class TextReader:
raise TypeError('"quotechar" must be a 1-character string')
else:
self.parser.quoting = quoting
- self.parser.quotechar = <char>ord(quote_char)
+ self.parser.quotechar = ord(quote_char)
cdef _make_skiprow_set(self):
if util.is_integer_object(self.skiprows):
@@ -1045,8 +1045,8 @@ cdef class TextReader:
return results
# -> tuple["ArrayLike", int]:
- cdef inline _convert_tokens(self, Py_ssize_t i, int64_t start,
- int64_t end, object name, bint na_filter,
+ cdef inline _convert_tokens(self, Py_ssize_t i, int start, int end,
+ object name, bint na_filter,
kh_str_starts_t *na_hashset,
object na_flist, object col_dtype):
@@ -1537,7 +1537,7 @@ cdef inline int _try_double_nogil(parser_t *parser,
float64_t (*double_converter)(
const char *, char **, char,
char, char, int, int *, int *) nogil,
- int64_t col, int64_t line_start, int64_t line_end,
+ int col, int line_start, int line_end,
bint na_filter, kh_str_starts_t *na_hashset,
bint use_na_flist,
const kh_float64_t *na_flist,
diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c
index 60c6180453c72..6785bf628919a 100644
--- a/pandas/_libs/src/parser/tokenizer.c
+++ b/pandas/_libs/src/parser/tokenizer.c
@@ -25,8 +25,7 @@ GitHub. See Python Software Foundation License and BSD licenses for these.
#include "../headers/portable.h"
-void coliter_setup(coliter_t *self, parser_t *parser, int64_t i,
- int64_t start) {
+void coliter_setup(coliter_t *self, parser_t *parser, int i, int start) {
// column i, starting at 0
self->words = parser->words;
self->col = i;
@@ -412,7 +411,7 @@ static void append_warning(parser_t *self, const char *msg) {
static int end_line(parser_t *self) {
char *msg;
int64_t fields;
- int64_t ex_fields = self->expected_fields;
+ int ex_fields = self->expected_fields;
int64_t bufsize = 100; // for error or warning messages
fields = self->line_fields[self->lines];
@@ -460,8 +459,8 @@ static int end_line(parser_t *self) {
if (self->on_bad_lines == ERROR) {
self->error_msg = malloc(bufsize);
snprintf(self->error_msg, bufsize,
- "Expected %" PRId64 " fields in line %" PRIu64 ", saw %"
- PRId64 "\n", ex_fields, self->file_lines, fields);
+ "Expected %d fields in line %" PRIu64 ", saw %" PRId64 "\n",
+ ex_fields, self->file_lines, fields);
TRACE(("Error at line %d, %d fields\n", self->file_lines, fields));
@@ -472,9 +471,8 @@ static int end_line(parser_t *self) {
// pass up error message
msg = malloc(bufsize);
snprintf(msg, bufsize,
- "Skipping line %" PRIu64 ": expected %" PRId64
- " fields, saw %" PRId64 "\n",
- self->file_lines, ex_fields, fields);
+ "Skipping line %" PRIu64 ": expected %d fields, saw %"
+ PRId64 "\n", self->file_lines, ex_fields, fields);
append_warning(self, msg);
free(msg);
}
diff --git a/pandas/_libs/src/parser/tokenizer.h b/pandas/_libs/src/parser/tokenizer.h
index d403435cfca9e..623d3690f252a 100644
--- a/pandas/_libs/src/parser/tokenizer.h
+++ b/pandas/_libs/src/parser/tokenizer.h
@@ -141,7 +141,7 @@ typedef struct parser_t {
int usecols; // Boolean: 1: usecols provided, 0: none provided
- Py_ssize_t expected_fields;
+ int expected_fields;
BadLineHandleMethod on_bad_lines;
// floating point options
@@ -175,7 +175,7 @@ typedef struct coliter_t {
int64_t col;
} coliter_t;
-void coliter_setup(coliter_t *self, parser_t *parser, int64_t i, int64_t start);
+void coliter_setup(coliter_t *self, parser_t *parser, int i, int start);
#define COLITER_NEXT(iter, word) \
do { \
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index b8f957a4c2ea8..c7b05e6067d67 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -422,8 +422,7 @@ cdef inline object _parse_dateabbr_string(object date_string, datetime default,
cdef:
object ret
# year initialized to prevent compiler warnings
- int year = -1, quarter = -1, month, mnum
- Py_ssize_t date_len
+ int year = -1, quarter = -1, month, mnum, date_len
# special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1
assert isinstance(date_string, str)
diff --git a/pandas/_libs/window/aggregations.pyi b/pandas/_libs/window/aggregations.pyi
index f3317ff5a60be..879809a259266 100644
--- a/pandas/_libs/window/aggregations.pyi
+++ b/pandas/_libs/window/aggregations.pyi
@@ -100,7 +100,7 @@ def roll_weighted_var(
minp: int, # int64_t
ddof: int, # unsigned int
) -> np.ndarray: ... # np.ndarray[np.float64]
-def ewm(
+def ewma(
vals: np.ndarray, # const float64_t[:]
start: np.ndarray, # const int64_t[:]
end: np.ndarray, # const int64_t[:]
@@ -109,7 +109,6 @@ def ewm(
adjust: bool,
ignore_na: bool,
deltas: np.ndarray, # const float64_t[:]
- normalize: bool,
) -> np.ndarray: ... # np.ndarray[np.float64]
def ewmcov(
input_x: np.ndarray, # const float64_t[:]
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 98201a6f58499..c615fc5a2611b 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -1606,13 +1606,13 @@ def roll_weighted_var(const float64_t[:] values, const float64_t[:] weights,
# ----------------------------------------------------------------------
-# Exponentially weighted moving
+# Exponentially weighted moving average
-def ewm(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
- int minp, float64_t com, bint adjust, bint ignore_na,
- const float64_t[:] deltas=None, bint normalize=True) -> np.ndarray:
+def ewma(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
+ int minp, float64_t com, bint adjust, bint ignore_na,
+ const float64_t[:] deltas=None) -> np.ndarray:
"""
- Compute exponentially-weighted moving average or sum using center-of-mass.
+ Compute exponentially-weighted moving average using center-of-mass.
Parameters
----------
@@ -1625,8 +1625,6 @@ def ewm(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
ignore_na : bool
deltas : ndarray (float64 type), optional. If None, implicitly assumes equally
spaced points (used when `times` is not passed)
- normalize : bool, optional.
- If True, calculate the mean. If False, calculate the sum.
Returns
-------
@@ -1638,7 +1636,7 @@ def ewm(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
const float64_t[:] sub_vals
const float64_t[:] sub_deltas=None
ndarray[float64_t] sub_output, output = np.empty(N, dtype=np.float64)
- float64_t alpha, old_wt_factor, new_wt, weighted, old_wt, cur
+ float64_t alpha, old_wt_factor, new_wt, weighted_avg, old_wt, cur
bint is_observation, use_deltas
if N == 0:
@@ -1661,10 +1659,10 @@ def ewm(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
win_size = len(sub_vals)
sub_output = np.empty(win_size, dtype=np.float64)
- weighted = sub_vals[0]
- is_observation = weighted == weighted
+ weighted_avg = sub_vals[0]
+ is_observation = weighted_avg == weighted_avg
nobs = int(is_observation)
- sub_output[0] = weighted if nobs >= minp else NaN
+ sub_output[0] = weighted_avg if nobs >= minp else NaN
old_wt = 1.
with nogil:
@@ -1672,38 +1670,37 @@ def ewm(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
cur = sub_vals[i]
is_observation = cur == cur
nobs += is_observation
- if weighted == weighted:
+ if weighted_avg == weighted_avg:
if is_observation or not ignore_na:
- if normalize:
- if use_deltas:
- old_wt *= old_wt_factor ** sub_deltas[i - 1]
- else:
- old_wt *= old_wt_factor
+ if use_deltas:
+ old_wt *= old_wt_factor ** sub_deltas[i - 1]
else:
- weighted = old_wt_factor * weighted
+ old_wt *= old_wt_factor
if is_observation:
- if normalize:
- # avoid numerical errors on constant series
- if weighted != cur:
- weighted = old_wt * weighted + new_wt * cur
- weighted /= (old_wt + new_wt)
- if adjust:
- old_wt += new_wt
- else:
- old_wt = 1.
+
+ # avoid numerical errors on constant series
+ if weighted_avg != cur:
+ weighted_avg = ((old_wt * weighted_avg) +
+ (new_wt * cur)) / (old_wt + new_wt)
+ if adjust:
+ old_wt += new_wt
else:
- weighted += cur
+ old_wt = 1.
elif is_observation:
- weighted = cur
+ weighted_avg = cur
- sub_output[i] = weighted if nobs >= minp else NaN
+ sub_output[i] = weighted_avg if nobs >= minp else NaN
output[s:e] = sub_output
return output
+# ----------------------------------------------------------------------
+# Exponentially weighted moving covariance
+
+
def ewmcov(const float64_t[:] input_x, const int64_t[:] start, const int64_t[:] end,
int minp, const float64_t[:] input_y, float64_t com, bint adjust,
bint ignore_na, bint bias) -> np.ndarray:
diff --git a/pandas/_libs/window/indexers.pyx b/pandas/_libs/window/indexers.pyx
index 4b3a858ade773..76925aaa22781 100644
--- a/pandas/_libs/window/indexers.pyx
+++ b/pandas/_libs/window/indexers.pyx
@@ -89,11 +89,9 @@ def calculate_variable_window_bounds(
if center:
end_bound = index[0] + index_growth_sign * window_size / 2
for j in range(0, num_values):
- if (index[j] - end_bound) * index_growth_sign < 0:
+ if (index[j] < end_bound) or (index[j] == end_bound and right_closed):
end[0] = j + 1
- elif (index[j] - end_bound) * index_growth_sign == 0 and right_closed:
- end[0] = j + 1
- elif (index[j] - end_bound) * index_growth_sign >= 0:
+ elif index[j] >= end_bound:
end[0] = j
break
@@ -130,6 +128,7 @@ def calculate_variable_window_bounds(
elif ((index[j] - end_bound) * index_growth_sign == 0 and
right_closed):
end[i] = j + 1
+ break
elif (index[j] - end_bound) * index_growth_sign >= 0:
end[i] = j
break
diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index c9f7fd43c1050..fc7e36dda4619 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -33,7 +33,6 @@
IntervalIndex,
MultiIndex,
PeriodIndex,
- RangeIndex,
Series,
TimedeltaIndex,
)
@@ -553,19 +552,8 @@ def assert_categorical_equal(
"""
_check_isinstance(left, right, Categorical)
- exact: bool | str
- if isinstance(left.categories, RangeIndex) or isinstance(
- right.categories, RangeIndex
- ):
- exact = "equiv"
- else:
- # We still want to require exact matches for NumericIndex
- exact = True
-
if check_category_order:
- assert_index_equal(
- left.categories, right.categories, obj=f"{obj}.categories", exact=exact
- )
+ assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
@@ -576,12 +564,11 @@ def assert_categorical_equal(
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
- assert_index_equal(lc, rc, obj=f"{obj}.categories", exact=exact)
+ assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
- exact=exact,
)
assert_attr_equal("ordered", left, right, obj=obj)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index c1b587ce3a6b2..a0256a8adf1fe 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1121,6 +1121,89 @@ def checked_add_with_arr(
return arr + b
+def quantile(x, q, interpolation_method="fraction"):
+ """
+ Compute sample quantile or quantiles of the input array. For example, q=0.5
+ computes the median.
+
+ The `interpolation_method` parameter supports three values, namely
+ `fraction` (default), `lower` and `higher`. Interpolation is done only,
+ if the desired quantile lies between two data points `i` and `j`. For
+ `fraction`, the result is an interpolated value between `i` and `j`;
+ for `lower`, the result is `i`, for `higher` the result is `j`.
+
+ Parameters
+ ----------
+ x : ndarray
+ Values from which to extract score.
+ q : scalar or array
+ Percentile at which to extract score.
+ interpolation_method : {'fraction', 'lower', 'higher'}, optional
+ This optional parameter specifies the interpolation method to use,
+ when the desired quantile lies between two data points `i` and `j`:
+
+ - fraction: `i + (j - i)*fraction`, where `fraction` is the
+ fractional part of the index surrounded by `i` and `j`.
+        - lower: `i`.
+ - higher: `j`.
+
+ Returns
+ -------
+ score : float
+ Score at percentile.
+
+ Examples
+ --------
+ >>> from scipy import stats
+ >>> a = np.arange(100)
+ >>> stats.scoreatpercentile(a, 50)
+ 49.5
+
+ """
+ x = np.asarray(x)
+ mask = isna(x)
+
+ x = x[~mask]
+
+ values = np.sort(x)
+
+ def _interpolate(a, b, fraction):
+ """
+ Returns the point at the given fraction between a and b, where
+ 'fraction' must be between 0 and 1.
+ """
+ return a + (b - a) * fraction
+
+ def _get_score(at):
+ if len(values) == 0:
+ return np.nan
+
+ idx = at * (len(values) - 1)
+ if idx % 1 == 0:
+ score = values[int(idx)]
+ else:
+ if interpolation_method == "fraction":
+ score = _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1)
+ elif interpolation_method == "lower":
+ score = values[np.floor(idx)]
+ elif interpolation_method == "higher":
+ score = values[np.ceil(idx)]
+ else:
+ raise ValueError(
+                    "interpolation_method can only be 'fraction', "
+                    "'lower' or 'higher'"
+ )
+
+ return score
+
+ if is_scalar(q):
+ return _get_score(q)
+
+ q = np.asarray(q, np.float64)
+ result = [_get_score(x) for x in q]
+ return np.array(result, dtype=np.float64)
+
+
# --------------- #
# select n #
# --------------- #
diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py
index fe09a044566f8..f114278caf3ee 100644
--- a/pandas/core/arraylike.py
+++ b/pandas/core/arraylike.py
@@ -357,7 +357,7 @@ def reconstruct(result):
return result
if "out" in kwargs:
- result = dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs)
+ result = _dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs)
return reconstruct(result)
# We still get here with kwargs `axis` for e.g. np.maximum.accumulate
@@ -371,8 +371,6 @@ def reconstruct(result):
# * len(inputs) > 1 is doable when we know that we have
# aligned blocks / dtypes.
inputs = tuple(np.asarray(x) for x in inputs)
- # Note: we can't use default_array_ufunc here bc reindexing means
- # that `self` may not be among `inputs`
result = getattr(ufunc, method)(*inputs, **kwargs)
elif self.ndim == 1:
# ufunc(series, ...)
@@ -389,7 +387,7 @@ def reconstruct(result):
else:
# otherwise specific ufunc methods (eg np.<ufunc>.accumulate(..))
# Those can have an axis keyword and thus can't be called block-by-block
- result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs)
+ result = getattr(ufunc, method)(np.asarray(inputs[0]), **kwargs)
result = reconstruct(result)
return result
@@ -410,7 +408,7 @@ def _standardize_out_kwarg(**kwargs) -> dict:
return kwargs
-def dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
+def _dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
"""
If we have an `out` keyword, then call the ufunc without `out` and then
set the result into the given `out`.
@@ -454,19 +452,3 @@ def _assign_where(out, result, where) -> None:
out[:] = result
else:
np.putmask(out, where, result)
-
-
-def default_array_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
- """
- Fallback to the behavior we would get if we did not define __array_ufunc__.
-
- Notes
- -----
- We are assuming that `self` is among `inputs`.
- """
- if not any(x is self for x in inputs):
- raise NotImplementedError
-
- new_inputs = [x if x is not self else np.asarray(x) for x in inputs]
-
- return getattr(ufunc, method)(*new_inputs, **kwargs)
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 46b505e7384b4..f8a938a45f3f1 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -66,7 +66,6 @@
from pandas.core.dtypes.missing import isna
from pandas.core import (
- arraylike,
missing,
ops,
)
@@ -132,7 +131,6 @@ class ExtensionArray:
searchsorted
shift
take
- tolist
unique
view
_concat_same_type
@@ -1363,22 +1361,6 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
# ------------------------------------------------------------------------
# Non-Optimized Default Methods
- def tolist(self) -> list:
- """
- Return a list of the values.
-
- These are each a scalar type, which is a Python scalar
- (for str, int, float) or a pandas scalar
- (for Timestamp/Timedelta/Interval/Period)
-
- Returns
- -------
- list
- """
- if self.ndim > 1:
- return [x.tolist() for x in self]
- return list(self)
-
def delete(self: ExtensionArrayT, loc: PositionalIndexer) -> ExtensionArrayT:
indexer = np.delete(np.arange(len(self)), loc)
return self.take(indexer)
@@ -1451,25 +1433,6 @@ def _empty(cls, shape: Shape, dtype: ExtensionDtype):
)
return result
- def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
- if any(
- isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)) for other in inputs
- ):
- return NotImplemented
-
- result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
- self, ufunc, method, *inputs, **kwargs
- )
- if result is not NotImplemented:
- return result
-
- if "out" in kwargs:
- return arraylike.dispatch_ufunc_with_out(
- self, ufunc, method, *inputs, **kwargs
- )
-
- return arraylike.default_array_ufunc(self, ufunc, method, *inputs, **kwargs)
-
class ExtensionOpsMixin:
"""
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index 58e7abbbe1ddd..38fdc86a47783 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -741,6 +741,3 @@ def _maybe_mask_result(self, result, mask, other, op_name: str):
else:
result[mask] = np.nan
return result
-
- def __abs__(self):
- return self.copy()
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index c7f587b35f557..8bc285f6afba1 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -40,6 +40,7 @@
Ordered,
PositionalIndexer2D,
PositionalIndexerTuple,
+ Scalar,
ScalarIndexer,
SequenceIndexer,
Shape,
@@ -563,11 +564,17 @@ def itemsize(self) -> int:
"""
return self.categories.itemsize
- def to_list(self):
+ def tolist(self) -> list[Scalar]:
"""
- Alias for tolist.
+ Return a list of the values.
+
+ These are each a scalar type, which is a Python scalar
+ (for str, int, float) or a pandas scalar
+ (for Timestamp/Timedelta/Interval/Period)
"""
- return self.tolist()
+ return list(self)
+
+ to_list = tolist
@classmethod
def _from_inferred_categories(
@@ -1161,17 +1168,6 @@ def add_categories(self, new_categories, inplace=no_default):
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
-
- Examples
- --------
- >>> c = pd.Categorical(['c', 'b', 'c'])
- >>> c
- ['c', 'b', 'c']
- Categories (2, object): ['b', 'c']
-
- >>> c.add_categories(['d', 'a'])
- ['c', 'b', 'c']
- Categories (4, object): ['b', 'c', 'd', 'a']
"""
if inplace is not no_default:
warn(
@@ -1236,17 +1232,6 @@ def remove_categories(self, removals, inplace=no_default):
add_categories : Add new categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
-
- Examples
- --------
- >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd'])
- >>> c
- ['a', 'c', 'b', 'c', 'd']
- Categories (4, object): ['a', 'b', 'c', 'd']
-
- >>> c.remove_categories(['d', 'a'])
- [NaN, 'c', 'b', 'c', NaN]
- Categories (2, object): ['b', 'c']
"""
if inplace is not no_default:
warn(
@@ -1306,23 +1291,6 @@ def remove_unused_categories(self, inplace=no_default):
add_categories : Add new categories.
remove_categories : Remove the specified categories.
set_categories : Set the categories to the specified ones.
-
- Examples
- --------
- >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd'])
- >>> c
- ['a', 'c', 'b', 'c', 'd']
- Categories (4, object): ['a', 'b', 'c', 'd']
-
- >>> c[2] = 'a'
- >>> c[4] = 'c'
- >>> c
- ['a', 'c', 'a', 'c', 'c']
- Categories (4, object): ['a', 'b', 'c', 'd']
-
- >>> c.remove_unused_categories()
- ['a', 'c', 'a', 'c', 'c']
- Categories (2, object): ['a', 'c']
"""
if inplace is not no_default:
warn(
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 72c00dfe7c65a..301bd539c6164 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -714,9 +714,8 @@ def _validate_listlike(self, value, allow_object: bool = False):
msg = self._validation_error_message(value, True)
raise TypeError(msg)
- # Do type inference if necessary up front (after unpacking PandasArray)
+ # Do type inference if necessary up front
# e.g. we passed PeriodIndex.values and got an ndarray of Periods
- value = extract_array(value, extract_numpy=True)
value = pd_array(value)
value = extract_array(value, extract_numpy=True)
@@ -1411,7 +1410,7 @@ def __iadd__(self, other):
if not is_period_dtype(self.dtype):
# restore freq, which is invalidated by setitem
- self._freq = result.freq
+ self._freq = result._freq
return self
def __isub__(self, other):
@@ -1420,7 +1419,7 @@ def __isub__(self, other):
if not is_period_dtype(self.dtype):
# restore freq, which is invalidated by setitem
- self._freq = result.freq
+ self._freq = result._freq
return self
# --------------------------------------------------------------
@@ -1700,17 +1699,6 @@ class TimelikeOps(DatetimeLikeArrayMixin):
Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
"""
- def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
- if (
- ufunc in [np.isnan, np.isinf, np.isfinite]
- and len(inputs) == 1
- and inputs[0] is self
- ):
- # numpy 1.18 changed isinf and isnan to not raise on dt64/td64
- return getattr(ufunc, method)(self._ndarray, **kwargs)
-
- return super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
-
def _round(self, freq, mode, ambiguous, nonexistent):
# round the local times
if is_datetime64tz_dtype(self.dtype):
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 4fecbe4be9681..fa286dc2e3b85 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -635,13 +635,17 @@ def __iter__(self):
chunksize = 10000
chunks = (length // chunksize) + 1
- for i in range(chunks):
- start_i = i * chunksize
- end_i = min((i + 1) * chunksize, length)
- converted = ints_to_pydatetime(
- data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
- )
- yield from converted
+ with warnings.catch_warnings():
+ # filter out warnings about Timestamp.freq
+ warnings.filterwarnings("ignore", category=FutureWarning)
+
+ for i in range(chunks):
+ start_i = i * chunksize
+ end_i = min((i + 1) * chunksize, length)
+ converted = ints_to_pydatetime(
+ data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
+ )
+ yield from converted
def astype(self, dtype, copy: bool = True):
# We handle
diff --git a/pandas/core/base.py b/pandas/core/base.py
index a1bf448df18c4..24fa362eea9c3 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -740,6 +740,9 @@ def tolist(self):
numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
nested list of Python scalars.
"""
+ if not isinstance(self._values, np.ndarray):
+ # check for ndarray instead of dtype to catch DTA/TDA
+ return list(self._values)
return self._values.tolist()
to_list = tolist
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index c6f131a9daba6..8d3fd0c520a6d 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -297,7 +297,6 @@ def array(
from pandas.core.arrays import (
BooleanArray,
DatetimeArray,
- ExtensionArray,
FloatingArray,
IntegerArray,
IntervalArray,
@@ -311,7 +310,7 @@ def array(
msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
raise ValueError(msg)
- if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)):
+ if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ABCExtensionArray)):
# Note: we exclude np.ndarray here, will do type inference on it
dtype = data.dtype
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 565c153603b86..6e4a2a2c51ba9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2610,13 +2610,6 @@ def to_feather(self, path: FilePathOrBuffer[bytes], **kwargs) -> None:
`compression_level`, `chunksize` and `version` keywords.
.. versionadded:: 1.1.0
-
- Notes
- -----
- This function writes the dataframe as a `feather file
- <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default
- index. For saving the DataFrame with your custom index use a method that
- supports custom indices e.g. `to_parquet`.
"""
from pandas.io.feather_format import to_feather
@@ -6661,7 +6654,7 @@ def nlargest(self, n: int, columns: IndexLabel, keep: str = "first") -> DataFram
- ``first`` : prioritize the first occurrence(s)
- ``last`` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even it means
- selecting more than `n` items.
+ selecting more than `n` items.
Returns
-------
@@ -9424,17 +9417,17 @@ def round(
"""
from pandas.core.reshape.concat import concat
- def _dict_round(df: DataFrame, decimals):
+ def _dict_round(df, decimals):
for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
- def _series_round(ser: Series, decimals: int):
- if is_integer_dtype(ser.dtype) or is_float_dtype(ser.dtype):
- return ser.round(decimals)
- return ser
+ def _series_round(s, decimals):
+ if is_integer_dtype(s) or is_float_dtype(s):
+ return s.round(decimals)
+ return s
nv.validate_round(args, kwargs)
@@ -10645,13 +10638,6 @@ def isin(self, values) -> DataFrame:
falcon True True
dog False True
- To check if ``values`` is *not* in the DataFrame, use the ``~`` operator:
-
- >>> ~df.isin([0, 2])
- num_legs num_wings
- falcon False False
- dog True False
-
When ``values`` is a dict, we can pass values to check for each
column separately:
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 8a330d08bef78..7a2dcb9fdcce6 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -15,8 +15,10 @@
Callable,
Hashable,
Iterable,
+ List,
Mapping,
NamedTuple,
+ Sequence,
TypeVar,
Union,
cast,
@@ -67,7 +69,10 @@
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
-from pandas.core.groupby import base
+from pandas.core.groupby import (
+ base,
+ ops,
+)
from pandas.core.groupby.groupby import (
GroupBy,
_agg_template,
@@ -75,6 +80,11 @@
_transform_template,
warn_dropping_nuisance_columns_deprecated,
)
+from pandas.core.groupby.grouper import (
+ Grouper,
+ Grouping,
+ get_grouper,
+)
from pandas.core.indexes.api import (
Index,
MultiIndex,
@@ -1568,6 +1578,130 @@ def func(df):
boxplot = boxplot_frame_groupby
+ def value_counts(
+ self,
+ subset: Sequence[Hashable] | None = None,
+ normalize: bool = False,
+ sort: bool = True,
+ ascending: bool = False,
+ dropna: bool = True,
+ ) -> DataFrame | Series:
+ with self._group_selection_context():
+ df = self.obj
+
+ # Check for index rather than column grouping
+ index_grouping = self.grouper.groupings[0].name is None
+
+ # Try to find column names
+ if index_grouping:
+ keys = []
+ remaining_columns = self._selected_obj.columns
+ elif isinstance(self._selected_obj, Series):
+ keys = [grouping.name for grouping in self.grouper.groupings]
+ remaining_columns = [self._selected_obj.name]
+ else:
+ if isinstance(self.keys, ops.BaseGrouper):
+ keys = [grouping.name for grouping in self.keys.groupings]
+ elif isinstance(self.keys, str):
+ keys = [self.keys]
+ else:
+ keys = cast(List[str], self.keys)
+
+ remaining_columns = [
+ key for key in self._selected_obj.columns if key not in keys
+ ]
+
+ if subset is not None:
+ remaining_columns = [key for key in subset if key not in keys]
+
+ if dropna:
+ df = df.dropna(subset=remaining_columns, axis="index", how="any")
+
+ grouper, _, _ = get_grouper(
+ df,
+ key=self.keys,
+ axis=self.axis,
+ level=self.level,
+ sort=self.sort,
+ mutated=self.mutated,
+ )
+
+ groupings = grouper.groupings + [
+ cast(Grouping, Grouper(key)) for key in remaining_columns
+ ]
+
+ result = df.groupby(
+ groupings,
+ as_index=self.as_index,
+ sort=self.sort,
+ dropna=dropna,
+ ).size()
+ result.name = "size"
+
+ if normalize:
+ indexed_group_size = df.groupby(
+ grouper, sort=self.sort, dropna=dropna
+ ).size()
+ if self.as_index:
+ if index_grouping:
+ # The common index needs a common name
+ indexed_group_size.index.set_names("Group", inplace=True)
+ result.index.set_names("Group", level=0, inplace=True)
+ # Use indexed group size series
+ result /= indexed_group_size
+ if index_grouping:
+ result.index.set_names(None, level=0, inplace=True)
+ else:
+ # Make indexed key group size series
+ indexed_group_size.name = "group_size"
+ if index_grouping:
+ # Get the column name of the added groupby index column
+ index_column_name = result.columns[0]
+ indexed_group_size.index.set_names(
+ index_column_name, inplace=True
+ )
+ left_on = index_column_name
+ else:
+ left_on = keys
+ if not index_grouping and len(keys) == 1:
+ # Compose with single key group size series
+ group_size = indexed_group_size[result[keys[0]]]
+ else:
+ # Merge multiple key group size series
+ merged = result.merge(
+ indexed_group_size,
+ how="left",
+ left_on=left_on,
+ right_index=True,
+ )
+ group_size = merged["group_size"]
+
+ result["size"] /= group_size.values
+
+ if sort:
+ if self.as_index:
+ if index_grouping:
+ level: Any = 0
+ else:
+ level = keys
+ result = (
+ cast(Series, result)
+ .sort_values(ascending=ascending)
+ .sort_index(level=level, sort_remaining=False)
+ )
+ else:
+ if index_grouping:
+ by: Any = "level_0"
+ else:
+ by = keys
+ result = (
+ cast(DataFrame, result)
+ .sort_values(by="size", ascending=ascending)
+ .sort_values(by=by, ascending=True)
+ )
+
+ return result
+
def _wrap_transform_general_frame(
obj: DataFrame, group: DataFrame, res: DataFrame | Series
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index d583c2fc6dd9e..e83857ccff973 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -2555,16 +2555,13 @@ def nth(
return out
result_index = self.grouper.result_index
- if self.axis == 0:
- out.index = result_index[ids[mask]]
- if not self.observed and isinstance(result_index, CategoricalIndex):
- out = out.reindex(result_index)
+ out.index = result_index[ids[mask]]
- out = self._reindex_output(out)
- else:
- out.columns = result_index[ids[mask]]
+ if not self.observed and isinstance(result_index, CategoricalIndex):
+ out = out.reindex(result_index)
- return out.sort_index(axis=self.axis) if self.sort else out
+ out = self._reindex_output(out)
+ return out.sort_index() if self.sort else out
# dropna is truthy
if not is_integer(n):
@@ -2608,9 +2605,7 @@ def nth(
mutated=self.mutated,
)
- grb = dropped.groupby(
- grouper, as_index=self.as_index, sort=self.sort, axis=self.axis
- )
+ grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len)._values
diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py
index bf901683de602..a2d1695578646 100644
--- a/pandas/core/indexers/utils.py
+++ b/pandas/core/indexers/utils.py
@@ -171,7 +171,7 @@ def check_setitem_lengths(indexer, value, values) -> bool:
if not (
isinstance(indexer, np.ndarray)
and indexer.dtype == np.bool_
- and indexer.sum() == len(value)
+ and len(indexer[indexer]) == len(value)
):
raise ValueError(
"cannot set using a list-like indexer "
diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index e497012f23b68..8efc07a2ef148 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -147,7 +147,7 @@ def _get_combined_index(
for other in indexes[1:]:
index = index.intersection(other)
else:
- index = union_indexes(indexes, sort=False)
+ index = union_indexes(indexes, sort=sort)
index = ensure_index(index)
if sort:
@@ -163,7 +163,7 @@ def _get_combined_index(
return index
-def union_indexes(indexes, sort: bool | None = True) -> Index:
+def union_indexes(indexes, sort: bool = True) -> Index:
"""
Return the union of indexes.
@@ -219,7 +219,7 @@ def conv(i):
return result.union_many(indexes[1:])
else:
for other in indexes[1:]:
- result = result.union(other, sort=None if sort else False)
+ result = result.union(other)
return result
elif kind == "array":
index = indexes[0]
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index a0902a5fb32fe..dc4dcbc0f2e5d 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -71,7 +71,6 @@
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_T = TypeVar("_T", bound="DatetimeIndexOpsMixin")
-_TDT = TypeVar("_TDT", bound="DatetimeTimedeltaMixin")
@inherit_names(
@@ -551,7 +550,7 @@ def _can_fast_union(self: _T, other: _T) -> bool:
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
- def _fast_union(self: _TDT, other: _TDT, sort=None) -> _TDT:
+ def _fast_union(self: _T, other: _T, sort=None) -> _T:
# Caller is responsible for ensuring self and other are non-empty
# to make our life easier, "sort" the two ranges
@@ -698,6 +697,15 @@ def insert(self, loc: int, item):
# --------------------------------------------------------------------
# NDArray-Like Methods
+ def __array_wrap__(self, result, context=None):
+ """
+ Gets called after a ufunc and other functions.
+ """
+ out = super().__array_wrap__(result, context=context)
+ if isinstance(out, DatetimeTimedeltaMixin) and self.freq is not None:
+ out = out._with_freq("infer")
+ return out
+
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take((), kwargs)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index fe97d61be7548..ac73254816259 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2163,7 +2163,7 @@ def repeat(self, repeats: int, axis=None) -> MultiIndex:
return MultiIndex(
levels=self.levels,
codes=[
- level_codes.view(np.ndarray).astype(np.intp, copy=False).repeat(repeats)
+ level_codes.view(np.ndarray).astype(np.intp).repeat(repeats)
for level_codes in self.codes
],
names=self.names,
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 8aedc56d8e1cd..10ea72bada316 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -29,6 +29,7 @@
cache_readonly,
doc,
)
+from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.common import (
ensure_platform_int,
@@ -194,6 +195,17 @@ def _data(self) -> np.ndarray: # type: ignore[override]
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
+ @cache_readonly
+ def _cached_int64index(self) -> Int64Index:
+ return Int64Index._simple_new(self._data, name=self.name)
+
+ @property
+ def _int64index(self) -> Int64Index:
+ # wrap _cached_int64index so we can be sure its name matches self.name
+ res = self._cached_int64index
+ res._name = self._name
+ return res
+
def _get_data_as_items(self):
"""return a list of tuples of start, stop, step"""
rng = self._range
@@ -420,6 +432,24 @@ def _get_indexer(
# --------------------------------------------------------------------
+ def repeat(self, repeats, axis=None) -> Int64Index:
+ return self._int64index.repeat(repeats, axis=axis)
+
+ def delete(self, loc) -> Int64Index: # type: ignore[override]
+ return self._int64index.delete(loc)
+
+ def take(
+ self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
+ ) -> Int64Index:
+ with rewrite_exception("Int64Index", type(self).__name__):
+ return self._int64index.take(
+ indices,
+ axis=axis,
+ allow_fill=allow_fill,
+ fill_value=fill_value,
+ **kwargs,
+ )
+
def tolist(self) -> list[int]:
return list(self._range)
@@ -696,8 +726,7 @@ def _union(self, other: Index, sort):
and (end_s - step_o <= end_o)
):
return type(self)(start_r, end_r + step_o, step_o)
-
- return super()._union(other, sort=sort)
+ return self._int64index._union(other, sort=sort)
def _difference(self, other, sort=None):
# optimized set operation if we have another RangeIndex
@@ -960,8 +989,7 @@ def __floordiv__(self, other):
start = self.start // other
new_range = range(start, start + 1, 1)
return self._simple_new(new_range, name=self.name)
-
- return super().__floordiv__(other)
+ return self._int64index // other
# --------------------------------------------------------------------
# Reductions
@@ -995,30 +1023,30 @@ def _arith_method(self, other, op):
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
- return super()._arith_method(other, op)
+ return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
- return super()._arith_method(other, op)
+ return op(self._int64index, other)
if op in [
operator.pow,
ops.rpow,
operator.mod,
ops.rmod,
- operator.floordiv,
ops.rfloordiv,
divmod,
ops.rdivmod,
]:
- return super()._arith_method(other, op)
+ return op(self._int64index, other)
step: Callable | None = None
if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:
step = op
# TODO: if other is a RangeIndex we may have more efficient options
- right = extract_array(other, extract_numpy=True, extract_range=True)
- left = self
+ other = extract_array(other, extract_numpy=True, extract_range=True)
+
+ left, right = self, other
try:
# apply if we have an override
@@ -1038,8 +1066,7 @@ def _arith_method(self, other, op):
rstart = op(left.start, right)
rstop = op(left.stop, right)
- res_name = ops.get_op_result_name(self, other)
- result = type(self)(rstart, rstop, rstep, name=res_name)
+ result = type(self)(rstart, rstop, rstep, name=self.name)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
@@ -1051,5 +1078,5 @@ def _arith_method(self, other, op):
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
- # test_arithmetic_explicit_conversions
- return super()._arith_method(other, op)
+ return op(self._int64index, other)
+ # TODO: Do attrs get handled reliably?
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 8327e5f1bb532..a6be7de4a1389 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -47,6 +47,7 @@
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_1d_only_ea_obj,
+ is_categorical_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_interval_dtype,
@@ -91,6 +92,8 @@
Categorical,
DatetimeArray,
ExtensionArray,
+ FloatingArray,
+ IntegerArray,
IntervalArray,
PandasArray,
PeriodArray,
@@ -108,6 +111,7 @@
from pandas.core.indexers import (
check_setitem_lengths,
is_empty_indexer,
+ is_exact_shape_match,
is_scalar_indexer,
)
import pandas.core.missing as missing
@@ -907,8 +911,10 @@ def setitem(self, indexer, value):
if is_extension_array_dtype(getattr(value, "dtype", None)):
# We need to be careful not to allow through strings that
# can be parsed to EADtypes
+ is_ea_value = True
arr_value = value
else:
+ is_ea_value = False
arr_value = np.asarray(value)
if transpose:
@@ -916,6 +922,7 @@ def setitem(self, indexer, value):
# length checking
check_setitem_lengths(indexer, value, values)
+ exact_match = is_exact_shape_match(values, arr_value)
if is_empty_indexer(indexer, arr_value):
# GH#8669 empty indexers
@@ -926,6 +933,30 @@ def setitem(self, indexer, value):
# be e.g. a list; see GH#6043
values[indexer] = value
+ elif exact_match and is_categorical_dtype(arr_value.dtype):
+ # GH25495 - If the current dtype is not categorical,
+ # we need to create a new categorical block
+ values[indexer] = value
+
+ elif exact_match and is_ea_value:
+ # GH#32395 if we're going to replace the values entirely, just
+ # substitute in the new array
+ if not self.is_object and isinstance(value, (IntegerArray, FloatingArray)):
+ # _can_hold_element will only allow us to get here if value
+ # has no NA entries.
+ values[indexer] = value.to_numpy(value.dtype.numpy_dtype)
+ else:
+ values[indexer] = np.asarray(value)
+
+ # if we are an exact match (ex-broadcasting),
+ # then use the resultant dtype
+ elif exact_match:
+ # We are setting _all_ of the array's values, so can cast to new dtype
+ values[indexer] = value
+
+ elif is_ea_value:
+ values[indexer] = value
+
else:
value = setitem_datetimelike_compat(values, len(values[indexer]), value)
values[indexer] = value
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index a1b058224795e..4ea4c055c12b0 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -367,12 +367,11 @@ def qcut(
x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
- quantiles = np.linspace(0, 1, q + 1) if is_integer(q) else q
-
- x_np = np.asarray(x)
- x_np = x_np[~np.isnan(x_np)]
- bins = np.quantile(x_np, quantiles)
-
+ if is_integer(q):
+ quantiles = np.linspace(0, 1, q + 1)
+ else:
+ quantiles = q
+ bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(
x,
bins,
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 0aaca406df9b4..cd8892fb0babb 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3682,11 +3682,11 @@ def nlargest(self, n=5, keep="first") -> Series:
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
- of appearance.
+ of appearance.
- ``last`` : return the last `n` occurrences in reverse
- order of appearance.
+ order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
- size larger than `n`.
+ size larger than `n`.
Returns
-------
@@ -3780,11 +3780,11 @@ def nsmallest(self, n: int = 5, keep: str = "first") -> Series:
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
- of appearance.
+ of appearance.
- ``last`` : return the last `n` occurrences in reverse
- order of appearance.
+ order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
- size larger than `n`.
+ size larger than `n`.
Returns
-------
@@ -5008,17 +5008,6 @@ def isin(self, values) -> Series:
5 False
Name: animal, dtype: bool
- To invert the boolean values, use the ``~`` operator:
-
- >>> ~s.isin(['cow', 'lama'])
- 0 False
- 1 False
- 2 False
- 3 True
- 4 False
- 5 True
- Name: animal, dtype: bool
-
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index bc4f4d657b859..2c0c4392a1442 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -414,51 +414,51 @@
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
- replaced with `value`
+ replaced with `value`
- str: string exactly matching `to_replace` will be replaced
- with `value`
+ with `value`
- regex: regexs matching `to_replace` will be replaced with
- `value`
+ `value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
- **must** be the same length.
+ **must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
- lists will be interpreted as regexs otherwise they will match
- directly. This doesn't matter much for `value` since there
- are only a few possible substitution regexes you can use.
+ lists will be interpreted as regexs otherwise they will match
+ directly. This doesn't matter much for `value` since there
+ are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
- for different existing values. For example,
- ``{{'a': 'b', 'y': 'z'}}`` replaces the value 'a' with 'b' and
- 'y' with 'z'. To use a dict in this way the `value`
- parameter should be `None`.
+ for different existing values. For example,
+ ``{{'a': 'b', 'y': 'z'}}`` replaces the value 'a' with 'b' and
+ 'y' with 'z'. To use a dict in this way the `value`
+ parameter should be `None`.
- For a DataFrame a dict can specify that different values
- should be replaced in different columns. For example,
- ``{{'a': 1, 'b': 'z'}}`` looks for the value 1 in column 'a'
- and the value 'z' in column 'b' and replaces these values
- with whatever is specified in `value`. The `value` parameter
- should not be ``None`` in this case. You can treat this as a
- special case of passing two lists except that you are
- specifying the column to search in.
+ should be replaced in different columns. For example,
+ ``{{'a': 1, 'b': 'z'}}`` looks for the value 1 in column 'a'
+ and the value 'z' in column 'b' and replaces these values
+ with whatever is specified in `value`. The `value` parameter
+ should not be ``None`` in this case. You can treat this as a
+ special case of passing two lists except that you are
+ specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
- ``{{'a': {{'b': np.nan}}}}``, are read as follows: look in column
- 'a' for the value 'b' and replace it with NaN. The `value`
- parameter should be ``None`` to use a nested dict in this
- way. You can nest regular expressions as well. Note that
- column names (the top-level dictionary keys in a nested
- dictionary) **cannot** be regular expressions.
+ ``{{'a': {{'b': np.nan}}}}``, are read as follows: look in column
+ 'a' for the value 'b' and replace it with NaN. The `value`
+ parameter should be ``None`` to use a nested dict in this
+ way. You can nest regular expressions as well. Note that
+ column names (the top-level dictionary keys in a nested
+ dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
- compiled regular expression, or list, dict, ndarray or
- Series of such elements. If `value` is also ``None`` then
- this **must** be a nested dictionary or Series.
+ compiled regular expression, or list, dict, ndarray or
+ Series of such elements. If `value` is also ``None`` then
+ this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
@@ -492,22 +492,22 @@
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
- ``None``.
+ ``None``.
TypeError
* If `to_replace` is not a scalar, array-like, ``dict``, or ``None``
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
- ``dict``, ``ndarray``, or ``Series``
+ ``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
- into a regular expression or is a list, dict, ndarray, or
- Series.
+ into a regular expression or is a list, dict, ndarray, or
+ Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
- the arguments to `to_replace` does not match the type of the
- value being replaced
+ the arguments to `to_replace` does not match the type of the
+ value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
- `value` but they are not the same length.
+ `value` but they are not the same length.
See Also
--------
@@ -518,17 +518,17 @@
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
- rules for substitution for ``re.sub`` are the same.
+ rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
- cannot provide, for example, a regular expression matching floating
- point numbers and expect the columns in your frame that have a
- numeric dtype to be matched. However, if those floating point
- numbers *are* strings, then you can do this.
+ cannot provide, for example, a regular expression matching floating
+ point numbers and expect the columns in your frame that have a
+ numeric dtype to be matched. However, if those floating point
+ numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
- and play with this method to gain intuition about how it works.
+ and play with this method to gain intuition about how it works.
* When dict is used as the `to_replace` value, it is like
- key(s) in the dict are the to_replace part and
- value(s) in the dict are the value parameter.
+ key(s) in the dict are the to_replace part and
+ value(s) in the dict are the value parameter.
Examples
--------
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 7813182222d67..596038b1ca146 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -263,7 +263,8 @@ def decons_obs_group_ids(
out = decons_group_index(obs_ids, shape)
return out if xnull or not lift.any() else [x - y for x, y in zip(out, lift)]
- indexer = unique_label_indices(comp_ids)
+ # TODO: unique_label_indices only used here, should take ndarray[np.intp]
+ indexer = unique_label_indices(ensure_int64(comp_ids))
return [lab[indexer].astype(np.intp, subok=False, copy=True) for lab in labels]
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index c17af442fe2cc..9080c457f0f6d 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -46,8 +46,8 @@
window_agg_numba_parameters,
)
from pandas.core.window.numba_ import (
- generate_numba_ewm_func,
- generate_numba_ewm_table_func,
+ generate_ewma_numba_table_func,
+ generate_numba_ewma_func,
)
from pandas.core.window.online import (
EWMMeanState,
@@ -519,21 +519,17 @@ def aggregate(self, func, *args, **kwargs):
def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
if self.method == "single":
- func = generate_numba_ewm_func
- numba_cache_key = (lambda x: x, "ewm_mean")
+ ewma_func = generate_numba_ewma_func(
+ engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas
+ )
+ numba_cache_key = (lambda x: x, "ewma")
else:
- func = generate_numba_ewm_table_func
- numba_cache_key = (lambda x: x, "ewm_mean_table")
- ewm_func = func(
- engine_kwargs=engine_kwargs,
- com=self._com,
- adjust=self.adjust,
- ignore_na=self.ignore_na,
- deltas=self._deltas,
- normalize=True,
- )
+ ewma_func = generate_ewma_numba_table_func(
+ engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas
+ )
+ numba_cache_key = (lambda x: x, "ewma_table")
return self._apply(
- ewm_func,
+ ewma_func,
numba_cache_key=numba_cache_key,
)
elif engine in ("cython", None):
@@ -543,68 +539,11 @@ def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
deltas = None if self.times is None else self._deltas
window_func = partial(
- window_aggregations.ewm,
- com=self._com,
- adjust=self.adjust,
- ignore_na=self.ignore_na,
- deltas=deltas,
- normalize=True,
- )
- return self._apply(window_func)
- else:
- raise ValueError("engine must be either 'numba' or 'cython'")
-
- @doc(
- template_header,
- create_section_header("Parameters"),
- args_compat,
- window_agg_numba_parameters,
- kwargs_compat,
- create_section_header("Returns"),
- template_returns,
- create_section_header("See Also"),
- template_see_also,
- create_section_header("Notes"),
- numba_notes.replace("\n", "", 1),
- window_method="ewm",
- aggregation_description="(exponential weighted moment) sum",
- agg_method="sum",
- )
- def sum(self, *args, engine=None, engine_kwargs=None, **kwargs):
- if not self.adjust:
- raise NotImplementedError("sum is not implemented with adjust=False")
- if maybe_use_numba(engine):
- if self.method == "single":
- func = generate_numba_ewm_func
- numba_cache_key = (lambda x: x, "ewm_sum")
- else:
- func = generate_numba_ewm_table_func
- numba_cache_key = (lambda x: x, "ewm_sum_table")
- ewm_func = func(
- engine_kwargs=engine_kwargs,
- com=self._com,
- adjust=self.adjust,
- ignore_na=self.ignore_na,
- deltas=self._deltas,
- normalize=False,
- )
- return self._apply(
- ewm_func,
- numba_cache_key=numba_cache_key,
- )
- elif engine in ("cython", None):
- if engine_kwargs is not None:
- raise ValueError("cython engine does not accept engine_kwargs")
- nv.validate_window_func("sum", args, kwargs)
-
- deltas = None if self.times is None else self._deltas
- window_func = partial(
- window_aggregations.ewm,
+ window_aggregations.ewma,
com=self._com,
adjust=self.adjust,
ignore_na=self.ignore_na,
deltas=deltas,
- normalize=False,
)
return self._apply(window_func)
else:
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index d91c0bb54f8dc..2fba2b22c1e2a 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -34,6 +34,7 @@
IndexSlice,
RangeIndex,
)
+from pandas.api.types import is_list_like
from pandas.core import generic
import pandas.core.common as com
from pandas.core.frame import (
@@ -60,7 +61,7 @@
)
try:
- import matplotlib as mpl
+ from matplotlib import colors
import matplotlib.pyplot as plt
has_mpl = True
@@ -72,7 +73,7 @@
@contextmanager
def _mpl(func: Callable):
if has_mpl:
- yield plt, mpl
+ yield plt, colors
else:
raise ImportError(no_mpl_message.format(func.__name__))
@@ -2756,8 +2757,7 @@ def bar(
subset: Subset | None = None,
axis: Axis | None = 0,
*,
- color: str | list | tuple | None = None,
- cmap: Any | None = None,
+ color="#d65f5f",
width: float = 100,
height: float = 100,
align: str | float | int | Callable = "mid",
@@ -2785,11 +2785,6 @@ def bar(
negative and positive numbers. If 2-tuple/list is used, the
first element is the color_negative and the second is the
color_positive (eg: ['#d65f5f', '#5fba7d']).
- cmap : str, matplotlib.cm.ColorMap
- A string name of a matplotlib Colormap, or a Colormap object. Cannot be
- used together with ``color``.
-
- .. versionadded:: 1.4.0
width : float, default 100
The percentage of the cell, measured from the left, in which to draw the
bars, in [0, 100].
@@ -2832,25 +2827,17 @@ def bar(
Returns
-------
self : Styler
-
- Notes
- -----
- This section of the user guide:
- `Table Visualization <../../user_guide/style.ipynb>`_ gives
- a number of examples for different settings and color coordination.
- """
- if color is None and cmap is None:
- color = "#d65f5f"
- elif color is not None and cmap is not None:
- raise ValueError("`color` and `cmap` cannot both be given")
- elif color is not None:
- if (isinstance(color, (list, tuple)) and len(color) > 2) or not isinstance(
- color, (str, list, tuple)
- ):
- raise ValueError(
- "`color` must be string or list or tuple of 2 strings,"
- "(eg: color=['#d65f5f', '#5fba7d'])"
- )
+ """
+ if not (is_list_like(color)):
+ color = [color, color]
+ elif len(color) == 1:
+ color = [color[0], color[0]]
+ elif len(color) > 2:
+ raise ValueError(
+ "`color` must be string or a list-like "
+ "of length 2: [`color_neg`, `color_pos`] "
+ "(eg: color=['#d65f5f', '#5fba7d'])"
+ )
if not (0 <= width <= 100):
raise ValueError(f"`width` must be a value in [0, 100], got {width}")
@@ -2866,7 +2853,6 @@ def bar(
axis=axis,
align=align,
colors=color,
- cmap=cmap,
width=width / 100,
height=height / 100,
vmin=vmin,
@@ -3423,12 +3409,12 @@ def _background_gradient(
else: # else validate gmap against the underlying data
gmap = _validate_apply_axis_arg(gmap, "gmap", float, data)
- with _mpl(Styler.background_gradient) as (plt, mpl):
+ with _mpl(Styler.background_gradient) as (plt, colors):
smin = np.nanmin(gmap) if vmin is None else vmin
smax = np.nanmax(gmap) if vmax is None else vmax
rng = smax - smin
# extend lower / upper bounds, compresses color range
- norm = mpl.colors.Normalize(smin - (rng * low), smax + (rng * high))
+ norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
rgbas = plt.cm.get_cmap(cmap)(norm(gmap))
def relative_luminance(rgba) -> float:
@@ -3457,11 +3443,9 @@ def css(rgba, text_only) -> str:
if not text_only:
dark = relative_luminance(rgba) < text_color_threshold
text_color = "#f1f1f1" if dark else "#000000"
- return (
- f"background-color: {mpl.colors.rgb2hex(rgba)};color: {text_color};"
- )
+ return f"background-color: {colors.rgb2hex(rgba)};color: {text_color};"
else:
- return f"color: {mpl.colors.rgb2hex(rgba)};"
+ return f"color: {colors.rgb2hex(rgba)};"
if data.ndim == 1:
return [css(rgba, text_only) for rgba in rgbas]
@@ -3534,8 +3518,7 @@ def _highlight_value(data: DataFrame | Series, op: str, props: str) -> np.ndarra
def _bar(
data: NDFrame,
align: str | float | int | Callable,
- colors: str | list | tuple,
- cmap: Any,
+ colors: list[str],
width: float,
height: float,
vmin: float | None,
@@ -3597,7 +3580,7 @@ def css_bar(start: float, end: float, color: str) -> str:
cell_css += f" {color} {end*100:.1f}%, transparent {end*100:.1f}%)"
return cell_css
- def css_calc(x, left: float, right: float, align: str, color: str | list | tuple):
+ def css_calc(x, left: float, right: float, align: str):
"""
Return the correct CSS for bar placement based on calculated values.
@@ -3628,10 +3611,7 @@ def css_calc(x, left: float, right: float, align: str, color: str | list | tuple
if pd.isna(x):
return base_css
- if isinstance(color, (list, tuple)):
- color = color[0] if x < 0 else color[1]
- assert isinstance(color, str) # mypy redefinition
-
+ color = colors[0] if x < 0 else colors[1]
x = left if x < left else x
x = right if x > right else x # trim data if outside of the window
@@ -3694,43 +3674,15 @@ def css_calc(x, left: float, right: float, align: str, color: str | list | tuple
"value defining the center line or a callable that returns a float"
)
- rgbas = None
- if cmap is not None:
- # use the matplotlib colormap input
- with _mpl(Styler.bar) as (plt, mpl):
- cmap = (
- mpl.cm.get_cmap(cmap)
- if isinstance(cmap, str)
- else cmap # assumed to be a Colormap instance as documented
- )
- norm = mpl.colors.Normalize(left, right)
- rgbas = cmap(norm(values))
- if data.ndim == 1:
- rgbas = [mpl.colors.rgb2hex(rgba) for rgba in rgbas]
- else:
- rgbas = [[mpl.colors.rgb2hex(rgba) for rgba in row] for row in rgbas]
-
assert isinstance(align, str) # mypy: should now be in [left, right, mid, zero]
if data.ndim == 1:
- return [
- css_calc(
- x - z, left - z, right - z, align, colors if rgbas is None else rgbas[i]
- )
- for i, x in enumerate(values)
- ]
+ return [css_calc(x - z, left - z, right - z, align) for x in values]
else:
- return np.array(
+ return DataFrame(
[
- [
- css_calc(
- x - z,
- left - z,
- right - z,
- align,
- colors if rgbas is None else rgbas[i][j],
- )
- for j, x in enumerate(row)
- ]
- for i, row in enumerate(values)
- ]
+ [css_calc(x - z, left - z, right - z, align) for x in row]
+ for row in values
+ ],
+ index=data.index,
+ columns=data.columns,
)
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index 82f1e60f0aea5..cf203908036b7 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -2144,15 +2144,6 @@ def test_dti_isub_tdi(self, tz_naive_fixture):
result -= tdi
tm.assert_index_equal(result, expected)
- # DTA.__isub__ GH#43904
- dta = dti._data.copy()
- dta -= tdi
- tm.assert_datetime_array_equal(dta, expected._data)
-
- out = dti._data.copy()
- np.subtract(out, tdi, out=out)
- tm.assert_datetime_array_equal(out, expected._data)
-
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
@@ -2162,13 +2153,9 @@ def test_dti_isub_tdi(self, tz_naive_fixture):
result -= tdi.values
tm.assert_index_equal(result, expected)
- msg = "cannot subtract DatetimeArray from ndarray"
- with pytest.raises(TypeError, match=msg):
- tdi.values -= dti
-
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
- tdi._values -= dti
+ tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 9932adccdbaf2..61c13330fbb72 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -693,10 +693,11 @@ def test_mul_float_series(self, numeric_idx):
tm.assert_series_equal(result, expected)
def test_mul_index(self, numeric_idx):
+ # in general not true for RangeIndex
idx = numeric_idx
-
- result = idx * idx
- tm.assert_index_equal(result, idx ** 2)
+ if not isinstance(idx, RangeIndex):
+ result = idx * idx
+ tm.assert_index_equal(result, idx ** 2)
def test_mul_datelike_raises(self, numeric_idx):
idx = numeric_idx
@@ -1089,11 +1090,11 @@ def test_ufunc_compat(self, holder):
box = Series if holder is Series else Index
if holder is RangeIndex:
- idx = RangeIndex(0, 5, name="foo")
+ idx = RangeIndex(0, 5)
else:
- idx = holder(np.arange(5, dtype="int64"), name="foo")
+ idx = holder(np.arange(5, dtype="int64"))
result = np.sin(idx)
- expected = box(np.sin(np.arange(5, dtype="int64")), name="foo")
+ expected = box(np.sin(np.arange(5, dtype="int64")))
tm.assert_equal(result, expected)
@pytest.mark.parametrize("holder", [Int64Index, UInt64Index, Float64Index, Series])
@@ -1211,16 +1212,14 @@ class TestNumericArithmeticUnsorted:
def check_binop(self, ops, scalars, idxs):
for op in ops:
for a, b in combinations(idxs, 2):
- a = a._rename("foo")
- b = b._rename("bar")
result = op(a, b)
expected = op(Int64Index(a), Int64Index(b))
- tm.assert_index_equal(result, expected, exact="equiv")
+ tm.assert_index_equal(result, expected)
for idx in idxs:
for scalar in scalars:
result = op(idx, scalar)
expected = op(Int64Index(idx), scalar)
- tm.assert_index_equal(result, expected, exact="equiv")
+ tm.assert_index_equal(result, expected)
def test_binops(self):
ops = [
@@ -1312,22 +1311,18 @@ def test_numeric_compat2(self):
# __pow__
idx = RangeIndex(0, 1000, 2)
result = idx ** 2
- expected = Int64Index(idx._values) ** 2
+ expected = idx._int64index ** 2
tm.assert_index_equal(Index(result.values), expected, exact=True)
# __floordiv__
cases_exact = [
(RangeIndex(0, 1000, 2), 2, RangeIndex(0, 500, 1)),
(RangeIndex(-99, -201, -3), -3, RangeIndex(33, 67, 1)),
- (
- RangeIndex(0, 1000, 1),
- 2,
- Int64Index(RangeIndex(0, 1000, 1)._values) // 2,
- ),
+ (RangeIndex(0, 1000, 1), 2, RangeIndex(0, 1000, 1)._int64index // 2),
(
RangeIndex(0, 100, 1),
2.0,
- Int64Index(RangeIndex(0, 100, 1)._values) // 2.0,
+ RangeIndex(0, 100, 1)._int64index // 2.0,
),
(RangeIndex(0), 50, RangeIndex(0)),
(RangeIndex(2, 4, 2), 3, RangeIndex(0, 1, 1)),
diff --git a/pandas/tests/arrays/boolean/test_ops.py b/pandas/tests/arrays/boolean/test_ops.py
index 95ebe8528c2e5..52f602258a049 100644
--- a/pandas/tests/arrays/boolean/test_ops.py
+++ b/pandas/tests/arrays/boolean/test_ops.py
@@ -18,10 +18,3 @@ def test_invert(self):
{"A": expected, "B": [False, True, True]}, index=["a", "b", "c"]
)
tm.assert_frame_equal(result, expected)
-
- def test_abs(self):
- # matching numpy behavior, abs is the identity function
- arr = pd.array([True, False, None], dtype="boolean")
- result = abs(arr)
-
- tm.assert_extension_array_equal(result, arr)
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index 96021bfa18fb7..cc70e727edf02 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -1256,7 +1256,7 @@ def test_to_coo(
row_levels=(0, 1), column_levels=(2, 3), sort_labels=sort_labels
)
assert isinstance(A, scipy.sparse.coo.coo_matrix)
- tm.assert_numpy_array_equal(A.toarray(), expected_A)
+ np.testing.assert_array_equal(A.toarray(), expected_A)
assert rows == expected_rows
assert cols == expected_cols
diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py
index e953e7dc6dcba..cc8a47f545d92 100644
--- a/pandas/tests/arrays/test_array.py
+++ b/pandas/tests/arrays/test_array.py
@@ -50,13 +50,6 @@
),
# String alias passes through to NumPy
([1, 2], "float32", PandasArray(np.array([1, 2], dtype="float32"))),
- ([1, 2], "int64", PandasArray(np.array([1, 2], dtype=np.int64))),
- # idempotency with e.g. pd.array(pd.array([1, 2], dtype="int64"))
- (
- PandasArray(np.array([1, 2], dtype=np.int32)),
- None,
- PandasArray(np.array([1, 2], dtype=np.int32)),
- ),
# Period alias
(
[pd.Period("2000", "D"), pd.Period("2001", "D")],
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index 98329776242f1..9e2b8e0f1603e 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -90,19 +90,6 @@ def test_abs(self):
result = abs(arr)
tm.assert_timedelta_array_equal(result, expected)
- result2 = np.abs(arr)
- tm.assert_timedelta_array_equal(result2, expected)
-
- def test_pos(self):
- vals = np.array([-3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]")
- arr = TimedeltaArray(vals)
-
- result = +arr
- tm.assert_timedelta_array_equal(result, arr)
-
- result2 = np.positive(arr)
- tm.assert_timedelta_array_equal(result2, arr)
-
def test_neg(self):
vals = np.array([-3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]")
arr = TimedeltaArray(vals)
@@ -113,9 +100,6 @@ def test_neg(self):
result = -arr
tm.assert_timedelta_array_equal(result, expected)
- result2 = np.negative(arr)
- tm.assert_timedelta_array_equal(result2, expected)
-
def test_neg_freq(self):
tdi = pd.timedelta_range("2 Days", periods=4, freq="H")
arr = TimedeltaArray(tdi, freq=tdi.freq)
@@ -124,6 +108,3 @@ def test_neg_freq(self):
result = -arr
tm.assert_timedelta_array_equal(result, expected)
-
- result2 = np.negative(arr)
- tm.assert_timedelta_array_equal(result2, expected)
diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py
index d262f09182a9c..6a16433aa0a32 100644
--- a/pandas/tests/extension/arrow/test_bool.py
+++ b/pandas/tests/extension/arrow/test_bool.py
@@ -54,10 +54,7 @@ def test_view(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.view()
- @pytest.mark.xfail(
- raises=AttributeError,
- reason="__eq__ incorrectly returns bool instead of ndarray[bool]",
- )
+ @pytest.mark.xfail(raises=AssertionError, reason="Not implemented yet")
def test_contains(self, data, data_missing):
super().test_contains(data, data_missing)
diff --git a/pandas/tests/extension/base/dim2.py b/pandas/tests/extension/base/dim2.py
index b4a817cbc37ec..866a1964c3319 100644
--- a/pandas/tests/extension/base/dim2.py
+++ b/pandas/tests/extension/base/dim2.py
@@ -102,17 +102,6 @@ def test_iter_2d(self, data):
assert obj.ndim == 1
assert len(obj) == arr2d.shape[1]
- def test_tolist_2d(self, data):
- arr2d = data.reshape(1, -1)
-
- result = arr2d.tolist()
- expected = [data.tolist()]
-
- assert isinstance(result, list)
- assert all(isinstance(x, list) for x in result)
-
- assert result == expected
-
def test_concat_2d(self, data):
left = data.reshape(-1, 1)
right = left.copy()
diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py
index 3e8a754c8c527..f51f9f732bace 100644
--- a/pandas/tests/extension/base/interface.py
+++ b/pandas/tests/extension/base/interface.py
@@ -119,9 +119,3 @@ def test_view(self, data):
# check specifically that the `dtype` kwarg is accepted
data.view(dtype=None)
-
- def test_tolist(self, data):
- result = data.tolist()
- expected = list(data)
- assert isinstance(result, list)
- assert result == expected
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index 05455905860d2..a95db54c3b6d1 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -150,6 +150,9 @@ def check_opname(self, s, op_name, other, exc=None):
# overwriting to indicate ops don't raise an error
super().check_opname(s, op_name, other, exc=None)
+ def _compare_other(self, s, data, op_name, other):
+ self.check_opname(s, op_name, other)
+
@pytest.mark.skip(reason="Tested in tests/arrays/test_boolean.py")
def test_compare_scalar(self, data, comparison_op):
pass
diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py
index de5a6b7a5bb06..54e31e05e8b0e 100644
--- a/pandas/tests/extension/test_datetime.py
+++ b/pandas/tests/extension/test_datetime.py
@@ -172,7 +172,10 @@ class TestCasting(BaseDatetimeTests, base.BaseCastingTests):
class TestComparisonOps(BaseDatetimeTests, base.BaseComparisonOpsTests):
- pass
+ def _compare_other(self, s, data, op_name, other):
+ # the base test is not appropriate for us. We raise on comparison
+ # with (some) integers, depending on the value.
+ pass
class TestMissing(BaseDatetimeTests, base.BaseMissingTests):
@@ -184,6 +187,12 @@ class TestReshaping(BaseDatetimeTests, base.BaseReshapingTests):
def test_concat(self, data, in_frame):
pass
+ def test_concat_mixed_dtypes(self, data):
+ # concat(Series[datetimetz], Series[category]) uses a
+ # plain np.array(values) on the DatetimeArray, which
+ # drops the tz.
+ super().test_concat_mixed_dtypes(data)
+
class TestSetitem(BaseDatetimeTests, base.BaseSetitemTests):
pass
diff --git a/pandas/tests/extension/test_floating.py b/pandas/tests/extension/test_floating.py
index 2b08c5b7be450..1d92a7b52863d 100644
--- a/pandas/tests/extension/test_floating.py
+++ b/pandas/tests/extension/test_floating.py
@@ -128,7 +128,6 @@ def _check_divmod_op(self, s, op, other, exc=None):
class TestComparisonOps(base.BaseComparisonOpsTests):
- # TODO: share with IntegerArray?
def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
if exc is None:
result = op(s, other)
diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py
index f210a4ce56091..4c845055b56c4 100644
--- a/pandas/tests/extension/test_period.py
+++ b/pandas/tests/extension/test_period.py
@@ -154,7 +154,10 @@ class TestCasting(BasePeriodTests, base.BaseCastingTests):
class TestComparisonOps(BasePeriodTests, base.BaseComparisonOpsTests):
- pass
+ def _compare_other(self, s, data, op_name, other):
+ # the base test is not appropriate for us. We raise on comparison
+ # with (some) integers, depending on the value.
+ pass
class TestMissing(BasePeriodTests, base.BaseMissingTests):
diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py
index bee8025275b42..c6b19547904ec 100644
--- a/pandas/tests/frame/methods/test_reindex.py
+++ b/pandas/tests/frame/methods/test_reindex.py
@@ -1078,35 +1078,3 @@ def test_reindex_datetimelike_to_object(self, dtype):
assert res.iloc[-1, 0] is fv
assert res.iloc[-1, 1] is fv
tm.assert_frame_equal(res, expected)
-
- @pytest.mark.parametrize(
- "index_df,index_res,index_exp",
- [
- (
- CategoricalIndex([], categories=["A"]),
- Index(["A"]),
- Index(["A"]),
- ),
- (
- CategoricalIndex([], categories=["A"]),
- Index(["B"]),
- Index(["B"]),
- ),
- (
- CategoricalIndex([], categories=["A"]),
- CategoricalIndex(["A"]),
- CategoricalIndex(["A"]),
- ),
- (
- CategoricalIndex([], categories=["A"]),
- CategoricalIndex(["B"]),
- CategoricalIndex(["B"]),
- ),
- ],
- )
- def test_reindex_not_category(self, index_df, index_res, index_exp):
- # GH#28690
- df = DataFrame(index=index_df)
- result = df.reindex(index=index_res)
- expected = DataFrame(index=index_exp)
- tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_sample.py b/pandas/tests/frame/methods/test_sample.py
index be85d6a186fcc..d5d1f975deefa 100644
--- a/pandas/tests/frame/methods/test_sample.py
+++ b/pandas/tests/frame/methods/test_sample.py
@@ -363,5 +363,5 @@ def test_sample_ignore_index(self):
{"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10}
)
result = df.sample(3, ignore_index=True)
- expected_index = Index(range(3))
- tm.assert_index_equal(result.index, expected_index, exact=True)
+ expected_index = Index([0, 1, 2])
+ tm.assert_index_equal(result.index, expected_index)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 1bb4b24266de0..56ab1013a797b 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2349,8 +2349,10 @@ def test_construct_with_two_categoricalindex_series(self):
)
result = DataFrame([s1, s2])
expected = DataFrame(
- np.array([[39, 6, 4, np.nan, np.nan], [152.0, 242.0, 150.0, 2.0, 2.0]]),
- columns=["female", "male", "unknown", "f", "m"],
+ np.array(
+ [[np.nan, 39.0, np.nan, 6.0, 4.0], [2.0, 152.0, 2.0, 242.0, 150.0]]
+ ),
+ columns=["f", "female", "m", "male", "unknown"],
)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 258e4e6eb0cc9..886cdfb7d76b0 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -1366,9 +1366,11 @@ def test_min_max_dt64_with_NaT_skipna_false(self, request, tz_naive_fixture):
# GH#36907
tz = tz_naive_fixture
if isinstance(tz, tzlocal) and is_platform_windows():
- pytest.skip(
- "GH#37659 OSError raised within tzlocal bc Windows "
- "chokes in times before 1970-01-01"
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="GH#37659 OSError raised within tzlocal bc Windows "
+ "chokes in times before 1970-01-01"
+ )
)
df = DataFrame(
diff --git a/pandas/tests/groupby/test_frame_value_counts.py b/pandas/tests/groupby/test_frame_value_counts.py
new file mode 100644
index 0000000000000..4e0b6b5622a16
--- /dev/null
+++ b/pandas/tests/groupby/test_frame_value_counts.py
@@ -0,0 +1,208 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+
+@pytest.fixture
+def education_df():
+ return pd.DataFrame(
+ {
+ "gender": ["male", "male", "female", "male", "female", "male"],
+ "education": ["low", "medium", "high", "low", "high", "low"],
+ "country": ["US", "FR", "US", "FR", "FR", "FR"],
+ }
+ )
+
+
+@pytest.fixture
+def country_index():
+ return ["U", "F", "U", "F", "F", "F"]
+
+
+def _frame_value_counts(df, keys, normalize, sort, ascending):
+ return df[keys].value_counts(normalize=normalize, sort=sort, ascending=ascending)
+
+
+@pytest.mark.parametrize("column", [True, False])
+@pytest.mark.parametrize("normalize", [True, False])
+@pytest.mark.parametrize(
+ "sort, ascending",
+ [
+ (False, None),
+ (True, True),
+ (True, False),
+ ],
+)
+@pytest.mark.parametrize("as_index", [True, False])
+@pytest.mark.parametrize("frame", [True, False])
+def test_basic(
+ education_df, country_index, column, normalize, sort, ascending, as_index, frame
+):
+ # gh43564 with added:
+ # - Use column or index
+ # - Whether or not to normalize
+ # - Whether or not to sort and how
+ # - Whether or not to use the groupby as an index
+ # - 3-way compare against :meth:`~DataFrame.value_counts`
+ # and `~SeriesGroupBy.value_counts`
+ gp = education_df.groupby("country" if column else country_index, as_index=as_index)
+ result = gp[["gender", "education"]].value_counts(
+ normalize=normalize, sort=sort, ascending=ascending
+ )
+
+ if frame:
+ # compare against apply with DataFrame value_counts
+ expected = gp.apply(
+ _frame_value_counts, ["gender", "education"], normalize, sort, ascending
+ )
+ expected.name = "size"
+ if as_index:
+ tm.assert_series_equal(result, expected)
+ else:
+ assert np.array_equal(result["size"].values, expected.values)
+ elif column or as_index:
+ # (otherwise SeriesGroupby crashes)
+ # compare against SeriesGroupBy value_counts
+ education_df["both"] = education_df["gender"] + "-" + education_df["education"]
+ expected = gp["both"].value_counts(
+ normalize=normalize, sort=sort, ascending=ascending
+ )
+ if as_index:
+ assert np.array_equal(result.values, expected.values)
+ else:
+ assert np.array_equal(result["size"].values, expected["size"].values)
+
+
+@pytest.mark.parametrize("normalize", [True, False])
+@pytest.mark.parametrize(
+ "sort, ascending",
+ [
+ (False, None),
+ (True, True),
+ (True, False),
+ ],
+)
+def test_compound(education_df, normalize, sort, ascending):
+ # Multiple groupby keys and as_index=False
+ gp = education_df.groupby(["country", "gender"], as_index=False, sort=False)
+ result = gp["education"].value_counts(
+ normalize=normalize, sort=sort, ascending=ascending
+ )
+
+ if sort:
+ # compare against apply with DataFrame value_counts
+ expected = gp.apply(
+ _frame_value_counts, "education", normalize, sort, ascending
+ ).values
+ else:
+ expected = [1.0, 1.0 / 3, 1.0, 2.0 / 3, 1.0] if normalize else [1, 1, 1, 2, 1]
+
+ assert np.array_equal(result["size"].values, expected)
+
+
+@pytest.fixture
+def animals_df():
+ return pd.DataFrame(
+ {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
+ index=["falcon", "dog", "cat", "ant"],
+ )
+
+
+@pytest.mark.parametrize(
+ "sort, ascending, normalize, expected_data, expected_index",
+ [
+ (False, None, False, [1, 2, 1], [(2, 4, 6), (2, 0, 0)]),
+ (True, True, False, [1, 1, 2], [(2, 6, 4), (2, 0, 0)]),
+ (True, False, False, [2, 1, 1], [(4, 2, 6), (0, 2, 0)]),
+ (True, False, False, [2, 1, 1], [(4, 2, 6), (0, 2, 0)]),
+ (True, False, True, [0.5, 0.25, 0.25], [(4, 2, 6), (0, 2, 0)]),
+ ],
+)
+def test_data_frame_value_counts(
+ animals_df, sort, ascending, normalize, expected_data, expected_index
+):
+ # 3-way compare with :meth:`~DataFrame.value_counts`
+ # Tests from frame/methods/test_value_counts.py
+ result_frame = animals_df.value_counts(
+ sort=sort, ascending=ascending, normalize=normalize
+ )
+ expected = pd.Series(
+ data=expected_data,
+ index=pd.MultiIndex.from_arrays(
+ expected_index, names=["num_legs", "num_wings"]
+ ),
+ )
+ tm.assert_series_equal(result_frame, expected)
+
+ animals_df["key"] = 1
+
+ result_frame_groupby = animals_df.groupby("key").value_counts(
+ sort=sort, ascending=ascending, normalize=normalize
+ )
+ result_frame_groupby.reset_index(drop=True, level="key", inplace=True)
+ result_frame_groupby.name = None
+ tm.assert_series_equal(result_frame_groupby, expected)
+
+
+@pytest.fixture
+def names_with_nulls_df(nulls_fixture):
+ return pd.DataFrame(
+ {
+ "first_name": ["John", "Anne", "John", "Beth"],
+ "middle_name": ["Smith", nulls_fixture, nulls_fixture, "Louise"],
+ },
+ )
+
+
+@pytest.mark.parametrize(
+ "dropna, expected_data, expected_index",
+ [
+ (
+ True,
+ [1, 1],
+ pd.MultiIndex.from_arrays(
+ [("Beth", "John"), ("Louise", "Smith")],
+ names=["first_name", "middle_name"],
+ ),
+ ),
+ (
+ False,
+ [1, 1, 1, 1],
+ pd.MultiIndex(
+ levels=[
+ pd.Index(["Anne", "Beth", "John"]),
+ pd.Index(["Louise", "Smith", np.nan]),
+ ],
+ codes=[[0, 1, 2, 2], [2, 0, 1, 2]],
+ names=["first_name", "middle_name"],
+ ),
+ ),
+ ],
+)
+@pytest.mark.parametrize("normalize", [False, True])
+def test_data_frame_value_counts_dropna(
+ names_with_nulls_df, dropna, normalize, expected_data, expected_index
+):
+ # GH 41334
+ # 3-way compare with :meth:`~DataFrame.value_counts`
+ # Tests with nulls from frame/methods/test_value_counts.py
+ result_frame = names_with_nulls_df.value_counts(dropna=dropna, normalize=normalize)
+ expected = pd.Series(
+ data=expected_data,
+ index=expected_index,
+ )
+ if normalize:
+ expected /= float(len(expected_data))
+
+ tm.assert_series_equal(result_frame, expected)
+
+ names_with_nulls_df["key"] = 1
+ result_frame_groupby = names_with_nulls_df.groupby("key").value_counts(
+ dropna=dropna, normalize=normalize
+ )
+ result_frame_groupby.reset_index(drop=True, level="key", inplace=True)
+ result_frame_groupby.name = None
+
+ tm.assert_series_equal(result_frame_groupby, expected)
diff --git a/pandas/tests/groupby/test_groupby_value_counts.py b/pandas/tests/groupby/test_groupby_value_counts.py
new file mode 100644
index 0000000000000..3aa6c4cbf8ea0
--- /dev/null
+++ b/pandas/tests/groupby/test_groupby_value_counts.py
@@ -0,0 +1,21 @@
+import pandas as pd
+import pytest
+
+def test_basic():
+ #gh43564
+ df = pd.DataFrame({'gender': ['male', 'male', 'female', 'male', 'female', 'male'],
+ 'education': ['low', 'medium', 'high', 'low', 'high', 'low'],
+ 'country': ['US', 'FR', 'US', 'FR', 'FR', 'US']})
+ print("SeriesGroupBy:\n", df.groupby('country')['gender'].value_counts(normalize=True))
+
+
+ def compute_proportions(df, var):
+ return (df[var]
+ .value_counts(normalize=True)
+ )
+ print("apply:\n", df.groupby('country')
+ .apply(compute_proportions, var=['gender', 'education'])
+ )
+
+ # but fails on DataFrameGroupBy
+ print("DataFrameGroupBy:\n", df.groupby('country')[['gender', 'education']].value_counts(normalize=True))
diff --git a/pandas/tests/indexes/categorical/test_indexing.py b/pandas/tests/indexes/categorical/test_indexing.py
index 6f8b18f449779..71eeb1bd9378a 100644
--- a/pandas/tests/indexes/categorical/test_indexing.py
+++ b/pandas/tests/indexes/categorical/test_indexing.py
@@ -300,9 +300,8 @@ def test_get_indexer_same_categories_different_order(self):
class TestWhere:
- def test_where(self, listlike_box_with_tuple):
- klass = listlike_box_with_tuple
-
+ @pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series])
+ def test_where(self, klass):
i = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)
cond = [True] * len(i)
expected = i
diff --git a/pandas/tests/indexes/categorical/test_reindex.py b/pandas/tests/indexes/categorical/test_reindex.py
index 5a0b2672e397c..0f35ed15b312b 100644
--- a/pandas/tests/indexes/categorical/test_reindex.py
+++ b/pandas/tests/indexes/categorical/test_reindex.py
@@ -1,10 +1,13 @@
import numpy as np
+import pytest
from pandas import (
Categorical,
CategoricalIndex,
+ DataFrame,
Index,
Interval,
+ Series,
)
import pandas._testing as tm
@@ -66,6 +69,45 @@ def test_reindex_empty_index(self):
tm.assert_index_equal(res, Index(["a", "b"]), exact=True)
tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp))
+ def test_reindex_missing_category(self):
+ # GH: 18185
+ ser = Series([1, 2, 3, 1], dtype="category")
+ msg = r"Cannot setitem on a Categorical with a new category \(-1\)"
+ with pytest.raises(TypeError, match=msg):
+ ser.reindex([1, 2, 3, 4, 5], fill_value=-1)
+
+ @pytest.mark.parametrize(
+ "index_df,index_res,index_exp",
+ [
+ (
+ CategoricalIndex([], categories=["A"]),
+ Index(["A"]),
+ Index(["A"]),
+ ),
+ (
+ CategoricalIndex([], categories=["A"]),
+ Index(["B"]),
+ Index(["B"]),
+ ),
+ (
+ CategoricalIndex([], categories=["A"]),
+ CategoricalIndex(["A"]),
+ CategoricalIndex(["A"]),
+ ),
+ (
+ CategoricalIndex([], categories=["A"]),
+ CategoricalIndex(["B"]),
+ CategoricalIndex(["B"]),
+ ),
+ ],
+ )
+ def test_reindex_not_category(self, index_df, index_res, index_exp):
+ # GH: 28690
+ df = DataFrame(index=index_df)
+ result = df.reindex(index=index_res)
+ expected = DataFrame(index=index_exp)
+ tm.assert_frame_equal(result, expected)
+
def test_reindex_categorical_added_category(self):
# GH 42424
ci = CategoricalIndex(
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 8f37413dd53c8..fb0c4641e9a6b 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -377,9 +377,8 @@ def test_numpy_repeat(self, simple_index):
with pytest.raises(ValueError, match=msg):
np.repeat(idx, rep, axis=0)
- def test_where(self, listlike_box_with_tuple, simple_index):
- klass = listlike_box_with_tuple
-
+ @pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
+ def test_where(self, klass, simple_index):
idx = simple_index
if isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
# where does not preserve freq
@@ -552,6 +551,38 @@ def test_format_empty(self):
assert empty_idx.format() == []
assert empty_idx.format(name=True) == [""]
+ def test_hasnans_isnans(self, index_flat):
+ # GH 11343, added tests for hasnans / isnans
+ index = index_flat
+
+ # cases in indices doesn't include NaN
+ idx = index.copy(deep=True)
+ expected = np.array([False] * len(idx), dtype=bool)
+ tm.assert_numpy_array_equal(idx._isnan, expected)
+ assert idx.hasnans is False
+
+ idx = index.copy(deep=True)
+ values = np.asarray(idx.values)
+
+ if len(index) == 0:
+ return
+ elif isinstance(index, NumericIndex) and is_integer_dtype(index.dtype):
+ return
+ elif isinstance(index, DatetimeIndexOpsMixin):
+ values[1] = iNaT
+ else:
+ values[1] = np.nan
+
+ if isinstance(index, PeriodIndex):
+ idx = type(index)(values, freq=index.freq)
+ else:
+ idx = type(index)(values)
+
+ expected = np.array([False] * len(idx), dtype=bool)
+ expected[1] = True
+ tm.assert_numpy_array_equal(idx._isnan, expected)
+ assert idx.hasnans is True
+
def test_fillna(self, index):
# GH 11343
if len(index) == 0:
@@ -624,15 +655,12 @@ def test_map(self, simple_index):
expected = idx.astype("int64")
elif is_float_dtype(idx.dtype):
expected = idx.astype("float64")
- if idx._is_backward_compat_public_numeric_index:
- # We get a NumericIndex back, not Float64Index
- expected = type(idx)(expected)
else:
expected = idx
result = idx.map(lambda x: x)
# For RangeIndex we convert to Int64Index
- tm.assert_index_equal(result, expected, exact="equiv")
+ tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
@@ -657,7 +685,7 @@ def test_map_dictlike(self, mapper, simple_index):
result = idx.map(identity)
# For RangeIndex we convert to Int64Index
- tm.assert_index_equal(result, expected, exact="equiv")
+ tm.assert_index_equal(result, expected)
# empty mappable
if idx._is_backward_compat_public_numeric_index:
@@ -783,19 +811,6 @@ def test_index_groupby(self, simple_index):
expected = {ex_keys[0]: idx[[0, 4]], ex_keys[1]: idx[[1, 3]]}
tm.assert_dict_equal(idx.groupby(to_groupby), expected)
- def test_append_preserves_dtype(self, simple_index):
- # In particular NumericIndex with dtype float32
- index = simple_index
- N = len(index)
-
- result = index.append(index)
- assert result.dtype == index.dtype
- tm.assert_index_equal(result[:N], index, check_exact=True)
- tm.assert_index_equal(result[N:], index, check_exact=True)
-
- alt = index.take(list(range(N)) * 2)
- tm.assert_index_equal(result, alt, check_exact=True)
-
class NumericBase(Base):
"""
@@ -829,20 +844,6 @@ def test_format(self, simple_index):
def test_numeric_compat(self):
pass # override Base method
- def test_insert_non_na(self, simple_index):
- # GH#43921 inserting an element that we know we can hold should
- # not change dtype or type (except for RangeIndex)
- index = simple_index
-
- result = index.insert(0, index[0])
-
- cls = type(index)
- if cls is RangeIndex:
- cls = Int64Index
-
- expected = cls([index[0]] + list(index), dtype=index.dtype)
- tm.assert_index_equal(result, expected, exact=True)
-
def test_insert_na(self, nulls_fixture, simple_index):
# GH 18295 (test missing)
index = simple_index
@@ -853,15 +854,8 @@ def test_insert_na(self, nulls_fixture, simple_index):
else:
expected = Float64Index([index[0], np.nan] + list(index[1:]))
- if index._is_backward_compat_public_numeric_index:
- # GH#43921 we preserve NumericIndex
- if index.dtype.kind == "f":
- expected = NumericIndex(expected, dtype=index.dtype)
- else:
- expected = NumericIndex(expected)
-
result = index.insert(1, na_val)
- tm.assert_index_equal(result, expected, exact=True)
+ tm.assert_index_equal(result, expected)
def test_arithmetic_explicit_conversions(self):
# GH 8608
diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py
index 2eae51c62aa0d..ac4477e60d5dc 100644
--- a/pandas/tests/indexes/conftest.py
+++ b/pandas/tests/indexes/conftest.py
@@ -1,11 +1,5 @@
-import numpy as np
import pytest
-from pandas import (
- Series,
- array,
-)
-
@pytest.fixture(params=[None, False])
def sort(request):
@@ -31,21 +25,3 @@ def freq_sample(request):
timedelta_range..
"""
return request.param
-
-
-@pytest.fixture(params=[list, np.array, array, Series])
-def listlike_box(request):
- """
- Types that may be passed as the indexer to searchsorted.
- """
- return request.param
-
-
-# TODO: not clear if this _needs_ to be different from listlike_box or
-# if that is just a historical artifact
-@pytest.fixture(params=[list, tuple, np.array, Series])
-def listlike_box_with_tuple(request):
- """
- Types that may be passed as the indexer to searchsorted.
- """
- return request.param
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index ecdbf01fd41c1..70156092eeabe 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -9,18 +9,6 @@
class DatetimeLike(Base):
- def test_isin(self, simple_index):
- index = simple_index[:4]
- result = index.isin(index)
- assert result.all()
-
- result = index.isin(list(index))
- assert result.all()
-
- result = index.isin([index[2], 5])
- expected = np.array([False, False, True, False])
- tm.assert_numpy_array_equal(result, expected)
-
def test_argsort_matches_array(self, simple_index):
idx = simple_index
idx = idx.insert(1, pd.NaT)
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index 80c86e0103436..7559d7ce645e0 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -121,41 +121,6 @@ def test_date_range_timestamp_equiv_preserve_frequency(self):
class TestDateRanges:
- @pytest.mark.parametrize("freq", ["N", "U", "L", "T", "S", "H", "D"])
- def test_date_range_edges(self, freq):
- # GH#13672
- td = Timedelta(f"1{freq}")
- ts = Timestamp("1970-01-01")
-
- idx = date_range(
- start=ts + td,
- end=ts + 4 * td,
- freq=freq,
- )
- exp = DatetimeIndex(
- [ts + n * td for n in range(1, 5)],
- freq=freq,
- )
- tm.assert_index_equal(idx, exp)
-
- # start after end
- idx = date_range(
- start=ts + 4 * td,
- end=ts + td,
- freq=freq,
- )
- exp = DatetimeIndex([], freq=freq)
- tm.assert_index_equal(idx, exp)
-
- # start matches end
- idx = date_range(
- start=ts + td,
- end=ts + td,
- freq=freq,
- )
- exp = DatetimeIndex([ts + td], freq=freq)
- tm.assert_index_equal(idx, exp)
-
def test_date_range_near_implementation_bound(self):
# GH#???
freq = Timedelta(1)
@@ -752,7 +717,7 @@ def test_timezone_comparaison_bug(self):
result = date_range(start, periods=2, tz="US/Eastern")
assert len(result) == 2
- def test_timezone_comparison_assert(self):
+ def test_timezone_comparaison_assert(self):
start = Timestamp("20130220 10:00", tz="US/Eastern")
msg = "Inferred time zone not equal to passed time zone"
with pytest.raises(AssertionError, match=msg):
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 5c85221c5a753..17b80fbc0afc2 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -17,6 +17,29 @@
class TestDatetimeIndex:
+ def test_time_loc(self): # GH8667
+ from datetime import time
+
+ from pandas._libs.index import _SIZE_CUTOFF
+
+ ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
+ key = time(15, 11, 30)
+ start = key.hour * 3600 + key.minute * 60 + key.second
+ step = 24 * 3600
+
+ for n in ns:
+ idx = date_range("2014-11-26", periods=n, freq="S")
+ ts = pd.Series(np.random.randn(n), index=idx)
+ i = np.arange(start, n, step)
+
+ tm.assert_numpy_array_equal(ts.index.get_loc(key), i, check_dtype=False)
+ tm.assert_series_equal(ts[key], ts.iloc[i])
+
+ left, right = ts.copy(), ts.copy()
+ left[key] *= -10
+ right.iloc[i] *= -10
+ tm.assert_series_equal(left, right)
+
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
@@ -55,6 +78,13 @@ def test_week_of_month_frequency(self):
expected = DatetimeIndex(dates, freq="WOM-1SAT")
tm.assert_index_equal(result, expected)
+ def test_stringified_slice_with_tz(self):
+ # GH#2658
+ start = "2013-01-07"
+ idx = date_range(start=start, freq="1d", periods=10, tz="US/Eastern")
+ df = DataFrame(np.arange(10), index=idx)
+ df["2013-01-14 23:44:34.437768-05:00":] # no exception here
+
def test_append_nondatetimeindex(self):
rng = date_range("1/1/2000", periods=10)
idx = Index(["a", "b", "c", "d"])
@@ -107,6 +137,16 @@ def test_misc_coverage(self):
result = rng.groupby(rng.day)
assert isinstance(list(result.values())[0][0], Timestamp)
+ def test_string_index_series_name_converted(self):
+ # #1644
+ df = DataFrame(np.random.randn(10, 4), index=date_range("1/1/2000", periods=10))
+
+ result = df.loc["1/3/2000"]
+ assert result.name == df.index[2]
+
+ result = df.T["1/3/2000"]
+ assert result.name == df.index[2]
+
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100), index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
@@ -114,6 +154,18 @@ def test_groupby_function_tuple_1677(self):
result = monthly_group.mean()
assert isinstance(result.index[0], tuple)
+ def test_isin(self):
+ index = tm.makeDateIndex(4)
+ result = index.isin(index)
+ assert result.all()
+
+ result = index.isin(list(index))
+ assert result.all()
+
+ tm.assert_almost_equal(
+ index.isin([index[2], 5]), np.array([False, False, True, False])
+ )
+
def assert_index_parameters(self, index):
assert index.freq == "40960N"
assert index.inferred_freq == "40960N"
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index c3152b77d39df..6eaf799ae2779 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -485,52 +485,24 @@ def test_get_loc(self):
with pytest.raises(InvalidIndexError, match=r"slice\(None, 2, None\)"):
idx.get_loc(slice(2))
- idx = DatetimeIndex(["2000-01-01", "2000-01-04"])
+ idx = pd.to_datetime(["2000-01-01", "2000-01-04"])
assert idx.get_loc("2000-01-02", method="nearest") == 0
assert idx.get_loc("2000-01-03", method="nearest") == 1
assert idx.get_loc("2000-01", method="nearest") == slice(0, 2)
- def test_get_loc_time_obj(self):
# time indexing
idx = date_range("2000-01-01", periods=24, freq="H")
-
- result = idx.get_loc(time(12))
- expected = np.array([12])
- tm.assert_numpy_array_equal(result, expected, check_dtype=False)
-
- result = idx.get_loc(time(12, 30))
- expected = np.array([])
- tm.assert_numpy_array_equal(result, expected, check_dtype=False)
-
+ tm.assert_numpy_array_equal(
+ idx.get_loc(time(12)), np.array([12]), check_dtype=False
+ )
+ tm.assert_numpy_array_equal(
+ idx.get_loc(time(12, 30)), np.array([]), check_dtype=False
+ )
msg = "cannot yet lookup inexact labels when key is a time object"
with pytest.raises(NotImplementedError, match=msg):
with tm.assert_produces_warning(FutureWarning, match="deprecated"):
idx.get_loc(time(12, 30), method="pad")
- def test_get_loc_time_obj2(self):
- # GH#8667
-
- from pandas._libs.index import _SIZE_CUTOFF
-
- ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
- key = time(15, 11, 30)
- start = key.hour * 3600 + key.minute * 60 + key.second
- step = 24 * 3600
-
- for n in ns:
- idx = date_range("2014-11-26", periods=n, freq="S")
- ts = pd.Series(np.random.randn(n), index=idx)
- locs = np.arange(start, n, step, dtype=np.intp)
-
- result = ts.index.get_loc(key)
- tm.assert_numpy_array_equal(result, locs)
- tm.assert_series_equal(ts[key], ts.iloc[locs])
-
- left, right = ts.copy(), ts.copy()
- left[key] *= -10
- right.iloc[locs] *= -10
- tm.assert_series_equal(left, right)
-
def test_get_loc_time_nat(self):
# GH#35114
# Case where key's total microseconds happens to match iNaT % 1e6 // 1000
@@ -729,7 +701,7 @@ def test_maybe_cast_slice_duplicate_monotonic(self):
assert result == expected
-class TestGetValue:
+class TestDatetimeIndex:
def test_get_value(self):
# specifically make sure we have test for np.datetime64 key
dti = date_range("2016-01-01", periods=3)
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index f0757d0ba555e..408ed2db316ca 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -19,16 +19,143 @@
from pandas.core.arrays import DatetimeArray
-class TestDatetime64:
- def test_no_millisecond_field(self):
- msg = "type object 'DatetimeIndex' has no attribute 'millisecond'"
- with pytest.raises(AttributeError, match=msg):
- DatetimeIndex.millisecond
+class TestTimeSeries:
+ def test_range_edges(self):
+ # GH#13672
+ idx = date_range(
+ start=Timestamp("1970-01-01 00:00:00.000000001"),
+ end=Timestamp("1970-01-01 00:00:00.000000004"),
+ freq="N",
+ )
+ exp = DatetimeIndex(
+ [
+ "1970-01-01 00:00:00.000000001",
+ "1970-01-01 00:00:00.000000002",
+ "1970-01-01 00:00:00.000000003",
+ "1970-01-01 00:00:00.000000004",
+ ],
+ freq="N",
+ )
+ tm.assert_index_equal(idx, exp)
+
+ def test_range_edges2(self):
+
+ idx = date_range(
+ start=Timestamp("1970-01-01 00:00:00.000000004"),
+ end=Timestamp("1970-01-01 00:00:00.000000001"),
+ freq="N",
+ )
+ exp = DatetimeIndex([], freq="N")
+ tm.assert_index_equal(idx, exp)
+
+ def test_range_edges3(self):
+
+ idx = date_range(
+ start=Timestamp("1970-01-01 00:00:00.000000001"),
+ end=Timestamp("1970-01-01 00:00:00.000000001"),
+ freq="N",
+ )
+ exp = DatetimeIndex(["1970-01-01 00:00:00.000000001"], freq="N")
+ tm.assert_index_equal(idx, exp)
+
+ def test_range_edges4(self):
+
+ idx = date_range(
+ start=Timestamp("1970-01-01 00:00:00.000001"),
+ end=Timestamp("1970-01-01 00:00:00.000004"),
+ freq="U",
+ )
+ exp = DatetimeIndex(
+ [
+ "1970-01-01 00:00:00.000001",
+ "1970-01-01 00:00:00.000002",
+ "1970-01-01 00:00:00.000003",
+ "1970-01-01 00:00:00.000004",
+ ],
+ freq="U",
+ )
+ tm.assert_index_equal(idx, exp)
+
+ def test_range_edges5(self):
+
+ idx = date_range(
+ start=Timestamp("1970-01-01 00:00:00.001"),
+ end=Timestamp("1970-01-01 00:00:00.004"),
+ freq="L",
+ )
+ exp = DatetimeIndex(
+ [
+ "1970-01-01 00:00:00.001",
+ "1970-01-01 00:00:00.002",
+ "1970-01-01 00:00:00.003",
+ "1970-01-01 00:00:00.004",
+ ],
+ freq="L",
+ )
+ tm.assert_index_equal(idx, exp)
+
+ def test_range_edges6(self):
+ idx = date_range(
+ start=Timestamp("1970-01-01 00:00:01"),
+ end=Timestamp("1970-01-01 00:00:04"),
+ freq="S",
+ )
+ exp = DatetimeIndex(
+ [
+ "1970-01-01 00:00:01",
+ "1970-01-01 00:00:02",
+ "1970-01-01 00:00:03",
+ "1970-01-01 00:00:04",
+ ],
+ freq="S",
+ )
+ tm.assert_index_equal(idx, exp)
- msg = "'DatetimeIndex' object has no attribute 'millisecond'"
- with pytest.raises(AttributeError, match=msg):
- DatetimeIndex([]).millisecond
+ def test_range_edges7(self):
+ idx = date_range(
+ start=Timestamp("1970-01-01 00:01"),
+ end=Timestamp("1970-01-01 00:04"),
+ freq="T",
+ )
+ exp = DatetimeIndex(
+ [
+ "1970-01-01 00:01",
+ "1970-01-01 00:02",
+ "1970-01-01 00:03",
+ "1970-01-01 00:04",
+ ],
+ freq="T",
+ )
+ tm.assert_index_equal(idx, exp)
+ def test_range_edges8(self):
+ idx = date_range(
+ start=Timestamp("1970-01-01 01:00"),
+ end=Timestamp("1970-01-01 04:00"),
+ freq="H",
+ )
+ exp = DatetimeIndex(
+ [
+ "1970-01-01 01:00",
+ "1970-01-01 02:00",
+ "1970-01-01 03:00",
+ "1970-01-01 04:00",
+ ],
+ freq="H",
+ )
+ tm.assert_index_equal(idx, exp)
+
+ def test_range_edges9(self):
+ idx = date_range(
+ start=Timestamp("1970-01-01"), end=Timestamp("1970-01-04"), freq="D"
+ )
+ exp = DatetimeIndex(
+ ["1970-01-01", "1970-01-02", "1970-01-03", "1970-01-04"], freq="D"
+ )
+ tm.assert_index_equal(idx, exp)
+
+
+class TestDatetime64:
def test_datetimeindex_accessors(self):
dti_naive = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
# GH#13303
diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py
index 896c43db5e356..c5b47053471eb 100644
--- a/pandas/tests/indexes/datetimes/test_partial_slicing.py
+++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py
@@ -19,23 +19,6 @@
class TestSlicing:
- def test_string_index_series_name_converted(self):
- # GH#1644
- df = DataFrame(np.random.randn(10, 4), index=date_range("1/1/2000", periods=10))
-
- result = df.loc["1/3/2000"]
- assert result.name == df.index[2]
-
- result = df.T["1/3/2000"]
- assert result.name == df.index[2]
-
- def test_stringified_slice_with_tz(self):
- # GH#2658
- start = "2013-01-07"
- idx = date_range(start=start, freq="1d", periods=10, tz="US/Eastern")
- df = DataFrame(np.arange(10), index=idx)
- df["2013-01-14 23:44:34.437768-05:00":] # no exception here
-
def test_return_type_doesnt_depend_on_monotonicity(self):
# GH#24892 we get Series back regardless of whether our DTI is monotonic
dti = date_range(start="2015-5-13 23:59:00", freq="min", periods=3)
diff --git a/pandas/tests/indexes/interval/test_base.py b/pandas/tests/indexes/interval/test_base.py
index 411e76ca5d8b7..3589fe726b3bb 100644
--- a/pandas/tests/indexes/interval/test_base.py
+++ b/pandas/tests/indexes/interval/test_base.py
@@ -1,7 +1,11 @@
import numpy as np
import pytest
-from pandas import IntervalIndex
+from pandas import (
+ IntervalIndex,
+ Series,
+ date_range,
+)
import pandas._testing as tm
from pandas.tests.indexes.common import Base
@@ -43,9 +47,8 @@ def test_take(self, closed):
expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
- def test_where(self, simple_index, listlike_box_with_tuple):
- klass = listlike_box_with_tuple
-
+ @pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
+ def test_where(self, simple_index, klass):
idx = simple_index
cond = [True] * len(idx)
expected = idx
@@ -63,3 +66,29 @@ def test_getitem_2d_deprecated(self, simple_index):
with pytest.raises(ValueError, match="multi-dimensional indexing not allowed"):
with tm.assert_produces_warning(FutureWarning):
idx[:, None]
+
+
+class TestPutmask:
+ @pytest.mark.parametrize("tz", ["US/Pacific", None])
+ def test_putmask_dt64(self, tz):
+ # GH#37968
+ dti = date_range("2016-01-01", periods=9, tz=tz)
+ idx = IntervalIndex.from_breaks(dti)
+ mask = np.zeros(idx.shape, dtype=bool)
+ mask[0:3] = True
+
+ result = idx.putmask(mask, idx[-1])
+ expected = IntervalIndex([idx[-1]] * 3 + list(idx[3:]))
+ tm.assert_index_equal(result, expected)
+
+ def test_putmask_td64(self):
+ # GH#37968
+ dti = date_range("2016-01-01", periods=9)
+ tdi = dti - dti[0]
+ idx = IntervalIndex.from_breaks(tdi)
+ mask = np.zeros(idx.shape, dtype=bool)
+ mask[0:3] = True
+
+ result = idx.putmask(mask, idx[-1])
+ expected = IntervalIndex([idx[-1]] * 3 + list(idx[3:]))
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py
index 8df8eef69e9c9..aa3359d775c5a 100644
--- a/pandas/tests/indexes/interval/test_indexing.py
+++ b/pandas/tests/indexes/interval/test_indexing.py
@@ -497,29 +497,3 @@ def test_slice_locs_with_ints_and_floats_errors(self, tuples, query):
),
):
index.slice_locs(start, stop)
-
-
-class TestPutmask:
- @pytest.mark.parametrize("tz", ["US/Pacific", None])
- def test_putmask_dt64(self, tz):
- # GH#37968
- dti = date_range("2016-01-01", periods=9, tz=tz)
- idx = IntervalIndex.from_breaks(dti)
- mask = np.zeros(idx.shape, dtype=bool)
- mask[0:3] = True
-
- result = idx.putmask(mask, idx[-1])
- expected = IntervalIndex([idx[-1]] * 3 + list(idx[3:]))
- tm.assert_index_equal(result, expected)
-
- def test_putmask_td64(self):
- # GH#37968
- dti = date_range("2016-01-01", periods=9)
- tdi = dti - dti[0]
- idx = IntervalIndex.from_breaks(tdi)
- mask = np.zeros(idx.shape, dtype=bool)
- mask[0:3] = True
-
- result = idx.putmask(mask, idx[-1])
- expected = IntervalIndex([idx[-1]] * 3 + list(idx[3:]))
- tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 321d1aa34b9af..ce8323199ce62 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -934,14 +934,15 @@ def test_dir():
assert "str" not in result
-def test_searchsorted_different_argument_classes(listlike_box):
+@pytest.mark.parametrize("klass", [list, np.array, pd.array, pd.Series])
+def test_searchsorted_different_argument_classes(klass):
# https://github.com/pandas-dev/pandas/issues/32762
values = IntervalIndex([Interval(0, 1), Interval(1, 2)])
- result = values.searchsorted(listlike_box(values))
+ result = values.searchsorted(klass(values))
expected = np.array([0, 1], dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
- result = values._data.searchsorted(listlike_box(values))
+ result = values._data.searchsorted(klass(values))
tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index 99322f474dd9e..e142cbf89f1bd 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -136,31 +136,18 @@ def test_slice_locs_with_missing_value(
assert result == expected
-class TestPutmask:
- def test_putmask_with_wrong_mask(self, idx):
- # GH18368
+def test_putmask_with_wrong_mask(idx):
+ # GH18368
- msg = "putmask: mask and data must be the same size"
- with pytest.raises(ValueError, match=msg):
- idx.putmask(np.ones(len(idx) + 1, np.bool_), 1)
-
- with pytest.raises(ValueError, match=msg):
- idx.putmask(np.ones(len(idx) - 1, np.bool_), 1)
-
- with pytest.raises(ValueError, match=msg):
- idx.putmask("foo", 1)
-
- def test_putmask_multiindex_other(self):
- # GH#43212 `value` is also a MultiIndex
+ msg = "putmask: mask and data must be the same size"
+ with pytest.raises(ValueError, match=msg):
+ idx.putmask(np.ones(len(idx) + 1, np.bool_), 1)
- left = MultiIndex.from_tuples([(np.nan, 6), (np.nan, 6), ("a", 4)])
- right = MultiIndex.from_tuples([("a", 1), ("a", 1), ("d", 1)])
- mask = np.array([True, True, False])
+ with pytest.raises(ValueError, match=msg):
+ idx.putmask(np.ones(len(idx) - 1, np.bool_), 1)
- result = left.putmask(mask, right)
-
- expected = MultiIndex.from_tuples([right[0], right[1], left[2]])
- tm.assert_index_equal(result, expected)
+ with pytest.raises(ValueError, match=msg):
+ idx.putmask("foo", 1)
class TestGetIndexer:
@@ -720,12 +707,13 @@ def test_where(self):
with pytest.raises(NotImplementedError, match=msg):
i.where(True)
- def test_where_array_like(self, listlike_box_with_tuple):
- mi = MultiIndex.from_tuples([("A", 1), ("A", 2)])
+ @pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series])
+ def test_where_array_like(self, klass):
+ i = MultiIndex.from_tuples([("A", 1), ("A", 2)])
cond = [False, True]
msg = r"\.where is not supported for MultiIndex operations"
with pytest.raises(NotImplementedError, match=msg):
- mi.where(listlike_box_with_tuple(cond))
+ i.where(klass(cond))
class TestContains:
diff --git a/pandas/tests/indexes/multi/test_putmask.py b/pandas/tests/indexes/multi/test_putmask.py
new file mode 100644
index 0000000000000..2a24be9003302
--- /dev/null
+++ b/pandas/tests/indexes/multi/test_putmask.py
@@ -0,0 +1,17 @@
+import numpy as np
+
+from pandas import MultiIndex
+import pandas._testing as tm
+
+
+def test_putmask_multiindex_other():
+ # GH#43212 `value` is also a MultiIndex
+
+ left = MultiIndex.from_tuples([(np.nan, 6), (np.nan, 6), ("a", 4)])
+ right = MultiIndex.from_tuples([("a", 1), ("a", 1), ("d", 1)])
+ mask = np.array([True, True, False])
+
+ result = left.putmask(mask, right)
+
+ expected = MultiIndex.from_tuples([right[0], right[1], left[2]])
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/numeric/test_astype.py b/pandas/tests/indexes/numeric/test_astype.py
index 89f26e953400d..bda66856fb57a 100644
--- a/pandas/tests/indexes/numeric/test_astype.py
+++ b/pandas/tests/indexes/numeric/test_astype.py
@@ -5,12 +5,12 @@
from pandas.core.dtypes.common import pandas_dtype
-from pandas import Index
-import pandas._testing as tm
-from pandas.core.indexes.api import (
+from pandas import (
Float64Index,
+ Index,
Int64Index,
)
+import pandas._testing as tm
class TestAstype:
diff --git a/pandas/tests/indexes/numeric/test_indexing.py b/pandas/tests/indexes/numeric/test_indexing.py
index cc309beef92d6..4621cbcb9d462 100644
--- a/pandas/tests/indexes/numeric/test_indexing.py
+++ b/pandas/tests/indexes/numeric/test_indexing.py
@@ -2,17 +2,15 @@
import pytest
from pandas import (
+ Float64Index,
Index,
+ Int64Index,
RangeIndex,
Series,
Timestamp,
-)
-import pandas._testing as tm
-from pandas.core.indexes.api import (
- Float64Index,
- Int64Index,
UInt64Index,
)
+import pandas._testing as tm
@pytest.fixture
@@ -397,17 +395,18 @@ class TestWhere:
UInt64Index(np.arange(5, dtype="uint64")),
],
)
- def test_where(self, listlike_box_with_tuple, index):
+ @pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
+ def test_where(self, klass, index):
cond = [True] * len(index)
expected = index
- result = index.where(listlike_box_with_tuple(cond))
+ result = index.where(klass(cond))
cond = [False] + [True] * (len(index) - 1)
expected = Float64Index([index._na_value] + index[1:].tolist())
- result = index.where(listlike_box_with_tuple(cond))
+ result = index.where(klass(cond))
tm.assert_index_equal(result, expected)
- def test_where_uint64(self):
+ def test_where_uin64(self):
idx = UInt64Index([0, 6, 2])
mask = np.array([False, True, False])
other = np.array([1], dtype=np.int64)
diff --git a/pandas/tests/indexes/numeric/test_join.py b/pandas/tests/indexes/numeric/test_join.py
index 2a47289b65aad..43d731f8c3142 100644
--- a/pandas/tests/indexes/numeric/test_join.py
+++ b/pandas/tests/indexes/numeric/test_join.py
@@ -1,12 +1,12 @@
import numpy as np
import pytest
-import pandas._testing as tm
-from pandas.core.indexes.api import (
+from pandas import (
Index,
Int64Index,
UInt64Index,
)
+import pandas._testing as tm
class TestJoinInt64Index:
diff --git a/pandas/tests/indexes/numeric/test_numeric.py b/pandas/tests/indexes/numeric/test_numeric.py
index ec451ac13ec44..6d35568b69fac 100644
--- a/pandas/tests/indexes/numeric/test_numeric.py
+++ b/pandas/tests/indexes/numeric/test_numeric.py
@@ -5,16 +5,14 @@
import pandas as pd
from pandas import (
+ Float64Index,
Index,
+ Int64Index,
NumericIndex,
Series,
-)
-import pandas._testing as tm
-from pandas.core.indexes.api import (
- Float64Index,
- Int64Index,
UInt64Index,
)
+import pandas._testing as tm
from pandas.tests.indexes.common import NumericBase
diff --git a/pandas/tests/indexes/numeric/test_setops.py b/pandas/tests/indexes/numeric/test_setops.py
index 4045cc0b91313..5a7db9858dbad 100644
--- a/pandas/tests/indexes/numeric/test_setops.py
+++ b/pandas/tests/indexes/numeric/test_setops.py
@@ -6,14 +6,14 @@
import numpy as np
import pytest
-import pandas._testing as tm
-from pandas.core.indexes.api import (
+from pandas import (
Float64Index,
Index,
Int64Index,
RangeIndex,
UInt64Index,
)
+import pandas._testing as tm
@pytest.fixture
diff --git a/pandas/tests/indexes/period/methods/test_astype.py b/pandas/tests/indexes/period/methods/test_astype.py
index e2340a2db02f7..74f627478a29c 100644
--- a/pandas/tests/indexes/period/methods/test_astype.py
+++ b/pandas/tests/indexes/period/methods/test_astype.py
@@ -5,17 +5,15 @@
CategoricalIndex,
DatetimeIndex,
Index,
+ Int64Index,
NaT,
Period,
PeriodIndex,
Timedelta,
+ UInt64Index,
period_range,
)
import pandas._testing as tm
-from pandas.core.indexes.api import (
- Int64Index,
- UInt64Index,
-)
class TestPeriodIndexAsType:
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index 78afcf2fdc78a..cef045766efcc 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -602,16 +602,17 @@ def test_get_indexer2(self):
class TestWhere:
- def test_where(self, listlike_box_with_tuple):
+ @pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
+ def test_where(self, klass):
i = period_range("20130101", periods=5, freq="D")
cond = [True] * len(i)
expected = i
- result = i.where(listlike_box_with_tuple(cond))
+ result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([NaT] + i[1:].tolist(), freq="D")
- result = i.where(listlike_box_with_tuple(cond))
+ result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py
index c565902d080c3..148999d90d554 100644
--- a/pandas/tests/indexes/period/test_partial_slicing.py
+++ b/pandas/tests/indexes/period/test_partial_slicing.py
@@ -3,7 +3,6 @@
from pandas import (
DataFrame,
- PeriodIndex,
Series,
date_range,
period_range,
@@ -12,31 +11,6 @@
class TestPeriodIndex:
- def test_getitem_periodindex_duplicates_string_slice(self):
- # monotonic
- idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="A-JUN")
- ts = Series(np.random.randn(len(idx)), index=idx)
-
- result = ts["2007"]
- expected = ts[1:3]
- tm.assert_series_equal(result, expected)
- result[:] = 1
- assert (ts[1:3] == 1).all()
-
- # not monotonic
- idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq="A-JUN")
- ts = Series(np.random.randn(len(idx)), index=idx)
-
- result = ts["2007"]
- expected = ts[idx == "2007"]
- tm.assert_series_equal(result, expected)
-
- def test_getitem_periodindex_quarter_string(self):
- pi = PeriodIndex(["2Q05", "3Q05", "4Q05", "1Q06", "2Q06"], freq="Q")
- ser = Series(np.random.rand(len(pi)), index=pi).cumsum()
- # Todo: fix these accessors!
- assert ser["05Q4"] == ser[2]
-
def test_pindex_slice_index(self):
pi = period_range(start="1/1/10", end="12/31/12", freq="M")
s = Series(np.random.rand(len(pi)), index=pi)
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index e6c31d22e626f..83c82c18f3d1e 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -4,6 +4,7 @@
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas import (
+ DatetimeIndex,
Index,
NaT,
Period,
@@ -48,6 +49,15 @@ def test_where(self):
# This is handled in test_indexing
pass
+ def test_no_millisecond_field(self):
+ msg = "type object 'DatetimeIndex' has no attribute 'millisecond'"
+ with pytest.raises(AttributeError, match=msg):
+ DatetimeIndex.millisecond
+
+ msg = "'DatetimeIndex' object has no attribute 'millisecond'"
+ with pytest.raises(AttributeError, match=msg):
+ DatetimeIndex([]).millisecond
+
def test_make_time_series(self):
index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
series = Series(1, index=index)
@@ -245,6 +255,25 @@ def test_is_(self):
assert not index.is_(index - 2)
assert not index.is_(index - 0)
+ def test_index_duplicate_periods(self):
+ # monotonic
+ idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="A-JUN")
+ ts = Series(np.random.randn(len(idx)), index=idx)
+
+ result = ts["2007"]
+ expected = ts[1:3]
+ tm.assert_series_equal(result, expected)
+ result[:] = 1
+ assert (ts[1:3] == 1).all()
+
+ # not monotonic
+ idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq="A-JUN")
+ ts = Series(np.random.randn(len(idx)), index=idx)
+
+ result = ts["2007"]
+ expected = ts[idx == "2007"]
+ tm.assert_series_equal(result, expected)
+
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="A-JUN")
expected = PeriodIndex([2000, 2007, 2009], freq="A-JUN")
@@ -273,6 +302,12 @@ def test_pindex_fieldaccessor_nat(self):
exp = Index([1, 2, -1, 3, 4], dtype=np.int64, name="name")
tm.assert_index_equal(idx.month, exp)
+ def test_pindex_qaccess(self):
+ pi = PeriodIndex(["2Q05", "3Q05", "4Q05", "1Q06", "2Q06"], freq="Q")
+ s = Series(np.random.rand(len(pi)), index=pi).cumsum()
+ # Todo: fix these accessors!
+ assert s["05Q4"] == s[2]
+
def test_pindex_multiples(self):
expected = PeriodIndex(
["2011-01", "2011-03", "2011-05", "2011-07", "2011-09", "2011-11"],
diff --git a/pandas/tests/indexes/period/test_searchsorted.py b/pandas/tests/indexes/period/test_searchsorted.py
index b9863d1bb019a..27e998284c189 100644
--- a/pandas/tests/indexes/period/test_searchsorted.py
+++ b/pandas/tests/indexes/period/test_searchsorted.py
@@ -7,6 +7,8 @@
NaT,
Period,
PeriodIndex,
+ Series,
+ array,
)
import pandas._testing as tm
@@ -35,16 +37,17 @@ def test_searchsorted(self, freq):
with pytest.raises(IncompatibleFrequency, match=msg):
pidx.searchsorted(Period("2014-01-01", freq="5D"))
- def test_searchsorted_different_argument_classes(self, listlike_box):
+ @pytest.mark.parametrize("klass", [list, np.array, array, Series])
+ def test_searchsorted_different_argument_classes(self, klass):
pidx = PeriodIndex(
["2014-01-01", "2014-01-02", "2014-01-03", "2014-01-04", "2014-01-05"],
freq="D",
)
- result = pidx.searchsorted(listlike_box(pidx))
+ result = pidx.searchsorted(klass(pidx))
expected = np.arange(len(pidx), dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
- result = pidx._data.searchsorted(listlike_box(pidx))
+ result = pidx._data.searchsorted(klass(pidx))
tm.assert_numpy_array_equal(result, expected)
def test_searchsorted_invalid(self):
diff --git a/pandas/tests/indexes/ranges/test_constructors.py b/pandas/tests/indexes/ranges/test_constructors.py
index c4f26220f87d1..8fb1d7a210cee 100644
--- a/pandas/tests/indexes/ranges/test_constructors.py
+++ b/pandas/tests/indexes/ranges/test_constructors.py
@@ -31,7 +31,7 @@ def test_constructor(self, args, kwargs, start, stop, step, name):
assert isinstance(result, RangeIndex)
assert result.name is name
assert result._range == range(start, stop, step)
- tm.assert_index_equal(result, expected, exact="equiv")
+ tm.assert_index_equal(result, expected)
def test_constructor_invalid_args(self):
msg = "RangeIndex\\(\\.\\.\\.\\) must be called with integers"
@@ -149,9 +149,7 @@ def test_constructor_corner(self):
index = RangeIndex(1, 5)
assert index.values.dtype == np.int64
with tm.assert_produces_warning(FutureWarning, match="will not infer"):
- expected = Index(arr).astype("int64")
-
- tm.assert_index_equal(index, expected, exact="equiv")
+ tm.assert_index_equal(index, Index(arr).astype("int64"))
# non-int raise Exception
with pytest.raises(TypeError, match=r"Wrong type \<class 'str'\>"):
diff --git a/pandas/tests/indexes/ranges/test_indexing.py b/pandas/tests/indexes/ranges/test_indexing.py
index f8c3eff0ab80a..b46354939f3c5 100644
--- a/pandas/tests/indexes/ranges/test_indexing.py
+++ b/pandas/tests/indexes/ranges/test_indexing.py
@@ -77,17 +77,3 @@ def test_take_fill_value(self):
msg = "index -5 is out of bounds for (axis 0 with )?size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
-
-
-class TestWhere:
- def test_where_putmask_range_cast(self):
- # GH#43240
- idx = RangeIndex(0, 5, name="test")
-
- mask = np.array([True, True, False, False, False])
- result = idx.putmask(mask, 10)
- expected = Int64Index([10, 10, 2, 3, 4], name="test")
- tm.assert_index_equal(result, expected)
-
- result = idx.where(~mask, 10)
- tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/ranges/test_join.py b/pandas/tests/indexes/ranges/test_join.py
index ed21996de891b..6668a7c6a3d02 100644
--- a/pandas/tests/indexes/ranges/test_join.py
+++ b/pandas/tests/indexes/ranges/test_join.py
@@ -2,10 +2,10 @@
from pandas import (
Index,
+ Int64Index,
RangeIndex,
)
import pandas._testing as tm
-from pandas.core.indexes.api import Int64Index
class TestJoin:
@@ -77,7 +77,7 @@ def test_join_inner(self):
res, lidx, ridx = index.join(other, how="inner", return_indexers=True)
assert isinstance(res, RangeIndex)
- tm.assert_index_equal(res, eres, exact="equiv")
+ tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py
index 277f686a8487a..cc266034b78af 100644
--- a/pandas/tests/indexes/ranges/test_range.py
+++ b/pandas/tests/indexes/ranges/test_range.py
@@ -4,13 +4,13 @@
from pandas.core.dtypes.common import ensure_platform_int
import pandas as pd
-import pandas._testing as tm
-from pandas.core.indexes.api import (
+from pandas import (
Float64Index,
Index,
Int64Index,
RangeIndex,
)
+import pandas._testing as tm
from pandas.tests.indexes.common import NumericBase
# aliases to make some tests easier to read
@@ -115,7 +115,7 @@ def test_insert(self):
result = idx[1:4]
# test 0th element
- tm.assert_index_equal(idx[0:4], result.insert(0, idx[0]), exact="equiv")
+ tm.assert_index_equal(idx[0:4], result.insert(0, idx[0]))
# GH 18295 (test missing)
expected = Float64Index([0, np.nan, 1, 2, 3, 4])
@@ -385,12 +385,12 @@ def test_identical(self, simple_index):
def test_nbytes(self):
# memory savings vs int index
- idx = RangeIndex(0, 1000)
- assert idx.nbytes < Int64Index(idx._values).nbytes / 10
+ i = RangeIndex(0, 1000)
+ assert i.nbytes < i._int64index.nbytes / 10
# constant memory usage
i2 = RangeIndex(0, 10)
- assert idx.nbytes == i2.nbytes
+ assert i.nbytes == i2.nbytes
@pytest.mark.parametrize(
"start,stop,step",
@@ -481,38 +481,38 @@ def test_slice_specialised(self, simple_index):
# positive slice values
index_slice = index[7:10:2]
expected = Index(np.array([14, 18]), name="foo")
- tm.assert_index_equal(index_slice, expected, exact="equiv")
+ tm.assert_index_equal(index_slice, expected)
# negative slice values
index_slice = index[-1:-5:-2]
expected = Index(np.array([18, 14]), name="foo")
- tm.assert_index_equal(index_slice, expected, exact="equiv")
+ tm.assert_index_equal(index_slice, expected)
# stop overshoot
index_slice = index[2:100:4]
expected = Index(np.array([4, 12]), name="foo")
- tm.assert_index_equal(index_slice, expected, exact="equiv")
+ tm.assert_index_equal(index_slice, expected)
# reverse
index_slice = index[::-1]
expected = Index(index.values[::-1], name="foo")
- tm.assert_index_equal(index_slice, expected, exact="equiv")
+ tm.assert_index_equal(index_slice, expected)
index_slice = index[-8::-1]
expected = Index(np.array([4, 2, 0]), name="foo")
- tm.assert_index_equal(index_slice, expected, exact="equiv")
+ tm.assert_index_equal(index_slice, expected)
index_slice = index[-40::-1]
expected = Index(np.array([], dtype=np.int64), name="foo")
- tm.assert_index_equal(index_slice, expected, exact="equiv")
+ tm.assert_index_equal(index_slice, expected)
index_slice = index[40::-1]
expected = Index(index.values[40::-1], name="foo")
- tm.assert_index_equal(index_slice, expected, exact="equiv")
+ tm.assert_index_equal(index_slice, expected)
index_slice = index[10::-1]
expected = Index(index.values[::-1], name="foo")
- tm.assert_index_equal(index_slice, expected, exact="equiv")
+ tm.assert_index_equal(index_slice, expected)
@pytest.mark.parametrize("step", set(range(-5, 6)) - {0})
def test_len_specialised(self, step):
diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py
index 39a1ddcbc8a6a..d955cf1c507a8 100644
--- a/pandas/tests/indexes/test_any_index.py
+++ b/pandas/tests/indexes/test_any_index.py
@@ -58,7 +58,7 @@ def test_map_identity_mapping(index):
expected = index.astype(np.int64)
else:
expected = index
- tm.assert_index_equal(result, expected, exact="equiv")
+ tm.assert_index_equal(result, expected)
def test_wrong_number_names(index):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index f1ece3e363bb6..c06652a71d179 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -22,7 +22,6 @@
DataFrame,
DatetimeIndex,
IntervalIndex,
- NumericIndex,
PeriodIndex,
RangeIndex,
Series,
@@ -631,8 +630,8 @@ def test_map_tseries_indices_return_index(self, attr):
def test_map_tseries_indices_accsr_return_index(self):
date_index = tm.makeDateIndex(24, freq="h", name="hourly")
- expected = Int64Index(range(24), name="hourly")
- tm.assert_index_equal(expected, date_index.map(lambda x: x.hour), exact=True)
+ expected = Index(range(24), name="hourly")
+ tm.assert_index_equal(expected, date_index.map(lambda x: x.hour))
@pytest.mark.parametrize(
"mapper",
@@ -1701,15 +1700,14 @@ def test_validate_1d_input():
[Float64Index, {}],
[DatetimeIndex, {}],
[TimedeltaIndex, {}],
- [NumericIndex, {}],
[PeriodIndex, {"freq": "Y"}],
],
)
def test_construct_from_memoryview(klass, extra_kwargs):
# GH 13120
result = klass(memoryview(np.arange(2000, 2005)), **extra_kwargs)
- expected = klass(list(range(2000, 2005)), **extra_kwargs)
- tm.assert_index_equal(result, expected, exact=True)
+ expected = klass(range(2000, 2005), **extra_kwargs)
+ tm.assert_index_equal(result, expected)
def test_index_set_names_pos_args_deprecation():
diff --git a/pandas/tests/indexes/test_numpy_compat.py b/pandas/tests/indexes/test_numpy_compat.py
index 93b51cf9611f9..9cc1205310ea7 100644
--- a/pandas/tests/indexes/test_numpy_compat.py
+++ b/pandas/tests/indexes/test_numpy_compat.py
@@ -74,6 +74,13 @@ def test_numpy_ufuncs_other(index, func, request):
# test ufuncs of numpy, see:
# https://numpy.org/doc/stable/reference/ufuncs.html
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
+ if (
+ isinstance(index, DatetimeIndex)
+ and index.tz is not None
+ and func in [np.isfinite, np.isnan, np.isinf]
+ ):
+ mark = pytest.mark.xfail(reason="__array_ufunc__ is not defined")
+ request.node.add_marker(mark)
if func in (np.isfinite, np.isinf, np.isnan):
# numpy 1.18 changed isinf and isnan to not raise on dt64/td64
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index a0e97223435e6..44d1d9710df45 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -15,7 +15,6 @@
DatetimeIndex,
Index,
MultiIndex,
- RangeIndex,
Series,
TimedeltaIndex,
Timestamp,
@@ -454,20 +453,19 @@ def test_intersection_difference_match_empty(self, index, sort):
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_setop_with_categorical(index, sort, method):
- if isinstance(index, MultiIndex): # TODO: flat_index?
+ if isinstance(index, MultiIndex):
# tested separately in tests.indexes.multi.test_setops
return
other = index.astype("category")
- exact = "equiv" if isinstance(index, RangeIndex) else True
result = getattr(index, method)(other, sort=sort)
expected = getattr(index, method)(index, sort=sort)
- tm.assert_index_equal(result, expected, exact=exact)
+ tm.assert_index_equal(result, expected)
result = getattr(index, method)(other[:5], sort=sort)
expected = getattr(index, method)(index[:5], sort=sort)
- tm.assert_index_equal(result, expected, exact=exact)
+ tm.assert_index_equal(result, expected)
def test_intersection_duplicates_all_indexes(index):
diff --git a/pandas/tests/indexes/timedeltas/test_pickle.py b/pandas/tests/indexes/timedeltas/test_pickle.py
deleted file mode 100644
index befe709728bdd..0000000000000
--- a/pandas/tests/indexes/timedeltas/test_pickle.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from pandas import timedelta_range
-import pandas._testing as tm
-
-
-class TestPickle:
- def test_pickle_after_set_freq(self):
- tdi = timedelta_range("1 day", periods=4, freq="s")
- tdi = tdi._with_freq(None)
-
- res = tm.round_trip_pickle(tdi)
- tm.assert_index_equal(res, tdi)
diff --git a/pandas/tests/indexes/timedeltas/test_searchsorted.py b/pandas/tests/indexes/timedeltas/test_searchsorted.py
index 710571ef38397..8a48da91ef31d 100644
--- a/pandas/tests/indexes/timedeltas/test_searchsorted.py
+++ b/pandas/tests/indexes/timedeltas/test_searchsorted.py
@@ -2,20 +2,23 @@
import pytest
from pandas import (
+ Series,
TimedeltaIndex,
Timestamp,
+ array,
)
import pandas._testing as tm
class TestSearchSorted:
- def test_searchsorted_different_argument_classes(self, listlike_box):
+ @pytest.mark.parametrize("klass", [list, np.array, array, Series])
+ def test_searchsorted_different_argument_classes(self, klass):
idx = TimedeltaIndex(["1 day", "2 days", "3 days"])
- result = idx.searchsorted(listlike_box(idx))
+ result = idx.searchsorted(klass(idx))
expected = np.arange(len(idx), dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
- result = idx._data.searchsorted(listlike_box(idx))
+ result = idx._data.searchsorted(klass(idx))
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index 8ceef8186e4ea..33f0565c0b23b 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -6,14 +6,14 @@
import pandas as pd
from pandas import (
Index,
- NaT,
+ Int64Index,
Series,
Timedelta,
TimedeltaIndex,
+ date_range,
timedelta_range,
)
import pandas._testing as tm
-from pandas.core.indexes.api import Int64Index
from pandas.tests.indexes.datetimelike import DatetimeLike
randn = np.random.randn
@@ -42,6 +42,26 @@ def test_numeric_compat(self):
def test_shift(self):
pass # this is handled in test_arithmetic.py
+ def test_pickle_after_set_freq(self):
+ tdi = timedelta_range("1 day", periods=4, freq="s")
+ tdi = tdi._with_freq(None)
+
+ res = tm.round_trip_pickle(tdi)
+ tm.assert_index_equal(res, tdi)
+
+ def test_isin(self):
+
+ index = tm.makeTimedeltaIndex(4)
+ result = index.isin(index)
+ assert result.all()
+
+ result = index.isin(list(index))
+ assert result.all()
+
+ tm.assert_almost_equal(
+ index.isin([index[2], 5]), np.array([False, False, True, False])
+ )
+
def test_misc_coverage(self):
rng = timedelta_range("1 day", periods=5)
@@ -115,31 +135,46 @@ def test_freq_conversion_always_floating(self):
res = tdi.to_series().astype("m8[s]")
tm.assert_numpy_array_equal(res._values, expected._values)
- def test_freq_conversion(self, index_or_series):
+ def test_freq_conversion(self):
# doc example
- scalar = Timedelta(days=31)
- td = index_or_series(
- [scalar, scalar, scalar + timedelta(minutes=5, seconds=3), NaT],
- dtype="m8[ns]",
+ # series
+ td = Series(date_range("20130101", periods=4)) - Series(
+ date_range("20121201", periods=4)
)
+ td[2] += timedelta(minutes=5, seconds=3)
+ td[3] = np.nan
result = td / np.timedelta64(1, "D")
- expected = index_or_series(
- [31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan]
- )
- tm.assert_equal(result, expected)
+ expected = Series([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])
+ tm.assert_series_equal(result, expected)
result = td.astype("timedelta64[D]")
- expected = index_or_series([31, 31, 31, np.nan])
- tm.assert_equal(result, expected)
+ expected = Series([31, 31, 31, np.nan])
+ tm.assert_series_equal(result, expected)
result = td / np.timedelta64(1, "s")
- expected = index_or_series(
- [31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3, np.nan]
- )
- tm.assert_equal(result, expected)
+ expected = Series([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3, np.nan])
+ tm.assert_series_equal(result, expected)
+
+ result = td.astype("timedelta64[s]")
+ tm.assert_series_equal(result, expected)
+
+ # tdi
+ td = TimedeltaIndex(td)
+
+ result = td / np.timedelta64(1, "D")
+ expected = Index([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])
+ tm.assert_index_equal(result, expected)
+
+ result = td.astype("timedelta64[D]")
+ expected = Index([31, 31, 31, np.nan])
+ tm.assert_index_equal(result, expected)
+
+ result = td / np.timedelta64(1, "s")
+ expected = Index([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3, np.nan])
+ tm.assert_index_equal(result, expected)
result = td.astype("timedelta64[s]")
- tm.assert_equal(result, expected)
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index c487777fc339e..21d10b23312b9 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -541,9 +541,9 @@ def test_partial_set_empty_frame_empty_consistencies(self):
date_range(start="2000", periods=20, freq="D"),
["2000-01-04", "2000-01-08", "2000-01-12"],
[
- Timestamp("2000-01-04"),
- Timestamp("2000-01-08"),
- Timestamp("2000-01-12"),
+ Timestamp("2000-01-04", freq="D"),
+ Timestamp("2000-01-08", freq="D"),
+ Timestamp("2000-01-12", freq="D"),
],
),
(
diff --git a/pandas/tests/io/formats/style/test_matplotlib.py b/pandas/tests/io/formats/style/test_matplotlib.py
index a350b6fe7546d..029936283327a 100644
--- a/pandas/tests/io/formats/style/test_matplotlib.py
+++ b/pandas/tests/io/formats/style/test_matplotlib.py
@@ -10,8 +10,6 @@
pytest.importorskip("matplotlib")
pytest.importorskip("jinja2")
-import matplotlib as mpl
-
from pandas.io.formats.style import Styler
@@ -258,29 +256,3 @@ def test_background_gradient_gmap_wrong_series(styler_blank):
gmap = Series([1, 2], index=["X", "Y"])
with pytest.raises(ValueError, match=msg):
styler_blank.background_gradient(gmap=gmap, axis=None)._compute()
-
-
-@pytest.mark.parametrize("cmap", ["PuBu", mpl.cm.get_cmap("PuBu")])
-def test_bar_colormap(cmap):
- data = DataFrame([[1, 2], [3, 4]])
- ctx = data.style.bar(cmap=cmap, axis=None)._compute().ctx
- pubu_colors = {
- (0, 0): "#d0d1e6",
- (1, 0): "#056faf",
- (0, 1): "#73a9cf",
- (1, 1): "#023858",
- }
- for k, v in pubu_colors.items():
- assert v in ctx[k][1][1]
-
-
-def test_bar_color_raises(df):
- msg = "`color` must be string or list or tuple of 2 strings"
- with pytest.raises(ValueError, match=msg):
- df.style.bar(color={"a", "b"}).to_html()
- with pytest.raises(ValueError, match=msg):
- df.style.bar(color=["a", "b", "c"]).to_html()
-
- msg = "`color` and `cmap` cannot both be given"
- with pytest.raises(ValueError, match=msg):
- df.style.bar(color="something", cmap="something else").to_html()
diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py
index 6d958f46a49dd..ff54a378806fa 100644
--- a/pandas/tests/io/parser/common/test_common_basic.py
+++ b/pandas/tests/io/parser/common/test_common_basic.py
@@ -12,7 +12,6 @@
import numpy as np
import pytest
-from pandas.compat import PY310
from pandas.errors import (
EmptyDataError,
ParserError,
@@ -675,11 +674,6 @@ def test_read_table_equivalency_to_read_csv(all_parsers):
tm.assert_frame_equal(result, expected)
-@pytest.mark.skipif(
- PY310,
- reason="GH41935 This test is leaking only on Python 3.10,"
- "causing other tests to fail with a cryptic error.",
-)
@pytest.mark.parametrize("read_func", ["read_csv", "read_table"])
def test_read_csv_and_table_sys_setprofile(all_parsers, read_func):
# GH#41069
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 3a8ae03015628..09e4514d4d47c 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -221,7 +221,7 @@ class TestIndexReductions:
def test_max_min_range(self, start, stop, step):
# GH#17607
idx = RangeIndex(start, stop, step)
- expected = idx._values.max()
+ expected = idx._int64index.max()
result = idx.max()
assert result == expected
@@ -229,7 +229,7 @@ def test_max_min_range(self, start, stop, step):
result2 = idx.max(skipna=False)
assert result2 == expected
- expected = idx._values.min()
+ expected = idx._int64index.min()
result = idx.min()
assert result == expected
@@ -431,11 +431,13 @@ def test_numpy_minmax_range(self):
# GH#26125
idx = RangeIndex(0, 10, 3)
+ expected = idx._int64index.max()
result = np.max(idx)
- assert result == 9
+ assert result == expected
+ expected = idx._int64index.min()
result = np.min(idx)
- assert result == 0
+ assert result == expected
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
diff --git a/pandas/tests/reshape/concat/test_sort.py b/pandas/tests/reshape/concat/test_sort.py
index 3d362ef42d276..865f696b7a73a 100644
--- a/pandas/tests/reshape/concat/test_sort.py
+++ b/pandas/tests/reshape/concat/test_sort.py
@@ -1,5 +1,3 @@
-import numpy as np
-
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
@@ -83,12 +81,3 @@ def test_concat_aligned_sort_does_not_raise(self):
expected = DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"])
result = pd.concat([df, df], ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
-
- def test_concat_frame_with_sort_false(self):
- # GH 43375
- result = pd.concat(
- [DataFrame({i: i}, index=[i]) for i in range(2, 0, -1)], sort=False
- )
- expected = DataFrame([[2, np.nan], [np.nan, 1]], index=[2, 1], columns=[2, 1])
-
- tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index af9d6dd83bee3..4972cb34aac69 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -746,11 +746,11 @@ def test_unbalanced(self):
)
df["id"] = df.index
exp_data = {
- "X": ["X1", "X2", "X1", "X2"],
- "A": [1.0, 2.0, 3.0, 4.0],
- "B": [5.0, 6.0, np.nan, np.nan],
- "id": [0, 1, 0, 1],
- "year": [2010, 2010, 2011, 2011],
+ "X": ["X1", "X1", "X2", "X2"],
+ "A": [1.0, 3.0, 2.0, 4.0],
+ "B": [5.0, np.nan, 6.0, np.nan],
+ "id": [0, 0, 1, 1],
+ "year": [2010, 2011, 2010, 2011],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
@@ -993,10 +993,10 @@ def test_nonnumeric_suffix(self):
)
expected = DataFrame(
{
- "A": ["X1", "X2", "X1", "X2"],
- "colname": ["placebo", "placebo", "test", "test"],
- "result": [5.0, 6.0, np.nan, np.nan],
- "treatment": [1.0, 2.0, 3.0, 4.0],
+ "A": ["X1", "X1", "X2", "X2"],
+ "colname": ["placebo", "test", "placebo", "test"],
+ "result": [5.0, np.nan, 6.0, np.nan],
+ "treatment": [1.0, 3.0, 2.0, 4.0],
}
)
expected = expected.set_index(["A", "colname"])
@@ -1040,10 +1040,10 @@ def test_float_suffix(self):
)
expected = DataFrame(
{
- "A": ["X1", "X2", "X1", "X2", "X1", "X2", "X1", "X2"],
- "colname": [1.2, 1.2, 1.0, 1.0, 1.1, 1.1, 2.1, 2.1],
- "result": [5.0, 6.0, 0.0, 9.0, np.nan, np.nan, np.nan, np.nan],
- "treatment": [np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 4.0],
+ "A": ["X1", "X1", "X1", "X1", "X2", "X2", "X2", "X2"],
+ "colname": [1, 1.1, 1.2, 2.1, 1, 1.1, 1.2, 2.1],
+ "result": [0.0, np.nan, 5.0, np.nan, 9.0, np.nan, 6.0, np.nan],
+ "treatment": [np.nan, 1.0, np.nan, 3.0, np.nan, 2.0, np.nan, 4.0],
}
)
expected = expected.set_index(["A", "colname"])
diff --git a/pandas/tests/reshape/test_qcut.py b/pandas/tests/reshape/test_qcut.py
index f7c7204d02a49..6c4d14f1dede3 100644
--- a/pandas/tests/reshape/test_qcut.py
+++ b/pandas/tests/reshape/test_qcut.py
@@ -21,6 +21,7 @@
)
import pandas._testing as tm
from pandas.api.types import CategoricalDtype as CDT
+from pandas.core.algorithms import quantile
from pandas.tseries.offsets import (
Day,
@@ -33,8 +34,8 @@ def test_qcut():
# We store the bins as Index that have been
# rounded to comparisons are a bit tricky.
- labels, _ = qcut(arr, 4, retbins=True)
- ex_bins = np.quantile(arr, [0, 0.25, 0.5, 0.75, 1.0])
+ labels, bins = qcut(arr, 4, retbins=True)
+ ex_bins = quantile(arr, [0, 0.25, 0.5, 0.75, 1.0])
result = labels.categories.left.values
assert np.allclose(result, ex_bins[:-1], atol=1e-2)
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index 5521bee09b19b..6bd5c0eea218e 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -142,24 +142,6 @@ def test_setitem_with_tz_dst(self, indexer_sli):
indexer_sli(ser)[[1, 2]] = vals
tm.assert_series_equal(ser, exp)
- def test_object_series_setitem_dt64array_exact_match(self):
- # make sure the dt64 isn't cast by numpy to integers
- # https://github.com/numpy/numpy/issues/12550
-
- ser = Series({"X": np.nan}, dtype=object)
-
- indexer = [True]
-
- # "exact_match" -> size of array being set matches size of ser
- value = np.array([4], dtype="M8[ns]")
-
- ser.iloc[indexer] = value
-
- expected = Series([value[0]], index=["X"], dtype=object)
- assert all(isinstance(x, np.datetime64) for x in expected.values)
-
- tm.assert_series_equal(ser, expected)
-
class TestSetitemScalarIndexer:
def test_setitem_negative_out_of_bounds(self):
diff --git a/pandas/tests/series/methods/test_reindex.py b/pandas/tests/series/methods/test_reindex.py
index be9f96c8b509a..36d3971d10a3d 100644
--- a/pandas/tests/series/methods/test_reindex.py
+++ b/pandas/tests/series/methods/test_reindex.py
@@ -359,11 +359,3 @@ def test_reindex_empty_with_level(values):
index=MultiIndex(levels=[["b"], values[1]], codes=[[], []]), dtype="object"
)
tm.assert_series_equal(result, expected)
-
-
-def test_reindex_missing_category():
- # GH#18185
- ser = Series([1, 2, 3, 1], dtype="category")
- msg = r"Cannot setitem on a Categorical with a new category \(-1\)"
- with pytest.raises(TypeError, match=msg):
- ser.reindex([1, 2, 3, 4, 5], fill_value=-1)
diff --git a/pandas/tests/strings/test_cat.py b/pandas/tests/strings/test_cat.py
index 8abbc59343e78..48f853cfdcb10 100644
--- a/pandas/tests/strings/test_cat.py
+++ b/pandas/tests/strings/test_cat.py
@@ -278,11 +278,7 @@ def test_str_cat_align_mixed_inputs(join):
expected_outer = Series(["aaA", "bbB", "c-C", "ddD", "-e-"])
# joint index of rhs [t, u]; u will be forced have index of s
rhs_idx = (
- t.index.intersection(s.index)
- if join == "inner"
- else t.index.union(s.index)
- if join == "outer"
- else t.index.append(s.index.difference(t.index))
+ t.index.intersection(s.index) if join == "inner" else t.index.union(s.index)
)
expected = expected_outer.loc[s.index.join(rhs_idx, how=join)]
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 779d6e6b6bb0f..b610e51bfd055 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1727,9 +1727,17 @@ def test_hashtable_large_sizehint(self, hashtable):
tbl = hashtable(size_hint=size_hint) # noqa
+def test_quantile():
+ s = Series(np.random.randn(100))
+
+ result = algos.quantile(s, [0, 0.25, 0.5, 0.75, 1.0])
+ expected = algos.quantile(s.values, [0, 0.25, 0.5, 0.75, 1.0])
+ tm.assert_almost_equal(result, expected)
+
+
def test_unique_label_indices():
- a = np.random.randint(1, 1 << 10, 1 << 15).astype(np.intp)
+ a = np.random.randint(1, 1 << 10, 1 << 15).astype("int64")
left = ht.unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
@@ -1785,13 +1793,13 @@ def test_too_many_ndims(self):
@pytest.mark.single
@pytest.mark.high_memory
- def test_pct_max_many_rows(self):
+ @pytest.mark.parametrize(
+ "values",
+ [np.arange(2 ** 24 + 1), np.arange(2 ** 25 + 2).reshape(2 ** 24 + 1, 2)],
+ ids=["1d", "2d"],
+ )
+ def test_pct_max_many_rows(self, values):
# GH 18271
- values = np.arange(2 ** 24 + 1)
- result = algos.rank(values, pct=True).max()
- assert result == 1
-
- values = np.arange(2 ** 25 + 2).reshape(2 ** 24 + 1, 2)
result = algos.rank(values, pct=True).max()
assert result == 1
diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py
index c4efcd140baae..2077d2a210765 100644
--- a/pandas/tests/window/test_groupby.py
+++ b/pandas/tests/window/test_groupby.py
@@ -680,7 +680,6 @@ def test_groupby_rolling_resulting_multiindex(self):
)
tm.assert_index_equal(result.index, expected_index)
- def test_groupby_rolling_resulting_multiindex2(self):
# grouping by 2 columns -> 3-level MI as result
df = DataFrame({"a": np.arange(12.0), "b": [1, 2] * 6, "c": [1, 2, 3, 4] * 3})
result = df.groupby(["b", "c"]).rolling(2).sum()
@@ -703,7 +702,6 @@ def test_groupby_rolling_resulting_multiindex2(self):
)
tm.assert_index_equal(result.index, expected_index)
- def test_groupby_rolling_resulting_multiindex3(self):
# grouping with 1 level on dataframe with 2-level MI -> 3-level MI as result
df = DataFrame({"a": np.arange(8.0), "b": [1, 2] * 4, "c": [1, 2, 3, 4] * 2})
df = df.set_index("c", append=True)
@@ -721,7 +719,7 @@ def test_groupby_rolling_resulting_multiindex3(self):
],
names=["b", None, "c"],
)
- tm.assert_index_equal(result.index, expected_index, exact="equiv")
+ tm.assert_index_equal(result.index, expected_index)
def test_groupby_rolling_object_doesnt_affect_groupby_apply(self):
# GH 39732
diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py
index 9fd4bd422178a..9c7313d081d8f 100644
--- a/pandas/tests/window/test_numba.py
+++ b/pandas/tests/window/test_numba.py
@@ -161,31 +161,28 @@ def add(values, x):
@td.skip_if_no("numba")
-class TestEWM:
+class TestEWMMean:
@pytest.mark.parametrize(
"grouper", [lambda x: x, lambda x: x.groupby("A")], ids=["None", "groupby"]
)
- @pytest.mark.parametrize("method", ["mean", "sum"])
- def test_invalid_engine(self, grouper, method):
+ def test_invalid_engine(self, grouper):
df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})
with pytest.raises(ValueError, match="engine must be either"):
- getattr(grouper(df).ewm(com=1.0), method)(engine="foo")
+ grouper(df).ewm(com=1.0).mean(engine="foo")
@pytest.mark.parametrize(
"grouper", [lambda x: x, lambda x: x.groupby("A")], ids=["None", "groupby"]
)
- @pytest.mark.parametrize("method", ["mean", "sum"])
- def test_invalid_engine_kwargs(self, grouper, method):
+ def test_invalid_engine_kwargs(self, grouper):
df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})
with pytest.raises(ValueError, match="cython engine does not"):
- getattr(grouper(df).ewm(com=1.0), method)(
+ grouper(df).ewm(com=1.0).mean(
engine="cython", engine_kwargs={"nopython": True}
)
@pytest.mark.parametrize("grouper", ["None", "groupby"])
- @pytest.mark.parametrize("method", ["mean", "sum"])
def test_cython_vs_numba(
- self, grouper, method, nogil, parallel, nopython, ignore_na, adjust
+ self, grouper, nogil, parallel, nopython, ignore_na, adjust
):
if grouper == "None":
grouper = lambda x: x
@@ -193,16 +190,15 @@ def test_cython_vs_numba(
else:
grouper = lambda x: x.groupby("A")
warn = None
- if method == "sum":
- adjust = True
+
df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})
ewm = grouper(df).ewm(com=1.0, adjust=adjust, ignore_na=ignore_na)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
with tm.assert_produces_warning(warn, match="nuisance"):
# GH#42738
- result = getattr(ewm, method)(engine="numba", engine_kwargs=engine_kwargs)
- expected = getattr(ewm, method)(engine="cython")
+ result = ewm.mean(engine="numba", engine_kwargs=engine_kwargs)
+ expected = ewm.mean(engine="cython")
tm.assert_frame_equal(result, expected)
@@ -364,16 +360,15 @@ def test_table_method_expanding_methods(
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data", [np.eye(3), np.ones((2, 3)), np.ones((3, 2))])
- @pytest.mark.parametrize("method", ["mean", "sum"])
- def test_table_method_ewm(self, data, method, axis, nogil, parallel, nopython):
+ def test_table_method_ewm(self, data, axis, nogil, parallel, nopython):
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
df = DataFrame(data)
- result = getattr(df.ewm(com=1, method="table", axis=axis), method)(
+ result = df.ewm(com=1, method="table", axis=axis).mean(
engine_kwargs=engine_kwargs, engine="numba"
)
- expected = getattr(df.ewm(com=1, method="single", axis=axis), method)(
+ expected = df.ewm(com=1, method="single", axis=axis).mean(
engine_kwargs=engine_kwargs, engine="numba"
)
tm.assert_frame_equal(result, expected)
diff --git a/pyproject.toml b/pyproject.toml
index d84024eb09de2..43fac011f3b5c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,8 +4,18 @@
requires = [
"setuptools>=51.0.0",
"wheel",
- "Cython>=0.29.24,<3", # Note: sync with setup.py
- "oldest-supported-numpy>=0.10"
+ "Cython>=0.29.21,<3", # Note: sync with setup.py
+ # Numpy requirements for different OS/architectures
+ # Copied from https://github.com/scipy/scipy/blob/master/pyproject.toml (which is also licensed under BSD)
+ "numpy==1.17.3; python_version=='3.7' and (platform_machine!='arm64' or platform_system!='Darwin') and platform_machine!='aarch64'",
+ "numpy==1.18.3; python_version=='3.8' and (platform_machine!='arm64' or platform_system!='Darwin') and platform_machine!='aarch64'",
+ "numpy==1.19.3; python_version>='3.9' and (platform_machine!='arm64' or platform_system!='Darwin')",
+ # Aarch64(Python 3.9 requirements are the same as AMD64)
+ "numpy==1.19.2; python_version=='3.7' and platform_machine=='aarch64'",
+ "numpy==1.19.2; python_version=='3.8' and platform_machine=='aarch64'",
+ # Darwin Arm64
+ "numpy>=1.20.0; python_version=='3.8' and platform_machine=='arm64' and platform_system=='Darwin'",
+ "numpy>=1.20.0; python_version=='3.9' and platform_machine=='arm64' and platform_system=='Darwin'"
]
# uncomment to enable pep517 after versioneer problem is fixed.
# https://github.com/python-versioneer/python-versioneer/issues/193
diff --git a/setup.cfg b/setup.cfg
index 9deebb835eff7..62ff0c6934f77 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -21,7 +21,6 @@ classifiers =
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
- Programming Language :: Python :: 3.10
Topic :: Scientific/Engineering
project_urls =
Bug Tracker = https://github.com/pandas-dev/pandas/issues
@@ -31,10 +30,7 @@ project_urls =
[options]
packages = find:
install_requires =
- numpy>=1.18.5; platform_machine!='aarch64' and platform_machine!='arm64' and python_version<'3.10'
- numpy>=1.19.2; platform_machine=='aarch64' and python_version<'3.10'
- numpy>=1.20.0; platform_machine=='arm64' and python_version<'3.10'
- numpy>=1.21.0; python_version>='3.10'
+ numpy>=1.18.5
python-dateutil>=2.8.1
pytz>=2020.1
python_requires = >=3.8
diff --git a/setup.py b/setup.py
index f5151621c9efe..337719053585c 100755
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,7 @@ def is_platform_mac():
return sys.platform == "darwin"
-min_cython_ver = "0.29.24" # note: sync with pyproject.toml
+min_cython_ver = "0.29.21" # note: sync with pyproject.toml
try:
from Cython import (
| - [x] closes #43564
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44259 | 2021-10-31T23:45:34Z | 2021-11-01T00:16:35Z | null | 2021-11-01T10:54:31Z |
CLN: address TODOs, FIXMEs | diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx
index dc3bb09c1b462..b908fa2c65e4d 100644
--- a/pandas/_libs/join.pyx
+++ b/pandas/_libs/join.pyx
@@ -264,6 +264,9 @@ def left_join_indexer_unique(
ndarray[numeric_object_t] left,
ndarray[numeric_object_t] right
):
+ """
+ Both left and right are strictly monotonic increasing.
+ """
cdef:
Py_ssize_t i, j, nleft, nright
ndarray[intp_t] indexer
@@ -311,6 +314,9 @@ def left_join_indexer_unique(
def left_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t] right):
"""
Two-pass algorithm for monotonic indexes. Handles many-to-one merges.
+
+ Both left and right are monotonic increasing, but at least one of them
+ is non-unique (if both were unique we'd use left_join_indexer_unique).
"""
cdef:
Py_ssize_t i, j, k, nright, nleft, count
@@ -321,6 +327,7 @@ def left_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t]
nleft = len(left)
nright = len(right)
+ # First pass is to find the size 'count' of our output indexers.
i = 0
j = 0
count = 0
@@ -334,6 +341,8 @@ def left_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t]
rval = right[j]
if lval == rval:
+ # This block is identical across
+ # left_join_indexer, inner_join_indexer, outer_join_indexer
count += 1
if i < nleft - 1:
if j < nright - 1 and right[j + 1] == rval:
@@ -398,12 +407,14 @@ def left_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t]
# end of the road
break
elif lval < rval:
+ # i.e. lval not in right; we keep for left_join_indexer
lindexer[count] = i
rindexer[count] = -1
- result[count] = left[i]
+ result[count] = lval
count += 1
i += 1
else:
+ # i.e. rval not in left; we discard for left_join_indexer
j += 1
return result, lindexer, rindexer
@@ -414,6 +425,8 @@ def left_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t]
def inner_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t] right):
"""
Two-pass algorithm for monotonic indexes. Handles many-to-one merges.
+
+ Both left and right are monotonic increasing but not necessarily unique.
"""
cdef:
Py_ssize_t i, j, k, nright, nleft, count
@@ -424,6 +437,7 @@ def inner_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t]
nleft = len(left)
nright = len(right)
+ # First pass is to find the size 'count' of our output indexers.
i = 0
j = 0
count = 0
@@ -453,8 +467,10 @@ def inner_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t]
# end of the road
break
elif lval < rval:
+ # i.e. lval not in right; we discard for inner_indexer
i += 1
else:
+ # i.e. rval not in left; we discard for inner_indexer
j += 1
# do it again now that result size is known
@@ -478,7 +494,7 @@ def inner_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t]
if lval == rval:
lindexer[count] = i
rindexer[count] = j
- result[count] = rval
+ result[count] = lval
count += 1
if i < nleft - 1:
if j < nright - 1 and right[j + 1] == rval:
@@ -495,8 +511,10 @@ def inner_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t]
# end of the road
break
elif lval < rval:
+ # i.e. lval not in right; we discard for inner_indexer
i += 1
else:
+ # i.e. rval not in left; we discard for inner_indexer
j += 1
return result, lindexer, rindexer
@@ -505,6 +523,9 @@ def inner_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t]
@cython.wraparound(False)
@cython.boundscheck(False)
def outer_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t] right):
+ """
+ Both left and right are monotonic increasing but not necessarily unique.
+ """
cdef:
Py_ssize_t i, j, nright, nleft, count
numeric_object_t lval, rval
@@ -514,6 +535,9 @@ def outer_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t]
nleft = len(left)
nright = len(right)
+ # First pass is to find the size 'count' of our output indexers.
+ # count will be length of left plus the number of elements of right not in
+ # left (counting duplicates)
i = 0
j = 0
count = 0
@@ -616,12 +640,14 @@ def outer_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t]
# end of the road
break
elif lval < rval:
+ # i.e. lval not in right; we keep for outer_join_indexer
lindexer[count] = i
rindexer[count] = -1
result[count] = lval
count += 1
i += 1
else:
+ # i.e. rval not in left; we keep for outer_join_indexer
lindexer[count] = -1
rindexer[count] = j
result[count] = rval
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 2c4b420656259..c1915e719f515 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -198,7 +198,7 @@ cdef inline bint _is_on_month(int month, int compare_month, int modby) nogil:
@cython.wraparound(False)
@cython.boundscheck(False)
def get_start_end_field(const int64_t[:] dtindex, str field,
- object freqstr=None, int month_kw=12):
+ str freqstr=None, int month_kw=12):
"""
Given an int64-based datetime index return array of indicators
of whether timestamps are at the start/end of the month/quarter/year
diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py
index a8f69497d4019..ac27aaa42d151 100644
--- a/pandas/core/array_algos/putmask.py
+++ b/pandas/core/array_algos/putmask.py
@@ -9,7 +9,10 @@
import numpy as np
from pandas._libs import lib
-from pandas._typing import ArrayLike
+from pandas._typing import (
+ ArrayLike,
+ npt,
+)
from pandas.core.dtypes.cast import (
convert_scalar_for_putitemlike,
@@ -26,13 +29,14 @@
from pandas.core.arrays import ExtensionArray
-def putmask_inplace(values: ArrayLike, mask: np.ndarray, value: Any) -> None:
+def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None:
"""
ExtensionArray-compatible implementation of np.putmask. The main
difference is we do not handle repeating or truncating like numpy.
Parameters
----------
+ values: np.ndarray or ExtensionArray
mask : np.ndarray[bool]
We assume extract_bool_array has already been called.
value : Any
@@ -51,6 +55,7 @@ def putmask_inplace(values: ArrayLike, mask: np.ndarray, value: Any) -> None:
)
):
# GH#19266 using np.putmask gives unexpected results with listlike value
+ # along with object dtype
if is_list_like(value) and len(value) == len(values):
values[mask] = value[mask]
else:
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 6be2e803b5910..21675ca0cdc7c 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -1259,7 +1259,6 @@ def __from_arrow__(
return IntervalArray._concat_same_type(results)
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
- # NB: this doesn't handle checking for closed match
if not all(isinstance(x, IntervalDtype) for x in dtypes):
return None
diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py
index bf901683de602..b1824413512c5 100644
--- a/pandas/core/indexers/utils.py
+++ b/pandas/core/indexers/utils.py
@@ -104,14 +104,14 @@ def is_scalar_indexer(indexer, ndim: int) -> bool:
return False
-def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool:
+def is_empty_indexer(indexer, arr_value: ArrayLike) -> bool:
"""
Check if we have an empty indexer.
Parameters
----------
indexer : object
- arr_value : np.ndarray
+ arr_value : np.ndarray or ExtensionArray
Returns
-------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 926ab0b544abd..0cbe16c9aaf13 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3123,7 +3123,9 @@ def _union(self, other: Index, sort):
and not (self.has_duplicates and other.has_duplicates)
and self._can_use_libjoin
):
- # Both are unique and monotonic, so can use outer join
+ # Both are monotonic and at least one is unique, so can use outer join
+ # (actually don't need either unique, but without this restriction
+ # test_union_same_value_duplicated_in_both fails)
try:
return self._outer_indexer(other)[0]
except (TypeError, IncompatibleFrequency):
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 8327e5f1bb532..751cf41a09f14 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -918,7 +918,7 @@ def setitem(self, indexer, value):
check_setitem_lengths(indexer, value, values)
if is_empty_indexer(indexer, arr_value):
- # GH#8669 empty indexers
+ # GH#8669 empty indexers, test_loc_setitem_boolean_mask_allfalse
pass
elif is_scalar_indexer(indexer, self.ndim):
@@ -1698,7 +1698,7 @@ def putmask(self, mask, new) -> list[Block]:
mask = extract_bool_array(mask)
if not self._can_hold_element(new):
- return self.astype(_dtype_obj).putmask(mask, new)
+ return self.coerce_to_target_dtype(new).putmask(mask, new)
arr = self.values
arr.T.putmask(mask, new)
@@ -1755,7 +1755,9 @@ def fillna(
# We support filling a DatetimeTZ with a `value` whose timezone
# is different by coercing to object.
# TODO: don't special-case td64
- return self.astype(_dtype_obj).fillna(value, limit, inplace, downcast)
+ return self.coerce_to_target_dtype(value).fillna(
+ value, limit, inplace, downcast
+ )
values = self.values
values = values if inplace else values.copy()
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 7765c29ee59c8..f0d01f8727d5a 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -2075,7 +2075,7 @@ def test_td64arr_div_numeric_array(
with pytest.raises(TypeError, match=pattern):
vector.astype(object) / tdser
- def test_td64arr_mul_int_series(self, box_with_array, names, request):
+ def test_td64arr_mul_int_series(self, box_with_array, names):
# GH#19042 test for correct name attachment
box = box_with_array
exname = get_expected_name(box, names)
| Will have to see what the CI says about the cython flake8 config change. <b>update</b> cython flake8 config moved to #44260 | https://api.github.com/repos/pandas-dev/pandas/pulls/44258 | 2021-10-31T20:45:21Z | 2021-11-01T17:08:28Z | 2021-11-01T17:08:28Z | 2021-11-01T17:35:16Z |
TST: Add test for where inplace | diff --git a/pandas/_testing/_hypothesis.py b/pandas/_testing/_hypothesis.py
new file mode 100644
index 0000000000000..0e506f5e878b4
--- /dev/null
+++ b/pandas/_testing/_hypothesis.py
@@ -0,0 +1,85 @@
+"""
+Hypothesis data generator helpers.
+"""
+from datetime import datetime
+
+from hypothesis import strategies as st
+from hypothesis.extra.dateutil import timezones as dateutil_timezones
+from hypothesis.extra.pytz import timezones as pytz_timezones
+
+from pandas.compat import is_platform_windows
+
+import pandas as pd
+
+from pandas.tseries.offsets import (
+ BMonthBegin,
+ BMonthEnd,
+ BQuarterBegin,
+ BQuarterEnd,
+ BYearBegin,
+ BYearEnd,
+ MonthBegin,
+ MonthEnd,
+ QuarterBegin,
+ QuarterEnd,
+ YearBegin,
+ YearEnd,
+)
+
+OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), max_size=10, min_size=3)
+
+OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), max_size=10, min_size=3)
+
+OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), max_size=10, min_size=3)
+
+OPTIONAL_DICTS = st.lists(
+ st.one_of(st.none(), st.dictionaries(st.text(), st.integers())),
+ max_size=10,
+ min_size=3,
+)
+
+OPTIONAL_LISTS = st.lists(
+ st.one_of(st.none(), st.lists(st.text(), max_size=10, min_size=3)),
+ max_size=10,
+ min_size=3,
+)
+
+if is_platform_windows():
+ DATETIME_NO_TZ = st.datetimes(min_value=datetime(1900, 1, 1))
+else:
+ DATETIME_NO_TZ = st.datetimes()
+
+DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes(
+ min_value=pd.Timestamp(1900, 1, 1).to_pydatetime(),
+ max_value=pd.Timestamp(1900, 1, 1).to_pydatetime(),
+ timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
+)
+
+DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes(
+ min_value=pd.Timestamp.min.to_pydatetime(warn=False),
+ max_value=pd.Timestamp.max.to_pydatetime(warn=False),
+)
+
+INT_NEG_999_TO_POS_999 = st.integers(-999, 999)
+
+# The strategy for each type is registered in conftest.py, as they don't carry
+# enough runtime information (e.g. type hints) to infer how to build them.
+YQM_OFFSET = st.one_of(
+ *map(
+ st.from_type,
+ [
+ MonthBegin,
+ MonthEnd,
+ BMonthBegin,
+ BMonthEnd,
+ QuarterBegin,
+ QuarterEnd,
+ BQuarterBegin,
+ BQuarterEnd,
+ YearBegin,
+ YearEnd,
+ BYearBegin,
+ BYearEnd,
+ ],
+ )
+)
diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py
index 0906186418c0a..525bf75476fc7 100644
--- a/pandas/tests/frame/indexing/test_where.py
+++ b/pandas/tests/frame/indexing/test_where.py
@@ -1,5 +1,9 @@
from datetime import datetime
+from hypothesis import (
+ given,
+ strategies as st,
+)
import numpy as np
import pytest
@@ -16,6 +20,13 @@
isna,
)
import pandas._testing as tm
+from pandas._testing._hypothesis import (
+ OPTIONAL_DICTS,
+ OPTIONAL_FLOATS,
+ OPTIONAL_INTS,
+ OPTIONAL_LISTS,
+ OPTIONAL_TEXT,
+)
@pytest.fixture(params=["default", "float_string", "mixed_float", "mixed_int"])
@@ -797,3 +808,16 @@ def test_where_columns_casting():
result = df.where(pd.notnull(df), None)
# make sure dtypes don't change
tm.assert_frame_equal(expected, result)
+
+
+@given(
+ data=st.one_of(
+ OPTIONAL_DICTS, OPTIONAL_FLOATS, OPTIONAL_INTS, OPTIONAL_LISTS, OPTIONAL_TEXT
+ )
+)
+def test_where_inplace_casting(data):
+ # GH 22051
+ df = DataFrame({"a": data})
+ df_copy = df.where(pd.notnull(df), None).copy()
+ df.where(pd.notnull(df), None, inplace=True)
+ tm.assert_equal(df, df_copy)
diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py
index 1cfc86899f1e7..13a457500d6fb 100644
--- a/pandas/tests/io/parser/test_parse_dates.py
+++ b/pandas/tests/io/parser/test_parse_dates.py
@@ -14,7 +14,6 @@
from hypothesis import (
given,
settings,
- strategies as st,
)
import numpy as np
import pytest
@@ -22,10 +21,7 @@
from pandas._libs.tslibs import parsing
from pandas._libs.tslibs.parsing import parse_datetime_string
-from pandas.compat import (
- is_platform_windows,
- np_array_datetime64_compat,
-)
+from pandas.compat import np_array_datetime64_compat
from pandas.compat.pyarrow import pa_version_under6p0
import pandas as pd
@@ -38,6 +34,7 @@
Timestamp,
)
import pandas._testing as tm
+from pandas._testing._hypothesis import DATETIME_NO_TZ
from pandas.core.indexes.datetimes import date_range
import pandas.io.date_converters as conv
@@ -52,12 +49,6 @@
# constant
_DEFAULT_DATETIME = datetime(1, 1, 1)
-# Strategy for hypothesis
-if is_platform_windows():
- date_strategy = st.datetimes(min_value=datetime(1900, 1, 1))
-else:
- date_strategy = st.datetimes()
-
@xfail_pyarrow
def test_read_csv_with_custom_date_parser(all_parsers):
@@ -1683,7 +1674,7 @@ def _helper_hypothesis_delimited_date(call, date_string, **kwargs):
@skip_pyarrow
-@given(date_strategy)
+@given(DATETIME_NO_TZ)
@settings(deadline=None)
@pytest.mark.parametrize("delimiter", list(" -./"))
@pytest.mark.parametrize("dayfirst", [True, False])
diff --git a/pandas/tests/tseries/offsets/test_offsets_properties.py b/pandas/tests/tseries/offsets/test_offsets_properties.py
index 2d88f6690a794..ef9f2390922ff 100644
--- a/pandas/tests/tseries/offsets/test_offsets_properties.py
+++ b/pandas/tests/tseries/offsets/test_offsets_properties.py
@@ -7,92 +7,26 @@
You may wish to consult the previous version for inspiration on further
tests, or when trying to pin down the bugs exposed by the tests below.
"""
-import warnings
-
from hypothesis import (
assume,
given,
- strategies as st,
)
from hypothesis.errors import Flaky
-from hypothesis.extra.dateutil import timezones as dateutil_timezones
-from hypothesis.extra.pytz import timezones as pytz_timezones
import pytest
import pytz
import pandas as pd
-from pandas import Timestamp
-
-from pandas.tseries.offsets import (
- BMonthBegin,
- BMonthEnd,
- BQuarterBegin,
- BQuarterEnd,
- BYearBegin,
- BYearEnd,
- MonthBegin,
- MonthEnd,
- QuarterBegin,
- QuarterEnd,
- YearBegin,
- YearEnd,
-)
-
-# ----------------------------------------------------------------
-# Helpers for generating random data
-
-with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- min_dt = Timestamp(1900, 1, 1).to_pydatetime()
- max_dt = Timestamp(1900, 1, 1).to_pydatetime()
-
-gen_date_range = st.builds(
- pd.date_range,
- start=st.datetimes(
- # TODO: Choose the min/max values more systematically
- min_value=Timestamp(1900, 1, 1).to_pydatetime(),
- max_value=Timestamp(2100, 1, 1).to_pydatetime(),
- ),
- periods=st.integers(min_value=2, max_value=100),
- freq=st.sampled_from("Y Q M D H T s ms us ns".split()),
- tz=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
+from pandas._testing._hypothesis import (
+ DATETIME_JAN_1_1900_OPTIONAL_TZ,
+ YQM_OFFSET,
)
-gen_random_datetime = st.datetimes(
- min_value=min_dt,
- max_value=max_dt,
- timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
-)
-
-# The strategy for each type is registered in conftest.py, as they don't carry
-# enough runtime information (e.g. type hints) to infer how to build them.
-gen_yqm_offset = st.one_of(
- *map(
- st.from_type,
- [
- MonthBegin,
- MonthEnd,
- BMonthBegin,
- BMonthEnd,
- QuarterBegin,
- QuarterEnd,
- BQuarterBegin,
- BQuarterEnd,
- YearBegin,
- YearEnd,
- BYearBegin,
- BYearEnd,
- ],
- )
-)
-
-
# ----------------------------------------------------------------
# Offset-specific behaviour tests
@pytest.mark.arm_slow
-@given(gen_random_datetime, gen_yqm_offset)
+@given(DATETIME_JAN_1_1900_OPTIONAL_TZ, YQM_OFFSET)
def test_on_offset_implementations(dt, offset):
assume(not offset.normalize)
# check that the class-specific implementations of is_on_offset match
@@ -112,7 +46,7 @@ def test_on_offset_implementations(dt, offset):
@pytest.mark.xfail(strict=False, raises=Flaky, reason="unreliable test timings")
-@given(gen_yqm_offset)
+@given(YQM_OFFSET)
def test_shift_across_dst(offset):
# GH#18319 check that 1) timezone is correctly normalized and
# 2) that hour is not incorrectly changed by this normalization
diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py
index 464eeaed1e725..47e7b2aa4d401 100644
--- a/pandas/tests/tseries/offsets/test_ticks.py
+++ b/pandas/tests/tseries/offsets/test_ticks.py
@@ -11,7 +11,6 @@
example,
given,
settings,
- strategies as st,
)
import numpy as np
import pytest
@@ -23,6 +22,7 @@
Timestamp,
)
import pandas._testing as tm
+from pandas._testing._hypothesis import INT_NEG_999_TO_POS_999
from pandas.tests.tseries.offsets.common import assert_offset_equal
from pandas.tseries import offsets
@@ -66,7 +66,7 @@ def test_delta_to_tick():
@example(n=2, m=3)
@example(n=800, m=300)
@example(n=1000, m=5)
-@given(n=st.integers(-999, 999), m=st.integers(-999, 999))
+@given(n=INT_NEG_999_TO_POS_999, m=INT_NEG_999_TO_POS_999)
def test_tick_add_sub(cls, n, m):
# For all Tick subclasses and all integers n, m, we should have
# tick(n) + tick(m) == tick(n+m)
@@ -86,7 +86,7 @@ def test_tick_add_sub(cls, n, m):
@pytest.mark.parametrize("cls", tick_classes)
@settings(deadline=None)
@example(n=2, m=3)
-@given(n=st.integers(-999, 999), m=st.integers(-999, 999))
+@given(n=INT_NEG_999_TO_POS_999, m=INT_NEG_999_TO_POS_999)
def test_tick_equality(cls, n, m):
assume(m != n)
# tick == tock iff tick.n == tock.n
diff --git a/pandas/tests/tslibs/test_ccalendar.py b/pandas/tests/tslibs/test_ccalendar.py
index bba833abd3ad0..6a0d0a8d92955 100644
--- a/pandas/tests/tslibs/test_ccalendar.py
+++ b/pandas/tests/tslibs/test_ccalendar.py
@@ -3,16 +3,13 @@
datetime,
)
-from hypothesis import (
- given,
- strategies as st,
-)
+from hypothesis import given
import numpy as np
import pytest
from pandas._libs.tslibs import ccalendar
-import pandas as pd
+from pandas._testing._hypothesis import DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ
@pytest.mark.parametrize(
@@ -59,12 +56,7 @@ def test_dt_correct_iso_8601_year_week_and_day(input_date_tuple, expected_iso_tu
assert result == expected_iso_tuple
-@given(
- st.datetimes(
- min_value=pd.Timestamp.min.to_pydatetime(warn=False),
- max_value=pd.Timestamp.max.to_pydatetime(warn=False),
- )
-)
+@given(DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ)
def test_isocalendar(dt):
expected = dt.isocalendar()
result = ccalendar.get_iso_calendar(dt.year, dt.month, dt.day)
| This adds a test for ensuring that None-to-NaN type-casting is consistent with `where` when `inplace=True` and otherwise.
- [x] closes #22051
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
| https://api.github.com/repos/pandas-dev/pandas/pulls/44255 | 2021-10-31T17:49:05Z | 2021-12-02T02:02:05Z | 2021-12-02T02:02:05Z | 2021-12-02T02:02:09Z |
TYP: check typings with pyright | diff --git a/pyproject.toml b/pyproject.toml
index d84024eb09de2..98ab112ab459a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -150,7 +150,7 @@ skip = "pandas/__init__.py"
[tool.pyright]
pythonVersion = "3.8"
typeCheckingMode = "strict"
-include = ["pandas"]
+include = ["pandas", "typings"]
exclude = ["pandas/tests", "pandas/io/clipboard", "pandas/util/version"]
reportGeneralTypeIssues = false
reportConstantRedefinition = false
diff --git a/typings/numba.pyi b/typings/numba.pyi
index d6a2729d36db3..526119951a000 100644
--- a/typings/numba.pyi
+++ b/typings/numba.pyi
@@ -1,3 +1,4 @@
+# pyright: reportIncompleteStub = false
from typing import (
Any,
Callable,
| small followup to #44233 | https://api.github.com/repos/pandas-dev/pandas/pulls/44254 | 2021-10-31T16:35:33Z | 2021-11-02T08:56:44Z | 2021-11-02T08:56:44Z | 2021-11-02T08:56:53Z |
TYP: Subset of "Improved the type stubs in the _libs directory to help with type checking" | diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 9d5922f8a50bd..aba635e19995a 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -516,9 +516,9 @@ def intervals_to_interval_bounds(ndarray intervals, bint validate_closed=True):
Returns
-------
- tuple of tuples
- left : (ndarray, object, array)
- right : (ndarray, object, array)
+ tuple of
+ left : ndarray
+ right : ndarray
closed: str
"""
cdef:
diff --git a/pandas/_libs/missing.pyi b/pandas/_libs/missing.pyi
new file mode 100644
index 0000000000000..1177e82906190
--- /dev/null
+++ b/pandas/_libs/missing.pyi
@@ -0,0 +1,15 @@
+import numpy as np
+from numpy import typing as npt
+
+class NAType: ...
+
+NA: NAType
+
+def is_matching_na(
+ left: object, right: object, nan_matches_none: bool = ...
+) -> bool: ...
+def isposinf_scalar(val: object) -> bool: ...
+def isneginf_scalar(val: object) -> bool: ...
+def checknull(val: object, inf_as_na: bool = ...) -> bool: ...
+def isnaobj(arr: np.ndarray, inf_as_na: bool = ...) -> npt.NDArray[np.bool_]: ...
+def is_numeric_na(values: np.ndarray) -> npt.NDArray[np.bool_]: ...
diff --git a/pandas/_libs/tslibs/dtypes.pyi b/pandas/_libs/tslibs/dtypes.pyi
index 8c510b05de4ce..8e47993e9d85f 100644
--- a/pandas/_libs/tslibs/dtypes.pyi
+++ b/pandas/_libs/tslibs/dtypes.pyi
@@ -18,33 +18,33 @@ class PeriodDtypeBase:
def resolution(self) -> Resolution: ...
class FreqGroup(Enum):
- FR_ANN: int = ...
- FR_QTR: int = ...
- FR_MTH: int = ...
- FR_WK: int = ...
- FR_BUS: int = ...
- FR_DAY: int = ...
- FR_HR: int = ...
- FR_MIN: int = ...
- FR_SEC: int = ...
- FR_MS: int = ...
- FR_US: int = ...
- FR_NS: int = ...
- FR_UND: int = ...
+ FR_ANN: int
+ FR_QTR: int
+ FR_MTH: int
+ FR_WK: int
+ FR_BUS: int
+ FR_DAY: int
+ FR_HR: int
+ FR_MIN: int
+ FR_SEC: int
+ FR_MS: int
+ FR_US: int
+ FR_NS: int
+ FR_UND: int
@staticmethod
def get_freq_group(code: int) -> FreqGroup: ...
class Resolution(Enum):
- RESO_NS: int = ...
- RESO_US: int = ...
- RESO_MS: int = ...
- RESO_SEC: int = ...
- RESO_MIN: int = ...
- RESO_HR: int = ...
- RESO_DAY: int = ...
- RESO_MTH: int = ...
- RESO_QTR: int = ...
- RESO_YR: int = ...
+ RESO_NS: int
+ RESO_US: int
+ RESO_MS: int
+ RESO_SEC: int
+ RESO_MIN: int
+ RESO_HR: int
+ RESO_DAY: int
+ RESO_MTH: int
+ RESO_QTR: int
+ RESO_YR: int
def __lt__(self, other: Resolution) -> bool: ...
def __ge__(self, other: Resolution) -> bool: ...
@property
diff --git a/pandas/_libs/tslibs/nattype.pyi b/pandas/_libs/tslibs/nattype.pyi
index 6a5555cfff030..a7ee9a70342d4 100644
--- a/pandas/_libs/tslibs/nattype.pyi
+++ b/pandas/_libs/tslibs/nattype.pyi
@@ -12,6 +12,8 @@ NaT: NaTType
iNaT: int
nat_strings: set[str]
+def is_null_datetimelike(val: object, inat_is_null: bool = ...) -> bool: ...
+
class NaTType(datetime):
value: np.int64
def asm8(self) -> np.datetime64: ...
diff --git a/pandas/_libs/tslibs/np_datetime.pyi b/pandas/_libs/tslibs/np_datetime.pyi
new file mode 100644
index 0000000000000..db0c277b73bd5
--- /dev/null
+++ b/pandas/_libs/tslibs/np_datetime.pyi
@@ -0,0 +1 @@
+class OutOfBoundsDatetime(ValueError): ...
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index f293557a51ac2..7e6d8fa38aa45 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -3573,7 +3573,7 @@ cpdef to_offset(freq):
Parameters
----------
- freq : str, tuple, datetime.timedelta, DateOffset or None
+ freq : str, datetime.timedelta, BaseOffset or None
Returns
-------
@@ -3586,7 +3586,7 @@ cpdef to_offset(freq):
See Also
--------
- DateOffset : Standard kind of date increment used for a date range.
+ BaseOffset : Standard kind of date increment used for a date range.
Examples
--------
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
index a89d0aecfc26c..17df594a39c44 100644
--- a/pandas/_libs/tslibs/timestamps.pyi
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -17,7 +17,6 @@ import numpy as np
from pandas._libs.tslibs import (
BaseOffset,
- NaT,
NaTType,
Period,
Timedelta,
@@ -25,7 +24,7 @@ from pandas._libs.tslibs import (
_S = TypeVar("_S")
-def integer_op_not_supported(obj) -> None: ...
+def integer_op_not_supported(obj) -> TypeError: ...
class Timestamp(datetime):
min: ClassVar[Timestamp]
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index df71501d55b20..c6987d9a11e4c 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -512,7 +512,9 @@ def _cmp_method(self, other, op):
# ------------------------------------------------------------------------
# String methods interface
- _str_na_value = StringDtype.na_value
+ # error: Incompatible types in assignment (expression has type "NAType",
+ # base class "PandasArray" defined the type as "float")
+ _str_na_value = StringDtype.na_value # type: ignore[assignment]
def _str_map(
self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 79ea7731466d4..3b04490ae098c 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -912,6 +912,10 @@ def maybe_upcast(
# We get a copy in all cases _except_ (values.dtype == new_dtype and not copy)
upcast_values = values.astype(new_dtype, copy=copy)
+ # error: Incompatible return value type (got "Tuple[ndarray[Any, dtype[Any]],
+ # Union[Union[str, int, float, bool] Union[Period, Timestamp, Timedelta, Any]]]",
+ # expected "Tuple[NumpyArrayT, Union[Union[str, int, float, bool], Union[Period,
+ # Timestamp, Timedelta, Any]]]")
return upcast_values, fill_value # type: ignore[return-value]
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 71da0a4b20b41..e74d73b84e94b 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -876,15 +876,15 @@ def freq(self):
@classmethod
def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset:
- if isinstance(freq, str):
+ if isinstance(freq, str): # note: freq is already of type str!
if freq.startswith("period[") or freq.startswith("Period["):
m = cls._match.search(freq)
if m is not None:
freq = m.group("freq")
- freq = to_offset(freq)
- if freq is not None:
- return freq
+ freq_offset = to_offset(freq)
+ if freq_offset is not None:
+ return freq_offset
raise ValueError("could not construct PeriodDtype")
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 47949334df021..4e3306e84c1a1 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -241,7 +241,10 @@ def _isna_array(values: ArrayLike, inf_as_na: bool = False):
if inf_as_na and is_categorical_dtype(dtype):
result = libmissing.isnaobj(values.to_numpy(), inf_as_na=inf_as_na)
else:
- result = values.isna()
+ # error: Incompatible types in assignment (expression has type
+ # "Union[ndarray[Any, Any], ExtensionArraySupportsAnyAll]", variable has
+ # type "ndarray[Any, dtype[bool_]]")
+ result = values.isna() # type: ignore[assignment]
elif is_string_or_object_np_dtype(values.dtype):
result = _isna_string_dtype(values, inf_as_na=inf_as_na)
elif needs_i8_conversion(dtype):
diff --git a/pandas/core/ops/mask_ops.py b/pandas/core/ops/mask_ops.py
index d21c80b81b582..57bacba0d4bee 100644
--- a/pandas/core/ops/mask_ops.py
+++ b/pandas/core/ops/mask_ops.py
@@ -12,8 +12,8 @@
def kleene_or(
- left: bool | np.ndarray,
- right: bool | np.ndarray,
+ left: bool | np.ndarray | libmissing.NAType,
+ right: bool | np.ndarray | libmissing.NAType,
left_mask: np.ndarray | None,
right_mask: np.ndarray | None,
):
@@ -37,12 +37,13 @@ def kleene_or(
The result of the logical or, and the new mask.
"""
# To reduce the number of cases, we ensure that `left` & `left_mask`
- # always come from an array, not a scalar. This is safe, since because
+ # always come from an array, not a scalar. This is safe, since
# A | B == B | A
if left_mask is None:
return kleene_or(right, left, right_mask, left_mask)
- assert isinstance(left, np.ndarray)
+ if not isinstance(left, np.ndarray):
+ raise TypeError("Either `left` or `right` need to be a np.ndarray.")
raise_for_nan(right, method="or")
@@ -73,8 +74,8 @@ def kleene_or(
def kleene_xor(
- left: bool | np.ndarray,
- right: bool | np.ndarray,
+ left: bool | np.ndarray | libmissing.NAType,
+ right: bool | np.ndarray | libmissing.NAType,
left_mask: np.ndarray | None,
right_mask: np.ndarray | None,
):
@@ -99,16 +100,20 @@ def kleene_xor(
result, mask: ndarray[bool]
The result of the logical xor, and the new mask.
"""
+ # To reduce the number of cases, we ensure that `left` & `left_mask`
+ # always come from an array, not a scalar. This is safe, since
+ # A ^ B == B ^ A
if left_mask is None:
return kleene_xor(right, left, right_mask, left_mask)
+ if not isinstance(left, np.ndarray):
+ raise TypeError("Either `left` or `right` need to be a np.ndarray.")
+
raise_for_nan(right, method="xor")
if right is libmissing.NA:
result = np.zeros_like(left)
else:
- # error: Incompatible types in assignment (expression has type
- # "Union[bool, Any]", variable has type "ndarray")
- result = left ^ right # type: ignore[assignment]
+ result = left ^ right
if right_mask is None:
if right is libmissing.NA:
@@ -146,12 +151,13 @@ def kleene_and(
The result of the logical xor, and the new mask.
"""
# To reduce the number of cases, we ensure that `left` & `left_mask`
- # always come from an array, not a scalar. This is safe, since because
- # A | B == B | A
+ # always come from an array, not a scalar. This is safe, since
+ # A & B == B & A
if left_mask is None:
return kleene_and(right, left, right_mask, left_mask)
- assert isinstance(left, np.ndarray)
+ if not isinstance(left, np.ndarray):
+ raise TypeError("Either `left` or `right` need to be a np.ndarray.")
raise_for_nan(right, method="and")
if right is libmissing.NA:
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index f132dd88d5147..e00defcfcffd1 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -2012,30 +2012,30 @@ def _adjust_dates_anchored(
if closed == "right":
if foffset > 0:
# roll back
- fresult = first.value - foffset
+ fresult_int = first.value - foffset
else:
- fresult = first.value - freq.nanos
+ fresult_int = first.value - freq.nanos
if loffset > 0:
# roll forward
- lresult = last.value + (freq.nanos - loffset)
+ lresult_int = last.value + (freq.nanos - loffset)
else:
# already the end of the road
- lresult = last.value
+ lresult_int = last.value
else: # closed == 'left'
if foffset > 0:
- fresult = first.value - foffset
+ fresult_int = first.value - foffset
else:
# start of the road
- fresult = first.value
+ fresult_int = first.value
if loffset > 0:
# roll forward
- lresult = last.value + (freq.nanos - loffset)
+ lresult_int = last.value + (freq.nanos - loffset)
else:
- lresult = last.value + freq.nanos
- fresult = Timestamp(fresult)
- lresult = Timestamp(lresult)
+ lresult_int = last.value + freq.nanos
+ fresult = Timestamp(fresult_int)
+ lresult = Timestamp(lresult_int)
if first_tzinfo is not None:
fresult = fresult.tz_localize("UTC").tz_convert(first_tzinfo)
if last_tzinfo is not None:
diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py
index 2ce5c0cbea272..6b0380a292f07 100644
--- a/pandas/core/strings/object_array.py
+++ b/pandas/core/strings/object_array.py
@@ -193,7 +193,7 @@ def rep(x, r):
return result
def _str_match(
- self, pat: str, case: bool = True, flags: int = 0, na: Scalar = None
+ self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None
):
if not case:
flags |= re.IGNORECASE
@@ -208,7 +208,7 @@ def _str_fullmatch(
pat: str | re.Pattern,
case: bool = True,
flags: int = 0,
- na: Scalar = None,
+ na: Scalar | None = None,
):
if not case:
flags |= re.IGNORECASE
diff --git a/pandas/tests/arrays/boolean/test_logical.py b/pandas/tests/arrays/boolean/test_logical.py
index 938fa8f1a5d6a..b4cca635fa238 100644
--- a/pandas/tests/arrays/boolean/test_logical.py
+++ b/pandas/tests/arrays/boolean/test_logical.py
@@ -6,6 +6,11 @@
import pandas as pd
import pandas._testing as tm
from pandas.arrays import BooleanArray
+from pandas.core.ops.mask_ops import (
+ kleene_and,
+ kleene_or,
+ kleene_xor,
+)
from pandas.tests.extension.base import BaseOpsUtil
@@ -239,3 +244,11 @@ def test_no_masked_assumptions(self, other, all_logical_operators):
result = getattr(a, all_logical_operators)(other)
expected = getattr(b, all_logical_operators)(other)
tm.assert_extension_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("operation", [kleene_or, kleene_xor, kleene_and])
+def test_error_both_scalar(operation):
+ msg = r"Either `left` or `right` need to be a np\.ndarray."
+ with pytest.raises(TypeError, match=msg):
+ # masks need to be non-None, otherwise it ends up in an infinite recursion
+ operation(True, True, np.zeros(1), np.zeros(1))
| The two big parts missing from #43744 are interval.pyi and offsets.pyi as they result in many mypy errors.
I had already made changes to somewhat accommodate for interval.pyi and offsets.pyi (if these files are included, we have now "only" ~100 mypy errors). I kept these changes in this PR. | https://api.github.com/repos/pandas-dev/pandas/pulls/44251 | 2021-10-31T13:33:16Z | 2021-12-14T01:38:43Z | 2021-12-14T01:38:43Z | 2022-03-09T02:56:32Z |
DataFrame.convert_dtypes doesn't preserve subclasses | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index ee1dd58149451..560c3fad59e5e 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -538,6 +538,7 @@ Conversion
- Bug in :class:`Series` constructor returning 0 for missing values with dtype ``int64`` and ``False`` for dtype ``bool`` (:issue:`43017`, :issue:`43018`)
- Bug in :class:`IntegerDtype` not allowing coercion from string dtype (:issue:`25472`)
- Bug in :func:`to_datetime` with ``arg:xr.DataArray`` and ``unit="ns"`` specified raises TypeError (:issue:`44053`)
+- Bug in :meth:`DataFrame.convert_dtypes` not returning the correct type when a subclass does not overload :meth:`_constructor_sliced` (:issue:`43201`)
-
Strings
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 23608cf0192df..6b51456006021 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -18,6 +18,7 @@
Literal,
Mapping,
Sequence,
+ Type,
cast,
final,
overload,
@@ -6219,8 +6220,12 @@ def convert_dtypes(
for col_name, col in self.items()
]
if len(results) > 0:
+ result = concat(results, axis=1, copy=False)
+ cons = cast(Type["DataFrame"], self._constructor)
+ result = cons(result)
+ result = result.__finalize__(self, method="convert_dtypes")
# https://github.com/python/mypy/issues/8354
- return cast(NDFrameT, concat(results, axis=1, copy=False))
+ return cast(NDFrameT, result)
else:
return self.copy()
diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py
index 42474ff00ad6d..8d9957b24300f 100644
--- a/pandas/tests/frame/test_subclass.py
+++ b/pandas/tests/frame/test_subclass.py
@@ -13,6 +13,16 @@
import pandas._testing as tm
+@pytest.fixture()
+def gpd_style_subclass_df():
+ class SubclassedDataFrame(DataFrame):
+ @property
+ def _constructor(self):
+ return SubclassedDataFrame
+
+ return SubclassedDataFrame({"a": [1, 2, 3]})
+
+
class TestDataFrameSubclassing:
def test_frame_subclassing_and_slicing(self):
# Subclass frame and ensure it returns the right class on slicing it
@@ -704,6 +714,15 @@ def test_idxmax_preserves_subclass(self):
result = df.idxmax()
assert isinstance(result, tm.SubclassedSeries)
+ def test_convert_dtypes_preserves_subclass(self, gpd_style_subclass_df):
+ # GH 43668
+ df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
+ result = df.convert_dtypes()
+ assert isinstance(result, tm.SubclassedDataFrame)
+
+ result = gpd_style_subclass_df.convert_dtypes()
+ assert isinstance(result, type(gpd_style_subclass_df))
+
def test_equals_subclass(self):
# https://github.com/pandas-dev/pandas/pull/34402
# allow subclass in both directions
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py
index c1f8b5dd7cf41..135e8cc7b7aba 100644
--- a/pandas/tests/generic/test_finalize.py
+++ b/pandas/tests/generic/test_finalize.py
@@ -347,10 +347,7 @@
operator.methodcaller("infer_objects"),
),
(pd.Series, ([1, 2],), operator.methodcaller("convert_dtypes")),
- pytest.param(
- (pd.DataFrame, frame_data, operator.methodcaller("convert_dtypes")),
- marks=not_implemented_mark,
- ),
+ (pd.DataFrame, frame_data, operator.methodcaller("convert_dtypes")),
(pd.Series, ([1, None, 3],), operator.methodcaller("interpolate")),
(pd.DataFrame, ({"A": [1, None, 3]},), operator.methodcaller("interpolate")),
(pd.Series, ([1, 2],), operator.methodcaller("clip", lower=1)),
| - [x] closes #43668
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
I've added a fixture for the specific construction of a subclassed dataframe from the issue - which mirrors the setup in geopandas, I expect this probably needs to be relocated (I could have constructed this inline with the test and maybe that's the right solution, but I can see analogous changes for `astype` perhaps being useful in the geopandas context as well).
I've also added the call to `__finalize__` although it's not strictly needed, since that's also needed by #28283 (Note that the 1 dimensional case already has `__finalize__` called indirectly via `astype`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/44249 | 2021-10-31T07:57:37Z | 2021-11-13T17:03:31Z | 2021-11-13T17:03:31Z | 2023-09-16T01:22:47Z |
BUG: styler render when using `hide`, `MultiIndex` and `max_rows` in combination | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 5601048c409e1..210dfc0050bf4 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -81,7 +81,7 @@ Styler
- Naive sparsification is now possible for LaTeX without the multirow package (:issue:`43369`)
- :meth:`.Styler.to_html` omits CSSStyle rules for hidden table elements (:issue:`43619`)
- Custom CSS classes can now be directly specified without string replacement (:issue:`43686`)
- - Bug where row trimming failed to reflect hidden rows (:issue:`43703`)
+ - Bug where row trimming failed to reflect hidden rows (:issue:`43703`, :issue:`44247`)
- Update and expand the export and use mechanics (:issue:`40675`)
- New method :meth:`.Styler.hide` added and deprecates :meth:`.Styler.hide_index` and :meth:`.Styler.hide_columns` (:issue:`43758`)
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index 27a5170e48949..a71dd6f33e3c8 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -1230,29 +1230,37 @@ def _get_level_lengths(
return lengths
for i, lvl in enumerate(levels):
+ visible_row_count = 0 # used to break loop due to display trimming
for j, row in enumerate(lvl):
- if j >= max_index:
- # stop the loop due to display trimming
+ if visible_row_count > max_index:
break
if not sparsify:
+ # then lengths will always equal 1 since no aggregation.
if j not in hidden_elements:
lengths[(i, j)] = 1
+ visible_row_count += 1
elif (row is not lib.no_default) and (j not in hidden_elements):
+ # this element has not been sparsified so must be the start of section
last_label = j
lengths[(i, last_label)] = 1
+ visible_row_count += 1
elif row is not lib.no_default:
- # even if its hidden, keep track of it in case
- # length >1 and later elements are visible
+ # even if the above is hidden, keep track of it in case length > 1 and
+ # later elements are visible
last_label = j
lengths[(i, last_label)] = 0
elif j not in hidden_elements:
+ # then element must be part of sparsified section and is visible
+ visible_row_count += 1
if lengths[(i, last_label)] == 0:
- # if the previous iteration was first-of-kind but hidden then offset
+ # if previous iteration was first-of-section but hidden then offset
last_label = j
lengths[(i, last_label)] = 1
else:
- # else add to previous iteration
- lengths[(i, last_label)] += 1
+ # else add to previous iteration but do not extend more than max
+ lengths[(i, last_label)] = min(
+ max_index, 1 + lengths[(i, last_label)]
+ )
non_zero_lengths = {
element: length for element, length in lengths.items() if length >= 1
diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py
index cf2ec347015d1..8ac0dd03c9fd6 100644
--- a/pandas/tests/io/formats/style/test_style.py
+++ b/pandas/tests/io/formats/style/test_style.py
@@ -1577,3 +1577,26 @@ def test_row_trimming_hide_index():
assert len(ctx["body"]) == 3
for r, val in enumerate(["3", "4", "..."]):
assert ctx["body"][r][1]["display_value"] == val
+
+
+def test_row_trimming_hide_index_mi():
+ # gh 44247
+ df = DataFrame([[1], [2], [3], [4], [5]])
+ df.index = MultiIndex.from_product([[0], [0, 1, 2, 3, 4]])
+ with pd.option_context("styler.render.max_rows", 2):
+ ctx = df.style.hide([(0, 0), (0, 1)], axis="index")._translate(True, True)
+ assert len(ctx["body"]) == 3
+
+ # level 0 index headers (sparsified)
+ assert {"value": 0, "attributes": 'rowspan="2"', "is_visible": True}.items() <= ctx[
+ "body"
+ ][0][0].items()
+ assert {"value": 0, "attributes": "", "is_visible": False}.items() <= ctx["body"][
+ 1
+ ][0].items()
+ assert {"value": "...", "is_visible": True}.items() <= ctx["body"][2][0].items()
+
+ for r, val in enumerate(["2", "3", "..."]):
+ assert ctx["body"][r][1]["display_value"] == val # level 1 index headers
+ for r, val in enumerate(["3", "4", "..."]):
+ assert ctx["body"][r][2]["display_value"] == val # data values
| There was a previous fix for #43703 but that dealt only with the SingleIndex case
- [x] closes #44247
- [x] tests added / passed
- [x] whatsnew entry
Visuals of the fix compared to the issue:

| https://api.github.com/repos/pandas-dev/pandas/pulls/44248 | 2021-10-31T07:54:14Z | 2021-11-01T13:50:40Z | 2021-11-01T13:50:39Z | 2022-03-06T07:43:41Z |
TST: Old issues | diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py
index 48f984c21623b..1d0b64c1835df 100644
--- a/pandas/tests/apply/test_series_apply.py
+++ b/pandas/tests/apply/test_series_apply.py
@@ -878,3 +878,15 @@ def test_apply_dictlike_transformer(string_series, ops):
expected.name = string_series.name
result = string_series.apply(ops)
tm.assert_series_equal(result, expected)
+
+
+def test_apply_retains_column_name():
+ # GH 16380
+ df = DataFrame({"x": range(3)}, Index(range(3), name="x"))
+ result = df.x.apply(lambda x: Series(range(x + 1), Index(range(x + 1), name="y")))
+ expected = DataFrame(
+ [[0.0, np.nan, np.nan], [0.0, 1.0, np.nan], [0.0, 1.0, 2.0]],
+ columns=Index(range(3), name="y"),
+ index=Index(range(3), name="x"),
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/arithmetic/test_categorical.py b/pandas/tests/arithmetic/test_categorical.py
index 924f32b5ac9ac..d6f3a13ce6705 100644
--- a/pandas/tests/arithmetic/test_categorical.py
+++ b/pandas/tests/arithmetic/test_categorical.py
@@ -13,3 +13,13 @@ def test_categorical_nan_equality(self):
expected = Series([True, True, True, False])
result = cat == cat
tm.assert_series_equal(result, expected)
+
+ def test_categorical_tuple_equality(self):
+ # GH 18050
+ ser = Series([(0, 0), (0, 1), (0, 0), (1, 0), (1, 1)])
+ expected = Series([True, False, True, False, False])
+ result = ser == (0, 0)
+ tm.assert_series_equal(result, expected)
+
+ result = ser.astype("category") == (0, 0)
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py
index 905b33b285625..404baecdfecac 100644
--- a/pandas/tests/frame/test_stack_unstack.py
+++ b/pandas/tests/frame/test_stack_unstack.py
@@ -2081,3 +2081,21 @@ def test_unstack_categorical_columns(self):
)
expected.columns = MultiIndex.from_tuples([("cat", 0), ("cat", 1)])
tm.assert_frame_equal(result, expected)
+
+ def test_stack_unsorted(self):
+ # GH 16925
+ PAE = ["ITA", "FRA"]
+ VAR = ["A1", "A2"]
+ TYP = ["CRT", "DBT", "NET"]
+ MI = MultiIndex.from_product([PAE, VAR, TYP], names=["PAE", "VAR", "TYP"])
+
+ V = list(range(len(MI)))
+ DF = DataFrame(data=V, index=MI, columns=["VALUE"])
+
+ DF = DF.unstack(["VAR", "TYP"])
+ DF.columns = DF.columns.droplevel(0)
+ DF.loc[:, ("A0", "NET")] = 9999
+
+ result = DF.stack(["VAR", "TYP"]).sort_index()
+ expected = DF.sort_index(axis=1).stack(["VAR", "TYP"]).sort_index()
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 3ae11847cc06b..3c402480ea2ec 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1155,3 +1155,13 @@ def test_groupby_sum_below_mincount_nullable_integer():
result = grouped.sum(min_count=2)
expected = DataFrame({"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx)
tm.assert_frame_equal(result, expected)
+
+
+def test_mean_on_timedelta():
+ # GH 17382
+ df = DataFrame({"time": pd.to_timedelta(range(10)), "cat": ["A", "B"] * 5})
+ result = df.groupby("cat")["time"].mean()
+ expected = Series(
+ pd.to_timedelta([4, 5]), name="time", index=Index(["A", "B"], name="cat")
+ )
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py
index c006d5a287bcd..83b8d5c29bbf0 100644
--- a/pandas/tests/groupby/test_rank.py
+++ b/pandas/tests/groupby/test_rank.py
@@ -648,3 +648,16 @@ def test_groupby_axis0_cummax_axis1():
expected = df[[0, 1]].astype(np.float64)
expected[2] = expected[1]
tm.assert_frame_equal(cmax, expected)
+
+
+def test_non_unique_index():
+ # GH 16577
+ df = DataFrame(
+ {"A": [1.0, 2.0, 3.0, np.nan], "value": 1.0},
+ index=[pd.Timestamp("20170101", tz="US/Eastern")] * 4,
+ )
+ result = df.groupby([df.index, "A"]).value.rank(ascending=True, pct=True)
+ expected = Series(
+ [1.0] * 4, index=[pd.Timestamp("20170101", tz="US/Eastern")] * 4, name="value"
+ )
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index 441cbfe66f1d8..1e78bb1e58583 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -1289,3 +1289,11 @@ def test_transform_cumcount():
result = grp.transform("cumcount")
tm.assert_series_equal(result, expected)
+
+
+def test_null_group_lambda_self():
+ # GH 17093
+ df = DataFrame({"A": [1, np.nan], "B": [1, 1]})
+ result = df.groupby("A").transform(lambda x: x)
+ expected = DataFrame([1], columns=["B"])
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index 6b136618de721..8d1fa97f9f8bb 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -853,3 +853,12 @@ def test_len_colspecs_len_names_with_index_col(
index_col=index_col,
)
tm.assert_frame_equal(result, expected)
+
+
+def test_colspecs_with_comment():
+ # GH 14135
+ result = read_fwf(
+ StringIO("#\nA1K\n"), colspecs=[(1, 2), (2, 3)], comment="#", header=None
+ )
+ expected = DataFrame([[1, "K"]], columns=[0, 1])
+ tm.assert_frame_equal(result, expected)
| - [x] closes #16380
- [x] closes #14135
- [x] closes #16577
- [x] closes #16925
- [x] closes #17093
- [x] closes #17382
- [x] closes #18050
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
| https://api.github.com/repos/pandas-dev/pandas/pulls/44245 | 2021-10-31T03:29:23Z | 2021-11-01T13:14:52Z | 2021-11-01T13:14:50Z | 2021-11-01T21:45:20Z |
Revert "Backport PR #44204 on branch 1.3.x (CI: Python Dev build)" | diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml
index 96d5542451f06..4fe58ad4d60e9 100644
--- a/.github/workflows/python-dev.yml
+++ b/.github/workflows/python-dev.yml
@@ -17,6 +17,7 @@ env:
PANDAS_CI: 1
PATTERN: "not slow and not network and not clipboard"
COVERAGE: true
+ PYTEST_TARGET: pandas
jobs:
build:
@@ -25,13 +26,12 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-latest, macOS-latest, windows-latest]
- pytest_target: ["pandas/tests/[a-h]*", "pandas/tests/[i-z]*"]
name: actions-310-dev
- timeout-minutes: 80
+ timeout-minutes: 60
concurrency:
- group: ${{ github.ref }}-${{ matrix.os }}-${{ matrix.pytest_target }}-dev
+ group: ${{ github.ref }}-${{ matrix.os }}-dev
cancel-in-progress: ${{github.event_name == 'pull_request'}}
steps:
@@ -63,8 +63,6 @@ jobs:
python -c "import pandas; pandas.show_versions();"
- name: Test with pytest
- env:
- PYTEST_TARGET: ${{ matrix.pytest_target }}
shell: bash
run: |
ci/run_tests.sh
| Reverts pandas-dev/pandas#44208
THIS IS ONLY ON 1.3.x.
Let's try reverting this. In theory, the test changes that caused the timeouts shouldn't be backported, so this shouldn't be needed. | https://api.github.com/repos/pandas-dev/pandas/pulls/44244 | 2021-10-30T23:58:48Z | 2021-10-31T11:48:27Z | 2021-10-31T11:48:27Z | 2021-10-31T13:17:53Z |
DEPR: datetime64tz cast mismatched timezones on setitemlike | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 699d8a81243db..d2433402662f7 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -394,6 +394,9 @@ Other Deprecations
- Deprecated :meth:`.Rolling.validate`, :meth:`.Expanding.validate`, and :meth:`.ExponentialMovingWindow.validate` (:issue:`43665`)
- Deprecated silent dropping of columns that raised a ``TypeError`` in :class:`Series.transform` and :class:`DataFrame.transform` when used with a dictionary (:issue:`43740`)
- Deprecated silent dropping of columns that raised a ``TypeError``, ``DataError``, and some cases of ``ValueError`` in :meth:`Series.aggregate`, :meth:`DataFrame.aggregate`, :meth:`Series.groupby.aggregate`, and :meth:`DataFrame.groupby.aggregate` when used with a list (:issue:`43740`)
+- Deprecated casting behavior when setting timezone-aware value(s) into a timezone-aware :class:`Series` or :class:`DataFrame` column when the timezones do not match. Previously this cast to object dtype. In a future version, the values being inserted will be converted to the series or column's existing timezone (:issue:`37605`)
+- Deprecated casting behavior when passing an item with mismatched-timezone to :meth:`DatetimeIndex.insert`, :meth:`DatetimeIndex.putmask`, :meth:`DatetimeIndex.where` :meth:`DatetimeIndex.fillna`, :meth:`Series.mask`, :meth:`Series.where`, :meth:`Series.fillna`, :meth:`Series.shift`, :meth:`Series.replace`, :meth:`Series.reindex` (and :class:`DataFrame` column analogues). In the past this has cast to object dtype. In a future version, these will cast the passed item to the index or series's timezone (:issue:`37605`)
+-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 71d38d3b3f73b..4fecbe4be9681 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -39,6 +39,7 @@
)
from pandas._typing import npt
from pandas.errors import PerformanceWarning
+from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_inclusive
from pandas.core.dtypes.cast import astype_dt64_to_dt64tz
@@ -509,6 +510,19 @@ def _check_compatible_with(self, other, setitem: bool = False):
if setitem:
# Stricter check for setitem vs comparison methods
if not timezones.tz_compare(self.tz, other.tz):
+ # TODO(2.0): remove this check. GH#37605
+ warnings.warn(
+ "Setitem-like behavior with mismatched timezones is deprecated "
+ "and will change in a future version. Instead of raising "
+ "(or for Index, Series, and DataFrame methods, coercing to "
+ "object dtype), the value being set (or passed as a "
+ "fill_value, or inserted) will be cast to the existing "
+ "DatetimeArray/DatetimeIndex/Series/DataFrame column's "
+ "timezone. To retain the old behavior, explicitly cast to "
+ "object dtype before the operation.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
raise ValueError(f"Timezones don't match. '{self.tz}' != '{other.tz}'")
# -----------------------------------------------------------------
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 1e150f1b431c7..c7c1ce6c04692 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -883,7 +883,15 @@ def test_take_fill_valid(self, arr1d):
msg = "Timezones don't match. .* != 'Australia/Melbourne'"
with pytest.raises(ValueError, match=msg):
# require tz match, not just tzawareness match
- arr.take([-1, 1], allow_fill=True, fill_value=value)
+ with tm.assert_produces_warning(
+ FutureWarning, match="mismatched timezone"
+ ):
+ result = arr.take([-1, 1], allow_fill=True, fill_value=value)
+
+ # once deprecation is enforced
+ # expected = arr.take([-1, 1], allow_fill=True,
+ # fill_value=value.tz_convert(arr.dtype.tz))
+ # tm.assert_equal(result, expected)
def test_concat_same_type_invalid(self, arr1d):
# different timezones
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index b9c1113e7f441..180fb9d29224e 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -128,8 +128,14 @@ def test_setitem_different_tz_raises(self):
with pytest.raises(TypeError, match="Cannot compare tz-naive and tz-aware"):
arr[0] = pd.Timestamp("2000")
+ ts = pd.Timestamp("2000", tz="US/Eastern")
with pytest.raises(ValueError, match="US/Central"):
- arr[0] = pd.Timestamp("2000", tz="US/Eastern")
+ with tm.assert_produces_warning(
+ FutureWarning, match="mismatched timezones"
+ ):
+ arr[0] = ts
+ # once deprecation is enforced
+ # assert arr[0] == ts.tz_convert("US/Central")
def test_setitem_clears_freq(self):
a = DatetimeArray(pd.date_range("2000", periods=2, freq="D", tz="US/Central"))
@@ -385,7 +391,14 @@ def test_shift_requires_tzmatch(self):
msg = "Timezones don't match. 'UTC' != 'US/Pacific'"
with pytest.raises(ValueError, match=msg):
- dta.shift(1, fill_value=fill_value)
+ with tm.assert_produces_warning(
+ FutureWarning, match="mismatched timezones"
+ ):
+ dta.shift(1, fill_value=fill_value)
+
+ # once deprecation is enforced
+ # expected = dta.shift(1, fill_value=fill_value.tz_convert("UTC"))
+ # tm.assert_equal(result, expected)
def test_tz_localize_t2d(self):
dti = pd.date_range("1994-05-12", periods=12, tz="US/Pacific")
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index a89e089f3d8a2..5e321ad33a2bb 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1109,12 +1109,17 @@ def test_replace_datetimetz(self):
# coerce to object
result = df.copy()
result.iloc[1, 0] = np.nan
- result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Pacific"))
+ with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
+ result = result.replace(
+ {"A": pd.NaT}, Timestamp("20130104", tz="US/Pacific")
+ )
expected = DataFrame(
{
"A": [
Timestamp("20130101", tz="US/Eastern"),
Timestamp("20130104", tz="US/Pacific"),
+ # once deprecation is enforced
+ # Timestamp("20130104", tz="US/Pacific").tz_convert("US/Eastern"),
Timestamp("20130103", tz="US/Eastern"),
],
"B": [0, np.nan, 2],
diff --git a/pandas/tests/indexes/datetimes/methods/test_insert.py b/pandas/tests/indexes/datetimes/methods/test_insert.py
index aa9b2c5291585..016a29e4cc266 100644
--- a/pandas/tests/indexes/datetimes/methods/test_insert.py
+++ b/pandas/tests/indexes/datetimes/methods/test_insert.py
@@ -197,18 +197,32 @@ def test_insert_mismatched_tz(self):
# mismatched tz -> cast to object (could reasonably cast to same tz or UTC)
item = Timestamp("2000-01-04", tz="US/Eastern")
- result = idx.insert(3, item)
+ with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
+ result = idx.insert(3, item)
expected = Index(
- list(idx[:3]) + [item] + list(idx[3:]), dtype=object, name="idx"
+ list(idx[:3]) + [item] + list(idx[3:]),
+ dtype=object,
+ # once deprecation is enforced
+ # list(idx[:3]) + [item.tz_convert(idx.tz)] + list(idx[3:]),
+ name="idx",
)
+ # once deprecation is enforced
+ # assert expected.dtype == idx.dtype
tm.assert_index_equal(result, expected)
# mismatched tz -> cast to object (could reasonably cast to same tz)
item = datetime(2000, 1, 4, tzinfo=pytz.timezone("US/Eastern"))
- result = idx.insert(3, item)
+ with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
+ result = idx.insert(3, item)
expected = Index(
- list(idx[:3]) + [item] + list(idx[3:]), dtype=object, name="idx"
+ list(idx[:3]) + [item] + list(idx[3:]),
+ dtype=object,
+ # once deprecation is enforced
+ # list(idx[:3]) + [item.astimezone(idx.tzinfo)] + list(idx[3:]),
+ name="idx",
)
+ # once deprecation is enforced
+ # assert expected.dtype == idx.dtype
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 9a22a16106469..27aeb411e36f0 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -237,11 +237,17 @@ def test_setitem_series_datetime64tz(self, val, exp_dtype):
[
pd.Timestamp("2011-01-01", tz=tz),
val,
+ # once deprecation is enforced
+ # val if getattr(val, "tz", None) is None else val.tz_convert(tz),
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
- self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
+ warn = None
+ if getattr(val, "tz", None) is not None and val.tz != obj[0].tz:
+ warn = FutureWarning
+ with tm.assert_produces_warning(warn, match="mismatched timezones"):
+ self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
@@ -467,9 +473,12 @@ def test_insert_index_datetimes(self, request, fill_val, exp_dtype, insert_value
# mismatched tz --> cast to object (could reasonably cast to common tz)
ts = pd.Timestamp("2012-01-01", tz="Asia/Tokyo")
- result = obj.insert(1, ts)
+ with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
+ result = obj.insert(1, ts)
+ # once deprecation is enforced:
+ # expected = obj.insert(1, ts.tz_convert(obj.dtype.tz))
+ # assert expected.dtype == obj.dtype
expected = obj.astype(object).insert(1, ts)
- assert expected.dtype == object
tm.assert_index_equal(result, expected)
else:
@@ -990,11 +999,18 @@ def test_fillna_datetime64tz(self, index_or_series, fill_val, fill_dtype):
[
pd.Timestamp("2011-01-01", tz=tz),
fill_val,
+ # Once deprecation is enforced, this becomes:
+ # fill_val.tz_convert(tz) if getattr(fill_val, "tz", None)
+ # is not None else fill_val,
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
- self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
+ warn = None
+ if getattr(fill_val, "tz", None) is not None and fill_val.tz != obj[0].tz:
+ warn = FutureWarning
+ with tm.assert_produces_warning(warn, match="mismatched timezone"):
+ self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_series_int64(self):
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index b82ecac37634e..cf2a4a75f95b5 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1890,7 +1890,8 @@ def test_setitem_with_expansion(self):
# trying to set a single element on a part of a different timezone
# this converts to object
df2 = df.copy()
- df2.loc[df2.new_col == "new", "time"] = v
+ with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
+ df2.loc[df2.new_col == "new", "time"] = v
expected = Series([v[0], df.loc[1, "time"]], name="time")
tm.assert_series_equal(df2.time, expected)
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index a922a937ce9d3..5521bee09b19b 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -898,6 +898,18 @@ def expected(self):
)
return expected
+ @pytest.fixture(autouse=True)
+ def assert_warns(self, request):
+ # check that we issue a FutureWarning about timezone-matching
+ if request.function.__name__ == "test_slice_key":
+ key = request.getfixturevalue("key")
+ if not isinstance(key, slice):
+ # The test is a no-op, so no warning will be issued
+ yield
+ return
+ with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
+ yield
+
@pytest.mark.parametrize(
"obj,expected",
diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py
index a28da1d856cf9..2feaf4e951ab8 100644
--- a/pandas/tests/series/methods/test_fillna.py
+++ b/pandas/tests/series/methods/test_fillna.py
@@ -523,7 +523,8 @@ def test_datetime64_tz_fillna(self, tz):
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
- result = ser.fillna(Timestamp("20130101", tz="US/Pacific"))
+ with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
+ result = ser.fillna(Timestamp("20130101", tz="US/Pacific"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
@@ -766,8 +767,15 @@ def test_fillna_datetime64_with_timezone_tzinfo(self):
# but we dont (yet) consider distinct tzinfos for non-UTC tz equivalent
ts = Timestamp("2000-01-01", tz="US/Pacific")
ser2 = Series(ser._values.tz_convert("dateutil/US/Pacific"))
- result = ser2.fillna(ts)
+ assert ser2.dtype.kind == "M"
+ with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
+ result = ser2.fillna(ts)
expected = Series([ser[0], ts, ser[2]], dtype=object)
+ # once deprecation is enforced
+ # expected = Series(
+ # [ser2[0], ts.tz_convert(ser2.dtype.tz), ser2[2]],
+ # dtype=ser2.dtype,
+ # )
tm.assert_series_equal(result, expected)
def test_fillna_pos_args_deprecation(self):
| - [x] closes #37605
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44243 | 2021-10-30T23:46:55Z | 2021-10-31T14:53:56Z | 2021-10-31T14:53:56Z | 2021-10-31T15:32:01Z |
TST: fixturize, collect | diff --git a/pandas/conftest.py b/pandas/conftest.py
index 1e1d14f46cc6e..65d4b936efe44 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -995,17 +995,19 @@ def all_reductions(request):
return request.param
-@pytest.fixture(params=["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"])
-def all_compare_operators(request):
+@pytest.fixture(
+ params=[
+ operator.eq,
+ operator.ne,
+ operator.gt,
+ operator.ge,
+ operator.lt,
+ operator.le,
+ ]
+)
+def comparison_op(request):
"""
- Fixture for dunder names for common compare operations
-
- * >=
- * >
- * ==
- * !=
- * <
- * <=
+ Fixture for operator module comparison functions.
"""
return request.param
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index e511c1bdaca9c..82f1e60f0aea5 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -365,18 +365,14 @@ def test_dt64arr_timestamp_equality(self, box_with_array):
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
- @pytest.mark.parametrize(
- "op",
- [operator.eq, operator.ne, operator.gt, operator.lt, operator.ge, operator.le],
- )
- def test_comparators(self, op):
+ def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
- arr_result = op(arr, element)
- index_result = op(index, element)
+ arr_result = comparison_op(arr, element)
+ index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@@ -554,12 +550,9 @@ def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
- @pytest.mark.parametrize(
- "op",
- [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
- )
- def test_comparison_tzawareness_compat(self, op, box_with_array):
+ def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
+ op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
@@ -606,12 +599,10 @@ def test_comparison_tzawareness_compat(self, op, box_with_array):
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
- @pytest.mark.parametrize(
- "op",
- [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
- )
- def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
+ def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
+ op = comparison_op
+
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
@@ -638,10 +629,6 @@ def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
with pytest.raises(TypeError, match=msg):
op(ts, dz)
- @pytest.mark.parametrize(
- "op",
- [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
- )
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
@@ -652,8 +639,9 @@ def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
- self, op, other, tz_aware_fixture, box_with_array
+ self, comparison_op, other, tz_aware_fixture, box_with_array
):
+ op = comparison_op
box = box_with_array
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
@@ -680,13 +668,11 @@ def test_scalar_comparison_tzawareness(
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
- @pytest.mark.parametrize(
- "op",
- [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
- )
- def test_nat_comparison_tzawareness(self, op):
+ def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
+ op = comparison_op
+
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
diff --git a/pandas/tests/arrays/boolean/test_comparison.py b/pandas/tests/arrays/boolean/test_comparison.py
index 8730837b518e5..2741d13ee599b 100644
--- a/pandas/tests/arrays/boolean/test_comparison.py
+++ b/pandas/tests/arrays/boolean/test_comparison.py
@@ -21,25 +21,23 @@ def dtype():
class TestComparisonOps(ComparisonOps):
- def test_compare_scalar(self, data, all_compare_operators):
- op_name = all_compare_operators
- self._compare_other(data, op_name, True)
+ def test_compare_scalar(self, data, comparison_op):
+ self._compare_other(data, comparison_op, True)
- def test_compare_array(self, data, all_compare_operators):
- op_name = all_compare_operators
+ def test_compare_array(self, data, comparison_op):
other = pd.array([True] * len(data), dtype="boolean")
- self._compare_other(data, op_name, other)
+ self._compare_other(data, comparison_op, other)
other = np.array([True] * len(data))
- self._compare_other(data, op_name, other)
+ self._compare_other(data, comparison_op, other)
other = pd.Series([True] * len(data))
- self._compare_other(data, op_name, other)
+ self._compare_other(data, comparison_op, other)
@pytest.mark.parametrize("other", [True, False, pd.NA])
- def test_scalar(self, other, all_compare_operators, dtype):
- ComparisonOps.test_scalar(self, other, all_compare_operators, dtype)
+ def test_scalar(self, other, comparison_op, dtype):
+ ComparisonOps.test_scalar(self, other, comparison_op, dtype)
- def test_array(self, all_compare_operators):
- op = self.get_op_from_name(all_compare_operators)
+ def test_array(self, comparison_op):
+ op = comparison_op
a = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")
b = pd.array([True, False, None] * 3, dtype="boolean")
diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py
index 4a00df2d783cf..06296a954c059 100644
--- a/pandas/tests/arrays/categorical/test_operators.py
+++ b/pandas/tests/arrays/categorical/test_operators.py
@@ -1,4 +1,3 @@
-import operator
import warnings
import numpy as np
@@ -145,9 +144,9 @@ def test_compare_frame(self):
expected = DataFrame([[False, True, True, False]])
tm.assert_frame_equal(result, expected)
- def test_compare_frame_raises(self, all_compare_operators):
+ def test_compare_frame_raises(self, comparison_op):
# alignment raises unless we transpose
- op = getattr(operator, all_compare_operators)
+ op = comparison_op
cat = Categorical(["a", "b", 2, "a"])
df = DataFrame(cat)
msg = "Unable to coerce to Series, length must be 1: given 4"
diff --git a/pandas/tests/arrays/floating/test_comparison.py b/pandas/tests/arrays/floating/test_comparison.py
index bfd54e125159c..c4163c25ae74d 100644
--- a/pandas/tests/arrays/floating/test_comparison.py
+++ b/pandas/tests/arrays/floating/test_comparison.py
@@ -10,11 +10,11 @@
class TestComparisonOps(NumericOps, ComparisonOps):
@pytest.mark.parametrize("other", [True, False, pd.NA, -1.0, 0.0, 1])
- def test_scalar(self, other, all_compare_operators, dtype):
- ComparisonOps.test_scalar(self, other, all_compare_operators, dtype)
+ def test_scalar(self, other, comparison_op, dtype):
+ ComparisonOps.test_scalar(self, other, comparison_op, dtype)
- def test_compare_with_integerarray(self, all_compare_operators):
- op = self.get_op_from_name(all_compare_operators)
+ def test_compare_with_integerarray(self, comparison_op):
+ op = comparison_op
a = pd.array([0, 1, None] * 3, dtype="Int64")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Float64")
other = b.astype("Int64")
diff --git a/pandas/tests/arrays/integer/test_comparison.py b/pandas/tests/arrays/integer/test_comparison.py
index 043f5d64d159b..3bbf6866076e8 100644
--- a/pandas/tests/arrays/integer/test_comparison.py
+++ b/pandas/tests/arrays/integer/test_comparison.py
@@ -9,18 +9,19 @@
class TestComparisonOps(NumericOps, ComparisonOps):
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
- def test_scalar(self, other, all_compare_operators, dtype):
- ComparisonOps.test_scalar(self, other, all_compare_operators, dtype)
+ def test_scalar(self, other, comparison_op, dtype):
+ ComparisonOps.test_scalar(self, other, comparison_op, dtype)
- def test_compare_to_int(self, dtype, all_compare_operators):
+ def test_compare_to_int(self, dtype, comparison_op):
# GH 28930
+ op_name = f"__{comparison_op.__name__}__"
s1 = pd.Series([1, None, 3], dtype=dtype)
s2 = pd.Series([1, None, 3], dtype="float")
- method = getattr(s1, all_compare_operators)
+ method = getattr(s1, op_name)
result = method(2)
- method = getattr(s2, all_compare_operators)
+ method = getattr(s2, op_name)
expected = method(2).astype("boolean")
expected[s2.isna()] = pd.NA
diff --git a/pandas/tests/arrays/masked_shared.py b/pandas/tests/arrays/masked_shared.py
index 1a461777e08e3..06c05e458958b 100644
--- a/pandas/tests/arrays/masked_shared.py
+++ b/pandas/tests/arrays/masked_shared.py
@@ -9,8 +9,7 @@
class ComparisonOps(BaseOpsUtil):
- def _compare_other(self, data, op_name, other):
- op = self.get_op_from_name(op_name)
+ def _compare_other(self, data, op, other):
# array
result = pd.Series(op(data, other))
@@ -34,8 +33,8 @@ def _compare_other(self, data, op_name, other):
tm.assert_series_equal(result, expected)
# subclass will override to parametrize 'other'
- def test_scalar(self, other, all_compare_operators, dtype):
- op = self.get_op_from_name(all_compare_operators)
+ def test_scalar(self, other, comparison_op, dtype):
+ op = comparison_op
left = pd.array([1, 0, None], dtype=dtype)
result = op(left, other)
@@ -59,8 +58,8 @@ def test_no_shared_mask(self, data):
result = data + 1
assert np.shares_memory(result._mask, data._mask) is False
- def test_array(self, all_compare_operators, dtype):
- op = self.get_op_from_name(all_compare_operators)
+ def test_array(self, comparison_op, dtype):
+ op = comparison_op
left = pd.array([0, 1, 2, None, None, None], dtype=dtype)
right = pd.array([0, 1, None, 0, 1, None], dtype=dtype)
@@ -81,8 +80,8 @@ def test_array(self, all_compare_operators, dtype):
right, pd.array([0, 1, None, 0, 1, None], dtype=dtype)
)
- def test_compare_with_booleanarray(self, all_compare_operators, dtype):
- op = self.get_op_from_name(all_compare_operators)
+ def test_compare_with_booleanarray(self, comparison_op, dtype):
+ op = comparison_op
left = pd.array([True, False, None] * 3, dtype="boolean")
right = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype=dtype)
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index fa564ac76f8bb..501a79a8bc5ed 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -199,8 +199,8 @@ def test_add_frame(dtype):
tm.assert_frame_equal(result, expected)
-def test_comparison_methods_scalar(all_compare_operators, dtype):
- op_name = all_compare_operators
+def test_comparison_methods_scalar(comparison_op, dtype):
+ op_name = f"__{comparison_op.__name__}__"
a = pd.array(["a", None, "c"], dtype=dtype)
other = "a"
result = getattr(a, op_name)(other)
@@ -209,21 +209,21 @@ def test_comparison_methods_scalar(all_compare_operators, dtype):
tm.assert_extension_array_equal(result, expected)
-def test_comparison_methods_scalar_pd_na(all_compare_operators, dtype):
- op_name = all_compare_operators
+def test_comparison_methods_scalar_pd_na(comparison_op, dtype):
+ op_name = f"__{comparison_op.__name__}__"
a = pd.array(["a", None, "c"], dtype=dtype)
result = getattr(a, op_name)(pd.NA)
expected = pd.array([None, None, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
-def test_comparison_methods_scalar_not_string(all_compare_operators, dtype, request):
- if all_compare_operators not in ["__eq__", "__ne__"]:
+def test_comparison_methods_scalar_not_string(comparison_op, dtype, request):
+ op_name = f"__{comparison_op.__name__}__"
+ if op_name not in ["__eq__", "__ne__"]:
reason = "comparison op not supported between instances of 'str' and 'int'"
mark = pytest.mark.xfail(raises=TypeError, reason=reason)
request.node.add_marker(mark)
- op_name = all_compare_operators
a = pd.array(["a", None, "c"], dtype=dtype)
other = 42
result = getattr(a, op_name)(other)
@@ -234,14 +234,14 @@ def test_comparison_methods_scalar_not_string(all_compare_operators, dtype, requ
tm.assert_extension_array_equal(result, expected)
-def test_comparison_methods_array(all_compare_operators, dtype, request):
+def test_comparison_methods_array(comparison_op, dtype, request):
if dtype.storage == "pyarrow":
mark = pytest.mark.xfail(
raises=AssertionError, reason="left is not an ExtensionArray"
)
request.node.add_marker(mark)
- op_name = all_compare_operators
+ op_name = f"__{comparison_op.__name__}__"
a = pd.array(["a", None, "c"], dtype=dtype)
other = [None, None, "c"]
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 180fb9d29224e..5b9df44f5b565 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -1,8 +1,6 @@
"""
Tests for DatetimeArray
"""
-import operator
-
import numpy as np
import pytest
@@ -17,10 +15,9 @@ class TestDatetimeArrayComparisons:
# TODO: merge this into tests/arithmetic/test_datetime64 once it is
# sufficiently robust
- def test_cmp_dt64_arraylike_tznaive(self, all_compare_operators):
+ def test_cmp_dt64_arraylike_tznaive(self, comparison_op):
# arbitrary tz-naive DatetimeIndex
- opname = all_compare_operators.strip("_")
- op = getattr(operator, opname)
+ op = comparison_op
dti = pd.date_range("2016-01-1", freq="MS", periods=9, tz=None)
arr = DatetimeArray(dti)
@@ -30,7 +27,7 @@ def test_cmp_dt64_arraylike_tznaive(self, all_compare_operators):
right = dti
expected = np.ones(len(arr), dtype=bool)
- if opname in ["ne", "gt", "lt"]:
+ if comparison_op.__name__ in ["ne", "gt", "lt"]:
# for these the comparisons should be all-False
expected = ~expected
diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py
index 73bff29305f20..bf3985ad198dd 100644
--- a/pandas/tests/extension/base/getitem.py
+++ b/pandas/tests/extension/base/getitem.py
@@ -138,6 +138,7 @@ def test_getitem_invalid(self, data):
"list index out of range", # json
"index out of bounds", # pyarrow
"Out of bounds access", # Sparse
+ f"loc must be an integer between -{ub} and {ub}", # Sparse
f"index {ub+1} is out of bounds for axis 0 with size {ub}",
f"index -{ub+1} is out of bounds for axis 0 with size {ub}",
]
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py
index 88437321b1028..c52f20255eb81 100644
--- a/pandas/tests/extension/base/ops.py
+++ b/pandas/tests/extension/base/ops.py
@@ -130,10 +130,9 @@ def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box):
class BaseComparisonOpsTests(BaseOpsUtil):
"""Various Series and DataFrame comparison ops methods."""
- def _compare_other(self, ser: pd.Series, data, op_name: str, other):
+ def _compare_other(self, ser: pd.Series, data, op, other):
- op = self.get_op_from_name(op_name)
- if op_name in ["__eq__", "__ne__"]:
+ if op.__name__ in ["eq", "ne"]:
# comparison should match point-wise comparisons
result = op(ser, other)
expected = ser.combine(other, op)
@@ -154,23 +153,22 @@ def _compare_other(self, ser: pd.Series, data, op_name: str, other):
with pytest.raises(type(exc)):
ser.combine(other, op)
- def test_compare_scalar(self, data, all_compare_operators):
- op_name = all_compare_operators
+ def test_compare_scalar(self, data, comparison_op):
ser = pd.Series(data)
- self._compare_other(ser, data, op_name, 0)
+ self._compare_other(ser, data, comparison_op, 0)
- def test_compare_array(self, data, all_compare_operators):
- op_name = all_compare_operators
+ def test_compare_array(self, data, comparison_op):
ser = pd.Series(data)
other = pd.Series([data[0]] * len(data))
- self._compare_other(ser, data, op_name, other)
+ self._compare_other(ser, data, comparison_op, other)
- @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
- def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box):
+ def test_direct_arith_with_ndframe_returns_not_implemented(
+ self, data, frame_or_series
+ ):
# EAs should return NotImplemented for ops with Series/DataFrame
# Pandas takes care of unboxing the series and calling the EA's op.
other = pd.Series(data)
- if box is pd.DataFrame:
+ if frame_or_series is pd.DataFrame:
other = other.to_frame()
if hasattr(data, "__eq__"):
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 461dbe5575022..bca87bb8ec2aa 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -266,19 +266,17 @@ def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
class TestComparisonOps(base.BaseComparisonOpsTests):
- def test_compare_scalar(self, data, all_compare_operators):
- op_name = all_compare_operators
+ def test_compare_scalar(self, data, comparison_op):
s = pd.Series(data)
- self._compare_other(s, data, op_name, 0.5)
+ self._compare_other(s, data, comparison_op, 0.5)
- def test_compare_array(self, data, all_compare_operators):
- op_name = all_compare_operators
+ def test_compare_array(self, data, comparison_op):
s = pd.Series(data)
alter = np.random.choice([-1, 0, 1], len(data))
# Randomly double, halve or keep same value
other = pd.Series(data) * [decimal.Decimal(pow(2.0, i)) for i in alter]
- self._compare_other(s, data, op_name, other)
+ self._compare_other(s, data, comparison_op, other)
class DecimalArrayWithoutFromSequence(DecimalArray):
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index 9c4bf76b27c14..05455905860d2 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -151,11 +151,11 @@ def check_opname(self, s, op_name, other, exc=None):
super().check_opname(s, op_name, other, exc=None)
@pytest.mark.skip(reason="Tested in tests/arrays/test_boolean.py")
- def test_compare_scalar(self, data, all_compare_operators):
+ def test_compare_scalar(self, data, comparison_op):
pass
@pytest.mark.skip(reason="Tested in tests/arrays/test_boolean.py")
- def test_compare_array(self, data, all_compare_operators):
+ def test_compare_array(self, data, comparison_op):
pass
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index ea8b1cfb738f5..e9dc63e9bd903 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -270,8 +270,8 @@ def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
class TestComparisonOps(base.BaseComparisonOpsTests):
- def _compare_other(self, s, data, op_name, other):
- op = self.get_op_from_name(op_name)
+ def _compare_other(self, s, data, op, other):
+ op_name = f"__{op.__name__}__"
if op_name == "__eq__":
result = op(s, other)
expected = s.combine(other, lambda x, y: x == y)
diff --git a/pandas/tests/extension/test_floating.py b/pandas/tests/extension/test_floating.py
index 500c2fbb74d17..2b08c5b7be450 100644
--- a/pandas/tests/extension/test_floating.py
+++ b/pandas/tests/extension/test_floating.py
@@ -142,7 +142,8 @@ def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
def check_opname(self, s, op_name, other, exc=None):
super().check_opname(s, op_name, other, exc=None)
- def _compare_other(self, s, data, op_name, other):
+ def _compare_other(self, s, data, op, other):
+ op_name = f"__{op.__name__}__"
self.check_opname(s, op_name, other)
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py
index c9c0a4de60a46..7d343aab3c7a0 100644
--- a/pandas/tests/extension/test_integer.py
+++ b/pandas/tests/extension/test_integer.py
@@ -161,7 +161,8 @@ def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
def check_opname(self, s, op_name, other, exc=None):
super().check_opname(s, op_name, other, exc=None)
- def _compare_other(self, s, data, op_name, other):
+ def _compare_other(self, s, data, op, other):
+ op_name = f"__{op.__name__}__"
self.check_opname(s, op_name, other)
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index 6358b2fe27ef3..012a3fbb12cac 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -432,8 +432,8 @@ def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
class TestComparisonOps(BaseSparseTests, base.BaseComparisonOpsTests):
- def _compare_other(self, s, data, op_name, other):
- op = self.get_op_from_name(op_name)
+ def _compare_other(self, s, data, comparison_op, other):
+ op = comparison_op
# array
result = pd.Series(op(data, other))
diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py
index af86c359c4c00..5049116a9320e 100644
--- a/pandas/tests/extension/test_string.py
+++ b/pandas/tests/extension/test_string.py
@@ -166,15 +166,15 @@ class TestCasting(base.BaseCastingTests):
class TestComparisonOps(base.BaseComparisonOpsTests):
- def _compare_other(self, s, data, op_name, other):
+ def _compare_other(self, s, data, op, other):
+ op_name = f"__{op.__name__}__"
result = getattr(s, op_name)(other)
expected = getattr(s.astype(object), op_name)(other).astype("boolean")
self.assert_series_equal(result, expected)
- def test_compare_scalar(self, data, all_compare_operators):
- op_name = all_compare_operators
+ def test_compare_scalar(self, data, comparison_op):
s = pd.Series(data)
- self._compare_other(s, data, op_name, "abc")
+ self._compare_other(s, data, comparison_op, "abc")
class TestParsing(base.BaseParsingTests):
diff --git a/pandas/tests/indexes/base_class/test_reshape.py b/pandas/tests/indexes/base_class/test_reshape.py
index 5ebab965e6f04..acb6936f70d0f 100644
--- a/pandas/tests/indexes/base_class/test_reshape.py
+++ b/pandas/tests/indexes/base_class/test_reshape.py
@@ -35,6 +35,13 @@ def test_insert(self):
null_index = Index([])
tm.assert_index_equal(Index(["a"]), null_index.insert(0, "a"))
+ def test_insert_missing(self, nulls_fixture):
+ # GH#22295
+ # test there is no mangling of NA values
+ expected = Index(["a", nulls_fixture, "b", "c"])
+ result = Index(list("abc")).insert(1, nulls_fixture)
+ tm.assert_index_equal(result, expected)
+
@pytest.mark.parametrize(
"pos,expected",
[
@@ -48,6 +55,12 @@ def test_delete(self, pos, expected):
tm.assert_index_equal(result, expected)
assert result.name == expected.name
+ def test_delete_raises(self):
+ index = Index(["a", "b", "c", "d"], name="index")
+ msg = "index 5 is out of bounds for axis 0 with size 4"
+ with pytest.raises(IndexError, match=msg):
+ index.delete(5)
+
def test_append_multiple(self):
index = Index(["a", "b", "c", "d", "e", "f"])
diff --git a/pandas/tests/indexes/categorical/test_reindex.py b/pandas/tests/indexes/categorical/test_reindex.py
index 72130ef9e4627..5a0b2672e397c 100644
--- a/pandas/tests/indexes/categorical/test_reindex.py
+++ b/pandas/tests/indexes/categorical/test_reindex.py
@@ -10,7 +10,7 @@
class TestReindex:
- def test_reindex_dtype(self):
+ def test_reindex_list_non_unique(self):
# GH#11586
ci = CategoricalIndex(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning, match="non-unique"):
@@ -19,6 +19,7 @@ def test_reindex_dtype(self):
tm.assert_index_equal(res, Index(["a", "a", "c"]), exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
+ def test_reindex_categorical_non_unique(self):
ci = CategoricalIndex(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning, match="non-unique"):
res, indexer = ci.reindex(Categorical(["a", "c"]))
@@ -27,6 +28,7 @@ def test_reindex_dtype(self):
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
+ def test_reindex_list_non_unique_unused_category(self):
ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
with tm.assert_produces_warning(FutureWarning, match="non-unique"):
res, indexer = ci.reindex(["a", "c"])
@@ -34,6 +36,7 @@ def test_reindex_dtype(self):
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
+ def test_reindex_categorical_non_unique_unused_category(self):
ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
with tm.assert_produces_warning(FutureWarning, match="non-unique"):
res, indexer = ci.reindex(Categorical(["a", "c"]))
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index ea76a4b4b1cfc..8f37413dd53c8 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -333,6 +333,12 @@ def test_numpy_argsort(self, index):
expected = index.argsort()
tm.assert_numpy_array_equal(result, expected)
+ if not isinstance(index, RangeIndex):
+ # TODO: add compatibility to RangeIndex?
+ result = np.argsort(index, kind="mergesort")
+ expected = index.argsort(kind="mergesort")
+ tm.assert_numpy_array_equal(result, expected)
+
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
@@ -340,16 +346,11 @@ def test_numpy_argsort(self, index):
# defined in pandas.core.indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
- if isinstance(type(index), (CategoricalIndex, RangeIndex)):
- # TODO: why type(index)?
+ if isinstance(index, (CategoricalIndex, RangeIndex)):
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, axis=1)
- msg = "the 'kind' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- np.argsort(index, kind="mergesort")
-
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, order=("a", "b"))
diff --git a/pandas/tests/indexes/datetimes/test_pickle.py b/pandas/tests/indexes/datetimes/test_pickle.py
index 3905daa9688ac..922b4a18119f4 100644
--- a/pandas/tests/indexes/datetimes/test_pickle.py
+++ b/pandas/tests/indexes/datetimes/test_pickle.py
@@ -18,7 +18,7 @@ def test_pickle(self):
assert idx_p[2] == idx[2]
def test_pickle_dont_infer_freq(self):
- # GH##11002
+ # GH#11002
# don't infer freq
idx = date_range("1750-1-1", "2050-1-1", freq="7D")
idx_p = tm.round_trip_pickle(idx)
diff --git a/pandas/tests/indexes/datetimes/test_unique.py b/pandas/tests/indexes/datetimes/test_unique.py
index a6df9cb748294..68ac770f612e6 100644
--- a/pandas/tests/indexes/datetimes/test_unique.py
+++ b/pandas/tests/indexes/datetimes/test_unique.py
@@ -3,8 +3,6 @@
timedelta,
)
-import pytest
-
from pandas import (
DatetimeIndex,
NaT,
@@ -13,18 +11,12 @@
import pandas._testing as tm
-@pytest.mark.parametrize(
- "arr, expected",
- [
- (DatetimeIndex(["2017", "2017"]), DatetimeIndex(["2017"])),
- (
- DatetimeIndex(["2017", "2017"], tz="US/Eastern"),
- DatetimeIndex(["2017"], tz="US/Eastern"),
- ),
- ],
-)
-def test_unique(arr, expected):
- result = arr.unique()
+def test_unique(tz_naive_fixture):
+
+ idx = DatetimeIndex(["2017"] * 2, tz=tz_naive_fixture)
+ expected = idx[:1]
+
+ result = idx.unique()
tm.assert_index_equal(result, expected)
# GH#21737
# Ensure the underlying data is consistent
@@ -60,6 +52,8 @@ def test_index_unique(rand_series_with_duplicate_datetimeindex):
assert result.name == "foo"
tm.assert_index_equal(result, expected)
+
+def test_index_unique2():
# NaT, note this is excluded
arr = [1370745748 + t for t in range(20)] + [NaT.value]
idx = DatetimeIndex(arr * 3)
@@ -67,6 +61,8 @@ def test_index_unique(rand_series_with_duplicate_datetimeindex):
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
+
+def test_index_unique3():
arr = [
Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)
] + [NaT]
diff --git a/pandas/tests/indexes/period/test_join.py b/pandas/tests/indexes/period/test_join.py
index b8b15708466cb..27cba8676d22b 100644
--- a/pandas/tests/indexes/period/test_join.py
+++ b/pandas/tests/indexes/period/test_join.py
@@ -42,10 +42,12 @@ def test_join_does_not_recur(self):
c_idx_type="p",
r_idx_type="dt",
)
- s = df.iloc[:2, 0]
+ ser = df.iloc[:2, 0]
- res = s.index.join(df.columns, how="outer")
- expected = Index([s.index[0], s.index[1], df.columns[0], df.columns[1]], object)
+ res = ser.index.join(df.columns, how="outer")
+ expected = Index(
+ [ser.index[0], ser.index[1], df.columns[0], df.columns[1]], object
+ )
tm.assert_index_equal(res, expected)
def test_join_mismatched_freq_raises(self):
diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py
index 2313afcae607a..39a1ddcbc8a6a 100644
--- a/pandas/tests/indexes/test_any_index.py
+++ b/pandas/tests/indexes/test_any_index.py
@@ -117,7 +117,7 @@ def test_tolist_matches_list(self, index):
class TestRoundTrips:
def test_pickle_roundtrip(self, index):
result = tm.round_trip_pickle(index)
- tm.assert_index_equal(result, index)
+ tm.assert_index_equal(result, index, exact=True)
if result.nlevels > 1:
# GH#8367 round-trip with timezone
assert index.equal_levels(result)
@@ -133,7 +133,7 @@ class TestIndexing:
def test_slice_keeps_name(self, index):
assert index.name == index[1:].name
- @pytest.mark.parametrize("item", [101, "no_int"])
+ @pytest.mark.parametrize("item", [101, "no_int", 2.5])
# FutureWarning from non-tuple sequence of nd indexing
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_getitem_error(self, index, item):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index cbcb00a4230cc..f1ece3e363bb6 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -181,29 +181,6 @@ def test_constructor_from_frame_series_freq(self):
freq = pd.infer_freq(df["date"])
assert freq == "MS"
- @pytest.mark.parametrize(
- "array",
- [
- np.arange(5),
- np.array(["a", "b", "c"]),
- date_range("2000-01-01", periods=3).values,
- ],
- )
- def test_constructor_ndarray_like(self, array):
- # GH 5460#issuecomment-44474502
- # it should be possible to convert any object that satisfies the numpy
- # ndarray interface directly into an Index
- class ArrayLike:
- def __init__(self, array):
- self.array = array
-
- def __array__(self, dtype=None) -> np.ndarray:
- return self.array
-
- expected = Index(array)
- result = Index(ArrayLike(array))
- tm.assert_index_equal(result, expected)
-
def test_constructor_int_dtype_nan(self):
# see gh-15187
data = [np.nan]
@@ -211,20 +188,6 @@ def test_constructor_int_dtype_nan(self):
result = Index(data, dtype="float")
tm.assert_index_equal(result, expected)
- @pytest.mark.parametrize("dtype", ["int64", "uint64"])
- def test_constructor_int_dtype_nan_raises(self, dtype):
- # see gh-15187
- data = [np.nan]
- msg = "cannot convert"
- with pytest.raises(ValueError, match=msg):
- Index(data, dtype=dtype)
-
- def test_constructor_no_pandas_array(self):
- ser = Series([1, 2, 3])
- result = Index(ser.array)
- expected = Index([1, 2, 3])
- tm.assert_index_equal(result, expected)
-
@pytest.mark.parametrize(
"klass,dtype,na_val",
[
@@ -497,19 +460,6 @@ def test_equals_object(self):
def test_not_equals_object(self, comp):
assert not Index(["a", "b", "c"]).equals(comp)
- def test_insert_missing(self, nulls_fixture):
- # GH 22295
- # test there is no mangling of NA values
- expected = Index(["a", nulls_fixture, "b", "c"])
- result = Index(list("abc")).insert(1, nulls_fixture)
- tm.assert_index_equal(result, expected)
-
- def test_delete_raises(self):
- index = Index(["a", "b", "c", "d"], name="index")
- msg = "index 5 is out of bounds for axis 0 with size 4"
- with pytest.raises(IndexError, match=msg):
- index.delete(5)
-
def test_identical(self):
# index
@@ -1574,10 +1524,9 @@ def test_is_monotonic_na(self, index):
assert index._is_strictly_monotonic_increasing is False
assert index._is_strictly_monotonic_decreasing is False
- @pytest.mark.parametrize("klass", [Series, DataFrame])
- def test_int_name_format(self, klass):
+ def test_int_name_format(self, frame_or_series):
index = Index(["a", "b", "c"], name=0)
- result = klass(list(range(3)), index=index)
+ result = frame_or_series(list(range(3)), index=index)
assert "0" in repr(result)
def test_str_to_bytes_raises(self):
diff --git a/pandas/tests/indexes/test_index_new.py b/pandas/tests/indexes/test_index_new.py
index e9bbe60f2d5ea..293aa6dd57124 100644
--- a/pandas/tests/indexes/test_index_new.py
+++ b/pandas/tests/indexes/test_index_new.py
@@ -224,6 +224,14 @@ def test_constructor_datetime64_values_mismatched_period_dtype(self):
expected = dti.to_period("D")
tm.assert_index_equal(result, expected)
+ @pytest.mark.parametrize("dtype", ["int64", "uint64"])
+ def test_constructor_int_dtype_nan_raises(self, dtype):
+ # see GH#15187
+ data = [np.nan]
+ msg = "cannot convert"
+ with pytest.raises(ValueError, match=msg):
+ Index(data, dtype=dtype)
+
class TestIndexConstructorUnwrapping:
# Test passing different arraylike values to pd.Index
@@ -235,3 +243,32 @@ def test_constructor_from_series_dt64(self, klass):
ser = Series(stamps)
result = klass(ser)
tm.assert_index_equal(result, expected)
+
+ def test_constructor_no_pandas_array(self):
+ ser = Series([1, 2, 3])
+ result = Index(ser.array)
+ expected = Index([1, 2, 3])
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "array",
+ [
+ np.arange(5),
+ np.array(["a", "b", "c"]),
+ date_range("2000-01-01", periods=3).values,
+ ],
+ )
+ def test_constructor_ndarray_like(self, array):
+ # GH#5460#issuecomment-44474502
+ # it should be possible to convert any object that satisfies the numpy
+ # ndarray interface directly into an Index
+ class ArrayLike:
+ def __init__(self, array):
+ self.array = array
+
+ def __array__(self, dtype=None) -> np.ndarray:
+ return self.array
+
+ expected = Index(array)
+ result = Index(ArrayLike(array))
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/scalar/timestamp/test_comparisons.py b/pandas/tests/scalar/timestamp/test_comparisons.py
index ee36223eb2496..b7cb7ca8d7069 100644
--- a/pandas/tests/scalar/timestamp/test_comparisons.py
+++ b/pandas/tests/scalar/timestamp/test_comparisons.py
@@ -49,8 +49,7 @@ def test_comparison_dt64_ndarray(self):
tm.assert_numpy_array_equal(result, np.array([[True, False]], dtype=bool))
@pytest.mark.parametrize("reverse", [True, False])
- def test_comparison_dt64_ndarray_tzaware(self, reverse, all_compare_operators):
- op = getattr(operator, all_compare_operators.strip("__"))
+ def test_comparison_dt64_ndarray_tzaware(self, reverse, comparison_op):
ts = Timestamp.now("UTC")
arr = np.array([ts.asm8, ts.asm8], dtype="M8[ns]")
@@ -59,18 +58,18 @@ def test_comparison_dt64_ndarray_tzaware(self, reverse, all_compare_operators):
if reverse:
left, right = arr, ts
- if op is operator.eq:
+ if comparison_op is operator.eq:
expected = np.array([False, False], dtype=bool)
- result = op(left, right)
+ result = comparison_op(left, right)
tm.assert_numpy_array_equal(result, expected)
- elif op is operator.ne:
+ elif comparison_op is operator.ne:
expected = np.array([True, True], dtype=bool)
- result = op(left, right)
+ result = comparison_op(left, right)
tm.assert_numpy_array_equal(result, expected)
else:
msg = "Cannot compare tz-naive and tz-aware timestamps"
with pytest.raises(TypeError, match=msg):
- op(left, right)
+ comparison_op(left, right)
def test_comparison_object_array(self):
# GH#15183
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index e4f9366be8dd7..ed83377f31317 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -322,22 +322,20 @@ def test_arithmetic_with_duplicate_index(self):
class TestSeriesFlexComparison:
@pytest.mark.parametrize("axis", [0, None, "index"])
- def test_comparison_flex_basic(self, axis, all_compare_operators):
- op = all_compare_operators.strip("__")
+ def test_comparison_flex_basic(self, axis, comparison_op):
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
- result = getattr(left, op)(right, axis=axis)
- expected = getattr(operator, op)(left, right)
+ result = getattr(left, comparison_op.__name__)(right, axis=axis)
+ expected = comparison_op(left, right)
tm.assert_series_equal(result, expected)
- def test_comparison_bad_axis(self, all_compare_operators):
- op = all_compare_operators.strip("__")
+ def test_comparison_bad_axis(self, comparison_op):
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
msg = "No axis named 1 for object type"
with pytest.raises(ValueError, match=msg):
- getattr(left, op)(right, axis=1)
+ getattr(left, comparison_op.__name__)(right, axis=1)
@pytest.mark.parametrize(
"values, op",
@@ -598,20 +596,17 @@ def test_comparison_tuples(self):
expected = Series([True, False])
tm.assert_series_equal(result, expected)
- def test_comparison_operators_with_nas(self, all_compare_operators):
- op = all_compare_operators
+ def test_comparison_operators_with_nas(self, comparison_op):
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
- f = getattr(operator, op)
-
# test that comparisons work
val = ser[5]
- result = f(ser, val)
- expected = f(ser.dropna(), val).reindex(ser.index)
+ result = comparison_op(ser, val)
+ expected = comparison_op(ser.dropna(), val).reindex(ser.index)
- if op == "__ne__":
+ if comparison_op is operator.ne:
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
@@ -619,8 +614,8 @@ def test_comparison_operators_with_nas(self, all_compare_operators):
tm.assert_series_equal(result, expected)
# FIXME: dont leave commented-out
- # result = f(val, ser)
- # expected = f(val, ser.dropna()).reindex(ser.index)
+ # result = comparison_op(val, ser)
+ # expected = comparison_op(val, ser.dropna()).reindex(ser.index)
# tm.assert_series_equal(result, expected)
def test_ne(self):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44242 | 2021-10-30T23:41:44Z | 2021-10-31T17:26:12Z | 2021-10-31T17:26:12Z | 2021-11-06T10:22:50Z |
CLN/TST: Remove redundant/unnecessary windows/moments tests | diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index 53a2bf151d3bf..9fea696b6ea81 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -22,9 +22,7 @@ fi
PYTEST_CMD="${XVFB}pytest -m \"$PATTERN\" -n $PYTEST_WORKERS --dist=loadfile $TEST_ARGS $COVERAGE $PYTEST_TARGET"
if [[ $(uname) != "Linux" && $(uname) != "Darwin" ]]; then
- # GH#37455 windows py38 build appears to be running out of memory
- # skip collection of window tests
- PYTEST_CMD="$PYTEST_CMD --ignore=pandas/tests/window/moments --ignore=pandas/tests/plotting/"
+ PYTEST_CMD="$PYTEST_CMD --ignore=pandas/tests/plotting/"
fi
echo $PYTEST_CMD
diff --git a/pandas/tests/window/moments/conftest.py b/pandas/tests/window/moments/conftest.py
index e843caa48f6d7..b192f72c8f08b 100644
--- a/pandas/tests/window/moments/conftest.py
+++ b/pandas/tests/window/moments/conftest.py
@@ -1,3 +1,5 @@
+import itertools
+
import numpy as np
import pytest
@@ -12,133 +14,20 @@
def _create_consistency_data():
def create_series():
return [
- Series(dtype=object),
- Series([np.nan]),
- Series([np.nan, np.nan]),
- Series([3.0]),
- Series([np.nan, 3.0]),
- Series([3.0, np.nan]),
- Series([1.0, 3.0]),
- Series([2.0, 2.0]),
- Series([3.0, 1.0]),
- Series(
- [5.0, 5.0, 5.0, 5.0, np.nan, np.nan, np.nan, 5.0, 5.0, np.nan, np.nan]
- ),
- Series(
- [
- np.nan,
- 5.0,
- 5.0,
- 5.0,
- np.nan,
- np.nan,
- np.nan,
- 5.0,
- 5.0,
- np.nan,
- np.nan,
- ]
- ),
- Series(
- [
- np.nan,
- np.nan,
- 5.0,
- 5.0,
- np.nan,
- np.nan,
- np.nan,
- 5.0,
- 5.0,
- np.nan,
- np.nan,
- ]
- ),
- Series(
- [
- np.nan,
- 3.0,
- np.nan,
- 3.0,
- 4.0,
- 5.0,
- 6.0,
- np.nan,
- np.nan,
- 7.0,
- 12.0,
- 13.0,
- 14.0,
- 15.0,
- ]
- ),
- Series(
- [
- np.nan,
- 5.0,
- np.nan,
- 2.0,
- 4.0,
- 0.0,
- 9.0,
- np.nan,
- np.nan,
- 3.0,
- 12.0,
- 13.0,
- 14.0,
- 15.0,
- ]
- ),
- Series(
- [
- 2.0,
- 3.0,
- np.nan,
- 3.0,
- 4.0,
- 5.0,
- 6.0,
- np.nan,
- np.nan,
- 7.0,
- 12.0,
- 13.0,
- 14.0,
- 15.0,
- ]
- ),
- Series(
- [
- 2.0,
- 5.0,
- np.nan,
- 2.0,
- 4.0,
- 0.0,
- 9.0,
- np.nan,
- np.nan,
- 3.0,
- 12.0,
- 13.0,
- 14.0,
- 15.0,
- ]
- ),
- Series(range(10)),
- Series(range(20, 0, -2)),
+ Series(dtype=np.float64, name="a"),
+ Series([np.nan] * 5),
+ Series([1.0] * 5),
+ Series(range(5, 0, -1)),
+ Series(range(5)),
+ Series([np.nan, 1.0, np.nan, 1.0, 1.0]),
+ Series([np.nan, 1.0, np.nan, 2.0, 3.0]),
+ Series([np.nan, 1.0, np.nan, 3.0, 2.0]),
]
def create_dataframes():
return [
- DataFrame(),
- DataFrame(columns=["a"]),
DataFrame(columns=["a", "a"]),
- DataFrame(columns=["a", "b"]),
- DataFrame(np.arange(10).reshape((5, 2))),
- DataFrame(np.arange(25).reshape((5, 5))),
- DataFrame(np.arange(25).reshape((5, 5)), columns=["a", "b", 99, "d", "d"]),
+ DataFrame(np.arange(15).reshape((5, 3)), columns=["a", "a", 99]),
] + [DataFrame(s) for s in create_series()]
def is_constant(x):
@@ -148,13 +37,34 @@ def is_constant(x):
def no_nans(x):
return x.notna().all().all()
- # data is a tuple(object, is_constant, no_nans)
- data = create_series() + create_dataframes()
-
- return [(x, is_constant(x), no_nans(x)) for x in data]
+ return [
+ (x, is_constant(x), no_nans(x))
+ for x in itertools.chain(create_dataframes(), create_dataframes())
+ ]
@pytest.fixture(params=_create_consistency_data())
def consistency_data(request):
- """Create consistency data"""
+ """
+ Test:
+ - Empty Series / DataFrame
+ - All NaN
+ - All consistent value
+ - Monotonically decreasing
+ - Monotonically increasing
+ - Monotonically consistent with NaNs
+ - Monotonically increasing with NaNs
+ - Monotonically decreasing with NaNs
+ """
+ return request.param
+
+
+@pytest.fixture(params=[(1, 0), (5, 1)])
+def rolling_consistency_cases(request):
+ """window, min_periods"""
+ return request.param
+
+
+@pytest.fixture(params=[0, 2])
+def min_periods(request):
return request.param
diff --git a/pandas/tests/window/moments/test_moments_consistency_ewm.py b/pandas/tests/window/moments/test_moments_consistency_ewm.py
index 800ee2164693b..8feec32ba99c5 100644
--- a/pandas/tests/window/moments/test_moments_consistency_ewm.py
+++ b/pandas/tests/window/moments/test_moments_consistency_ewm.py
@@ -18,7 +18,7 @@ def create_mock_weights(obj, com, adjust, ignore_na):
create_mock_series_weights(
obj.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na
)
- for i, _ in enumerate(obj.columns)
+ for i in range(len(obj.columns))
],
axis=1,
)
@@ -58,7 +58,6 @@ def create_mock_series_weights(s, com, adjust, ignore_na):
return w
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_mean(consistency_data, adjust, ignore_na, min_periods):
x, is_constant, no_nans = consistency_data
com = 3.0
@@ -76,7 +75,6 @@ def test_ewm_consistency_mean(consistency_data, adjust, ignore_na, min_periods):
tm.assert_equal(result, expected.astype("float64"))
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_consistent(consistency_data, adjust, ignore_na, min_periods):
x, is_constant, no_nans = consistency_data
com = 3.0
@@ -102,7 +100,6 @@ def test_ewm_consistency_consistent(consistency_data, adjust, ignore_na, min_per
tm.assert_equal(corr_x_x, expected)
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_var_debiasing_factors(
consistency_data, adjust, ignore_na, min_periods
):
@@ -128,7 +125,6 @@ def test_ewm_consistency_var_debiasing_factors(
tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var(
consistency_data, adjust, ignore_na, min_periods, bias
@@ -154,7 +150,6 @@ def test_moments_consistency_var(
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var_constant(
consistency_data, adjust, ignore_na, min_periods, bias
@@ -176,7 +171,6 @@ def test_moments_consistency_var_constant(
tm.assert_equal(var_x, expected)
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_std(consistency_data, adjust, ignore_na, min_periods, bias):
x, is_constant, no_nans = consistency_data
@@ -184,26 +178,16 @@ def test_ewm_consistency_std(consistency_data, adjust, ignore_na, min_periods, b
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
+ assert not (var_x < 0).any().any()
+
std_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).std(bias=bias)
- assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
-
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
-@pytest.mark.parametrize("bias", [True, False])
-def test_ewm_consistency_cov(consistency_data, adjust, ignore_na, min_periods, bias):
- x, is_constant, no_nans = consistency_data
- com = 3.0
- var_x = x.ewm(
- com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
- ).var(bias=bias)
- assert not (var_x < 0).any().any()
-
cov_x_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).cov(x, bias=bias)
@@ -213,7 +197,6 @@ def test_ewm_consistency_cov(consistency_data, adjust, ignore_na, min_periods, b
tm.assert_equal(var_x, cov_x_x)
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_series_cov_corr(
consistency_data, adjust, ignore_na, min_periods, bias
diff --git a/pandas/tests/window/moments/test_moments_consistency_expanding.py b/pandas/tests/window/moments/test_moments_consistency_expanding.py
index d0fe7bf9fc2d2..14314f80f152c 100644
--- a/pandas/tests/window/moments/test_moments_consistency_expanding.py
+++ b/pandas/tests/window/moments/test_moments_consistency_expanding.py
@@ -5,13 +5,14 @@
import pandas._testing as tm
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
-@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
+@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_expanding_apply_consistency_sum_nans(consistency_data, min_periods, f):
x, is_constant, no_nans = consistency_data
if f is np.nansum and min_periods == 0:
pass
+ elif f is np.sum and not no_nans:
+ pass
else:
expanding_f_result = x.expanding(min_periods=min_periods).sum()
expanding_apply_f_result = x.expanding(min_periods=min_periods).apply(
@@ -20,39 +21,20 @@ def test_expanding_apply_consistency_sum_nans(consistency_data, min_periods, f):
tm.assert_equal(expanding_f_result, expanding_apply_f_result)
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
-@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
-def test_expanding_apply_consistency_sum_no_nans(consistency_data, min_periods, f):
-
- x, is_constant, no_nans = consistency_data
-
- if no_nans:
- if f is np.nansum and min_periods == 0:
- pass
- else:
- expanding_f_result = x.expanding(min_periods=min_periods).sum()
- expanding_apply_f_result = x.expanding(min_periods=min_periods).apply(
- func=f, raw=True
- )
- tm.assert_equal(expanding_f_result, expanding_apply_f_result)
-
-
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
- mean_x = x.expanding(min_periods=min_periods).mean()
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
assert not (var_x < 0).any().any()
if ddof == 0:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = (x * x).expanding(min_periods=min_periods).mean()
+ mean_x = x.expanding(min_periods=min_periods).mean()
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var_constant(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
@@ -70,27 +52,19 @@ def test_moments_consistency_var_constant(consistency_data, min_periods, ddof):
tm.assert_equal(var_x, expected)
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
-def test_expanding_consistency_std(consistency_data, min_periods, ddof):
+def test_expanding_consistency_var_std_cov(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
- std_x = x.expanding(min_periods=min_periods).std(ddof=ddof)
assert not (var_x < 0).any().any()
+
+ std_x = x.expanding(min_periods=min_periods).std(ddof=ddof)
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
-
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
-@pytest.mark.parametrize("ddof", [0, 1])
-def test_expanding_consistency_cov(consistency_data, min_periods, ddof):
- x, is_constant, no_nans = consistency_data
- var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
- assert not (var_x < 0).any().any()
-
cov_x_x = x.expanding(min_periods=min_periods).cov(x, ddof=ddof)
assert not (cov_x_x < 0).any().any()
@@ -98,7 +72,6 @@ def test_expanding_consistency_cov(consistency_data, min_periods, ddof):
tm.assert_equal(var_x, cov_x_x)
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_series_cov_corr(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
@@ -128,7 +101,6 @@ def test_expanding_consistency_series_cov_corr(consistency_data, min_periods, dd
tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_mean(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
@@ -140,7 +112,6 @@ def test_expanding_consistency_mean(consistency_data, min_periods):
tm.assert_equal(result, expected.astype("float64"))
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_constant(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
@@ -162,7 +133,6 @@ def test_expanding_consistency_constant(consistency_data, min_periods):
tm.assert_equal(corr_x_x, expected)
-@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_var_debiasing_factors(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py
index bda8ba05d4024..49bc5af4e9d69 100644
--- a/pandas/tests/window/moments/test_moments_consistency_rolling.py
+++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py
@@ -5,26 +5,17 @@
import pandas._testing as tm
-def _rolling_consistency_cases():
- for window in [1, 2, 3, 10, 20]:
- for min_periods in {0, 1, 2, 3, 4, window}:
- if min_periods and (min_periods > window):
- continue
- for center in [False, True]:
- yield window, min_periods, center
-
-
-@pytest.mark.parametrize(
- "window,min_periods,center", list(_rolling_consistency_cases())
-)
-@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
-def test_rolling_apply_consistency_sum_nans(
- consistency_data, window, min_periods, center, f
+@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
+def test_rolling_apply_consistency_sum(
+ consistency_data, rolling_consistency_cases, center, f
):
x, is_constant, no_nans = consistency_data
+ window, min_periods = rolling_consistency_cases
if f is np.nansum and min_periods == 0:
pass
+ elif f is np.sum and not no_nans:
+ pass
else:
rolling_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
@@ -35,36 +26,13 @@ def test_rolling_apply_consistency_sum_nans(
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
-@pytest.mark.parametrize(
- "window,min_periods,center", list(_rolling_consistency_cases())
-)
-@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
-def test_rolling_apply_consistency_sum_no_nans(
- consistency_data, window, min_periods, center, f
-):
- x, is_constant, no_nans = consistency_data
-
- if no_nans:
- if f is np.nansum and min_periods == 0:
- pass
- else:
- rolling_f_result = x.rolling(
- window=window, min_periods=min_periods, center=center
- ).sum()
- rolling_apply_f_result = x.rolling(
- window=window, min_periods=min_periods, center=center
- ).apply(func=f, raw=True)
- tm.assert_equal(rolling_f_result, rolling_apply_f_result)
-
-
-@pytest.mark.parametrize(
- "window,min_periods,center", list(_rolling_consistency_cases())
-)
@pytest.mark.parametrize("ddof", [0, 1])
-def test_moments_consistency_var(consistency_data, window, min_periods, center, ddof):
+def test_moments_consistency_var(
+ consistency_data, rolling_consistency_cases, center, ddof
+):
x, is_constant, no_nans = consistency_data
+ window, min_periods = rolling_consistency_cases
- mean_x = x.rolling(window=window, min_periods=min_periods, center=center).mean()
var_x = x.rolling(window=window, min_periods=min_periods, center=center).var(
ddof=ddof
)
@@ -72,6 +40,7 @@ def test_moments_consistency_var(consistency_data, window, min_periods, center,
if ddof == 0:
# check that biased var(x) == mean(x^2) - mean(x)^2
+ mean_x = x.rolling(window=window, min_periods=min_periods, center=center).mean()
mean_x2 = (
(x * x)
.rolling(window=window, min_periods=min_periods, center=center)
@@ -80,14 +49,12 @@ def test_moments_consistency_var(consistency_data, window, min_periods, center,
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
-@pytest.mark.parametrize(
- "window,min_periods,center", list(_rolling_consistency_cases())
-)
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var_constant(
- consistency_data, window, min_periods, center, ddof
+ consistency_data, rolling_consistency_cases, center, ddof
):
x, is_constant, no_nans = consistency_data
+ window, min_periods = rolling_consistency_cases
if is_constant:
count_x = x.rolling(
@@ -106,37 +73,26 @@ def test_moments_consistency_var_constant(
tm.assert_equal(var_x, expected)
-@pytest.mark.parametrize(
- "window,min_periods,center", list(_rolling_consistency_cases())
-)
@pytest.mark.parametrize("ddof", [0, 1])
-def test_rolling_consistency_std(consistency_data, window, min_periods, center, ddof):
+def test_rolling_consistency_var_std_cov(
+ consistency_data, rolling_consistency_cases, center, ddof
+):
x, is_constant, no_nans = consistency_data
+ window, min_periods = rolling_consistency_cases
var_x = x.rolling(window=window, min_periods=min_periods, center=center).var(
ddof=ddof
)
+ assert not (var_x < 0).any().any()
+
std_x = x.rolling(window=window, min_periods=min_periods, center=center).std(
ddof=ddof
)
- assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
-
-@pytest.mark.parametrize(
- "window,min_periods,center", list(_rolling_consistency_cases())
-)
-@pytest.mark.parametrize("ddof", [0, 1])
-def test_rolling_consistency_cov(consistency_data, window, min_periods, center, ddof):
- x, is_constant, no_nans = consistency_data
- var_x = x.rolling(window=window, min_periods=min_periods, center=center).var(
- ddof=ddof
- )
- assert not (var_x < 0).any().any()
-
cov_x_x = x.rolling(window=window, min_periods=min_periods, center=center).cov(
x, ddof=ddof
)
@@ -146,14 +102,12 @@ def test_rolling_consistency_cov(consistency_data, window, min_periods, center,
tm.assert_equal(var_x, cov_x_x)
-@pytest.mark.parametrize(
- "window,min_periods,center", list(_rolling_consistency_cases())
-)
@pytest.mark.parametrize("ddof", [0, 1])
def test_rolling_consistency_series_cov_corr(
- consistency_data, window, min_periods, center, ddof
+ consistency_data, rolling_consistency_cases, center, ddof
):
x, is_constant, no_nans = consistency_data
+ window, min_periods = rolling_consistency_cases
if isinstance(x, Series):
var_x_plus_y = (
@@ -204,11 +158,9 @@ def test_rolling_consistency_series_cov_corr(
tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
-@pytest.mark.parametrize(
- "window,min_periods,center", list(_rolling_consistency_cases())
-)
-def test_rolling_consistency_mean(consistency_data, window, min_periods, center):
+def test_rolling_consistency_mean(consistency_data, rolling_consistency_cases, center):
x, is_constant, no_nans = consistency_data
+ window, min_periods = rolling_consistency_cases
result = x.rolling(window=window, min_periods=min_periods, center=center).mean()
expected = (
@@ -221,11 +173,11 @@ def test_rolling_consistency_mean(consistency_data, window, min_periods, center)
tm.assert_equal(result, expected.astype("float64"))
-@pytest.mark.parametrize(
- "window,min_periods,center", list(_rolling_consistency_cases())
-)
-def test_rolling_consistency_constant(consistency_data, window, min_periods, center):
+def test_rolling_consistency_constant(
+ consistency_data, rolling_consistency_cases, center
+):
x, is_constant, no_nans = consistency_data
+ window, min_periods = rolling_consistency_cases
if is_constant:
count_x = x.rolling(
@@ -249,13 +201,11 @@ def test_rolling_consistency_constant(consistency_data, window, min_periods, cen
tm.assert_equal(corr_x_x, expected)
-@pytest.mark.parametrize(
- "window,min_periods,center", list(_rolling_consistency_cases())
-)
def test_rolling_consistency_var_debiasing_factors(
- consistency_data, window, min_periods, center
+ consistency_data, rolling_consistency_cases, center
):
x, is_constant, no_nans = consistency_data
+ window, min_periods = rolling_consistency_cases
# check variance debiasing factors
var_unbiased_x = x.rolling(
| - [x] closes #37535
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
* Avoids running similar `def test_*` checks more than once
* Thins out the `consistency_data` pytest fixture to test data with certain properties only once (constant values, all nans, monotonically increasing, etc) | https://api.github.com/repos/pandas-dev/pandas/pulls/44239 | 2021-10-30T06:42:28Z | 2021-10-30T14:10:07Z | 2021-10-30T14:10:06Z | 2021-10-30T17:07:20Z |
CLN: de-duplicate Ellipsis-handling | diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index bed64efc690ec..69d89e2f32203 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -42,6 +42,7 @@
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning
from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import validate_insert_loc
from pandas.core.dtypes.cast import (
astype_nansafe,
@@ -81,7 +82,10 @@
extract_array,
sanitize_array,
)
-from pandas.core.indexers import check_array_indexer
+from pandas.core.indexers import (
+ check_array_indexer,
+ unpack_tuple_and_ellipses,
+)
from pandas.core.missing import interpolate_2d
from pandas.core.nanops import check_below_min_count
import pandas.core.ops as ops
@@ -878,16 +882,13 @@ def __getitem__(
) -> SparseArrayT | Any:
if isinstance(key, tuple):
- if len(key) > 1:
- if key[0] is Ellipsis:
- key = key[1:]
- elif key[-1] is Ellipsis:
- key = key[:-1]
- if len(key) > 1:
- raise IndexError("too many indices for array.")
- if key[0] is Ellipsis:
+ key = unpack_tuple_and_ellipses(key)
+ # Non-overlapping identity check (left operand type:
+ # "Union[Union[Union[int, integer[Any]], Union[slice, List[int],
+ # ndarray[Any, Any]]], Tuple[Union[int, ellipsis], ...]]",
+ # right operand type: "ellipsis")
+ if key is Ellipsis: # type: ignore[comparison-overlap]
raise ValueError("Cannot slice with Ellipsis")
- key = key[0]
if is_integer(key):
return self._get_val_at(key)
@@ -952,12 +953,7 @@ def __getitem__(
return type(self)(data_slice, kind=self.kind)
def _get_val_at(self, loc):
- n = len(self)
- if loc < 0:
- loc += n
-
- if loc >= n or loc < 0:
- raise IndexError("Out of bounds access")
+ loc = validate_insert_loc(loc, len(self))
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 4e3bd05d2cc8d..e6058ad9dbaf2 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -58,6 +58,7 @@
)
from pandas.core.indexers import (
check_array_indexer,
+ unpack_tuple_and_ellipses,
validate_indices,
)
from pandas.core.strings.object_array import ObjectStringArrayMixin
@@ -313,14 +314,7 @@ def __getitem__(
"boolean arrays are valid indices."
)
elif isinstance(item, tuple):
- # possibly unpack arr[..., n] to arr[n]
- if len(item) == 1:
- item = item[0]
- elif len(item) == 2:
- if item[0] is Ellipsis:
- item = item[1]
- elif item[1] is Ellipsis:
- item = item[0]
+ item = unpack_tuple_and_ellipses(item)
# We are not an array indexer, so maybe e.g. a slice or integer
# indexer. We dispatch to pyarrow.
diff --git a/pandas/core/indexers/__init__.py b/pandas/core/indexers/__init__.py
index 1558b03162d22..86ec36144b134 100644
--- a/pandas/core/indexers/__init__.py
+++ b/pandas/core/indexers/__init__.py
@@ -11,6 +11,7 @@
length_of_indexer,
maybe_convert_indices,
unpack_1tuple,
+ unpack_tuple_and_ellipses,
validate_indices,
)
@@ -28,4 +29,5 @@
"unpack_1tuple",
"check_key_length",
"check_array_indexer",
+ "unpack_tuple_and_ellipses",
]
diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py
index cf9be5eb95eb4..bc51dbd54d010 100644
--- a/pandas/core/indexers/utils.py
+++ b/pandas/core/indexers/utils.py
@@ -435,6 +435,24 @@ def check_key_length(columns: Index, key, value: DataFrame) -> None:
raise ValueError("Columns must be same length as key")
+def unpack_tuple_and_ellipses(item: tuple):
+ """
+ Possibly unpack arr[..., n] to arr[n]
+ """
+ if len(item) > 1:
+ # Note: we are assuming this indexing is being done on a 1D arraylike
+ if item[0] is Ellipsis:
+ item = item[1:]
+ elif item[-1] is Ellipsis:
+ item = item[:-1]
+
+ if len(item) > 1:
+ raise IndexError("too many indices for array.")
+
+ item = item[0]
+ return item
+
+
# -----------------------------------------------------------
# Public indexer validation
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index 07ae7511bb333..96021bfa18fb7 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -258,7 +258,7 @@ def test_get_item(self):
assert self.zarr[2] == 1
assert self.zarr[7] == 5
- errmsg = re.compile("bounds")
+ errmsg = "must be an integer between -10 and 10"
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 2eef828288e59..e3e5e092f143b 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -40,6 +40,7 @@
ExtensionDtype,
)
from pandas.api.types import is_bool_dtype
+from pandas.core.indexers import unpack_tuple_and_ellipses
class JSONDtype(ExtensionDtype):
@@ -86,14 +87,7 @@ def _from_factorized(cls, values, original):
def __getitem__(self, item):
if isinstance(item, tuple):
- if len(item) > 1:
- if item[0] is Ellipsis:
- item = item[1:]
- elif item[-1] is Ellipsis:
- item = item[:-1]
- if len(item) > 1:
- raise IndexError("too many indices for array.")
- item = item[0]
+ item = unpack_tuple_and_ellipses(item)
if isinstance(item, numbers.Integral):
return self.data[item]
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44238 | 2021-10-30T04:40:19Z | 2021-10-30T18:40:12Z | 2021-10-30T18:40:12Z | 2021-10-30T18:40:12Z |
BUG: all-NaT TDI division with object dtype preserve td64 | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 5601048c409e1..ae3154f651384 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -479,7 +479,7 @@ Datetimelike
Timedelta
^^^^^^^^^
--
+- Bug in division of all-``NaT`` :class:`TimeDeltaIndex`, :class:`Series` or :class:`DataFrame` column with object-dtype arraylike of numbers failing to infer the result as timedelta64-dtype (:issue:`39750`)
-
Timezones
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 040c7e6804f64..3d8f9f7edcc74 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -573,12 +573,17 @@ def __truediv__(self, other):
# We need to do dtype inference in order to keep DataFrame ops
# behavior consistent with Series behavior
- inferred = lib.infer_dtype(result)
+ inferred = lib.infer_dtype(result, skipna=False)
if inferred == "timedelta":
flat = result.ravel()
result = type(self)._from_sequence(flat).reshape(result.shape)
elif inferred == "floating":
result = result.astype(float)
+ elif inferred == "datetime":
+ # GH#39750 this occurs when result is all-NaT, in which case
+ # we want to interpret these NaTs as td64.
+ # We construct an all-td64NaT result.
+ result = self * np.nan
return result
@@ -679,13 +684,22 @@ def __floordiv__(self, other):
elif is_object_dtype(other.dtype):
# error: Incompatible types in assignment (expression has type
# "List[Any]", variable has type "ndarray")
- result = [ # type: ignore[assignment]
- self[n] // other[n] for n in range(len(self))
- ]
- result = np.array(result)
- if lib.infer_dtype(result, skipna=False) == "timedelta":
+ srav = self.ravel()
+ orav = other.ravel()
+ res_list = [srav[n] // orav[n] for n in range(len(srav))]
+ result_flat = np.asarray(res_list)
+ inferred = lib.infer_dtype(result_flat, skipna=False)
+
+ result = result_flat.reshape(self.shape)
+
+ if inferred == "timedelta":
result, _ = sequence_to_td64ns(result)
return type(self)(result)
+ if inferred == "datetime":
+ # GH#39750 occurs when result is all-NaT, which in this
+ # case should be interpreted as td64nat. This can only
+ # occur when self is all-td64nat
+ return self * np.nan
return result
elif is_integer_dtype(other.dtype) or is_float_dtype(other.dtype):
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 7765c29ee59c8..0b43cb4f3d78c 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -2022,7 +2022,7 @@ def test_td64arr_rmul_numeric_array(
ids=lambda x: type(x).__name__,
)
def test_td64arr_div_numeric_array(
- self, box_with_array, vector, any_real_numpy_dtype, using_array_manager
+ self, box_with_array, vector, any_real_numpy_dtype
):
# GH#4521
# divide/multiply by integers
@@ -2062,14 +2062,6 @@ def test_td64arr_div_numeric_array(
expected = tm.box_expected(expected, xbox)
assert tm.get_dtype(expected) == "m8[ns]"
- if using_array_manager and box_with_array is DataFrame:
- # TODO the behaviour is buggy here (third column with all-NaT
- # as result doesn't get preserved as timedelta64 dtype).
- # Reported at https://github.com/pandas-dev/pandas/issues/39750
- # Changing the expected instead of xfailing to continue to test
- # the correct behaviour for the other columns
- expected[2] = Series([NaT, NaT], dtype=object)
-
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match=pattern):
@@ -2137,6 +2129,19 @@ def test_float_series_rdiv_td64arr(self, box_with_array, names):
else:
tm.assert_equal(result, expected)
+ def test_td64arr_all_nat_div_object_dtype_numeric(self, box_with_array):
+ # GH#39750 make sure we infer the result as td64
+ tdi = TimedeltaIndex([NaT, NaT])
+
+ left = tm.box_expected(tdi, box_with_array)
+ right = np.array([2, 2.0], dtype=object)
+
+ result = left / right
+ tm.assert_equal(result, left)
+
+ result = left // right
+ tm.assert_equal(result, left)
+
class TestTimedelta64ArrayLikeArithmetic:
# Arithmetic tests for timedelta64[ns] vectors fully parametrized over
| - [x] closes #39750
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44237 | 2021-10-30T02:44:17Z | 2021-11-06T22:58:58Z | 2021-11-06T22:58:58Z | 2021-11-06T23:37:19Z |
BUG: setitem into td64/dt64 series/frame with Categorical[strings] | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 5601048c409e1..492df4f1e3612 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -535,7 +535,7 @@ Indexing
- Bug in :meth:`DataFrame.sort_index` where ``ignore_index=True`` was not being respected when the index was already sorted (:issue:`43591`)
- Bug in :meth:`Index.get_indexer_non_unique` when index contains multiple ``np.datetime64("NaT")`` and ``np.timedelta64("NaT")`` (:issue:`43869`)
- Bug in setting a scalar :class:`Interval` value into a :class:`Series` with ``IntervalDtype`` when the scalar's sides are floats and the values' sides are integers (:issue:`44201`)
--
+- Bug when setting string-backed :class:`Categorical` values that can be parsed to datetimes into a :class:`DatetimeArray` or :class:`Series` or :class:`DataFrame` column backed by :class:`DatetimeArray` failing to parse these strings (:issue:`44236`)
Missing
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 72c00dfe7c65a..f8aa1656c8c30 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -68,6 +68,7 @@
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
+ is_all_strings,
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
@@ -720,7 +721,7 @@ def _validate_listlike(self, value, allow_object: bool = False):
value = pd_array(value)
value = extract_array(value, extract_numpy=True)
- if is_dtype_equal(value.dtype, "string"):
+ if is_all_strings(value):
# We got a StringArray
try:
# TODO: Could use from_sequence_of_strings if implemented
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 0788ecdd8b4b5..815a0a2040ddb 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -15,6 +15,7 @@
Interval,
Period,
algos,
+ lib,
)
from pandas._libs.tslibs import conversion
from pandas._typing import (
@@ -1788,3 +1789,23 @@ def pandas_dtype(dtype) -> DtypeObj:
raise TypeError(f"dtype '{dtype}' not understood")
return npdtype
+
+
+def is_all_strings(value: ArrayLike) -> bool:
+ """
+ Check if this is an array of strings that we should try parsing.
+
+ Includes object-dtype ndarray containing all-strings, StringArray,
+ and Categorical with all-string categories.
+ Does not include numpy string dtypes.
+ """
+ dtype = value.dtype
+
+ if isinstance(dtype, np.dtype):
+ return (
+ dtype == np.dtype("object")
+ and lib.infer_dtype(value, skipna=False) == "string"
+ )
+ elif isinstance(dtype, CategoricalDtype):
+ return dtype.categories.inferred_type == "string"
+ return dtype == "string"
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 4604fad019eca..d6402e027be98 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -871,7 +871,7 @@ def test_setitem_dt64_string_scalar(self, tz_naive_fixture, indexer_sli):
else:
assert ser._values is values
- @pytest.mark.parametrize("box", [list, np.array, pd.array])
+ @pytest.mark.parametrize("box", [list, np.array, pd.array, pd.Categorical, Index])
@pytest.mark.parametrize(
"key", [[0, 1], slice(0, 2), np.array([True, True, False])]
)
@@ -911,7 +911,7 @@ def test_setitem_td64_scalar(self, indexer_sli, scalar):
indexer_sli(ser)[0] = scalar
assert ser._values._data is values._data
- @pytest.mark.parametrize("box", [list, np.array, pd.array])
+ @pytest.mark.parametrize("box", [list, np.array, pd.array, pd.Categorical, Index])
@pytest.mark.parametrize(
"key", [[0, 1], slice(0, 2), np.array([True, True, False])]
)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44236 | 2021-10-30T02:05:44Z | 2021-11-01T13:55:56Z | 2021-11-01T13:55:56Z | 2021-11-01T15:42:32Z |
CI: Don't split tests on Windows Python 3.10 | diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml
index 96d5542451f06..d6647e8059306 100644
--- a/.github/workflows/python-dev.yml
+++ b/.github/workflows/python-dev.yml
@@ -24,8 +24,12 @@ jobs:
strategy:
fail-fast: false
matrix:
- os: [ubuntu-latest, macOS-latest, windows-latest]
+ os: [ubuntu-latest, macOS-latest]
pytest_target: ["pandas/tests/[a-h]*", "pandas/tests/[i-z]*"]
+ include:
+ # No need to split tests on windows
+ - os: windows-latest
+ pytest_target: pandas
name: actions-310-dev
timeout-minutes: 80
| Ref: https://github.com/pandas-dev/pandas/issues/44173
| https://api.github.com/repos/pandas-dev/pandas/pulls/44235 | 2021-10-29T23:03:02Z | 2021-10-30T11:18:39Z | 2021-10-30T11:18:39Z | 2021-11-11T01:37:42Z |
REF: simplify is_scalar_indexer | diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py
index cf9be5eb95eb4..fe003281fb82e 100644
--- a/pandas/core/indexers/utils.py
+++ b/pandas/core/indexers/utils.py
@@ -100,10 +100,7 @@ def is_scalar_indexer(indexer, ndim: int) -> bool:
# GH37748: allow indexer to be an integer for Series
return True
if isinstance(indexer, tuple) and len(indexer) == ndim:
- return all(
- is_integer(x) or (isinstance(x, np.ndarray) and x.ndim == len(x) == 1)
- for x in indexer
- )
+ return all(is_integer(x) for x in indexer)
return False
diff --git a/pandas/tests/indexing/test_indexers.py b/pandas/tests/indexing/test_indexers.py
index 45dcaf95ffdd0..ddc5c039160d5 100644
--- a/pandas/tests/indexing/test_indexers.py
+++ b/pandas/tests/indexing/test_indexers.py
@@ -22,10 +22,10 @@ def test_is_scalar_indexer():
assert not is_scalar_indexer(indexer[0], 2)
indexer = (np.array([2]), 1)
- assert is_scalar_indexer(indexer, 2)
+ assert not is_scalar_indexer(indexer, 2)
indexer = (np.array([2]), np.array([3]))
- assert is_scalar_indexer(indexer, 2)
+ assert not is_scalar_indexer(indexer, 2)
indexer = (np.array([2]), np.array([3, 4]))
assert not is_scalar_indexer(indexer, 2)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44234 | 2021-10-29T20:52:41Z | 2021-10-30T20:29:58Z | 2021-10-30T20:29:58Z | 2021-10-30T21:38:33Z |
TYP: numba stub | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 21250789fde9f..ea9595fd88630 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -106,7 +106,7 @@ if [[ -z "$CHECK" || "$CHECK" == "typing" ]]; then
mypy --version
MSG='Performing static analysis using mypy' ; echo $MSG
- mypy pandas
+ mypy
RET=$(($RET + $?)) ; echo $MSG "DONE"
# run pyright, if it is installed
diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst
index 936db7d1bee88..4cea030546635 100644
--- a/doc/source/development/contributing_codebase.rst
+++ b/doc/source/development/contributing_codebase.rst
@@ -399,7 +399,7 @@ pandas uses `mypy <http://mypy-lang.org>`_ and `pyright <https://github.com/micr
.. code-block:: shell
- mypy pandas
+ mypy
# let pre-commit setup and run pyright
pre-commit run --hook-stage manual --all-files pyright
diff --git a/pandas/core/_numba/kernels/mean_.py b/pandas/core/_numba/kernels/mean_.py
index f3a2c7c2170a8..8f67dd9b51c06 100644
--- a/pandas/core/_numba/kernels/mean_.py
+++ b/pandas/core/_numba/kernels/mean_.py
@@ -14,8 +14,7 @@
from pandas.core._numba.kernels.shared import is_monotonic_increasing
-# error: Untyped decorator makes function "add_mean" untyped
-@numba.jit(nopython=True, nogil=True, parallel=False) # type: ignore[misc]
+@numba.jit(nopython=True, nogil=True, parallel=False)
def add_mean(
val: float, nobs: int, sum_x: float, neg_ct: int, compensation: float
) -> tuple[int, float, int, float]:
@@ -30,8 +29,7 @@ def add_mean(
return nobs, sum_x, neg_ct, compensation
-# error: Untyped decorator makes function "remove_mean" untyped
-@numba.jit(nopython=True, nogil=True, parallel=False) # type: ignore[misc]
+@numba.jit(nopython=True, nogil=True, parallel=False)
def remove_mean(
val: float, nobs: int, sum_x: float, neg_ct: int, compensation: float
) -> tuple[int, float, int, float]:
@@ -46,8 +44,7 @@ def remove_mean(
return nobs, sum_x, neg_ct, compensation
-# error: Untyped decorator makes function "sliding_mean" untyped
-@numba.jit(nopython=True, nogil=True, parallel=False) # type: ignore[misc]
+@numba.jit(nopython=True, nogil=True, parallel=False)
def sliding_mean(
values: np.ndarray,
start: np.ndarray,
diff --git a/pandas/core/_numba/kernels/shared.py b/pandas/core/_numba/kernels/shared.py
index 7c2e7636c7d81..ec25e78a8d897 100644
--- a/pandas/core/_numba/kernels/shared.py
+++ b/pandas/core/_numba/kernels/shared.py
@@ -2,9 +2,12 @@
import numpy as np
-# error: Untyped decorator makes function "is_monotonic_increasing" untyped
-@numba.jit( # type: ignore[misc]
- numba.boolean(numba.int64[:]), nopython=True, nogil=True, parallel=False
+@numba.jit(
+ # error: Any? not callable
+ numba.boolean(numba.int64[:]), # type: ignore[misc]
+ nopython=True,
+ nogil=True,
+ parallel=False,
)
def is_monotonic_increasing(bounds: np.ndarray) -> bool:
"""Check if int64 values are monotonically increasing."""
diff --git a/pandas/core/_numba/kernels/sum_.py b/pandas/core/_numba/kernels/sum_.py
index 66a1587c49f3f..c2e81b4990ba9 100644
--- a/pandas/core/_numba/kernels/sum_.py
+++ b/pandas/core/_numba/kernels/sum_.py
@@ -14,8 +14,7 @@
from pandas.core._numba.kernels.shared import is_monotonic_increasing
-# error: Untyped decorator makes function "add_sum" untyped
-@numba.jit(nopython=True, nogil=True, parallel=False) # type: ignore[misc]
+@numba.jit(nopython=True, nogil=True, parallel=False)
def add_sum(
val: float, nobs: int, sum_x: float, compensation: float
) -> tuple[int, float, float]:
@@ -28,8 +27,7 @@ def add_sum(
return nobs, sum_x, compensation
-# error: Untyped decorator makes function "remove_sum" untyped
-@numba.jit(nopython=True, nogil=True, parallel=False) # type: ignore[misc]
+@numba.jit(nopython=True, nogil=True, parallel=False)
def remove_sum(
val: float, nobs: int, sum_x: float, compensation: float
) -> tuple[int, float, float]:
@@ -42,8 +40,7 @@ def remove_sum(
return nobs, sum_x, compensation
-# error: Untyped decorator makes function "sliding_sum" untyped
-@numba.jit(nopython=True, nogil=True, parallel=False) # type: ignore[misc]
+@numba.jit(nopython=True, nogil=True, parallel=False)
def sliding_sum(
values: np.ndarray,
start: np.ndarray,
diff --git a/pyproject.toml b/pyproject.toml
index 7966e94cff2f3..d84024eb09de2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -60,6 +60,8 @@ markers = [
[tool.mypy]
# Import discovery
+mypy_path = "typings"
+files = ["pandas", "typings"]
namespace_packages = false
explicit_package_bases = false
ignore_missing_imports = true
diff --git a/typings/numba.pyi b/typings/numba.pyi
new file mode 100644
index 0000000000000..d6a2729d36db3
--- /dev/null
+++ b/typings/numba.pyi
@@ -0,0 +1,41 @@
+from typing import (
+ Any,
+ Callable,
+ Literal,
+ overload,
+)
+
+import numba
+
+from pandas._typing import F
+
+def __getattr__(name: str) -> Any: ... # incomplete
+@overload
+def jit(
+ signature_or_function: F = ...,
+) -> F: ...
+@overload
+def jit(
+ signature_or_function: str
+ | list[str]
+ | numba.core.types.abstract.Type
+ | list[numba.core.types.abstract.Type] = ...,
+ locals: dict = ..., # TODO: Mapping of local variable names to Numba types
+ cache: bool = ...,
+ pipeline_class: numba.compiler.CompilerBase = ...,
+ boundscheck: bool | None = ...,
+ *,
+ nopython: bool = ...,
+ forceobj: bool = ...,
+ looplift: bool = ...,
+ error_model: Literal["python", "numpy"] = ...,
+ inline: Literal["never", "always"] | Callable = ...,
+ # TODO: If a callable is provided it will be called with the call expression
+ # node that is requesting inlining, the caller's IR and callee's IR as
+ # arguments, it is expected to return Truthy as to whether to inline.
+ target: Literal["cpu", "gpu", "npyufunc", "cuda"] = ..., # deprecated
+ nogil: bool = ...,
+ parallel: bool = ...,
+) -> Callable[[F], F]: ...
+
+njit = jit
| partial stub to preserve type signatures for decorated functions.
draft since will rebase once #43828 is merged.
cc @twoertwein @mroeschke | https://api.github.com/repos/pandas-dev/pandas/pulls/44233 | 2021-10-29T20:45:56Z | 2021-10-31T14:54:12Z | 2021-10-31T14:54:11Z | 2021-11-01T18:35:24Z |
REF: RangeIndex.delete defer to .difference | diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 41ef824afc2a7..3fae0fbe7d2a0 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -807,24 +807,17 @@ def delete(self, loc) -> Index: # type: ignore[override]
return self[1:]
if loc == -1 or loc == len(self) - 1:
return self[:-1]
+ if len(self) == 3 and (loc == 1 or loc == -2):
+ return self[::2]
elif lib.is_list_like(loc):
slc = lib.maybe_indices_to_slice(np.asarray(loc, dtype=np.intp), len(self))
- if isinstance(slc, slice) and slc.step is not None and slc.step < 0:
- rng = range(len(self))[slc][::-1]
- slc = slice(rng.start, rng.stop, rng.step)
-
- if isinstance(slc, slice) and slc.step in [1, None]:
- # Note: maybe_indices_to_slice will never return a slice
- # with 'slc.start is None'; may have slc.stop None in cases
- # with negative step
- if slc.start == 0:
- return self[slc.stop :]
- elif slc.stop in [len(self), None]:
- return self[: slc.start]
-
- # TODO: more generally, self.difference(self[slc]),
- # once _difference is better about retaining RangeIndex
+
+ if isinstance(slc, slice):
+ # defer to RangeIndex._difference, which is optimized to return
+ # a RangeIndex whenever possible
+ other = self[slc]
+ return self.difference(other, sort=False)
return super().delete(loc)
diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py
index d58dff191cc73..277f686a8487a 100644
--- a/pandas/tests/indexes/ranges/test_range.py
+++ b/pandas/tests/indexes/ranges/test_range.py
@@ -178,6 +178,15 @@ def test_delete_preserves_rangeindex(self):
result = idx.delete(1)
tm.assert_index_equal(result, expected, exact=True)
+ def test_delete_preserves_rangeindex_middle(self):
+ idx = Index(range(3), name="foo")
+ result = idx.delete(1)
+ expected = idx[::2]
+ tm.assert_index_equal(result, expected, exact=True)
+
+ result = idx.delete(-2)
+ tm.assert_index_equal(result, expected, exact=True)
+
def test_delete_preserves_rangeindex_list_at_end(self):
idx = RangeIndex(0, 6, 1)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44232 | 2021-10-29T17:38:24Z | 2021-10-29T21:50:03Z | 2021-10-29T21:50:03Z | 2021-10-29T22:49:09Z |
Backport PR #44195 on branch 1.3.x (Fix series with none equals float series) | diff --git a/doc/source/whatsnew/v1.3.5.rst b/doc/source/whatsnew/v1.3.5.rst
index ba9fcb5c1bfeb..589092c0dd7e3 100644
--- a/doc/source/whatsnew/v1.3.5.rst
+++ b/doc/source/whatsnew/v1.3.5.rst
@@ -14,6 +14,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
+- Fixed regression in :meth:`Series.equals` when comparing floats with dtype object to None (:issue:`44190`)
- Fixed performance regression in :func:`read_csv` (:issue:`44106`)
-
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index cbe79d11fbfc9..835b288778473 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -64,7 +64,7 @@ cpdef bint is_matching_na(object left, object right, bint nan_matches_none=False
elif left is NaT:
return right is NaT
elif util.is_float_object(left):
- if nan_matches_none and right is None:
+ if nan_matches_none and right is None and util.is_nan(left):
return True
return (
util.is_nan(left)
diff --git a/pandas/tests/series/methods/test_equals.py b/pandas/tests/series/methods/test_equals.py
index 0b3689afac764..a3faf783fd3fb 100644
--- a/pandas/tests/series/methods/test_equals.py
+++ b/pandas/tests/series/methods/test_equals.py
@@ -125,3 +125,18 @@ def test_equals_none_vs_nan():
assert ser.equals(ser2)
assert Index(ser).equals(Index(ser2))
assert ser.array.equals(ser2.array)
+
+
+def test_equals_None_vs_float():
+ # GH#44190
+ left = Series([-np.inf, np.nan, -1.0, 0.0, 1.0, 10 / 3, np.inf], dtype=object)
+ right = Series([None] * len(left))
+
+ # these series were found to be equal due to a bug, check that they are correctly
+ # found to not equal
+ assert not left.equals(right)
+ assert not right.equals(left)
+ assert not left.to_frame().equals(right.to_frame())
+ assert not right.to_frame().equals(left.to_frame())
+ assert not Index(left, dtype="object").equals(Index(right, dtype="object"))
+ assert not Index(right, dtype="object").equals(Index(left, dtype="object"))
| Backport PR #44195: Fix series with none equals float series | https://api.github.com/repos/pandas-dev/pandas/pulls/44229 | 2021-10-29T13:17:50Z | 2021-10-29T15:42:52Z | 2021-10-29T15:42:52Z | 2021-10-29T15:42:52Z |
DOC: local py.typed for users | diff --git a/.gitignore b/.gitignore
index 2c337be60e94e..87224f1d6060f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,6 +50,8 @@ dist
*.egg-info
.eggs
.pypirc
+# type checkers
+pandas/py.typed
# tox testing tool
.tox
diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst
index 4cea030546635..8baf103369a13 100644
--- a/doc/source/development/contributing_codebase.rst
+++ b/doc/source/development/contributing_codebase.rst
@@ -410,6 +410,26 @@ A recent version of ``numpy`` (>=1.21.0) is required for type validation.
.. _contributing.ci:
+Testing type hints in code using pandas
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. warning::
+
+ * Pandas is not yet a py.typed library (:pep:`561`)!
+ The primary purpose of locally declaring pandas as a py.typed library is to test and
+ improve the pandas-builtin type annotations.
+
+Until pandas becomes a py.typed library, it is possible to easily experiment with the type
+annotations shipped with pandas by creating an empty file named "py.typed" in the pandas
+installation folder:
+
+.. code-block:: none
+
+ python -c "import pandas; import pathlib; (pathlib.Path(pandas.__path__[0]) / 'py.typed').touch()"
+
+The existence of the py.typed file signals to type checkers that pandas is already a py.typed
+library. This makes type checkers aware of the type annotations shipped with pandas.
+
Testing with continuous integration
-----------------------------------
| xref https://github.com/pandas-dev/pandas/pull/44223#issuecomment-954672983 | https://api.github.com/repos/pandas-dev/pandas/pulls/44228 | 2021-10-29T12:55:21Z | 2021-12-17T22:01:59Z | 2021-12-17T22:01:58Z | 2022-03-09T02:56:36Z |
TYP: type annotations for nancorr | diff --git a/pandas/_libs/algos.pyi b/pandas/_libs/algos.pyi
index 6dd1c7c9fb209..df8ac3f3b0696 100644
--- a/pandas/_libs/algos.pyi
+++ b/pandas/_libs/algos.pyi
@@ -50,18 +50,14 @@ def kth_smallest(
# Pairwise correlation/covariance
def nancorr(
- mat: np.ndarray, # const float64_t[:, :]
+ mat: npt.NDArray[np.float64], # const float64_t[:, :]
cov: bool = ...,
- minp=...,
-) -> np.ndarray: ... # ndarray[float64_t, ndim=2]
+ minp: int | None = ...,
+) -> npt.NDArray[np.float64]: ... # ndarray[float64_t, ndim=2]
def nancorr_spearman(
- mat: np.ndarray, # ndarray[float64_t, ndim=2]
+ mat: npt.NDArray[np.float64], # ndarray[float64_t, ndim=2]
minp: int = ...,
-) -> np.ndarray: ... # ndarray[float64_t, ndim=2]
-def nancorr_kendall(
- mat: np.ndarray, # ndarray[float64_t, ndim=2]
- minp: int = ...,
-) -> np.ndarray: ... # ndarray[float64_t, ndim=2]
+) -> npt.NDArray[np.float64]: ... # ndarray[float64_t, ndim=2]
# ----------------------------------------------------------------------
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/44227 | 2021-10-29T12:22:40Z | 2021-10-29T21:56:23Z | 2021-10-29T21:56:23Z | 2021-10-29T21:56:23Z |
DOC: added examples to DataFrame.std | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 7ff48a262c4d6..c2fa345f7639d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10639,6 +10639,7 @@ def mad(self, axis=None, skipna=None, level=None):
name2=name2,
axis_descr=axis_descr,
notes="",
+ examples="",
)
def sem(
self,
@@ -10661,6 +10662,7 @@ def sem(
name2=name2,
axis_descr=axis_descr,
notes="",
+ examples="",
)
def var(
self,
@@ -10679,11 +10681,12 @@ def var(
_num_ddof_doc,
desc="Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
- "ddof argument",
+ "ddof argument.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
notes=_std_notes,
+ examples=_std_examples,
)
def std(
self,
@@ -11175,7 +11178,8 @@ def _doc_params(cls):
Returns
-------
{name1} or {name2} (if level specified) \
-{notes}
+{notes}\
+{examples}
"""
_std_notes = """
@@ -11185,6 +11189,34 @@ def _doc_params(cls):
To have the same behaviour as `numpy.std`, use `ddof=0` (instead of the
default `ddof=1`)"""
+_std_examples = """
+
+Examples
+--------
+>>> df = pd.DataFrame({'person_id': [0, 1, 2, 3],
+... 'age': [21, 25, 62, 43],
+... 'height': [1.61, 1.87, 1.49, 2.01]}
+... ).set_index('person_id')
+>>> df
+ age height
+person_id
+0 21 1.61
+1 25 1.87
+2 62 1.49
+3 43 2.01
+
+The standard deviation of the columns can be found as follows:
+
+>>> df.std()
+age 18.786076
+height 0.237417
+
+Alternatively, `ddof=0` can be set to normalize by N instead of N-1:
+
+>>> df.std(ddof=0)
+age 16.269219
+height 0.205609"""
+
_bool_doc = """
{desc}
| xref https://github.com/pandas-dev/pandas/issues/44162
- [x] Added two examples to the documentation of DataFrame.std function so that users can understand how to use it with different delta degrees of freedom.
- [ ] Add examples to the documentation of DataFrame.var function
---
- [x] tests added / passed
- [x] All pre-commit linting tests pass
| https://api.github.com/repos/pandas-dev/pandas/pulls/44226 | 2021-10-29T11:03:40Z | 2021-11-01T17:09:11Z | 2021-11-01T17:09:11Z | 2021-11-01T17:09:24Z |
Update is_platform_arm() to detect 32-bit arm and other variants | diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 3233de8e3b6d1..fd5c46f7a6d5a 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -99,7 +99,9 @@ def is_platform_arm() -> bool:
bool
True if the running platform uses ARM architecture.
"""
- return platform.machine() in ("arm64", "aarch64")
+ return platform.machine() in ("arm64", "aarch64") or platform.machine().startswith(
+ "armv"
+ )
def import_lzma():
| - When running an arm32 ("linux32'd") chroot on an arm64 machine,
Python's platform.machine() will return "armv8l".
- In other cases, on "real" arm32, it'll return whatever uname says
(just like in the first case) which might be e.g. armv7a.
Keeping the other options ("aarch64", "arm64") given that Windows
or other kernels might choose to return different values and these
were added for a reason, but at least this fixes detection on Linux.
This allows tests like test_subtype_integer_errors to be skipped
as intended on arm.
Bug: https://bugs.gentoo.org/818964
Signed-off-by: Sam James <sam@gentoo.org>
- [X] tests added / passed
- [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44225 | 2021-10-29T11:01:32Z | 2021-10-29T16:24:01Z | 2021-10-29T16:24:01Z | 2021-11-01T00:28:35Z |
CLN: remove last getattr(arg, '_values', arg) usages | diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 3116f2b40900a..005c5f75e6cfa 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -53,7 +53,6 @@
@inherit_names(
[
"argsort",
- "_internal_get_values",
"tolist",
"codes",
"categories",
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 2838c33a42716..aca751362c915 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -69,6 +69,7 @@
objects_to_datetime64ns,
tz_to_dtype,
)
+from pandas.core.construction import extract_array
from pandas.core.indexes.base import Index
from pandas.core.indexes.datetimes import DatetimeIndex
@@ -516,7 +517,7 @@ def _to_datetime_with_unit(arg, unit, name, tz, errors: str) -> Index:
"""
to_datetime specalized to the case where a 'unit' is passed.
"""
- arg = getattr(arg, "_values", arg) # TODO: extract_array
+ arg = extract_array(arg, extract_numpy=True)
# GH#30050 pass an ndarray to tslib.array_with_unit_to_datetime
# because it expects an ndarray argument
| - [x] closes #27167
- [ ] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44224 | 2021-10-29T02:21:02Z | 2021-10-29T16:05:30Z | 2021-10-29T16:05:29Z | 2021-10-29T17:04:33Z |
CLN: remove checks for python < 3.8 | diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
index ff6b18835322e..a89d0aecfc26c 100644
--- a/pandas/_libs/tslibs/timestamps.pyi
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -5,7 +5,6 @@ from datetime import (
timedelta,
tzinfo as _tzinfo,
)
-import sys
from time import struct_time
from typing import (
ClassVar,
@@ -89,16 +88,8 @@ class Timestamp(datetime):
def today(cls: Type[_S]) -> _S: ...
@classmethod
def fromordinal(cls: Type[_S], n: int) -> _S: ...
- if sys.version_info >= (3, 8):
- @classmethod
- def now(cls: Type[_S], tz: _tzinfo | str | None = ...) -> _S: ...
- else:
- @overload
- @classmethod
- def now(cls: Type[_S], tz: None = ...) -> _S: ...
- @overload
- @classmethod
- def now(cls, tz: _tzinfo) -> datetime: ...
+ @classmethod
+ def now(cls: Type[_S], tz: _tzinfo | str | None = ...) -> _S: ...
@classmethod
def utcnow(cls: Type[_S]) -> _S: ...
@classmethod
@@ -129,10 +120,7 @@ class Timestamp(datetime):
*,
fold: int = ...,
) -> datetime: ...
- if sys.version_info >= (3, 8):
- def astimezone(self: _S, tz: _tzinfo | None = ...) -> _S: ...
- else:
- def astimezone(self, tz: _tzinfo | None = ...) -> datetime: ...
+ def astimezone(self: _S, tz: _tzinfo | None = ...) -> _S: ...
def ctime(self) -> str: ...
def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ...
@classmethod
@@ -144,12 +132,8 @@ class Timestamp(datetime):
def __lt__(self, other: datetime) -> bool: ... # type: ignore
def __ge__(self, other: datetime) -> bool: ... # type: ignore
def __gt__(self, other: datetime) -> bool: ... # type: ignore
- if sys.version_info >= (3, 8):
- def __add__(self: _S, other: timedelta) -> _S: ...
- def __radd__(self: _S, other: timedelta) -> _S: ...
- else:
- def __add__(self, other: timedelta) -> datetime: ...
- def __radd__(self, other: timedelta) -> datetime: ...
+ def __add__(self: _S, other: timedelta) -> _S: ...
+ def __radd__(self: _S, other: timedelta) -> _S: ...
@overload # type: ignore
def __sub__(self, other: datetime) -> timedelta: ...
@overload
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index 71f1d03ea6d1f..89b8783462f7e 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -1,7 +1,6 @@
"""Tests for Table Schema integration."""
from collections import OrderedDict
import json
-import sys
import numpy as np
import pytest
@@ -691,7 +690,6 @@ class TestTableOrientReader:
},
],
)
- @pytest.mark.skipif(sys.version_info[:3] == (3, 7, 0), reason="GH-35309")
def test_read_json_table_orient(self, index_nm, vals, recwarn):
df = DataFrame(vals, index=pd.Index(range(4), name=index_nm))
out = df.to_json(orient="table")
@@ -741,7 +739,6 @@ def test_read_json_table_orient_raises(self, index_nm, vals, recwarn):
},
],
)
- @pytest.mark.skipif(sys.version_info[:3] == (3, 7, 0), reason="GH-35309")
def test_read_json_table_timezones_orient(self, idx, vals, recwarn):
# GH 35973
df = DataFrame(vals, index=idx)
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/44223 | 2021-10-28T21:49:08Z | 2021-10-29T13:10:29Z | 2021-10-29T13:10:29Z | 2022-04-01T01:36:46Z |
BUG: Min/max does not work for dates with timezones if there are missing values in the data frame | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 66b26e56b2258..cd2e6a297d4d7 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -549,7 +549,7 @@ Missing
^^^^^^^
- Bug in :meth:`DataFrame.fillna` with limit and no method ignores axis='columns' or ``axis = 1`` (:issue:`40989`)
- Bug in :meth:`DataFrame.fillna` not replacing missing values when using a dict-like ``value`` and duplicate column names (:issue:`43476`)
--
+- Bug in :meth:`DataFrame.max`, :meth:`DataFrame.min`, :meth:`Series.max` and :meth:`Series.min` when called on datetime columns with timezone aware data and missing elements (:issue:`27794` and :issue:`44196`)
MultiIndex
^^^^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ebf3428020652..ae0343e5a3fca 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9979,7 +9979,11 @@ def _get_data() -> DataFrame:
data = self._get_bool_data()
return data
- if numeric_only is not None or axis == 0:
+ if (
+ numeric_only is not None
+ or axis == 0
+ or (name in ["max", "min"] and axis == 1)
+ ):
# For numeric_only non-None and axis non-None, we know
# which blocks to use and no try/except is needed.
# For numeric_only=None only the case with axis==0 and no object
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 919d8ab14778e..4a5e1a9d1a864 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -17,7 +17,9 @@
DataFrame,
Index,
MultiIndex,
+ PeriodDtype,
Series,
+ Timedelta,
Timestamp,
date_range,
isna,
@@ -756,7 +758,7 @@ def test_operators_timedelta64(self):
# excludes numeric
with tm.assert_produces_warning(FutureWarning, match="Select only valid"):
result = mixed.min(axis=1)
- expected = Series([1, 1, 1.0], index=[0, 1, 2])
+ expected = Series([])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
@@ -1763,3 +1765,141 @@ def test_prod_sum_min_count_mixed_object():
msg = re.escape("unsupported operand type(s) for +: 'int' and 'str'")
with pytest.raises(TypeError, match=msg):
df.sum(axis=0, min_count=1, numeric_only=False)
+
+
+def test_timezone_min_max_with_nat():
+ # GH#27794
+ df = pd.DataFrame(
+ {
+ "A": pd.date_range(start="2018-01-01", end="2018-01-03", tz="UTC"),
+ "B": pd.date_range(start="2018-01-01", end="2018-01-02", tz="UTC").insert(
+ 2, pd.NaT
+ ),
+ }
+ )
+
+ expected = pd.Series(
+ [
+ pd.Timestamp("2018-01-01", tz="UTC"),
+ pd.Timestamp("2018-01-02", tz="UTC"),
+ pd.Timestamp("2018-01-03", tz="UTC"),
+ ],
+ )
+ result = df.min(axis=1)
+ tm.assert_series_equal(result, expected)
+
+ expected = pd.Series(
+ [
+ pd.Timestamp("2018-01-01", tz="UTC"),
+ pd.Timestamp("2018-01-02", tz="UTC"),
+ pd.Timestamp("2018-01-03", tz="UTC"),
+ ],
+ )
+ result = df.max(axis=1)
+ tm.assert_series_equal(result, expected)
+
+
+def test_min_max_timestamp_timezone_nat():
+ # GH#44196
+ rng_with_tz = pd.date_range(
+ start="2021-10-01T12:00:00+02:00", end="2021-10-02T12:00:00+02:00", freq="4H"
+ )
+ df_with_tz = DataFrame(
+ data={"A": rng_with_tz, "B": rng_with_tz + pd.Timedelta(minutes=20)}
+ )
+ df_with_tz.iloc[2, 1] = pd.NaT
+
+ result = df_with_tz.max(axis=1)
+ expected = pd.Series(
+ [
+ pd.Timestamp("2021-10-01T12:20:00+02:00"),
+ pd.Timestamp("2021-10-01T16:20:00+02:00"),
+ pd.Timestamp("2021-10-01T20:00:00+02:00"),
+ pd.Timestamp("2021-10-02T00:20:00+02:00"),
+ pd.Timestamp("2021-10-02T04:20:00+02:00"),
+ pd.Timestamp("2021-10-02T08:20:00+02:00"),
+ pd.Timestamp("2021-10-02T12:20:00+02:00"),
+ ]
+ )
+ tm.assert_series_equal(result, expected)
+
+
+def test_timezone_min_max_both_axis():
+ rng_with_tz = pd.date_range(
+ start="2021-10-01T12:00:00+02:00", end="2021-10-02T12:00:00+02:00", freq="4H"
+ )
+ df_with_tz = DataFrame(
+ data={"A": rng_with_tz, "B": rng_with_tz + pd.Timedelta(minutes=20)}
+ )
+ df_with_tz.iloc[2, 1] = pd.NaT
+
+ result = df_with_tz.max(axis=1)
+ expected = df_with_tz.T.max(axis=0)
+
+ tm.assert_series_equal(result, expected)
+
+ result = df_with_tz.min(axis=1)
+ expected = df_with_tz.T.min(axis=0)
+
+ tm.assert_series_equal(result, expected)
+
+
+def test_min_max_timedelta64_nat():
+ df = DataFrame(
+ [
+ [Timedelta(minutes=20), Timedelta(days=2), Timedelta(seconds=3)],
+ [Timedelta(minutes=2, seconds=2), Timedelta(days=2, minutes=30), pd.NaT],
+ ]
+ )
+ expected = pd.Series(
+ [Timedelta(minutes=2, seconds=2), Timedelta(days=2), Timedelta(seconds=3)]
+ )
+ result = df.min(axis=0)
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(df.min(axis=0), df.T.min(axis=1))
+
+ expected = pd.Series([Timedelta(seconds=3), Timedelta(minutes=2, seconds=2)])
+ result = df.min(axis=1)
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(df.min(axis=1), df.T.min(axis=0))
+
+ expected = pd.Series(
+ [Timedelta(minutes=20), Timedelta(days=2, minutes=30), Timedelta(seconds=3)]
+ )
+ result = df.max(axis=0)
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(df.max(axis=0), df.T.max(axis=1))
+
+ expected = pd.Series([Timedelta(days=2), Timedelta(days=2, minutes=30)])
+ result = df.max(axis=1)
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(df.max(axis=1), df.T.max(axis=0))
+
+
+def test_min_max_perioddtype_nat():
+ df = DataFrame(
+ [
+ [PeriodDtype(freq="20m"), PeriodDtype(freq="1h"), PeriodDtype(freq="1d")],
+ [PeriodDtype(freq="25m"), PeriodDtype(freq="2h"), pd.NaT],
+ ]
+ )
+
+ expected = Series([])
+ result = df.min(axis=0)
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(df.min(axis=0), df.T.min(axis=1))
+
+ expected = Series([])
+ result = df.min(axis=1)
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(df.min(axis=1), df.T.min(axis=0))
+
+ expected = Series([])
+ result = df.max(axis=0)
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(df.max(axis=0), df.T.max(axis=1))
+
+ expected = Series([])
+ result = df.max(axis=1)
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(df.max(axis=1), df.T.max(axis=0))
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index 80c86e0103436..a99d2f590be97 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -746,7 +746,7 @@ def test_cached_range_bug(self):
assert len(rng) == 50
assert rng[0] == datetime(2010, 9, 1, 5)
- def test_timezone_comparaison_bug(self):
+ def test_timezone_comparison_bug(self):
# smoke test
start = Timestamp("20130220 10:00", tz="US/Eastern")
result = date_range(start, periods=2, tz="US/Eastern")
| - [x] closes #44196 and #27794
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
This is an attempt at fixing the bug of wrong `min`/`max` aggregation with timezone aware data and at least one `pd.NaT` in the data to aggregate. While I still see quite a lot of weaknesses in my approach I want to elaborate why I did what I did, what the weaknesses of this approach are, how this might be handled better and why I didn't follow that path.
#### Problem description
To fully understand the problem at hand, let's look at three different cases:
```python
import pandas as pd
# CASE 1
# No Problem without NaT
rng_with_tz = pd.date_range(start='2021-10-01T12:00:00+02:00', end='2021-10-02T12:00:00+02:00', freq='4H')
df_with_tz = pd.DataFrame(data={'A': rng_with_tz, 'B': rng_with_tz + pd.Timedelta(minutes=20)})
df_with_tz.max(axis=1)
# No problem with timezone naive dataframe
# CASE 2
rng_tz_naive = pd.date_range(start='2021-10-01T12:00:00', end='2021-10-02T12:00:00', freq='4H')
df_tz_naive = pd.DataFrame(data={'A': rng_tz_naive, 'B': rng_tz_naive + pd.Timedelta(minutes=20)})
df_tz_naive.iloc[2, 1] = pd.NaT
df_tz_naive.max(axis=1)
# Incorrect result and warning with timezone aware df
# CASE 1
rng_with_tz = pd.date_range(start='2021-10-01T12:00:00+02:00', end='2021-10-02T12:00:00+02:00', freq='4H')
df_with_tz = pd.DataFrame(data={'A': rng_with_tz, 'B': rng_with_tz + pd.Timedelta(minutes=20)})
df_with_tz.iloc[2, 1] = pd.NaT
df_with_tz.max(axis=1)
```
When one drills down into the first case no problems arise. The `max` call is handled down to [`pandas/core/nanops.py:reduction`](https://github.com/pandas-dev/pandas/blob/master/pandas/core/nanops.py#L1023) and is running through fine. In the second case one finds that in [`pandas/core/nanops.py:reduction`](https://github.com/pandas-dev/pandas/blob/master/pandas/core/nanops.py#L1023) right after the call to [`pandas/core/nanops.py:_get_values`](https://github.com/pandas-dev/pandas/blob/66ce5de55b4aa6d4ca8bef65318cc9e2381689f6/pandas/core/nanops.py#L257) one realizes that all values are cast to integers, even the `pd.NaT`.
In the third case one can see that the values aren't casted and `pd.NaT` is replaced by `-np.inf` which comes from [this line](https://github.com/pandas-dev/pandas/blob/66ce5de55b4aa6d4ca8bef65318cc9e2381689f6/pandas/core/nanops.py#L1051). This replacement is the reason for our error:
```python
TypeError: '>=' not supported between instances of 'Timestamp' and 'float'
```
when we try to [apply the max from numpy](https://github.com/pandas-dev/pandas/blob/66ce5de55b4aa6d4ca8bef65318cc9e2381689f6/pandas/core/nanops.py#L1042)
which results from comparing `pd.Timestamp`s with `float`s (`np.inf`). This error is caught and all elements are converted into `np.nan`.
The reason behind the error is, that
```python
>>> import pandas as pd
>>> rng_with_tz = pd.date_range(start='2021-10-01T12:00:00+02:00', end='2021-10-02T12:00:00+02:00', freq='4H')
>>> rng_with_tz
DatetimeIndex(['2021-10-01 12:00:00+02:00', '2021-10-01 16:00:00+02:00',
'2021-10-01 20:00:00+02:00', '2021-10-02 00:00:00+02:00',
'2021-10-02 04:00:00+02:00', '2021-10-02 08:00:00+02:00',
'2021-10-02 12:00:00+02:00'],
dtype='datetime64[ns, pytz.FixedOffset(120)]', freq='4H')
>>> rng_with_tz.values.dtype
dtype('<M8[ns]')
>>> rng_with_tz = pd.date_range(start='2021-10-01T12:00:00+02:00', end='2021-10-02T12:00:00+02:00', freq='4H')
>>> df_with_tz = pd.DataFrame(data={'A': rng_with_tz, 'B': rng_with_tz + pd.Timedelta(minutes=20)})
>>> df_with_tz.values.dtype
dtype('O')
```
the `dtype` changes when using timezone aware data, though the `dtype`s of the single series is still `dtype('<M8[ns]')`. This has serious implications: In [`pandas/core/nanops.py:_get_values`](https://github.com/pandas-dev/pandas/blob/66ce5de55b4aa6d4ca8bef65318cc9e2381689f6/pandas/core/nanops.py#L257) the `if` condition in this line (https://github.com/pandas-dev/pandas/blob/66ce5de55b4aa6d4ca8bef65318cc9e2381689f6/pandas/core/nanops.py#L313) is `False` and no cast to happens but still the `pd.NaT` is replaced by `np.inf` which is the reason for the error stated above.
#### Possible improvements on this PR
If we could change the `dtype` of the timezone aware DataFrame with missing values to the `numpy` dtype: `dtype('<M8[ns]')` we should be fine. But I have no idea how we can achieve that, since these are not `pandas` but `numpy` types we are talking about and I didn't really understand how `numpy` determines it`s types.
#### Weaknesses of this PR
The assumption made while catching the exceptions aren't well tested (though all tests are passed) and there might be a better and more generic solution. | https://api.github.com/repos/pandas-dev/pandas/pulls/44222 | 2021-10-28T21:07:37Z | 2022-01-25T01:46:18Z | null | 2022-01-25T01:46:19Z |
CLN: simplify io.formats.format.get_series_repr_params | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 13bedac664ea3..15715af05f904 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1458,7 +1458,7 @@ def __repr__(self) -> str:
"""
Return a string representation for a particular Series.
"""
- repr_params = fmt.get_series_repr_params(self)
+ repr_params = fmt.get_series_repr_params()
return self.to_string(**repr_params)
def to_string(
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 4bab85a3c6739..ba85a1b340d05 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -516,7 +516,7 @@ def get_dataframe_repr_params() -> dict[str, Any]:
}
-def get_series_repr_params(series: Series) -> dict[str, Any]:
+def get_series_repr_params() -> dict[str, Any]:
"""Get the parameters used to repr(Series) calls using Series.to_string.
Supplying these parameters to Series.to_string is equivalent to calling
@@ -529,7 +529,7 @@ def get_series_repr_params(series: Series) -> dict[str, Any]:
>>> import pandas as pd
>>>
>>> ser = pd.Series([1, 2, 3, 4])
- >>> repr_params = pd.io.formats.format.get_series_repr_params(ser)
+ >>> repr_params = pd.io.formats.format.get_series_repr_params()
>>> repr(ser) == ser.to_string(**repr_params)
True
"""
@@ -546,8 +546,8 @@ def get_series_repr_params(series: Series) -> dict[str, Any]:
)
return {
- "name": series.name,
- "dtype": series.dtype,
+ "name": True,
+ "dtype": True,
"min_rows": min_rows,
"max_rows": max_rows,
"length": get_option("display.show_dimensions"),
| Some of the returned values should be booleans. Follow-up to #44218. | https://api.github.com/repos/pandas-dev/pandas/pulls/44221 | 2021-10-28T19:34:21Z | 2021-10-29T14:29:42Z | 2021-10-29T14:29:42Z | 2021-10-29T16:59:15Z |
TST/REF: share tz_localize tests, move misplaced arith | diff --git a/pandas/tests/series/methods/test_tz_convert.py b/pandas/tests/series/methods/test_tz_convert.py
deleted file mode 100644
index d826dde646cfb..0000000000000
--- a/pandas/tests/series/methods/test_tz_convert.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import numpy as np
-
-from pandas import (
- DatetimeIndex,
- Series,
-)
-import pandas._testing as tm
-
-
-class TestTZConvert:
- def test_series_tz_convert_to_utc(self):
- base = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
- idx1 = base.tz_convert("Asia/Tokyo")[:2]
- idx2 = base.tz_convert("US/Eastern")[1:]
-
- res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2)
- tm.assert_series_equal(res, Series([np.nan, 3, np.nan], index=base))
diff --git a/pandas/tests/series/methods/test_tz_localize.py b/pandas/tests/series/methods/test_tz_localize.py
index a32c1fb8df502..b8a1ea55db4fe 100644
--- a/pandas/tests/series/methods/test_tz_localize.py
+++ b/pandas/tests/series/methods/test_tz_localize.py
@@ -68,22 +68,39 @@ def test_series_tz_localize_matching_index(self):
["foo", "invalid"],
],
)
- def test_series_tz_localize_nonexistent(self, tz, method, exp):
+ def test_tz_localize_nonexistent(self, tz, method, exp):
# GH 8917
n = 60
dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min")
- s = Series(1, dti)
+ ser = Series(1, index=dti)
+ df = ser.to_frame()
+
if method == "raise":
+
+ with tm.external_error_raised(pytz.NonExistentTimeError):
+ dti.tz_localize(tz, nonexistent=method)
+ with tm.external_error_raised(pytz.NonExistentTimeError):
+ ser.tz_localize(tz, nonexistent=method)
with tm.external_error_raised(pytz.NonExistentTimeError):
- s.tz_localize(tz, nonexistent=method)
+ df.tz_localize(tz, nonexistent=method)
+
elif exp == "invalid":
with pytest.raises(ValueError, match="argument must be one of"):
dti.tz_localize(tz, nonexistent=method)
+ with pytest.raises(ValueError, match="argument must be one of"):
+ ser.tz_localize(tz, nonexistent=method)
+ with pytest.raises(ValueError, match="argument must be one of"):
+ df.tz_localize(tz, nonexistent=method)
+
else:
- result = s.tz_localize(tz, nonexistent=method)
+ result = ser.tz_localize(tz, nonexistent=method)
expected = Series(1, index=DatetimeIndex([exp] * n, tz=tz))
tm.assert_series_equal(result, expected)
+ result = df.tz_localize(tz, nonexistent=method)
+ expected = expected.to_frame()
+ tm.assert_frame_equal(result, expected)
+
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_series_tz_localize_empty(self, tzstr):
# GH#2248
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 103130484f0e1..e4f9366be8dd7 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -714,6 +714,16 @@ def test_series_add_tz_mismatch_converts_to_utc(self):
assert result.index.tz == pytz.UTC
tm.assert_series_equal(result, expected)
+ # TODO: redundant with test_series_add_tz_mismatch_converts_to_utc?
+ def test_series_arithmetic_mismatched_tzs_convert_to_utc(self):
+ base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
+ idx1 = base.tz_convert("Asia/Tokyo")[:2]
+ idx2 = base.tz_convert("US/Eastern")[1:]
+
+ res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2)
+ expected = Series([np.nan, 3, np.nan], index=base)
+ tm.assert_series_equal(res, expected)
+
def test_series_add_aware_naive_raises(self):
rng = date_range("1/1/2011", periods=10, freq="H")
ser = Series(np.random.randn(len(rng)), index=rng)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44220 | 2021-10-28T18:00:31Z | 2021-10-29T13:15:06Z | 2021-10-29T13:15:06Z | 2021-10-29T15:20:29Z |
REF/TST: share asfreq tests | diff --git a/pandas/tests/frame/methods/test_asfreq.py b/pandas/tests/frame/methods/test_asfreq.py
index 0d28af5ed7be9..0a8d7c43adffe 100644
--- a/pandas/tests/frame/methods/test_asfreq.py
+++ b/pandas/tests/frame/methods/test_asfreq.py
@@ -1,12 +1,14 @@
from datetime import datetime
import numpy as np
+import pytest
from pandas import (
DataFrame,
DatetimeIndex,
Series,
date_range,
+ period_range,
to_datetime,
)
import pandas._testing as tm
@@ -15,29 +17,128 @@
class TestAsFreq:
- def test_asfreq_resample_set_correct_freq(self):
+ def test_asfreq2(self, frame_or_series):
+ ts = frame_or_series(
+ [0.0, 1.0, 2.0],
+ index=DatetimeIndex(
+ [
+ datetime(2009, 10, 30),
+ datetime(2009, 11, 30),
+ datetime(2009, 12, 31),
+ ],
+ freq="BM",
+ ),
+ )
+
+ daily_ts = ts.asfreq("B")
+ monthly_ts = daily_ts.asfreq("BM")
+ tm.assert_equal(monthly_ts, ts)
+
+ daily_ts = ts.asfreq("B", method="pad")
+ monthly_ts = daily_ts.asfreq("BM")
+ tm.assert_equal(monthly_ts, ts)
+
+ daily_ts = ts.asfreq(offsets.BDay())
+ monthly_ts = daily_ts.asfreq(offsets.BMonthEnd())
+ tm.assert_equal(monthly_ts, ts)
+
+ result = ts[:0].asfreq("M")
+ assert len(result) == 0
+ assert result is not ts
+
+ if frame_or_series is Series:
+ daily_ts = ts.asfreq("D", fill_value=-1)
+ result = daily_ts.value_counts().sort_index()
+ expected = Series([60, 1, 1, 1], index=[-1.0, 2.0, 1.0, 0.0]).sort_index()
+ tm.assert_series_equal(result, expected)
+
+ def test_asfreq_datetimeindex_empty(self, frame_or_series):
+ # GH#14320
+ index = DatetimeIndex(["2016-09-29 11:00"])
+ expected = frame_or_series(index=index, dtype=object).asfreq("H")
+ result = frame_or_series([3], index=index.copy()).asfreq("H")
+ tm.assert_index_equal(expected.index, result.index)
+
+ @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
+ def test_tz_aware_asfreq_smoke(self, tz, frame_or_series):
+ dr = date_range("2011-12-01", "2012-07-20", freq="D", tz=tz)
+
+ obj = frame_or_series(np.random.randn(len(dr)), index=dr)
+
+ # it works!
+ obj.asfreq("T")
+
+ def test_asfreq_normalize(self, frame_or_series):
+ rng = date_range("1/1/2000 09:30", periods=20)
+ norm = date_range("1/1/2000", periods=20)
+
+ vals = np.random.randn(20, 3)
+
+ obj = DataFrame(vals, index=rng)
+ expected = DataFrame(vals, index=norm)
+ if frame_or_series is Series:
+ obj = obj[0]
+ expected = expected[0]
+
+ result = obj.asfreq("D", normalize=True)
+ tm.assert_equal(result, expected)
+
+ def test_asfreq_keep_index_name(self, frame_or_series):
+ # GH#9854
+ index_name = "bar"
+ index = date_range("20130101", periods=20, name=index_name)
+ obj = DataFrame(list(range(20)), columns=["foo"], index=index)
+ if frame_or_series is Series:
+ obj = obj["foo"]
+
+ assert index_name == obj.index.name
+ assert index_name == obj.asfreq("10D").index.name
+
+ def test_asfreq_ts(self, frame_or_series):
+ index = period_range(freq="A", start="1/1/2001", end="12/31/2010")
+ obj = DataFrame(np.random.randn(len(index), 3), index=index)
+ if frame_or_series is Series:
+ obj = obj[0]
+
+ result = obj.asfreq("D", how="end")
+ exp_index = index.asfreq("D", how="end")
+ assert len(result) == len(obj)
+ tm.assert_index_equal(result.index, exp_index)
+
+ result = obj.asfreq("D", how="start")
+ exp_index = index.asfreq("D", how="start")
+ assert len(result) == len(obj)
+ tm.assert_index_equal(result.index, exp_index)
+
+ def test_asfreq_resample_set_correct_freq(self, frame_or_series):
# GH#5613
# we test if .asfreq() and .resample() set the correct value for .freq
- df = DataFrame(
- {"date": ["2012-01-01", "2012-01-02", "2012-01-03"], "col": [1, 2, 3]}
- )
- df = df.set_index(to_datetime(df.date))
+ dti = to_datetime(["2012-01-01", "2012-01-02", "2012-01-03"])
+ obj = DataFrame({"col": [1, 2, 3]}, index=dti)
+ if frame_or_series is Series:
+ obj = obj["col"]
# testing the settings before calling .asfreq() and .resample()
- assert df.index.freq is None
- assert df.index.inferred_freq == "D"
+ assert obj.index.freq is None
+ assert obj.index.inferred_freq == "D"
# does .asfreq() set .freq correctly?
- assert df.asfreq("D").index.freq == "D"
+ assert obj.asfreq("D").index.freq == "D"
# does .resample() set .freq correctly?
- assert df.resample("D").asfreq().index.freq == "D"
+ assert obj.resample("D").asfreq().index.freq == "D"
+
+ def test_asfreq_empty(self, datetime_frame):
+ # test does not blow up on length-0 DataFrame
+ zero_length = datetime_frame.reindex([])
+ result = zero_length.asfreq("BM")
+ assert result is not zero_length
def test_asfreq(self, datetime_frame):
offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd())
rule_monthly = datetime_frame.asfreq("BM")
- tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"])
+ tm.assert_frame_equal(offset_monthly, rule_monthly)
filled = rule_monthly.asfreq("B", method="pad") # noqa
# TODO: actually check that this worked.
@@ -45,11 +146,6 @@ def test_asfreq(self, datetime_frame):
# don't forget!
filled_dep = rule_monthly.asfreq("B", method="pad") # noqa
- # test does not blow up on length-0 DataFrame
- zero_length = datetime_frame.reindex([])
- result = zero_length.asfreq("BM")
- assert result is not zero_length
-
def test_asfreq_datetimeindex(self):
df = DataFrame(
{"A": [1, 2, 3]},
diff --git a/pandas/tests/series/methods/test_asfreq.py b/pandas/tests/series/methods/test_asfreq.py
deleted file mode 100644
index 9a7f2343984d6..0000000000000
--- a/pandas/tests/series/methods/test_asfreq.py
+++ /dev/null
@@ -1,116 +0,0 @@
-from datetime import datetime
-
-import numpy as np
-import pytest
-
-from pandas import (
- DataFrame,
- DatetimeIndex,
- Series,
- date_range,
- period_range,
-)
-import pandas._testing as tm
-
-from pandas.tseries.offsets import (
- BDay,
- BMonthEnd,
-)
-
-
-class TestAsFreq:
- # TODO: de-duplicate/parametrize or move DataFrame test
- def test_asfreq_ts(self):
- index = period_range(freq="A", start="1/1/2001", end="12/31/2010")
- ts = Series(np.random.randn(len(index)), index=index)
- df = DataFrame(np.random.randn(len(index), 3), index=index)
-
- result = ts.asfreq("D", how="end")
- df_result = df.asfreq("D", how="end")
- exp_index = index.asfreq("D", how="end")
- assert len(result) == len(ts)
- tm.assert_index_equal(result.index, exp_index)
- tm.assert_index_equal(df_result.index, exp_index)
-
- result = ts.asfreq("D", how="start")
- assert len(result) == len(ts)
- tm.assert_index_equal(result.index, index.asfreq("D", how="start"))
-
- @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
- def test_tz_aware_asfreq(self, tz):
- dr = date_range("2011-12-01", "2012-07-20", freq="D", tz=tz)
-
- ser = Series(np.random.randn(len(dr)), index=dr)
-
- # it works!
- ser.asfreq("T")
-
- def test_asfreq(self):
- ts = Series(
- [0.0, 1.0, 2.0],
- index=DatetimeIndex(
- [
- datetime(2009, 10, 30),
- datetime(2009, 11, 30),
- datetime(2009, 12, 31),
- ],
- freq="BM",
- ),
- )
-
- daily_ts = ts.asfreq("B")
- monthly_ts = daily_ts.asfreq("BM")
- tm.assert_series_equal(monthly_ts, ts)
-
- daily_ts = ts.asfreq("B", method="pad")
- monthly_ts = daily_ts.asfreq("BM")
- tm.assert_series_equal(monthly_ts, ts)
-
- daily_ts = ts.asfreq(BDay())
- monthly_ts = daily_ts.asfreq(BMonthEnd())
- tm.assert_series_equal(monthly_ts, ts)
-
- result = ts[:0].asfreq("M")
- assert len(result) == 0
- assert result is not ts
-
- daily_ts = ts.asfreq("D", fill_value=-1)
- result = daily_ts.value_counts().sort_index()
- expected = Series([60, 1, 1, 1], index=[-1.0, 2.0, 1.0, 0.0]).sort_index()
- tm.assert_series_equal(result, expected)
-
- def test_asfreq_datetimeindex_empty_series(self):
- # GH#14320
- index = DatetimeIndex(["2016-09-29 11:00"])
- expected = Series(index=index, dtype=object).asfreq("H")
- result = Series([3], index=index.copy()).asfreq("H")
- tm.assert_index_equal(expected.index, result.index)
-
- def test_asfreq_keep_index_name(self):
- # GH#9854
- index_name = "bar"
- index = date_range("20130101", periods=20, name=index_name)
- df = DataFrame(list(range(20)), columns=["foo"], index=index)
-
- assert index_name == df.index.name
- assert index_name == df.asfreq("10D").index.name
-
- def test_asfreq_normalize(self):
- rng = date_range("1/1/2000 09:30", periods=20)
- norm = date_range("1/1/2000", periods=20)
- vals = np.random.randn(20)
- ts = Series(vals, index=rng)
-
- result = ts.asfreq("D", normalize=True)
- norm = date_range("1/1/2000", periods=20)
- expected = Series(vals, index=norm)
-
- tm.assert_series_equal(result, expected)
-
- vals = np.random.randn(20, 3)
- ts = DataFrame(vals, index=rng)
-
- result = ts.asfreq("D", normalize=True)
- expected = DataFrame(vals, index=norm)
-
- tm.assert_frame_equal(result, expected)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44219 | 2021-10-28T17:38:05Z | 2021-10-29T17:20:49Z | 2021-10-29T17:20:49Z | 2021-10-29T17:20:49Z |
REF: extract params used in Series.__repr__ | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 65592be32b5ef..13bedac664ea3 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3,8 +3,6 @@
"""
from __future__ import annotations
-from io import StringIO
-from shutil import get_terminal_size
from textwrap import dedent
from typing import (
IO,
@@ -1460,29 +1458,8 @@ def __repr__(self) -> str:
"""
Return a string representation for a particular Series.
"""
- buf = StringIO("")
- width, height = get_terminal_size()
- max_rows = (
- height
- if get_option("display.max_rows") == 0
- else get_option("display.max_rows")
- )
- min_rows = (
- height
- if get_option("display.max_rows") == 0
- else get_option("display.min_rows")
- )
- show_dimensions = get_option("display.show_dimensions")
-
- self.to_string(
- buf=buf,
- name=self.name,
- dtype=self.dtype,
- min_rows=min_rows,
- max_rows=max_rows,
- length=show_dimensions,
- )
- return buf.getvalue()
+ repr_params = fmt.get_series_repr_params(self)
+ return self.to_string(**repr_params)
def to_string(
self,
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 07811be909330..4bab85a3c6739 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -489,6 +489,8 @@ def get_dataframe_repr_params() -> dict[str, Any]:
Supplying these parameters to DataFrame.to_string is equivalent to calling
``repr(DataFrame)``. This is useful if you want to adjust the repr output.
+ .. versionadded:: 1.4.0
+
Example
-------
>>> import pandas as pd
@@ -514,6 +516,44 @@ def get_dataframe_repr_params() -> dict[str, Any]:
}
+def get_series_repr_params(series: Series) -> dict[str, Any]:
+ """Get the parameters used to repr(Series) calls using Series.to_string.
+
+ Supplying these parameters to Series.to_string is equivalent to calling
+ ``repr(series)``. This is useful if you want to adjust the series repr output.
+
+ .. versionadded:: 1.4.0
+
+ Example
+ -------
+ >>> import pandas as pd
+ >>>
+ >>> ser = pd.Series([1, 2, 3, 4])
+ >>> repr_params = pd.io.formats.format.get_series_repr_params(ser)
+ >>> repr(ser) == ser.to_string(**repr_params)
+ True
+ """
+ width, height = get_terminal_size()
+ max_rows = (
+ height
+ if get_option("display.max_rows") == 0
+ else get_option("display.max_rows")
+ )
+ min_rows = (
+ height
+ if get_option("display.max_rows") == 0
+ else get_option("display.min_rows")
+ )
+
+ return {
+ "name": series.name,
+ "dtype": series.dtype,
+ "min_rows": min_rows,
+ "max_rows": max_rows,
+ "length": get_option("display.show_dimensions"),
+ }
+
+
class DataFrameFormatter:
"""Class for processing dataframe formatting options and data."""
| This PR gives flexibility if we want the series string output to be like the Series repr, but with some adjustments.
So with this PR we we can now do:
```python
>>> params = pd.io.formats.format.get_series_repr_params(ser)
>>> params[my_param] = new_value
>>> ser.to_string(**params)
```
Followup to #43987.
| https://api.github.com/repos/pandas-dev/pandas/pulls/44218 | 2021-10-28T10:46:53Z | 2021-10-28T13:17:57Z | 2021-10-28T13:17:57Z | 2021-10-28T16:32:58Z |
TST: parametrize/share indexing tests | diff --git a/pandas/tests/frame/indexing/test_mask.py b/pandas/tests/frame/indexing/test_mask.py
index 70ec02a2334af..dd75e232051fa 100644
--- a/pandas/tests/frame/indexing/test_mask.py
+++ b/pandas/tests/frame/indexing/test_mask.py
@@ -29,6 +29,7 @@ def test_mask(self):
tm.assert_frame_equal(rs, df.mask(df <= 0, other))
tm.assert_frame_equal(rs, df.mask(~cond, other))
+ def test_mask2(self):
# see GH#21891
df = DataFrame([1, 2])
res = df.mask([[True], [False]])
@@ -91,18 +92,23 @@ def test_mask_dtype_bool_conversion(self):
result = bools.mask(mask)
tm.assert_frame_equal(result, expected)
- def test_mask_pos_args_deprecation(self):
+ def test_mask_pos_args_deprecation(self, frame_or_series):
# https://github.com/pandas-dev/pandas/issues/41485
- df = DataFrame({"a": range(5)})
+ obj = DataFrame({"a": range(5)})
expected = DataFrame({"a": [-1, 1, -1, 3, -1]})
- cond = df % 2 == 0
+ if frame_or_series is Series:
+ obj = obj["a"]
+ expected = expected["a"]
+
+ cond = obj % 2 == 0
msg = (
- r"In a future version of pandas all arguments of DataFrame.mask except for "
+ r"In a future version of pandas all arguments of "
+ f"{frame_or_series.__name__}.mask except for "
r"the arguments 'cond' and 'other' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.mask(cond, -1, False)
- tm.assert_frame_equal(result, expected)
+ result = obj.mask(cond, -1, False)
+ tm.assert_equal(result, expected)
def test_mask_try_cast_deprecated(frame_or_series):
@@ -118,25 +124,30 @@ def test_mask_try_cast_deprecated(frame_or_series):
obj.mask(mask, -1, try_cast=True)
-def test_mask_stringdtype():
+def test_mask_stringdtype(frame_or_series):
# GH 40824
- df = DataFrame(
+ obj = DataFrame(
{"A": ["foo", "bar", "baz", NA]},
index=["id1", "id2", "id3", "id4"],
dtype=StringDtype(),
)
- filtered_df = DataFrame(
+ filtered_obj = DataFrame(
{"A": ["this", "that"]}, index=["id2", "id3"], dtype=StringDtype()
)
- filter_ser = Series([False, True, True, False])
- result = df.mask(filter_ser, filtered_df)
-
expected = DataFrame(
{"A": [NA, "this", "that", NA]},
index=["id1", "id2", "id3", "id4"],
dtype=StringDtype(),
)
- tm.assert_frame_equal(result, expected)
+ if frame_or_series is Series:
+ obj = obj["A"]
+ filtered_obj = filtered_obj["A"]
+ expected = expected["A"]
+
+ filter_ser = Series([False, True, True, False])
+ result = obj.mask(filter_ser, filtered_obj)
+
+ tm.assert_equal(result, expected)
def test_mask_where_dtype_timedelta():
diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py
index eaafd2f017e79..b675e9d703f44 100644
--- a/pandas/tests/frame/indexing/test_where.py
+++ b/pandas/tests/frame/indexing/test_where.py
@@ -598,12 +598,12 @@ def test_where_callable(self):
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, (df + 2).where((df + 2) > 8, (df + 2) + 10))
- def test_where_tz_values(self, tz_naive_fixture):
- df1 = DataFrame(
+ def test_where_tz_values(self, tz_naive_fixture, frame_or_series):
+ obj1 = DataFrame(
DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture),
columns=["date"],
)
- df2 = DataFrame(
+ obj2 = DataFrame(
DatetimeIndex(["20150103", "20150104", "20150105"], tz=tz_naive_fixture),
columns=["date"],
)
@@ -612,8 +612,14 @@ def test_where_tz_values(self, tz_naive_fixture):
DatetimeIndex(["20150101", "20150102", "20150105"], tz=tz_naive_fixture),
columns=["date"],
)
- result = df1.where(mask, df2)
- tm.assert_frame_equal(exp, result)
+ if frame_or_series is Series:
+ obj1 = obj1["date"]
+ obj2 = obj2["date"]
+ mask = mask["date"]
+ exp = exp["date"]
+
+ result = obj1.where(mask, obj2)
+ tm.assert_equal(exp, result)
def test_df_where_change_dtype(self):
# GH#16979
@@ -759,18 +765,18 @@ def test_where_none_nan_coerce():
tm.assert_frame_equal(result, expected)
-def test_where_non_keyword_deprecation():
+def test_where_non_keyword_deprecation(frame_or_series):
# GH 41485
- s = DataFrame(range(5))
+ obj = frame_or_series(range(5))
msg = (
"In a future version of pandas all arguments of "
- "DataFrame.where except for the arguments 'cond' "
+ f"{frame_or_series.__name__}.where except for the arguments 'cond' "
"and 'other' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
- result = s.where(s > 1, 10, False)
- expected = DataFrame([10, 10, 2, 3, 4])
- tm.assert_frame_equal(expected, result)
+ result = obj.where(obj > 1, 10, False)
+ expected = frame_or_series([10, 10, 2, 3, 4])
+ tm.assert_equal(expected, result)
def test_where_columns_casting():
diff --git a/pandas/tests/frame/methods/test_transpose.py b/pandas/tests/frame/methods/test_transpose.py
index 62537d37a8c11..59de1ab0c1ce9 100644
--- a/pandas/tests/frame/methods/test_transpose.py
+++ b/pandas/tests/frame/methods/test_transpose.py
@@ -5,12 +5,25 @@
from pandas import (
DataFrame,
+ DatetimeIndex,
date_range,
)
import pandas._testing as tm
class TestTranspose:
+ def test_transpose_empty_preserves_datetimeindex(self):
+ # GH#41382
+ df = DataFrame(index=DatetimeIndex([]))
+
+ expected = DatetimeIndex([], dtype="datetime64[ns]", freq=None)
+
+ result1 = df.T.sum().index
+ result2 = df.sum(axis=1).index
+
+ tm.assert_index_equal(result1, expected)
+ tm.assert_index_equal(result2, expected)
+
def test_transpose_tzaware_1col_single_tz(self):
# GH#26825
dti = date_range("2016-04-05 04:30", periods=3, tz="UTC")
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index 2aea2cc9b37cd..e46eed05caa86 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -10,21 +10,6 @@
class TestDatetimeIndex:
- def test_datetimeindex_transpose_empty_df(self):
- """
- Regression test for:
- https://github.com/pandas-dev/pandas/issues/41382
- """
- df = DataFrame(index=pd.DatetimeIndex([]))
-
- expected = pd.DatetimeIndex([], dtype="datetime64[ns]", freq=None)
-
- result1 = df.T.sum().index
- result2 = df.sum(axis=1).index
-
- tm.assert_index_equal(result1, expected)
- tm.assert_index_equal(result2, expected)
-
def test_indexing_with_datetime_tz(self):
# GH#8260
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 53ed840f9cc72..77c04ae34ea5f 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -480,7 +480,6 @@ def test_floating_index_doc_example(self):
s = Series(range(5), index=index)
assert s[3] == 2
assert s.loc[3] == 2
- assert s.loc[3] == 2
assert s.iloc[3] == 3
def test_floating_misc(self, indexer_sl):
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index b8c53c7b59239..6484ac1f8a8e2 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -270,17 +270,15 @@ def test_iloc_non_integer_raises(self, index, columns, index_vals, column_vals):
with pytest.raises(IndexError, match=msg):
df.iloc[index_vals, column_vals]
- @pytest.mark.parametrize("dims", [1, 2])
- def test_iloc_getitem_invalid_scalar(self, dims):
+ def test_iloc_getitem_invalid_scalar(self, frame_or_series):
# GH 21982
- if dims == 1:
- s = Series(np.arange(10))
- else:
- s = DataFrame(np.arange(100).reshape(10, 10))
+ obj = DataFrame(np.arange(100).reshape(10, 10))
+ if frame_or_series is Series:
+ obj = obj[0]
with pytest.raises(TypeError, match="Cannot index by location index"):
- s.iloc["a"]
+ obj.iloc["a"]
def test_iloc_array_not_mutating_negative_indices(self):
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 01407f1f9bae7..4604fad019eca 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -710,10 +710,10 @@ def assert_slices_equivalent(l_slc, i_slc):
assert_slices_equivalent(SLC[idx[13] : idx[9] : -1], SLC[13:8:-1])
assert_slices_equivalent(SLC[idx[9] : idx[13] : -1], SLC[:0])
- def test_slice_with_zero_step_raises(self, indexer_sl):
- ser = Series(np.arange(20), index=_mklbl("A", 20))
+ def test_slice_with_zero_step_raises(self, indexer_sl, frame_or_series):
+ obj = frame_or_series(np.arange(20), index=_mklbl("A", 20))
with pytest.raises(ValueError, match="slice step cannot be zero"):
- indexer_sl(ser)[::0]
+ indexer_sl(obj)[::0]
def test_loc_setitem_indexing_assignment_dict_already_exists(self):
index = Index([-5, 0, 5], name="z")
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index c7c575b479988..b82ecac37634e 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -368,6 +368,7 @@ def test_loc_to_fail(self):
with pytest.raises(KeyError, match=msg):
df.loc[[1, 2], [1, 2]]
+ def test_loc_to_fail2(self):
# GH 7496
# loc should not fallback
@@ -406,6 +407,7 @@ def test_loc_to_fail(self):
with pytest.raises(KeyError, match=msg):
s.loc[[-2]] = 0
+ def test_loc_to_fail3(self):
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([["a"], ["b"]], index=[1, 2], columns=["value"])
diff --git a/pandas/tests/series/indexing/test_mask.py b/pandas/tests/series/indexing/test_mask.py
index 30a9d925ed7e5..dc4fb530dbb52 100644
--- a/pandas/tests/series/indexing/test_mask.py
+++ b/pandas/tests/series/indexing/test_mask.py
@@ -1,11 +1,7 @@
import numpy as np
import pytest
-from pandas import (
- NA,
- Series,
- StringDtype,
-)
+from pandas import Series
import pandas._testing as tm
@@ -67,36 +63,3 @@ def test_mask_inplace():
rs = s.copy()
rs.mask(cond, -s, inplace=True)
tm.assert_series_equal(rs, s.mask(cond, -s))
-
-
-def test_mask_stringdtype():
- # GH 40824
- ser = Series(
- ["foo", "bar", "baz", NA],
- index=["id1", "id2", "id3", "id4"],
- dtype=StringDtype(),
- )
- filtered_ser = Series(["this", "that"], index=["id2", "id3"], dtype=StringDtype())
- filter_ser = Series([False, True, True, False])
- result = ser.mask(filter_ser, filtered_ser)
-
- expected = Series(
- [NA, "this", "that", NA],
- index=["id1", "id2", "id3", "id4"],
- dtype=StringDtype(),
- )
- tm.assert_series_equal(result, expected)
-
-
-def test_mask_pos_args_deprecation():
- # https://github.com/pandas-dev/pandas/issues/41485
- s = Series(range(5))
- expected = Series([-1, 1, -1, 3, -1])
- cond = s % 2 == 0
- msg = (
- r"In a future version of pandas all arguments of Series.mask except for "
- r"the arguments 'cond' and 'other' will be keyword-only"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = s.mask(cond, -1, False)
- tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py
index ed1ba11c5fd55..178928b91b621 100644
--- a/pandas/tests/series/indexing/test_where.py
+++ b/pandas/tests/series/indexing/test_where.py
@@ -144,20 +144,6 @@ def test_where():
tm.assert_series_equal(rs, expected)
-def test_where_non_keyword_deprecation():
- # GH 41485
- s = Series(range(5))
- msg = (
- "In a future version of pandas all arguments of "
- "Series.where except for the arguments 'cond' "
- "and 'other' will be keyword-only"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = s.where(s > 1, 10, False)
- expected = Series([10, 10, 2, 3, 4])
- tm.assert_series_equal(expected, result)
-
-
def test_where_error():
s = Series(np.random.randn(5))
cond = s > 0
@@ -397,72 +383,38 @@ def test_where_numeric_with_string():
assert w.dtype == "object"
-def test_where_timedelta_coerce():
- s = Series([1, 2], dtype="timedelta64[ns]")
+@pytest.mark.parametrize("dtype", ["timedelta64[ns]", "datetime64[ns]"])
+def test_where_datetimelike_coerce(dtype):
+ ser = Series([1, 2], dtype=dtype)
expected = Series([10, 10])
mask = np.array([False, False])
- rs = s.where(mask, [10, 10])
+ rs = ser.where(mask, [10, 10])
tm.assert_series_equal(rs, expected)
- rs = s.where(mask, 10)
+ rs = ser.where(mask, 10)
tm.assert_series_equal(rs, expected)
- rs = s.where(mask, 10.0)
+ rs = ser.where(mask, 10.0)
tm.assert_series_equal(rs, expected)
- rs = s.where(mask, [10.0, 10.0])
+ rs = ser.where(mask, [10.0, 10.0])
tm.assert_series_equal(rs, expected)
- rs = s.where(mask, [10.0, np.nan])
+ rs = ser.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype="object")
tm.assert_series_equal(rs, expected)
-def test_where_datetime_conversion():
- s = Series(date_range("20130102", periods=2))
- expected = Series([10, 10])
- mask = np.array([False, False])
-
- rs = s.where(mask, [10, 10])
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, 10)
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, 10.0)
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, [10.0, 10.0])
- tm.assert_series_equal(rs, expected)
-
- rs = s.where(mask, [10.0, np.nan])
- expected = Series([10, None], dtype="object")
- tm.assert_series_equal(rs, expected)
-
+def test_where_datetimetz():
# GH 15701
timestamps = ["2016-12-31 12:00:04+00:00", "2016-12-31 12:00:04.010000+00:00"]
- s = Series([Timestamp(t) for t in timestamps])
- rs = s.where(Series([False, True]))
- expected = Series([pd.NaT, s[1]])
+ ser = Series([Timestamp(t) for t in timestamps], dtype="datetime64[ns, UTC]")
+ rs = ser.where(Series([False, True]))
+ expected = Series([pd.NaT, ser[1]], dtype="datetime64[ns, UTC]")
tm.assert_series_equal(rs, expected)
-def test_where_dt_tz_values(tz_naive_fixture):
- ser1 = Series(
- pd.DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture)
- )
- ser2 = Series(
- pd.DatetimeIndex(["20160514", "20160515", "20160516"], tz=tz_naive_fixture)
- )
- mask = Series([True, True, False])
- result = ser1.where(mask, ser2)
- exp = Series(
- pd.DatetimeIndex(["20150101", "20150102", "20160516"], tz=tz_naive_fixture)
- )
- tm.assert_series_equal(exp, result)
-
-
def test_where_sparse():
# GH#17198 make sure we dont get an AttributeError for sp_index
ser = Series(pd.arrays.SparseArray([1, 2]))
@@ -478,14 +430,13 @@ def test_where_empty_series_and_empty_cond_having_non_bool_dtypes():
tm.assert_series_equal(result, ser)
-@pytest.mark.parametrize("klass", [Series, pd.DataFrame])
-def test_where_categorical(klass):
+def test_where_categorical(frame_or_series):
# https://github.com/pandas-dev/pandas/issues/18888
- exp = klass(
+ exp = frame_or_series(
pd.Categorical(["A", "A", "B", "B", np.nan], categories=["A", "B", "C"]),
dtype="category",
)
- df = klass(["A", "A", "B", "B", "C"], dtype="category")
+ df = frame_or_series(["A", "A", "B", "B", "C"], dtype="category")
res = df.where(df != "C")
tm.assert_equal(exp, res)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44215 | 2021-10-28T04:54:14Z | 2021-10-28T12:32:18Z | 2021-10-28T12:32:18Z | 2021-10-28T15:13:23Z |
TST: parametrize cumulative tests | diff --git a/pandas/tests/frame/test_cumulative.py b/pandas/tests/frame/test_cumulative.py
index 39714a4566494..5bd9c42612315 100644
--- a/pandas/tests/frame/test_cumulative.py
+++ b/pandas/tests/frame/test_cumulative.py
@@ -7,6 +7,7 @@
"""
import numpy as np
+import pytest
from pandas import (
DataFrame,
@@ -19,53 +20,22 @@ class TestDataFrameCumulativeOps:
# ---------------------------------------------------------------------
# Cumulative Operations - cumsum, cummax, ...
- def test_cumsum_corner(self):
- dm = DataFrame(np.arange(20).reshape(4, 5), index=range(4), columns=range(5))
- # TODO(wesm): do something with this?
- result = dm.cumsum() # noqa
-
- def test_cumsum(self, datetime_frame):
- datetime_frame.iloc[5:10, 0] = np.nan
- datetime_frame.iloc[10:15, 1] = np.nan
- datetime_frame.iloc[15:, 2] = np.nan
-
- # axis = 0
- cumsum = datetime_frame.cumsum()
- expected = datetime_frame.apply(Series.cumsum)
- tm.assert_frame_equal(cumsum, expected)
-
- # axis = 1
- cumsum = datetime_frame.cumsum(axis=1)
- expected = datetime_frame.apply(Series.cumsum, axis=1)
- tm.assert_frame_equal(cumsum, expected)
-
- # works
+ def test_cumulative_ops_smoke(self):
+ # it works
df = DataFrame({"A": np.arange(20)}, index=np.arange(20))
+ df.cummax()
+ df.cummin()
df.cumsum()
- # fix issue
- cumsum_xs = datetime_frame.cumsum(axis=1)
- assert np.shape(cumsum_xs) == np.shape(datetime_frame)
+ dm = DataFrame(np.arange(20).reshape(4, 5), index=range(4), columns=range(5))
+ # TODO(wesm): do something with this?
+ dm.cumsum()
- def test_cumprod(self, datetime_frame):
+ def test_cumprod_smoke(self, datetime_frame):
datetime_frame.iloc[5:10, 0] = np.nan
datetime_frame.iloc[10:15, 1] = np.nan
datetime_frame.iloc[15:, 2] = np.nan
- # axis = 0
- cumprod = datetime_frame.cumprod()
- expected = datetime_frame.apply(Series.cumprod)
- tm.assert_frame_equal(cumprod, expected)
-
- # axis = 1
- cumprod = datetime_frame.cumprod(axis=1)
- expected = datetime_frame.apply(Series.cumprod, axis=1)
- tm.assert_frame_equal(cumprod, expected)
-
- # fix issue
- cumprod_xs = datetime_frame.cumprod(axis=1)
- assert np.shape(cumprod_xs) == np.shape(datetime_frame)
-
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
@@ -76,53 +46,26 @@ def test_cumprod(self, datetime_frame):
df.cumprod(0)
df.cumprod(1)
- def test_cummin(self, datetime_frame):
- datetime_frame.iloc[5:10, 0] = np.nan
- datetime_frame.iloc[10:15, 1] = np.nan
- datetime_frame.iloc[15:, 2] = np.nan
-
- # axis = 0
- cummin = datetime_frame.cummin()
- expected = datetime_frame.apply(Series.cummin)
- tm.assert_frame_equal(cummin, expected)
-
- # axis = 1
- cummin = datetime_frame.cummin(axis=1)
- expected = datetime_frame.apply(Series.cummin, axis=1)
- tm.assert_frame_equal(cummin, expected)
-
- # it works
- df = DataFrame({"A": np.arange(20)}, index=np.arange(20))
- df.cummin()
-
- # fix issue
- cummin_xs = datetime_frame.cummin(axis=1)
- assert np.shape(cummin_xs) == np.shape(datetime_frame)
-
- def test_cummax(self, datetime_frame):
+ @pytest.mark.parametrize("method", ["cumsum", "cumprod", "cummin", "cummax"])
+ def test_cumulative_ops_match_series_apply(self, datetime_frame, method):
datetime_frame.iloc[5:10, 0] = np.nan
datetime_frame.iloc[10:15, 1] = np.nan
datetime_frame.iloc[15:, 2] = np.nan
# axis = 0
- cummax = datetime_frame.cummax()
- expected = datetime_frame.apply(Series.cummax)
- tm.assert_frame_equal(cummax, expected)
+ result = getattr(datetime_frame, method)()
+ expected = datetime_frame.apply(getattr(Series, method))
+ tm.assert_frame_equal(result, expected)
# axis = 1
- cummax = datetime_frame.cummax(axis=1)
- expected = datetime_frame.apply(Series.cummax, axis=1)
- tm.assert_frame_equal(cummax, expected)
-
- # it works
- df = DataFrame({"A": np.arange(20)}, index=np.arange(20))
- df.cummax()
+ result = getattr(datetime_frame, method)(axis=1)
+ expected = datetime_frame.apply(getattr(Series, method), axis=1)
+ tm.assert_frame_equal(result, expected)
- # fix issue
- cummax_xs = datetime_frame.cummax(axis=1)
- assert np.shape(cummax_xs) == np.shape(datetime_frame)
+ # fix issue TODO: GH ref?
+ assert np.shape(result) == np.shape(datetime_frame)
- def test_cumulative_ops_preserve_dtypes(self):
+ def test_cumsum_preserve_dtypes(self):
# GH#19296 dont incorrectly upcast to object
df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3.0], "C": [True, False, False]})
diff --git a/pandas/tests/series/test_cumulative.py b/pandas/tests/series/test_cumulative.py
index e070b86717503..74ab9376ed00f 100644
--- a/pandas/tests/series/test_cumulative.py
+++ b/pandas/tests/series/test_cumulative.py
@@ -5,7 +5,6 @@
--------
tests.frame.test_cumulative
"""
-from itertools import product
import numpy as np
import pytest
@@ -13,6 +12,13 @@
import pandas as pd
import pandas._testing as tm
+methods = {
+ "cumsum": np.cumsum,
+ "cumprod": np.cumprod,
+ "cummin": np.minimum.accumulate,
+ "cummax": np.maximum.accumulate,
+}
+
def _check_accum_op(name, series, check_dtype=True):
func = getattr(np, name)
@@ -37,130 +43,82 @@ def test_cumsum(self, datetime_series):
def test_cumprod(self, datetime_series):
_check_accum_op("cumprod", datetime_series)
- def test_cummin(self, datetime_series):
- tm.assert_numpy_array_equal(
- datetime_series.cummin().values,
- np.minimum.accumulate(np.array(datetime_series)),
- )
- ts = datetime_series.copy()
- ts[::2] = np.NaN
- result = ts.cummin()[1::2]
- expected = np.minimum.accumulate(ts.dropna())
+ @pytest.mark.parametrize("method", ["cummin", "cummax"])
+ def test_cummin_cummax(self, datetime_series, method):
+ ufunc = methods[method]
- result.index = result.index._with_freq(None)
- tm.assert_series_equal(result, expected)
+ result = getattr(datetime_series, method)().values
+ expected = ufunc(np.array(datetime_series))
- def test_cummax(self, datetime_series):
- tm.assert_numpy_array_equal(
- datetime_series.cummax().values,
- np.maximum.accumulate(np.array(datetime_series)),
- )
+ tm.assert_numpy_array_equal(result, expected)
ts = datetime_series.copy()
ts[::2] = np.NaN
- result = ts.cummax()[1::2]
- expected = np.maximum.accumulate(ts.dropna())
+ result = getattr(ts, method)()[1::2]
+ expected = ufunc(ts.dropna())
result.index = result.index._with_freq(None)
tm.assert_series_equal(result, expected)
- @pytest.mark.parametrize("tz", [None, "US/Pacific"])
- def test_cummin_datetime64(self, tz):
- s = pd.Series(
- pd.to_datetime(
- ["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
- ).tz_localize(tz)
- )
-
- expected = pd.Series(
- pd.to_datetime(
- ["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-1"]
- ).tz_localize(tz)
- )
- result = s.cummin(skipna=True)
+ @pytest.mark.parametrize(
+ "ts",
+ [
+ pd.Timedelta(0),
+ pd.Timestamp("1999-12-31"),
+ pd.Timestamp("1999-12-31").tz_localize("US/Pacific"),
+ ],
+ )
+ def test_cummin_cummax_datetimelike(self, ts):
+ # with ts==pd.Timedelta(0), we are testing td64; with naive Timestamp
+ # we are testing datetime64[ns]; with Timestamp[US/Pacific]
+ # we are testing dt64tz
+ tdi = pd.to_timedelta(["NaT", "2 days", "NaT", "1 days", "NaT", "3 days"])
+ ser = pd.Series(tdi + ts)
+
+ exp_tdi = pd.to_timedelta(["NaT", "2 days", "NaT", "2 days", "NaT", "3 days"])
+ expected = pd.Series(exp_tdi + ts)
+ result = ser.cummax(skipna=True)
tm.assert_series_equal(expected, result)
- expected = pd.Series(
- pd.to_datetime(
- ["NaT", "2000-1-2", "2000-1-2", "2000-1-1", "2000-1-1", "2000-1-1"]
- ).tz_localize(tz)
- )
- result = s.cummin(skipna=False)
+ exp_tdi = pd.to_timedelta(["NaT", "2 days", "NaT", "1 days", "NaT", "1 days"])
+ expected = pd.Series(exp_tdi + ts)
+ result = ser.cummin(skipna=True)
tm.assert_series_equal(expected, result)
- @pytest.mark.parametrize("tz", [None, "US/Pacific"])
- def test_cummax_datetime64(self, tz):
- s = pd.Series(
- pd.to_datetime(
- ["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
- ).tz_localize(tz)
+ exp_tdi = pd.to_timedelta(
+ ["NaT", "2 days", "2 days", "2 days", "2 days", "3 days"]
)
-
- expected = pd.Series(
- pd.to_datetime(
- ["NaT", "2000-1-2", "NaT", "2000-1-2", "NaT", "2000-1-3"]
- ).tz_localize(tz)
- )
- result = s.cummax(skipna=True)
+ expected = pd.Series(exp_tdi + ts)
+ result = ser.cummax(skipna=False)
tm.assert_series_equal(expected, result)
- expected = pd.Series(
- pd.to_datetime(
- ["NaT", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-3"]
- ).tz_localize(tz)
+ exp_tdi = pd.to_timedelta(
+ ["NaT", "2 days", "2 days", "1 days", "1 days", "1 days"]
)
- result = s.cummax(skipna=False)
+ expected = pd.Series(exp_tdi + ts)
+ result = ser.cummin(skipna=False)
tm.assert_series_equal(expected, result)
- def test_cummin_timedelta64(self):
- s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
+ def test_cummethods_bool(self):
+ # GH#6270
+ # checking Series method vs the ufunc applied to the values
- expected = pd.Series(
- pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "1 min"])
- )
- result = s.cummin(skipna=True)
- tm.assert_series_equal(expected, result)
+ a = pd.Series([False, False, False, True, True, False, False])
+ c = pd.Series([False] * len(a))
- expected = pd.Series(
- pd.to_timedelta(["NaT", "2 min", "2 min", "1 min", "1 min", "1 min"])
- )
- result = s.cummin(skipna=False)
- tm.assert_series_equal(expected, result)
+ for method in methods:
+ for ser in [a, ~a, c, ~c]:
+ ufunc = methods[method]
- def test_cummax_timedelta64(self):
- s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
+ exp_vals = ufunc(ser.values)
+ expected = pd.Series(exp_vals)
- expected = pd.Series(
- pd.to_timedelta(["NaT", "2 min", "NaT", "2 min", "NaT", "3 min"])
- )
- result = s.cummax(skipna=True)
- tm.assert_series_equal(expected, result)
+ result = getattr(ser, method)()
- expected = pd.Series(
- pd.to_timedelta(["NaT", "2 min", "2 min", "2 min", "2 min", "3 min"])
- )
- result = s.cummax(skipna=False)
- tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(result, expected)
- def test_cummethods_bool(self):
- # GH#6270
+ def test_cummethods_bool_in_object_dtype(self):
- a = pd.Series([False, False, False, True, True, False, False])
- b = ~a
- c = pd.Series([False] * len(b))
- d = ~c
- methods = {
- "cumsum": np.cumsum,
- "cumprod": np.cumprod,
- "cummin": np.minimum.accumulate,
- "cummax": np.maximum.accumulate,
- }
- args = product((a, b, c, d), methods)
- for s, method in args:
- expected = pd.Series(methods[method](s.values))
- result = getattr(s, method)()
- tm.assert_series_equal(result, expected)
-
- e = pd.Series([False, True, np.nan, False])
+ ser = pd.Series([False, True, np.nan, False])
cse = pd.Series([0, 1, np.nan, 1], dtype=object)
cpe = pd.Series([False, 0, np.nan, 0])
cmin = pd.Series([False, False, np.nan, False])
@@ -168,5 +126,5 @@ def test_cummethods_bool(self):
expecteds = {"cumsum": cse, "cumprod": cpe, "cummin": cmin, "cummax": cmax}
for method in methods:
- res = getattr(e, method)()
+ res = getattr(ser, method)()
tm.assert_series_equal(res, expecteds[method])
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44214 | 2021-10-28T03:46:48Z | 2021-10-29T13:13:48Z | 2021-10-29T13:13:47Z | 2021-10-29T15:18:39Z |
BUG: Index/Series.to_frame not respecting explicit name=None | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 495669d316e95..24ba8a1272eec 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -642,6 +642,7 @@ Other
- Bug in :meth:`CustomBusinessMonthBegin.__add__` (:meth:`CustomBusinessMonthEnd.__add__`) not applying the extra ``offset`` parameter when beginning (end) of the target month is already a business day (:issue:`41356`)
- Bug in :meth:`RangeIndex.union` with another ``RangeIndex`` with matching (even) ``step`` and starts differing by strictly less than ``step / 2`` (:issue:`44019`)
- Bug in :meth:`RangeIndex.difference` with ``sort=None`` and ``step<0`` failing to sort (:issue:`44085`)
+- Bug in :meth:`Series.to_frame` and :meth:`Index.to_frame` ignoring the ``name`` argument when ``name=None`` is explicitly passed (:issue:`44212`)
.. ***DO NOT USE THIS SECTION***
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index e82bd61938f15..d44a25c2677d1 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1510,7 +1510,9 @@ def to_series(self, index=None, name: Hashable = None) -> Series:
return Series(self._values.copy(), index=index, name=name)
- def to_frame(self, index: bool = True, name: Hashable = None) -> DataFrame:
+ def to_frame(
+ self, index: bool = True, name: Hashable = lib.no_default
+ ) -> DataFrame:
"""
Create a DataFrame with a column containing the Index.
@@ -1561,7 +1563,7 @@ def to_frame(self, index: bool = True, name: Hashable = None) -> DataFrame:
"""
from pandas import DataFrame
- if name is None:
+ if name is lib.no_default:
name = self.name or 0
result = DataFrame({name: self._values.copy()})
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index e2f1a2d6a1e23..156488ca08102 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1684,7 +1684,7 @@ def unique(self, level=None):
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
- def to_frame(self, index: bool = True, name=None) -> DataFrame:
+ def to_frame(self, index: bool = True, name=lib.no_default) -> DataFrame:
"""
Create a DataFrame with the levels of the MultiIndex as columns.
@@ -1736,7 +1736,7 @@ def to_frame(self, index: bool = True, name=None) -> DataFrame:
"""
from pandas import DataFrame
- if name is not None:
+ if name is not lib.no_default:
if not is_list_like(name):
raise TypeError("'name' must be a list / sequence of column names.")
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 15715af05f904..2c6d4ed445394 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1317,7 +1317,7 @@ def repeat(self, repeats, axis=None) -> Series:
)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "level"])
- def reset_index(self, level=None, drop=False, name=None, inplace=False):
+ def reset_index(self, level=None, drop=False, name=lib.no_default, inplace=False):
"""
Generate a new DataFrame or Series with the index reset.
@@ -1427,6 +1427,9 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if drop:
+ if name is lib.no_default:
+ name = self.name
+
new_index = default_index(len(self))
if level is not None:
if not isinstance(level, (tuple, list)):
@@ -1448,6 +1451,14 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
"Cannot reset_index inplace on a Series to create a DataFrame"
)
else:
+ if name is lib.no_default:
+ # For backwards compatibility, keep columns as [0] instead of
+ # [None] when self.name is None
+ if self.name is None:
+ name = 0
+ else:
+ name = self.name
+
df = self.to_frame(name)
return df.reset_index(level=level, drop=drop)
@@ -1697,7 +1708,7 @@ def to_dict(self, into=dict):
into_c = com.standardize_mapping(into)
return into_c((k, maybe_box_native(v)) for k, v in self.items())
- def to_frame(self, name=None) -> DataFrame:
+ def to_frame(self, name: Hashable = lib.no_default) -> DataFrame:
"""
Convert Series to DataFrame.
@@ -1723,7 +1734,7 @@ def to_frame(self, name=None) -> DataFrame:
2 c
"""
columns: Index
- if name is None:
+ if name is lib.no_default:
name = self.name
if name is None:
# default to [0], same as we would get with DataFrame(self)
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 061e36e457443..ba47391513ed2 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -460,7 +460,11 @@ def _compute_plot_data(self):
label = self.label
if label is None and data.name is None:
label = "None"
- data = data.to_frame(name=label)
+ if label is None:
+ # We'll end up with columns of [0] instead of [None]
+ data = data.to_frame()
+ else:
+ data = data.to_frame(name=label)
elif self._kind in ("hist", "box"):
cols = self.columns if self.by is None else self.columns + self.by
data = data.loc[:, cols]
diff --git a/pandas/tests/indexes/datetimes/methods/test_to_frame.py b/pandas/tests/indexes/datetimes/methods/test_to_frame.py
index ec6254f52f4d5..80e8284abe031 100644
--- a/pandas/tests/indexes/datetimes/methods/test_to_frame.py
+++ b/pandas/tests/indexes/datetimes/methods/test_to_frame.py
@@ -1,5 +1,6 @@
from pandas import (
DataFrame,
+ Index,
date_range,
)
import pandas._testing as tm
@@ -12,3 +13,14 @@ def test_to_frame_datetime_tz(self):
result = idx.to_frame()
expected = DataFrame(idx, index=idx)
tm.assert_frame_equal(result, expected)
+
+ def test_to_frame_respects_none_name(self):
+ # GH#44212 if we explicitly pass name=None, then that should be respected,
+ # not changed to 0
+ idx = date_range(start="2019-01-01", end="2019-01-30", freq="D", tz="UTC")
+ result = idx.to_frame(name=None)
+ exp_idx = Index([None], dtype=object)
+ tm.assert_index_equal(exp_idx, result.columns)
+
+ result = idx.rename("foo").to_frame(name=None)
+ tm.assert_index_equal(exp_idx, result.columns)
diff --git a/pandas/tests/series/methods/test_to_frame.py b/pandas/tests/series/methods/test_to_frame.py
index 66e44f1a0caf0..55d49b8fbee70 100644
--- a/pandas/tests/series/methods/test_to_frame.py
+++ b/pandas/tests/series/methods/test_to_frame.py
@@ -1,11 +1,24 @@
from pandas import (
DataFrame,
+ Index,
Series,
)
import pandas._testing as tm
class TestToFrame:
+ def test_to_frame_respects_name_none(self):
+ # GH#44212 if we explicitly pass name=None, then that should be respected,
+ # not changed to 0
+ ser = Series(range(3))
+ result = ser.to_frame(None)
+
+ exp_index = Index([None], dtype=object)
+ tm.assert_index_equal(result.columns, exp_index)
+
+ result = ser.rename("foo").to_frame(None)
+ tm.assert_index_equal(result.columns, exp_index)
+
def test_to_frame(self, datetime_series):
datetime_series.name = None
rs = datetime_series.to_frame()
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44212 | 2021-10-27T23:23:00Z | 2021-10-30T15:02:18Z | 2021-10-30T15:02:18Z | 2021-10-30T15:09:53Z |
CLN/TST: address TODOs | diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index cf9820c3aa8f8..848e724949bc5 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -197,8 +197,8 @@ def shift(self, periods=1, fill_value=None, axis=0):
return self._from_backing_data(new_values)
def _validate_shift_value(self, fill_value):
- # TODO: after deprecation in datetimelikearraymixin is enforced,
- # we can remove this and ust validate_fill_value directly
+ # TODO(2.0): after deprecation in datetimelikearraymixin is enforced,
+ # we can remove this and use validate_fill_value directly
return self._validate_scalar(fill_value)
def __setitem__(self, key, value):
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 35e2dd25678e5..107cefdf31188 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -524,7 +524,6 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
self = self.copy() if copy else self
result = self._set_dtype(dtype)
- # TODO: consolidate with ndarray case?
elif isinstance(dtype, ExtensionDtype):
return super().astype(dtype, copy=copy)
diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py
index 6684e559b6f88..915e13bc3bbb2 100644
--- a/pandas/core/arrays/sparse/dtype.py
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -371,7 +371,7 @@ def _subtype_with_str(self):
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
# TODO for now only handle SparseDtypes and numpy dtypes => extend
- # with other compatibtle extension dtypes
+ # with other compatible extension dtypes
if any(
isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype)
for x in dtypes
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index 51af2cd732d09..f8716ca1bafe0 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -253,7 +253,6 @@ def _filter_nodes(superclass, all_nodes=_all_nodes):
assert not intersection, _msg
-# TODO: Python 3.6.2: replace Callable[..., None] with Callable[..., NoReturn]
def _node_not_implemented(node_name: str) -> Callable[..., None]:
"""
Return a function that raises a NotImplementedError with a passed node name.
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5afb19f1d91fe..2488048e30ccb 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5356,7 +5356,6 @@ def _replace_columnwise(
"""
Dispatch to Series.replace column-wise.
-
Parameters
----------
mapping : dict
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6895455a43160..8b2774fd0f1b3 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4974,7 +4974,7 @@ def _reindex_with_indexers(
if indexer is not None:
indexer = ensure_platform_int(indexer)
- # TODO: speed up on homogeneous DataFrame objects
+ # TODO: speed up on homogeneous DataFrame objects (see _reindex_multi)
new_data = new_data.reindex_indexer(
index,
indexer,
@@ -6420,7 +6420,7 @@ def fillna(
)
elif isinstance(value, ABCDataFrame) and self.ndim == 2:
- new_data = self.where(self.notna(), value)._data
+ new_data = self.where(self.notna(), value)._mgr
else:
raise ValueError(f"invalid fill value with a {type(value)}")
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 8a2ba69a61ed9..77281441c2ed2 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1797,7 +1797,7 @@ def count(self) -> Series | DataFrame:
is_series = data.ndim == 1
def hfunc(bvalues: ArrayLike) -> ArrayLike:
- # TODO(2DEA): reshape would not be necessary with 2D EAs
+ # TODO(EA2D): reshape would not be necessary with 2D EAs
if bvalues.ndim == 1:
# EA
masked = mask & ~isna(bvalues).reshape(1, -1)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 7e533233d5ecf..bc9f5c3243705 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -917,7 +917,7 @@ def setitem(self, indexer, value):
value = np.nan
# coerce if block dtype can store value
- values = self.values
+ values = cast(np.ndarray, self.values)
if not self._can_hold_element(value):
# current dtype cannot store value, coerce to common dtype
return self.coerce_to_target_dtype(value).setitem(indexer, value)
@@ -946,11 +946,7 @@ def setitem(self, indexer, value):
values[indexer] = value
else:
- # error: Argument 1 to "setitem_datetimelike_compat" has incompatible type
- # "Union[ndarray, ExtensionArray]"; expected "ndarray"
- value = setitem_datetimelike_compat(
- values, len(values[indexer]), value # type: ignore[arg-type]
- )
+ value = setitem_datetimelike_compat(values, len(values[indexer]), value)
values[indexer] = value
if transpose:
@@ -1729,8 +1725,7 @@ def is_view(self) -> bool:
def setitem(self, indexer, value):
if not self._can_hold_element(value):
- # TODO: general case needs casting logic.
- return self.astype(_dtype_obj).setitem(indexer, value)
+ return self.coerce_to_target_dtype(value).setitem(indexer, value)
values = self.values
if self.ndim > 1:
@@ -1751,7 +1746,6 @@ def putmask(self, mask, new) -> list[Block]:
return [self]
def where(self, other, cond, errors="raise") -> list[Block]:
- # TODO(EA2D): reshape unnecessary with 2D EAs
arr = self.values
cond = extract_bool_array(cond)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 9795e1f7141ee..65592be32b5ef 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4528,6 +4528,9 @@ def rename(
5 3
dtype: int64
"""
+ if axis is not None:
+ axis = self._get_axis_number(axis)
+
if callable(index) or is_dict_like(index):
return super().rename(
index, copy=copy, inplace=inplace, level=level, errors=errors
diff --git a/pandas/tests/arithmetic/common.py b/pandas/tests/arithmetic/common.py
index 649ad562307c0..6f4e35ad4dfb2 100644
--- a/pandas/tests/arithmetic/common.py
+++ b/pandas/tests/arithmetic/common.py
@@ -34,9 +34,18 @@ def assert_invalid_addsub_type(left, right, msg=None):
right - left
+def get_expected_box(box):
+ """
+ Get the box to use for 'expected' in a comparison operation.
+ """
+ if box in [Index, array]:
+ return np.ndarray
+ return box
+
+
def get_upcast_box(box, vector):
"""
- Given two box-types, find the one that takes priority
+ Given two box-types, find the one that takes priority.
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index 0d3f7dcaaf65b..e511c1bdaca9c 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -43,6 +43,7 @@
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
assert_invalid_comparison,
+ get_expected_box,
get_upcast_box,
)
@@ -59,9 +60,7 @@ def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
- xbox = (
- box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
- )
+ xbox = get_expected_box(box)
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
@@ -148,7 +147,7 @@ def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
- xbox = box if box not in [pd.Index, pd.array] else np.ndarray
+ xbox = get_expected_box(box)
ts = Timestamp.now(tz)
ser = Series([ts, NaT])
@@ -245,7 +244,7 @@ def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
# on older numpys (since they check object identity)
return
- xbox = box if box not in [pd.Index, pd.array] else np.ndarray
+ xbox = get_expected_box(box)
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
@@ -324,9 +323,7 @@ def test_timestamp_compare_series(self, left, right):
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
- xbox = (
- box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
- )
+ xbox = get_expected_box(box_with_array)
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
@@ -424,9 +421,7 @@ def test_dti_cmp_nat(self, dtype, box_with_array):
# on older numpys (since they check object identity)
return
- xbox = (
- box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
- )
+ xbox = get_expected_box(box_with_array)
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
@@ -662,7 +657,7 @@ def test_scalar_comparison_tzawareness(
box = box_with_array
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
- xbox = box if box not in [pd.Index, pd.array] else np.ndarray
+ xbox = get_expected_box(box)
dtarr = tm.box_expected(dti, box_with_array)
if op in [operator.eq, operator.ne]:
@@ -2283,7 +2278,7 @@ def test_sub_dti_dti(self):
# cleanup, box-parametrization, and de-duplication
@pytest.mark.parametrize("op", [operator.add, operator.sub])
- def test_timedelta64_equal_timedelta_supported_ops(self, op):
+ def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array):
ser = Series(
[
Timestamp("20130301"),
@@ -2292,6 +2287,7 @@ def test_timedelta64_equal_timedelta_supported_ops(self, op):
Timestamp("20130228 21:00:00"),
]
)
+ obj = box_with_array(ser)
intervals = ["D", "h", "m", "s", "us"]
@@ -2302,10 +2298,10 @@ def timedelta64(*args):
for d, h, m, s, us in product(*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
- lhs = op(ser, nptd)
- rhs = op(ser, pytd)
+ lhs = op(obj, nptd)
+ rhs = op(obj, pytd)
- tm.assert_series_equal(lhs, rhs)
+ tm.assert_equal(lhs, rhs)
def test_ops_nat_mixed_datetime64_timedelta64(self):
# GH#11349
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py
index 7d215c940c031..0c42be517b798 100644
--- a/pandas/tests/arithmetic/test_period.py
+++ b/pandas/tests/arithmetic/test_period.py
@@ -25,7 +25,10 @@
import pandas._testing as tm
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
-from pandas.tests.arithmetic.common import assert_invalid_comparison
+from pandas.tests.arithmetic.common import (
+ assert_invalid_comparison,
+ get_expected_box,
+)
# ------------------------------------------------------------------
# Comparisons
@@ -38,9 +41,7 @@ class TestPeriodArrayLikeComparisons:
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
- xbox = (
- box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
- )
+ xbox = get_expected_box(box_with_array)
pi = period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
@@ -77,11 +78,10 @@ def test_compare_invalid_listlike(self, box_with_array, other):
@pytest.mark.parametrize("other_box", [list, np.array, lambda x: x.astype(object)])
def test_compare_object_dtype(self, box_with_array, other_box):
+ xbox = get_expected_box(box_with_array)
pi = period_range("2000", periods=5)
parr = tm.box_expected(pi, box_with_array)
- xbox = np.ndarray if box_with_array in [pd.Index, pd.array] else box_with_array
-
other = other_box(pi)
expected = np.array([True, True, True, True, True])
@@ -187,9 +187,7 @@ def test_pi_cmp_period(self):
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
- xbox = (
- box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
- )
+ xbox = get_expected_box(box_with_array)
pi = period_range("2000-01-01", periods=10, freq="D")
@@ -210,7 +208,7 @@ def test_parr_cmp_period_scalar2(self, box_with_array):
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
- xbox = np.ndarray if box_with_array in [pd.Index, pd.array] else box_with_array
+ xbox = get_expected_box(box_with_array)
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
@@ -249,7 +247,7 @@ def test_parr_cmp_period_scalar(self, freq, box_with_array):
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
- xbox = np.ndarray if box_with_array in [pd.Index, pd.array] else box_with_array
+ xbox = get_expected_box(box_with_array)
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index a2b7d93884a4e..7765c29ee59c8 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -1198,7 +1198,7 @@ def test_td64arr_addsub_integer_array_no_freq(self, box_with_array):
# ------------------------------------------------------------------
# Operations with timedelta-like others
- def test_td64arr_add_td64_array(self, box_with_array):
+ def test_td64arr_add_sub_td64_array(self, box_with_array):
box = box_with_array
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
@@ -1213,20 +1213,11 @@ def test_td64arr_add_td64_array(self, box_with_array):
result = tdarr + tdi
tm.assert_equal(result, expected)
- def test_td64arr_sub_td64_array(self, box_with_array):
- box = box_with_array
- dti = pd.date_range("2016-01-01", periods=3)
- tdi = dti - dti.shift(1)
- tdarr = tdi.values
-
- expected = 0 * tdi
- tdi = tm.box_expected(tdi, box)
- expected = tm.box_expected(expected, box)
-
+ expected_sub = 0 * tdi
result = tdi - tdarr
- tm.assert_equal(result, expected)
+ tm.assert_equal(result, expected_sub)
result = tdarr - tdi
- tm.assert_equal(result, expected)
+ tm.assert_equal(result, expected_sub)
def test_td64arr_add_sub_tdi(self, box_with_array, names):
# GH#17250 make sure result dtype is correct
@@ -1263,37 +1254,25 @@ def test_td64arr_add_sub_tdi(self, box_with_array, names):
tm.assert_equal(result, -expected)
assert_dtype(result, "timedelta64[ns]")
- def test_td64arr_add_sub_td64_nat(self, box_with_array):
- # GH#23320 special handling for timedelta64("NaT")
+ @pytest.mark.parametrize("tdnat", [np.timedelta64("NaT"), NaT])
+ def test_td64arr_add_sub_td64_nat(self, box_with_array, tdnat):
+ # GH#18808, GH#23320 special handling for timedelta64("NaT")
box = box_with_array
tdi = TimedeltaIndex([NaT, Timedelta("1s")])
- other = np.timedelta64("NaT")
expected = TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
- result = obj + other
+ result = obj + tdnat
tm.assert_equal(result, expected)
- result = other + obj
+ result = tdnat + obj
tm.assert_equal(result, expected)
- result = obj - other
+ result = obj - tdnat
tm.assert_equal(result, expected)
- result = other - obj
+ result = tdnat - obj
tm.assert_equal(result, expected)
- def test_td64arr_sub_NaT(self, box_with_array):
- # GH#18808
- box = box_with_array
- ser = Series([NaT, Timedelta("1s")])
- expected = Series([NaT, NaT], dtype="timedelta64[ns]")
-
- ser = tm.box_expected(ser, box)
- expected = tm.box_expected(expected, box)
-
- res = ser - NaT
- tm.assert_equal(res, expected)
-
def test_td64arr_add_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as + is now numeric
# GH#10699 for Tick cases
@@ -1328,7 +1307,7 @@ def test_td64arr_sub_timedeltalike(self, two_hours, box_with_array):
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
- def test_td64arr_add_offset_index(self, names, box_with_array):
+ def test_td64arr_add_sub_offset_index(self, names, box_with_array):
# GH#18849, GH#19744
box = box_with_array
exname = get_expected_name(box, names)
@@ -1340,8 +1319,13 @@ def test_td64arr_add_offset_index(self, names, box_with_array):
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer", name=exname
)
+ expected_sub = TimedeltaIndex(
+ [tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname
+ )
+
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
+ expected_sub = tm.box_expected(expected_sub, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
@@ -1351,10 +1335,12 @@ def test_td64arr_add_offset_index(self, names, box_with_array):
res2 = other + tdi
tm.assert_equal(res2, expected)
- # TODO: combine with test_td64arr_add_offset_index by parametrizing
- # over second box?
- def test_td64arr_add_offset_array(self, box_with_array):
- # GH#18849
+ with tm.assert_produces_warning(PerformanceWarning):
+ res_sub = tdi - other
+ tm.assert_equal(res_sub, expected_sub)
+
+ def test_td64arr_add_sub_offset_array(self, box_with_array):
+ # GH#18849, GH#18824
box = box_with_array
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
other = np.array([offsets.Hour(n=1), offsets.Minute(n=-2)])
@@ -1362,6 +1348,9 @@ def test_td64arr_add_offset_array(self, box_with_array):
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer"
)
+ expected_sub = TimedeltaIndex(
+ [tdi[n] - other[n] for n in range(len(tdi))], freq="infer"
+ )
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
@@ -1374,41 +1363,10 @@ def test_td64arr_add_offset_array(self, box_with_array):
res2 = other + tdi
tm.assert_equal(res2, expected)
- def test_td64arr_sub_offset_index(self, names, box_with_array):
- # GH#18824, GH#19744
- box = box_with_array
- xbox = box if box not in [tm.to_array, pd.array] else pd.Index
- exname = get_expected_name(box, names)
-
- tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
- other = pd.Index([offsets.Hour(n=1), offsets.Minute(n=-2)], name=names[1])
-
- expected = TimedeltaIndex(
- [tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname
- )
-
- tdi = tm.box_expected(tdi, box)
- expected = tm.box_expected(expected, xbox)
-
+ expected_sub = tm.box_expected(expected_sub, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
- res = tdi - other
- tm.assert_equal(res, expected)
-
- def test_td64arr_sub_offset_array(self, box_with_array):
- # GH#18824
- tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
- other = np.array([offsets.Hour(n=1), offsets.Minute(n=-2)])
-
- expected = TimedeltaIndex(
- [tdi[n] - other[n] for n in range(len(tdi))], freq="infer"
- )
-
- tdi = tm.box_expected(tdi, box_with_array)
- expected = tm.box_expected(expected, box_with_array)
-
- with tm.assert_produces_warning(PerformanceWarning):
- res = tdi - other
- tm.assert_equal(res, expected)
+ res_sub = tdi - other
+ tm.assert_equal(res_sub, expected_sub)
def test_td64arr_with_offset_series(self, names, box_with_array):
# GH#18849
@@ -1968,10 +1926,12 @@ def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
def test_td64arr_mul_too_short_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
- msg = (
- "cannot use operands with types dtype|"
- "Cannot multiply with unequal lengths|"
- "Unable to coerce to Series"
+ msg = "|".join(
+ [
+ "cannot use operands with types dtype",
+ "Cannot multiply with unequal lengths",
+ "Unable to coerce to Series",
+ ]
)
with pytest.raises(TypeError, match=msg):
# length check before dtype check
@@ -2079,12 +2039,14 @@ def test_td64arr_div_numeric_array(
result = tdser / vector
tm.assert_equal(result, expected)
- pattern = (
- "true_divide'? cannot use operands|"
- "cannot perform __div__|"
- "cannot perform __truediv__|"
- "unsupported operand|"
- "Cannot divide"
+ pattern = "|".join(
+ [
+ "true_divide'? cannot use operands",
+ "cannot perform __div__",
+ "cannot perform __truediv__",
+ "unsupported operand",
+ "Cannot divide",
+ ]
)
with pytest.raises(TypeError, match=pattern):
vector / tdser
diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py
index ac181af7875b5..7efd3bdb6920a 100644
--- a/pandas/tests/extension/base/getitem.py
+++ b/pandas/tests/extension/base/getitem.py
@@ -233,9 +233,9 @@ def test_getitem_integer_with_missing_raises(self, data, idx):
# FIXME: dont leave commented-out
# TODO: this raises KeyError about labels not found (it tries label-based)
# import pandas._testing as tm
- # s = pd.Series(data, index=[tm.rands(4) for _ in range(len(data))])
+ # ser = pd.Series(data, index=[tm.rands(4) for _ in range(len(data))])
# with pytest.raises(ValueError, match=msg):
- # s[idx]
+ # ser[idx]
def test_getitem_slice(self, data):
# getitem[slice] should return an array
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py
index 344b0be20fc7b..c9c0a4de60a46 100644
--- a/pandas/tests/extension/test_integer.py
+++ b/pandas/tests/extension/test_integer.py
@@ -115,15 +115,16 @@ def check_opname(self, s, op_name, other, exc=None):
def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
if exc is None:
sdtype = tm.get_dtype(s)
- if sdtype.is_unsigned_integer and (op_name == "__rsub__"):
- # TODO see https://github.com/pandas-dev/pandas/issues/22023
- pytest.skip("unsigned subtraction gives negative values")
if (
hasattr(other, "dtype")
and not is_extension_array_dtype(other.dtype)
and is_integer_dtype(other.dtype)
+ and sdtype.is_unsigned_integer
):
+ # TODO: comment below is inaccurate; other can be int8, int16, ...
+ # and the trouble is that e.g. if s is UInt8 and other is int8,
+ # then result is UInt16
# other is np.int64 and would therefore always result in
# upcasting, so keeping other as same numpy_dtype
other = other.astype(sdtype.numpy_dtype)
@@ -133,20 +134,9 @@ def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
if op_name in ("__rtruediv__", "__truediv__", "__div__"):
expected = expected.fillna(np.nan).astype("Float64")
- elif op_name.startswith("__r"):
- # TODO reverse operators result in object dtype
- # see https://github.com/pandas-dev/pandas/issues/22024
- expected = expected.astype(sdtype)
- result = result.astype(sdtype)
else:
# combine method result in 'biggest' (int64) dtype
expected = expected.astype(sdtype)
- pass
-
- if (op_name == "__rpow__") and isinstance(other, pd.Series):
- # TODO pow on Int arrays gives different result with NA
- # see https://github.com/pandas-dev/pandas/issues/22022
- result = result.fillna(1)
self.assert_equal(result, expected)
else:
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 1ea436520bf20..74460a75d7b63 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -1165,12 +1165,10 @@ def test_getitem_boolean_indexing_mixed(self):
def test_type_error_multiindex(self):
# See gh-12218
- df = DataFrame(
- columns=["i", "c", "x", "y"],
- data=[[0, 0, 1, 2], [1, 0, 3, 4], [0, 1, 1, 2], [1, 1, 3, 4]],
+ mi = MultiIndex.from_product([["x", "y"], [0, 1]], names=[None, "c"])
+ dg = DataFrame(
+ [[1, 1, 2, 2], [3, 3, 4, 4]], columns=mi, index=Index([0, 1], name="i")
)
- dg = df.pivot_table(index="i", columns="c", values=["x", "y"])
- # TODO: Is this test for pivot_table?
with pytest.raises(TypeError, match="unhashable type"):
dg[:, 0]
diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py
index f68bde2188e67..2313afcae607a 100644
--- a/pandas/tests/indexes/test_any_index.py
+++ b/pandas/tests/indexes/test_any_index.py
@@ -1,7 +1,5 @@
"""
Tests that can be parametrized over _any_ Index object.
-
-TODO: consider using hypothesis for these.
"""
import re
diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py
index 669bbe23af559..fc8abb83ed302 100644
--- a/pandas/tests/indexes/timedeltas/test_indexing.py
+++ b/pandas/tests/indexes/timedeltas/test_indexing.py
@@ -248,8 +248,7 @@ def test_take_invalid_kwargs(self):
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode="clip")
- # TODO: This method came from test_timedelta; de-dup with version above
- def test_take2(self):
+ def test_take_equiv_getitem(self):
tds = ["1day 02:00:00", "1 day 04:00:00", "1 day 10:00:00"]
idx = timedelta_range(start="1d", end="2d", freq="H", name="idx")
expected = TimedeltaIndex(tds, freq=None, name="idx")
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 95da68510be6b..d9bd8f6809c73 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -239,7 +239,7 @@ def test_repr_truncation(self):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
- # FIXME: remove in future version after deprecation cycle
+ # TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 6d269a27e2656..f74cab9ed04da 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -6,7 +6,6 @@
timedelta,
)
import pickle
-import sys
import numpy as np
import pytest
@@ -1526,14 +1525,8 @@ def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
with tm.ensure_clean(return_filelike=True) as path:
plt.savefig(path)
- # GH18439
- # this is supported only in Python 3 pickle since
- # pickle in Python2 doesn't support instancemethod pickling
- # TODO(statsmodels 0.10.0): Remove the statsmodels check
- # https://github.com/pandas-dev/pandas/issues/24088
- # https://github.com/statsmodels/statsmodels/issues/4772
- if "statsmodels" not in sys.modules:
- with tm.ensure_clean(return_filelike=True) as path:
- pickle.dump(fig, path)
+ # GH18439, GH#24088, statsmodels#4772
+ with tm.ensure_clean(return_filelike=True) as path:
+ pickle.dump(fig, path)
finally:
plt.close(fig)
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index 2c5c977624470..b8291471225d7 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -137,14 +137,14 @@ def test_getitem_setitem_datetimeindex():
tm.assert_series_equal(result, expected)
# But we do not give datetimes a pass on tzawareness compat
- # TODO: do the same with Timestamps and dt64
msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
naive = datetime(1990, 1, 1, 4)
- with tm.assert_produces_warning(FutureWarning):
- # GH#36148 will require tzawareness compat
- result = ts[naive]
- expected = ts[4]
- assert result == expected
+ for key in [naive, Timestamp(naive), np.datetime64(naive, "ns")]:
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#36148 will require tzawareness compat
+ result = ts[key]
+ expected = ts[4]
+ assert result == expected
result = ts.copy()
with tm.assert_produces_warning(FutureWarning):
diff --git a/pandas/tests/series/methods/test_between.py b/pandas/tests/series/methods/test_between.py
index 9c11b71e4bee6..d81017633ff76 100644
--- a/pandas/tests/series/methods/test_between.py
+++ b/pandas/tests/series/methods/test_between.py
@@ -11,8 +11,6 @@
class TestBetween:
-
- # TODO: redundant with test_between_datetime_values?
def test_between(self):
series = Series(date_range("1/1/2000", periods=10))
left, right = series[[2, 7]]
@@ -21,7 +19,7 @@ def test_between(self):
expected = (series >= left) & (series <= right)
tm.assert_series_equal(result, expected)
- def test_between_datetime_values(self):
+ def test_between_datetime_object_dtype(self):
ser = Series(bdate_range("1/1/2000", periods=20).astype(object))
ser[::2] = np.nan
diff --git a/pandas/tests/series/methods/test_rename.py b/pandas/tests/series/methods/test_rename.py
index 2930c657eb3b2..a78abfa63cff4 100644
--- a/pandas/tests/series/methods/test_rename.py
+++ b/pandas/tests/series/methods/test_rename.py
@@ -1,6 +1,7 @@
from datetime import datetime
import numpy as np
+import pytest
from pandas import (
Index,
@@ -65,10 +66,9 @@ def test_rename_axis_supported(self):
ser = Series(range(5))
ser.rename({}, axis=0)
ser.rename({}, axis="index")
- # FIXME: dont leave commented-out
- # TODO: clean up shared index validation
- # with pytest.raises(ValueError, match="No axis named 5"):
- # ser.rename({}, axis=5)
+
+ with pytest.raises(ValueError, match="No axis named 5"):
+ ser.rename({}, axis=5)
def test_rename_inplace(self, datetime_series):
renamer = lambda x: x.strftime("%Y%m%d")
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 4d1c75da72399..103130484f0e1 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -251,6 +251,7 @@ def test_add_corner_cases(self, datetime_series):
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
+ def test_add_float_plus_int(self, datetime_series):
# float + int
int_ts = datetime_series.astype(int)[:-5]
added = datetime_series + int_ts
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44211 | 2021-10-27T22:26:58Z | 2021-10-28T00:39:02Z | 2021-10-28T00:39:02Z | 2021-11-07T18:05:10Z |
REF: clarify missing.interpolate_2d is inplace | diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 87fcf54ed684b..45c55fc6bd3f2 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -743,8 +743,10 @@ def fillna(
elif method is not None:
msg = "fillna with 'method' requires high memory usage."
warnings.warn(msg, PerformanceWarning)
- filled = interpolate_2d(np.asarray(self), method=method, limit=limit)
- return type(self)(filled, fill_value=self.fill_value)
+ new_values = np.asarray(self)
+ # interpolate_2d modifies new_values inplace
+ interpolate_2d(new_values, method=method, limit=limit)
+ return type(self)(new_values, fill_value=self.fill_value)
else:
new_values = np.where(isna(self.sp_values), value, self.sp_values)
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 9e85cbec0f299..68ac7b4968d15 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -214,9 +214,11 @@ def interpolate_array_2d(
coerce: bool = False,
downcast: str | None = None,
**kwargs,
-):
+) -> np.ndarray:
"""
Wrapper to dispatch to either interpolate_2d or _interpolate_2d_with_fill.
+
+ Returned ndarray has same dtype as 'data'.
"""
try:
m = clean_fill_method(method)
@@ -228,13 +230,14 @@ def interpolate_array_2d(
# similar to validate_fillna_kwargs
raise ValueError("Cannot pass both fill_value and method")
- interp_values = interpolate_2d(
+ interpolate_2d(
data,
method=m,
axis=axis,
limit=limit,
limit_area=limit_area,
)
+ interp_values = data
else:
assert index is not None # for mypy
@@ -687,14 +690,14 @@ def _cubicspline_interpolate(xi, yi, x, axis=0, bc_type="not-a-knot", extrapolat
def _interpolate_with_limit_area(
- values: ArrayLike, method: str, limit: int | None, limit_area: str | None
-) -> ArrayLike:
+ values: np.ndarray, method: str, limit: int | None, limit_area: str | None
+) -> None:
"""
Apply interpolation and limit_area logic to values along a to-be-specified axis.
Parameters
----------
- values: array-like
+ values: np.ndarray
Input array.
method: str
Interpolation method. Could be "bfill" or "pad"
@@ -703,10 +706,9 @@ def _interpolate_with_limit_area(
limit_area: str
Limit area for interpolation. Can be "inside" or "outside"
- Returns
- -------
- values: array-like
- Interpolated array.
+ Notes
+ -----
+ Modifies values in-place.
"""
invalid = isna(values)
@@ -719,7 +721,7 @@ def _interpolate_with_limit_area(
if last is None:
last = len(values)
- values = interpolate_2d(
+ interpolate_2d(
values,
method=method,
limit=limit,
@@ -732,23 +734,23 @@ def _interpolate_with_limit_area(
values[invalid] = np.nan
- return values
+ return
def interpolate_2d(
- values,
+ values: np.ndarray,
method: str = "pad",
axis: Axis = 0,
limit: int | None = None,
limit_area: str | None = None,
-):
+) -> None:
"""
Perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result.
Parameters
----------
- values: array-like
+ values: np.ndarray
Input array.
method: str, default "pad"
Interpolation method. Could be "bfill" or "pad"
@@ -759,13 +761,12 @@ def interpolate_2d(
limit_area: str, optional
Limit area for interpolation. Can be "inside" or "outside"
- Returns
- -------
- values: array-like
- Interpolated array.
+ Notes
+ -----
+ Modifies values in-place.
"""
if limit_area is not None:
- return np.apply_along_axis(
+ np.apply_along_axis(
partial(
_interpolate_with_limit_area,
method=method,
@@ -775,11 +776,11 @@ def interpolate_2d(
axis,
values,
)
+ return
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
- ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0")
@@ -787,20 +788,19 @@ def interpolate_2d(
method = clean_fill_method(method)
tvalues = transf(values)
+
+ # _pad_2d and _backfill_2d both modify tvalues inplace
if method == "pad":
- result, _ = _pad_2d(tvalues, limit=limit)
+ _pad_2d(tvalues, limit=limit)
else:
- result, _ = _backfill_2d(tvalues, limit=limit)
-
- result = transf(result)
- # reshape back
- if ndim == 1:
- result = result[0]
+ _backfill_2d(tvalues, limit=limit)
- return result
+ return
-def _fillna_prep(values, mask: np.ndarray | None = None) -> np.ndarray:
+def _fillna_prep(
+ values, mask: npt.NDArray[np.bool_] | None = None
+) -> npt.NDArray[np.bool_]:
# boilerplate for _pad_1d, _backfill_1d, _pad_2d, _backfill_2d
if mask is None:
@@ -834,8 +834,8 @@ def new_func(values, limit=None, mask=None):
def _pad_1d(
values: np.ndarray,
limit: int | None = None,
- mask: np.ndarray | None = None,
-) -> tuple[np.ndarray, np.ndarray]:
+ mask: npt.NDArray[np.bool_] | None = None,
+) -> tuple[np.ndarray, npt.NDArray[np.bool_]]:
mask = _fillna_prep(values, mask)
algos.pad_inplace(values, mask, limit=limit)
return values, mask
@@ -845,15 +845,15 @@ def _pad_1d(
def _backfill_1d(
values: np.ndarray,
limit: int | None = None,
- mask: np.ndarray | None = None,
-) -> tuple[np.ndarray, np.ndarray]:
+ mask: npt.NDArray[np.bool_] | None = None,
+) -> tuple[np.ndarray, npt.NDArray[np.bool_]]:
mask = _fillna_prep(values, mask)
algos.backfill_inplace(values, mask, limit=limit)
return values, mask
@_datetimelike_compat
-def _pad_2d(values, limit=None, mask=None):
+def _pad_2d(values: np.ndarray, limit=None, mask: npt.NDArray[np.bool_] | None = None):
mask = _fillna_prep(values, mask)
if np.all(values.shape):
@@ -865,7 +865,7 @@ def _pad_2d(values, limit=None, mask=None):
@_datetimelike_compat
-def _backfill_2d(values, limit=None, mask=None):
+def _backfill_2d(values, limit=None, mask: npt.NDArray[np.bool_] | None = None):
mask = _fillna_prep(values, mask)
if np.all(values.shape):
@@ -890,7 +890,7 @@ def clean_reindex_fill_method(method):
return clean_fill_method(method, allow_nearest=True)
-def _interp_limit(invalid: np.ndarray, fw_limit, bw_limit):
+def _interp_limit(invalid: npt.NDArray[np.bool_], fw_limit, bw_limit):
"""
Get indexers of values that won't be filled
because they exceed the limits.
@@ -955,7 +955,7 @@ def inner(invalid, limit):
return f_idx & b_idx
-def _rolling_window(a: np.ndarray, window: int):
+def _rolling_window(a: npt.NDArray[np.bool_], window: int) -> npt.NDArray[np.bool_]:
"""
[True, True, False, True, False], 2 ->
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44210 | 2021-10-27T21:57:11Z | 2021-10-28T01:13:15Z | 2021-10-28T01:13:15Z | 2021-10-28T02:56:43Z |
REF: Remove ArrowStringArray.fillna | diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index a83cfa89c4728..4e3bd05d2cc8d 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -33,7 +33,6 @@
pa_version_under4p0,
)
from pandas.util._decorators import doc
-from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.common import (
is_array_like,
@@ -48,7 +47,6 @@
)
from pandas.core.dtypes.missing import isna
-from pandas.core import missing
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays.base import ExtensionArray
from pandas.core.arrays.boolean import BooleanDtype
@@ -339,55 +337,6 @@ def _as_pandas_scalar(self, arrow_scalar: pa.Scalar):
else:
return scalar
- def fillna(self, value=None, method=None, limit=None):
- """
- Fill NA/NaN values using the specified method.
-
- Parameters
- ----------
- value : scalar, array-like
- If a scalar value is passed it is used to fill all missing values.
- Alternatively, an array-like 'value' can be given. It's expected
- that the array-like have the same length as 'self'.
- method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
- Method to use for filling holes in reindexed Series
- pad / ffill: propagate last valid observation forward to next valid
- backfill / bfill: use NEXT valid observation to fill gap.
- limit : int, default None
- If method is specified, this is the maximum number of consecutive
- NaN values to forward/backward fill. In other words, if there is
- a gap with more than this number of consecutive NaNs, it will only
- be partially filled. If method is not specified, this is the
- maximum number of entries along the entire axis where NaNs will be
- filled.
-
- Returns
- -------
- ExtensionArray
- With NA/NaN filled.
- """
- value, method = validate_fillna_kwargs(value, method)
-
- mask = self.isna()
- value = missing.check_value_size(value, mask, len(self))
-
- if mask.any():
- if method is not None:
- func = missing.get_fill_func(method)
- new_values, _ = func(
- self.to_numpy("object"),
- limit=limit,
- mask=mask,
- )
- new_values = self._from_sequence(new_values)
- else:
- # fill with value
- new_values = self.copy()
- new_values[mask] = value
- else:
- new_values = self.copy()
- return new_values
-
def _reduce(self, name: str, skipna: bool = True, **kwargs):
if name in ["min", "max"]:
return getattr(self, name)(skipna=skipna)
| Equivalent to base class method. | https://api.github.com/repos/pandas-dev/pandas/pulls/44209 | 2021-10-27T21:51:27Z | 2021-10-28T03:03:05Z | 2021-10-28T03:03:05Z | 2021-10-28T03:07:10Z |
Backport PR #44204 on branch 1.3.x (CI: Python Dev build) | diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml
index 4fe58ad4d60e9..96d5542451f06 100644
--- a/.github/workflows/python-dev.yml
+++ b/.github/workflows/python-dev.yml
@@ -17,7 +17,6 @@ env:
PANDAS_CI: 1
PATTERN: "not slow and not network and not clipboard"
COVERAGE: true
- PYTEST_TARGET: pandas
jobs:
build:
@@ -26,12 +25,13 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-latest, macOS-latest, windows-latest]
+ pytest_target: ["pandas/tests/[a-h]*", "pandas/tests/[i-z]*"]
name: actions-310-dev
- timeout-minutes: 60
+ timeout-minutes: 80
concurrency:
- group: ${{ github.ref }}-${{ matrix.os }}-dev
+ group: ${{ github.ref }}-${{ matrix.os }}-${{ matrix.pytest_target }}-dev
cancel-in-progress: ${{github.event_name == 'pull_request'}}
steps:
@@ -63,6 +63,8 @@ jobs:
python -c "import pandas; pandas.show_versions();"
- name: Test with pytest
+ env:
+ PYTEST_TARGET: ${{ matrix.pytest_target }}
shell: bash
run: |
ci/run_tests.sh
| Backport PR #44204: CI: Python Dev build | https://api.github.com/repos/pandas-dev/pandas/pulls/44208 | 2021-10-27T21:32:34Z | 2021-10-28T01:13:33Z | 2021-10-28T01:13:33Z | 2021-10-28T01:13:34Z |
CLN: remove unused algos_common_helper functions | diff --git a/pandas/_libs/algos_common_helper.pxi.in b/pandas/_libs/algos_common_helper.pxi.in
index 4242a76dcc3b7..c6338216eb7a2 100644
--- a/pandas/_libs/algos_common_helper.pxi.in
+++ b/pandas/_libs/algos_common_helper.pxi.in
@@ -36,19 +36,19 @@ def ensure_object(object arr):
# name, c_type, dtype
dtypes = [('float64', 'FLOAT64', 'float64'),
- ('float32', 'FLOAT32', 'float32'),
+ # ('float32', 'FLOAT32', 'float32'), # disabling bc unused
('int8', 'INT8', 'int8'),
('int16', 'INT16', 'int16'),
('int32', 'INT32', 'int32'),
('int64', 'INT64', 'int64'),
- ('uint8', 'UINT8', 'uint8'),
- ('uint16', 'UINT16', 'uint16'),
- ('uint32', 'UINT32', 'uint32'),
- ('uint64', 'UINT64', 'uint64'),
- ('complex64', 'COMPLEX64', 'complex64'),
- ('complex128', 'COMPLEX128', 'complex128')
- # ('platform_int', 'INT', 'int_'),
- # ('object', 'OBJECT', 'object_'),
+ # Disabling uint and complex dtypes because we do not use them
+ # (and compiling them increases wheel size)
+ # ('uint8', 'UINT8', 'uint8'),
+ # ('uint16', 'UINT16', 'uint16'),
+ # ('uint32', 'UINT32', 'uint32'),
+ # ('uint64', 'UINT64', 'uint64'),
+ # ('complex64', 'COMPLEX64', 'complex64'),
+ # ('complex128', 'COMPLEX128', 'complex128')
]
def get_dispatch(dtypes):
diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py
index e1a29f0dbe395..87d55702b33e0 100644
--- a/pandas/core/array_algos/take.py
+++ b/pandas/core/array_algos/take.py
@@ -284,6 +284,9 @@ def _get_take_nd_function_cached(
if func is not None:
return func
+ # We get here with string, uint, float16, and complex dtypes that could
+ # potentially be handled in algos_take_helper.
+ # Also a couple with (M8[ns], object) and (m8[ns], object)
tup = (out_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 2e8641c281661..0788ecdd8b4b5 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -65,7 +65,6 @@
_is_scipy_sparse = None
ensure_float64 = algos.ensure_float64
-ensure_float32 = algos.ensure_float32
def ensure_float(arr):
@@ -92,13 +91,10 @@ def ensure_float(arr):
return arr
-ensure_uint64 = algos.ensure_uint64
ensure_int64 = algos.ensure_int64
ensure_int32 = algos.ensure_int32
ensure_int16 = algos.ensure_int16
ensure_int8 = algos.ensure_int8
-ensure_complex64 = algos.ensure_complex64
-ensure_complex128 = algos.ensure_complex128
ensure_platform_int = algos.ensure_platform_int
ensure_object = algos.ensure_object
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44207 | 2021-10-27T20:18:02Z | 2021-10-28T13:18:19Z | 2021-10-28T13:18:19Z | 2021-10-28T15:08:30Z |
TST: enable 2D tests for Categorical | diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index c7f587b35f557..9c43e3714c332 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -6,7 +6,6 @@
from shutil import get_terminal_size
from typing import (
TYPE_CHECKING,
- Any,
Hashable,
Sequence,
TypeVar,
@@ -38,10 +37,6 @@
Dtype,
NpDtype,
Ordered,
- PositionalIndexer2D,
- PositionalIndexerTuple,
- ScalarIndexer,
- SequenceIndexer,
Shape,
npt,
type_t,
@@ -102,7 +97,10 @@
take_nd,
unique1d,
)
-from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
+from pandas.core.arrays._mixins import (
+ NDArrayBackedExtensionArray,
+ ravel_compat,
+)
from pandas.core.base import (
ExtensionArray,
NoNewAttributesMixin,
@@ -113,7 +111,6 @@
extract_array,
sanitize_array,
)
-from pandas.core.indexers import deprecate_ndim_indexing
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.core.strings.object_array import ObjectStringArrayMixin
@@ -1484,6 +1481,7 @@ def _validate_scalar(self, fill_value):
# -------------------------------------------------------------
+ @ravel_compat
def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
"""
The numpy array interface.
@@ -1934,7 +1932,10 @@ def __iter__(self):
"""
Returns an Iterator over the values of this Categorical.
"""
- return iter(self._internal_get_values().tolist())
+ if self.ndim == 1:
+ return iter(self._internal_get_values().tolist())
+ else:
+ return (self[n] for n in range(len(self)))
def __contains__(self, key) -> bool:
"""
@@ -2053,27 +2054,6 @@ def __repr__(self) -> str:
# ------------------------------------------------------------------
- @overload
- def __getitem__(self, key: ScalarIndexer) -> Any:
- ...
-
- @overload
- def __getitem__(
- self: CategoricalT,
- key: SequenceIndexer | PositionalIndexerTuple,
- ) -> CategoricalT:
- ...
-
- def __getitem__(self: CategoricalT, key: PositionalIndexer2D) -> CategoricalT | Any:
- """
- Return an item.
- """
- result = super().__getitem__(key)
- if getattr(result, "ndim", 0) > 1:
- result = result._ndarray
- deprecate_ndim_indexing(result)
- return result
-
def _validate_listlike(self, value):
# NB: here we assume scalar-like tuples have already been excluded
value = extract_array(value, extract_numpy=True)
@@ -2311,7 +2291,19 @@ def _concat_same_type(
) -> CategoricalT:
from pandas.core.dtypes.concat import union_categoricals
- return union_categoricals(to_concat)
+ result = union_categoricals(to_concat)
+
+ # in case we are concatenating along axis != 0, we need to reshape
+ # the result from union_categoricals
+ first = to_concat[0]
+ if axis >= first.ndim:
+ raise ValueError
+ if axis == 1:
+ if not all(len(x) == len(first) for x in to_concat):
+ raise ValueError
+ # TODO: Will this get contiguity wrong?
+ result = result.reshape(-1, len(to_concat), order="F")
+ return result
# ------------------------------------------------------------------
@@ -2699,6 +2691,11 @@ def _get_codes_for_values(values, categories: Index) -> np.ndarray:
"""
dtype_equal = is_dtype_equal(values.dtype, categories.dtype)
+ if values.ndim > 1:
+ flat = values.ravel()
+ codes = _get_codes_for_values(flat, categories)
+ return codes.reshape(values.shape)
+
if isinstance(categories.dtype, ExtensionDtype) and is_object_dtype(values):
# Support inferring the correct extension dtype from an array of
# scalar objects. e.g.
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index e9dc63e9bd903..6a1a9512bc036 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -303,3 +303,14 @@ def test_not_equal_with_na(self, categories):
class TestParsing(base.BaseParsingTests):
pass
+
+
+class Test2DCompat(base.Dim2CompatTests):
+ def test_repr_2d(self, data):
+ # Categorical __repr__ doesn't include "Categorical", so we need
+ # to special-case
+ res = repr(data.reshape(1, -1))
+ assert res.count("\nCategories") == 1
+
+ res = repr(data.reshape(-1, 1))
+ assert res.count("\nCategories") == 1
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44206 | 2021-10-27T20:17:32Z | 2021-11-10T01:47:02Z | 2021-11-10T01:47:02Z | 2021-11-10T02:13:30Z |
REF: de-special-case Block._maybe_downcast | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index bc9f5c3243705..151709301f71f 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -510,54 +510,24 @@ def _maybe_downcast(self, blocks: list[Block], downcast=None) -> list[Block]:
[blk.convert(datetime=True, numeric=False) for blk in blocks]
)
- # no need to downcast our float
- # unless indicated
- if downcast is None and self.dtype.kind in ["f", "c", "m", "M"]:
- # passing "infer" to maybe_downcast_to_dtype (via self.downcast)
- # would be a no-op, so we can short-circuit
+ if downcast is None:
+ return blocks
+ if downcast is False:
+ # turn if off completely
+ # TODO: not reached, deprecate in favor of downcast=None
return blocks
- return extend_blocks([b.downcast(downcast) for b in blocks])
+ return extend_blocks([b._downcast_2d(downcast) for b in blocks])
@final
- def downcast(self, dtypes=None) -> list[Block]:
- """try to downcast each item to the dict of dtypes if present"""
- # turn it off completely
- if dtypes is False:
- return [self]
-
- values = self.values
-
- if self.ndim == 1:
-
- # try to cast all non-floats here
- if dtypes is None:
- dtypes = "infer"
-
- nv = maybe_downcast_to_dtype(values, dtypes)
- return [self.make_block(nv)]
-
- # ndim > 1
- if dtypes is None:
- return [self]
-
- if not (dtypes == "infer" or isinstance(dtypes, dict)):
- raise ValueError(
- "downcast must have a dictionary or 'infer' as its argument"
- )
- elif dtypes != "infer":
- raise AssertionError("dtypes as dict is not supported yet")
-
- return self._downcast_2d()
-
@maybe_split
- def _downcast_2d(self) -> list[Block]:
+ def _downcast_2d(self, dtype) -> list[Block]:
"""
downcast specialized to 2D case post-validation.
Refactored to allow use of maybe_split.
"""
- new_values = maybe_downcast_to_dtype(self.values, dtype="infer")
+ new_values = maybe_downcast_to_dtype(self.values, dtype=dtype)
return [self.make_block(new_values)]
@final
@@ -1098,8 +1068,8 @@ def interpolate(
**kwargs,
)
- nbs = [self.make_block_same_class(interp_values)]
- return self._maybe_downcast(nbs, downcast)
+ nb = self.make_block_same_class(interp_values)
+ return nb._maybe_downcast([nb], downcast)
def take_nd(
self,
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44205 | 2021-10-27T19:46:35Z | 2021-10-28T12:32:35Z | 2021-10-28T12:32:35Z | 2021-10-28T15:13:51Z |
CI: Python Dev build | diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml
index 4fe58ad4d60e9..96d5542451f06 100644
--- a/.github/workflows/python-dev.yml
+++ b/.github/workflows/python-dev.yml
@@ -17,7 +17,6 @@ env:
PANDAS_CI: 1
PATTERN: "not slow and not network and not clipboard"
COVERAGE: true
- PYTEST_TARGET: pandas
jobs:
build:
@@ -26,12 +25,13 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-latest, macOS-latest, windows-latest]
+ pytest_target: ["pandas/tests/[a-h]*", "pandas/tests/[i-z]*"]
name: actions-310-dev
- timeout-minutes: 60
+ timeout-minutes: 80
concurrency:
- group: ${{ github.ref }}-${{ matrix.os }}-dev
+ group: ${{ github.ref }}-${{ matrix.os }}-${{ matrix.pytest_target }}-dev
cancel-in-progress: ${{github.event_name == 'pull_request'}}
steps:
@@ -63,6 +63,8 @@ jobs:
python -c "import pandas; pandas.show_versions();"
- name: Test with pytest
+ env:
+ PYTEST_TARGET: ${{ matrix.pytest_target }}
shell: bash
run: |
ci/run_tests.sh
| xref #44173
Ref: https://github.com/pandas-dev/pandas/issues/44173#issuecomment-952289166
Means we will have 3 more jobs (hopefully can reduce this back..) - but all can run in parallel.
https://github.com/pandas-dev/pandas/runs/4026818833?check_suite_focus=true | https://api.github.com/repos/pandas-dev/pandas/pulls/44204 | 2021-10-27T19:32:58Z | 2021-10-27T21:32:06Z | 2021-10-27T21:32:06Z | 2021-11-07T16:41:10Z |
DOCS: Update pyarrow version requirement in "What's new in 1.4.0" | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 254a004a37c40..003289037996d 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -95,7 +95,7 @@ Validation now for ``caption`` arg (:issue:`43368`)
Multithreaded CSV reading with a new CSV Engine based on pyarrow
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-:func:`pandas.read_csv` now accepts ``engine="pyarrow"`` (requires at least ``pyarrow`` 0.17.0) as an argument, allowing for faster csv parsing on multicore machines
+:func:`pandas.read_csv` now accepts ``engine="pyarrow"`` (requires at least ``pyarrow`` 1.0.1) as an argument, allowing for faster csv parsing on multicore machines
with pyarrow installed. See the :doc:`I/O docs </user_guide/io>` for more info. (:issue:`23697`, :issue:`43706`)
.. _whatsnew_140.enhancements.window_rank:
| Updated the pyarrow version requirement from 0.17 to 1.0.1 in the "Multithreaded CSV reading" section, to make it in line with #44064.
Docs only, no functional changes.
| https://api.github.com/repos/pandas-dev/pandas/pulls/44202 | 2021-10-27T10:18:03Z | 2021-10-27T11:53:26Z | 2021-10-27T11:53:26Z | 2021-10-27T11:53:31Z |
BUG: Series[Interval[int64]] setitem Interval[float] | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 254a004a37c40..7b10ca64a9b3d 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -530,8 +530,10 @@ Indexing
- Bug in :meth:`Series.__setitem__` with object dtype when setting an array with matching size and dtype='datetime64[ns]' or dtype='timedelta64[ns]' incorrectly converting the datetime/timedeltas to integers (:issue:`43868`)
- Bug in :meth:`DataFrame.sort_index` where ``ignore_index=True`` was not being respected when the index was already sorted (:issue:`43591`)
- Bug in :meth:`Index.get_indexer_non_unique` when index contains multiple ``np.datetime64("NaT")`` and ``np.timedelta64("NaT")`` (:issue:`43869`)
+- Bug in setting a scalar :class:`Interval` value into a :class:`Series` with ``IntervalDtype`` when the scalar's sides are floats and the values' sides are integers (:issue:`44201`)
-
+
Missing
^^^^^^^
- Bug in :meth:`DataFrame.fillna` with limit and no method ignores axis='columns' or ``axis = 1`` (:issue:`40989`)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index de612b367f78f..06ec02794e578 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -48,6 +48,7 @@
is_1d_only_ea_obj,
is_dtype_equal,
is_extension_array_dtype,
+ is_interval_dtype,
is_list_like,
is_sparse,
is_string_dtype,
@@ -1440,7 +1441,21 @@ def putmask(self, mask, new) -> list[Block]:
# TODO(EA2D): unnecessary with 2D EAs
mask = mask.reshape(new_values.shape)
- new_values[mask] = new
+ try:
+ new_values[mask] = new
+ except TypeError:
+ if not is_interval_dtype(self.dtype):
+ # Discussion about what we want to support in the general
+ # case GH#39584
+ raise
+
+ blk = self.coerce_to_target_dtype(new)
+ if blk.dtype == _dtype_obj:
+ # For now at least, only support casting e.g.
+ # Interval[int64]->Interval[float64],
+ raise
+ return blk.putmask(mask, new)
+
nb = type(self)(new_values, placement=self._mgr_locs, ndim=self.ndim)
return [nb]
@@ -1477,12 +1492,8 @@ def setitem(self, indexer, value):
be a compatible shape.
"""
if not self._can_hold_element(value):
- # This is only relevant for DatetimeTZBlock, PeriodDtype, IntervalDtype,
- # which has a non-trivial `_can_hold_element`.
- # https://github.com/pandas-dev/pandas/issues/24020
- # Need a dedicated setitem until GH#24020 (type promotion in setitem
- # for extension arrays) is designed and implemented.
- return self.astype(_dtype_obj).setitem(indexer, value)
+ # see TestSetitemFloatIntervalWithIntIntervalValues
+ return self.coerce_to_target_dtype(value).setitem(indexer, value)
if isinstance(indexer, tuple):
# TODO(EA2D): not needed with 2D EAs
@@ -1642,6 +1653,15 @@ def where(self, other, cond, errors="raise") -> list[Block]:
# TODO: don't special-case
raise
+ if is_interval_dtype(self.dtype):
+ # TestSetitemFloatIntervalWithIntIntervalValues
+ blk = self.coerce_to_target_dtype(other)
+ if blk.dtype == _dtype_obj:
+ # For now at least only support casting e.g.
+ # Interval[int64]->Interval[float64]
+ raise
+ return blk.where(other, cond, errors)
+
result = type(self.values)._from_sequence(
np.where(cond, self.values, other), dtype=dtype
)
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index fe3495abd2fb0..a922a937ce9d3 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -11,6 +11,7 @@
DataFrame,
DatetimeIndex,
Index,
+ Interval,
IntervalIndex,
MultiIndex,
NaT,
@@ -928,6 +929,38 @@ def is_inplace(self, obj):
return obj.dtype.kind != "i"
+class TestSetitemFloatIntervalWithIntIntervalValues(SetitemCastingEquivalents):
+ # GH#44201 Cast to shared IntervalDtype rather than object
+
+ def test_setitem_example(self):
+ # Just a case here to make obvious what this test class is aimed at
+ idx = IntervalIndex.from_breaks(range(4))
+ obj = Series(idx)
+ val = Interval(0.5, 1.5)
+
+ obj[0] = val
+ assert obj.dtype == "Interval[float64, right]"
+
+ @pytest.fixture
+ def obj(self):
+ idx = IntervalIndex.from_breaks(range(4))
+ return Series(idx)
+
+ @pytest.fixture
+ def val(self):
+ return Interval(0.5, 1.5)
+
+ @pytest.fixture
+ def key(self):
+ return 0
+
+ @pytest.fixture
+ def expected(self, obj, val):
+ data = [val] + list(obj[1:])
+ idx = IntervalIndex(data, dtype="Interval[float64]")
+ return Series(idx)
+
+
def test_setitem_int_as_positional_fallback_deprecation():
# GH#42215 deprecated falling back to positional on __setitem__ with an
# int not contained in the index
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
Cast to common dtype instead of object, match IntervalIndex behavior. | https://api.github.com/repos/pandas-dev/pandas/pulls/44201 | 2021-10-27T04:28:11Z | 2021-10-27T18:24:06Z | 2021-10-27T18:24:06Z | 2021-10-27T19:49:25Z |
TST: added test_join_multiindex_dates | diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py
index 30118d20f67a9..3e06b96b1cb07 100644
--- a/pandas/tests/frame/methods/test_join.py
+++ b/pandas/tests/frame/methods/test_join.py
@@ -323,6 +323,20 @@ def test_join_multiindex_leftright(self):
tm.assert_frame_equal(df1.join(df2, how="right"), exp)
tm.assert_frame_equal(df2.join(df1, how="left"), exp[["value2", "value1"]])
+ def test_join_multiindex_dates(self):
+ # GH 33692
+ date = pd.Timestamp(2000, 1, 1).date()
+
+ # creates dataframes
+ df1 = DataFrame({"index_0": int(0), "date": date, "col1": [2]})
+ df2 = DataFrame({"col2": [3]})
+
+ multi_index = MultiIndex.from_tuples([(0, date)], names=["index_0", "date"])
+ df3 = DataFrame(index=multi_index, columns=["col3"], data=[4])
+
+ # if fails, raises error
+ df1.join([df2, df3])
+
def test_merge_join_different_levels(self):
# GH#9455
| - [ ] closes #33692
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
Added tests to make sure that joining dataframes with multiindices containing dates doesn't break.
| https://api.github.com/repos/pandas-dev/pandas/pulls/44200 | 2021-10-27T01:35:40Z | 2021-11-28T01:44:20Z | null | 2021-11-28T01:44:32Z |
CLN: remove no-op BlockManager.downcast | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c3ad87082c8ed..6895455a43160 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6359,9 +6359,6 @@ def fillna(
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
- # need to downcast here because of all of the transposes
- result._mgr = result._mgr.downcast()
-
return result
new_data = self._mgr.interpolate(
@@ -6415,9 +6412,6 @@ def fillna(
result = self.T.fillna(value=value, limit=limit).T
- # need to downcast here because of all of the transposes
- result._mgr = result._mgr.downcast()
-
new_data = result
else:
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py
index 9344aea8221d5..d1802afb7a2f1 100644
--- a/pandas/core/internals/array_manager.py
+++ b/pandas/core/internals/array_manager.py
@@ -388,9 +388,6 @@ def fillna(self: T, value, limit, inplace: bool, downcast) -> T:
"fillna", value=value, limit=limit, inplace=inplace, downcast=downcast
)
- def downcast(self: T) -> T:
- return self.apply_with_block("downcast")
-
def astype(self: T, dtype, copy: bool = False, errors: str = "raise") -> T:
return self.apply(astype_array_safe, dtype=dtype, copy=copy, errors=errors)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index de612b367f78f..85d715f37a05b 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -511,8 +511,9 @@ def _maybe_downcast(self, blocks: list[Block], downcast=None) -> list[Block]:
# no need to downcast our float
# unless indicated
- if downcast is None and self.dtype.kind in ["f", "m", "M"]:
- # TODO: complex? more generally, self._can_hold_na?
+ if downcast is None and self.dtype.kind in ["f", "c", "m", "M"]:
+ # passing "infer" to maybe_downcast_to_dtype (via self.downcast)
+ # would be a no-op, so we can short-circuit
return blocks
return extend_blocks([b.downcast(downcast) for b in blocks])
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 9b29216fa407b..991094f86c999 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -394,9 +394,6 @@ def fillna(self: T, value, limit, inplace: bool, downcast) -> T:
"fillna", value=value, limit=limit, inplace=inplace, downcast=downcast
)
- def downcast(self: T) -> T:
- return self.apply("downcast")
-
def astype(self: T, dtype, copy: bool = False, errors: str = "raise") -> T:
return self.apply("astype", dtype=dtype, copy=copy, errors=errors)
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 3a307ebd702ca..186135f598235 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -164,19 +164,6 @@ def test_nonzero(self):
with pytest.raises(ValueError, match=msg):
not obj1
- def test_downcast(self):
- # test close downcasting
-
- o = self._construct(shape=4, value=9, dtype=np.int64)
- result = o.copy()
- result._mgr = o._mgr.downcast()
- self._compare(result, o)
-
- o = self._construct(shape=4, value=9.5)
- result = o.copy()
- result._mgr = o._mgr.downcast()
- self._compare(result, o)
-
def test_constructor_compound_dtypes(self):
# see gh-5191
# Compound dtypes should raise NotImplementedError.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44198 | 2021-10-26T22:48:35Z | 2021-10-27T11:54:25Z | 2021-10-27T11:54:25Z | 2021-10-27T15:27:01Z |
Backport PR #44192 on branch 1.3.x (PERF: read_csv GH#44106) | diff --git a/doc/source/whatsnew/v1.3.5.rst b/doc/source/whatsnew/v1.3.5.rst
index 0f1997de2166a..ba9fcb5c1bfeb 100644
--- a/doc/source/whatsnew/v1.3.5.rst
+++ b/doc/source/whatsnew/v1.3.5.rst
@@ -14,7 +14,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
--
+- Fixed performance regression in :func:`read_csv` (:issue:`44106`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py
index ae62cc3b45578..8aedfb9b26f38 100644
--- a/pandas/io/parsers/c_parser_wrapper.py
+++ b/pandas/io/parsers/c_parser_wrapper.py
@@ -206,9 +206,10 @@ def _set_noconvert_columns(self):
"""
assert self.orig_names is not None
# error: Cannot determine type of 'names'
- col_indices = [
- self.orig_names.index(x) for x in self.names # type: ignore[has-type]
- ]
+
+ # much faster than using orig_names.index(x) xref GH#44106
+ names_dict = {x: i for i, x in enumerate(self.orig_names)}
+ col_indices = [names_dict[x] for x in self.names] # type: ignore[has-type]
# error: Cannot determine type of 'names'
noconvert_columns = self._set_noconvert_dtype_columns(
col_indices,
| Backport PR #44192: PERF: read_csv GH#44106 | https://api.github.com/repos/pandas-dev/pandas/pulls/44197 | 2021-10-26T20:27:16Z | 2021-10-27T01:25:34Z | 2021-10-27T01:25:34Z | 2021-10-27T01:25:34Z |
Fix series with none equals float series | diff --git a/doc/source/whatsnew/v1.3.5.rst b/doc/source/whatsnew/v1.3.5.rst
index ba9fcb5c1bfeb..589092c0dd7e3 100644
--- a/doc/source/whatsnew/v1.3.5.rst
+++ b/doc/source/whatsnew/v1.3.5.rst
@@ -14,6 +14,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
+- Fixed regression in :meth:`Series.equals` when comparing floats with dtype object to None (:issue:`44190`)
- Fixed performance regression in :func:`read_csv` (:issue:`44106`)
-
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index 90f409d371e6b..b77db2aec4a08 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -67,7 +67,7 @@ cpdef bint is_matching_na(object left, object right, bint nan_matches_none=False
elif left is NaT:
return right is NaT
elif util.is_float_object(left):
- if nan_matches_none and right is None:
+ if nan_matches_none and right is None and util.is_nan(left):
return True
return (
util.is_nan(left)
diff --git a/pandas/tests/series/methods/test_equals.py b/pandas/tests/series/methods/test_equals.py
index 052aef4ac1bab..22e27c271df88 100644
--- a/pandas/tests/series/methods/test_equals.py
+++ b/pandas/tests/series/methods/test_equals.py
@@ -125,3 +125,18 @@ def test_equals_none_vs_nan():
assert ser.equals(ser2)
assert Index(ser, dtype=ser.dtype).equals(Index(ser2, dtype=ser2.dtype))
assert ser.array.equals(ser2.array)
+
+
+def test_equals_None_vs_float():
+ # GH#44190
+ left = Series([-np.inf, np.nan, -1.0, 0.0, 1.0, 10 / 3, np.inf], dtype=object)
+ right = Series([None] * len(left))
+
+ # these series were found to be equal due to a bug, check that they are correctly
+ # found to not equal
+ assert not left.equals(right)
+ assert not right.equals(left)
+ assert not left.to_frame().equals(right.to_frame())
+ assert not right.to_frame().equals(left.to_frame())
+ assert not Index(left, dtype="object").equals(Index(right, dtype="object"))
+ assert not Index(right, dtype="object").equals(Index(left, dtype="object"))
| - [x] closes #44190
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44195 | 2021-10-26T18:16:19Z | 2021-10-29T13:17:25Z | 2021-10-29T13:17:25Z | 2021-10-29T13:17:58Z |
DEPR: warn on checks retained for fastparquet | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 57f07313ebdc4..a0f116c1f8f88 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -31,6 +31,7 @@
npt,
)
from pandas.util._decorators import cache_readonly
+from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
@@ -268,8 +269,19 @@ def make_block_same_class(
placement = self._mgr_locs
if values.dtype.kind in ["m", "M"]:
- # TODO: remove this once fastparquet has stopped relying on it
- values = ensure_wrapped_if_datetimelike(values)
+
+ new_values = ensure_wrapped_if_datetimelike(values)
+ if new_values is not values:
+ # TODO(2.0): remove once fastparquet has stopped relying on it
+ warnings.warn(
+ "In a future version, Block.make_block_same_class will "
+ "assume that datetime64 and timedelta64 ndarrays have "
+ "already been cast to DatetimeArray and TimedeltaArray, "
+ "respectively.",
+ DeprecationWarning,
+ stacklevel=find_stack_level(),
+ )
+ values = new_values
# We assume maybe_coerce_values has already been called
return type(self)(values, placement=placement, ndim=self.ndim)
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 991094f86c999..745cddee93479 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -27,6 +27,7 @@
)
from pandas.errors import PerformanceWarning
from pandas.util._decorators import cache_readonly
+from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import infer_dtype_from_scalar
@@ -907,7 +908,15 @@ def __init__(
f"number of axes ({self.ndim})"
)
if isinstance(block, DatetimeTZBlock) and block.values.ndim == 1:
- # TODO: remove once fastparquet no longer needs this
+ # TODO(2.0): remove once fastparquet no longer needs this
+ warnings.warn(
+ "In a future version, the BlockManager constructor "
+ "will assume that a DatetimeTZBlock with block.ndim==2 "
+ "has block.values.ndim == 2.",
+ DeprecationWarning,
+ stacklevel=find_stack_level(),
+ )
+
# error: Incompatible types in assignment (expression has type
# "Union[ExtensionArray, ndarray]", variable has type
# "DatetimeArray")
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
cc @mdurant | https://api.github.com/repos/pandas-dev/pandas/pulls/44193 | 2021-10-26T16:06:04Z | 2021-10-30T22:24:35Z | 2021-10-30T22:24:35Z | 2021-10-30T23:31:24Z |
PERF: read_csv GH#44106 | diff --git a/doc/source/whatsnew/v1.3.5.rst b/doc/source/whatsnew/v1.3.5.rst
index 0f1997de2166a..ba9fcb5c1bfeb 100644
--- a/doc/source/whatsnew/v1.3.5.rst
+++ b/doc/source/whatsnew/v1.3.5.rst
@@ -14,7 +14,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
--
+- Fixed performance regression in :func:`read_csv` (:issue:`44106`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py
index 7998fe57b58c8..32ca3aaeba6cc 100644
--- a/pandas/io/parsers/c_parser_wrapper.py
+++ b/pandas/io/parsers/c_parser_wrapper.py
@@ -205,9 +205,10 @@ def _set_noconvert_columns(self):
"""
assert self.orig_names is not None
# error: Cannot determine type of 'names'
- col_indices = [
- self.orig_names.index(x) for x in self.names # type: ignore[has-type]
- ]
+
+ # much faster than using orig_names.index(x) xref GH#44106
+ names_dict = {x: i for i, x in enumerate(self.orig_names)}
+ col_indices = [names_dict[x] for x in self.names] # type: ignore[has-type]
# error: Cannot determine type of 'names'
noconvert_columns = self._set_noconvert_dtype_columns(
col_indices,
| - [x] closes #44106
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
Using the example in #44106 I time 188.4s on master and 3.9s on this PR. | https://api.github.com/repos/pandas-dev/pandas/pulls/44192 | 2021-10-26T16:00:05Z | 2021-10-26T20:26:51Z | 2021-10-26T20:26:51Z | 2021-10-26T23:48:20Z |
Update generic.py, explanatory text of describe | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c3ad87082c8ed..b8e158b5f9e2e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9899,7 +9899,7 @@ def describe(
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
- ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
+ ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
datetime_is_numeric : bool, default False
| Usage of correct parameter ``exclude`` in description of ``exclude``-parameter of ``describe()``-function
| https://api.github.com/repos/pandas-dev/pandas/pulls/44191 | 2021-10-26T13:18:10Z | 2021-10-28T01:37:15Z | 2021-10-28T01:37:15Z | 2021-10-28T01:37:17Z |
:white_check_mark: TST: Update sync flake8 tests | diff --git a/scripts/tests/test_sync_flake8_versions.py b/scripts/tests/test_sync_flake8_versions.py
index d9b6dbe8c3f0a..21c3b743830ee 100644
--- a/scripts/tests/test_sync_flake8_versions.py
+++ b/scripts/tests/test_sync_flake8_versions.py
@@ -3,44 +3,6 @@
from ..sync_flake8_versions import get_revisions
-def test_wrong_yesqa_flake8(capsys):
- precommit_config = {
- "repos": [
- {
- "repo": "https://gitlab.com/pycqa/flake8",
- "rev": "0.1.1",
- "hooks": [
- {
- "id": "flake8",
- }
- ],
- },
- {
- "repo": "https://github.com/asottile/yesqa",
- "rev": "v1.2.2",
- "hooks": [
- {
- "id": "yesqa",
- "additional_dependencies": [
- "flake8==0.4.2",
- ],
- }
- ],
- },
- ]
- }
- environment = {
- "dependencies": [
- "flake8=0.1.1",
- ]
- }
- with pytest.raises(SystemExit, match=None):
- get_revisions(precommit_config, environment)
- result, _ = capsys.readouterr()
- expected = "flake8 in 'yesqa' does not match in 'flake8' from 'pre-commit'\n"
- assert result == expected
-
-
def test_wrong_env_flake8(capsys):
precommit_config = {
"repos": [
@@ -53,18 +15,6 @@ def test_wrong_env_flake8(capsys):
}
],
},
- {
- "repo": "https://github.com/asottile/yesqa",
- "rev": "v1.2.2",
- "hooks": [
- {
- "id": "yesqa",
- "additional_dependencies": [
- "flake8==0.4.2",
- ],
- }
- ],
- },
]
}
environment = {
@@ -81,52 +31,6 @@ def test_wrong_env_flake8(capsys):
assert result == expected
-def test_wrong_yesqa_add_dep(capsys):
- precommit_config = {
- "repos": [
- {
- "repo": "https://gitlab.com/pycqa/flake8",
- "rev": "0.1.1",
- "hooks": [
- {
- "id": "flake8",
- "additional_dependencies": [
- "flake8-bugs==1.1.1",
- ],
- }
- ],
- },
- {
- "repo": "https://github.com/asottile/yesqa",
- "rev": "v1.2.2",
- "hooks": [
- {
- "id": "yesqa",
- "additional_dependencies": [
- "flake8==0.4.2",
- "flake8-bugs>=1.1.1",
- ],
- }
- ],
- },
- ]
- }
- environment = {
- "dependencies": [
- "flake8=1.5.6",
- "flake8-bugs=1.1.1",
- ]
- }
- with pytest.raises(SystemExit, match=None):
- get_revisions(precommit_config, environment)
- result, _ = capsys.readouterr()
- expected = (
- "Mismatch of 'flake8-bugs' version between 'flake8' and 'yesqa' in "
- "'.pre-commit-config.yaml'\n"
- )
- assert result == expected
-
-
def test_wrong_env_add_dep(capsys):
precommit_config = {
"repos": [
|
- [x] related to #44177
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
I sent a PR yesterday #44177 which was aimed at adding anchors to the `pre-commit-config.yaml` file
However, somehow I forgot to also update the corresponding test so this PR should remove the references to `yesqa` dependencies checks
| https://api.github.com/repos/pandas-dev/pandas/pulls/44189 | 2021-10-26T10:25:32Z | 2021-10-26T11:06:16Z | 2021-10-26T11:06:16Z | 2021-10-26T12:02:28Z |
ENH: implement EA._where | diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 848e724949bc5..cbb029f62732a 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -320,7 +320,7 @@ def putmask(self, mask: np.ndarray, value) -> None:
np.putmask(self._ndarray, mask, value)
- def where(
+ def _where(
self: NDArrayBackedExtensionArrayT, mask: np.ndarray, value
) -> NDArrayBackedExtensionArrayT:
"""
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 5536a4665fd79..46b505e7384b4 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -1411,6 +1411,31 @@ def insert(self: ExtensionArrayT, loc: int, item) -> ExtensionArrayT:
return type(self)._concat_same_type([self[:loc], item_arr, self[loc:]])
+ def _where(
+ self: ExtensionArrayT, mask: npt.NDArray[np.bool_], value
+ ) -> ExtensionArrayT:
+ """
+ Analogue to np.where(mask, self, value)
+
+ Parameters
+ ----------
+ mask : np.ndarray[bool]
+ value : scalar or listlike
+
+ Returns
+ -------
+ same type as self
+ """
+ result = self.copy()
+
+ if is_list_like(value):
+ val = value[~mask]
+ else:
+ val = value
+
+ result[~mask] = val
+ return result
+
@classmethod
def _empty(cls, shape: Shape, dtype: ExtensionDtype):
"""
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 87fcf54ed684b..8260846ae7dc7 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -1305,6 +1305,13 @@ def to_dense(self) -> np.ndarray:
_internal_get_values = to_dense
+ def _where(self, mask, value):
+ # NB: may not preserve dtype, e.g. result may be Sparse[float64]
+ # while self is Sparse[int64]
+ naive_implementation = np.where(mask, self, value)
+ result = type(self)._from_sequence(naive_implementation)
+ return result
+
# ------------------------------------------------------------------------
# IO
# ------------------------------------------------------------------------
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index bc9f5c3243705..3015df95f6e1b 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -50,7 +50,6 @@
is_extension_array_dtype,
is_interval_dtype,
is_list_like,
- is_sparse,
is_string_dtype,
)
from pandas.core.dtypes.dtypes import (
@@ -1626,30 +1625,9 @@ def where(self, other, cond, errors="raise") -> list[Block]:
# for the type
other = self.dtype.na_value
- if is_sparse(self.values):
- # TODO(SparseArray.__setitem__): remove this if condition
- # We need to re-infer the type of the data after doing the
- # where, for cases where the subtypes don't match
- dtype = None
- else:
- dtype = self.dtype
-
- result = self.values.copy()
- icond = ~cond
- if lib.is_scalar(other):
- set_other = other
- else:
- set_other = other[icond]
try:
- result[icond] = set_other
- except (NotImplementedError, TypeError):
- # NotImplementedError for class not implementing `__setitem__`
- # TypeError for SparseArray, which implements just to raise
- # a TypeError
- if isinstance(result, Categorical):
- # TODO: don't special-case
- raise
-
+ result = self.values._where(cond, other)
+ except TypeError:
if is_interval_dtype(self.dtype):
# TestSetitemFloatIntervalWithIntIntervalValues
blk = self.coerce_to_target_dtype(other)
@@ -1658,10 +1636,7 @@ def where(self, other, cond, errors="raise") -> list[Block]:
# Interval[int64]->Interval[float64]
raise
return blk.where(other, cond, errors)
-
- result = type(self.values)._from_sequence(
- np.where(cond, self.values, other), dtype=dtype
- )
+ raise
return [self.make_block_same_class(result)]
@@ -1751,7 +1726,7 @@ def where(self, other, cond, errors="raise") -> list[Block]:
cond = extract_bool_array(cond)
try:
- res_values = arr.T.where(cond, other).T
+ res_values = arr.T._where(cond, other).T
except (ValueError, TypeError):
return Block.where(self, other, cond, errors=errors)
diff --git a/pandas/tests/indexes/categorical/test_indexing.py b/pandas/tests/indexes/categorical/test_indexing.py
index 798aa7188cb9a..6f8b18f449779 100644
--- a/pandas/tests/indexes/categorical/test_indexing.py
+++ b/pandas/tests/indexes/categorical/test_indexing.py
@@ -325,7 +325,7 @@ def test_where_non_categories(self):
msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(TypeError, match=msg):
# Test the Categorical method directly
- ci._data.where(mask, 2)
+ ci._data._where(mask, 2)
class TestContains:
diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py
index ed1ba11c5fd55..0adc1810a6c47 100644
--- a/pandas/tests/series/indexing/test_where.py
+++ b/pandas/tests/series/indexing/test_where.py
@@ -508,7 +508,7 @@ def test_where_datetimelike_categorical(tz_naive_fixture):
tm.assert_index_equal(res, dr)
# DatetimeArray.where
- res = lvals._data.where(mask, rvals)
+ res = lvals._data._where(mask, rvals)
tm.assert_datetime_array_equal(res, dr._data)
# Series.where
| The goal here is to avoid special-casing in ExtensionBlock.where | https://api.github.com/repos/pandas-dev/pandas/pulls/44187 | 2021-10-26T04:30:18Z | 2021-10-28T12:32:47Z | 2021-10-28T12:32:47Z | 2021-10-28T15:10:55Z |
PERF: improve efficiency of `BaseMaskedArray.__setitem__` | diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 291ad2b071665..eb5b478953dcb 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -85,7 +85,7 @@ def frame_apply(
args=None,
kwargs=None,
) -> FrameApply:
- """construct and return a row or column based frame apply object"""
+ """Construct and return a row- or column-based frame apply object."""
axis = obj._get_axis_number(axis)
klass: type[FrameApply]
if axis == 0:
@@ -693,7 +693,7 @@ def dtypes(self) -> Series:
return self.obj.dtypes
def apply(self) -> DataFrame | Series:
- """compute the results"""
+ """Compute the results."""
# dispatch to agg
if is_list_like(self.f):
return self.apply_multiple()
@@ -1011,7 +1011,7 @@ def result_columns(self) -> Index:
def wrap_results_for_axis(
self, results: ResType, res_index: Index
) -> DataFrame | Series:
- """return the results for the columns"""
+ """Return the results for the columns."""
result: DataFrame | Series
# we have requested to expand
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index c09d4486afcae..f7b80dc2d55df 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -365,6 +365,9 @@ def map_string(s):
def _coerce_to_array(self, value) -> tuple[np.ndarray, np.ndarray]:
return coerce_to_array(value)
+ def _validate_setitem_value(self, value):
+ return lib.is_bool(value)
+
@overload
def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
...
diff --git a/pandas/core/arrays/floating.py b/pandas/core/arrays/floating.py
index 1e7f1aff52d2e..9064245b04f55 100644
--- a/pandas/core/arrays/floating.py
+++ b/pandas/core/arrays/floating.py
@@ -270,6 +270,9 @@ def _from_sequence_of_strings(
def _coerce_to_array(self, value) -> tuple[np.ndarray, np.ndarray]:
return coerce_to_array(value, dtype=self.dtype)
+ def _validate_setitem_value(self, value):
+ return lib.is_float(value)
+
@overload
def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
...
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 12bef068ef44b..f925841f26a6d 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -338,6 +338,9 @@ def _from_sequence_of_strings(
def _coerce_to_array(self, value) -> tuple[np.ndarray, np.ndarray]:
return coerce_to_array(value, dtype=self.dtype)
+ def _validate_setitem_value(self, value):
+ return lib.is_integer(value)
+
@overload
def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
...
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 9d98bd8045006..3bb7af2360972 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -50,6 +50,7 @@
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import (
array_equivalent,
+ is_valid_na_for_dtype,
isna,
notna,
)
@@ -82,7 +83,7 @@
class BaseMaskedDtype(ExtensionDtype):
"""
- Base class for dtypes for BasedMaskedArray subclasses.
+ Base class for dtypes for BaseMaskedArray subclasses.
"""
name: str
@@ -213,19 +214,23 @@ def fillna(
def _coerce_to_array(self, values) -> tuple[np.ndarray, np.ndarray]:
raise AbstractMethodError(self)
- def __setitem__(self, key, value) -> None:
- _is_scalar = is_scalar(value)
- if _is_scalar:
- value = [value]
- value, mask = self._coerce_to_array(value)
-
- if _is_scalar:
- value = value[0]
- mask = mask[0]
+ def _validate_setitem_value(self, value) -> bool:
+ raise AbstractMethodError(self)
+ def __setitem__(self, key, value) -> None:
key = check_array_indexer(self, key)
- self._data[key] = value
- self._mask[key] = mask
+ if is_scalar(value):
+ if self._validate_setitem_value(value):
+ self._data[key] = value
+ self._mask[key] = False
+ elif isna(value) and is_valid_na_for_dtype(value):
+ self._mask[key] = True
+ else:
+ raise TypeError(f"Invalid value '{value}' for dtype {self.dtype}")
+ else:
+ value, mask = self._coerce_to_array(value)
+ self._data[key] = value
+ self._mask[key] = mask
def __iter__(self):
if self.ndim == 1:
| This somewhat deals with #44172, though that won't be fully resolved until 2D `ExtensionArray`s are supported (per the comments there).
CC @jbrockmendel
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/44186 | 2021-10-26T03:15:06Z | 2022-01-17T13:45:16Z | null | 2022-01-17T13:45:17Z |
ENH: added regex argument to Series.str.split | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 2a718fdcf16e7..496bc6046a935 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -180,6 +180,7 @@ Other enhancements
- :meth:`DataFrame.__pos__`, :meth:`DataFrame.__neg__` now retain ``ExtensionDtype`` dtypes (:issue:`43883`)
- The error raised when an optional dependency can't be imported now includes the original exception, for easier investigation (:issue:`43882`)
- Added :meth:`.ExponentialMovingWindow.sum` (:issue:`13297`)
+- :meth:`Series.str.split` now supports a ``regex`` argument that explicitly specifies whether the pattern is a regular expression. Default is ``None`` (:issue:`43563`, :issue:`32835`, :issue:`25549`)
- :meth:`DataFrame.dropna` now accepts a single label as ``subset`` along with array-like (:issue:`41021`)
-
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index a62d701413bf1..9f163f77a2ae8 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -659,11 +659,11 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"):
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the %(side)s,
- at the specified delimiter string. Equivalent to :meth:`str.%(method)s`.
+ at the specified delimiter string.
Parameters
----------
- pat : str, optional
+ pat : str or compiled regex, optional
String or regular expression to split on.
If not specified, split on whitespace.
n : int, default -1 (all)
@@ -672,14 +672,30 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"):
expand : bool, default False
Expand the split strings into separate columns.
- * If ``True``, return DataFrame/MultiIndex expanding dimensionality.
- * If ``False``, return Series/Index, containing lists of strings.
+ - If ``True``, return DataFrame/MultiIndex expanding dimensionality.
+ - If ``False``, return Series/Index, containing lists of strings.
+
+ regex : bool, default None
+ Determines if the passed-in pattern is a regular expression:
+
+ - If ``True``, assumes the passed-in pattern is a regular expression
+ - If ``False``, treats the pattern as a literal string.
+ - If ``None`` and `pat` length is 1, treats `pat` as a literal string.
+ - If ``None`` and `pat` length is not 1, treats `pat` as a regular expression.
+ - Cannot be set to False if `pat` is a compiled regex
+
+ .. versionadded:: 1.4.0
Returns
-------
Series, Index, DataFrame or MultiIndex
Type matches caller unless ``expand=True`` (see Notes).
+ Raises
+ ------
+ ValueError
+ * if `regex` is False and `pat` is a compiled regex
+
See Also
--------
Series.str.split : Split strings around given separator/delimiter.
@@ -702,6 +718,9 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"):
If using ``expand=True``, Series and Index callers return DataFrame and
MultiIndex objects, respectively.
+ Use of `regex=False` with a `pat` as a compiled regex will raise
+ an error.
+
Examples
--------
>>> s = pd.Series(
@@ -776,22 +795,63 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"):
1 https://docs.python.org/3/tutorial index.html
2 NaN NaN
- Remember to escape special characters when explicitly using regular
- expressions.
+ Remember to escape special characters when explicitly using regular expressions.
- >>> s = pd.Series(["1+1=2"])
- >>> s
- 0 1+1=2
- dtype: object
- >>> s.str.split(r"\+|=", expand=True)
- 0 1 2
- 0 1 1 2
+ >>> s = pd.Series(["foo and bar plus baz"])
+ >>> s.str.split(r"and|plus", expand=True)
+ 0 1 2
+ 0 foo bar baz
+
+ Regular expressions can be used to handle urls or file names.
+ When `pat` is a string and ``regex=None`` (the default), the given `pat` is compiled
+ as a regex only if ``len(pat) != 1``.
+
+ >>> s = pd.Series(['foojpgbar.jpg'])
+ >>> s.str.split(r".", expand=True)
+ 0 1
+ 0 foojpgbar jpg
+
+ >>> s.str.split(r"\.jpg", expand=True)
+ 0 1
+ 0 foojpgbar
+
+ When ``regex=True``, `pat` is interpreted as a regex
+
+ >>> s.str.split(r"\.jpg", regex=True, expand=True)
+ 0 1
+ 0 foojpgbar
+
+ A compiled regex can be passed as `pat`
+
+ >>> import re
+ >>> s.str.split(re.compile(r"\.jpg"), expand=True)
+ 0 1
+ 0 foojpgbar
+
+ When ``regex=False``, `pat` is interpreted as the string itself
+
+ >>> s.str.split(r"\.jpg", regex=False, expand=True)
+ 0
+ 0 foojpgbar.jpg
"""
@Appender(_shared_docs["str_split"] % {"side": "beginning", "method": "split"})
@forbid_nonstring_types(["bytes"])
- def split(self, pat=None, n=-1, expand=False):
- result = self._data.array._str_split(pat, n, expand)
+ def split(
+ self,
+ pat: str | re.Pattern | None = None,
+ n=-1,
+ expand=False,
+ *,
+ regex: bool | None = None,
+ ):
+ if regex is False and is_re(pat):
+ raise ValueError(
+ "Cannot use a compiled regex as replacement pattern with regex=False"
+ )
+ if is_re(pat):
+ regex = True
+ result = self._data.array._str_split(pat, n, expand, regex)
return self._wrap_result(result, returns_string=expand, expand=expand)
@Appender(_shared_docs["str_split"] % {"side": "end", "method": "rsplit"})
diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py
index 76ee55ef5f9ad..3081575f50700 100644
--- a/pandas/core/strings/object_array.py
+++ b/pandas/core/strings/object_array.py
@@ -308,21 +308,38 @@ def f(x):
return self._str_map(f)
- def _str_split(self, pat=None, n=-1, expand=False):
+ def _str_split(
+ self,
+ pat: str | re.Pattern | None = None,
+ n=-1,
+ expand=False,
+ regex: bool | None = None,
+ ):
if pat is None:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
- if len(pat) == 1:
- if n is None or n == 0:
- n = -1
- f = lambda x: x.split(pat, n)
+ new_pat: str | re.Pattern
+ if regex is True or isinstance(pat, re.Pattern):
+ new_pat = re.compile(pat)
+ elif regex is False:
+ new_pat = pat
+ # regex is None so link to old behavior #43563
else:
+ if len(pat) == 1:
+ new_pat = pat
+ else:
+ new_pat = re.compile(pat)
+
+ if isinstance(new_pat, re.Pattern):
if n is None or n == -1:
n = 0
- regex = re.compile(pat)
- f = lambda x: regex.split(x, maxsplit=n)
+ f = lambda x: new_pat.split(x, maxsplit=n)
+ else:
+ if n is None or n == 0:
+ n = -1
+ f = lambda x: x.split(pat, n)
return self._str_map(f, dtype=object)
def _str_rsplit(self, pat=None, n=-1):
diff --git a/pandas/tests/strings/test_split_partition.py b/pandas/tests/strings/test_split_partition.py
index f3f5acd0d2f1c..01a397938db52 100644
--- a/pandas/tests/strings/test_split_partition.py
+++ b/pandas/tests/strings/test_split_partition.py
@@ -1,4 +1,5 @@
from datetime import datetime
+import re
import numpy as np
import pytest
@@ -35,6 +36,44 @@ def test_split(any_string_dtype):
tm.assert_series_equal(result, exp)
+def test_split_regex(any_string_dtype):
+ # GH 43563
+ # explicit regex = True split
+ values = Series("xxxjpgzzz.jpg", dtype=any_string_dtype)
+ result = values.str.split(r"\.jpg", regex=True)
+ exp = Series([["xxxjpgzzz", ""]])
+ tm.assert_series_equal(result, exp)
+
+ # explicit regex = True split with compiled regex
+ regex_pat = re.compile(r".jpg")
+ values = Series("xxxjpgzzz.jpg", dtype=any_string_dtype)
+ result = values.str.split(regex_pat)
+ exp = Series([["xx", "zzz", ""]])
+ tm.assert_series_equal(result, exp)
+
+ # explicit regex = False split
+ result = values.str.split(r"\.jpg", regex=False)
+ exp = Series([["xxxjpgzzz.jpg"]])
+ tm.assert_series_equal(result, exp)
+
+ # non explicit regex split, pattern length == 1
+ result = values.str.split(r".")
+ exp = Series([["xxxjpgzzz", "jpg"]])
+ tm.assert_series_equal(result, exp)
+
+ # non explicit regex split, pattern length != 1
+ result = values.str.split(r".jpg")
+ exp = Series([["xx", "zzz", ""]])
+ tm.assert_series_equal(result, exp)
+
+ # regex=False with pattern compiled regex raises error
+ with pytest.raises(
+ ValueError,
+ match="Cannot use a compiled regex as replacement pattern with regex=False",
+ ):
+ values.str.split(regex_pat, regex=False)
+
+
def test_split_object_mixed():
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.split("_")
| - [X] closes #43563
- [X] closes #32835
- [X] closes #25549
- [X] xref #37963
- [X] tests added / passed
- [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [X] whatsnew entry
I've preserved current behavior, in which regex = None. Currently, it handles the pattern as a regex if the length of pattern is not 1. I believe that in the future, this may be worth considering deprecating.
| https://api.github.com/repos/pandas-dev/pandas/pulls/44185 | 2021-10-26T02:13:17Z | 2021-11-04T00:40:28Z | 2021-11-04T00:40:27Z | 2021-11-04T00:40:54Z |
CI: debug 310 dev | diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index dc357101e79fd..0000000000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-version: 2.1
-
-jobs:
- test-arm:
- machine:
- image: ubuntu-2004:202101-01
- resource_class: arm.medium
- environment:
- ENV_FILE: ci/deps/circle-38-arm64.yaml
- PYTEST_WORKERS: auto
- PATTERN: "not slow and not network and not clipboard and not arm_slow"
- PYTEST_TARGET: "pandas"
- steps:
- - checkout
- - run: ci/setup_env.sh
- - run: PATH=$HOME/miniconda3/envs/pandas-dev/bin:$HOME/miniconda3/condabin:$PATH ci/run_tests.sh
-
-workflows:
- test:
- jobs:
- - test-arm
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
deleted file mode 100644
index 23e452f682b60..0000000000000
--- a/.github/workflows/ci.yml
+++ /dev/null
@@ -1,190 +0,0 @@
-name: CI
-
-on:
- push:
- branches:
- - master
- - 1.3.x
- pull_request:
- branches:
- - master
- - 1.3.x
-
-env:
- ENV_FILE: environment.yml
- PANDAS_CI: 1
-
-jobs:
- checks:
- name: Checks
- runs-on: ubuntu-latest
- defaults:
- run:
- shell: bash -l {0}
-
- concurrency:
- group: ${{ github.ref }}-checks
- cancel-in-progress: ${{github.event_name == 'pull_request'}}
-
- steps:
- - name: Checkout
- uses: actions/checkout@v2
- with:
- fetch-depth: 0
-
- - name: Cache conda
- uses: actions/cache@v2
- with:
- path: ~/conda_pkgs_dir
- key: ${{ runner.os }}-conda-${{ hashFiles('${{ env.ENV_FILE }}') }}
-
- - uses: conda-incubator/setup-miniconda@v2
- with:
- activate-environment: pandas-dev
- channel-priority: strict
- environment-file: ${{ env.ENV_FILE }}
- use-only-tar-bz2: true
-
- - name: Install node.js (for pyright)
- uses: actions/setup-node@v2
- with:
- node-version: "16"
-
- - name: Install pyright
- # note: keep version in sync with .pre-commit-config.yaml
- run: npm install -g pyright@1.1.171
-
- - name: Build Pandas
- uses: ./.github/actions/build_pandas
-
- - name: Checks on imported code
- run: ci/code_checks.sh code
- if: always()
-
- - name: Running doctests
- run: ci/code_checks.sh doctests
- if: always()
-
- - name: Docstring validation
- run: ci/code_checks.sh docstrings
- if: always()
-
- - name: Typing validation
- run: ci/code_checks.sh typing
- if: always()
-
- - name: Testing docstring validation script
- run: pytest scripts
- if: always()
-
- - name: Running benchmarks
- run: |
- cd asv_bench
- asv check -E existing
- git remote add upstream https://github.com/pandas-dev/pandas.git
- git fetch upstream
- asv machine --yes
- asv dev | sed "/failed$/ s/^/##[error]/" | tee benchmarks.log
- if grep "failed" benchmarks.log > /dev/null ; then
- exit 1
- fi
- if: always()
-
- - name: Publish benchmarks artifact
- uses: actions/upload-artifact@master
- with:
- name: Benchmarks log
- path: asv_bench/benchmarks.log
- if: failure()
-
- web_and_docs:
- name: Web and docs
- runs-on: ubuntu-latest
-
- concurrency:
- group: ${{ github.ref }}-web-docs
- cancel-in-progress: true
-
- steps:
- - name: Checkout
- uses: actions/checkout@v2
- with:
- fetch-depth: 0
-
- - name: Set up pandas
- uses: ./.github/actions/setup
-
- - name: Build website
- run: |
- source activate pandas-dev
- python web/pandas_web.py web/pandas --target-path=web/build
- - name: Build documentation
- run: |
- source activate pandas-dev
- doc/make.py --warnings-are-errors | tee sphinx.log ; exit ${PIPESTATUS[0]}
-
- # This can be removed when the ipython directive fails when there are errors,
- # including the `tee sphinx.log` in te previous step (https://github.com/ipython/ipython/issues/11547)
- - name: Check ipython directive errors
- run: "! grep -B10 \"^<<<-------------------------------------------------------------------------$\" sphinx.log"
-
- - name: Install ssh key
- run: |
- mkdir -m 700 -p ~/.ssh
- echo "${{ secrets.server_ssh_key }}" > ~/.ssh/id_rsa
- chmod 600 ~/.ssh/id_rsa
- echo "${{ secrets.server_ip }} ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE1Kkopomm7FHG5enATf7SgnpICZ4W2bw+Ho+afqin+w7sMcrsa0je7sbztFAV8YchDkiBKnWTG4cRT+KZgZCaY=" > ~/.ssh/known_hosts
- if: ${{github.event_name == 'push' && github.ref == 'refs/heads/master'}}
-
- - name: Copy cheatsheets into site directory
- run: cp doc/cheatsheet/Pandas_Cheat_Sheet* web/build/
-
- - name: Upload web
- run: rsync -az --delete --exclude='pandas-docs' --exclude='docs' web/build/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas
- if: ${{github.event_name == 'push' && github.ref == 'refs/heads/master'}}
-
- - name: Upload dev docs
- run: rsync -az --delete doc/build/html/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas/pandas-docs/dev
- if: ${{github.event_name == 'push' && github.ref == 'refs/heads/master'}}
-
- - name: Move docs into site directory
- run: mv doc/build/html web/build/docs
-
- - name: Save website as an artifact
- uses: actions/upload-artifact@v2
- with:
- name: website
- path: web/build
- retention-days: 14
-
- data_manager:
- name: Test experimental data manager
- runs-on: ubuntu-latest
- strategy:
- matrix:
- pattern: ["not slow and not network and not clipboard", "slow"]
- concurrency:
- group: ${{ github.ref }}-data_manager-${{ matrix.pattern }}
- cancel-in-progress: true
-
- steps:
- - name: Checkout
- uses: actions/checkout@v2
- with:
- fetch-depth: 0
-
- - name: Set up pandas
- uses: ./.github/actions/setup
-
- - name: Run tests
- env:
- PANDAS_DATA_MANAGER: array
- PATTERN: ${{ matrix.pattern }}
- PYTEST_WORKERS: "auto"
- PYTEST_TARGET: pandas
- run: |
- source activate pandas-dev
- ci/run_tests.sh
-
- - name: Print skipped tests
- run: python ci/print_skipped.py
diff --git a/.github/workflows/database.yml b/.github/workflows/database.yml
deleted file mode 100644
index 6da47c86026ed..0000000000000
--- a/.github/workflows/database.yml
+++ /dev/null
@@ -1,112 +0,0 @@
-name: Database
-
-on:
- push:
- branches:
- - master
- - 1.3.x
- pull_request:
- branches:
- - master
- - 1.3.x
- paths-ignore:
- - "doc/**"
-
-env:
- PYTEST_WORKERS: "auto"
- PANDAS_CI: 1
- PATTERN: ((not slow and not network and not clipboard) or (single and db))
- COVERAGE: true
-
-jobs:
- Linux_py38_IO:
- runs-on: ubuntu-latest
- defaults:
- run:
- shell: bash -l {0}
-
- strategy:
- matrix:
- ENV_FILE: [ci/deps/actions-38-db-min.yaml, ci/deps/actions-38-db.yaml]
- fail-fast: false
-
- concurrency:
- group: ${{ github.ref }}-${{ matrix.ENV_FILE }}
- cancel-in-progress: ${{github.event_name == 'pull_request'}}
-
- services:
- mysql:
- image: mysql
- env:
- MYSQL_ALLOW_EMPTY_PASSWORD: yes
- MYSQL_DATABASE: pandas
- options: >-
- --health-cmd "mysqladmin ping"
- --health-interval 10s
- --health-timeout 5s
- --health-retries 5
- ports:
- - 3306:3306
-
- postgres:
- image: postgres
- env:
- POSTGRES_USER: postgres
- POSTGRES_PASSWORD: postgres
- POSTGRES_DB: pandas
- options: >-
- --health-cmd pg_isready
- --health-interval 10s
- --health-timeout 5s
- --health-retries 5
- ports:
- - 5432:5432
-
- steps:
- - name: Checkout
- uses: actions/checkout@v2
- with:
- fetch-depth: 0
-
- - name: Cache conda
- uses: actions/cache@v2
- env:
- CACHE_NUMBER: 0
- with:
- path: ~/conda_pkgs_dir
- key: ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-${{
- hashFiles('${{ matrix.ENV_FILE }}') }}
-
- - uses: conda-incubator/setup-miniconda@v2
- with:
- activate-environment: pandas-dev
- channel-priority: strict
- environment-file: ${{ matrix.ENV_FILE }}
- use-only-tar-bz2: true
-
- - name: Build Pandas
- uses: ./.github/actions/build_pandas
-
- - name: Test
- run: pytest -m "${{ env.PATTERN }}" -n 2 --dist=loadfile --cov=pandas --cov-report=xml pandas/tests/io
- if: always()
-
- - name: Build Version
- run: pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd
-
- - name: Publish test results
- uses: actions/upload-artifact@master
- with:
- name: Test results
- path: test-data.xml
- if: failure()
-
- - name: Print skipped tests
- run: python ci/print_skipped.py
-
- - name: Upload coverage to Codecov
- uses: codecov/codecov-action@v2
- with:
- flags: unittests
- name: codecov-pandas
- fail_ci_if_error: true
diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml
deleted file mode 100644
index f37f31686ef69..0000000000000
--- a/.github/workflows/posix.yml
+++ /dev/null
@@ -1,102 +0,0 @@
-name: Posix
-
-on:
- push:
- branches:
- - master
- - 1.3.x
- pull_request:
- branches:
- - master
- - 1.3.x
- paths-ignore:
- - "doc/**"
-
-env:
- PYTEST_WORKERS: "auto"
- PANDAS_CI: 1
-
-jobs:
- pytest:
- runs-on: ubuntu-latest
- defaults:
- run:
- shell: bash -l {0}
- strategy:
- matrix:
- settings: [
- [actions-38-minimum_versions.yaml, "not slow and not network and not clipboard", "", "", "", "", ""],
- [actions-38-locale_slow.yaml, "slow", "language-pack-it xsel", "it_IT.utf8", "it_IT.utf8", "", ""],
- [actions-38.yaml, "not slow and not network and not clipboard", "", "", "", "", ""],
- [actions-38-slow.yaml, "slow", "", "", "", "", ""],
- [actions-38-locale.yaml, "not slow and not network", "language-pack-zh-hans xsel", "zh_CN.utf8", "zh_CN.utf8", "", ""],
- [actions-39-slow.yaml, "slow", "", "", "", "", ""],
- [actions-39-numpydev.yaml, "not slow and not network", "xsel", "", "", "deprecate", "-W error"],
- [actions-39.yaml, "not slow and not network and not clipboard", "", "", "", "", ""]
- ]
- fail-fast: false
- env:
- COVERAGE: true
- ENV_FILE: ci/deps/${{ matrix.settings[0] }}
- PATTERN: ${{ matrix.settings[1] }}
- EXTRA_APT: ${{ matrix.settings[2] }}
- LANG: ${{ matrix.settings[3] }}
- LC_ALL: ${{ matrix.settings[4] }}
- PANDAS_TESTING_MODE: ${{ matrix.settings[5] }}
- TEST_ARGS: ${{ matrix.settings[6] }}
- PYTEST_TARGET: pandas
- concurrency:
- group: ${{ github.ref }}-${{ matrix.settings[0] }}
- cancel-in-progress: ${{github.event_name == 'pull_request'}}
-
- steps:
- - name: Checkout
- uses: actions/checkout@v2
- with:
- fetch-depth: 0
-
- - name: Cache conda
- uses: actions/cache@v2
- env:
- CACHE_NUMBER: 0
- with:
- path: ~/conda_pkgs_dir
- key: ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-${{
- hashFiles('${{ env.ENV_FILE }}') }}
-
- - name: Extra installs
- run: sudo apt-get update && sudo apt-get install -y libc6-dev-i386 ${{ env.EXTRA_APT }}
-
- - uses: conda-incubator/setup-miniconda@v2
- with:
- activate-environment: pandas-dev
- channel-priority: flexible
- environment-file: ${{ env.ENV_FILE }}
- use-only-tar-bz2: true
-
- - name: Build Pandas
- uses: ./.github/actions/build_pandas
-
- - name: Test
- run: ci/run_tests.sh
- if: always()
-
- - name: Build Version
- run: pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd
-
- - name: Publish test results
- uses: actions/upload-artifact@master
- with:
- name: Test results
- path: test-data.xml
- if: failure()
-
- - name: Print skipped tests
- run: python ci/print_skipped.py
-
- - name: Upload coverage to Codecov
- uses: codecov/codecov-action@v2
- with:
- flags: unittests
- name: codecov-pandas
- fail_ci_if_error: false
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
deleted file mode 100644
index 93e30e4d551af..0000000000000
--- a/.github/workflows/pre-commit.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-name: pre-commit
-
-on:
- pull_request:
- push:
- branches:
- - master
- - 1.3.x
-
-jobs:
- pre-commit:
- runs-on: ubuntu-latest
- concurrency:
- group: ${{ github.ref }}-pre-commit
- cancel-in-progress: ${{github.event_name == 'pull_request'}}
- steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
- with:
- python-version: '3.9.7'
- - uses: pre-commit/action@v2.0.0
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml
deleted file mode 100644
index 7692dc522522f..0000000000000
--- a/.github/workflows/sdist.yml
+++ /dev/null
@@ -1,78 +0,0 @@
-name: sdist
-
-on:
- push:
- branches:
- - master
- - 1.3.x
- pull_request:
- branches:
- - master
- - 1.3.x
- paths-ignore:
- - "doc/**"
-
-jobs:
- build:
- runs-on: ubuntu-latest
- timeout-minutes: 60
- defaults:
- run:
- shell: bash -l {0}
-
- strategy:
- fail-fast: false
- matrix:
- python-version: ["3.8", "3.9", "3.10"]
- concurrency:
- group: ${{github.ref}}-${{matrix.python-version}}-sdist
- cancel-in-progress: ${{github.event_name == 'pull_request'}}
-
- steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
-
- - name: Set up Python
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.python-version }}
-
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip setuptools wheel
-
- # GH 39416
- pip install numpy
-
- - name: Build pandas sdist
- run: |
- pip list
- python setup.py sdist --formats=gztar
-
- - uses: conda-incubator/setup-miniconda@v2
- with:
- activate-environment: pandas-sdist
- python-version: '${{ matrix.python-version }}'
-
- - name: Install pandas from sdist
- run: |
- pip list
- python -m pip install dist/*.gz
-
- - name: Force oldest supported NumPy
- run: |
- case "${{matrix.python-version}}" in
- 3.8)
- pip install numpy==1.18.5 ;;
- 3.9)
- pip install numpy==1.19.3 ;;
- 3.10)
- pip install numpy==1.21.2 ;;
- esac
-
- - name: Import pandas
- run: |
- cd ..
- conda list
- python -c "import pandas; pandas.show_versions();"
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
deleted file mode 100644
index 6c685d09ab55a..0000000000000
--- a/azure-pipelines.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-# Adapted from https://github.com/numba/numba/blob/master/azure-pipelines.yml
-trigger:
- branches:
- include:
- - master
- - 1.3.x
- paths:
- exclude:
- - 'doc/*'
-
-pr:
- autoCancel: true
- branches:
- include:
- - master
- - 1.3.x
-
-variables:
- PYTEST_WORKERS: auto
- PYTEST_TARGET: pandas
-
-jobs:
-# Mac and Linux use the same template
-- template: ci/azure/posix.yml
- parameters:
- name: macOS
- vmImage: macOS-10.15
-
-- template: ci/azure/windows.yml
- parameters:
- name: Windows
- vmImage: windows-2019
-
-- job: py38_32bit
- pool:
- vmImage: ubuntu-18.04
-
- steps:
- - script: |
- docker pull quay.io/pypa/manylinux2014_i686
- docker run -v $(pwd):/pandas quay.io/pypa/manylinux2014_i686 \
- /bin/bash -xc "cd pandas && \
- /opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev && \
- . ~/virtualenvs/pandas-dev/bin/activate && \
- python -m pip install --no-deps -U pip wheel setuptools && \
- pip install cython numpy python-dateutil pytz pytest pytest-xdist hypothesis pytest-azurepipelines && \
- python setup.py build_ext -q -j2 && \
- python -m pip install --no-build-isolation -e . && \
- pytest -m 'not slow and not network and not clipboard' pandas --junitxml=test-data.xml"
- displayName: 'Run 32-bit manylinux2014 Docker Build / Tests'
-
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- testResultsFiles: '**/test-*.xml'
- failTaskOnFailedTests: true
- testRunTitle: 'Publish test results for Python 3.8-32 bit full Linux'
diff --git a/pyproject.toml b/pyproject.toml
index ae68e54ce1346..d47bab4c78579 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -57,6 +57,7 @@ markers = [
"arm_slow: mark a test as slow for arm64 architecture",
"arraymanager: mark a test to run with ArrayManager enabled",
]
+log_cli = 1
[tool.mypy]
# Import discovery
| xref #44173
| https://api.github.com/repos/pandas-dev/pandas/pulls/44183 | 2021-10-25T23:40:06Z | 2021-10-28T01:35:34Z | null | 2021-10-28T01:35:39Z |
BUG: np.timedelta64 + Period | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index fc3eaec47431f..8221bc406521a 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -567,7 +567,7 @@ I/O
Period
^^^^^^
--
+- Bug in adding a :class:`Period` object to a ``np.timedelta64`` object incorrectly raising ``TypeError`` (:issue:`44182`)
-
Plotting
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 0998cb7b0c21e..dcf4323bc8755 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -4,6 +4,7 @@ cimport numpy as cnp
from cpython.object cimport (
Py_EQ,
Py_NE,
+ PyObject_RichCompare,
PyObject_RichCompareBool,
)
from numpy cimport (
@@ -1594,6 +1595,9 @@ cdef class _Period(PeriodMixin):
PeriodDtypeBase _dtype
BaseOffset freq
+ # higher than np.ndarray, np.matrix, np.timedelta64
+ __array_priority__ = 100
+
dayofweek = _Period.day_of_week
dayofyear = _Period.day_of_year
@@ -1652,7 +1656,10 @@ cdef class _Period(PeriodMixin):
return PyObject_RichCompareBool(self.ordinal, other.ordinal, op)
elif other is NaT:
return _nat_scalar_rules[op]
- return NotImplemented # TODO: ndarray[object]?
+ elif util.is_array(other):
+ # in particular ndarray[object]; see test_pi_cmp_period
+ return np.array([PyObject_RichCompare(self, x, op) for x in other])
+ return NotImplemented
def __hash__(self):
return hash((self.ordinal, self.freqstr))
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py
index 0c42be517b798..d7cb314743e86 100644
--- a/pandas/tests/arithmetic/test_period.py
+++ b/pandas/tests/arithmetic/test_period.py
@@ -185,6 +185,10 @@ def test_pi_cmp_period(self):
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
+ # Tests Period.__richcmp__ against ndarray[object, ndim=2]
+ result = idx.values.reshape(10, 2) < idx[10]
+ tm.assert_numpy_array_equal(result, exp.reshape(10, 2))
+
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = get_expected_box(box_with_array)
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index 9b2e0cac5de84..f1b8c1cfdd39b 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -1287,20 +1287,8 @@ def test_add_offset(self):
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
-
- if isinstance(o, np.timedelta64):
- msg = "cannot use operands with types"
- with pytest.raises(TypeError, match=msg):
- o + p
- else:
- msg = "|".join(
- [
- "Input has different freq",
- "Input cannot be converted to Period",
- ]
- )
- with pytest.raises(IncompatibleFrequency, match=msg):
- o + p
+ with pytest.raises(IncompatibleFrequency, match=msg):
+ o + p
for freq in ["M", "2M", "3M"]:
p = Period("2011-03", freq=freq)
@@ -1329,14 +1317,8 @@ def test_add_offset(self):
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
-
- if isinstance(o, np.timedelta64):
- td_msg = "cannot use operands with types"
- with pytest.raises(TypeError, match=td_msg):
- o + p
- else:
- with pytest.raises(IncompatibleFrequency, match=msg):
- o + p
+ with pytest.raises(IncompatibleFrequency, match=msg):
+ o + p
# freq is Tick
for freq in ["D", "2D", "3D"]:
@@ -1352,14 +1334,11 @@ def test_add_offset(self):
exp = Period("2011-04-03", freq=freq)
assert p + np.timedelta64(2, "D") == exp
- msg = "cannot use operands with types"
- with pytest.raises(TypeError, match=msg):
- np.timedelta64(2, "D") + p
+ assert np.timedelta64(2, "D") + p == exp
exp = Period("2011-04-02", freq=freq)
assert p + np.timedelta64(3600 * 24, "s") == exp
- with pytest.raises(TypeError, match=msg):
- np.timedelta64(3600 * 24, "s") + p
+ assert np.timedelta64(3600 * 24, "s") + p == exp
exp = Period("2011-03-30", freq=freq)
assert p + timedelta(-2) == exp
@@ -1385,14 +1364,8 @@ def test_add_offset(self):
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
-
- if isinstance(o, np.timedelta64):
- td_msg = "cannot use operands with types"
- with pytest.raises(TypeError, match=td_msg):
- o + p
- else:
- with pytest.raises(IncompatibleFrequency, match=msg):
- o + p
+ with pytest.raises(IncompatibleFrequency, match=msg):
+ o + p
for freq in ["H", "2H", "3H"]:
p = Period("2011-04-01 09:00", freq=freq)
@@ -1408,13 +1381,11 @@ def test_add_offset(self):
msg = "cannot use operands with types"
exp = Period("2011-04-01 12:00", freq=freq)
assert p + np.timedelta64(3, "h") == exp
- with pytest.raises(TypeError, match=msg):
- np.timedelta64(3, "h") + p
+ assert np.timedelta64(3, "h") + p == exp
exp = Period("2011-04-01 10:00", freq=freq)
assert p + np.timedelta64(3600, "s") == exp
- with pytest.raises(TypeError, match=msg):
- np.timedelta64(3600, "s") + p
+ assert np.timedelta64(3600, "s") + p == exp
exp = Period("2011-04-01 11:00", freq=freq)
assert p + timedelta(minutes=120) == exp
@@ -1440,14 +1411,8 @@ def test_add_offset(self):
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
-
- if isinstance(o, np.timedelta64):
- td_msg = "cannot use operands with types"
- with pytest.raises(TypeError, match=td_msg):
- o + p
- else:
- with pytest.raises(IncompatibleFrequency, match=msg):
- o + p
+ with pytest.raises(IncompatibleFrequency, match=msg):
+ o + p
def test_sub_offset(self):
# freq is DateOffset
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44182 | 2021-10-25T18:07:58Z | 2021-10-28T13:19:18Z | 2021-10-28T13:19:18Z | 2021-11-02T01:57:49Z |
Group by a categorical Series of unequal length | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 71d5c46b81ea0..5136051468ff8 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -784,6 +784,7 @@ Groupby/resample/rolling
- Bug in :meth:`GroupBy.nth` failing on ``axis=1`` (:issue:`43926`)
- Fixed bug in :meth:`Series.rolling` and :meth:`DataFrame.rolling` not respecting right bound on centered datetime-like windows, if the index contain duplicates (:issue:`3944`)
- Bug in :meth:`Series.rolling` and :meth:`DataFrame.rolling` when using a :class:`pandas.api.indexers.BaseIndexer` subclass that returned unequal start and end arrays would segfault instead of raising a ``ValueError`` (:issue:`44470`)
+- Fixed bug where grouping by a :class:`Series` that has a categorical data type and length unequal to the axis of grouping raised ``ValueError`` (:issue:`44179`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index a05f8e581d12f..b3302233ce91f 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -887,12 +887,6 @@ def is_in_obj(gpr) -> bool:
else:
in_axis = False
- if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
- raise ValueError(
- f"Length of grouper ({len(gpr)}) and axis ({obj.shape[axis]}) "
- "must be same length"
- )
-
# create the Grouping
# allow us to passing the actual Grouping as the gpr
ping = (
@@ -938,7 +932,7 @@ def _convert_grouper(axis: Index, grouper):
return grouper.reindex(axis)._values
elif isinstance(grouper, MultiIndex):
return grouper._values
- elif isinstance(grouper, (list, tuple, Series, Index, np.ndarray)):
+ elif isinstance(grouper, (list, tuple, Index, Categorical, np.ndarray)):
if len(grouper) != len(axis):
raise ValueError("Grouper and axis must be same length")
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 28128dee9da0f..585491f8664b3 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -664,11 +664,32 @@ def test_bins_unequal_len():
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
- msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
- with pytest.raises(ValueError, match=msg):
+ with pytest.raises(ValueError, match="Grouper and axis must be same length"):
series.groupby(bins).mean()
+@pytest.mark.parametrize(
+ ["series", "data"],
+ [
+ # Group a series with length and index equal to those of the grouper.
+ (Series(range(4)), {"A": [0, 3], "B": [1, 2]}),
+ # Group a series with length equal to that of the grouper and index unequal to
+ # that of the grouper.
+ (Series(range(4)).rename(lambda idx: idx + 1), {"A": [2], "B": [0, 1]}),
+ # GH44179: Group a series with length unequal to that of the grouper.
+ (Series(range(7)), {"A": [0, 3], "B": [1, 2]}),
+ ],
+)
+def test_categorical_series(series, data):
+ # Group the given series by a series with categorical data type such that group A
+ # takes indices 0 and 3 and group B indices 1 and 2, obtaining the values mapped in
+ # the given data.
+ groupby = series.groupby(Series(list("ABBA"), dtype="category"))
+ result = groupby.aggregate(list)
+ expected = Series(data, index=CategoricalIndex(data.keys()))
+ tm.assert_series_equal(result, expected)
+
+
def test_as_index():
# GH13204
df = DataFrame(
| - [X] closes #44179
- [X] tests added / passed
- [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [X] whatsnew entry
While this fix enables grouping by a `Series` that has a categorical data type and length unequal to the axis of grouping, it maintains the requirement (developed in #3017, #9741, and #18525) that the length of a `Categorical` object used for grouping must match that of the axis of grouping. Additionally, this fix checks that requirement consistently with other groupers whose lengths must match—namely `list`, `tuple`, `Index`, and `numpy.ndarray`—and produces the same exception message when the lengths differ. | https://api.github.com/repos/pandas-dev/pandas/pulls/44180 | 2021-10-25T14:17:49Z | 2021-12-22T03:07:33Z | 2021-12-22T03:07:32Z | 2022-01-03T17:14:19Z |
ENH: Use yaml anchors for pre-commit hooks additional dependencies | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2c76b682ee343..469c4066e2387 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -39,10 +39,11 @@ repos:
rev: 3.9.2
hooks:
- id: flake8
- additional_dependencies:
- - flake8-comprehensions==3.1.0
- - flake8-bugbear==21.3.2
- - pandas-dev-flaker==0.2.0
+ additional_dependencies: &flake8_dependencies
+ - flake8==3.9.2
+ - flake8-comprehensions==3.1.0
+ - flake8-bugbear==21.3.2
+ - pandas-dev-flaker==0.2.0
- id: flake8
alias: flake8-cython
name: flake8 (cython)
@@ -76,11 +77,7 @@ repos:
rev: v1.2.3
hooks:
- id: yesqa
- additional_dependencies:
- - flake8==3.9.2
- - flake8-comprehensions==3.1.0
- - flake8-bugbear==21.3.2
- - pandas-dev-flaker==0.2.0
+ additional_dependencies: *flake8_dependencies
- repo: local
hooks:
- id: pyright
diff --git a/scripts/sync_flake8_versions.py b/scripts/sync_flake8_versions.py
index cb6bb1eb0986e..370924cdfa199 100644
--- a/scripts/sync_flake8_versions.py
+++ b/scripts/sync_flake8_versions.py
@@ -68,16 +68,9 @@ def _conda_to_pip_compat(dep):
def _validate_additional_dependencies(
flake8_additional_dependencies,
- yesqa_additional_dependencies,
environment_additional_dependencies,
) -> None:
for dep in flake8_additional_dependencies:
- if dep not in yesqa_additional_dependencies:
- sys.stdout.write(
- f"Mismatch of '{dep.name}' version between 'flake8' "
- "and 'yesqa' in '.pre-commit-config.yaml'\n"
- )
- sys.exit(1)
if dep not in environment_additional_dependencies:
sys.stdout.write(
f"Mismatch of '{dep.name}' version between 'enviroment.yml' "
@@ -94,13 +87,6 @@ def _validate_revisions(revisions):
)
sys.exit(1)
- if revisions.yesqa != revisions.pre_commit:
- sys.stdout.write(
- f"{revisions.name} in 'yesqa' does not match "
- "in 'flake8' from 'pre-commit'\n"
- )
- sys.exit(1)
-
def _process_dependencies(deps):
for dep in deps:
@@ -130,21 +116,12 @@ def get_revisions(
else:
flake8_additional_dependencies.append(dep)
- _, yesqa_hook = _get_repo_hook(repos, "yesqa")
- yesqa_additional_dependencies = []
- for dep in _process_dependencies(yesqa_hook.get("additional_dependencies", [])):
- if dep.name == "flake8":
- flake8_revisions.yesqa = dep
- elif dep.name == "pandas-dev-flaker":
- pandas_dev_flaker_revisions.yesqa = dep
- else:
- yesqa_additional_dependencies.append(dep)
-
environment_dependencies = environment["dependencies"]
environment_additional_dependencies = []
for dep in _process_dependencies(environment_dependencies):
if dep.name == "flake8":
flake8_revisions.environment = dep
+ environment_additional_dependencies.append(dep)
elif dep.name == "pandas-dev-flaker":
pandas_dev_flaker_revisions.environment = dep
else:
@@ -152,7 +129,6 @@ def get_revisions(
_validate_additional_dependencies(
flake8_additional_dependencies,
- yesqa_additional_dependencies,
environment_additional_dependencies,
)
| - [x] closes #43282
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
This PR adds:
- use of yaml anchors for the `flake8` and `yesqa` hooks
- updates to `scripts/sync_flake8_versions.py` since now both `yesqa` and `flake8` hooks have the same additional dependencies declared through a reusable anchor there is no need to check for sync between these two
PD. I am not sure this needs a `whatsnew` entry so I did not add it - unless y'all think there should be one | https://api.github.com/repos/pandas-dev/pandas/pulls/44177 | 2021-10-25T11:55:38Z | 2021-10-25T14:04:54Z | 2021-10-25T14:04:54Z | 2021-10-25T14:08:51Z |
PERF: Improve performance in rolling.mean(engine=numba) | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 254a004a37c40..fa4dadde13185 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -426,7 +426,7 @@ Performance improvements
- :meth:`SparseArray.min` and :meth:`SparseArray.max` no longer require converting to a dense array (:issue:`43526`)
- Indexing into a :class:`SparseArray` with a ``slice`` with ``step=1`` no longer requires converting to a dense array (:issue:`43777`)
- Performance improvement in :meth:`SparseArray.take` with ``allow_fill=False`` (:issue:`43654`)
-- Performance improvement in :meth:`.Rolling.mean` and :meth:`.Expanding.mean` with ``engine="numba"`` (:issue:`43612`)
+- Performance improvement in :meth:`.Rolling.mean`, :meth:`.Expanding.mean`, :meth:`.Rolling.sum`, :meth:`.Expanding.sum` with ``engine="numba"`` (:issue:`43612`, :issue:`44176`)
- Improved performance of :meth:`pandas.read_csv` with ``memory_map=True`` when file encoding is UTF-8 (:issue:`43787`)
- Performance improvement in :meth:`RangeIndex.sort_values` overriding :meth:`Index.sort_values` (:issue:`43666`)
- Performance improvement in :meth:`RangeIndex.insert` (:issue:`43988`)
diff --git a/pandas/core/_numba/kernels/__init__.py b/pandas/core/_numba/kernels/__init__.py
index eb43de1e0d979..23b0ec5c3d8aa 100644
--- a/pandas/core/_numba/kernels/__init__.py
+++ b/pandas/core/_numba/kernels/__init__.py
@@ -1,3 +1,4 @@
from pandas.core._numba.kernels.mean_ import sliding_mean
+from pandas.core._numba.kernels.sum_ import sliding_sum
-__all__ = ["sliding_mean"]
+__all__ = ["sliding_mean", "sliding_sum"]
diff --git a/pandas/core/_numba/kernels/mean_.py b/pandas/core/_numba/kernels/mean_.py
index 32ea505513ed0..8f67dd9b51c06 100644
--- a/pandas/core/_numba/kernels/mean_.py
+++ b/pandas/core/_numba/kernels/mean_.py
@@ -1,5 +1,5 @@
"""
-Numba 1D aggregation kernels that can be shared by
+Numba 1D mean kernels that can be shared by
* Dataframe / Series
* groupby
* rolling / expanding
@@ -11,20 +11,7 @@
import numba
import numpy as np
-
-@numba.jit(nopython=True, nogil=True, parallel=False)
-def is_monotonic_increasing(bounds: np.ndarray) -> bool:
- """Check if int64 values are monotonically increasing."""
- n = len(bounds)
- if n < 2:
- return True
- prev = bounds[0]
- for i in range(1, n):
- cur = bounds[i]
- if cur < prev:
- return False
- prev = cur
- return True
+from pandas.core._numba.kernels.shared import is_monotonic_increasing
@numba.jit(nopython=True, nogil=True, parallel=False)
diff --git a/pandas/core/_numba/kernels/shared.py b/pandas/core/_numba/kernels/shared.py
new file mode 100644
index 0000000000000..d84e409ca879d
--- /dev/null
+++ b/pandas/core/_numba/kernels/shared.py
@@ -0,0 +1,17 @@
+import numba
+import numpy as np
+
+
+@numba.jit(numba.boolean(numba.int64[:]), nopython=True, nogil=True, parallel=False)
+def is_monotonic_increasing(bounds: np.ndarray) -> bool:
+ """Check if int64 values are monotonically increasing."""
+ n = len(bounds)
+ if n < 2:
+ return True
+ prev = bounds[0]
+ for i in range(1, n):
+ cur = bounds[i]
+ if cur < prev:
+ return False
+ prev = cur
+ return True
diff --git a/pandas/core/_numba/kernels/sum_.py b/pandas/core/_numba/kernels/sum_.py
new file mode 100644
index 0000000000000..c2e81b4990ba9
--- /dev/null
+++ b/pandas/core/_numba/kernels/sum_.py
@@ -0,0 +1,98 @@
+"""
+Numba 1D sum kernels that can be shared by
+* Dataframe / Series
+* groupby
+* rolling / expanding
+
+Mirrors pandas/_libs/window/aggregation.pyx
+"""
+from __future__ import annotations
+
+import numba
+import numpy as np
+
+from pandas.core._numba.kernels.shared import is_monotonic_increasing
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def add_sum(
+ val: float, nobs: int, sum_x: float, compensation: float
+) -> tuple[int, float, float]:
+ if not np.isnan(val):
+ nobs += 1
+ y = val - compensation
+ t = sum_x + y
+ compensation = t - sum_x - y
+ sum_x = t
+ return nobs, sum_x, compensation
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def remove_sum(
+ val: float, nobs: int, sum_x: float, compensation: float
+) -> tuple[int, float, float]:
+ if not np.isnan(val):
+ nobs -= 1
+ y = -val - compensation
+ t = sum_x + y
+ compensation = t - sum_x - y
+ sum_x = t
+ return nobs, sum_x, compensation
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def sliding_sum(
+ values: np.ndarray,
+ start: np.ndarray,
+ end: np.ndarray,
+ min_periods: int,
+) -> np.ndarray:
+ N = len(start)
+ nobs = 0
+ sum_x = 0.0
+ compensation_add = 0.0
+ compensation_remove = 0.0
+
+ is_monotonic_increasing_bounds = is_monotonic_increasing(
+ start
+ ) and is_monotonic_increasing(end)
+
+ output = np.empty(N, dtype=np.float64)
+
+ for i in range(N):
+ s = start[i]
+ e = end[i]
+ if i == 0 or not is_monotonic_increasing_bounds:
+ for j in range(s, e):
+ val = values[j]
+ nobs, sum_x, compensation_add = add_sum(
+ val, nobs, sum_x, compensation_add
+ )
+ else:
+ for j in range(start[i - 1], s):
+ val = values[j]
+ nobs, sum_x, compensation_remove = remove_sum(
+ val, nobs, sum_x, compensation_remove
+ )
+
+ for j in range(end[i - 1], e):
+ val = values[j]
+ nobs, sum_x, compensation_add = add_sum(
+ val, nobs, sum_x, compensation_add
+ )
+
+ if nobs == 0 == nobs:
+ result = 0.0
+ elif nobs >= min_periods:
+ result = sum_x
+ else:
+ result = np.nan
+
+ output[i] = result
+
+ if not is_monotonic_increasing_bounds:
+ nobs = 0
+ sum_x = 0.0
+ compensation_remove = 0.0
+
+ return output
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 274c78c30aec4..b04aab3755b91 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -1345,15 +1345,16 @@ def sum(
if maybe_use_numba(engine):
if self.method == "table":
func = generate_manual_numpy_nan_agg_with_axis(np.nansum)
+ return self.apply(
+ func,
+ raw=True,
+ engine=engine,
+ engine_kwargs=engine_kwargs,
+ )
else:
- func = np.nansum
+ from pandas.core._numba.kernels import sliding_sum
- return self.apply(
- func,
- raw=True,
- engine=engine,
- engine_kwargs=engine_kwargs,
- )
+ return self._numba_apply(sliding_sum, "rolling_sum", engine_kwargs)
window_func = window_aggregations.roll_sum
return self._apply(window_func, name="sum", **kwargs)
diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py
index d47b3e856cb25..9fd4bd422178a 100644
--- a/pandas/tests/window/test_numba.py
+++ b/pandas/tests/window/test_numba.py
@@ -59,7 +59,7 @@ def test_numba_vs_cython_rolling_methods(
expected = getattr(roll, method)(engine="cython")
# Check the cache
- if method != "mean":
+ if method not in ("mean", "sum"):
assert (
getattr(np, f"nan{method}"),
"Rolling_apply_single",
@@ -67,7 +67,9 @@ def test_numba_vs_cython_rolling_methods(
tm.assert_equal(result, expected)
- @pytest.mark.parametrize("data", [DataFrame(np.eye(5)), Series(range(5))])
+ @pytest.mark.parametrize(
+ "data", [DataFrame(np.eye(5)), Series(range(5), name="foo")]
+ )
def test_numba_vs_cython_expanding_methods(
self, data, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
@@ -82,7 +84,7 @@ def test_numba_vs_cython_expanding_methods(
expected = getattr(expand, method)(engine="cython")
# Check the cache
- if method != "mean":
+ if method not in ("mean", "sum"):
assert (
getattr(np, f"nan{method}"),
"Expanding_apply_single",
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
This also starts to add a shared aggregation function (sum) that can shared between rolling/groupby/DataFrame when using the numba engine.
```
df = pd.DataFrame(np.ones((10000, 1000)))
roll = df.rolling(10)
roll.sum(engine="numba", engine_kwargs={"nopython": True, "nogil": True, "parallel": True})
%timeit roll.sum(engine="numba", engine_kwargs={"nopython": True, "nogil": True, "parallel": True})
211 ms ± 12.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) <- PR
424 ms ± 9.23 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) <- master
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/44176 | 2021-10-25T04:43:08Z | 2021-10-28T02:07:11Z | 2021-10-28T02:07:11Z | 2021-10-29T02:32:13Z |
TST: tests using invalid_scalar fixture | diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index bed64efc690ec..0e413f81834b2 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -56,6 +56,7 @@
is_datetime64tz_dtype,
is_dtype_equal,
is_integer,
+ is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
@@ -927,6 +928,14 @@ def __getitem__(
indices = np.arange(len(self), dtype=np.int32)[key]
return self.take(indices)
+ elif not is_list_like(key):
+ # e.g. "foo" or 2.5
+ # exception message copied from numpy
+ raise IndexError(
+ r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis "
+ r"(`None`) and integer or boolean arrays are valid indices"
+ )
+
else:
# TODO: I think we can avoid densifying when masking a
# boolean SparseArray with another. Need to look at the
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 4e3bd05d2cc8d..58b4a0c9f9242 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -322,6 +322,13 @@ def __getitem__(
elif item[1] is Ellipsis:
item = item[0]
+ if is_scalar(item) and not is_integer(item):
+ # e.g. "foo" or 2.5
+ # exception message copied from numpy
+ raise IndexError(
+ r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis "
+ r"(`None`) and integer or boolean arrays are valid indices"
+ )
# We are not an array indexer, so maybe e.g. a slice or integer
# indexer. We dispatch to pyarrow.
value = self._data[item]
@@ -392,6 +399,11 @@ def _cmp_method(self, other, op):
# TODO(ARROW-9429): Add a .to_numpy() to ChunkedArray
return BooleanArray._from_sequence(result.to_pandas().values)
+ def insert(self, loc: int, item):
+ if not isinstance(item, str) and item is not libmissing.NA:
+ raise TypeError("Scalar must be NA or str")
+ return super().insert(loc, item)
+
def __setitem__(self, key: int | slice | np.ndarray, value: Any) -> None:
"""Set one or more values inplace.
diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py
index 7efd3bdb6920a..73bff29305f20 100644
--- a/pandas/tests/extension/base/getitem.py
+++ b/pandas/tests/extension/base/getitem.py
@@ -120,6 +120,33 @@ def test_getitem_scalar(self, data):
result = pd.Series(data)[0]
assert isinstance(result, data.dtype.type)
+ def test_getitem_invalid(self, data):
+ # TODO: box over scalar, [scalar], (scalar,)?
+
+ msg = (
+ r"only integers, slices \(`:`\), ellipsis \(`...`\), numpy.newaxis "
+ r"\(`None`\) and integer or boolean arrays are valid indices"
+ )
+ with pytest.raises(IndexError, match=msg):
+ data["foo"]
+ with pytest.raises(IndexError, match=msg):
+ data[2.5]
+
+ ub = len(data)
+ msg = "|".join(
+ [
+ "list index out of range", # json
+ "index out of bounds", # pyarrow
+ "Out of bounds access", # Sparse
+ f"index {ub+1} is out of bounds for axis 0 with size {ub}",
+ f"index -{ub+1} is out of bounds for axis 0 with size {ub}",
+ ]
+ )
+ with pytest.raises(IndexError, match=msg):
+ data[ub + 1]
+ with pytest.raises(IndexError, match=msg):
+ data[-ub - 1]
+
def test_getitem_scalar_na(self, data_missing, na_cmp, na_value):
result = data_missing[0]
assert na_cmp(result, na_value)
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 0392ea794237c..a2d100db81a2c 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -367,3 +367,11 @@ def test_delitem_series(self, data):
expected = ser[taker]
del ser[1]
self.assert_series_equal(ser, expected)
+
+ def test_setitem_invalid(self, data, invalid_scalar):
+ msg = "" # messages vary by subclass, so we do not test it
+ with pytest.raises((ValueError, TypeError), match=msg):
+ data[0] = invalid_scalar
+
+ with pytest.raises((ValueError, TypeError), match=msg):
+ data[:] = invalid_scalar
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 2eef828288e59..309d865bc7452 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -32,7 +32,10 @@
from pandas._typing import type_t
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
-from pandas.core.dtypes.common import pandas_dtype
+from pandas.core.dtypes.common import (
+ is_list_like,
+ pandas_dtype,
+)
import pandas as pd
from pandas.api.extensions import (
@@ -103,6 +106,13 @@ def __getitem__(self, item):
elif isinstance(item, slice):
# slice
return type(self)(self.data[item])
+ elif not is_list_like(item):
+ # e.g. "foo" or 2.5
+ # exception message copied from numpy
+ raise IndexError(
+ r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis "
+ r"(`None`) and integer or boolean arrays are valid indices"
+ )
else:
item = pd.api.indexers.check_array_indexer(self, item)
if is_bool_dtype(item.dtype):
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 0e3e26e7e9500..e60f7769270bd 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -363,6 +363,11 @@ def test_concat(self, data, in_frame):
class TestSetitem(BaseNumPyTests, base.BaseSetitemTests):
+ @skip_nested
+ def test_setitem_invalid(self, data, invalid_scalar):
+ # object dtype can hold anything, so doesn't raise
+ super().test_setitem_invalid(data, invalid_scalar)
+
@skip_nested
def test_setitem_sequence_broadcasts(self, data, box_in_series):
# ValueError: cannot set using a list-like indexer with a different
diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py
index 06b07968f949e..af86c359c4c00 100644
--- a/pandas/tests/extension/test_string.py
+++ b/pandas/tests/extension/test_string.py
@@ -160,13 +160,6 @@ def test_value_counts(self, all_data, dropna):
def test_value_counts_with_normalize(self, data):
pass
- def test_insert_invalid(self, data, invalid_scalar, request):
- if data.dtype.storage == "pyarrow":
- mark = pytest.mark.xfail(reason="casts invalid_scalar to string")
- request.node.add_marker(mark)
-
- super().test_insert_invalid(data, invalid_scalar)
-
class TestCasting(base.BaseCastingTests):
pass
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 7566c17eda9e6..ea76a4b4b1cfc 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -403,6 +403,33 @@ def test_insert_base(self, index):
# test 0th element
assert index[0:4].equals(result.insert(0, index[0]))
+ def test_insert_out_of_bounds(self, index):
+ # TypeError/IndexError matches what np.insert raises in these cases
+
+ if len(index) > 0:
+ err = TypeError
+ else:
+ err = IndexError
+ if len(index) == 0:
+ # 0 vs 0.5 in error message varies with numpy version
+ msg = "index (0|0.5) is out of bounds for axis 0 with size 0"
+ else:
+ msg = "slice indices must be integers or None or have an __index__ method"
+ with pytest.raises(err, match=msg):
+ index.insert(0.5, "foo")
+
+ msg = "|".join(
+ [
+ r"index -?\d+ is out of bounds for axis 0 with size \d+",
+ "loc must be an integer between",
+ ]
+ )
+ with pytest.raises(IndexError, match=msg):
+ index.insert(len(index) + 1, 1)
+
+ with pytest.raises(IndexError, match=msg):
+ index.insert(-len(index) - 1, 1)
+
def test_delete_base(self, index):
if not len(index):
return
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44175 | 2021-10-25T02:20:29Z | 2021-10-30T23:52:25Z | 2021-10-30T23:52:25Z | 2021-10-31T16:18:48Z |
CLN/TST: address TODOs/FIXMES #2 | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 07cef290c8919..8a2ba69a61ed9 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1517,8 +1517,11 @@ def _cython_agg_general(
if numeric_only:
if is_ser and not is_numeric_dtype(self._selected_obj.dtype):
# GH#41291 match Series behavior
+ kwd_name = "numeric_only"
+ if how in ["any", "all"]:
+ kwd_name = "bool_only"
raise NotImplementedError(
- f"{type(self).__name__}.{how} does not implement numeric_only."
+ f"{type(self).__name__}.{how} does not implement {kwd_name}."
)
elif not is_ser:
data = data.get_numeric_data(copy=False)
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 8534d4f6c9e59..41ef824afc2a7 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -600,9 +600,7 @@ def _intersection(self, other: Index, sort=False):
new_index = new_index[::-1]
if sort is None:
- # TODO: can revert to just `if sort is None` after GH#43666
- if new_index.step < 0:
- new_index = new_index[::-1]
+ new_index = new_index.sort_values()
return new_index
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 20e68742e4075..9795e1f7141ee 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4169,7 +4169,7 @@ def map(self, arg, na_action=None) -> Series:
3 I am a rabbit
dtype: object
"""
- new_values = super()._map_values(arg, na_action=na_action)
+ new_values = self._map_values(arg, na_action=na_action)
return self._constructor(new_values, index=self.index).__finalize__(
self, method="map"
)
@@ -4396,8 +4396,11 @@ def _reduce(
else:
# dispatch to numpy arrays
if numeric_only:
+ kwd_name = "numeric_only"
+ if name in ["any", "all"]:
+ kwd_name = "bool_only"
raise NotImplementedError(
- f"Series.{name} does not implement numeric_only."
+ f"Series.{name} does not implement {kwd_name}."
)
with np.errstate(all="ignore"):
return op(delegate, skipna=skipna, **kwds)
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index e9b4ceafddfd5..461dbe5575022 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -1,5 +1,4 @@
import decimal
-import math
import operator
import numpy as np
@@ -70,54 +69,7 @@ def data_for_grouping():
return DecimalArray([b, b, na, na, a, a, b, c])
-class BaseDecimal:
- @classmethod
- def assert_series_equal(cls, left, right, *args, **kwargs):
- def convert(x):
- # need to convert array([Decimal(NaN)], dtype='object') to np.NaN
- # because Series[object].isnan doesn't recognize decimal(NaN) as
- # NA.
- try:
- return math.isnan(x)
- except TypeError:
- return False
-
- if left.dtype == "object":
- left_na = left.apply(convert)
- else:
- left_na = left.isna()
- if right.dtype == "object":
- right_na = right.apply(convert)
- else:
- right_na = right.isna()
-
- tm.assert_series_equal(left_na, right_na)
- return tm.assert_series_equal(left[~left_na], right[~right_na], *args, **kwargs)
-
- @classmethod
- def assert_frame_equal(cls, left, right, *args, **kwargs):
- # TODO(EA): select_dtypes
- tm.assert_index_equal(
- left.columns,
- right.columns,
- exact=kwargs.get("check_column_type", "equiv"),
- check_names=kwargs.get("check_names", True),
- check_exact=kwargs.get("check_exact", False),
- check_categorical=kwargs.get("check_categorical", True),
- obj=f"{kwargs.get('obj', 'DataFrame')}.columns",
- )
-
- decimals = (left.dtypes == "decimal").index
-
- for col in decimals:
- cls.assert_series_equal(left[col], right[col], *args, **kwargs)
-
- left = left.drop(columns=decimals)
- right = right.drop(columns=decimals)
- tm.assert_frame_equal(left, right, *args, **kwargs)
-
-
-class TestDtype(BaseDecimal, base.BaseDtypeTests):
+class TestDtype(base.BaseDtypeTests):
def test_hashable(self, dtype):
pass
@@ -129,19 +81,19 @@ def test_infer_dtype(self, data, data_missing, skipna):
assert infer_dtype(data_missing, skipna=skipna) == "unknown-array"
-class TestInterface(BaseDecimal, base.BaseInterfaceTests):
+class TestInterface(base.BaseInterfaceTests):
pass
-class TestConstructors(BaseDecimal, base.BaseConstructorsTests):
+class TestConstructors(base.BaseConstructorsTests):
pass
-class TestReshaping(BaseDecimal, base.BaseReshapingTests):
+class TestReshaping(base.BaseReshapingTests):
pass
-class TestGetitem(BaseDecimal, base.BaseGetitemTests):
+class TestGetitem(base.BaseGetitemTests):
def test_take_na_value_other_decimal(self):
arr = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("2.0")])
result = arr.take([0, -1], allow_fill=True, fill_value=decimal.Decimal("-1.0"))
@@ -149,7 +101,7 @@ def test_take_na_value_other_decimal(self):
self.assert_extension_array_equal(result, expected)
-class TestMissing(BaseDecimal, base.BaseMissingTests):
+class TestMissing(base.BaseMissingTests):
pass
@@ -175,7 +127,7 @@ class TestBooleanReduce(Reduce, base.BaseBooleanReduceTests):
pass
-class TestMethods(BaseDecimal, base.BaseMethodsTests):
+class TestMethods(base.BaseMethodsTests):
@pytest.mark.parametrize("dropna", [True, False])
def test_value_counts(self, all_data, dropna, request):
all_data = all_data[:10]
@@ -200,20 +152,20 @@ def test_value_counts_with_normalize(self, data):
return super().test_value_counts_with_normalize(data)
-class TestCasting(BaseDecimal, base.BaseCastingTests):
+class TestCasting(base.BaseCastingTests):
pass
-class TestGroupby(BaseDecimal, base.BaseGroupbyTests):
+class TestGroupby(base.BaseGroupbyTests):
def test_groupby_agg_extension(self, data_for_grouping):
super().test_groupby_agg_extension(data_for_grouping)
-class TestSetitem(BaseDecimal, base.BaseSetitemTests):
+class TestSetitem(base.BaseSetitemTests):
pass
-class TestPrinting(BaseDecimal, base.BasePrintingTests):
+class TestPrinting(base.BasePrintingTests):
def test_series_repr(self, data):
# Overriding this base test to explicitly test that
# the custom _formatter is used
@@ -282,7 +234,7 @@ def test_astype_dispatches(frame):
assert result.dtype.context.prec == ctx.prec
-class TestArithmeticOps(BaseDecimal, base.BaseArithmeticOpsTests):
+class TestArithmeticOps(base.BaseArithmeticOpsTests):
def check_opname(self, s, op_name, other, exc=None):
super().check_opname(s, op_name, other, exc=None)
@@ -313,7 +265,7 @@ def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
super()._check_divmod_op(s, op, other, exc=None)
-class TestComparisonOps(BaseDecimal, base.BaseComparisonOpsTests):
+class TestComparisonOps(base.BaseComparisonOpsTests):
def test_compare_scalar(self, data, all_compare_operators):
op_name = all_compare_operators
s = pd.Series(data)
diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py
index 9df5f79aa7d19..d8511581f0e94 100644
--- a/pandas/tests/frame/methods/test_shift.py
+++ b/pandas/tests/frame/methods/test_shift.py
@@ -211,7 +211,7 @@ def test_shift_axis1_multiple_blocks_with_int_fill(self):
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_frame):
- # TODO: remove this test when tshift deprecation is enforced
+ # TODO(2.0): remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodFrame()
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 704af61ee2390..1bb4b24266de0 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2508,7 +2508,7 @@ def check_views():
# TODO: we can call check_views if we stop consolidating
# in setitem_with_indexer
- # FIXME: until GH#35417, iloc.setitem into EA values does not preserve
+ # FIXME(GH#35417): until GH#35417, iloc.setitem into EA values does not preserve
# view, so we have to check in the other direction
# df.iloc[0, 2] = 0
# if not copy:
@@ -2522,7 +2522,7 @@ def check_views():
else:
assert a[0] == a.dtype.type(1)
assert b[0] == b.dtype.type(3)
- # FIXME: enable after GH#35417
+ # FIXME(GH#35417): enable after GH#35417
# assert c[0] == 1
assert df.iloc[0, 2] == 1
else:
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index a7f6c47db916d..3a8ae03015628 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -929,13 +929,11 @@ def test_all_any_params(self):
with tm.assert_produces_warning(FutureWarning):
s.all(bool_only=True, level=0)
- # bool_only is not implemented alone.
- # TODO GH38810 change this error message to:
- # "Series.any does not implement bool_only"
- msg = "Series.any does not implement numeric_only"
+ # GH#38810 bool_only is not implemented alone.
+ msg = "Series.any does not implement bool_only"
with pytest.raises(NotImplementedError, match=msg):
s.any(bool_only=True)
- msg = "Series.all does not implement numeric_only."
+ msg = "Series.all does not implement bool_only."
with pytest.raises(NotImplementedError, match=msg):
s.all(bool_only=True)
diff --git a/pandas/tests/series/methods/test_rename.py b/pandas/tests/series/methods/test_rename.py
index e00e9a894d340..2930c657eb3b2 100644
--- a/pandas/tests/series/methods/test_rename.py
+++ b/pandas/tests/series/methods/test_rename.py
@@ -22,11 +22,13 @@ def test_rename(self, datetime_series):
renamed2 = ts.rename(rename_dict)
tm.assert_series_equal(renamed, renamed2)
+ def test_rename_partial_dict(self):
# partial dict
- s = Series(np.arange(4), index=["a", "b", "c", "d"], dtype="int64")
- renamed = s.rename({"b": "foo", "d": "bar"})
+ ser = Series(np.arange(4), index=["a", "b", "c", "d"], dtype="int64")
+ renamed = ser.rename({"b": "foo", "d": "bar"})
tm.assert_index_equal(renamed.index, Index(["a", "foo", "c", "bar"]))
+ def test_rename_retain_index_name(self):
# index with name
renamer = Series(
np.arange(4), index=Index(["a", "b", "c", "d"], name="name"), dtype="int64"
@@ -35,38 +37,38 @@ def test_rename(self, datetime_series):
assert renamed.index.name == renamer.index.name
def test_rename_by_series(self):
- s = Series(range(5), name="foo")
+ ser = Series(range(5), name="foo")
renamer = Series({1: 10, 2: 20})
- result = s.rename(renamer)
+ result = ser.rename(renamer)
expected = Series(range(5), index=[0, 10, 20, 3, 4], name="foo")
tm.assert_series_equal(result, expected)
def test_rename_set_name(self):
- s = Series(range(4), index=list("abcd"))
+ ser = Series(range(4), index=list("abcd"))
for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]:
- result = s.rename(name)
+ result = ser.rename(name)
assert result.name == name
- tm.assert_numpy_array_equal(result.index.values, s.index.values)
- assert s.name is None
+ tm.assert_numpy_array_equal(result.index.values, ser.index.values)
+ assert ser.name is None
def test_rename_set_name_inplace(self):
- s = Series(range(3), index=list("abc"))
+ ser = Series(range(3), index=list("abc"))
for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]:
- s.rename(name, inplace=True)
- assert s.name == name
+ ser.rename(name, inplace=True)
+ assert ser.name == name
exp = np.array(["a", "b", "c"], dtype=np.object_)
- tm.assert_numpy_array_equal(s.index.values, exp)
+ tm.assert_numpy_array_equal(ser.index.values, exp)
def test_rename_axis_supported(self):
# Supporting axis for compatibility, detailed in GH-18589
- s = Series(range(5))
- s.rename({}, axis=0)
- s.rename({}, axis="index")
- # FIXME: dont leave commenred-out
+ ser = Series(range(5))
+ ser.rename({}, axis=0)
+ ser.rename({}, axis="index")
+ # FIXME: dont leave commented-out
# TODO: clean up shared index validation
# with pytest.raises(ValueError, match="No axis named 5"):
- # s.rename({}, axis=5)
+ # ser.rename({}, axis=5)
def test_rename_inplace(self, datetime_series):
renamer = lambda x: x.strftime("%Y%m%d")
@@ -81,8 +83,8 @@ class MyIndexer:
pass
ix = MyIndexer()
- s = Series([1, 2, 3]).rename(ix)
- assert s.name is ix
+ ser = Series([1, 2, 3]).rename(ix)
+ assert ser.name is ix
def test_rename_with_custom_indexer_inplace(self):
# GH 27814
@@ -90,15 +92,15 @@ class MyIndexer:
pass
ix = MyIndexer()
- s = Series([1, 2, 3])
- s.rename(ix, inplace=True)
- assert s.name is ix
+ ser = Series([1, 2, 3])
+ ser.rename(ix, inplace=True)
+ assert ser.name is ix
def test_rename_callable(self):
# GH 17407
- s = Series(range(1, 6), index=Index(range(2, 7), name="IntIndex"))
- result = s.rename(str)
- expected = s.rename(lambda i: str(i))
+ ser = Series(range(1, 6), index=Index(range(2, 7), name="IntIndex"))
+ result = ser.rename(str)
+ expected = ser.rename(lambda i: str(i))
tm.assert_series_equal(result, expected)
assert result.name == expected.name
@@ -111,8 +113,8 @@ def test_rename_series_with_multiindex(self):
]
index = MultiIndex.from_arrays(arrays, names=["first", "second"])
- s = Series(np.ones(5), index=index)
- result = s.rename(index={"one": "yes"}, level="second", errors="raise")
+ ser = Series(np.ones(5), index=index)
+ result = ser.rename(index={"one": "yes"}, level="second", errors="raise")
arrays_expected = [
["bar", "baz", "baz", "foo", "qux"],
diff --git a/pandas/tests/series/methods/test_shift.py b/pandas/tests/series/methods/test_shift.py
index df270f3e0f85c..4fb378720d89d 100644
--- a/pandas/tests/series/methods/test_shift.py
+++ b/pandas/tests/series/methods/test_shift.py
@@ -202,7 +202,7 @@ def test_shift_dst(self):
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_series):
- # TODO: remove this test when tshift deprecation is enforced
+ # TODO(2.0): remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodSeries()
diff --git a/pandas/tests/strings/test_api.py b/pandas/tests/strings/test_api.py
index 6cbf2dd606692..974ecc152f17b 100644
--- a/pandas/tests/strings/test_api.py
+++ b/pandas/tests/strings/test_api.py
@@ -71,7 +71,6 @@ def test_api_per_method(
inferred_dtype, values = any_allowed_skipna_inferred_dtype
method_name, args, kwargs = any_string_method
- # TODO: get rid of these xfails
reason = None
if box is Index and values.size == 0:
if method_name in ["partition", "rpartition"] and kwargs.get("expand", True):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44174 | 2021-10-25T00:38:44Z | 2021-10-25T17:18:56Z | 2021-10-25T17:18:55Z | 2021-10-25T17:20:05Z |
TST: Move a consistency ewm test to test_ewm.py | diff --git a/pandas/tests/window/moments/test_moments_consistency_ewm.py b/pandas/tests/window/moments/test_moments_consistency_ewm.py
index b41d2ec23a52d..800ee2164693b 100644
--- a/pandas/tests/window/moments/test_moments_consistency_ewm.py
+++ b/pandas/tests/window/moments/test_moments_consistency_ewm.py
@@ -9,15 +9,6 @@
import pandas._testing as tm
-@pytest.mark.parametrize("func", ["cov", "corr"])
-def test_ewm_pairwise_cov_corr(func, frame):
- result = getattr(frame.ewm(span=10, min_periods=5), func)()
- result = result.loc[(slice(None), 1), 5]
- result.index = result.index.droplevel(1)
- expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])
- tm.assert_series_equal(result, expected, check_names=False)
-
-
def create_mock_weights(obj, com, adjust, ignore_na):
if isinstance(obj, DataFrame):
if not len(obj.columns):
diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py
index 21c0099bbc0e6..23c3a0ef27fef 100644
--- a/pandas/tests/window/test_ewm.py
+++ b/pandas/tests/window/test_ewm.py
@@ -657,3 +657,12 @@ def test_ewm_alpha_arg(series):
s.ewm(span=10.0, alpha=0.5)
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=10.0, alpha=0.5)
+
+
+@pytest.mark.parametrize("func", ["cov", "corr"])
+def test_ewm_pairwise_cov_corr(func, frame):
+ result = getattr(frame.ewm(span=10, min_periods=5), func)()
+ result = result.loc[(slice(None), 1), 5]
+ result.index = result.index.droplevel(1)
+ expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])
+ tm.assert_series_equal(result, expected, check_names=False)
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
Since some tests are not running on windows/moments (#37535), moving a "consistency" ewm tests to the `test_ewm.py`. | https://api.github.com/repos/pandas-dev/pandas/pulls/44171 | 2021-10-24T22:08:32Z | 2021-10-25T12:33:56Z | 2021-10-25T12:33:56Z | 2021-10-25T18:27:41Z |
REF: share ExtensionIndex.insert-> Index.insert | diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index f5fbd4cc4a7fc..38553bc1be8d6 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -37,6 +37,7 @@
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import (
+ CategoricalDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
@@ -641,5 +642,8 @@ def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool:
elif isinstance(dtype, IntervalDtype):
return lib.is_float(obj) or obj is None or obj is libmissing.NA
+ elif isinstance(dtype, CategoricalDtype):
+ return is_valid_na_for_dtype(obj, dtype.categories.dtype)
+
# fallback, default to allowing NaN, None, NA, NaT
return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 05047540c6ccd..e82bd61938f15 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -6432,14 +6432,21 @@ def insert(self, loc: int, item) -> Index:
if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object:
item = self._na_value
+ arr = self._values
+
try:
- item = self._validate_fill_value(item)
- except TypeError:
+ if isinstance(arr, ExtensionArray):
+ res_values = arr.insert(loc, item)
+ return type(self)._simple_new(res_values, name=self.name)
+ else:
+ item = self._validate_fill_value(item)
+ except (TypeError, ValueError):
+ # e.g. trying to insert an integer into a DatetimeIndex
+ # We cannot keep the same dtype, so cast to the (often object)
+ # minimal shared dtype before doing the insert.
dtype = self._find_common_type_compat(item)
return self.astype(dtype).insert(loc, item)
- arr = self._values
-
if arr.dtype != object or not isinstance(
item, (tuple, np.datetime64, np.timedelta64)
):
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index ccd18f54da327..7c7f1b267b5be 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -134,31 +134,6 @@ class ExtensionIndex(Index):
# ---------------------------------------------------------------------
- def insert(self, loc: int, item) -> Index:
- """
- Make new Index inserting new item at location. Follows
- Python list.append semantics for negative values.
-
- Parameters
- ----------
- loc : int
- item : object
-
- Returns
- -------
- new_index : Index
- """
- try:
- result = self._data.insert(loc, item)
- except (ValueError, TypeError):
- # e.g. trying to insert an integer into a DatetimeIndex
- # We cannot keep the same dtype, so cast to the (often object)
- # minimal shared dtype before doing the insert.
- dtype = self._find_common_type_compat(item)
- return self.astype(dtype).insert(loc, item)
- else:
- return type(self)._simple_new(result, name=self.name)
-
def _validate_fill_value(self, value):
"""
Convert value to be insertable to underlying array.
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index bf68c4b79bcea..55d0e5e73418e 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -18,6 +18,7 @@
is_scalar,
)
from pandas.core.dtypes.dtypes import (
+ CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
@@ -739,3 +740,11 @@ def test_is_valid_na_for_dtype_interval(self):
dtype = IntervalDtype("datetime64[ns]", "both")
assert not is_valid_na_for_dtype(NaT, dtype)
+
+ def test_is_valid_na_for_dtype_categorical(self):
+ dtype = CategoricalDtype(categories=[0, 1, 2])
+ assert is_valid_na_for_dtype(np.nan, dtype)
+
+ assert not is_valid_na_for_dtype(NaT, dtype)
+ assert not is_valid_na_for_dtype(np.datetime64("NaT", "ns"), dtype)
+ assert not is_valid_na_for_dtype(np.timedelta64("NaT", "ns"), dtype)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
Prelim to #43930 | https://api.github.com/repos/pandas-dev/pandas/pulls/44170 | 2021-10-24T21:39:28Z | 2021-10-24T23:26:20Z | 2021-10-24T23:26:20Z | 2021-10-25T00:06:42Z |
WEB: Update institutional sponsors list | diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index 82eb023f185c8..9165456d55897 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -112,26 +112,21 @@ sponsors:
url: https://numfocus.org/
logo: /static/img/partners/numfocus.svg
kind: numfocus
- - name: "Anaconda"
- url: https://www.anaconda.com/
- logo: /static/img/partners/anaconda.svg
- kind: partner
- description: "Tom Augspurger, Brock Mendel"
- name: "Two Sigma"
url: https://www.twosigma.com/
logo: /static/img/partners/two_sigma.svg
kind: partner
description: "Phillip Cloud, Jeff Reback"
- - name: "RStudio"
- url: https://www.rstudio.com/
- logo: /static/img/partners/r_studio.svg
- kind: partner
- description: "Wes McKinney"
- name: "Ursa Labs"
url: https://ursalabs.org/
logo: /static/img/partners/ursa_labs.svg
kind: partner
description: "Wes McKinney, Joris Van den Bossche"
+ - name: "d-fine GmbH"
+ url: https://www.d-fine.com/en/
+ logo: /static/img/partners/dfine.svg
+ kind: partner
+ description: "Patrick Hoefler"
- name: "Tidelift"
url: https://tidelift.com
logo: /static/img/partners/tidelift.svg
@@ -153,3 +148,12 @@ sponsors:
- name: "Paris-Saclay Center for Data Science"
url: https://www.datascience-paris-saclay.fr/
kind: partner
+ - name: "Anaconda"
+ url: https://www.anaconda.com/
+ logo: /static/img/partners/anaconda.svg
+ kind: partner
+ - name: "RStudio"
+ url: https://www.rstudio.com/
+ logo: /static/img/partners/r_studio.svg
+ kind: partner
+ description: "Wes McKinney"
diff --git a/web/pandas/static/img/partners/dfine.svg b/web/pandas/static/img/partners/dfine.svg
new file mode 100755
index 0000000000000..d892dded33322
--- /dev/null
+++ b/web/pandas/static/img/partners/dfine.svg
@@ -0,0 +1 @@
+<svg id="Ebene_1" data-name="Ebene 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 343.18 104.19"><defs><style>.cls-1,.cls-2{fill:#003e52;}.cls-1{fill-rule:evenodd;}</style></defs><title>dfine_dunkelblau_cmyk</title><path class="cls-1" d="M138.15,438.72c0-12.41,3.4-30,16-30,11.38,0,15.37,16,15.37,30,0,12.57-3.39,30-15.37,30-13.59,0-16-17.14-16-30Zm31.33,34H181V370l-22.76,3.55v2.51c4.29.44,11.23.89,11.23,9.61v29h-.29c-2.07-4.13-7.69-11.38-17-11.38-20.54,0-27.19,17.44-27.19,35.47s6.65,35.47,27.19,35.47c8.28,0,15.67-6.94,17-10.93h.29v9.42Z" transform="translate(-125 -370)"/><polygon class="cls-2" points="67.52 39.71 67.52 35.67 155.01 35.67 155.01 39.71 67.52 39.71 67.52 39.71"/><path class="cls-1" d="M314.28,472.68V403.25l-22.77,3.55v2.51c4.44.3,11.24.74,11.24,9.61v53.76Z" transform="translate(-125 -370)"/><path class="cls-2" d="M466.26,466.77v4.73c-3.54,1.33-8.56,2.66-17.58,2.66-22.9,0-35.75-12.56-35.75-37.38,0-21.57,8.72-33.54,30-33.54,13.3,0,25.26,7.39,25.26,24.53V431h-43c0,12.41,6.06,37.67,30.73,37.67,3.4,0,7.39-.44,10.34-1.92Zm-41.07-40.63H455c0-7.83-2.66-18-14-18s-15.81,12.56-15.81,18ZM343.5,472.68V418.9c0-8.86-6.95-9.3-11.23-9.6v-2.51L355,403.24v19.5h.3c2.06-5.46,8.71-19.5,23.34-19.5,15.66,0,17.14,8.57,17.14,22.31v47.13H384.27V428.8c0-12.7-.14-18.47-9.89-18.47-8.72,0-19.36,16.7-19.36,30.73v31.62Z" transform="translate(-125 -370)"/><path class="cls-2" d="M248.56,401.63v-3.4c0-19.51,5.32-28.23,27.93-28.23,7.24,0,16.26,2.81,16.26,8.87a6.74,6.74,0,0,1-7.1,7.09c-9.31,0-4.87-11.08-14.33-11.08-11.23,0-11.23,9.9-11.23,21v5.77Zm11.53,71.05H248.55V413.75h11.53v58.93Z" transform="translate(-125 -370)"/><path class="cls-2" d="M301.07,381a7.47,7.47,0,1,1,7.47,7.47,7.47,7.47,0,0,1-7.47-7.47Z" transform="translate(-125 -370)"/></svg>
\ No newline at end of file
| Makes the lists consistent with the governance docs and adds d-fine
cc @MarcoGorelli Would you like to add gousto too? | https://api.github.com/repos/pandas-dev/pandas/pulls/44169 | 2021-10-24T20:47:32Z | 2021-10-24T23:26:35Z | 2021-10-24T23:26:35Z | 2021-11-13T19:32:43Z |
Added two examples to documentation of DataFrame.std | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c3ad87082c8ed..6e0e74249f61b 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10685,10 +10685,11 @@ def var(
_num_ddof_doc,
desc="Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
- "ddof argument",
+ "ddof argument.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
+ examples=_std_examples,
notes=_std_notes,
)
def std(
@@ -11191,6 +11192,34 @@ def _doc_params(cls):
To have the same behaviour as `numpy.std`, use `ddof=0` (instead of the
default `ddof=1`)"""
+_std_examples = """
+Examples
+--------
+>>> df = pd.DataFrame({'person_id':[0,1,2,3],
+... 'age':[21,25,62,43],
+... 'height':[1.61,1.87,1.49,2.01]}
+... ).set_index('person_id')
+>>> df
+ age height
+person_id
+0 21 1.61
+1 25 1.87
+2 62 1.49
+3 43 2.01
+
+The standard deviation of the columns can be found as follows.
+
+>>> df.std()
+age 18.786076
+height 0.237417
+
+Alternatively, `ddof=0` can be set to normalize by N instead of N-1.
+
+>>> df.std(ddof=0)
+age 16.269219
+height 0.205609
+"""
+
_bool_doc = """
{desc}
@@ -11771,7 +11800,6 @@ def _doc_params(cls):
``min_count`` non-NA values are present the result will be NA.
"""
-
def _align_as_utc(
left: NDFrameT, right: NDFrameT, join_index: Index | None
) -> tuple[NDFrameT, NDFrameT]:
| - [x] closes part of #44162
Added two examples to the documentation of DataFrame.std function so that users can understand how to use it with different delta degrees of freedom. | https://api.github.com/repos/pandas-dev/pandas/pulls/44167 | 2021-10-24T16:44:45Z | 2021-10-29T11:17:18Z | null | 2021-10-29T11:20:55Z |
BUG: metadata propagation in .loc, .iloc and Series.to_frame() | diff --git a/MANIFEST.in b/MANIFEST.in
index f616fad6b1557..c6ddc79eaa83c 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -33,6 +33,7 @@ global-exclude *.xlsb
global-exclude *.xlsm
global-exclude *.xlsx
global-exclude *.xpt
+global-exclude *.cpt
global-exclude *.xz
global-exclude *.zip
global-exclude *~
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 86322661a4e8a..ff58e382a9ba2 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -443,7 +443,7 @@ class GroupByMethods:
"var",
],
["direct", "transformation"],
- [1, 2, 5, 10],
+ [1, 5],
]
def setup(self, dtype, method, application, ncols):
@@ -455,6 +455,7 @@ def setup(self, dtype, method, application, ncols):
raise NotImplementedError
if application == "transformation" and method in [
+ "describe",
"head",
"tail",
"unique",
@@ -464,7 +465,12 @@ def setup(self, dtype, method, application, ncols):
# DataFrameGroupBy doesn't have these methods
raise NotImplementedError
- ngroups = 1000
+ if method == "describe":
+ ngroups = 20
+ elif method in ["mad", "skew"]:
+ ngroups = 100
+ else:
+ ngroups = 1000
size = ngroups * 2
rng = np.arange(ngroups).reshape(-1, 1)
rng = np.broadcast_to(rng, (len(rng), ncols))
@@ -491,9 +497,6 @@ def setup(self, dtype, method, application, ncols):
cols = cols[0]
if application == "transformation":
- if method == "describe":
- raise NotImplementedError
-
self.as_group_method = lambda: df.groupby("key")[cols].transform(method)
self.as_field_method = lambda: df.groupby(cols)["key"].transform(method)
else:
diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py
index 153cad403dcc3..39cc09d32981e 100644
--- a/asv_bench/benchmarks/io/csv.py
+++ b/asv_bench/benchmarks/io/csv.py
@@ -525,4 +525,14 @@ def time_to_datetime_format_DD_MM_YYYY(self, cache_dates):
to_datetime(df["date"], cache=cache_dates, format="%d-%m-%Y")
+class ReadCSVIndexCol(StringIORewind):
+ def setup(self):
+ count_elem = 100_000
+ data = "a,b\n" + "1,2\n" * count_elem
+ self.StringIO_input = StringIO(data)
+
+ def time_read_csv_index_col(self):
+ read_csv(self.StringIO_input, index_col="a")
+
+
from ..pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py
index ed44102700dc6..d3168bde0a783 100644
--- a/asv_bench/benchmarks/pandas_vb_common.py
+++ b/asv_bench/benchmarks/pandas_vb_common.py
@@ -17,7 +17,7 @@
try:
import pandas._testing as tm
except ImportError:
- import pandas.util.testing as tm # noqa
+ import pandas.util.testing as tm # noqa:F401
numeric_dtypes = [
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index 406b27dd37ea5..1c53d4adc8c25 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -9,22 +9,24 @@ class Methods:
params = (
["DataFrame", "Series"],
- [10, 1000],
+ [("rolling", {"window": 10}), ("rolling", {"window": 1000}), ("expanding", {})],
["int", "float"],
- ["median", "mean", "max", "min", "std", "count", "skew", "kurt", "sum"],
+ ["median", "mean", "max", "min", "std", "count", "skew", "kurt", "sum", "sem"],
)
- param_names = ["constructor", "window", "dtype", "method"]
+ param_names = ["constructor", "window_kwargs", "dtype", "method"]
- def setup(self, constructor, window, dtype, method):
+ def setup(self, constructor, window_kwargs, dtype, method):
N = 10 ** 5
+ window, kwargs = window_kwargs
arr = (100 * np.random.random(N)).astype(dtype)
- self.roll = getattr(pd, constructor)(arr).rolling(window)
+ obj = getattr(pd, constructor)(arr)
+ self.window = getattr(obj, window)(**kwargs)
- def time_rolling(self, constructor, window, dtype, method):
- getattr(self.roll, method)()
+ def time_method(self, constructor, window_kwargs, dtype, method):
+ getattr(self.window, method)()
- def peakmem_rolling(self, constructor, window, dtype, method):
- getattr(self.roll, method)()
+ def peakmem_method(self, constructor, window_kwargs, dtype, method):
+ getattr(self.window, method)()
class Apply:
@@ -46,19 +48,27 @@ def time_rolling(self, constructor, window, dtype, function, raw):
self.roll.apply(function, raw=raw)
-class NumbaEngine:
+class NumbaEngineMethods:
params = (
["DataFrame", "Series"],
["int", "float"],
- [np.sum, lambda x: np.sum(x) + 5],
- ["sum", "max", "min", "median", "mean"],
+ [("rolling", {"window": 10}), ("expanding", {})],
+ ["sum", "max", "min", "median", "mean", "var", "std"],
[True, False],
[None, 100],
)
- param_names = ["constructor", "dtype", "function", "method", "parallel", "cols"]
+ param_names = [
+ "constructor",
+ "dtype",
+ "window_kwargs",
+ "method",
+ "parallel",
+ "cols",
+ ]
- def setup(self, constructor, dtype, function, method, parallel, cols):
+ def setup(self, constructor, dtype, window_kwargs, method, parallel, cols):
N = 10 ** 3
+ window, kwargs = window_kwargs
shape = (N, cols) if cols is not None and constructor != "Series" else N
arr = (100 * np.random.random(shape)).astype(dtype)
data = getattr(pd, constructor)(arr)
@@ -66,84 +76,88 @@ def setup(self, constructor, dtype, function, method, parallel, cols):
# Warm the cache
with warnings.catch_warnings(record=True):
# Catch parallel=True not being applicable e.g. 1D data
- self.roll = data.rolling(10)
- self.roll.apply(
- function, raw=True, engine="numba", engine_kwargs={"parallel": parallel}
- )
- getattr(self.roll, method)(
+ self.window = getattr(data, window)(**kwargs)
+ getattr(self.window, method)(
engine="numba", engine_kwargs={"parallel": parallel}
)
- self.expand = data.expanding()
- self.expand.apply(
- function, raw=True, engine="numba", engine_kwargs={"parallel": parallel}
- )
-
- def time_rolling_apply(self, constructor, dtype, function, method, parallel, col):
- with warnings.catch_warnings(record=True):
- self.roll.apply(
- function, raw=True, engine="numba", engine_kwargs={"parallel": parallel}
- )
-
- def time_expanding_apply(self, constructor, dtype, function, method, parallel, col):
- with warnings.catch_warnings(record=True):
- self.expand.apply(
- function, raw=True, engine="numba", engine_kwargs={"parallel": parallel}
- )
-
- def time_rolling_methods(self, constructor, dtype, function, method, parallel, col):
+ def test_method(self, constructor, dtype, window_kwargs, method, parallel, cols):
with warnings.catch_warnings(record=True):
- getattr(self.roll, method)(
+ getattr(self.window, method)(
engine="numba", engine_kwargs={"parallel": parallel}
)
-class ExpandingMethods:
-
+class NumbaEngineApply:
params = (
["DataFrame", "Series"],
["int", "float"],
- ["median", "mean", "max", "min", "std", "count", "skew", "kurt", "sum"],
+ [("rolling", {"window": 10}), ("expanding", {})],
+ [np.sum, lambda x: np.sum(x) + 5],
+ [True, False],
+ [None, 100],
)
- param_names = ["constructor", "window", "dtype", "method"]
+ param_names = [
+ "constructor",
+ "dtype",
+ "window_kwargs",
+ "function",
+ "parallel",
+ "cols",
+ ]
- def setup(self, constructor, dtype, method):
- N = 10 ** 5
- N_groupby = 100
- arr = (100 * np.random.random(N)).astype(dtype)
- self.expanding = getattr(pd, constructor)(arr).expanding()
- self.expanding_groupby = (
- pd.DataFrame({"A": arr[:N_groupby], "B": range(N_groupby)})
- .groupby("B")
- .expanding()
- )
+ def setup(self, constructor, dtype, window_kwargs, function, parallel, cols):
+ N = 10 ** 3
+ window, kwargs = window_kwargs
+ shape = (N, cols) if cols is not None and constructor != "Series" else N
+ arr = (100 * np.random.random(shape)).astype(dtype)
+ data = getattr(pd, constructor)(arr)
- def time_expanding(self, constructor, dtype, method):
- getattr(self.expanding, method)()
+ # Warm the cache
+ with warnings.catch_warnings(record=True):
+ # Catch parallel=True not being applicable e.g. 1D data
+ self.window = getattr(data, window)(**kwargs)
+ self.window.apply(
+ function, raw=True, engine="numba", engine_kwargs={"parallel": parallel}
+ )
- def time_expanding_groupby(self, constructor, dtype, method):
- getattr(self.expanding_groupby, method)()
+ def test_method(self, constructor, dtype, window_kwargs, function, parallel, cols):
+ with warnings.catch_warnings(record=True):
+ self.window.apply(
+ function, raw=True, engine="numba", engine_kwargs={"parallel": parallel}
+ )
class EWMMethods:
- params = (["DataFrame", "Series"], [10, 1000], ["int", "float"], ["mean", "std"])
- param_names = ["constructor", "window", "dtype", "method"]
+ params = (
+ ["DataFrame", "Series"],
+ [
+ ({"halflife": 10}, "mean"),
+ ({"halflife": 10}, "std"),
+ ({"halflife": 1000}, "mean"),
+ ({"halflife": 1000}, "std"),
+ (
+ {
+ "halflife": "1 Day",
+ "times": pd.date_range("1900", periods=10 ** 5, freq="23s"),
+ },
+ "mean",
+ ),
+ ],
+ ["int", "float"],
+ )
+ param_names = ["constructor", "kwargs_method", "dtype"]
- def setup(self, constructor, window, dtype, method):
+ def setup(self, constructor, kwargs_method, dtype):
N = 10 ** 5
+ kwargs, method = kwargs_method
arr = (100 * np.random.random(N)).astype(dtype)
- times = pd.date_range("1900", periods=N, freq="23s")
- self.ewm = getattr(pd, constructor)(arr).ewm(halflife=window)
- self.ewm_times = getattr(pd, constructor)(arr).ewm(
- halflife="1 Day", times=times
- )
+ self.method = method
+ self.ewm = getattr(pd, constructor)(arr).ewm(**kwargs)
- def time_ewm(self, constructor, window, dtype, method):
- getattr(self.ewm, method)()
-
- def time_ewm_times(self, constructor, window, dtype, method):
- self.ewm_times.mean()
+ def time_ewm(self, constructor, kwargs_method, dtype):
+ getattr(self.ewm, self.method)()
class VariableWindowMethods(Methods):
@@ -151,7 +165,7 @@ class VariableWindowMethods(Methods):
["DataFrame", "Series"],
["50s", "1h", "1d"],
["int", "float"],
- ["median", "mean", "max", "min", "std", "count", "skew", "kurt", "sum"],
+ ["median", "mean", "max", "min", "std", "count", "skew", "kurt", "sum", "sem"],
)
param_names = ["constructor", "window", "dtype", "method"]
@@ -159,35 +173,35 @@ def setup(self, constructor, window, dtype, method):
N = 10 ** 5
arr = (100 * np.random.random(N)).astype(dtype)
index = pd.date_range("2017-01-01", periods=N, freq="5s")
- self.roll = getattr(pd, constructor)(arr, index=index).rolling(window)
+ self.window = getattr(pd, constructor)(arr, index=index).rolling(window)
class Pairwise:
- params = ([10, 1000, None], ["corr", "cov"], [True, False])
- param_names = ["window", "method", "pairwise"]
+ params = (
+ [({"window": 10}, "rolling"), ({"window": 1000}, "rolling"), ({}, "expanding")],
+ ["corr", "cov"],
+ [True, False],
+ )
+ param_names = ["window_kwargs", "method", "pairwise"]
- def setup(self, window, method, pairwise):
+ def setup(self, kwargs_window, method, pairwise):
N = 10 ** 4
n_groups = 20
+ kwargs, window = kwargs_window
groups = [i for _ in range(N // n_groups) for i in range(n_groups)]
arr = np.random.random(N)
self.df = pd.DataFrame(arr)
- self.df_group = pd.DataFrame({"A": groups, "B": arr}).groupby("A")
+ self.window = getattr(self.df, window)(**kwargs)
+ self.window_group = getattr(
+ pd.DataFrame({"A": groups, "B": arr}).groupby("A"), window
+ )(**kwargs)
- def time_pairwise(self, window, method, pairwise):
- if window is None:
- r = self.df.expanding()
- else:
- r = self.df.rolling(window=window)
- getattr(r, method)(self.df, pairwise=pairwise)
+ def time_pairwise(self, kwargs_window, method, pairwise):
+ getattr(self.window, method)(self.df, pairwise=pairwise)
- def time_groupby(self, window, method, pairwise):
- if window is None:
- r = self.df_group.expanding()
- else:
- r = self.df_group.rolling(window=window)
- getattr(r, method)(self.df, pairwise=pairwise)
+ def time_groupby(self, kwargs_window, method, pairwise):
+ getattr(self.window_group, method)(self.df, pairwise=pairwise)
class Quantile:
@@ -274,10 +288,18 @@ def peakmem_rolling(self, constructor, window_size, dtype, method):
class Groupby:
- params = ["sum", "median", "mean", "max", "min", "kurt", "sum"]
+ params = (
+ ["sum", "median", "mean", "max", "min", "kurt", "sum"],
+ [
+ ("rolling", {"window": 2}),
+ ("rolling", {"window": "30s", "on": "C"}),
+ ("expanding", {}),
+ ],
+ )
- def setup(self, method):
+ def setup(self, method, window_kwargs):
N = 1000
+ window, kwargs = window_kwargs
df = pd.DataFrame(
{
"A": [str(i) for i in range(N)] * 10,
@@ -285,14 +307,10 @@ def setup(self, method):
"C": pd.date_range(start="1900-01-01", freq="1min", periods=N * 10),
}
)
- self.groupby_roll_int = df.groupby("A").rolling(window=2)
- self.groupby_roll_offset = df.groupby("A").rolling(window="30s", on="C")
-
- def time_rolling_int(self, method):
- getattr(self.groupby_roll_int, method)()
+ self.groupby_window = getattr(df.groupby("A"), window)(**kwargs)
- def time_rolling_offset(self, method):
- getattr(self.groupby_roll_offset, method)()
+ def time_method(self, method, window_kwargs):
+ getattr(self.groupby_window, method)()
class GroupbyLargeGroups:
diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index ea9595fd88630..503120e486f5a 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -93,8 +93,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, SS01, SS02, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA02, SA03)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,SS02,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA02,SA03
+ MSG='Validate docstrings (GL01, GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, SS01, SS02, SS03, SS04, SS05, PR03, PR04, PR05, PR08, PRO9, PR10, EX04, RT01, RT04, RT05, SA02, SA03)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,SS02,SS03,SS04,SS05,PR03,PR04,PR05,PR08,PR09,PR10,EX04,RT01,RT04,RT05,SA02,SA03
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/ci/deps/actions-38-locale.yaml b/ci/deps/actions-38-locale.yaml
index 13b132109effb..b7043735d9457 100644
--- a/ci/deps/actions-38-locale.yaml
+++ b/ci/deps/actions-38-locale.yaml
@@ -35,7 +35,7 @@ dependencies:
- xlsxwriter
- xlwt
- moto
- - pyarrow=1.0.0
+ - pyarrow=1.0.1
- pip
- pip:
- pyxlsb
diff --git a/codecov.yml b/codecov.yml
index 893e40db004a6..883f9fbb20729 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -12,6 +12,7 @@ coverage:
patch:
default:
target: '50'
+ informational: true
github_checks:
annotations: false
diff --git a/doc/source/development/contributing_environment.rst b/doc/source/development/contributing_environment.rst
index 4c3c12eb9da92..4ea3701dec029 100644
--- a/doc/source/development/contributing_environment.rst
+++ b/doc/source/development/contributing_environment.rst
@@ -165,7 +165,7 @@ We'll now kick off a three-step process:
At this point you should be able to import pandas from your locally built version::
- $ python # start an interpreter
+ $ python
>>> import pandas
>>> print(pandas.__version__)
0.22.0.dev0+29.g4ad6d4d74
diff --git a/doc/source/reference/io.rst b/doc/source/reference/io.rst
index 82d4ec4950ef1..7aad937d10a18 100644
--- a/doc/source/reference/io.rst
+++ b/doc/source/reference/io.rst
@@ -57,7 +57,7 @@ Excel
ExcelWriter
-.. currentmodule:: pandas.io.json
+.. currentmodule:: pandas
JSON
~~~~
@@ -65,7 +65,10 @@ JSON
:toctree: api/
read_json
- to_json
+ json_normalize
+ DataFrame.to_json
+
+.. currentmodule:: pandas.io.json
.. autosummary::
:toctree: api/
diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst
index a589ad96ca7d9..40ff1049e5820 100644
--- a/doc/source/user_guide/basics.rst
+++ b/doc/source/user_guide/basics.rst
@@ -2051,32 +2051,33 @@ The following table lists all of pandas extension types. For methods requiring `
arguments, strings can be specified as indicated. See the respective
documentation sections for more on each type.
-+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+
-| Kind of Data | Data Type | Scalar | Array | String Aliases | Documentation |
-+===================+===========================+====================+===============================+=========================================+===============================+
-| tz-aware datetime | :class:`DatetimeTZDtype` | :class:`Timestamp` | :class:`arrays.DatetimeArray` | ``'datetime64[ns, <tz>]'`` | :ref:`timeseries.timezone` |
-+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+
-| Categorical | :class:`CategoricalDtype` | (none) | :class:`Categorical` | ``'category'`` | :ref:`categorical` |
-+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+
-| period | :class:`PeriodDtype` | :class:`Period` | :class:`arrays.PeriodArray` | ``'period[<freq>]'``, | :ref:`timeseries.periods` |
-| (time spans) | | | | ``'Period[<freq>]'`` | |
-+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+
-| sparse | :class:`SparseDtype` | (none) | :class:`arrays.SparseArray` | ``'Sparse'``, ``'Sparse[int]'``, | :ref:`sparse` |
-| | | | | ``'Sparse[float]'`` | |
-+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+
-| intervals | :class:`IntervalDtype` | :class:`Interval` | :class:`arrays.IntervalArray` | ``'interval'``, ``'Interval'``, | :ref:`advanced.intervalindex` |
-| | | | | ``'Interval[<numpy_dtype>]'``, | |
-| | | | | ``'Interval[datetime64[ns, <tz>]]'``, | |
-| | | | | ``'Interval[timedelta64[<freq>]]'`` | |
-+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+
-| nullable integer + :class:`Int64Dtype`, ... | (none) | :class:`arrays.IntegerArray` | ``'Int8'``, ``'Int16'``, ``'Int32'``, | :ref:`integer_na` |
-| | | | | ``'Int64'``, ``'UInt8'``, ``'UInt16'``, | |
-| | | | | ``'UInt32'``, ``'UInt64'`` | |
-+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+
-| Strings | :class:`StringDtype` | :class:`str` | :class:`arrays.StringArray` | ``'string'`` | :ref:`text` |
-+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+
-| Boolean (with NA) | :class:`BooleanDtype` | :class:`bool` | :class:`arrays.BooleanArray` | ``'boolean'`` | :ref:`api.arrays.bool` |
-+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+
++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+
+| Kind of Data | Data Type | Scalar | Array | String Aliases |
++=================================================+===============+===========+========+===========+===============================+========================================+
+| :ref:`tz-aware datetime <timeseries.timezone>` | :class:`DatetimeTZDtype` | :class:`Timestamp` | :class:`arrays.DatetimeArray` | ``'datetime64[ns, <tz>]'`` |
+| | | | | |
++-------------------------------------------------+---------------+-----------+--------------------+-------------------------------+----------------------------------------+
+| :ref:`Categorical <categorical>` | :class:`CategoricalDtype` | (none) | :class:`Categorical` | ``'category'`` |
++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+
+| :ref:`period (time spans) <timeseries.periods>` | :class:`PeriodDtype` | :class:`Period` | :class:`arrays.PeriodArray` | ``'period[<freq>]'``, |
+| | | | ``'Period[<freq>]'`` | |
++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+
+| :ref:`sparse <sparse>` | :class:`SparseDtype` | (none) | :class:`arrays.SparseArray` | ``'Sparse'``, ``'Sparse[int]'``, |
+| | | | | ``'Sparse[float]'`` |
++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+
+| :ref:`intervals <advanced.intervalindex>` | :class:`IntervalDtype` | :class:`Interval` | :class:`arrays.IntervalArray` | ``'interval'``, ``'Interval'``, |
+| | | | | ``'Interval[<numpy_dtype>]'``, |
+| | | | | ``'Interval[datetime64[ns, <tz>]]'``, |
+| | | | | ``'Interval[timedelta64[<freq>]]'`` |
++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+
+| :ref:`nullable integer <integer_na>` | :class:`Int64Dtype`, ... | (none) | :class:`arrays.IntegerArray` | ``'Int8'``, ``'Int16'``, ``'Int32'``, |
+| | | | | ``'Int64'``, ``'UInt8'``, ``'UInt16'``,|
+| | | | | ``'UInt32'``, ``'UInt64'`` |
++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+
+| :ref:`Strings <text>` | :class:`StringDtype` | :class:`str` | :class:`arrays.StringArray` | ``'string'`` |
++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+
+| :ref:`Boolean (with NA) <api.arrays.bool>` | :class:`BooleanDtype` | :class:`bool` | :class:`arrays.BooleanArray` | ``'boolean'`` |
++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+
pandas has two ways to store strings.
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index c2ca3df5ca23d..bb7be92bd993b 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -102,7 +102,7 @@ header : int or list of ints, default ``'infer'``
names : array-like, default ``None``
List of column names to use. If file contains no header row, then you should
explicitly pass ``header=None``. Duplicates in this list are not allowed.
-index_col : int, str, sequence of int / str, or False, default ``None``
+index_col : int, str, sequence of int / str, or False, optional, default ``None``
Column(s) to use as the row labels of the ``DataFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
@@ -116,11 +116,19 @@ index_col : int, str, sequence of int / str, or False, default ``None``
of the data file, then a default index is used. If it is larger, then
the first columns are used as index so that the remaining number of fields in
the body are equal to the number of fields in the header.
+
+ The first row after the header is used to determine the number of columns,
+ which will go into the index. If the subsequent rows contain less columns
+ than the first row, they are filled with ``NaN``.
+
+ This can be avoided through ``usecols``. This ensures that the columns are
+ taken as is and the trailing data are ignored.
usecols : list-like or callable, default ``None``
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in ``names`` or
- inferred from the document header row(s). For example, a valid list-like
+ inferred from the document header row(s). If ``names`` are given, the document
+ header row(s) are not taken into account. For example, a valid list-like
``usecols`` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. To
@@ -142,9 +150,15 @@ usecols : list-like or callable, default ``None``
pd.read_csv(StringIO(data))
pd.read_csv(StringIO(data), usecols=lambda x: x.upper() in ["COL1", "COL3"])
- Using this parameter results in much faster parsing time and lower memory usage.
+ Using this parameter results in much faster parsing time and lower memory usage
+ when using the c engine. The Python engine loads the data first before deciding
+ which columns to drop.
squeeze : boolean, default ``False``
If the parsed data only contains one column then return a ``Series``.
+
+ .. deprecated:: 1.4.0
+ Append ``.squeeze("columns")`` to the call to ``{func_name}`` to squeeze
+ the data.
prefix : str, default ``None``
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : boolean, default ``True``
@@ -348,7 +362,7 @@ dialect : str or :class:`python:csv.Dialect` instance, default ``None``
Error handling
++++++++++++++
-error_bad_lines : boolean, default ``None``
+error_bad_lines : boolean, optional, default ``None``
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no ``DataFrame`` will be
returned. If ``False``, then these "bad lines" will dropped from the
@@ -358,7 +372,7 @@ error_bad_lines : boolean, default ``None``
.. deprecated:: 1.3.0
The ``on_bad_lines`` parameter should be used instead to specify behavior upon
encountering a bad line instead.
-warn_bad_lines : boolean, default ``None``
+warn_bad_lines : boolean, optional, default ``None``
If error_bad_lines is ``False``, and warn_bad_lines is ``True``, a warning for
each "bad line" will be output.
@@ -3022,6 +3036,7 @@ Read in the content of the "books.xml" as instance of ``StringIO`` or
Even read XML from AWS S3 buckets such as Python Software Foundation's IRS 990 Form:
.. ipython:: python
+ :okwarning:
df = pd.read_xml(
"s3://irs-form-990/201923199349319487_public.xml",
diff --git a/doc/source/user_guide/options.rst b/doc/source/user_guide/options.rst
index a65bb774b9df8..93448dae578c9 100644
--- a/doc/source/user_guide/options.rst
+++ b/doc/source/user_guide/options.rst
@@ -430,6 +430,10 @@ display.html.use_mathjax True When True, Jupyter notebook
table contents using MathJax, rendering
mathematical expressions enclosed by the
dollar symbol.
+display.max_dir_items 100 The number of columns from a dataframe that
+ are added to dir. These columns can then be
+ suggested by tab completion. 'None' value means
+ unlimited.
io.excel.xls.writer xlwt The default Excel writer engine for
'xls' files.
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 3a991b5338c38..f94f86b4eea58 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -61,7 +61,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "The above output looks very similar to the standard DataFrame HTML representation. But the HTML here has already attached some CSS classes to each cell, even if we haven't yet created any styles. We can view these by calling the [.to_html()][to_html] method, which returns the raw HTML as string, which is useful for further processing or adding to a file - read on in [More about CSS and HTML](#More-About-CSS-and-HTML). Below we will show how we can use these to format the DataFrame to be more communicative. For example how we can build `s`:\n",
+ "The above output looks very similar to the standard DataFrame HTML representation. But the HTML here has already attached some CSS classes to each cell, even if we haven't yet created any styles. We can view these by calling the [.to_html()][tohtml] method, which returns the raw HTML as string, which is useful for further processing or adding to a file - read on in [More about CSS and HTML](#More-About-CSS-and-HTML). Below we will show how we can use these to format the DataFrame to be more communicative. For example how we can build `s`:\n",
"\n",
"[tohtml]: ../reference/api/pandas.io.formats.style.Styler.to_html.rst"
]
@@ -153,7 +153,7 @@
"\n",
"Before adding styles it is useful to show that the [Styler][styler] can distinguish the *display* value from the *actual* value, in both datavlaues and index or columns headers. To control the display value, the text is printed in each cell as string, and we can use the [.format()][formatfunc] and [.format_index()][formatfuncindex] methods to manipulate this according to a [format spec string][format] or a callable that takes a single value and returns a string. It is possible to define this for the whole table, or index, or for individual columns, or MultiIndex levels. \n",
"\n",
- "Additionally, the format function has a **precision** argument to specifically help formatting floats, as well as **decimal** and **thousands** separators to support other locales, an **na_rep** argument to display missing data, and an **escape** argument to help displaying safe-HTML or safe-LaTeX. The default formatter is configured to adopt pandas' regular `display.precision` option, controllable using `with pd.option_context('display.precision', 2):` \n",
+ "Additionally, the format function has a **precision** argument to specifically help formatting floats, as well as **decimal** and **thousands** separators to support other locales, an **na_rep** argument to display missing data, and an **escape** argument to help displaying safe-HTML or safe-LaTeX. The default formatter is configured to adopt pandas' `styler.format.precision` option, controllable using `with pd.option_context('format.precision', 2):` \n",
"\n",
"[styler]: ../reference/api/pandas.io.formats.style.Styler.rst\n",
"[format]: https://docs.python.org/3/library/string.html#format-specification-mini-language\n",
@@ -224,16 +224,15 @@
"\n",
"The index and column headers can be completely hidden, as well subselecting rows or columns that one wishes to exclude. Both these options are performed using the same methods.\n",
"\n",
- "The index can be hidden from rendering by calling [.hide_index()][hideidx] without any arguments, which might be useful if your index is integer based. Similarly column headers can be hidden by calling [.hide_columns()][hidecols] without any arguments.\n",
+ "The index can be hidden from rendering by calling [.hide()][hideidx] without any arguments, which might be useful if your index is integer based. Similarly column headers can be hidden by calling [.hide(axis=\"columns\")][hideidx] without any further arguments.\n",
"\n",
- "Specific rows or columns can be hidden from rendering by calling the same [.hide_index()][hideidx] or [.hide_columns()][hidecols] methods and passing in a row/column label, a list-like or a slice of row/column labels to for the ``subset`` argument.\n",
+ "Specific rows or columns can be hidden from rendering by calling the same [.hide()][hideidx] method and passing in a row/column label, a list-like or a slice of row/column labels to for the ``subset`` argument.\n",
"\n",
- "Hiding does not change the integer arrangement of CSS classes, e.g. hiding the first two columns of a DataFrame means the column class indexing will start at `col2`, since `col0` and `col1` are simply ignored.\n",
+ "Hiding does not change the integer arrangement of CSS classes, e.g. hiding the first two columns of a DataFrame means the column class indexing will still start at `col2`, since `col0` and `col1` are simply ignored.\n",
"\n",
"We can update our `Styler` object from before to hide some data and format the values.\n",
"\n",
- "[hideidx]: ../reference/api/pandas.io.formats.style.Styler.hide_index.rst\n",
- "[hidecols]: ../reference/api/pandas.io.formats.style.Styler.hide_columns.rst"
+ "[hideidx]: ../reference/api/pandas.io.formats.style.Styler.hide.rst"
]
},
{
@@ -242,7 +241,7 @@
"metadata": {},
"outputs": [],
"source": [
- "s = df.style.format('{:.0f}').hide_columns([('Random', 'Tumour'), ('Random', 'Non-Tumour')])\n",
+ "s = df.style.format('{:.0f}').hide([('Random', 'Tumour'), ('Random', 'Non-Tumour')], axis=\"columns\")\n",
"s"
]
},
@@ -1384,7 +1383,7 @@
" .applymap(style_negative, props='color:red;')\\\n",
" .applymap(lambda v: 'opacity: 20%;' if (v < 0.3) and (v > -0.3) else None)\\\n",
" .set_table_styles([{\"selector\": \"th\", \"props\": \"color: blue;\"}])\\\n",
- " .hide_index()\n",
+ " .hide(axis=\"index\")\n",
"style1"
]
},
@@ -1413,12 +1412,9 @@
"## Limitations\n",
"\n",
"- DataFrame only (use `Series.to_frame().style`)\n",
- "- The index and columns must be unique\n",
+ "- The index and columns do not need to be unique, but certain styling functions can only work with unique indexes.\n",
"- No large repr, and construction performance isn't great; although we have some [HTML optimizations](#Optimization)\n",
- "- You can only style the *values*, not the index or columns (except with `table_styles` above)\n",
- "- You can only apply styles, you can't insert new HTML entities\n",
- "\n",
- "Some of these might be addressed in the future. "
+ "- You can only apply styles, you can't insert new HTML entities, except via subclassing."
]
},
{
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index a112c632ceb25..fde9ff0450a12 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -852,7 +852,7 @@ savings time. However, all :class:`DateOffset` subclasses that are an hour or sm
The basic :class:`DateOffset` acts similar to ``dateutil.relativedelta`` (`relativedelta documentation`_)
that shifts a date time by the corresponding calendar duration specified. The
-arithmetic operator (``+``) or the ``apply`` method can be used to perform the shift.
+arithmetic operator (``+``) can be used to perform the shift.
.. ipython:: python
@@ -866,7 +866,6 @@ arithmetic operator (``+``) or the ``apply`` method can be used to perform the s
friday.day_name()
# Add 2 business days (Friday --> Tuesday)
two_business_days = 2 * pd.offsets.BDay()
- two_business_days.apply(friday)
friday + two_business_days
(friday + two_business_days).day_name()
@@ -938,14 +937,14 @@ in the operation).
ts = pd.Timestamp("2014-01-01 09:00")
day = pd.offsets.Day()
- day.apply(ts)
- day.apply(ts).normalize()
+ day + ts
+ (day + ts).normalize()
ts = pd.Timestamp("2014-01-01 22:00")
hour = pd.offsets.Hour()
- hour.apply(ts)
- hour.apply(ts).normalize()
- hour.apply(pd.Timestamp("2014-01-01 23:30")).normalize()
+ hour + ts
+ (hour + ts).normalize()
+ (hour + pd.Timestamp("2014-01-01 23:30")).normalize()
.. _relativedelta documentation: https://dateutil.readthedocs.io/en/stable/relativedelta.html
@@ -1185,16 +1184,16 @@ under the default business hours (9:00 - 17:00), there is no gap (0 minutes) bet
pd.offsets.BusinessHour().rollback(pd.Timestamp("2014-08-02 15:00"))
pd.offsets.BusinessHour().rollforward(pd.Timestamp("2014-08-02 15:00"))
- # It is the same as BusinessHour().apply(pd.Timestamp('2014-08-01 17:00')).
- # And it is the same as BusinessHour().apply(pd.Timestamp('2014-08-04 09:00'))
- pd.offsets.BusinessHour().apply(pd.Timestamp("2014-08-02 15:00"))
+ # It is the same as BusinessHour() + pd.Timestamp('2014-08-01 17:00').
+ # And it is the same as BusinessHour() + pd.Timestamp('2014-08-04 09:00')
+ pd.offsets.BusinessHour() + pd.Timestamp("2014-08-02 15:00")
# BusinessDay results (for reference)
pd.offsets.BusinessHour().rollforward(pd.Timestamp("2014-08-02"))
- # It is the same as BusinessDay().apply(pd.Timestamp('2014-08-01'))
+ # It is the same as BusinessDay() + pd.Timestamp('2014-08-01')
# The result is the same as rollforward because BusinessDay never overlaps.
- pd.offsets.BusinessHour().apply(pd.Timestamp("2014-08-02"))
+ pd.offsets.BusinessHour() + pd.Timestamp("2014-08-02")
``BusinessHour`` regards Saturday and Sunday as holidays. To use arbitrary
holidays, you can use ``CustomBusinessHour`` offset, as explained in the
@@ -1271,6 +1270,36 @@ frequencies. We will refer to these aliases as *offset aliases*.
"U, us", "microseconds"
"N", "nanoseconds"
+.. note::
+
+ When using the offset aliases above, it should be noted that functions
+ such as :func:`date_range` and :func:`bdate_range` will only return
+ timestamps that are in the interval defined by ``start_date`` and
+ ``end_date``. If the ``start_date`` does not correspond to the frequency,
+ the returned timestamps will start at the next valid timestamp, same for
+ ``end_date``, the returned timestamps will stop at the previous valid
+ timestamp.
+
+ For example, for the offset ``MS``, if the ``start_date`` is not the first
+ of the month, the returned timestamps will start with the first day of the
+ next month. If ``end_date`` is not the first day of a month, the last
+ returned timestamp will be the first day of the corresponding month.
+
+ .. ipython:: python
+
+ dates_lst_1 = pd.date_range("2020-01-06", "2020-04-03", freq="MS")
+ dates_lst_1
+
+ dates_lst_2 = pd.date_range("2020-01-01", "2020-04-01", freq="MS")
+ dates_lst_2
+
+ We can see in the above example :func:`date_range` and
+ :func:`bdate_range` will only return the valid timestamps between the
+ ``start_date`` and ``end_date``. If these are not valid timestamps for the
+ given frequency it will roll to the next value for ``start_date``
+ (respectively previous for the ``end_date``)
+
+
Combining aliases
~~~~~~~~~~~~~~~~~
@@ -2073,14 +2102,18 @@ The ``period`` dtype can be used in ``.astype(...)``. It allows one to change th
# change monthly freq to daily freq
pi.astype("period[D]")
- # convert to DatetimeIndex
- pi.astype("datetime64[ns]")
-
# convert to PeriodIndex
dti = pd.date_range("2011-01-01", freq="M", periods=3)
dti
dti.astype("period[M]")
+.. deprecated:: 1.4.0
+ Converting PeriodIndex to DatetimeIndex with ``.astype(...)`` is deprecated and will raise in a future version. Use ``obj.to_timestamp(how).tz_localize(dtype.tz)`` instead.
+
+.. ipython:: python
+
+ # convert to DatetimeIndex
+ pi.to_timestamp(how="start")
PeriodIndex partial string indexing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 36b591c3c3142..3d3ec53948a01 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -150,6 +150,7 @@ and a short caption (:issue:`36267`).
The keyword ``position`` has been added to set the position.
.. ipython:: python
+ :okwarning:
data = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
table = data.to_latex(position='ht')
@@ -161,6 +162,7 @@ one can optionally provide a tuple ``(full_caption, short_caption)``
to add a short caption macro.
.. ipython:: python
+ :okwarning:
data = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
table = data.to_latex(caption=('the full long caption', 'short caption'))
diff --git a/doc/source/whatsnew/v1.3.5.rst b/doc/source/whatsnew/v1.3.5.rst
index 951b05b65c81b..dabd9a650f45b 100644
--- a/doc/source/whatsnew/v1.3.5.rst
+++ b/doc/source/whatsnew/v1.3.5.rst
@@ -15,6 +15,8 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :meth:`Series.equals` when comparing floats with dtype object to None (:issue:`44190`)
+- Fixed regression in :func:`merge_asof` raising error when array was supplied as join key (:issue:`42844`)
+- Fixed regression in creating a :class:`DataFrame` from a timezone-aware :class:`Timestamp` scalar near a Daylight Savings Time transition (:issue:`42505`)
- Fixed performance regression in :func:`read_csv` (:issue:`44106`)
- Fixed regression in :meth:`Series.duplicated` and :meth:`Series.drop_duplicates` when Series has :class:`Categorical` dtype with boolean categories (:issue:`44351`)
-
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index ee1dd58149451..66ccb2043be0b 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -15,6 +15,31 @@ including other versions of pandas.
Enhancements
~~~~~~~~~~~~
+.. _whatsnew_140.enhancements.warning_lineno:
+
+Improved warning messages
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Previously, warning messages may have pointed to lines within the pandas library. Running the script ``setting_with_copy_warning.py``
+
+.. code-block:: python
+
+ import pandas as pd
+
+ df = pd.DataFrame({'a': [1, 2, 3]})
+ df[:2].loc[:, 'a'] = 5
+
+with pandas 1.3 resulted in::
+
+ .../site-packages/pandas/core/indexing.py:1951: SettingWithCopyWarning:
+ A value is trying to be set on a copy of a slice from a DataFrame.
+
+This made it difficult to determine where the warning was being generated from. Now pandas will inspect the call stack, reporting the first line outside of the pandas library that gave rise to the warning. The output of the above script is now::
+
+ setting_with_copy_warning.py:4: SettingWithCopyWarning:
+ A value is trying to be set on a copy of a slice from a DataFrame.
+
+
.. _whatsnew_140.enhancements.numeric_index:
More flexible numeric dtypes for indexes
@@ -70,26 +95,27 @@ See :ref:`here <advanced.numericindex>` for more about :class:`NumericIndex`.
Styler
^^^^^^
-:class:`.Styler` has been further developed in 1.4.0. The following enhancements have been made:
+:class:`.Styler` has been further developed in 1.4.0. The following general enhancements have been made:
+
+ - Styling and formatting of indexes has been added, with :meth:`.Styler.apply_index`, :meth:`.Styler.applymap_index` and :meth:`.Styler.format_index`. These mirror the signature of the methods already used to style and format data values, and work with both HTML, LaTeX and Excel format (:issue:`41893`, :issue:`43101`, :issue:`41993`, :issue:`41995`)
+ - The new method :meth:`.Styler.hide` deprecates :meth:`.Styler.hide_index` and :meth:`.Styler.hide_columns` (:issue:`43758`)
+ - The keyword arguments ``level`` and ``names`` have been added to :meth:`.Styler.hide` (and implicitly to the deprecated methods :meth:`.Styler.hide_index` and :meth:`.Styler.hide_columns`) for additional control of visibility of MultiIndexes and of index names (:issue:`25475`, :issue:`43404`, :issue:`43346`)
+ - The :meth:`.Styler.export` and :meth:`.Styler.use` have been updated to address all of the added functionality from v1.2.0 and v1.3.0 (:issue:`40675`)
+ - Global options under the category ``pd.options.styler`` have been extended to configure default ``Styler`` properties which address formatting, encoding, and HTML and LaTeX rendering. Note that formerly ``Styler`` relied on ``display.html.use_mathjax``, which has now been replaced by ``styler.html.mathjax``. (:issue:`41395`)
+ - Validation of certain keyword arguments, e.g. ``caption`` (:issue:`43368`)
+ - Various bug fixes as recorded below
+
+Additionally there are specific enhancements to the HTML specific rendering:
- - Styling and formatting of indexes has been added, with :meth:`.Styler.apply_index`, :meth:`.Styler.applymap_index` and :meth:`.Styler.format_index`. These mirror the signature of the methods already used to style and format data values, and work with both HTML, LaTeX and Excel format (:issue:`41893`, :issue:`43101`, :issue:`41993`, :issue:`41995`).
- :meth:`.Styler.bar` introduces additional arguments to control alignment and display (:issue:`26070`, :issue:`36419`), and it also validates the input arguments ``width`` and ``height`` (:issue:`42511`).
- - :meth:`.Styler.to_latex` introduces keyword argument ``environment``, which also allows a specific "longtable" entry through a separate jinja2 template (:issue:`41866`).
- :meth:`.Styler.to_html` introduces keyword arguments ``sparse_index``, ``sparse_columns``, ``bold_headers``, ``caption``, ``max_rows`` and ``max_columns`` (:issue:`41946`, :issue:`43149`, :issue:`42972`).
- - Keyword arguments ``level`` and ``names`` added to :meth:`.Styler.hide_index` and :meth:`.Styler.hide_columns` for additional control of visibility of MultiIndexes and index names (:issue:`25475`, :issue:`43404`, :issue:`43346`)
- - Global options have been extended to configure default ``Styler`` properties including formatting and encoding and mathjax options and LaTeX (:issue:`41395`)
- - Naive sparsification is now possible for LaTeX without the multirow package (:issue:`43369`)
- - :meth:`.Styler.to_html` omits CSSStyle rules for hidden table elements (:issue:`43619`)
+ - :meth:`.Styler.to_html` omits CSSStyle rules for hidden table elements as a performance enhancement (:issue:`43619`)
- Custom CSS classes can now be directly specified without string replacement (:issue:`43686`)
- - Bug where row trimming failed to reflect hidden rows (:issue:`43703`, :issue:`44247`)
- - Update and expand the export and use mechanics (:issue:`40675`)
- - New method :meth:`.Styler.hide` added and deprecates :meth:`.Styler.hide_index` and :meth:`.Styler.hide_columns` (:issue:`43758`)
-
-Formerly Styler relied on ``display.html.use_mathjax``, which has now been replaced by ``styler.html.mathjax``.
-There are also bug fixes and deprecations listed below.
+There are also some LaTeX specific enhancements:
-Validation now for ``caption`` arg (:issue:`43368`)
+ - :meth:`.Styler.to_latex` introduces keyword argument ``environment``, which also allows a specific "longtable" entry through a separate jinja2 template (:issue:`41866`).
+ - Naive sparsification is now possible for LaTeX without the necessity of including the multirow package (:issue:`43369`)
.. _whatsnew_140.enhancements.pyarrow_csv_engine:
@@ -182,8 +208,14 @@ Other enhancements
- Added :meth:`.ExponentialMovingWindow.sum` (:issue:`13297`)
- :meth:`Series.str.split` now supports a ``regex`` argument that explicitly specifies whether the pattern is a regular expression. Default is ``None`` (:issue:`43563`, :issue:`32835`, :issue:`25549`)
- :meth:`DataFrame.dropna` now accepts a single label as ``subset`` along with array-like (:issue:`41021`)
+- :class:`ExcelWriter` argument ``if_sheet_exists="overlay"`` option added (:issue:`40231`)
- :meth:`read_excel` now accepts a ``decimal`` argument that allow the user to specify the decimal point when parsing string columns to numeric (:issue:`14403`)
- :meth:`.GroupBy.mean` now supports `Numba <http://numba.pydata.org/>`_ execution with the ``engine`` keyword (:issue:`43731`)
+- :meth:`Timestamp.isoformat` now handles the ``timespec`` argument from the base :class:`datetime` class (:issue:`26131`)
+- :meth:`NaT.to_numpy` ``dtype`` argument is now respected, so ``np.timedelta64`` can be returned (:issue:`44460`)
+- New option ``display.max_dir_items`` customizes the number of columns added to :meth:`Dataframe.__dir__` and suggested for tab completion (:issue:`37996`)
+- :meth:`.Rolling.var`, :meth:`.Expanding.var`, :meth:`.Rolling.std`, :meth:`.Expanding.std` now support `Numba <http://numba.pydata.org/>`_ execution with the ``engine`` keyword (:issue:`44461`)
+
.. ---------------------------------------------------------------------------
@@ -368,6 +400,8 @@ See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for mor
Other API changes
^^^^^^^^^^^^^^^^^
- :meth:`Index.get_indexer_for` no longer accepts keyword arguments (other than 'target'); in the past these would be silently ignored if the index was not unique (:issue:`42310`)
+- Change in the position of the ``min_rows`` argument in :meth:`DataFrame.to_string` due to change in the docstring (:issue:`44304`)
+- Reduction operations for :class:`DataFrame` or :class:`Series` now raise a ``ValueError`` when ``None`` is passed for ``skipna`` (:issue:`44178`)
-
.. ---------------------------------------------------------------------------
@@ -421,6 +455,7 @@ Other Deprecations
- Deprecated dropping of nuisance columns in :class:`Rolling`, :class:`Expanding`, and :class:`EWM` aggregations (:issue:`42738`)
- Deprecated :meth:`Index.reindex` with a non-unique index (:issue:`42568`)
- Deprecated :meth:`.Styler.render` in favour of :meth:`.Styler.to_html` (:issue:`42140`)
+- Deprecated :meth:`.Styler.hide_index` and :meth:`.Styler.hide_columns` in favour of :meth:`.Styler.hide` (:issue:`43758`)
- Deprecated passing in a string column label into ``times`` in :meth:`DataFrame.ewm` (:issue:`43265`)
- Deprecated the 'include_start' and 'include_end' arguments in :meth:`DataFrame.between_time`; in a future version passing 'include_start' or 'include_end' will raise (:issue:`40245`)
- Deprecated the ``squeeze`` argument to :meth:`read_csv`, :meth:`read_table`, and :meth:`read_excel`. Users should squeeze the DataFrame afterwards with ``.squeeze("columns")`` instead. (:issue:`43242`)
@@ -432,6 +467,12 @@ Other Deprecations
- Deprecated casting behavior when setting timezone-aware value(s) into a timezone-aware :class:`Series` or :class:`DataFrame` column when the timezones do not match. Previously this cast to object dtype. In a future version, the values being inserted will be converted to the series or column's existing timezone (:issue:`37605`)
- Deprecated casting behavior when passing an item with mismatched-timezone to :meth:`DatetimeIndex.insert`, :meth:`DatetimeIndex.putmask`, :meth:`DatetimeIndex.where` :meth:`DatetimeIndex.fillna`, :meth:`Series.mask`, :meth:`Series.where`, :meth:`Series.fillna`, :meth:`Series.shift`, :meth:`Series.replace`, :meth:`Series.reindex` (and :class:`DataFrame` column analogues). In the past this has cast to object dtype. In a future version, these will cast the passed item to the index or series's timezone (:issue:`37605`)
- Deprecated the 'errors' keyword argument in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, and meth:`DataFrame.mask`; in a future version the argument will be removed (:issue:`44294`)
+- Deprecated :meth:`PeriodIndex.astype` to ``datetime64[ns]`` or ``DatetimeTZDtype``, use ``obj.to_timestamp(how).tz_localize(dtype.tz)`` instead (:issue:`44398`)
+- Deprecated passing non boolean argument to sort in :func:`concat` (:issue:`41518`)
+- Deprecated passing ``skipna=None`` for :meth:`DataFrame.mad` and :meth:`Series.mad`, pass ``skipna=True`` instead (:issue:`44580`)
+- Deprecated :meth:`DateOffset.apply`, use ``offset + other`` instead (:issue:`44522`)
+- A deprecation warning is now shown for :meth:`DataFrame.to_latex` indicating the arguments signature may change and emulate more the arguments to :meth:`.Styler.to_latex` in future versions (:issue:`44411`)
+-
.. ---------------------------------------------------------------------------
@@ -482,6 +523,7 @@ Performance improvements
- Performance improvement in :meth:`Series.to_frame` (:issue:`43558`)
- Performance improvement in :meth:`Series.mad` (:issue:`43010`)
- Performance improvement in :func:`merge` (:issue:`43332`)
+- Performance improvement in :func:`read_csv` when ``index_col`` was set with a numeric column (:issue:`44158`)
- Performance improvement in :func:`concat` (:issue:`43354`)
-
@@ -510,12 +552,16 @@ Datetimelike
- Bug in inplace addition and subtraction of :class:`DatetimeIndex` or :class:`TimedeltaIndex` with :class:`DatetimeArray` or :class:`TimedeltaArray` (:issue:`43904`)
- Bug in in calling ``np.isnan``, ``np.isfinite``, or ``np.isinf`` on a timezone-aware :class:`DatetimeIndex` incorrectly raising ``TypeError`` (:issue:`43917`)
- Bug in constructing a :class:`Series` from datetime-like strings with mixed timezones incorrectly partially-inferring datetime values (:issue:`40111`)
+- Bug in addition with a :class:`Tick` object and a ``np.timedelta64`` object incorrectly raising instead of returning :class:`Timedelta` (:issue:`44474`)
+- Bug in adding a ``np.timedelta64`` object to a :class:`BusinessDay` or :class:`CustomBusinessDay` object incorrectly raising (:issue:`44532`)
+- Bug in :meth:`Index.insert` for inserting ``np.datetime64``, ``np.timedelta64`` or ``tuple`` into :class:`Index` with ``dtype='object'`` with negative loc adding ``None`` and replacing existing value (:issue:`44509`)
+- Bug in :meth:`Series.mode` with ``DatetimeTZDtype`` incorrectly returning timezone-naive and ``PeriodDtype`` incorrectly raising (:issue:`41927`)
-
Timedelta
^^^^^^^^^
- Bug in division of all-``NaT`` :class:`TimeDeltaIndex`, :class:`Series` or :class:`DataFrame` column with object-dtype arraylike of numbers failing to infer the result as timedelta64-dtype (:issue:`39750`)
--
+- Bug in floor division of ``timedelta64[ns]`` data with a scalar returning garbage values (:issue:`44466`)
Timezones
^^^^^^^^^
@@ -530,6 +576,7 @@ Numeric
- Bug in ``numexpr`` engine still being used when the option ``compute.use_numexpr`` is set to ``False`` (:issue:`32556`)
- Bug in :class:`DataFrame` arithmetic ops with a subclass whose :meth:`_constructor` attribute is a callable other than the subclass itself (:issue:`43201`)
- Bug in arithmetic operations involving :class:`RangeIndex` where the result would have the incorrect ``name`` (:issue:`43962`)
+- Bug in arithmetic operations involving :class:`Series` where the result could have the incorrect ``name`` when the operands having matching NA or matching tuple names (:issue:`44459`)
-
Conversion
@@ -538,6 +585,7 @@ Conversion
- Bug in :class:`Series` constructor returning 0 for missing values with dtype ``int64`` and ``False`` for dtype ``bool`` (:issue:`43017`, :issue:`43018`)
- Bug in :class:`IntegerDtype` not allowing coercion from string dtype (:issue:`25472`)
- Bug in :func:`to_datetime` with ``arg:xr.DataArray`` and ``unit="ns"`` specified raises TypeError (:issue:`44053`)
+- Bug in :meth:`DataFrame.convert_dtypes` not returning the correct type when a subclass does not overload :meth:`_constructor_sliced` (:issue:`43201`)
-
Strings
@@ -547,6 +595,7 @@ Strings
Interval
^^^^^^^^
+- Bug in :meth:`Series.where` with ``IntervalDtype`` incorrectly raising when the ``where`` call should not replace anything (:issue:`44181`)
-
-
@@ -575,13 +624,19 @@ Indexing
- Bug when setting string-backed :class:`Categorical` values that can be parsed to datetimes into a :class:`DatetimeArray` or :class:`Series` or :class:`DataFrame` column backed by :class:`DatetimeArray` failing to parse these strings (:issue:`44236`)
- Bug in :meth:`Series.__setitem__` with an integer dtype other than ``int64`` setting with a ``range`` object unnecessarily upcasting to ``int64`` (:issue:`44261`)
- Bug in :meth:`Series.__setitem__` with a boolean mask indexer setting a listlike value of length 1 incorrectly broadcasting that value (:issue:`44265`)
+- Bug in :meth:`Series.reset_index` not ignoring ``name`` argument when ``drop`` and ``inplace`` are set to ``True`` (:issue:`44575`)
- Bug in :meth:`DataFrame.loc.__setitem__` and :meth:`DataFrame.iloc.__setitem__` with mixed dtypes sometimes failing to operate in-place (:issue:`44345`)
+- Bug in :meth:`DataFrame.loc.__getitem__` incorrectly raising ``KeyError`` when selecting a single column with a boolean key (:issue:`44322`).
+- Bug in indexing on columns with ``loc`` or ``iloc`` using a slice with a negative step with ``ExtensionDtype`` columns incorrectly raising (:issue:`44551`)
+- Bug in :meth:`IntervalIndex.get_indexer_non_unique` returning boolean mask instead of array of integers for a non unique and non monotonic index (:issue:`44084`)
+- Bug in :meth:`IntervalIndex.get_indexer_non_unique` not handling targets of ``dtype`` 'object' with NaNs correctly (:issue:`44482`)
-
Missing
^^^^^^^
- Bug in :meth:`DataFrame.fillna` with limit and no method ignores axis='columns' or ``axis = 1`` (:issue:`40989`)
- Bug in :meth:`DataFrame.fillna` not replacing missing values when using a dict-like ``value`` and duplicate column names (:issue:`43476`)
+- Bug in constructing a :class:`DataFrame` with a dictionary ``np.datetime64`` as a value and ``dtype='timedelta64[ns]'``, or vice-versa, incorrectly casting instead of raising (:issue:`??`)
-
MultiIndex
@@ -603,21 +658,31 @@ I/O
- Bug in unpickling a :class:`Index` with object dtype incorrectly inferring numeric dtypes (:issue:`43188`)
- Bug in :func:`read_csv` where reading multi-header input with unequal lengths incorrectly raising uncontrolled ``IndexError`` (:issue:`43102`)
- Bug in :func:`read_csv`, changed exception class when expecting a file path name or file-like object from ``OSError`` to ``TypeError`` (:issue:`43366`)
+- Bug in :func:`read_csv` and :func:`read_fwf` ignoring all ``skiprows`` except first when ``nrows`` is specified for ``engine='python'`` (:issue:`44021`, :issue:`10261`)
- Bug in :func:`read_json` not handling non-numpy dtypes correctly (especially ``category``) (:issue:`21892`, :issue:`33205`)
- Bug in :func:`json_normalize` where multi-character ``sep`` parameter is incorrectly prefixed to every key (:issue:`43831`)
+- Bug in :func:`json_normalize` where reading data with missing multi-level metadata would not respect errors="ignore" (:issue:`44312`)
- Bug in :func:`read_csv` with :code:`float_precision="round_trip"` which did not skip initial/trailing whitespace (:issue:`43713`)
+- Bug in :func:`read_csv` not applying dtype for ``index_col`` (:issue:`9435`)
- Bug in dumping/loading a :class:`DataFrame` with ``yaml.dump(frame)`` (:issue:`42748`)
--
+- Bug in :class:`ExcelWriter`, where ``engine_kwargs`` were not passed through to all engines (:issue:`43442`)
+- Bug in :func:`read_csv` raising ``ValueError`` when ``parse_dates`` was used with ``MultiIndex`` columns (:issue:`8991`)
+- Bug in :func:`read_csv` converting columns to numeric after date parsing failed (:issue:`11019`)
+- Bug in :func:`read_csv` not replacing ``NaN`` values with ``np.nan`` before attempting date conversion (:issue:`26203`)
+- Bug in :func:`read_csv` raising ``AttributeError`` when attempting to read a .csv file and infer index column dtype from an nullable integer type (:issue:`44079`)
+- :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` with ``compression`` set to ``'zip'`` no longer create a zip file containing a file ending with ".zip". Instead, they try to infer the inner file name more smartly. (:issue:`39465`)
+- Bug in :func:`read_csv` when passing simultaneously a parser in ``date_parser`` and ``parse_dates=False``, the parsing was still called (:issue:`44366`)
Period
^^^^^^
- Bug in adding a :class:`Period` object to a ``np.timedelta64`` object incorrectly raising ``TypeError`` (:issue:`44182`)
- Bug in :meth:`PeriodIndex.to_timestamp` when the index has ``freq="B"`` inferring ``freq="D"`` for its result instead of ``freq="B"`` (:issue:`44105`)
+- Bug in :class:`Period` constructor incorrectly allowing ``np.timedelta64("NaT")`` (:issue:`44507`)
-
Plotting
^^^^^^^^
--
+- When given non-numeric data, :meth:`DataFrame.boxplot` now raises a ``ValueError`` rather than a cryptic ``KeyError`` or ``ZeroDivisionError``, in line with other plotting functions like :meth:`DataFrame.hist`. (:issue:`43480`)
-
Groupby/resample/rolling
@@ -639,8 +704,8 @@ Groupby/resample/rolling
- Fixed bug in :meth:`Series.rolling` and :meth:`DataFrame.rolling` not calculating window bounds correctly for the first row when ``center=True`` and index is decreasing (:issue:`43927`)
- Fixed bug in :meth:`Series.rolling` and :meth:`DataFrame.rolling` for centered datetimelike windows with uneven nanosecond (:issue:`43997`)
- Bug in :meth:`GroupBy.nth` failing on ``axis=1`` (:issue:`43926`)
-- Fixed bug in :meth:`Series.rolling` and :meth:`DataFrame.rolling` not respecting right bound on centered datetime-like windows, if the index contain duplicates (:issue:`#3944`)
-
+- Fixed bug in :meth:`Series.rolling` and :meth:`DataFrame.rolling` not respecting right bound on centered datetime-like windows, if the index contain duplicates (:issue:`3944`)
+- Bug in :meth:`Series.rolling` and :meth:`DataFrame.rolling` when using a :class:`pandas.api.indexers.BaseIndexer` subclass that returned unequal start and end arrays would segfault instead of raising a ``ValueError`` (:issue:`44470`)
Reshaping
^^^^^^^^^
@@ -655,8 +720,12 @@ Reshaping
- Fixed bug in :func:`merge` with :class:`MultiIndex` as column index for the ``on`` argument returning an error when assigning a column internally (:issue:`43734`)
- Bug in :func:`crosstab` would fail when inputs are lists or tuples (:issue:`44076`)
- Bug in :meth:`DataFrame.append` failing to retain ``index.name`` when appending a list of :class:`Series` objects (:issue:`44109`)
+- Fixed metadata propagation in ``.loc``, ``.iloc`` and ``Series.to_frame()`` (:issue:`28283`)
- Fixed metadata propagation in :meth:`Dataframe.apply` method, consequently fixing the same issue for :meth:`Dataframe.transform`, :meth:`Dataframe.nunique` and :meth:`Dataframe.mode` (:issue:`28283`)
- Bug in :meth:`DataFrame.stack` with ``ExtensionDtype`` columns incorrectly raising (:issue:`43561`)
+- Bug in :meth:`Series.unstack` with object doing unwanted type inference on resulting columns (:issue:`44595`)
+- Bug in :class:`MultiIndex` failing join operations with overlapping ``IntervalIndex`` levels (:issue:`44096`)
+- Fixed metadata propagation in ``.loc``, ``.iloc`` and ``Series.to_frame()`` (:issue:`28283`)
-
Sparse
@@ -672,6 +741,7 @@ ExtensionArray
- Bug in :func:`array` failing to preserve :class:`PandasArray` (:issue:`43887`)
- NumPy ufuncs ``np.abs``, ``np.positive``, ``np.negative`` now correctly preserve dtype when called on ExtensionArrays that implement ``__abs__, __pos__, __neg__``, respectively. In particular this is fixed for :class:`TimedeltaArray` (:issue:`43899`)
- Avoid raising ``PerformanceWarning`` about fragmented DataFrame when using many columns with an extension dtype (:issue:`44098`)
+- Bug in :meth:`BooleanArray.__eq__` and :meth:`BooleanArray.__ne__` raising ``TypeError`` on comparison with an incompatible type (like a string). This caused :meth:`DataFrame.replace` to sometimes raise a ``TypeError`` if a nullable boolean column was included (:issue:`44499`)
-
Styler
@@ -684,15 +754,19 @@ Styler
- Bug when rendering a single level MultiIndex (:issue:`43383`).
- Bug when combining non-sparse rendering and :meth:`.Styler.hide_columns` or :meth:`.Styler.hide_index` (:issue:`43464`)
- Bug setting a table style when using multiple selectors in :class:`.Styler` (:issue:`44011`)
--
+- Bugs where row trimming and column trimming failed to reflect hidden rows (:issue:`43703`, :issue:`44247`)
Other
^^^^^
+- Bug in :meth:`DataFrame.astype` with non-unique columns and a :class:`Series` ``dtype`` argument (:issue:`44417`)
- Bug in :meth:`CustomBusinessMonthBegin.__add__` (:meth:`CustomBusinessMonthEnd.__add__`) not applying the extra ``offset`` parameter when beginning (end) of the target month is already a business day (:issue:`41356`)
- Bug in :meth:`RangeIndex.union` with another ``RangeIndex`` with matching (even) ``step`` and starts differing by strictly less than ``step / 2`` (:issue:`44019`)
- Bug in :meth:`RangeIndex.difference` with ``sort=None`` and ``step<0`` failing to sort (:issue:`44085`)
- Bug in :meth:`Series.to_frame` and :meth:`Index.to_frame` ignoring the ``name`` argument when ``name=None`` is explicitly passed (:issue:`44212`)
- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` with ``value=None`` and ExtensionDtypes (:issue:`44270`)
+- Bug in :meth:`FloatingArray.equals` failing to consider two arrays equal if they contain ``np.nan`` values (:issue:`44382`)
+- Bug in :meth:`DataFrame.shift` with ``axis=1`` and ``ExtensionDtype`` columns incorrectly raising when an incompatible ``fill_value`` is passed (:issue:`44564`)
+- Bug in :meth:`DataFrame.diff` when passing a NumPy integer object instead of an ``int`` object (:issue:`44572`)
-
.. ***DO NOT USE THIS SECTION***
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index e2e6bbe8db7cc..5c3db40828fe3 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -98,7 +98,7 @@ class RegisteredOption(NamedTuple):
class OptionError(AttributeError, KeyError):
"""
Exception for pandas.options, backwards compatible with KeyError
- checks
+ checks.
"""
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 92837a43e2b69..969da5aa53e3e 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -338,7 +338,12 @@ cdef class IndexEngine:
missing = np.empty(n_t, dtype=np.intp)
# map each starget to its position in the index
- if stargets and len(stargets) < 5 and self.is_monotonic_increasing:
+ if (
+ stargets and
+ len(stargets) < 5 and
+ not any([checknull(t) for t in stargets]) and
+ self.is_monotonic_increasing
+ ):
# if there are few enough stargets and the index is monotonically
# increasing, then use binary search for each starget
remaining_stargets = set()
@@ -649,7 +654,7 @@ cdef class BaseMultiIndexCodesEngine:
Integers representing one combination each
"""
zt = [target._get_level_values(i) for i in range(target.nlevels)]
- level_codes = [lev.get_indexer(codes) + 1 for lev, codes
+ level_codes = [lev.get_indexer_for(codes) + 1 for lev, codes
in zip(self.levels, zt)]
return self._codes_to_ints(np.array(level_codes, dtype='uint64').T)
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi
index dd1fa0780520c..a7ebd9d0c77ad 100644
--- a/pandas/_libs/lib.pyi
+++ b/pandas/_libs/lib.pyi
@@ -228,3 +228,4 @@ def get_reverse_indexer(
length: int,
) -> npt.NDArray[np.intp]: ...
def is_bool_list(obj: list) -> bool: ...
+def dtypes_all_equal(types: list[DtypeObj]) -> bool: ...
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index ec89e52e2eff7..f527882a9dc9d 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -1462,7 +1462,7 @@ def infer_dtype(value: object, skipna: bool = True) -> str:
for i in range(n):
val = values[i]
- # do not use is_null_datetimelike to keep
+ # do not use checknull to keep
# np.datetime64('nat') and np.timedelta64('nat')
if val is None or util.is_nan(val):
pass
@@ -1704,10 +1704,15 @@ cdef class Validator:
cdef bint _validate(self, ndarray values) except -1:
cdef:
Py_ssize_t i
- Py_ssize_t n = self.n
+ Py_ssize_t n = values.size
+ flatiter it = PyArray_IterNew(values)
for i in range(n):
- if not self.is_valid(values[i]):
+ # The PyArray_GETITEM and PyArray_ITER_NEXT are faster
+ # equivalents to `val = values[i]`
+ val = PyArray_GETITEM(values, PyArray_ITER_DATA(it))
+ PyArray_ITER_NEXT(it)
+ if not self.is_valid(val):
return False
return True
@@ -1717,10 +1722,15 @@ cdef class Validator:
cdef bint _validate_skipna(self, ndarray values) except -1:
cdef:
Py_ssize_t i
- Py_ssize_t n = self.n
+ Py_ssize_t n = values.size
+ flatiter it = PyArray_IterNew(values)
for i in range(n):
- if not self.is_valid_skipna(values[i]):
+ # The PyArray_GETITEM and PyArray_ITER_NEXT are faster
+ # equivalents to `val = values[i]`
+ val = PyArray_GETITEM(values, PyArray_ITER_DATA(it))
+ PyArray_ITER_NEXT(it)
+ if not self.is_valid_skipna(val):
return False
return True
@@ -3028,3 +3038,25 @@ def is_bool_list(obj: list) -> bool:
# Note: we return True for empty list
return True
+
+
+def dtypes_all_equal(list types not None) -> bool:
+ """
+ Faster version for:
+
+ first = types[0]
+ all(is_dtype_equal(first, t) for t in types[1:])
+
+ And assuming all elements in the list are np.dtype/ExtensionDtype objects
+
+ See timings at https://github.com/pandas-dev/pandas/pull/44594
+ """
+ first = types[0]
+ for t in types[1:]:
+ try:
+ if not t == first:
+ return False
+ except (TypeError, AttributeError):
+ return False
+ else:
+ return True
diff --git a/pandas/_libs/missing.pxd b/pandas/_libs/missing.pxd
index 9d32fcd3625db..854dcf2ec9775 100644
--- a/pandas/_libs/missing.pxd
+++ b/pandas/_libs/missing.pxd
@@ -6,9 +6,8 @@ from numpy cimport (
cpdef bint is_matching_na(object left, object right, bint nan_matches_none=*)
-cpdef bint checknull(object val)
-cpdef bint checknull_old(object val)
-cpdef ndarray[uint8_t] isnaobj(ndarray arr)
+cpdef bint checknull(object val, bint inf_as_na=*)
+cpdef ndarray[uint8_t] isnaobj(ndarray arr, bint inf_as_na=*)
cdef bint is_null_datetime64(v)
cdef bint is_null_timedelta64(v)
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index b77db2aec4a08..cd04f4f6e4b3a 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -21,7 +21,6 @@ from pandas._libs.tslibs.nattype cimport (
c_NaT as NaT,
checknull_with_nat,
is_dt64nat,
- is_null_datetimelike,
is_td64nat,
)
from pandas._libs.tslibs.np_datetime cimport (
@@ -99,7 +98,7 @@ cpdef bint is_matching_na(object left, object right, bint nan_matches_none=False
return False
-cpdef bint checknull(object val):
+cpdef bint checknull(object val, bint inf_as_na=False):
"""
Return boolean describing of the input is NA-like, defined here as any
of:
@@ -114,21 +113,27 @@ cpdef bint checknull(object val):
Parameters
----------
val : object
+ inf_as_na : bool, default False
+ Whether to treat INF and -INF as NA values.
Returns
-------
bool
-
- Notes
- -----
- The difference between `checknull` and `checknull_old` is that `checknull`
- does *not* consider INF or NEGINF to be NA.
"""
- return (
- val is C_NA
- or is_null_datetimelike(val, inat_is_null=False)
- or is_decimal_na(val)
- )
+ if val is None or val is NaT or val is C_NA:
+ return True
+ elif util.is_float_object(val) or util.is_complex_object(val):
+ if val != val:
+ return True
+ elif inf_as_na:
+ return val == INF or val == NEGINF
+ return False
+ elif util.is_timedelta64_object(val):
+ return get_timedelta64_value(val) == NPY_NAT
+ elif util.is_datetime64_object(val):
+ return get_datetime64_value(val) == NPY_NAT
+ else:
+ return is_decimal_na(val)
cdef inline bint is_decimal_na(object val):
@@ -138,43 +143,9 @@ cdef inline bint is_decimal_na(object val):
return isinstance(val, cDecimal) and val != val
-cpdef bint checknull_old(object val):
- """
- Return boolean describing of the input is NA-like, defined here as any
- of:
- - None
- - nan
- - INF
- - NEGINF
- - NaT
- - np.datetime64 representation of NaT
- - np.timedelta64 representation of NaT
- - NA
- - Decimal("NaN")
-
- Parameters
- ----------
- val : object
-
- Returns
- -------
- result : bool
-
- Notes
- -----
- The difference between `checknull` and `checknull_old` is that `checknull`
- does *not* consider INF or NEGINF to be NA.
- """
- if checknull(val):
- return True
- elif util.is_float_object(val) or util.is_complex_object(val):
- return val == INF or val == NEGINF
- return False
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
-cpdef ndarray[uint8_t] isnaobj(ndarray arr):
+cpdef ndarray[uint8_t] isnaobj(ndarray arr, bint inf_as_na=False):
"""
Return boolean mask denoting which elements of a 1-D array are na-like,
according to the criteria defined in `checknull`:
@@ -205,53 +176,13 @@ cpdef ndarray[uint8_t] isnaobj(ndarray arr):
result = np.empty(n, dtype=np.uint8)
for i in range(n):
val = arr[i]
- result[i] = checknull(val)
+ result[i] = checknull(val, inf_as_na=inf_as_na)
return result.view(np.bool_)
@cython.wraparound(False)
@cython.boundscheck(False)
-def isnaobj_old(arr: ndarray) -> ndarray:
- """
- Return boolean mask denoting which elements of a 1-D array are na-like,
- defined as being any of:
- - None
- - nan
- - INF
- - NEGINF
- - NaT
- - NA
- - Decimal("NaN")
-
- Parameters
- ----------
- arr : ndarray
-
- Returns
- -------
- result : ndarray (dtype=np.bool_)
- """
- cdef:
- Py_ssize_t i, n
- object val
- ndarray[uint8_t] result
-
- assert arr.ndim == 1, "'arr' must be 1-D."
-
- n = len(arr)
- result = np.zeros(n, dtype=np.uint8)
- for i in range(n):
- val = arr[i]
- result[i] = (
- checknull(val)
- or util.is_float_object(val) and (val == INF or val == NEGINF)
- )
- return result.view(np.bool_)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def isnaobj2d(arr: ndarray) -> ndarray:
+def isnaobj2d(arr: ndarray, inf_as_na: bool = False) -> ndarray:
"""
Return boolean mask denoting which elements of a 2-D array are na-like,
according to the criteria defined in `checknull`:
@@ -270,57 +201,6 @@ def isnaobj2d(arr: ndarray) -> ndarray:
Returns
-------
result : ndarray (dtype=np.bool_)
-
- Notes
- -----
- The difference between `isnaobj2d` and `isnaobj2d_old` is that `isnaobj2d`
- does *not* consider INF or NEGINF to be NA.
- """
- cdef:
- Py_ssize_t i, j, n, m
- object val
- ndarray[uint8_t, ndim=2] result
-
- assert arr.ndim == 2, "'arr' must be 2-D."
-
- n, m = (<object>arr).shape
- result = np.zeros((n, m), dtype=np.uint8)
- for i in range(n):
- for j in range(m):
- val = arr[i, j]
- if checknull(val):
- result[i, j] = 1
- return result.view(np.bool_)
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def isnaobj2d_old(arr: ndarray) -> ndarray:
- """
- Return boolean mask denoting which elements of a 2-D array are na-like,
- according to the criteria defined in `checknull_old`:
- - None
- - nan
- - INF
- - NEGINF
- - NaT
- - np.datetime64 representation of NaT
- - np.timedelta64 representation of NaT
- - NA
- - Decimal("NaN")
-
- Parameters
- ----------
- arr : ndarray
-
- Returns
- -------
- ndarray (dtype=np.bool_)
-
- Notes
- -----
- The difference between `isnaobj2d` and `isnaobj2d_old` is that `isnaobj2d`
- does *not* consider INF or NEGINF to be NA.
"""
cdef:
Py_ssize_t i, j, n, m
@@ -334,7 +214,7 @@ def isnaobj2d_old(arr: ndarray) -> ndarray:
for i in range(n):
for j in range(m):
val = arr[i, j]
- if checknull_old(val):
+ if checknull(val, inf_as_na=inf_as_na):
result[i, j] = 1
return result.view(np.bool_)
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
index e38ed9a20e55b..11de4e60f202d 100644
--- a/pandas/_libs/tslibs/__init__.py
+++ b/pandas/_libs/tslibs/__init__.py
@@ -5,7 +5,6 @@
"NaTType",
"iNaT",
"nat_strings",
- "is_null_datetimelike",
"OutOfBoundsDatetime",
"OutOfBoundsTimedelta",
"IncompatibleFrequency",
@@ -37,7 +36,6 @@
NaT,
NaTType,
iNaT,
- is_null_datetimelike,
nat_strings,
)
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
diff --git a/pandas/_libs/tslibs/nattype.pxd b/pandas/_libs/tslibs/nattype.pxd
index 35319bd88053a..b7c14e0a5b068 100644
--- a/pandas/_libs/tslibs/nattype.pxd
+++ b/pandas/_libs/tslibs/nattype.pxd
@@ -18,4 +18,3 @@ cdef _NaT c_NaT
cdef bint checknull_with_nat(object val)
cdef bint is_dt64nat(object val)
cdef bint is_td64nat(object val)
-cpdef bint is_null_datetimelike(object val, bint inat_is_null=*)
diff --git a/pandas/_libs/tslibs/nattype.pyi b/pandas/_libs/tslibs/nattype.pyi
index 22e6395a1fe99..6a5555cfff030 100644
--- a/pandas/_libs/tslibs/nattype.pyi
+++ b/pandas/_libs/tslibs/nattype.pyi
@@ -12,13 +12,13 @@ NaT: NaTType
iNaT: int
nat_strings: set[str]
-def is_null_datetimelike(val: object, inat_is_null: bool = ...) -> bool: ...
-
class NaTType(datetime):
value: np.int64
def asm8(self) -> np.datetime64: ...
def to_datetime64(self) -> np.datetime64: ...
- def to_numpy(self, dtype=..., copy: bool = ...) -> np.datetime64: ...
+ def to_numpy(
+ self, dtype=..., copy: bool = ...
+ ) -> np.datetime64 | np.timedelta64: ...
@property
def is_leap_year(self) -> bool: ...
@property
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 2aebf75ba35d4..a36a1e12c274d 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -258,19 +258,20 @@ cdef class _NaT(datetime):
"""
return np.datetime64('NaT', "ns")
- def to_numpy(self, dtype=None, copy=False) -> np.datetime64:
+ def to_numpy(self, dtype=None, copy=False) -> np.datetime64 | np.timedelta64:
"""
- Convert the Timestamp to a NumPy datetime64.
+ Convert the Timestamp to a NumPy datetime64 or timedelta64.
.. versionadded:: 0.25.0
- This is an alias method for `Timestamp.to_datetime64()`. The dtype and
- copy parameters are available here only for compatibility. Their values
+ With the default 'dtype', this is an alias method for `NaT.to_datetime64()`.
+
+ The copy parameter is available here only for compatibility. Its value
will not affect the return value.
Returns
-------
- numpy.datetime64
+ numpy.datetime64 or numpy.timedelta64
See Also
--------
@@ -286,7 +287,22 @@ cdef class _NaT(datetime):
>>> pd.NaT.to_numpy()
numpy.datetime64('NaT')
+
+ >>> pd.NaT.to_numpy("m8[ns]")
+ numpy.timedelta64('NaT','ns')
"""
+ if dtype is not None:
+ # GH#44460
+ dtype = np.dtype(dtype)
+ if dtype.kind == "M":
+ return np.datetime64("NaT").astype(dtype)
+ elif dtype.kind == "m":
+ return np.timedelta64("NaT").astype(dtype)
+ else:
+ raise ValueError(
+ "NaT.to_numpy dtype must be a datetime64 dtype, timedelta64 "
+ "dtype, or None."
+ )
return self.to_datetime64()
def __repr__(self) -> str:
@@ -295,7 +311,7 @@ cdef class _NaT(datetime):
def __str__(self) -> str:
return "NaT"
- def isoformat(self, sep="T") -> str:
+ def isoformat(self, sep: str = "T", timespec: str = "auto") -> str:
# This allows Timestamp(ts.isoformat()) to always correctly roundtrip.
return "NaT"
@@ -776,6 +792,13 @@ timedelta}, default 'raise'
------
ValueError if the freq cannot be converted
+ Notes
+ -----
+ If the Timestamp has a timezone, rounding will take place relative to the
+ local ("wall") time and re-localized to the same timezone. When rounding
+ near daylight savings time, use ``nonexistent`` and ``ambiguous`` to
+ control the re-localization behavior.
+
Examples
--------
Create a timestamp object:
@@ -810,6 +833,17 @@ timedelta}, default 'raise'
>>> pd.NaT.round()
NaT
+
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
+ ``nonexistent`` to control how the timestamp should be re-localized.
+
+ >>> ts_tz = pd.Timestamp("2021-10-31 01:30:00").tz_localize("Europe/Amsterdam")
+
+ >>> ts_tz.round("H", ambiguous=False)
+ Timestamp('2021-10-31 02:00:00+0100', tz='Europe/Amsterdam')
+
+ >>> ts_tz.round("H", ambiguous=True)
+ Timestamp('2021-10-31 02:00:00+0200', tz='Europe/Amsterdam')
""",
)
floor = _make_nat_func(
@@ -847,6 +881,13 @@ timedelta}, default 'raise'
------
ValueError if the freq cannot be converted.
+ Notes
+ -----
+ If the Timestamp has a timezone, flooring will take place relative to the
+ local ("wall") time and re-localized to the same timezone. When flooring
+ near daylight savings time, use ``nonexistent`` and ``ambiguous`` to
+ control the re-localization behavior.
+
Examples
--------
Create a timestamp object:
@@ -881,6 +922,17 @@ timedelta}, default 'raise'
>>> pd.NaT.floor()
NaT
+
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
+ ``nonexistent`` to control how the timestamp should be re-localized.
+
+ >>> ts_tz = pd.Timestamp("2021-10-31 03:30:00").tz_localize("Europe/Amsterdam")
+
+ >>> ts_tz.floor("2H", ambiguous=False)
+ Timestamp('2021-10-31 02:00:00+0100', tz='Europe/Amsterdam')
+
+ >>> ts_tz.floor("2H", ambiguous=True)
+ Timestamp('2021-10-31 02:00:00+0200', tz='Europe/Amsterdam')
""",
)
ceil = _make_nat_func(
@@ -918,6 +970,13 @@ timedelta}, default 'raise'
------
ValueError if the freq cannot be converted.
+ Notes
+ -----
+ If the Timestamp has a timezone, ceiling will take place relative to the
+ local ("wall") time and re-localized to the same timezone. When ceiling
+ near daylight savings time, use ``nonexistent`` and ``ambiguous`` to
+ control the re-localization behavior.
+
Examples
--------
Create a timestamp object:
@@ -952,6 +1011,17 @@ timedelta}, default 'raise'
>>> pd.NaT.ceil()
NaT
+
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
+ ``nonexistent`` to control how the timestamp should be re-localized.
+
+ >>> ts_tz = pd.Timestamp("2021-10-31 01:30:00").tz_localize("Europe/Amsterdam")
+
+ >>> ts_tz.ceil("H", ambiguous=False)
+ Timestamp('2021-10-31 02:00:00+0100', tz='Europe/Amsterdam')
+
+ >>> ts_tz.ceil("H", ambiguous=True)
+ Timestamp('2021-10-31 02:00:00+0200', tz='Europe/Amsterdam')
""",
)
@@ -1131,6 +1201,7 @@ cdef inline bint checknull_with_nat(object val):
"""
return val is None or util.is_nan(val) or val is c_NaT
+
cdef inline bint is_dt64nat(object val):
"""
Is this a np.datetime64 object np.datetime64("NaT").
@@ -1139,6 +1210,7 @@ cdef inline bint is_dt64nat(object val):
return get_datetime64_value(val) == NPY_NAT
return False
+
cdef inline bint is_td64nat(object val):
"""
Is this a np.timedelta64 object np.timedelta64("NaT").
@@ -1146,32 +1218,3 @@ cdef inline bint is_td64nat(object val):
if util.is_timedelta64_object(val):
return get_timedelta64_value(val) == NPY_NAT
return False
-
-
-cpdef bint is_null_datetimelike(object val, bint inat_is_null=True):
- """
- Determine if we have a null for a timedelta/datetime (or integer versions).
-
- Parameters
- ----------
- val : object
- inat_is_null : bool, default True
- Whether to treat integer iNaT value as null
-
- Returns
- -------
- bool
- """
- if val is None:
- return True
- elif val is c_NaT:
- return True
- elif util.is_float_object(val) or util.is_complex_object(val):
- return val != val
- elif util.is_timedelta64_object(val):
- return get_timedelta64_value(val) == NPY_NAT
- elif util.is_datetime64_object(val):
- return get_datetime64_value(val) == NPY_NAT
- elif inat_is_null and util.is_integer_object(val):
- return val == NPY_NAT
- return False
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 00d02e096c976..98055c01d6ab0 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -72,7 +72,10 @@ from pandas._libs.tslibs.np_datetime cimport (
from pandas._libs.tslibs.tzconversion cimport tz_convert_from_utc_single
from .dtypes cimport PeriodDtypeCode
-from .timedeltas cimport delta_to_nanoseconds
+from .timedeltas cimport (
+ delta_to_nanoseconds,
+ is_any_td_scalar,
+)
from .timedeltas import Timedelta
@@ -154,7 +157,11 @@ def apply_wraps(func):
if other is NaT:
return NaT
- elif isinstance(other, BaseOffset) or PyDelta_Check(other):
+ elif (
+ isinstance(other, BaseOffset)
+ or PyDelta_Check(other)
+ or util.is_timedelta64_object(other)
+ ):
# timedelta path
return func(self, other)
elif is_datetime64_object(other) or PyDate_Check(other):
@@ -346,6 +353,9 @@ cdef class BaseOffset:
"""
Base class for DateOffset methods that are not overridden by subclasses.
"""
+ # ensure that reversed-ops with numpy scalars return NotImplemented
+ __array_priority__ = 1000
+
_day_opt = None
_attributes = tuple(["n", "normalize"])
_use_relativedelta = False
@@ -427,8 +437,12 @@ cdef class BaseOffset:
if not isinstance(self, BaseOffset):
# cython semantics; this is __radd__
return other.__add__(self)
+
+ elif util.is_array(other) and other.dtype == object:
+ return np.array([self + x for x in other])
+
try:
- return self.apply(other)
+ return self._apply(other)
except ApplyTypeError:
return NotImplemented
@@ -441,7 +455,8 @@ cdef class BaseOffset:
elif not isinstance(self, BaseOffset):
# cython semantics, this is __rsub__
return (-other).__add__(self)
- else: # pragma: no cover
+ else:
+ # e.g. PeriodIndex
return NotImplemented
def __call__(self, other):
@@ -451,7 +466,17 @@ cdef class BaseOffset:
FutureWarning,
stacklevel=1,
)
- return self.apply(other)
+ return self._apply(other)
+
+ def apply(self, other):
+ # GH#44522
+ warnings.warn(
+ f"{type(self).__name__}.apply is deprecated and will be removed "
+ "in a future version. Use `offset + other` instead",
+ FutureWarning,
+ stacklevel=2,
+ )
+ return self._apply(other)
def __mul__(self, other):
if util.is_array(other):
@@ -760,8 +785,6 @@ cdef class SingleConstructorOffset(BaseOffset):
# Tick Offsets
cdef class Tick(SingleConstructorOffset):
- # ensure that reversed-ops with numpy scalars return NotImplemented
- __array_priority__ = 1000
_adjust_dst = False
_prefix = "undefined"
_attributes = tuple(["n", "normalize"])
@@ -882,7 +905,7 @@ cdef class Tick(SingleConstructorOffset):
else:
return delta_to_tick(self.delta + other.delta)
try:
- return self.apply(other)
+ return self._apply(other)
except ApplyTypeError:
# Includes pd.Period
return NotImplemented
@@ -891,7 +914,7 @@ cdef class Tick(SingleConstructorOffset):
f"the add operation between {self} and {other} will overflow"
) from err
- def apply(self, other):
+ def _apply(self, other):
# Timestamp can handle tz and nano sec, thus no need to use apply_wraps
if isinstance(other, _Timestamp):
# GH#15126
@@ -902,7 +925,7 @@ cdef class Tick(SingleConstructorOffset):
# PyDate_Check includes date, datetime
return Timestamp(other) + self
- if PyDelta_Check(other):
+ if util.is_timedelta64_object(other) or PyDelta_Check(other):
return other + self.delta
elif isinstance(other, type(self)):
# TODO: this is reached in tests that specifically call apply,
@@ -1034,7 +1057,7 @@ cdef class RelativeDeltaOffset(BaseOffset):
self.__dict__.update(state)
@apply_wraps
- def apply(self, other: datetime) -> datetime:
+ def _apply(self, other: datetime) -> datetime:
if self._use_relativedelta:
other = _as_datetime(other)
@@ -1364,7 +1387,7 @@ cdef class BusinessDay(BusinessMixin):
return "+" + repr(self.offset)
@apply_wraps
- def apply(self, other):
+ def _apply(self, other):
if PyDateTime_Check(other):
n = self.n
wday = other.weekday()
@@ -1396,9 +1419,10 @@ cdef class BusinessDay(BusinessMixin):
result = result + self.offset
return result
- elif PyDelta_Check(other) or isinstance(other, Tick):
+ elif is_any_td_scalar(other):
+ td = Timedelta(self.offset) + other
return BusinessDay(
- self.n, offset=self.offset + other, normalize=self.normalize
+ self.n, offset=td.to_pytimedelta(), normalize=self.normalize
)
else:
raise ApplyTypeError(
@@ -1676,7 +1700,7 @@ cdef class BusinessHour(BusinessMixin):
return dt
@apply_wraps
- def apply(self, other: datetime) -> datetime:
+ def _apply(self, other: datetime) -> datetime:
# used for detecting edge condition
nanosecond = getattr(other, "nanosecond", 0)
# reset timezone and nanosecond
@@ -1825,7 +1849,7 @@ cdef class WeekOfMonthMixin(SingleConstructorOffset):
raise ValueError(f"Day must be 0<=day<=6, got {weekday}")
@apply_wraps
- def apply(self, other: datetime) -> datetime:
+ def _apply(self, other: datetime) -> datetime:
compare_day = self._get_offset_day(other)
months = self.n
@@ -1905,7 +1929,7 @@ cdef class YearOffset(SingleConstructorOffset):
return get_day_of_month(&dts, self._day_opt)
@apply_wraps
- def apply(self, other: datetime) -> datetime:
+ def _apply(self, other: datetime) -> datetime:
years = roll_qtrday(other, self.n, self.month, self._day_opt, modby=12)
months = years * 12 + (self.month - other.month)
return shift_month(other, months, self._day_opt)
@@ -2054,7 +2078,7 @@ cdef class QuarterOffset(SingleConstructorOffset):
return mod_month == 0 and dt.day == self._get_offset_day(dt)
@apply_wraps
- def apply(self, other: datetime) -> datetime:
+ def _apply(self, other: datetime) -> datetime:
# months_since: find the calendar quarter containing other.month,
# e.g. if other.month == 8, the calendar quarter is [Jul, Aug, Sep].
# Then find the month in that quarter containing an is_on_offset date for
@@ -2181,7 +2205,7 @@ cdef class MonthOffset(SingleConstructorOffset):
return dt.day == self._get_offset_day(dt)
@apply_wraps
- def apply(self, other: datetime) -> datetime:
+ def _apply(self, other: datetime) -> datetime:
compare_day = self._get_offset_day(other)
n = roll_convention(other.day, self.n, compare_day)
return shift_month(other, n, self._day_opt)
@@ -2223,7 +2247,7 @@ cdef class MonthBegin(MonthOffset):
cdef class BusinessMonthEnd(MonthOffset):
"""
- DateOffset increments between the last business day of the month
+ DateOffset increments between the last business day of the month.
Examples
--------
@@ -2299,7 +2323,7 @@ cdef class SemiMonthOffset(SingleConstructorOffset):
return self._prefix + suffix
@apply_wraps
- def apply(self, other: datetime) -> datetime:
+ def _apply(self, other: datetime) -> datetime:
is_start = isinstance(self, SemiMonthBegin)
# shift `other` to self.day_of_month, incrementing `n` if necessary
@@ -2474,7 +2498,7 @@ cdef class Week(SingleConstructorOffset):
return self.n == 1 and self.weekday is not None
@apply_wraps
- def apply(self, other):
+ def _apply(self, other):
if self.weekday is None:
return other + self.n * self._inc
@@ -2825,7 +2849,7 @@ cdef class FY5253(FY5253Mixin):
return year_end == dt
@apply_wraps
- def apply(self, other: datetime) -> datetime:
+ def _apply(self, other: datetime) -> datetime:
norm = Timestamp(other).normalize()
n = self.n
@@ -3074,7 +3098,7 @@ cdef class FY5253Quarter(FY5253Mixin):
return start, num_qtrs, tdelta
@apply_wraps
- def apply(self, other: datetime) -> datetime:
+ def _apply(self, other: datetime) -> datetime:
# Note: self.n == 0 is not allowed.
n = self.n
@@ -3165,7 +3189,7 @@ cdef class Easter(SingleConstructorOffset):
self.normalize = state.pop("normalize")
@apply_wraps
- def apply(self, other: datetime) -> datetime:
+ def _apply(self, other: datetime) -> datetime:
current_easter = easter(other.year)
current_easter = datetime(
current_easter.year, current_easter.month, current_easter.day
@@ -3244,7 +3268,7 @@ cdef class CustomBusinessDay(BusinessDay):
BusinessDay.__setstate__(self, state)
@apply_wraps
- def apply(self, other):
+ def _apply(self, other):
if self.n <= 0:
roll = "forward"
else:
@@ -3265,8 +3289,9 @@ cdef class CustomBusinessDay(BusinessDay):
result = result + self.offset
return result
- elif PyDelta_Check(other) or isinstance(other, Tick):
- return BDay(self.n, offset=self.offset + other, normalize=self.normalize)
+ elif is_any_td_scalar(other):
+ td = Timedelta(self.offset) + other
+ return BDay(self.n, offset=td.to_pytimedelta(), normalize=self.normalize)
else:
raise ApplyTypeError(
"Only know how to combine trading day with "
@@ -3406,7 +3431,7 @@ cdef class _CustomBusinessMonth(BusinessMixin):
return roll_func
@apply_wraps
- def apply(self, other: datetime) -> datetime:
+ def _apply(self, other: datetime) -> datetime:
# First move to month offset
cur_month_offset_date = self.month_roll(other)
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index f594e0a8bdafd..67696f9740ea1 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -104,7 +104,7 @@ from pandas._libs.tslibs.nattype cimport (
_nat_scalar_rules,
c_NaT as NaT,
c_nat_strings as nat_strings,
- is_null_datetimelike,
+ checknull_with_nat,
)
from pandas._libs.tslibs.offsets cimport (
BaseOffset,
@@ -1459,10 +1459,13 @@ def extract_ordinals(ndarray[object] values, freq) -> np.ndarray:
for i in range(n):
p = values[i]
- if is_null_datetimelike(p):
+ if checknull_with_nat(p):
ordinals[i] = NPY_NAT
elif util.is_integer_object(p):
- raise TypeError(p)
+ if p == NPY_NAT:
+ ordinals[i] = NPY_NAT
+ else:
+ raise TypeError(p)
else:
try:
ordinals[i] = p.ordinal
@@ -2473,14 +2476,17 @@ class Period(_Period):
converted = other.asfreq(freq)
ordinal = converted.ordinal
- elif is_null_datetimelike(value) or (isinstance(value, str) and
- value in nat_strings):
+ elif checknull_with_nat(value) or (isinstance(value, str) and
+ value in nat_strings):
# explicit str check is necessary to avoid raising incorrectly
# if we have a non-hashable value.
ordinal = NPY_NAT
elif isinstance(value, str) or util.is_integer_object(value):
if util.is_integer_object(value):
+ if value == NPY_NAT:
+ value = "NaT"
+
value = str(value)
value = value.upper()
dt, reso = parse_time_string(value, freq)
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 43f9be3fef5ee..be39ccd444865 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -929,6 +929,10 @@ cdef class _Timedelta(timedelta):
--------
Series.to_numpy : Similar method for Series.
"""
+ if dtype is not None or copy is not False:
+ raise ValueError(
+ "Timedelta.to_numpy dtype and copy arguments are ignored"
+ )
return self.to_timedelta64()
def view(self, dtype):
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 613da5a691736..00149dec0790f 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -737,9 +737,42 @@ cdef class _Timestamp(ABCTimestamp):
# -----------------------------------------------------------------
# Rendering Methods
- def isoformat(self, sep: str = "T") -> str:
- base = super(_Timestamp, self).isoformat(sep=sep)
- if self.nanosecond == 0:
+ def isoformat(self, sep: str = "T", timespec: str = "auto") -> str:
+ """
+ Return the time formatted according to ISO.
+
+ The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmmnnn'.
+ By default, the fractional part is omitted if self.microsecond == 0
+ and self.nanosecond == 0.
+
+    If self.tzinfo is not None, the UTC offset is also attached, giving a
+    full format of 'YYYY-MM-DD HH:MM:SS.mmmmmmnnn+HH:MM'.
+
+ Parameters
+ ----------
+ sep : str, default 'T'
+ String used as the separator between the date and time.
+
+ timespec : str, default 'auto'
+ Specifies the number of additional terms of the time to include.
+ The valid values are 'auto', 'hours', 'minutes', 'seconds',
+ 'milliseconds', 'microseconds', and 'nanoseconds'.
+
+ Returns
+ -------
+ str
+
+ Examples
+ --------
+ >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651')
+ >>> ts.isoformat()
+ '2020-03-14T15:32:52.192548651'
+ >>> ts.isoformat(timespec='microseconds')
+ '2020-03-14T15:32:52.192548'
+ """
+ base_ts = "microseconds" if timespec == "nanoseconds" else timespec
+ base = super(_Timestamp, self).isoformat(sep=sep, timespec=base_ts)
+ if self.nanosecond == 0 and timespec != "nanoseconds":
return base
if self.tzinfo is not None:
@@ -747,10 +780,11 @@ cdef class _Timestamp(ABCTimestamp):
else:
base1, base2 = base, ""
- if self.microsecond != 0:
- base1 += f"{self.nanosecond:03d}"
- else:
- base1 += f".{self.nanosecond:09d}"
+ if timespec == "nanoseconds" or (timespec == "auto" and self.nanosecond):
+ if self.microsecond:
+ base1 += f"{self.nanosecond:03d}"
+ else:
+ base1 += f".{self.nanosecond:09d}"
return base1 + base2
@@ -900,6 +934,10 @@ cdef class _Timestamp(ABCTimestamp):
>>> pd.NaT.to_numpy()
numpy.datetime64('NaT')
"""
+ if dtype is not None or copy is not False:
+ raise ValueError(
+ "Timestamp.to_numpy dtype and copy arguments are ignored."
+ )
return self.to_datetime64()
def to_period(self, freq=None):
@@ -981,7 +1019,7 @@ class Timestamp(_Timestamp):
Due to daylight saving time, one wall clock time can occur twice
when shifting from summer to winter time; fold describes whether the
datetime-like corresponds to the first (0) or the second time (1)
- the wall clock hits the ambiguous time
+ the wall clock hits the ambiguous time.
.. versionadded:: 1.1.0
@@ -1424,6 +1462,13 @@ timedelta}, default 'raise'
------
ValueError if the freq cannot be converted
+ Notes
+ -----
+ If the Timestamp has a timezone, rounding will take place relative to the
+ local ("wall") time and re-localized to the same timezone. When rounding
+ near daylight savings time, use ``nonexistent`` and ``ambiguous`` to
+ control the re-localization behavior.
+
Examples
--------
Create a timestamp object:
@@ -1458,6 +1503,17 @@ timedelta}, default 'raise'
>>> pd.NaT.round()
NaT
+
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
+ ``nonexistent`` to control how the timestamp should be re-localized.
+
+ >>> ts_tz = pd.Timestamp("2021-10-31 01:30:00").tz_localize("Europe/Amsterdam")
+
+ >>> ts_tz.round("H", ambiguous=False)
+ Timestamp('2021-10-31 02:00:00+0100', tz='Europe/Amsterdam')
+
+ >>> ts_tz.round("H", ambiguous=True)
+ Timestamp('2021-10-31 02:00:00+0200', tz='Europe/Amsterdam')
"""
return self._round(
freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent
@@ -1497,6 +1553,13 @@ timedelta}, default 'raise'
------
ValueError if the freq cannot be converted.
+ Notes
+ -----
+ If the Timestamp has a timezone, flooring will take place relative to the
+ local ("wall") time and re-localized to the same timezone. When flooring
+ near daylight savings time, use ``nonexistent`` and ``ambiguous`` to
+ control the re-localization behavior.
+
Examples
--------
Create a timestamp object:
@@ -1531,6 +1594,17 @@ timedelta}, default 'raise'
>>> pd.NaT.floor()
NaT
+
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
+ ``nonexistent`` to control how the timestamp should be re-localized.
+
+ >>> ts_tz = pd.Timestamp("2021-10-31 03:30:00").tz_localize("Europe/Amsterdam")
+
+ >>> ts_tz.floor("2H", ambiguous=False)
+ Timestamp('2021-10-31 02:00:00+0100', tz='Europe/Amsterdam')
+
+ >>> ts_tz.floor("2H", ambiguous=True)
+ Timestamp('2021-10-31 02:00:00+0200', tz='Europe/Amsterdam')
"""
return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)
@@ -1568,6 +1642,13 @@ timedelta}, default 'raise'
------
ValueError if the freq cannot be converted.
+ Notes
+ -----
+ If the Timestamp has a timezone, ceiling will take place relative to the
+ local ("wall") time and re-localized to the same timezone. When ceiling
+ near daylight savings time, use ``nonexistent`` and ``ambiguous`` to
+ control the re-localization behavior.
+
Examples
--------
Create a timestamp object:
@@ -1602,6 +1683,17 @@ timedelta}, default 'raise'
>>> pd.NaT.ceil()
NaT
+
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
+ ``nonexistent`` to control how the timestamp should be re-localized.
+
+ >>> ts_tz = pd.Timestamp("2021-10-31 01:30:00").tz_localize("Europe/Amsterdam")
+
+ >>> ts_tz.ceil("H", ambiguous=False)
+ Timestamp('2021-10-31 02:00:00+0100', tz='Europe/Amsterdam')
+
+ >>> ts_tz.ceil("H", ambiguous=True)
+ Timestamp('2021-10-31 02:00:00+0200', tz='Europe/Amsterdam')
"""
return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
diff --git a/pandas/_libs/window/aggregations.pyi b/pandas/_libs/window/aggregations.pyi
index f3317ff5a60be..b926a7cb73425 100644
--- a/pandas/_libs/window/aggregations.pyi
+++ b/pandas/_libs/window/aggregations.pyi
@@ -6,7 +6,10 @@ from typing import (
import numpy as np
-from pandas._typing import WindowingRankType
+from pandas._typing import (
+ WindowingRankType,
+ npt,
+)
def roll_sum(
values: np.ndarray, # const float64_t[:]
@@ -83,7 +86,7 @@ def roll_apply(
raw: bool,
args: tuple[Any, ...],
kwargs: dict[str, Any],
-) -> np.ndarray: ... # np.ndarray[float] # FIXME: could also be type(obj) if n==0
+) -> npt.NDArray[np.float64]: ...
def roll_weighted_sum(
values: np.ndarray, # const float64_t[:]
weights: np.ndarray, # const float64_t[:]
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 98201a6f58499..be8bb61092362 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -1271,7 +1271,7 @@ def roll_apply(object obj,
Py_ssize_t i, s, e, N = len(start), n = len(obj)
if n == 0:
- return obj
+ return np.array([], dtype=np.float64)
arr = np.asarray(obj)
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index c2c55a4060f7a..4f9ef2c3c3ffa 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -82,6 +82,7 @@
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
+ assert_indexing_slices_equivalent,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
diff --git a/pandas/_testing/_io.py b/pandas/_testing/_io.py
index c7113e663789b..437e75be0e55b 100644
--- a/pandas/_testing/_io.py
+++ b/pandas/_testing/_io.py
@@ -10,7 +10,10 @@
)
import zipfile
-from pandas._typing import FilePathOrBuffer
+from pandas._typing import (
+ FilePath,
+ ReadPickleBuffer,
+)
from pandas.compat import (
get_lzma_file,
import_lzma,
@@ -277,7 +280,7 @@ def can_connect(url, error_classes=None):
def round_trip_pickle(
- obj: Any, path: FilePathOrBuffer | None = None
+ obj: Any, path: FilePath | ReadPickleBuffer | None = None
) -> DataFrame | Series:
"""
Pickle an object and then read it again.
diff --git a/pandas/_testing/_warnings.py b/pandas/_testing/_warnings.py
index 5f01996d0390d..f66614bd02a3f 100644
--- a/pandas/_testing/_warnings.py
+++ b/pandas/_testing/_warnings.py
@@ -151,8 +151,8 @@ def _assert_caught_no_extra_warnings(
if actual_warning.category == ResourceWarning and unclosed in str(
actual_warning.message
):
- # FIXME: kludge because pytest.filterwarnings does not
- # suppress these, xref GH#38630
+ # FIXME(GH#38630): kludge because pytest.filterwarnings does not
+ # suppress these
continue
extra_warnings.append(
diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index c9f7fd43c1050..267aed5301508 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -11,6 +11,7 @@
)
from pandas._libs.missing import is_matching_na
import pandas._libs.testing as _testing
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_bool,
@@ -106,7 +107,7 @@ def assert_almost_equal(
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
# https://github.com/python/mypy/issues/7642
# error: Argument 1 to "_get_tol_from_less_precise" has incompatible
@@ -340,7 +341,7 @@ def _get_ilevel_values(index, level):
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
# https://github.com/python/mypy/issues/7642
# error: Argument 1 to "_get_tol_from_less_precise" has incompatible
@@ -541,7 +542,7 @@ def assert_categorical_equal(
left : Categorical
right : Categorical
check_dtype : bool, default True
- Check that integer dtype of the codes are the same
+ Check that integer dtype of the codes are the same.
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
@@ -549,7 +550,7 @@ def assert_categorical_equal(
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
- assertion message
+ assertion message.
"""
_check_isinstance(left, right, Categorical)
@@ -818,7 +819,7 @@ def assert_extension_array_equal(
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
@@ -964,7 +965,7 @@ def assert_series_equal(
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
@@ -983,6 +984,8 @@ def assert_series_equal(
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
+ assert left.attrs == right.attrs, f"{repr(left.attrs)} != {repr(right.attrs)}"
+
if check_index:
# GH #38183
assert_index_equal(
@@ -1067,6 +1070,8 @@ def assert_series_equal(
assert_extension_array_equal(
left._values,
right._values,
+ rtol=rtol,
+ atol=atol,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
@@ -1247,7 +1252,7 @@ def assert_frame_equal(
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
@@ -1267,6 +1272,8 @@ def assert_frame_equal(
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
+ assert left.attrs == right.attrs, f"{repr(left.attrs)} != {repr(right.attrs)}"
+
# index comparison
assert_index_equal(
left.index,
@@ -1444,3 +1451,17 @@ def is_extension_array_dtype_and_needs_i8_conversion(left_dtype, right_dtype) ->
Related to issue #37609
"""
return is_extension_array_dtype(left_dtype) and needs_i8_conversion(right_dtype)
+
+
+def assert_indexing_slices_equivalent(ser: Series, l_slc: slice, i_slc: slice):
+ """
+ Check that ser.iloc[i_slc] matches ser.loc[l_slc] and, if applicable,
+ ser[l_slc].
+ """
+ expected = ser.iloc[i_slc]
+
+ assert_series_equal(ser.loc[l_slc], expected)
+
+ if not ser.index.is_integer():
+ # For integer indices, .loc and plain getitem are position-based.
+ assert_series_equal(ser[l_slc], expected)
diff --git a/pandas/_testing/contexts.py b/pandas/_testing/contexts.py
index e20d2d58e499f..b92772761e0a7 100644
--- a/pandas/_testing/contexts.py
+++ b/pandas/_testing/contexts.py
@@ -14,6 +14,8 @@
import numpy as np
+from pandas import set_option
+
from pandas.io.common import get_handle
@@ -202,11 +204,11 @@ def use_numexpr(use, min_elements=None):
olduse = expr.USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
- expr.set_use_numexpr(use)
+ set_option("compute.use_numexpr", use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
- expr.set_use_numexpr(olduse)
+ set_option("compute.use_numexpr", olduse)
class RNGContext:
diff --git a/pandas/_typing.py b/pandas/_typing.py
index 68ec331c2781f..89e1c0bf7a71f 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -1,29 +1,24 @@
+from __future__ import annotations
+
from datetime import (
datetime,
timedelta,
tzinfo,
)
-from io import (
- BufferedIOBase,
- RawIOBase,
- TextIOBase,
- TextIOWrapper,
-)
-from mmap import mmap
from os import PathLike
from typing import (
- IO,
TYPE_CHECKING,
Any,
- AnyStr,
Callable,
Collection,
Dict,
Hashable,
+ Iterator,
List,
Literal,
Mapping,
Optional,
+ Protocol,
Sequence,
Tuple,
Type as type_t,
@@ -170,9 +165,76 @@
PythonFuncType = Callable[[Any], Any]
# filenames and file-like-objects
-Buffer = Union[IO[AnyStr], RawIOBase, BufferedIOBase, TextIOBase, TextIOWrapper, mmap]
-FileOrBuffer = Union[str, Buffer[AnyStr]]
-FilePathOrBuffer = Union["PathLike[str]", FileOrBuffer[AnyStr]]
+AnyStr_cov = TypeVar("AnyStr_cov", str, bytes, covariant=True)
+AnyStr_con = TypeVar("AnyStr_con", str, bytes, contravariant=True)
+
+
+class BaseBuffer(Protocol):
+ @property
+ def mode(self) -> str:
+ # for _get_filepath_or_buffer
+ ...
+
+ def fileno(self) -> int:
+ # for _MMapWrapper
+ ...
+
+ def seek(self, __offset: int, __whence: int = ...) -> int:
+ # with one argument: gzip.GzipFile, bz2.BZ2File
+ # with two arguments: zip.ZipFile, read_sas
+ ...
+
+ def seekable(self) -> bool:
+ # for bz2.BZ2File
+ ...
+
+ def tell(self) -> int:
+ # for zip.ZipFile, read_stata, to_stata
+ ...
+
+
+class ReadBuffer(BaseBuffer, Protocol[AnyStr_cov]):
+ def read(self, __n: int | None = ...) -> AnyStr_cov:
+ # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File
+ ...
+
+
+class WriteBuffer(BaseBuffer, Protocol[AnyStr_con]):
+ def write(self, __b: AnyStr_con) -> Any:
+ # for gzip.GzipFile, bz2.BZ2File
+ ...
+
+ def flush(self) -> Any:
+ # for gzip.GzipFile, bz2.BZ2File
+ ...
+
+
+class ReadPickleBuffer(ReadBuffer[bytes], Protocol):
+ def readline(self) -> AnyStr_cov:
+ ...
+
+
+class WriteExcelBuffer(WriteBuffer[bytes], Protocol):
+ def truncate(self, size: int | None = ...) -> int:
+ ...
+
+
+class ReadCsvBuffer(ReadBuffer[AnyStr_cov], Protocol):
+ def __iter__(self) -> Iterator[AnyStr_cov]:
+ # for engine=python
+ ...
+
+ def readline(self) -> AnyStr_cov:
+ # for engine=python
+ ...
+
+ @property
+ def closed(self) -> bool:
+ # for engine=pyarrow
+ ...
+
+
+FilePath = Union[str, "PathLike[str]"]
# for arbitrary kwargs passed during reading/writing files
StorageOptions = Optional[Dict[str, Any]]
diff --git a/pandas/api/__init__.py b/pandas/api/__init__.py
index c22f37f2ef292..80202b3569862 100644
--- a/pandas/api/__init__.py
+++ b/pandas/api/__init__.py
@@ -1,5 +1,5 @@
""" public toolkit API """
-from pandas.api import ( # noqa
+from pandas.api import ( # noqa:F401
extensions,
indexers,
types,
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index fd5c46f7a6d5a..57b13fef9ad8a 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -20,7 +20,7 @@
np_version_under1p20,
)
from pandas.compat.pyarrow import (
- pa_version_under1p0,
+ pa_version_under1p01,
pa_version_under2p0,
pa_version_under3p0,
pa_version_under4p0,
@@ -153,7 +153,7 @@ def get_lzma_file(lzma):
"np_datetime64_compat",
"np_version_under1p19",
"np_version_under1p20",
- "pa_version_under1p0",
+ "pa_version_under1p01",
"pa_version_under2p0",
"pa_version_under3p0",
"pa_version_under4p0",
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index adf20f3322a79..1cf57404bbe01 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -21,7 +21,7 @@
"odfpy": "1.4.1",
"openpyxl": "3.0.2",
"pandas_gbq": "0.14.0",
- "pyarrow": "0.17.0",
+ "pyarrow": "1.0.1",
"pytest": "6.0",
"pyxlsb": "1.0.6",
"s3fs": "0.4.0",
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index 5b87257651a2d..2792a756bf20c 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -11,9 +11,15 @@
_nlv = Version(_np_version)
np_version_under1p19 = _nlv < Version("1.19")
np_version_under1p20 = _nlv < Version("1.20")
+np_version_under1p22 = _nlv < Version("1.22")
is_numpy_dev = _nlv.dev is not None
_min_numpy_ver = "1.18.5"
+if is_numpy_dev or not np_version_under1p22:
+ np_percentile_argname = "method"
+else:
+ np_percentile_argname = "interpolation"
+
if _nlv < Version(_min_numpy_ver):
raise ImportError(
diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py
index f9b9409317774..e6ac0c59e789a 100644
--- a/pandas/compat/pyarrow.py
+++ b/pandas/compat/pyarrow.py
@@ -7,14 +7,14 @@
_pa_version = pa.__version__
_palv = Version(_pa_version)
- pa_version_under1p0 = _palv < Version("1.0.0")
+ pa_version_under1p01 = _palv < Version("1.0.1")
pa_version_under2p0 = _palv < Version("2.0.0")
pa_version_under3p0 = _palv < Version("3.0.0")
pa_version_under4p0 = _palv < Version("4.0.0")
pa_version_under5p0 = _palv < Version("5.0.0")
pa_version_under6p0 = _palv < Version("6.0.0")
except ImportError:
- pa_version_under1p0 = True
+ pa_version_under1p01 = True
pa_version_under2p0 = True
pa_version_under3p0 = True
pa_version_under4p0 = True
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 65d4b936efe44..04589993b5f53 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1268,6 +1268,16 @@ def timedelta64_dtype(request):
return request.param
+@pytest.fixture
+def fixed_now_ts():
+ """
+ Fixture emits fixed Timestamp.now()
+ """
+ return Timestamp(
+ year=2021, month=1, day=1, hour=12, minute=4, second=13, microsecond=22
+ )
+
+
@pytest.fixture(params=tm.FLOAT_NUMPY_DTYPES)
def float_numpy_dtype(request):
"""
@@ -1656,6 +1666,11 @@ def __init__(self, **kwargs):
("foo", None, None),
("Egon", "Venkman", None),
("NCC1701D", "NCC1701D", "NCC1701D"),
+ # possibly-matching NAs
+ (np.nan, np.nan, np.nan),
+ (np.nan, pd.NaT, None),
+ (np.nan, pd.NA, None),
+ (pd.NA, pd.NA, pd.NA),
]
)
def names(request):
diff --git a/pandas/core/_numba/executor.py b/pandas/core/_numba/executor.py
index c2b6191c05152..acb0c6d175c51 100644
--- a/pandas/core/_numba/executor.py
+++ b/pandas/core/_numba/executor.py
@@ -51,10 +51,11 @@ def column_looper(
start: np.ndarray,
end: np.ndarray,
min_periods: int,
+ *args,
):
result = np.empty((len(start), values.shape[1]), dtype=np.float64)
for i in numba.prange(values.shape[1]):
- result[:, i] = func(values[:, i], start, end, min_periods)
+ result[:, i] = func(values[:, i], start, end, min_periods, *args)
return result
return column_looper
diff --git a/pandas/core/_numba/kernels/__init__.py b/pandas/core/_numba/kernels/__init__.py
index 23b0ec5c3d8aa..2753a1e01161d 100644
--- a/pandas/core/_numba/kernels/__init__.py
+++ b/pandas/core/_numba/kernels/__init__.py
@@ -1,4 +1,5 @@
from pandas.core._numba.kernels.mean_ import sliding_mean
from pandas.core._numba.kernels.sum_ import sliding_sum
+from pandas.core._numba.kernels.var_ import sliding_var
-__all__ = ["sliding_mean", "sliding_sum"]
+__all__ = ["sliding_mean", "sliding_sum", "sliding_var"]
diff --git a/pandas/core/_numba/kernels/var_.py b/pandas/core/_numba/kernels/var_.py
new file mode 100644
index 0000000000000..2e5660673701b
--- /dev/null
+++ b/pandas/core/_numba/kernels/var_.py
@@ -0,0 +1,116 @@
+"""
+Numba 1D var kernels that can be shared by
+* Dataframe / Series
+* groupby
+* rolling / expanding
+
+Mirrors pandas/_libs/window/aggregations.pyx
+"""
+from __future__ import annotations
+
+import numba
+import numpy as np
+
+from pandas.core._numba.kernels.shared import is_monotonic_increasing
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def add_var(
+ val: float, nobs: int, mean_x: float, ssqdm_x: float, compensation: float
+) -> tuple[int, float, float, float]:
+ if not np.isnan(val):
+ nobs += 1
+ prev_mean = mean_x - compensation
+ y = val - compensation
+ t = y - mean_x
+ compensation = t + mean_x - y
+ delta = t
+ if nobs:
+ mean_x += delta / nobs
+ else:
+ mean_x = 0
+ ssqdm_x += (val - prev_mean) * (val - mean_x)
+ return nobs, mean_x, ssqdm_x, compensation
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def remove_var(
+ val: float, nobs: int, mean_x: float, ssqdm_x: float, compensation: float
+) -> tuple[int, float, float, float]:
+ if not np.isnan(val):
+ nobs -= 1
+ if nobs:
+ prev_mean = mean_x - compensation
+ y = val - compensation
+ t = y - mean_x
+ compensation = t + mean_x - y
+ delta = t
+ mean_x -= delta / nobs
+ ssqdm_x -= (val - prev_mean) * (val - mean_x)
+ else:
+ mean_x = 0
+ ssqdm_x = 0
+ return nobs, mean_x, ssqdm_x, compensation
+
+
+@numba.jit(nopython=True, nogil=True, parallel=False)
+def sliding_var(
+ values: np.ndarray,
+ start: np.ndarray,
+ end: np.ndarray,
+ min_periods: int,
+ ddof: int = 1,
+) -> np.ndarray:
+ N = len(start)
+ nobs = 0
+ mean_x = 0.0
+ ssqdm_x = 0.0
+ compensation_add = 0.0
+ compensation_remove = 0.0
+
+ min_periods = max(min_periods, 1)
+ is_monotonic_increasing_bounds = is_monotonic_increasing(
+ start
+ ) and is_monotonic_increasing(end)
+
+ output = np.empty(N, dtype=np.float64)
+
+ for i in range(N):
+ s = start[i]
+ e = end[i]
+ if i == 0 or not is_monotonic_increasing_bounds:
+ for j in range(s, e):
+ val = values[j]
+ nobs, mean_x, ssqdm_x, compensation_add = add_var(
+ val, nobs, mean_x, ssqdm_x, compensation_add
+ )
+ else:
+ for j in range(start[i - 1], s):
+ val = values[j]
+ nobs, mean_x, ssqdm_x, compensation_remove = remove_var(
+ val, nobs, mean_x, ssqdm_x, compensation_remove
+ )
+
+ for j in range(end[i - 1], e):
+ val = values[j]
+ nobs, mean_x, ssqdm_x, compensation_add = add_var(
+ val, nobs, mean_x, ssqdm_x, compensation_add
+ )
+
+ if nobs >= min_periods and nobs > ddof:
+ if nobs == 1:
+ result = 0.0
+ else:
+ result = ssqdm_x / (nobs - ddof)
+ else:
+ result = np.nan
+
+ output[i] = result
+
+ if not is_monotonic_increasing_bounds:
+ nobs = 0
+ mean_x = 0.0
+ ssqdm_x = 0.0
+ compensation_remove = 0.0
+
+ return output
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 8c2c01b6aedc8..538d9b0348d5f 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -35,6 +35,7 @@
npt,
)
from pandas.util._decorators import doc
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike,
@@ -214,7 +215,7 @@ def _reconstruct_data(
if isinstance(values, cls) and values.dtype == dtype:
return values
- values = cls._from_sequence(values)
+ values = cls._from_sequence(values, dtype=dtype)
elif is_bool_dtype(dtype):
values = values.astype(dtype, copy=False)
@@ -959,15 +960,18 @@ def mode(values, dropna: bool = True) -> Series:
original = values
# categorical is a fast-path
- if is_categorical_dtype(values):
+ if is_categorical_dtype(values.dtype):
if isinstance(values, Series):
# TODO: should we be passing `name` below?
return Series(values._values.mode(dropna=dropna), name=values.name)
return values.mode(dropna=dropna)
- if dropna and needs_i8_conversion(values.dtype):
- mask = values.isnull()
- values = values[~mask]
+ if needs_i8_conversion(values.dtype):
+ if dropna:
+ mask = values.isna()
+ values = values[~mask]
+ modes = mode(values.view("i8"))
+ return modes.view(original.dtype)
values = _ensure_data(values)
@@ -1550,7 +1554,7 @@ def searchsorted(
_diff_special = {"float64", "float32", "int64", "int32", "int16", "int8"}
-def diff(arr, n: int, axis: int = 0, stacklevel: int = 3):
+def diff(arr, n: int, axis: int = 0):
"""
difference of n between self,
analogous to s-s.shift(n)
@@ -1596,7 +1600,7 @@ def diff(arr, n: int, axis: int = 0, stacklevel: int = 3):
"dtype lost in 'diff()'. In the future this will raise a "
"TypeError. Convert to a suitable dtype prior to calling 'diff'.",
FutureWarning,
- stacklevel=stacklevel,
+ stacklevel=find_stack_level(),
)
arr = np.asarray(arr)
dtype = arr.dtype
@@ -1848,5 +1852,5 @@ def union_with_duplicates(lvals: ArrayLike, rvals: ArrayLike) -> ArrayLike:
unique_array = ensure_wrapped_if_datetimelike(unique_array)
for i, value in enumerate(unique_array):
- indexer += [i] * int(max(l_count[value], r_count[value]))
+ indexer += [i] * int(max(l_count.at[value], r_count.at[value]))
return unique_array.take(indexer)
diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py
index 77e38e6c6e3fc..1f37e0e5d249a 100644
--- a/pandas/core/array_algos/putmask.py
+++ b/pandas/core/array_algos/putmask.py
@@ -4,7 +4,6 @@
from __future__ import annotations
from typing import Any
-import warnings
import numpy as np
@@ -15,16 +14,12 @@
)
from pandas.core.dtypes.cast import (
+ can_hold_element,
convert_scalar_for_putitemlike,
find_common_type,
infer_dtype_from,
)
-from pandas.core.dtypes.common import (
- is_float_dtype,
- is_integer_dtype,
- is_list_like,
-)
-from pandas.core.dtypes.missing import isna_compat
+from pandas.core.dtypes.common import is_list_like
from pandas.core.arrays import ExtensionArray
@@ -75,7 +70,7 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd
`values`, updated in-place.
mask : np.ndarray[bool]
Applies to both sides (array like).
- new : `new values` either scalar or an array like aligned with `values`
+ new : listlike `new values` aligned with `values`
Returns
-------
@@ -89,9 +84,6 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd
# we cannot use np.asarray() here as we cannot have conversions
# that numpy does when numeric are mixed with strings
- if not is_list_like(new):
- new = np.broadcast_to(new, mask.shape)
-
# see if we are only masking values that if putted
# will work in the current dtype
try:
@@ -100,27 +92,12 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd
# TypeError: only integer scalar arrays can be converted to a scalar index
pass
else:
- # make sure that we have a nullable type if we have nulls
- if not isna_compat(values, nn[0]):
- pass
- elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)):
- # only compare integers/floats
- pass
- elif not (is_float_dtype(values.dtype) or is_integer_dtype(values.dtype)):
- # only compare integers/floats
- pass
- else:
-
- # we ignore ComplexWarning here
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("ignore", np.ComplexWarning)
- nn_at = nn.astype(values.dtype)
-
- comp = nn == nn_at
- if is_list_like(comp) and comp.all():
- nv = values.copy()
- nv[mask] = nn_at
- return nv
+ # We only get to putmask_smart when we cannot hold 'new' in values.
+ # The "smart" part of putmask_smart is checking if we can hold new[mask]
+ # in values, in which case we can still avoid the need to cast.
+ if can_hold_element(values, nn):
+ values[mask] = nn
+ return values
new = np.asarray(new)
diff --git a/pandas/core/array_algos/quantile.py b/pandas/core/array_algos/quantile.py
index c5e96f32e261f..a1b40acc2558e 100644
--- a/pandas/core/array_algos/quantile.py
+++ b/pandas/core/array_algos/quantile.py
@@ -4,7 +4,10 @@
import numpy as np
-from pandas._typing import ArrayLike
+from pandas._typing import (
+ ArrayLike,
+ npt,
+)
from pandas.core.dtypes.common import is_sparse
from pandas.core.dtypes.missing import (
@@ -18,7 +21,9 @@
from pandas.core.arrays import ExtensionArray
-def quantile_compat(values: ArrayLike, qs: np.ndarray, interpolation: str) -> ArrayLike:
+def quantile_compat(
+ values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str
+) -> ArrayLike:
"""
Compute the quantiles of the given values for each quantile in `qs`.
@@ -55,7 +60,7 @@ def _quantile_with_mask(
values: np.ndarray,
mask: np.ndarray,
fill_value,
- qs: np.ndarray,
+ qs: npt.NDArray[np.float64],
interpolation: str,
) -> np.ndarray:
"""
@@ -112,7 +117,7 @@ def _quantile_with_mask(
def _quantile_ea_compat(
- values: ExtensionArray, qs: np.ndarray, interpolation: str
+ values: ExtensionArray, qs: npt.NDArray[np.float64], interpolation: str
) -> ExtensionArray:
"""
ExtensionArray compatibility layer for _quantile_with_mask.
@@ -158,7 +163,7 @@ def _quantile_ea_compat(
def _quantile_ea_fallback(
- values: ExtensionArray, qs: np.ndarray, interpolation: str
+ values: ExtensionArray, qs: npt.NDArray[np.float64], interpolation: str
) -> ExtensionArray:
"""
quantile compatibility for ExtensionArray subclasses that do not
diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py
index 87d55702b33e0..c9d6640101a8b 100644
--- a/pandas/core/array_algos/take.py
+++ b/pandas/core/array_algos/take.py
@@ -179,8 +179,6 @@ def take_1d(
Note: similarly to `take_nd`, this function assumes that the indexer is
a valid(ated) indexer with no out of bound indices.
"""
- indexer = ensure_platform_int(indexer)
-
if not isinstance(arr, np.ndarray):
# ExtensionArray -> dispatch to their method
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py
index 11d32e8a159f3..d91404ff05157 100644
--- a/pandas/core/arraylike.py
+++ b/pandas/core/arraylike.py
@@ -337,7 +337,9 @@ def reconstruct(result):
"Consider explicitly converting the DataFrame "
"to an array with '.to_numpy()' first."
)
- warnings.warn(msg.format(ufunc), FutureWarning, stacklevel=4)
+ warnings.warn(
+ msg.format(ufunc), FutureWarning, stacklevel=find_stack_level()
+ )
return result
raise NotImplementedError
return result
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 674379f6d65f8..9d534a5a8d815 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -16,6 +16,8 @@
from pandas._libs import lib
from pandas._libs.arrays import NDArrayBacked
from pandas._typing import (
+ ArrayLike,
+ Dtype,
F,
PositionalIndexer2D,
PositionalIndexerTuple,
@@ -34,8 +36,15 @@
validate_insert_loc,
)
-from pandas.core.dtypes.common import is_dtype_equal
-from pandas.core.dtypes.dtypes import ExtensionDtype
+from pandas.core.dtypes.common import (
+ is_dtype_equal,
+ pandas_dtype,
+)
+from pandas.core.dtypes.dtypes import (
+ DatetimeTZDtype,
+ ExtensionDtype,
+ PeriodDtype,
+)
from pandas.core.dtypes.missing import array_equivalent
from pandas.core import missing
@@ -101,6 +110,41 @@ def _validate_scalar(self, value):
# ------------------------------------------------------------------------
+ def view(self, dtype: Dtype | None = None) -> ArrayLike:
+ # We handle datetime64, datetime64tz, timedelta64, and period
+ # dtypes here. Everything else we pass through to the underlying
+ # ndarray.
+ if dtype is None or dtype is self.dtype:
+ return self._from_backing_data(self._ndarray)
+
+ if isinstance(dtype, type):
+ # we sometimes pass non-dtype objects, e.g np.ndarray;
+ # pass those through to the underlying ndarray
+ return self._ndarray.view(dtype)
+
+ dtype = pandas_dtype(dtype)
+ arr = self._ndarray
+
+ if isinstance(dtype, (PeriodDtype, DatetimeTZDtype)):
+ cls = dtype.construct_array_type()
+ return cls(arr.view("i8"), dtype=dtype)
+ elif dtype == "M8[ns]":
+ from pandas.core.arrays import DatetimeArray
+
+ return DatetimeArray(arr.view("i8"), dtype=dtype)
+ elif dtype == "m8[ns]":
+ from pandas.core.arrays import TimedeltaArray
+
+ return TimedeltaArray(arr.view("i8"), dtype=dtype)
+
+ # error: Incompatible return value type (got "ndarray", expected
+ # "ExtensionArray")
+ # error: Argument "dtype" to "view" of "_ArrayOrScalarCommon" has incompatible
+ # type "Union[ExtensionDtype, dtype[Any]]"; expected "Union[dtype[Any], None,
+ # type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int,
+ # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"
+ return arr.view(dtype=dtype) # type: ignore[return-value,arg-type]
+
def take(
self: NDArrayBackedExtensionArrayT,
indices: TakeIndexer,
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index 58e7abbbe1ddd..c09d4486afcae 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -5,7 +5,6 @@
TYPE_CHECKING,
overload,
)
-import warnings
import numpy as np
@@ -21,7 +20,6 @@
npt,
type_t,
)
-from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
is_bool_dtype,
@@ -446,144 +444,6 @@ def _values_for_argsort(self) -> np.ndarray:
data[self._mask] = -1
return data
- def any(self, *, skipna: bool = True, axis: int | None = 0, **kwargs):
- """
- Return whether any element is True.
-
- Returns False unless there is at least one element that is True.
- By default, NAs are skipped. If ``skipna=False`` is specified and
- missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
- is used as for logical operations.
-
- Parameters
- ----------
- skipna : bool, default True
- Exclude NA values. If the entire array is NA and `skipna` is
- True, then the result will be False, as for an empty array.
- If `skipna` is False, the result will still be True if there is
- at least one element that is True, otherwise NA will be returned
- if there are NA's present.
- axis : int or None, default 0
- **kwargs : any, default None
- Additional keywords have no effect but might be accepted for
- compatibility with NumPy.
-
- Returns
- -------
- bool or :attr:`pandas.NA`
-
- See Also
- --------
- numpy.any : Numpy version of this method.
- BooleanArray.all : Return whether all elements are True.
-
- Examples
- --------
- The result indicates whether any element is True (and by default
- skips NAs):
-
- >>> pd.array([True, False, True]).any()
- True
- >>> pd.array([True, False, pd.NA]).any()
- True
- >>> pd.array([False, False, pd.NA]).any()
- False
- >>> pd.array([], dtype="boolean").any()
- False
- >>> pd.array([pd.NA], dtype="boolean").any()
- False
-
- With ``skipna=False``, the result can be NA if this is logically
- required (whether ``pd.NA`` is True or False influences the result):
-
- >>> pd.array([True, False, pd.NA]).any(skipna=False)
- True
- >>> pd.array([False, False, pd.NA]).any(skipna=False)
- <NA>
- """
- kwargs.pop("axis", None)
- nv.validate_any((), kwargs)
-
- values = self._data.copy()
- np.putmask(values, self._mask, False)
- result = values.any(axis=axis)
-
- if skipna:
- return result
- else:
- if result or self.size == 0 or not self._mask.any():
- return result
- else:
- return self.dtype.na_value
-
- def all(self, *, skipna: bool = True, axis: int | None = 0, **kwargs):
- """
- Return whether all elements are True.
-
- Returns True unless there is at least one element that is False.
- By default, NAs are skipped. If ``skipna=False`` is specified and
- missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
- is used as for logical operations.
-
- Parameters
- ----------
- skipna : bool, default True
- Exclude NA values. If the entire array is NA and `skipna` is
- True, then the result will be True, as for an empty array.
- If `skipna` is False, the result will still be False if there is
- at least one element that is False, otherwise NA will be returned
- if there are NA's present.
- axis : int or None, default 0
- **kwargs : any, default None
- Additional keywords have no effect but might be accepted for
- compatibility with NumPy.
-
- Returns
- -------
- bool or :attr:`pandas.NA`
-
- See Also
- --------
- numpy.all : Numpy version of this method.
- BooleanArray.any : Return whether any element is True.
-
- Examples
- --------
- The result indicates whether any element is True (and by default
- skips NAs):
-
- >>> pd.array([True, True, pd.NA]).all()
- True
- >>> pd.array([True, False, pd.NA]).all()
- False
- >>> pd.array([], dtype="boolean").all()
- True
- >>> pd.array([pd.NA], dtype="boolean").all()
- True
-
- With ``skipna=False``, the result can be NA if this is logically
- required (whether ``pd.NA`` is True or False influences the result):
-
- >>> pd.array([True, True, pd.NA]).all(skipna=False)
- <NA>
- >>> pd.array([True, False, pd.NA]).all(skipna=False)
- False
- """
- kwargs.pop("axis", None)
- nv.validate_all((), kwargs)
-
- values = self._data.copy()
- np.putmask(values, self._mask, True)
- result = values.all(axis=axis)
-
- if skipna:
- return result
- else:
- if not result or self.size == 0 or not self._mask.any():
- return result
- else:
- return self.dtype.na_value
-
def _logical_method(self, other, op):
assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
@@ -621,48 +481,6 @@ def _logical_method(self, other, op):
# expected "ndarray"
return BooleanArray(result, mask) # type: ignore[arg-type]
- def _cmp_method(self, other, op):
- from pandas.arrays import (
- FloatingArray,
- IntegerArray,
- )
-
- if isinstance(other, (IntegerArray, FloatingArray)):
- return NotImplemented
-
- mask = None
-
- if isinstance(other, BooleanArray):
- other, mask = other._data, other._mask
-
- elif is_list_like(other):
- other = np.asarray(other)
- if other.ndim > 1:
- raise NotImplementedError("can only perform ops with 1-d structures")
- if len(self) != len(other):
- raise ValueError("Lengths must match to compare")
-
- if other is libmissing.NA:
- # numpy does not handle pd.NA well as "other" scalar (it returns
- # a scalar False instead of an array)
- result = np.zeros_like(self._data)
- mask = np.ones_like(self._data)
- else:
- # numpy will show a DeprecationWarning on invalid elementwise
- # comparisons, this will raise in the future
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", "elementwise", FutureWarning)
- with np.errstate(all="ignore"):
- result = op(self._data, other)
-
- # nans propagate
- if mask is None:
- mask = self._mask.copy()
- else:
- mask = self._mask | mask
-
- return BooleanArray(result, mask, copy=False)
-
def _arith_method(self, other, op):
mask = None
op_name = op.__name__
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 759c7fb65374d..4c6a32ff1ba4e 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2291,18 +2291,28 @@ def _concat_same_type(
) -> CategoricalT:
from pandas.core.dtypes.concat import union_categoricals
- result = union_categoricals(to_concat)
-
- # in case we are concatenating along axis != 0, we need to reshape
- # the result from union_categoricals
first = to_concat[0]
if axis >= first.ndim:
- raise ValueError
+ raise ValueError(
+ f"axis {axis} is out of bounds for array of dimension {first.ndim}"
+ )
+
if axis == 1:
- if not all(len(x) == len(first) for x in to_concat):
+ # Flatten, concatenate then reshape
+ if not all(x.ndim == 2 for x in to_concat):
raise ValueError
- # TODO: Will this get contiguity wrong?
- result = result.reshape(-1, len(to_concat), order="F")
+
+ # pass correctly-shaped to union_categoricals
+ tc_flat = []
+ for obj in to_concat:
+ tc_flat.extend([obj[:, i] for i in range(obj.shape[1])])
+
+ res_flat = cls._concat_same_type(tc_flat, axis=0)
+
+ result = res_flat.reshape(len(first), -1, order="F")
+ return result
+
+ result = union_categoricals(to_concat)
return result
# ------------------------------------------------------------------
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index f8aa1656c8c30..33da9ca858a4c 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -85,11 +85,7 @@
is_unsigned_integer_dtype,
pandas_dtype,
)
-from pandas.core.dtypes.dtypes import (
- DatetimeTZDtype,
- ExtensionDtype,
- PeriodDtype,
-)
+from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
@@ -416,13 +412,12 @@ def astype(self, dtype, copy: bool = True):
elif is_integer_dtype(dtype):
# we deliberately ignore int32 vs. int64 here.
# See https://github.com/pandas-dev/pandas/issues/24381 for more.
- level = find_stack_level()
warnings.warn(
f"casting {self.dtype} values to int64 with .astype(...) is "
"deprecated and will raise in a future version. "
"Use .view(...) instead.",
FutureWarning,
- stacklevel=level,
+ stacklevel=find_stack_level(),
)
values = self.asi8
@@ -462,36 +457,9 @@ def view(self, dtype: Dtype | None = ...) -> ArrayLike:
...
def view(self, dtype: Dtype | None = None) -> ArrayLike:
- # We handle datetime64, datetime64tz, timedelta64, and period
- # dtypes here. Everything else we pass through to the underlying
- # ndarray.
- if dtype is None or dtype is self.dtype:
- return type(self)(self._ndarray, dtype=self.dtype)
-
- if isinstance(dtype, type):
- # we sometimes pass non-dtype objects, e.g np.ndarray;
- # pass those through to the underlying ndarray
- return self._ndarray.view(dtype)
-
- dtype = pandas_dtype(dtype)
- if isinstance(dtype, (PeriodDtype, DatetimeTZDtype)):
- cls = dtype.construct_array_type()
- return cls(self.asi8, dtype=dtype)
- elif dtype == "M8[ns]":
- from pandas.core.arrays import DatetimeArray
-
- return DatetimeArray(self.asi8, dtype=dtype)
- elif dtype == "m8[ns]":
- from pandas.core.arrays import TimedeltaArray
-
- return TimedeltaArray(self.asi8, dtype=dtype)
- # error: Incompatible return value type (got "ndarray", expected
- # "ExtensionArray")
- # error: Argument "dtype" to "view" of "_ArrayOrScalarCommon" has incompatible
- # type "Union[ExtensionDtype, dtype[Any]]"; expected "Union[dtype[Any], None,
- # type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int,
- # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"
- return self._ndarray.view(dtype=dtype) # type: ignore[return-value,arg-type]
+ # we need to explicitly call super() method as long as the `@overload`s
+ # are present in this file.
+ return super().view(dtype)
# ------------------------------------------------------------------
# ExtensionArray Interface
@@ -636,7 +604,10 @@ def _validate_scalar(
-------
self._scalar_type or NaT
"""
- if isinstance(value, str):
+ if isinstance(value, self._scalar_type):
+ pass
+
+ elif isinstance(value, str):
# NB: Careful about tzawareness
try:
value = self._scalar_from_string(value)
@@ -1467,8 +1438,6 @@ def max(self, *, axis: int | None = None, skipna: bool = True, **kwargs):
Index.max : Return the maximum value in an Index.
Series.max : Return the maximum value in a Series.
"""
- # TODO: skipna is broken with max.
- # See https://github.com/pandas-dev/pandas/issues/24265
nv.validate_max((), kwargs)
nv.validate_minmax_axis(axis, self.ndim)
@@ -1639,6 +1608,13 @@ def strftime(self, date_format: str) -> npt.NDArray[np.object_]:
------
ValueError if the `freq` cannot be converted.
+ Notes
+ -----
+ If the timestamps have a timezone, {op}ing will take place relative to the
+ local ("wall") time and re-localized to the same timezone. When {op}ing
+ near daylight savings time, use ``nonexistent`` and ``ambiguous`` to
+ control the re-localization behavior.
+
Examples
--------
**DatetimeIndex**
@@ -1662,6 +1638,19 @@ def strftime(self, date_format: str) -> npt.NDArray[np.object_]:
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
+
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
+ ``nonexistent`` to control how the timestamp should be re-localized.
+
+ >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam")
+
+ >>> rng_tz.floor("2H", ambiguous=False)
+ DatetimeIndex(['2021-10-31 02:00:00+01:00'],
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
+
+ >>> rng_tz.floor("2H", ambiguous=True)
+ DatetimeIndex(['2021-10-31 02:00:00+02:00'],
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
"""
_floor_example = """>>> rng.floor('H')
@@ -1676,6 +1665,19 @@ def strftime(self, date_format: str) -> npt.NDArray[np.object_]:
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
+
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
+ ``nonexistent`` to control how the timestamp should be re-localized.
+
+ >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam")
+
+ >>> rng_tz.floor("2H", ambiguous=False)
+ DatetimeIndex(['2021-10-31 02:00:00+01:00'],
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
+
+ >>> rng_tz.floor("2H", ambiguous=True)
+ DatetimeIndex(['2021-10-31 02:00:00+02:00'],
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
"""
_ceil_example = """>>> rng.ceil('H')
@@ -1690,6 +1692,19 @@ def strftime(self, date_format: str) -> npt.NDArray[np.object_]:
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
+
+ When rounding near a daylight savings time transition, use ``ambiguous`` or
+ ``nonexistent`` to control how the timestamp should be re-localized.
+
+ >>> rng_tz = pd.DatetimeIndex(["2021-10-31 01:30:00"], tz="Europe/Amsterdam")
+
+ >>> rng_tz.ceil("H", ambiguous=False)
+ DatetimeIndex(['2021-10-31 02:00:00+01:00'],
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
+
+ >>> rng_tz.ceil("H", ambiguous=True)
+ DatetimeIndex(['2021-10-31 02:00:00+02:00'],
+ dtype='datetime64[ns, Europe/Amsterdam]', freq=None)
"""
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index a0a7ef3501d7f..7bd3403abd5cc 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -9,7 +9,6 @@
from typing import (
TYPE_CHECKING,
Literal,
- overload,
)
import warnings
@@ -356,7 +355,7 @@ def _from_sequence_not_strict(
freq, freq_infer = dtl.maybe_infer_freq(freq)
- subarr, tz, inferred_freq = sequence_to_dt64ns(
+ subarr, tz, inferred_freq = _sequence_to_dt64ns(
data,
dtype=dtype,
copy=copy,
@@ -509,7 +508,7 @@ def _check_compatible_with(self, other, setitem: bool = False):
self._assert_tzawareness_compat(other)
if setitem:
# Stricter check for setitem vs comparison methods
- if not timezones.tz_compare(self.tz, other.tz):
+ if self.tz is not None and not timezones.tz_compare(self.tz, other.tz):
# TODO(2.0): remove this check. GH#37605
warnings.warn(
"Setitem-like behavior with mismatched timezones is deprecated "
@@ -1972,41 +1971,22 @@ def std(
# Constructor Helpers
-@overload
-def sequence_to_datetimes(
- data, allow_object: Literal[False] = ..., require_iso8601: bool = ...
-) -> DatetimeArray:
- ...
-
-
-@overload
-def sequence_to_datetimes(
- data, allow_object: Literal[True] = ..., require_iso8601: bool = ...
-) -> np.ndarray | DatetimeArray:
- ...
-
-
-def sequence_to_datetimes(
- data, allow_object: bool = False, require_iso8601: bool = False
-) -> np.ndarray | DatetimeArray:
+def sequence_to_datetimes(data, require_iso8601: bool = False) -> DatetimeArray:
"""
Parse/convert the passed data to either DatetimeArray or np.ndarray[object].
"""
- result, tz, freq = sequence_to_dt64ns(
+ result, tz, freq = _sequence_to_dt64ns(
data,
- allow_object=allow_object,
allow_mixed=True,
require_iso8601=require_iso8601,
)
- if result.dtype == object:
- return result
dtype = tz_to_dtype(tz)
dta = DatetimeArray._simple_new(result, freq=freq, dtype=dtype)
return dta
-def sequence_to_dt64ns(
+def _sequence_to_dt64ns(
data,
dtype=None,
copy=False,
@@ -2015,7 +1995,6 @@ def sequence_to_dt64ns(
yearfirst=False,
ambiguous="raise",
*,
- allow_object: bool = False,
allow_mixed: bool = False,
require_iso8601: bool = False,
):
@@ -2030,9 +2009,6 @@ def sequence_to_dt64ns(
yearfirst : bool, default False
ambiguous : str, bool, or arraylike, default 'raise'
See pandas._libs.tslibs.tzconversion.tz_localize_to_utc.
- allow_object : bool, default False
- Whether to return an object-dtype ndarray instead of raising if the
- data contains more than one timezone.
allow_mixed : bool, default False
Interpret integers as timestamps when datetime objects are also present.
require_iso8601 : bool, default False
@@ -2102,19 +2078,21 @@ def sequence_to_dt64ns(
data,
dayfirst=dayfirst,
yearfirst=yearfirst,
- allow_object=allow_object,
+ allow_object=False,
allow_mixed=allow_mixed,
require_iso8601=require_iso8601,
)
if tz and inferred_tz:
# two timezones: convert to intended from base UTC repr
- data = tzconversion.tz_convert_from_utc(data.view("i8"), tz)
- data = data.view(DT64NS_DTYPE)
+ if data.dtype == "i8":
+ # GH#42505
+ # by convention, these are _already_ UTC, e.g
+ return data.view(DT64NS_DTYPE), tz, None
+
+ utc_vals = tzconversion.tz_convert_from_utc(data.view("i8"), tz)
+ data = utc_vals.view(DT64NS_DTYPE)
elif inferred_tz:
tz = inferred_tz
- elif allow_object and data.dtype == object:
- # We encountered mixed-timezones.
- return data, None, None
data_dtype = data.dtype
@@ -2593,7 +2571,7 @@ def generate_range(start=None, end=None, periods=None, offset=BDay()):
break
# faster than cur + offset
- next_date = offset.apply(cur)
+ next_date = offset._apply(cur)
if next_date <= cur:
raise ValueError(f"Offset {offset} did not increment date")
cur = next_date
@@ -2607,7 +2585,7 @@ def generate_range(start=None, end=None, periods=None, offset=BDay()):
break
# faster than cur + offset
- next_date = offset.apply(cur)
+ next_date = offset._apply(cur)
if next_date >= cur:
raise ValueError(f"Offset {offset} did not decrement date")
cur = next_date
diff --git a/pandas/core/arrays/floating.py b/pandas/core/arrays/floating.py
index 6d6cc03a1c83e..1e7f1aff52d2e 100644
--- a/pandas/core/arrays/floating.py
+++ b/pandas/core/arrays/floating.py
@@ -1,14 +1,10 @@
from __future__ import annotations
from typing import overload
-import warnings
import numpy as np
-from pandas._libs import (
- lib,
- missing as libmissing,
-)
+from pandas._libs import lib
from pandas._typing import (
ArrayLike,
AstypeArg,
@@ -24,7 +20,6 @@
is_datetime64_dtype,
is_float_dtype,
is_integer_dtype,
- is_list_like,
is_object_dtype,
pandas_dtype,
)
@@ -39,7 +34,6 @@
NumericArray,
NumericDtype,
)
-from pandas.core.ops import invalid_comparison
from pandas.core.tools.numeric import to_numeric
@@ -176,9 +170,7 @@ def coerce_to_array(
if mask.any():
values = values.copy()
values[mask] = np.nan
- values = values.astype(dtype, copy=False) # , casting="safe")
- else:
- values = values.astype(dtype, copy=False) # , casting="safe")
+ values = values.astype(dtype, copy=False) # , casting="safe")
return values, mask
@@ -339,52 +331,6 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
def _values_for_argsort(self) -> np.ndarray:
return self._data
- def _cmp_method(self, other, op):
- from pandas.arrays import (
- BooleanArray,
- IntegerArray,
- )
-
- mask = None
-
- if isinstance(other, (BooleanArray, IntegerArray, FloatingArray)):
- other, mask = other._data, other._mask
-
- elif is_list_like(other):
- other = np.asarray(other)
- if other.ndim > 1:
- raise NotImplementedError("can only perform ops with 1-d structures")
-
- if other is libmissing.NA:
- # numpy does not handle pd.NA well as "other" scalar (it returns
- # a scalar False instead of an array)
- # This may be fixed by NA.__array_ufunc__. Revisit this check
- # once that's implemented.
- result = np.zeros(self._data.shape, dtype="bool")
- mask = np.ones(self._data.shape, dtype="bool")
- else:
- with warnings.catch_warnings():
- # numpy may show a FutureWarning:
- # elementwise comparison failed; returning scalar instead,
- # but in the future will perform elementwise comparison
- # before returning NotImplemented. We fall back to the correct
- # behavior today, so that should be fine to ignore.
- warnings.filterwarnings("ignore", "elementwise", FutureWarning)
- with np.errstate(all="ignore"):
- method = getattr(self._data, f"__{op.__name__}__")
- result = method(other)
-
- if result is NotImplemented:
- result = invalid_comparison(self._data, other, op)
-
- # nans propagate
- if mask is None:
- mask = self._mask.copy()
- else:
- mask = self._mask | mask
-
- return BooleanArray(result, mask)
-
def sum(self, *, skipna=True, min_count=0, axis: int | None = 0, **kwargs):
nv.validate_sum((), kwargs)
return super()._reduce("sum", skipna=skipna, min_count=min_count, axis=axis)
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index d8b7bf2b86d2c..12bef068ef44b 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -1,14 +1,12 @@
from __future__ import annotations
from typing import overload
-import warnings
import numpy as np
from pandas._libs import (
iNaT,
lib,
- missing as libmissing,
)
from pandas._typing import (
ArrayLike,
@@ -30,7 +28,6 @@
is_float,
is_float_dtype,
is_integer_dtype,
- is_list_like,
is_object_dtype,
is_string_dtype,
pandas_dtype,
@@ -38,15 +35,11 @@
from pandas.core.dtypes.missing import isna
from pandas.core.arrays import ExtensionArray
-from pandas.core.arrays.masked import (
- BaseMaskedArray,
- BaseMaskedDtype,
-)
+from pandas.core.arrays.masked import BaseMaskedDtype
from pandas.core.arrays.numeric import (
NumericArray,
NumericDtype,
)
-from pandas.core.ops import invalid_comparison
from pandas.core.tools.numeric import to_numeric
@@ -214,9 +207,9 @@ def coerce_to_array(
else:
assert len(mask) == len(values)
- if not values.ndim == 1:
+ if values.ndim != 1:
raise TypeError("values must be a 1D list-like")
- if not mask.ndim == 1:
+ if mask.ndim != 1:
raise TypeError("mask must be a 1D list-like")
# infer dtype if needed
@@ -418,51 +411,6 @@ def _values_for_argsort(self) -> np.ndarray:
data[self._mask] = data.min() - 1
return data
- def _cmp_method(self, other, op):
- from pandas.core.arrays import BooleanArray
-
- mask = None
-
- if isinstance(other, BaseMaskedArray):
- other, mask = other._data, other._mask
-
- elif is_list_like(other):
- other = np.asarray(other)
- if other.ndim > 1:
- raise NotImplementedError("can only perform ops with 1-d structures")
- if len(self) != len(other):
- raise ValueError("Lengths must match to compare")
-
- if other is libmissing.NA:
- # numpy does not handle pd.NA well as "other" scalar (it returns
- # a scalar False instead of an array)
- # This may be fixed by NA.__array_ufunc__. Revisit this check
- # once that's implemented.
- result = np.zeros(self._data.shape, dtype="bool")
- mask = np.ones(self._data.shape, dtype="bool")
- else:
- with warnings.catch_warnings():
- # numpy may show a FutureWarning:
- # elementwise comparison failed; returning scalar instead,
- # but in the future will perform elementwise comparison
- # before returning NotImplemented. We fall back to the correct
- # behavior today, so that should be fine to ignore.
- warnings.filterwarnings("ignore", "elementwise", FutureWarning)
- with np.errstate(all="ignore"):
- method = getattr(self._data, f"__{op.__name__}__")
- result = method(other)
-
- if result is NotImplemented:
- result = invalid_comparison(self._data, other, op)
-
- # nans propagate
- if mask is None:
- mask = self._mask.copy()
- else:
- mask = self._mask | mask
-
- return BooleanArray(result, mask)
-
def sum(self, *, skipna=True, min_count=0, axis: int | None = 0, **kwargs):
nv.validate_sum((), kwargs)
return super()._reduce("sum", skipna=skipna, min_count=min_count, axis=axis)
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 01bf5ec0633b5..e1347391b2bdd 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1195,15 +1195,7 @@ def length(self):
Return an Index with entries denoting the length of each Interval in
the IntervalArray.
"""
- try:
- return self.right - self.left
- except TypeError as err:
- # length not defined for some types, e.g. string
- msg = (
- "IntervalArray contains Intervals without defined length, "
- "e.g. Intervals with string endpoints"
- )
- raise TypeError(msg) from err
+ return self.right - self.left
@property
def mid(self):
@@ -1496,7 +1488,7 @@ def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
def insert(self: IntervalArrayT, loc: int, item: Interval) -> IntervalArrayT:
"""
Return a new IntervalArray inserting new item at location. Follows
- Python list.append semantics for negative values. Only Interval
+ Python numpy.insert semantics for negative values. Only Interval
objects and NA can be inserted into an IntervalIndex
Parameters
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index b11b11ded2f22..b334a167d3824 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -7,6 +7,7 @@
TypeVar,
overload,
)
+import warnings
import numpy as np
@@ -40,6 +41,7 @@
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
+ is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
@@ -47,6 +49,7 @@
)
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import (
+ array_equivalent,
isna,
notna,
)
@@ -65,6 +68,7 @@
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.indexers import check_array_indexer
+from pandas.core.ops import invalid_comparison
if TYPE_CHECKING:
from pandas import Series
@@ -481,6 +485,51 @@ def _hasna(self) -> bool:
# error: Incompatible return value type (got "bool_", expected "bool")
return self._mask.any() # type: ignore[return-value]
+ def _cmp_method(self, other, op) -> BooleanArray:
+ from pandas.core.arrays import BooleanArray
+
+ mask = None
+
+ if isinstance(other, BaseMaskedArray):
+ other, mask = other._data, other._mask
+
+ elif is_list_like(other):
+ other = np.asarray(other)
+ if other.ndim > 1:
+ raise NotImplementedError("can only perform ops with 1-d structures")
+ if len(self) != len(other):
+ raise ValueError("Lengths must match to compare")
+
+ if other is libmissing.NA:
+ # numpy does not handle pd.NA well as "other" scalar (it returns
+ # a scalar False instead of an array)
+ # This may be fixed by NA.__array_ufunc__. Revisit this check
+ # once that's implemented.
+ result = np.zeros(self._data.shape, dtype="bool")
+ mask = np.ones(self._data.shape, dtype="bool")
+ else:
+ with warnings.catch_warnings():
+ # numpy may show a FutureWarning:
+ # elementwise comparison failed; returning scalar instead,
+ # but in the future will perform elementwise comparison
+ # before returning NotImplemented. We fall back to the correct
+ # behavior today, so that should be fine to ignore.
+ warnings.filterwarnings("ignore", "elementwise", FutureWarning)
+ with np.errstate(all="ignore"):
+ method = getattr(self._data, f"__{op.__name__}__")
+ result = method(other)
+
+ if result is NotImplemented:
+ result = invalid_comparison(self._data, other, op)
+
+ # nans propagate
+ if mask is None:
+ mask = self._mask.copy()
+ else:
+ mask = self._mask | mask
+
+ return BooleanArray(result, mask, copy=False)
+
def isna(self) -> np.ndarray:
return self._mask.copy()
@@ -604,7 +653,7 @@ def value_counts(self, dropna: bool = True) -> Series:
data = self._data[~self._mask]
value_counts = Index(data).value_counts()
- # TODO(extension)
+ # TODO(ExtensionIndex)
# if we have allow Index to hold an ExtensionArray
# this is easier
index = value_counts.index._values.astype(object)
@@ -627,6 +676,22 @@ def value_counts(self, dropna: bool = True) -> Series:
return Series(counts, index=index)
+ @doc(ExtensionArray.equals)
+ def equals(self, other) -> bool:
+ if type(self) != type(other):
+ return False
+ if other.dtype != self.dtype:
+ return False
+
+ # GH#44382 if e.g. self[1] is np.nan and other[1] is pd.NA, we are NOT
+ # equal.
+ if not np.array_equal(self._mask, other._mask):
+ return False
+
+ left = self._data[~self._mask]
+ right = other._data[~other._mask]
+ return array_equivalent(left, right, dtype_equal=True)
+
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if name in {"any", "all"}:
return getattr(self, name)(skipna=skipna, **kwargs)
diff --git a/pandas/core/arrays/numeric.py b/pandas/core/arrays/numeric.py
index e1990dc064a84..eb955e4d42bc5 100644
--- a/pandas/core/arrays/numeric.py
+++ b/pandas/core/arrays/numeric.py
@@ -168,7 +168,7 @@ def __neg__(self):
return type(self)(-self._data, self._mask.copy())
def __pos__(self):
- return self
+ return self.copy()
def __abs__(self):
return type(self)(abs(self._data), self._mask.copy())
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index e9fb5bdf80045..df71501d55b20 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -19,7 +19,7 @@
Scalar,
type_t,
)
-from pandas.compat import pa_version_under1p0
+from pandas.compat import pa_version_under1p01
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.base import (
@@ -104,11 +104,10 @@ def __init__(self, storage=None):
raise ValueError(
f"Storage must be 'python' or 'pyarrow'. Got {storage} instead."
)
- if storage == "pyarrow" and pa_version_under1p0:
+ if storage == "pyarrow" and pa_version_under1p01:
raise ImportError(
"pyarrow>=1.0.0 is required for PyArrow backed StringArray."
)
-
self.storage = storage
@property
@@ -319,9 +318,7 @@ def __init__(self, values, copy=False):
def _validate(self):
"""Validate that we only store NA or strings."""
- if len(self._ndarray) and not lib.is_string_array(
- self._ndarray.ravel("K"), skipna=True
- ):
+ if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True):
raise ValueError("StringArray requires a sequence of strings or pandas.NA")
if self._ndarray.dtype != "object":
raise ValueError(
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index b3278a81e93b7..b1daf0e393ef0 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -27,7 +27,7 @@
npt,
)
from pandas.compat import (
- pa_version_under1p0,
+ pa_version_under1p01,
pa_version_under2p0,
pa_version_under3p0,
pa_version_under4p0,
@@ -63,10 +63,7 @@
)
from pandas.core.strings.object_array import ObjectStringArrayMixin
-# PyArrow backed StringArrays are available starting at 1.0.0, but this
-# file is imported from even if pyarrow is < 1.0.0, before pyarrow.compute
-# and its compute functions existed. GH38801
-if not pa_version_under1p0:
+if not pa_version_under1p01:
import pyarrow as pa
import pyarrow.compute as pc
@@ -87,7 +84,7 @@
def _chk_pyarrow_available() -> None:
- if pa_version_under1p0:
+ if pa_version_under1p01:
msg = "pyarrow>=1.0.0 is required for PyArrow backed StringArray."
raise ImportError(msg)
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 3d8f9f7edcc74..8fe330d0d41dd 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -651,8 +651,7 @@ def __floordiv__(self, other):
# at this point we should only have numeric scalars; anything
# else will raise
- result = self.asi8 // other
- np.putmask(result, self._isnan, iNaT)
+ result = self._ndarray // other
freq = None
if self.freq is not None:
# Note: freq gets division, not floor-division
@@ -661,7 +660,7 @@ def __floordiv__(self, other):
# e.g. if self.freq is Nano(1) then dividing by 2
# rounds down to zero
freq = None
- return type(self)(result.view("m8[ns]"), freq=freq)
+ return type(self)(result, freq=freq)
if not hasattr(other, "dtype"):
# list, tuple
@@ -801,7 +800,7 @@ def __neg__(self) -> TimedeltaArray:
return type(self)(-self._ndarray)
def __pos__(self) -> TimedeltaArray:
- return type(self)(self._ndarray, freq=self.freq)
+ return type(self)(self._ndarray.copy(), freq=self.freq)
def __abs__(self) -> TimedeltaArray:
# Note: freq is not preserved
diff --git a/pandas/core/base.py b/pandas/core/base.py
index a1bf448df18c4..9040414a8f35f 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1239,8 +1239,8 @@ def factorize(self, sort: bool = False, na_sentinel: int | None = -1):
def searchsorted( # type: ignore[misc]
self,
value: npt._ScalarLike_co,
- side: Literal["left", "right"] = "left",
- sorter: NumpySorter = None,
+ side: Literal["left", "right"] = ...,
+ sorter: NumpySorter = ...,
) -> np.intp:
...
@@ -1248,8 +1248,8 @@ def searchsorted( # type: ignore[misc]
def searchsorted(
self,
value: npt.ArrayLike | ExtensionArray,
- side: Literal["left", "right"] = "left",
- sorter: NumpySorter = None,
+ side: Literal["left", "right"] = ...,
+ sorter: NumpySorter = ...,
) -> npt.NDArray[np.intp]:
...
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 31c2ec8f0cbf9..bf2d770ee1e7f 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -238,6 +238,16 @@ def use_numba_cb(key):
(default: True)
"""
+pc_max_dir_items = """\
+: int
+ The number of items that will be added to `dir(...)`. 'None' value means
+ unlimited. Because dir is cached, changing this option will not immediately
+ affect already existing dataframes until a column is deleted or added.
+
+ This is for instance used to suggest columns from a dataframe to tab
+ completion.
+"""
+
pc_width_doc = """
: int
Width of the display in characters. In case python/IPython is running in
@@ -451,6 +461,9 @@ def _deprecate_negative_int_max_colwidth(key):
cf.register_option(
"html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool
)
+ cf.register_option(
+ "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int
+ )
tc_sim_interactive_doc = """
: boolean
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 2c26d6f838315..79ea7731466d4 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -969,13 +969,12 @@ def astype_dt64_to_dt64tz(
# this should be the only copy
values = values.copy()
- level = find_stack_level()
warnings.warn(
"Using .astype to convert from timezone-naive dtype to "
"timezone-aware dtype is deprecated and will raise in a "
"future version. Use ser.dt.tz_localize instead.",
FutureWarning,
- stacklevel=level,
+ stacklevel=find_stack_level(),
)
# GH#33401 this doesn't match DatetimeArray.astype, which
@@ -986,13 +985,12 @@ def astype_dt64_to_dt64tz(
# DatetimeArray/DatetimeIndex.astype behavior
if values.tz is None and aware:
dtype = cast(DatetimeTZDtype, dtype)
- level = find_stack_level()
warnings.warn(
"Using .astype to convert from timezone-naive dtype to "
"timezone-aware dtype is deprecated and will raise in a "
"future version. Use obj.tz_localize instead.",
FutureWarning,
- stacklevel=level,
+ stacklevel=find_stack_level(),
)
return values.tz_localize(dtype.tz)
@@ -1006,14 +1004,13 @@ def astype_dt64_to_dt64tz(
return result
elif values.tz is not None:
- level = find_stack_level()
warnings.warn(
"Using .astype to convert from timezone-aware dtype to "
"timezone-naive dtype is deprecated and will raise in a "
"future version. Use obj.tz_localize(None) or "
"obj.tz_convert('UTC').tz_localize(None) instead",
FutureWarning,
- stacklevel=level,
+ stacklevel=find_stack_level(),
)
result = values.tz_convert("UTC").tz_localize(None)
@@ -1526,7 +1523,7 @@ def try_datetime(v: np.ndarray) -> ArrayLike:
try:
# GH#19671 we pass require_iso8601 to be relatively strict
# when parsing strings.
- dta = sequence_to_datetimes(v, require_iso8601=True, allow_object=False)
+ dta = sequence_to_datetimes(v, require_iso8601=True)
except (ValueError, TypeError):
# e.g. <class 'numpy.timedelta64'> is not convertible to datetime
return v.reshape(shape)
@@ -1587,6 +1584,7 @@ def try_timedelta(v: np.ndarray) -> np.ndarray:
value = try_datetime(v) # type: ignore[assignment]
if value.dtype.kind in ["m", "M"] and seen_str:
+ # TODO(2.0): enforcing this deprecation should close GH#40111
warnings.warn(
f"Inferring {value.dtype} from data containing strings is deprecated "
"and will be removed in a future version. To retain the old behavior "
@@ -1637,7 +1635,7 @@ def maybe_cast_to_datetime(
try:
if is_datetime64:
- dta = sequence_to_datetimes(value, allow_object=False)
+ dta = sequence_to_datetimes(value)
# GH 25843: Remove tz information since the dtype
# didn't specify one
@@ -1665,7 +1663,7 @@ def maybe_cast_to_datetime(
# datetime64tz is assumed to be naive which should
# be localized to the timezone.
is_dt_string = is_string_dtype(value.dtype)
- dta = sequence_to_datetimes(value, allow_object=False)
+ dta = sequence_to_datetimes(value)
if dta.tz is not None:
value = dta.astype(dtype, copy=False)
elif is_dt_string:
@@ -1817,7 +1815,7 @@ def find_common_type(types: list[DtypeObj]) -> DtypeObj:
# workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
# => object
- if all(is_dtype_equal(first, t) for t in types[1:]):
+ if lib.dtypes_all_equal(list(types)):
return first
# get unique types (dict.fromkeys is used as order-preserving set())
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 28f415476d3fd..701f9fd4a9c99 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -107,7 +107,9 @@ def is_nonempty(x) -> bool:
to_concat = non_empties
kinds = {obj.dtype.kind for obj in to_concat}
- contains_datetime = any(kind in ["m", "M"] for kind in kinds)
+ contains_datetime = any(kind in ["m", "M"] for kind in kinds) or any(
+ isinstance(obj, ABCExtensionArray) and obj.ndim > 1 for obj in to_concat
+ )
all_empty = not len(non_empties)
single_dtype = len({x.dtype for x in to_concat}) == 1
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index ae961e53d8b79..1f1486b1b29a7 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -447,5 +447,5 @@ def is_inferred_bool_dtype(arr: ArrayLike) -> bool:
if dtype == np.dtype(bool):
return True
elif dtype == np.dtype("object"):
- return lib.is_bool_array(arr.ravel("K"))
+ return lib.is_bool_array(arr)
return False
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index c457b52cf4b0e..d2733cddf8cee 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -158,11 +158,7 @@ def _isna(obj, inf_as_na: bool = False):
boolean ndarray or boolean
"""
if is_scalar(obj):
- if inf_as_na:
- return libmissing.checknull_old(obj)
- else:
- return libmissing.checknull(obj)
- # hack (for now) because MI registers as ndarray
+ return libmissing.checknull(obj, inf_as_na=inf_as_na)
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, type):
@@ -243,7 +239,7 @@ def _isna_array(values: ArrayLike, inf_as_na: bool = False):
if not isinstance(values, np.ndarray):
# i.e. ExtensionArray
if inf_as_na and is_categorical_dtype(dtype):
- result = libmissing.isnaobj_old(values.to_numpy())
+ result = libmissing.isnaobj(values.to_numpy(), inf_as_na=inf_as_na)
else:
result = values.isna()
elif is_string_dtype(dtype):
@@ -269,10 +265,7 @@ def _isna_string_dtype(values: np.ndarray, inf_as_na: bool) -> np.ndarray:
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
- if inf_as_na:
- vec = libmissing.isnaobj_old(values.ravel())
- else:
- vec = libmissing.isnaobj(values.ravel())
+ vec = libmissing.isnaobj(values.ravel(), inf_as_na=inf_as_na)
result[...] = vec.reshape(shape)
@@ -475,8 +468,8 @@ def array_equivalent(
return np.array_equal(left, right)
-def _array_equivalent_float(left, right):
- return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
+def _array_equivalent_float(left, right) -> bool:
+ return bool(((left == right) | (np.isnan(left) & np.isnan(right))).all())
def _array_equivalent_datetimelike(left, right):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b01de5dec610d..803d1c914c954 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -16,7 +16,6 @@
import functools
from io import StringIO
import itertools
-import mmap
from textwrap import dedent
from typing import (
IO,
@@ -55,7 +54,7 @@
CompressionOptions,
Dtype,
DtypeObj,
- FilePathOrBuffer,
+ FilePath,
FillnaOptions,
FloatFormatType,
FormattersType,
@@ -71,6 +70,7 @@
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
ValueKeyFunc,
+ WriteBuffer,
npt,
)
from pandas.compat._optional import import_optional_dependency
@@ -989,15 +989,13 @@ def __repr__(self) -> str:
"""
Return a string representation for a particular DataFrame.
"""
- buf = StringIO("")
if self._info_repr():
+ buf = StringIO()
self.info(buf=buf)
return buf.getvalue()
repr_params = fmt.get_dataframe_repr_params()
- self.to_string(buf=buf, **repr_params)
-
- return buf.getvalue()
+ return self.to_string(**repr_params)
def _repr_html_(self) -> str | None:
"""
@@ -1006,7 +1004,7 @@ def _repr_html_(self) -> str | None:
Mainly for IPython notebook.
"""
if self._info_repr():
- buf = StringIO("")
+ buf = StringIO()
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace("<", r"<", 1)
@@ -1043,20 +1041,72 @@ def _repr_html_(self) -> str | None:
else:
return None
+ @overload
+ def to_string(
+ self,
+ buf: None = ...,
+ columns: Sequence[str] | None = ...,
+ col_space: int | list[int] | dict[Hashable, int] | None = ...,
+ header: bool | Sequence[str] = ...,
+ index: bool = ...,
+ na_rep: str = ...,
+ formatters: fmt.FormattersType | None = ...,
+ float_format: fmt.FloatFormatType | None = ...,
+ sparsify: bool | None = ...,
+ index_names: bool = ...,
+ justify: str | None = ...,
+ max_rows: int | None = ...,
+ max_cols: int | None = ...,
+ show_dimensions: bool = ...,
+ decimal: str = ...,
+ line_width: int | None = ...,
+ min_rows: int | None = ...,
+ max_colwidth: int | None = ...,
+ encoding: str | None = ...,
+ ) -> str:
+ ...
+
+ @overload
+ def to_string(
+ self,
+ buf: FilePath | WriteBuffer[str],
+ columns: Sequence[str] | None = ...,
+ col_space: int | list[int] | dict[Hashable, int] | None = ...,
+ header: bool | Sequence[str] = ...,
+ index: bool = ...,
+ na_rep: str = ...,
+ formatters: fmt.FormattersType | None = ...,
+ float_format: fmt.FloatFormatType | None = ...,
+ sparsify: bool | None = ...,
+ index_names: bool = ...,
+ justify: str | None = ...,
+ max_rows: int | None = ...,
+ max_cols: int | None = ...,
+ show_dimensions: bool = ...,
+ decimal: str = ...,
+ line_width: int | None = ...,
+ min_rows: int | None = ...,
+ max_colwidth: int | None = ...,
+ encoding: str | None = ...,
+ ) -> None:
+ ...
+
@Substitution(
- header_type="bool or sequence",
+ header_type="bool or sequence of strings",
header="Write out the column names. If a list of strings "
"is given, it is assumed to be aliases for the "
"column names",
col_space_type="int, list or dict of int",
- col_space="The minimum width of each column",
+ col_space="The minimum width of each column. If a list of ints is given "
+ "every integers corresponds with one column. If a dict is given, the key "
+ "references the column, while the value defines the space to use.",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
self,
- buf: FilePathOrBuffer[str] | None = None,
+ buf: FilePath | WriteBuffer[str] | None = None,
columns: Sequence[str] | None = None,
- col_space: int | None = None,
+ col_space: int | list[int] | dict[Hashable, int] | None = None,
header: bool | Sequence[str] = True,
index: bool = True,
na_rep: str = "NaN",
@@ -1066,11 +1116,11 @@ def to_string(
index_names: bool = True,
justify: str | None = None,
max_rows: int | None = None,
- min_rows: int | None = None,
max_cols: int | None = None,
show_dimensions: bool = False,
decimal: str = ".",
line_width: int | None = None,
+ min_rows: int | None = None,
max_colwidth: int | None = None,
encoding: str | None = None,
) -> str | None:
@@ -1079,6 +1129,9 @@ def to_string(
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
+ min_rows : int, optional
+ The number of rows to display in the console in a truncated repr
+ (when number of rows is above `max_rows`).
max_colwidth : int, optional
Max width to truncate each column in characters. By default, no limit.
@@ -2427,7 +2480,7 @@ def _from_arrays(
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_stata(
self,
- path: FilePathOrBuffer,
+ path: FilePath | WriteBuffer[bytes],
convert_dates: dict[Hashable, str] | None = None,
write_index: bool = True,
byteorder: str | None = None,
@@ -2449,11 +2502,9 @@ def to_stata(
Parameters
----------
- path : str, buffer or path object
- String, path object (pathlib.Path or py._path.local.LocalPath) or
- object implementing a binary write() function. If using a buffer
- then the buffer will not be automatically closed after the file
- data has been written.
+ path : str, path object, or buffer
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``write()`` function.
.. versionchanged:: 1.0.0
@@ -2595,14 +2646,16 @@ def to_stata(
writer.write_file()
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
- def to_feather(self, path: FilePathOrBuffer[bytes], **kwargs) -> None:
+ def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None:
"""
Write a DataFrame to the binary Feather format.
Parameters
----------
- path : str or file-like object
- If a string, it will be used as Root Directory path.
+ path : str, path object, file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``write()`` function. If a string or a path,
+ it will be used as Root Directory path when writing a partitioned dataset.
**kwargs :
Additional keywords passed to :func:`pyarrow.feather.write_feather`.
Starting with pyarrow 0.17, this includes the `compression`,
@@ -2672,15 +2725,14 @@ def to_markdown(
return result
with get_handle(buf, mode, storage_options=storage_options) as handles:
- assert not isinstance(handles.handle, (str, mmap.mmap))
- handles.handle.writelines(result)
+ handles.handle.write(result)
return None
@doc(storage_options=generic._shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_parquet(
self,
- path: FilePathOrBuffer | None = None,
+ path: FilePath | WriteBuffer[bytes] | None = None,
engine: str = "auto",
compression: str | None = "snappy",
index: bool | None = None,
@@ -2698,13 +2750,11 @@ def to_parquet(
Parameters
----------
- path : str or file-like object, default None
- If a string, it will be used as Root Directory path
- when writing a partitioned dataset. By file-like object,
- we refer to objects with a write() method, such as a file handle
- (e.g. via builtin open function) or io.BytesIO. The engine
- fastparquet does not accept file-like objects. If path is None,
- a bytes object is returned.
+ path : str, path object, file-like object, or None, default None
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``write()`` function. If None, the result is
+ returned as bytes. If a string or path, it will be used as Root Directory
+ path when writing a partitioned dataset.
.. versionchanged:: 1.2.0
@@ -2799,7 +2849,7 @@ def to_parquet(
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
self,
- buf: FilePathOrBuffer[str] | None = None,
+ buf: FilePath | WriteBuffer[str] | None = None,
columns: Sequence[str] | None = None,
col_space: ColspaceArgType | None = None,
header: bool | Sequence[str] = True,
@@ -2837,15 +2887,14 @@ def to_html(
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
- encoding : str, default "utf-8"
- Set character encoding.
-
- .. versionadded:: 1.0
-
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links.
+ encoding : str, default "utf-8"
+ Set character encoding.
+
+ .. versionadded:: 1.0
%(returns)s
See Also
--------
@@ -2887,7 +2936,7 @@ def to_html(
@doc(storage_options=generic._shared_docs["storage_options"])
def to_xml(
self,
- path_or_buffer: FilePathOrBuffer | None = None,
+ path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
index: bool = True,
root_name: str | None = "data",
row_name: str | None = "row",
@@ -2900,7 +2949,7 @@ def to_xml(
xml_declaration: bool | None = True,
pretty_print: bool | None = True,
parser: str | None = "lxml",
- stylesheet: FilePathOrBuffer | None = None,
+ stylesheet: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> str | None:
@@ -2911,9 +2960,10 @@ def to_xml(
Parameters
----------
- path_or_buffer : str, path object or file-like object, optional
- File to write output to. If None, the output is returned as a
- string.
+ path_or_buffer : str, path object, file-like object, or None, default None
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a ``write()`` function. If None, the result is returned
+ as a string.
index : bool, default True
Whether to include index in XML document.
root_name : str, default 'data'
@@ -3207,7 +3257,7 @@ def to_xml(
def info(
self,
verbose: bool | None = None,
- buf: IO[str] | None = None,
+ buf: WriteBuffer[str] | None = None,
max_cols: int | None = None,
memory_usage: bool | str | None = None,
show_counts: bool | None = None,
@@ -3864,9 +3914,9 @@ def _set_item_mgr(self, key, value: ArrayLike) -> None:
if len(self):
self._check_setitem_copy()
- def _iset_item(self, loc: int, value, inplace: bool = False) -> None:
+ def _iset_item(self, loc: int, value) -> None:
arraylike = self._sanitize_column(value)
- self._iset_item_mgr(loc, arraylike, inplace=inplace)
+ self._iset_item_mgr(loc, arraylike, inplace=True)
# check if we are modifying a copy
# try to set first as we want an invalid
@@ -5023,10 +5073,6 @@ def drop(
errors=errors,
)
- @rewrite_axis_style_signature(
- "mapper",
- [("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")],
- )
def rename(
self,
mapper: Renamer | None = None,
@@ -7698,7 +7744,7 @@ def update(
Wild 185.0
We can also choose to include NA in group keys or not by setting
-`dropna` parameter, the default setting is `True`:
+`dropna` parameter, the default setting is `True`.
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
@@ -8562,8 +8608,12 @@ def melt(
),
)
def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:
- if not isinstance(periods, int):
- if not (is_float(periods) and periods.is_integer()):
+ if not lib.is_integer(periods):
+ if not (
+ is_float(periods)
+ # error: "int" has no attribute "is_integer"
+ and periods.is_integer() # type: ignore[attr-defined]
+ ):
raise ValueError("periods must be an integer")
periods = int(periods)
@@ -9155,6 +9205,11 @@ def join(
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
of the calling's one.
+ * cross: creates the cartesian product from both frames, preserves the order
+ of the left keys.
+
+ .. versionadded:: 1.2.0
+
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 23608cf0192df..b647e5000b8a9 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -12,12 +12,12 @@
from typing import (
TYPE_CHECKING,
Any,
- AnyStr,
Callable,
Hashable,
Literal,
Mapping,
Sequence,
+ Type,
cast,
final,
overload,
@@ -43,7 +43,7 @@
Dtype,
DtypeArg,
DtypeObj,
- FilePathOrBuffer,
+ FilePath,
IndexKeyFunc,
IndexLabel,
JSONSerializable,
@@ -57,6 +57,7 @@
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
ValueKeyFunc,
+ WriteBuffer,
npt,
)
from pandas.compat._optional import import_optional_dependency
@@ -486,9 +487,10 @@ def _data(self):
@property
def _AXIS_NUMBERS(self) -> dict[str, int]:
""".. deprecated:: 1.1.0"""
- level = self.ndim + 1
warnings.warn(
- "_AXIS_NUMBERS has been deprecated.", FutureWarning, stacklevel=level
+ "_AXIS_NUMBERS has been deprecated.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
)
return {"index": 0}
@@ -2001,15 +2003,15 @@ def __contains__(self, key) -> bool_t:
@property
def empty(self) -> bool_t:
"""
- Indicator whether DataFrame is empty.
+ Indicator whether Series/DataFrame is empty.
- True if DataFrame is entirely empty (no items), meaning any of the
+ True if Series/DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
- If DataFrame is empty, return True, if not return False.
+ If Series/DataFrame is empty, return True, if not return False.
See Also
--------
@@ -2019,7 +2021,7 @@ def empty(self) -> bool_t:
Notes
-----
- If DataFrame contains only NaNs, it is still not considered empty. See
+ If Series/DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
@@ -2045,6 +2047,16 @@ def empty(self) -> bool_t:
False
>>> df.dropna().empty
True
+
+ >>> ser_empty = pd.Series({'A' : []})
+ >>> ser_empty
+ A []
+ dtype: object
+ >>> ser_empty.empty
+ False
+ >>> ser_empty = pd.Series()
+ >>> ser_empty.empty
+ True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
@@ -2320,7 +2332,7 @@ def to_excel(
@doc(storage_options=_shared_docs["storage_options"])
def to_json(
self,
- path_or_buf: FilePathOrBuffer | None = None,
+ path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
orient: str | None = None,
date_format: str | None = None,
double_precision: int = 10,
@@ -2341,9 +2353,10 @@ def to_json(
Parameters
----------
- path_or_buf : str or file handle, optional
- File path or object. If not specified, the result is returned as
- a string.
+ path_or_buf : str, path object, file-like object, or None, default None
+ String, path object (implementing os.PathLike[str]), or file-like
+ object implementing a write() function. If None, the result is
+ returned as a string.
orient : str
Indication of expected JSON string format.
@@ -3259,6 +3272,7 @@ def to_latex(
{returns}
See Also
--------
+ Styler.to_latex : Render a DataFrame to LaTeX with conditional formatting.
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
@@ -3268,7 +3282,7 @@ def to_latex(
>>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'],
... mask=['red', 'purple'],
... weapon=['sai', 'bo staff']))
- >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
+ >>> print(df.to_latex(index=False)) # doctest: +SKIP
\begin{{tabular}}{{lll}}
\toprule
name & mask & weapon \\
@@ -3278,6 +3292,15 @@ def to_latex(
\bottomrule
\end{{tabular}}
"""
+ msg = (
+ "In future versions `DataFrame.to_latex` is expected to utilise the base "
+ "implementation of `Styler.to_latex` for formatting and rendering. "
+ "The arguments signature may therefore change. It is recommended instead "
+ "to use `DataFrame.style.to_latex` which also contains additional "
+ "functionality."
+ )
+ warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
+
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
@@ -3325,7 +3348,7 @@ def to_latex(
@doc(storage_options=_shared_docs["storage_options"])
def to_csv(
self,
- path_or_buf: FilePathOrBuffer[AnyStr] | None = None,
+ path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
sep: str = ",",
na_rep: str = "",
float_format: str | None = None,
@@ -3352,10 +3375,11 @@ def to_csv(
Parameters
----------
- path_or_buf : str or file handle, default None
- File path or object, if None is provided the result is returned as
- a string. If a non-binary file object is passed, it should be opened
- with `newline=''`, disabling universal newlines. If a binary
+ path_or_buf : str, path object, file-like object, or None, default None
+ String, path object (implementing os.PathLike[str]), or file-like
+ object implementing a write() function. If None, the result is
+ returned as a string. If a non-binary file object is passed, it should
+ be opened with `newline=''`, disabling universal newlines. If a binary
file object is passed, `mode` might need to contain a `'b'`.
.. versionchanged:: 1.2.0
@@ -3397,6 +3421,9 @@ def to_csv(
and mode is one of {{'zip', 'gzip', 'bz2'}}, or inferred as
one of the above, other entries passed as
additional compression options.
+ If `path_or_buf` is omitted or `None` or is a file opened in text
+ mode, this argument is ignored and an (uncompressed) string is
+ returned/written.
.. versionchanged:: 1.0.0
@@ -5284,10 +5311,9 @@ def sample(
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
Infinite values not allowed.
- random_state : int, array-like, BitGenerator, np.random.RandomState,
- np.random.Generator, optional. If int, array-like, or BitGenerator, seed for
- random number generator. If np.random.RandomState or np.random.Generator,
- use as given.
+ random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional
+ If int, array-like, or BitGenerator, seed for random number generator.
+ If np.random.RandomState or np.random.Generator, use as given.
.. versionchanged:: 1.1.0
@@ -5376,7 +5402,7 @@ def sample(
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
- """
+ """ # noqa:E501
if axis is None:
axis = self._stat_axis_number
@@ -5823,16 +5849,25 @@ def astype(
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
- "key in a dtype mappings argument."
+ "key in a dtype mappings argument. "
+ f"'{col_name}' not found in columns."
)
+
+ # GH#44417 cast to Series so we can use .iat below, which will be
+ # robust in case we
+ from pandas import Series
+
+ dtype_ser = Series(dtype, dtype=object)
+ dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False)
+
results = []
- for col_name, col in self.items():
- if col_name in dtype:
- results.append(
- col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
- )
+ for i, (col_name, col) in enumerate(self.items()):
+ cdt = dtype_ser.iat[i]
+ if isna(cdt):
+ res_col = col.copy() if copy else col
else:
- results.append(col.copy() if copy else col)
+ res_col = col.astype(dtype=cdt, copy=copy, errors=errors)
+ results.append(res_col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
@@ -6219,8 +6254,12 @@ def convert_dtypes(
for col_name, col in self.items()
]
if len(results) > 0:
+ result = concat(results, axis=1, copy=False)
+ cons = cast(Type["DataFrame"], self._constructor)
+ result = cons(result)
+ result = result.__finalize__(self, method="convert_dtypes")
# https://github.com/python/mypy/issues/8354
- return cast(NDFrameT, concat(results, axis=1, copy=False))
+ return cast(NDFrameT, result)
else:
return self.copy()
@@ -7866,11 +7905,10 @@ def resample(
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
- origin : {{'epoch', 'start', 'start_day', 'end', 'end_day'}}, Timestamp
- or str, default 'start_day'
+ origin : Timestamp or str, default 'start_day'
The timestamp on which to adjust the grouping. The timezone of origin
must match the timezone of the index.
- If a timestamp is not used, these values are also supported:
+ If string, must be one of the following:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
@@ -10276,6 +10314,7 @@ def _logical_func(
self, name: str, func, axis=0, bool_only=None, skipna=True, level=None, **kwargs
):
nv.validate_logical_func((), kwargs, fname=name)
+ validate_bool_kwarg(skipna, "skipna", none_allowed=False)
if level is not None:
warnings.warn(
"Using the level keyword in DataFrame and Series aggregations is "
@@ -10370,6 +10409,7 @@ def _stat_function_ddof(
**kwargs,
):
nv.validate_stat_ddof_func((), kwargs, fname=name)
+ validate_bool_kwarg(skipna, "skipna", none_allowed=False)
if axis is None:
axis = self._stat_axis_number
if level is not None:
@@ -10423,6 +10463,9 @@ def _stat_function(
nv.validate_median((), kwargs)
else:
nv.validate_stat_func((), kwargs, fname=name)
+
+ validate_bool_kwarg(skipna, "skipna", none_allowed=False)
+
if axis is None:
axis = self._stat_axis_number
if level is not None:
@@ -10490,6 +10533,9 @@ def _min_count_stat_function(
nv.validate_prod((), kwargs)
else:
nv.validate_stat_func((), kwargs, fname=name)
+
+ validate_bool_kwarg(skipna, "skipna", none_allowed=False)
+
if axis is None:
axis = self._stat_axis_number
if level is not None:
@@ -10552,7 +10598,7 @@ def prod(
product = prod
- def mad(self, axis=None, skipna=None, level=None):
+ def mad(self, axis=None, skipna=True, level=None):
"""
{desc}
@@ -10560,7 +10606,7 @@ def mad(self, axis=None, skipna=None, level=None):
----------
axis : {axis_descr}
Axis for the function to be applied on.
- skipna : bool, default None
+ skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
@@ -10572,7 +10618,14 @@ def mad(self, axis=None, skipna=None, level=None):
{see_also}\
{examples}
"""
- if skipna is None:
+ if not is_bool(skipna):
+ warnings.warn(
+ "Passing None for skipna is deprecated and will raise in a future"
+ "version. Pass True instead. Only boolean values will be allowed "
+ "in the future.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
skipna = True
if axis is None:
axis = self._stat_axis_number
@@ -10642,7 +10695,7 @@ def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
see_also="",
examples="",
)
- def mad(self, axis=None, skipna=None, level=None):
+ def mad(self, axis=None, skipna=True, level=None):
return NDFrame.mad(self, axis, skipna, level)
setattr(cls, "mad", mad)
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 3c45f7263265c..4535010b29c3a 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -992,7 +992,7 @@ def _wrap_applied_output(
result = self.obj._constructor(
index=self.grouper.result_index, columns=data.columns
)
- result = result.astype(data.dtypes.to_dict(), copy=False)
+ result = result.astype(data.dtypes, copy=False)
return result
# GH12824
@@ -1039,7 +1039,7 @@ def _wrap_applied_output_series(
key_index,
) -> DataFrame | Series:
# this is to silence a DeprecationWarning
- # TODO: Remove when default dtype of empty Series is object
+ # TODO(2.0): Remove when default dtype of empty Series is object
kwargs = first_not_none._construct_axes_dict()
backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs)
values = [x if (x is not None) else backup for x in values]
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 00c4d2778e545..f96ec527ced89 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -325,7 +325,7 @@ class providing the base-class of operations.
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
-filled with the transformed values
+filled with the transformed values.
Parameters
----------
@@ -2082,7 +2082,8 @@ def size(self) -> DataFrame | Series:
result = self._obj_1d_constructor(result)
if not self.as_index:
- result = result.rename("size").reset_index()
+ # Item "None" of "Optional[Series]" has no attribute "reset_index"
+ result = result.rename("size").reset_index() # type: ignore[union-attr]
return self._reindex_output(result, fill_value=0)
@@ -3337,7 +3338,8 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, axis=0
Series or DataFrame
Percentage changes within each group.
"""
- # TODO: Remove this conditional for SeriesGroupBy when GH#23918 is fixed
+ # TODO(GH#23918): Remove this conditional for SeriesGroupBy when
+ # GH#23918 is fixed
if freq is not None or axis != 0:
return self.apply(
lambda x: x.pct_change(
@@ -3586,10 +3588,9 @@ def sample(
sampling probabilities after normalization within each group.
Values must be non-negative with at least one positive element
within each group.
- random_state : int, array-like, BitGenerator, np.random.RandomState,
- np.random.Generator, optional. If int, array-like, or BitGenerator, seed for
- random number generator. If np.random.RandomState or np.random.Generator,
- use as given.
+ random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional
+ If int, array-like, or BitGenerator, seed for random number generator.
+ If np.random.RandomState or np.random.Generator, use as given.
.. versionchanged:: 1.4.0
@@ -3649,7 +3650,7 @@ def sample(
5 black 5
2 blue 2
0 red 0
- """
+ """ # noqa:E501
size = sample.process_sampling_size(n, frac, replace)
if weights is not None:
weights_arr = sample.preprocess_weights(
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 7577b1e671d60..a05f8e581d12f 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -21,6 +21,7 @@
)
from pandas.errors import InvalidIndexError
from pandas.util._decorators import cache_readonly
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.cast import sanitize_to_nanoseconds
from pandas.core.dtypes.common import (
@@ -105,11 +106,10 @@ class Grouper:
However, loffset is also deprecated for ``.resample(...)``
See: :class:`DataFrame.resample`
- origin : {{'epoch', 'start', 'start_day', 'end', 'end_day'}}, Timestamp
- or str, default 'start_day'
+ origin : Timestamp or str, default 'start_day'
The timestamp on which to adjust the grouping. The timezone of origin must
match the timezone of the index.
- If a timestamp is not used, these values are also supported:
+ If string, must be one of the following:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
@@ -964,8 +964,6 @@ def _check_deprecated_resample_kwargs(kwargs, origin):
From where this function is being called; either Grouper or TimeGrouper. Used
to determine an approximate stacklevel.
"""
- from pandas.core.resample import TimeGrouper
-
# Deprecation warning of `base` and `loffset` since v1.1.0:
# we are raising the warning here to be able to set the `stacklevel`
# properly since we need to raise the `base` and `loffset` deprecation
@@ -975,11 +973,6 @@ def _check_deprecated_resample_kwargs(kwargs, origin):
# core/groupby/grouper.py::Grouper
# raising these warnings from TimeGrouper directly would fail the test:
# tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base
- # hacky way to set the stacklevel: if cls is TimeGrouper it means
- # that the call comes from a pandas internal call of resample,
- # otherwise it comes from pd.Grouper
- stacklevel = (5 if origin is TimeGrouper else 2) + 1
- # the + 1 is for this helper function, check_deprecated_resample_kwargs
if kwargs.get("base", None) is not None:
warnings.warn(
@@ -989,7 +982,7 @@ def _check_deprecated_resample_kwargs(kwargs, origin):
"\nbecomes:\n"
'\n>>> df.resample(freq="3s", offset="2s")\n',
FutureWarning,
- stacklevel=stacklevel,
+ stacklevel=find_stack_level(),
)
if kwargs.get("loffset", None) is not None:
warnings.warn(
@@ -1000,5 +993,5 @@ def _check_deprecated_resample_kwargs(kwargs, origin):
'\n>>> df = df.resample(freq="3s").mean()'
'\n>>> df.index = df.index.to_timestamp() + to_offset("8H")\n',
FutureWarning,
- stacklevel=stacklevel,
+ stacklevel=find_stack_level(),
)
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 60c8851f059fe..8223a04883738 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -868,8 +868,8 @@ def result_arraylike(self) -> ArrayLike:
Analogous to result_index, but returning an ndarray/ExtensionArray
allowing us to retain ExtensionDtypes not supported by Index.
"""
- # TODO: once Index supports arbitrary EAs, this can be removed in favor
- # of result_index
+ # TODO(ExtensionIndex): once Index supports arbitrary EAs, this can
+ # be removed in favor of result_index
if len(self.groupings) == 1:
return self.groupings[0].group_arraylike
diff --git a/pandas/core/indexers/objects.py b/pandas/core/indexers/objects.py
index c4156f214ca68..4d5e4bbe6bd36 100644
--- a/pandas/core/indexers/objects.py
+++ b/pandas/core/indexers/objects.py
@@ -124,7 +124,7 @@ def get_window_bounds(
class VariableOffsetWindowIndexer(BaseIndexer):
- """Calculate window boundaries based on a non-fixed offset such as a BusinessDay"""
+ """Calculate window boundaries based on a non-fixed offset such as a BusinessDay."""
def __init__(
self,
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 9715bf8f61f3c..58a31e568981f 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -846,7 +846,7 @@ def _dir_additions_for_owner(self) -> set[str_t]:
"""
return {
c
- for c in self.unique(level=0)[:100]
+ for c in self.unique(level=0)[: get_option("display.max_dir_items")]
if isinstance(c, str) and c.isidentifier()
}
@@ -947,7 +947,6 @@ def view(self, cls=None):
# e.g. m8[s]
return self._data.view(cls)
- arr = self._data.view("i8")
idx_cls = self._dtype_to_subclass(dtype)
arr_cls = idx_cls._data_cls
arr = arr_cls(self._data.view("i8"), dtype=dtype)
@@ -2950,7 +2949,7 @@ def _get_reconciled_name_object(self, other):
case make a shallow copy of self.
"""
name = get_op_result_name(self, other)
- if self.name != name:
+ if self.name is not name:
return self.rename(name)
return self
@@ -6433,7 +6432,7 @@ def insert(self, loc: int, item) -> Index:
"""
Make new Index inserting new item at location.
- Follows Python list.append semantics for negative values.
+ Follows Python numpy.insert semantics for negative values.
Parameters
----------
@@ -6476,6 +6475,7 @@ def insert(self, loc: int, item) -> Index:
else:
new_values = np.insert(arr, loc, None)
+ loc = loc if loc >= 0 else loc - 1
new_values[loc] = item
# Use self._constructor instead of Index to retain NumericIndex GH#43921
@@ -6681,8 +6681,6 @@ def all(self, *args, **kwargs):
Examples
--------
- **all**
-
True, because nonzero integers are considered True.
>>> pd.Index([1, 2, 3]).all()
@@ -6692,18 +6690,6 @@ def all(self, *args, **kwargs):
>>> pd.Index([0, 1, 2]).all()
False
-
- **any**
-
- True, because ``1`` is considered True.
-
- >>> pd.Index([0, 0, 1]).any()
- True
-
- False, because ``0`` is considered False.
-
- >>> pd.Index([0, 0, 0]).any()
- False
"""
nv.validate_all(args, kwargs)
self._maybe_disable_logical_methods("all")
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 5791f89828ca3..885c922d1ee0f 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -727,6 +727,8 @@ def _get_indexer_pointwise(
if isinstance(locs, slice):
# Only needed for get_indexer_non_unique
locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp")
+ elif not self.is_unique and not self.is_monotonic:
+ locs = np.where(locs)[0]
locs = np.array(locs, ndmin=1)
except KeyError:
missing.append(i)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 128aa8e282a0d..53d584f801b0f 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -732,7 +732,7 @@ def array(self):
@cache_readonly
def dtypes(self) -> Series:
"""
- Return the dtypes as a Series for the underlying MultiIndex
+ Return the dtypes as a Series for the underlying MultiIndex.
"""
from pandas import Series
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 23851eff252b4..e3e1589d91e09 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -354,6 +354,14 @@ def astype(self, dtype, copy: bool = True, how=lib.no_default):
if is_datetime64_any_dtype(dtype):
# 'how' is index-specific, isn't part of the EA interface.
+ # GH#44398 deprecate astype(dt64), matching Series behavior
+ warnings.warn(
+ f"Converting {type(self).__name__} to DatetimeIndex with "
+ "'astype' is deprecated and will raise in a future version. "
+ "Use `obj.to_timestamp(how).tz_localize(dtype.tz)` instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
tz = getattr(dtype, "tz", None)
return self.to_timestamp(how=how).tz_localize(tz)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 91f1415178471..2b07bdd7cae49 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -947,14 +947,19 @@ def __getitem__(self, key):
key = tuple(list(x) if is_iterator(x) else x for x in key)
key = tuple(com.apply_if_callable(x, self.obj) for x in key)
if self._is_scalar_access(key):
- return self.obj._get_value(*key, takeable=self._takeable)
- return self._getitem_tuple(key)
+ result = self.obj._get_value(*key, takeable=self._takeable)
+ else:
+ result = self._getitem_tuple(key)
else:
# we by definition only have the 0th axis
axis = self.axis or 0
maybe_callable = com.apply_if_callable(key, self.obj)
- return self._getitem_axis(maybe_callable, axis=axis)
+ result = self._getitem_axis(maybe_callable, axis=axis)
+
+ if hasattr(result, "__finalize__"):
+ return result.__finalize__(self.obj, method="_LocationIndexer.__getitem__")
+ return result
def _is_scalar_access(self, key: tuple):
raise NotImplementedError()
@@ -994,7 +999,7 @@ def _validate_key(self, key, axis: int):
# slice of labels (where start-end in labels)
# slice of integers (only if in the labels)
# boolean not in slice and with boolean index
- if isinstance(key, bool) and not is_bool_dtype(self.obj.index):
+ if isinstance(key, bool) and not is_bool_dtype(self.obj._get_axis(axis)):
raise KeyError(
f"{key}: boolean label can not be used without a boolean index"
)
@@ -1871,11 +1876,11 @@ def _setitem_single_column(self, loc: int, value, plane_indexer):
# The setitem happened inplace, so the DataFrame's values
# were modified inplace.
return
- self.obj._iset_item(loc, ser, inplace=True)
+ self.obj._iset_item(loc, ser)
return
# reset the sliced object if unique
- self.obj._iset_item(loc, ser, inplace=True)
+ self.obj._iset_item(loc, ser)
def _setitem_single_block(self, indexer, value, name: str):
"""
@@ -1892,21 +1897,19 @@ def _setitem_single_block(self, indexer, value, name: str):
# set using those methods to avoid block-splitting
# logic here
if (
- len(indexer) > info_axis
- and is_integer(indexer[info_axis])
- and all(
- com.is_null_slice(idx)
- for i, idx in enumerate(indexer)
- if i != info_axis
- )
+ self.ndim == len(indexer) == 2
+ and is_integer(indexer[1])
+ and com.is_null_slice(indexer[0])
):
col = item_labels[indexer[info_axis]]
if len(item_labels.get_indexer_for([col])) == 1:
+ # e.g. test_loc_setitem_empty_append_expands_rows
loc = item_labels.get_loc(col)
- self.obj._iset_item(loc, value, inplace=True)
+ self.obj._iset_item(loc, value)
return
- indexer = maybe_convert_ix(*indexer)
+ indexer = maybe_convert_ix(*indexer) # e.g. test_setitem_frame_align
+
if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict):
# TODO(EA): ExtensionBlock.setitem this causes issues with
# setting for extensionarrays that store dicts. Need to decide
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py
index 543b2ea26f750..1cd9fe65407ba 100644
--- a/pandas/core/internals/array_manager.py
+++ b/pandas/core/internals/array_manager.py
@@ -365,7 +365,7 @@ def diff(self: T, n: int, axis: int) -> T:
# with axis=0 is equivalent
assert n == 0
axis = 0
- return self.apply(algos.diff, n=n, axis=axis, stacklevel=5)
+ return self.apply(algos.diff, n=n, axis=axis)
def interpolate(self: T, **kwargs) -> T:
return self.apply_with_block("interpolate", swap_axis=False, **kwargs)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 55e5b0d0439fa..550bc4ac56d4b 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -30,6 +30,7 @@
Shape,
npt,
)
+from pandas.compat import np_version_under1p20
from pandas.util._decorators import cache_readonly
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_bool_kwarg
@@ -309,7 +310,7 @@ def _slice(self, slicer) -> ArrayLike:
return self.values[slicer]
@final
- def getitem_block(self, slicer) -> Block:
+ def getitem_block(self, slicer: slice | npt.NDArray[np.intp]) -> Block:
"""
Perform __getitem__-like, return result as block.
@@ -326,7 +327,9 @@ def getitem_block(self, slicer) -> Block:
return type(self)(new_values, new_mgr_locs, self.ndim)
@final
- def getitem_block_columns(self, slicer, new_mgr_locs: BlockPlacement) -> Block:
+ def getitem_block_columns(
+ self, slicer: slice, new_mgr_locs: BlockPlacement
+ ) -> Block:
"""
Perform __getitem__-like, return result as block.
@@ -867,6 +870,12 @@ def _replace_coerce(
# ---------------------------------------------------------------------
+ def _maybe_squeeze_arg(self, arg: np.ndarray) -> np.ndarray:
+ """
+ For compatibility with 1D-only ExtensionArrays.
+ """
+ return arg
+
def setitem(self, indexer, value):
"""
Attempt self.values[indexer] = value, possibly creating a new array.
@@ -930,10 +939,7 @@ def setitem(self, indexer, value):
value = setitem_datetimelike_compat(values, len(values[indexer]), value)
values[indexer] = value
- if transpose:
- values = values.T
- block = type(self)(values, placement=self._mgr_locs, ndim=self.ndim)
- return block
+ return self
def putmask(self, mask, new) -> list[Block]:
"""
@@ -952,7 +958,8 @@ def putmask(self, mask, new) -> list[Block]:
List[Block]
"""
orig_mask = mask
- mask, noop = validate_putmask(self.values.T, mask)
+ values = cast(np.ndarray, self.values)
+ mask, noop = validate_putmask(values.T, mask)
assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame))
# if we are passed a scalar None, convert it here
@@ -960,28 +967,30 @@ def putmask(self, mask, new) -> list[Block]:
new = self.fill_value
if self._can_hold_element(new):
-
- # error: Argument 1 to "putmask_without_repeat" has incompatible type
- # "Union[ndarray, ExtensionArray]"; expected "ndarray"
- putmask_without_repeat(self.values.T, mask, new) # type: ignore[arg-type]
+ putmask_without_repeat(values.T, mask, new)
return [self]
- elif noop:
- return [self]
-
- dtype, _ = infer_dtype_from(new)
- if dtype.kind in ["m", "M"]:
+ elif np_version_under1p20 and infer_dtype_from(new)[0].kind in ["m", "M"]:
# using putmask with object dtype will incorrectly cast to object
# Having excluded self._can_hold_element, we know we cannot operate
# in-place, so we are safe using `where`
return self.where(new, ~mask)
+ elif noop:
+ return [self]
+
elif self.ndim == 1 or self.shape[0] == 1:
# no need to split columns
- # error: Argument 1 to "putmask_smart" has incompatible type "Union[ndarray,
- # ExtensionArray]"; expected "ndarray"
- nv = putmask_smart(self.values.T, mask, new).T # type: ignore[arg-type]
+ if not is_list_like(new):
+ # putmask_smart can't save us the need to cast
+ return self.coerce_to_target_dtype(new).putmask(mask, new)
+
+ # This differs from
+ # `self.coerce_to_target_dtype(new).putmask(mask, new)`
+ # because putmask_smart will check if new[mask] may be held
+ # by our dtype.
+ nv = putmask_smart(values.T, mask, new).T
return [self.make_block(nv)]
else:
@@ -1122,7 +1131,7 @@ def take_nd(
def diff(self, n: int, axis: int = 1) -> list[Block]:
"""return block for the diff of the values"""
- new_values = algos.diff(self.values, n, axis=axis, stacklevel=7)
+ new_values = algos.diff(self.values, n, axis=axis)
return [self.make_block(values=new_values)]
def shift(self, periods: int, axis: int = 0, fill_value: Any = None) -> list[Block]:
@@ -1311,6 +1320,46 @@ class EABackedBlock(Block):
values: ExtensionArray
+ def putmask(self, mask, new) -> list[Block]:
+ """
+ See Block.putmask.__doc__
+ """
+ mask = extract_bool_array(mask)
+
+ values = self.values
+
+ mask = self._maybe_squeeze_arg(mask)
+
+ try:
+ # Caller is responsible for ensuring matching lengths
+ values._putmask(mask, new)
+ except (TypeError, ValueError) as err:
+ if isinstance(err, ValueError) and "Timezones don't match" not in str(err):
+ # TODO(2.0): remove catching ValueError at all since
+ # DTA raising here is deprecated
+ raise
+
+ if is_interval_dtype(self.dtype):
+ # Discussion about what we want to support in the general
+ # case GH#39584
+ blk = self.coerce_to_target_dtype(new)
+ if blk.dtype == _dtype_obj:
+ # For now at least, only support casting e.g.
+ # Interval[int64]->Interval[float64],
+ raise
+ return blk.putmask(mask, new)
+
+ elif isinstance(self, NDArrayBackedExtensionBlock):
+ # NB: not (yet) the same as
+ # isinstance(values, NDArrayBackedExtensionArray)
+ blk = self.coerce_to_target_dtype(new)
+ return blk.putmask(mask, new)
+
+ else:
+ raise
+
+ return [self]
+
def delete(self, loc) -> None:
"""
Delete given loc(-s) from block in-place.
@@ -1407,36 +1456,16 @@ def set_inplace(self, locs, values) -> None:
# _cache not yet initialized
pass
- def putmask(self, mask, new) -> list[Block]:
+ def _maybe_squeeze_arg(self, arg):
"""
- See Block.putmask.__doc__
+ If necessary, squeeze a (N, 1) ndarray to (N,)
"""
- mask = extract_bool_array(mask)
-
- new_values = self.values
-
- if mask.ndim == new_values.ndim + 1:
+ # e.g. if we are passed a 2D mask for putmask
+ if isinstance(arg, np.ndarray) and arg.ndim == self.values.ndim + 1:
# TODO(EA2D): unnecessary with 2D EAs
- mask = mask.reshape(new_values.shape)
-
- try:
- # Caller is responsible for ensuring matching lengths
- new_values._putmask(mask, new)
- except TypeError:
- if not is_interval_dtype(self.dtype):
- # Discussion about what we want to support in the general
- # case GH#39584
- raise
-
- blk = self.coerce_to_target_dtype(new)
- if blk.dtype == _dtype_obj:
- # For now at least, only support casting e.g.
- # Interval[int64]->Interval[float64],
- raise
- return blk.putmask(mask, new)
-
- nb = type(self)(new_values, placement=self._mgr_locs, ndim=self.ndim)
- return [nb]
+ assert arg.shape[1] == 1
+ arg = arg[:, 0]
+ return arg
@property
def is_view(self) -> bool:
@@ -1537,7 +1566,9 @@ def _slice(self, slicer) -> ExtensionArray:
)
# GH#32959 only full-slicers along fake-dim0 are valid
# TODO(EA2D): won't be necessary with 2D EAs
- new_locs = self._mgr_locs[first]
+ # range(1) instead of self._mgr_locs to avoid exception on [::-1]
+ # see test_iloc_getitem_slice_negative_step_ea_block
+ new_locs = range(1)[first]
if len(new_locs):
# effectively slice(None)
slicer = slicer[1]
@@ -1592,15 +1623,8 @@ def where(self, other, cond) -> list[Block]:
cond = extract_bool_array(cond)
assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame))
- if isinstance(other, np.ndarray) and other.ndim == 2:
- # TODO(EA2D): unnecessary with 2D EAs
- assert other.shape[1] == 1
- other = other[:, 0]
-
- if isinstance(cond, np.ndarray) and cond.ndim == 2:
- # TODO(EA2D): unnecessary with 2D EAs
- assert cond.shape[1] == 1
- cond = cond[:, 0]
+ other = self._maybe_squeeze_arg(other)
+ cond = self._maybe_squeeze_arg(cond)
if lib.is_scalar(other) and isna(other):
# The default `other` for Series / Frame is np.nan
@@ -1610,6 +1634,10 @@ def where(self, other, cond) -> list[Block]:
# attribute "na_value"
other = self.dtype.na_value # type: ignore[union-attr]
+ icond, noop = validate_putmask(self.values, ~cond)
+ if noop:
+ return self.copy()
+
try:
result = self.values._where(cond, other)
except TypeError:
@@ -1695,16 +1723,6 @@ def setitem(self, indexer, value):
values[indexer] = value
return self
- def putmask(self, mask, new) -> list[Block]:
- mask = extract_bool_array(mask)
-
- if not self._can_hold_element(new):
- return self.coerce_to_target_dtype(new).putmask(mask, new)
-
- arr = self.values
- arr.T._putmask(mask, new)
- return [self]
-
def where(self, other, cond) -> list[Block]:
arr = self.values
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index e6d6b561803d6..77f3db0d09df5 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -294,10 +294,15 @@ def ndarray_to_mgr(
if is_1d_only_ea_dtype(vdtype) or isinstance(dtype, ExtensionDtype):
# GH#19157
- if isinstance(values, np.ndarray) and values.ndim > 1:
+ if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1:
# GH#12513 a EA dtype passed with a 2D array, split into
# multiple EAs that view the values
- values = [values[:, n] for n in range(values.shape[1])]
+ # error: No overload variant of "__getitem__" of "ExtensionArray"
+ # matches argument type "Tuple[slice, int]"
+ values = [
+ values[:, n] # type: ignore[call-overload]
+ for n in range(values.shape[1])
+ ]
else:
values = [values]
@@ -443,15 +448,18 @@ def dict_to_mgr(
if missing.any() and not is_integer_dtype(dtype):
nan_dtype: DtypeObj
- if dtype is None or (
- isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.flexible)
- ):
+ if dtype is not None:
+ # calling sanitize_array ensures we don't mix-and-match
+ # NA dtypes
+ midxs = missing.values.nonzero()[0]
+ for i in midxs:
+ arr = sanitize_array(arrays.iat[i], index, dtype=dtype)
+ arrays.iat[i] = arr
+ else:
# GH#1783
nan_dtype = np.dtype("object")
- else:
- nan_dtype = dtype
- val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
- arrays.loc[missing] = [val] * missing.sum()
+ val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
+ arrays.loc[missing] = [val] * missing.sum()
arrays = list(arrays)
columns = ensure_index(columns)
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index cb0c3e05e955f..d69709bf9d06c 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -36,6 +36,7 @@
is_1d_only_ea_dtype,
is_dtype_equal,
is_list_like,
+ needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
@@ -80,8 +81,6 @@
operate_blockwise,
)
-# TODO: flexible with index=None and/or items=None
-
T = TypeVar("T", bound="BaseBlockManager")
@@ -364,7 +363,28 @@ def shift(self: T, periods: int, axis: int, fill_value) -> T:
if fill_value is lib.no_default:
fill_value = None
- if axis == 0 and self.ndim == 2 and self.nblocks > 1:
+ if (
+ axis == 0
+ and self.ndim == 2
+ and (
+ self.nblocks > 1
+ or (
+ # If we only have one block and we know that we can't
+ # keep the same dtype (i.e. the _can_hold_element check)
+ # then we can go through the reindex_indexer path
+ # (and avoid casting logic in the Block method).
+ # The exception to this (until 2.0) is datetimelike
+ # dtypes with integers, which cast.
+ not self.blocks[0]._can_hold_element(fill_value)
+ # TODO(2.0): remove special case for integer-with-datetimelike
+ # once deprecation is enforced
+ and not (
+ lib.is_integer(fill_value)
+ and needs_i8_conversion(self.blocks[0].dtype)
+ )
+ )
+ )
+ ):
# GH#35488 we need to watch out for multi-block cases
# We only get here with fill_value not-lib.no_default
ncols = self.shape[0]
@@ -861,9 +881,9 @@ def take(self: T, indexer, axis: int = 1, verify: bool = True) -> T:
"""
# We have 6 tests that get here with a slice
indexer = (
- np.arange(indexer.start, indexer.stop, indexer.step, dtype="int64")
+ np.arange(indexer.start, indexer.stop, indexer.step, dtype=np.intp)
if isinstance(indexer, slice)
- else np.asanyarray(indexer, dtype="int64")
+ else np.asanyarray(indexer, dtype=np.intp)
)
n = self.shape[axis]
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index a95592c96d411..52d2322b11f42 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -30,6 +30,7 @@
npt,
)
from pandas.compat._optional import import_optional_dependency
+from pandas.compat.numpy import np_percentile_argname
from pandas.core.dtypes.common import (
is_any_int_dtype,
@@ -454,7 +455,8 @@ def _na_for_min_count(values: np.ndarray, axis: int | None) -> Scalar | np.ndarr
def maybe_operate_rowwise(func: F) -> F:
"""
NumPy operations on C-contiguous ndarrays with axis=1 can be
- very slow. Operate row-by-row and concatenate the results.
+ very slow if axis 1 >> axis 0.
+ Operate row-by-row and concatenate the results.
"""
@functools.wraps(func)
@@ -463,6 +465,9 @@ def newfunc(values: np.ndarray, *, axis: int | None = None, **kwargs):
axis == 1
and values.ndim == 2
and values.flags["C_CONTIGUOUS"]
+ # only takes this path for wide arrays (long dataframes), for threshold see
+ # https://github.com/pandas-dev/pandas/pull/43311#issuecomment-974891737
+ and (values.shape[1] / 1000) > values.shape[0]
and values.dtype != object
and values.dtype != bool
):
@@ -1694,7 +1699,7 @@ def _nanpercentile_1d(
if len(values) == 0:
return np.array([na_value] * len(q), dtype=values.dtype)
- return np.percentile(values, q, interpolation=interpolation)
+ return np.percentile(values, q, **{np_percentile_argname: interpolation})
def nanpercentile(
@@ -1747,7 +1752,9 @@ def nanpercentile(
result = np.array(result, dtype=values.dtype, copy=False).T
return result
else:
- return np.percentile(values, q, axis=1, interpolation=interpolation)
+ return np.percentile(
+ values, q, axis=1, **{np_percentile_argname: interpolation}
+ )
def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike:
@@ -1781,16 +1788,20 @@ def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike:
# We need to define mask before masking NaTs
mask = isna(values)
- if accum_func == np.minimum.accumulate:
- # Note: the accum_func comparison fails as an "is" comparison
- y = values.view("i8")
- y[mask] = lib.i8max
- changed = True
- else:
- y = values
- changed = False
+ y = values.view("i8")
+ # Note: the accum_func comparison fails as an "is" comparison
+ changed = accum_func == np.minimum.accumulate
+
+ try:
+ if changed:
+ y[mask] = lib.i8max
+
+ result = accum_func(y, axis=0)
+ finally:
+ if changed:
+ # restore NaT elements
+ y[mask] = iNaT
- result = accum_func(y.view("i8"), axis=0)
if skipna:
result[mask] = iNaT
elif accum_func == np.minimum.accumulate:
@@ -1800,10 +1811,6 @@ def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike:
# everything up to the first non-na entry stays NaT
result[: nz[0]] = iNaT
- if changed:
- # restore NaT elements
- y[mask] = iNaT # TODO: could try/finally for this?
-
if isinstance(values.dtype, np.dtype):
result = result.view(orig_dtype)
else:
diff --git a/pandas/core/ops/common.py b/pandas/core/ops/common.py
index 2a76eb92120e7..b883fe7751daa 100644
--- a/pandas/core/ops/common.py
+++ b/pandas/core/ops/common.py
@@ -5,6 +5,7 @@
from typing import Callable
from pandas._libs.lib import item_from_zerodim
+from pandas._libs.missing import is_matching_na
from pandas._typing import F
from pandas.core.dtypes.generic import (
@@ -116,10 +117,21 @@ def _maybe_match_name(a, b):
a_has = hasattr(a, "name")
b_has = hasattr(b, "name")
if a_has and b_has:
- if a.name == b.name:
- return a.name
- else:
- # TODO: what if they both have np.nan for their names?
+ try:
+ if a.name == b.name:
+ return a.name
+ elif is_matching_na(a.name, b.name):
+ # e.g. both are np.nan
+ return a.name
+ else:
+ return None
+ except TypeError:
+ # pd.NA
+ if is_matching_na(a.name, b.name):
+ return a.name
+ return None
+ except ValueError:
+ # e.g. np.int64(1) vs (np.int64(1), np.int64(2))
return None
elif a_has:
return a.name
diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py
index 3663e0682c4f4..ed133f30e192d 100644
--- a/pandas/core/ops/missing.py
+++ b/pandas/core/ops/missing.py
@@ -34,7 +34,7 @@
from pandas.core.ops import roperator
-def fill_zeros(result, x, y):
+def _fill_zeros(result, x, y):
"""
If this is a reversed op, then flip x,y
@@ -102,9 +102,6 @@ def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray:
>>> mask_zero_div_zero(x, y, result)
array([ inf, nan, -inf])
"""
- if not isinstance(result, np.ndarray):
- # FIXME: SparseArray would raise TypeError with np.putmask
- return result
if is_scalar(y):
y = np.array(y)
@@ -141,7 +138,7 @@ def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray:
def dispatch_fill_zeros(op, left, right, result):
"""
- Call fill_zeros with the appropriate fill value depending on the operation,
+ Call _fill_zeros with the appropriate fill value depending on the operation,
with special logic for divmod and rdivmod.
Parameters
@@ -163,12 +160,12 @@ def dispatch_fill_zeros(op, left, right, result):
if op is divmod:
result = (
mask_zero_div_zero(left, right, result[0]),
- fill_zeros(result[1], left, right),
+ _fill_zeros(result[1], left, right),
)
elif op is roperator.rdivmod:
result = (
mask_zero_div_zero(right, left, result[0]),
- fill_zeros(result[1], right, left),
+ _fill_zeros(result[1], right, left),
)
elif op is operator.floordiv:
# Note: no need to do this for truediv; in py3 numpy behaves the way
@@ -179,7 +176,7 @@ def dispatch_fill_zeros(op, left, right, result):
# we want.
result = mask_zero_div_zero(right, left, result)
elif op is operator.mod:
- result = fill_zeros(result, left, right)
+ result = _fill_zeros(result, left, right)
elif op is roperator.rmod:
- result = fill_zeros(result, right, left)
+ result = _fill_zeros(result, right, left)
return result
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 8475877f9b905..71b53d50273e0 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -13,6 +13,7 @@
cast,
overload,
)
+import warnings
import numpy as np
@@ -21,12 +22,14 @@
cache_readonly,
deprecate_nonkeyword_arguments,
)
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
+from pandas.core.dtypes.inference import is_bool
from pandas.core.dtypes.missing import isna
from pandas.core.arrays.categorical import (
@@ -126,12 +129,12 @@ def concat(
axis: Axis = ...,
join: str = ...,
ignore_index: bool = ...,
- keys=None,
- levels=None,
- names=None,
- verify_integrity: bool = False,
- sort: bool = False,
- copy: bool = True,
+ keys=...,
+ levels=...,
+ names=...,
+ verify_integrity: bool = ...,
+ sort: bool = ...,
+ copy: bool = ...,
) -> DataFrame | Series:
...
@@ -519,6 +522,14 @@ def __init__(
self.keys = keys
self.names = names or getattr(keys, "names", None)
self.levels = levels
+
+ if not is_bool(sort):
+ warnings.warn(
+ "Passing non boolean values for sort is deprecated and "
+ "will error in a future version!",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
self.sort = sort
self.ignore_index = ignore_index
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 4dd15dd367581..960b8faec7c59 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1783,21 +1783,27 @@ def _validate_specification(self) -> None:
# GH#29130 Check that merge keys do not have dtype object
if not self.left_index:
left_on = self.left_on[0]
- lo_dtype = (
- self.left[left_on].dtype
- if left_on in self.left.columns
- else self.left.index.get_level_values(left_on)
- )
+ if is_array_like(left_on):
+ lo_dtype = left_on.dtype
+ else:
+ lo_dtype = (
+ self.left[left_on].dtype
+ if left_on in self.left.columns
+ else self.left.index.get_level_values(left_on)
+ )
else:
lo_dtype = self.left.index.dtype
if not self.right_index:
right_on = self.right_on[0]
- ro_dtype = (
- self.right[right_on].dtype
- if right_on in self.right.columns
- else self.right.index.get_level_values(right_on)
- )
+ if is_array_like(right_on):
+ ro_dtype = right_on.dtype
+ else:
+ ro_dtype = (
+ self.right[right_on].dtype
+ if right_on in self.right.columns
+ else self.right.index.get_level_values(right_on)
+ )
else:
ro_dtype = self.right.index.dtype
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 6c6b14653df75..c2cd73584b7da 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -213,7 +213,9 @@ def get_result(self, values, value_columns, fill_value):
columns = self.get_new_columns(value_columns)
index = self.new_index
- return self.constructor(values, index=index, columns=columns)
+ return self.constructor(
+ values, index=index, columns=columns, dtype=values.dtype
+ )
def get_new_values(self, values, fill_value=None):
@@ -243,24 +245,24 @@ def get_new_values(self, values, fill_value=None):
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
+ dtype = values.dtype
+
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
- name = np.dtype(dtype).name
else:
- dtype, fill_value = maybe_promote(values.dtype, fill_value)
if isinstance(dtype, ExtensionDtype):
# GH#41875
cls = dtype.construct_array_type()
new_values = cls._empty(result_shape, dtype=dtype)
new_values[:] = fill_value
- name = dtype.name
else:
+ dtype, fill_value = maybe_promote(dtype, fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
- name = np.dtype(dtype).name
+ name = dtype.name
new_mask = np.zeros(result_shape, dtype=bool)
# we need to convert to a basic dtype
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index a1b058224795e..8cf94e5e433a6 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -443,7 +443,8 @@ def _bins_to_cuts(
)
elif ordered and len(set(labels)) != len(labels):
raise ValueError(
- "labels must be unique if ordered=True; pass ordered=False for duplicate labels" # noqa
+ "labels must be unique if ordered=True; pass ordered=False "
+ "for duplicate labels"
)
else:
if len(labels) != len(bins) - 1:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b3c9167bfbbab..2c11e3528c229 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -802,9 +802,11 @@ def view(self, dtype: Dtype | None = None) -> Series:
4 2
dtype: int8
"""
- return self._constructor(
- self._values.view(dtype), index=self.index
- ).__finalize__(self, method="view")
+ # self.array instead of self._values so we piggyback on PandasArray
+ # implementation
+ res_values = self.array.view(dtype)
+ res_ser = self._constructor(res_values, index=self.index)
+ return res_ser.__finalize__(self, method="view")
# ----------------------------------------------------------------------
# NDArray Compat
@@ -1012,7 +1014,7 @@ def _get_values_tuple(self, key):
# mpl hackaround
if com.any_none(*key):
result = self._get_values(key)
- deprecate_ndim_indexing(result, stacklevel=5)
+ deprecate_ndim_indexing(result, stacklevel=find_stack_level())
return result
if not isinstance(self.index, MultiIndex):
@@ -1449,9 +1451,6 @@ def reset_index(self, level=None, drop=False, name=lib.no_default, inplace=False
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if drop:
- if name is lib.no_default:
- name = self.name
-
new_index = default_index(len(self))
if level is not None:
if not isinstance(level, (tuple, list)):
@@ -1462,8 +1461,6 @@ def reset_index(self, level=None, drop=False, name=lib.no_default, inplace=False
if inplace:
self.index = new_index
- # set name if it was passed, otherwise, keep the previous name
- self.name = name or self.name
else:
return self._constructor(
self._values.copy(), index=new_index
@@ -1767,7 +1764,7 @@ def to_frame(self, name: Hashable = lib.no_default) -> DataFrame:
columns = Index([name])
mgr = self._mgr.to_2d_mgr(columns)
- return self._constructor_expanddim(mgr)
+ return self._constructor_expanddim(mgr).__finalize__(self, method="to_frame")
def _set_name(self, name, inplace=False) -> Series:
"""
@@ -1838,7 +1835,7 @@ def _set_name(self, name, inplace=False) -> Series:
Name: Max Speed, dtype: float64
We can also choose to include `NA` in group keys or not by defining
-`dropna` parameter, the default setting is `True`:
+`dropna` parameter, the default setting is `True`.
>>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan])
>>> ser.groupby(level=0).sum()
@@ -1975,7 +1972,7 @@ def count(self, level=None):
self, method="count"
)
- def mode(self, dropna=True) -> Series:
+ def mode(self, dropna: bool = True) -> Series:
"""
Return the mode(s) of the Series.
@@ -4471,14 +4468,16 @@ def align(
def rename(
self,
- index=None,
+ mapper=None,
*,
+ index=None,
+ columns=None,
axis=None,
copy=True,
inplace=False,
level=None,
errors="ignore",
- ):
+ ) -> Series | None:
"""
Alter Series index labels or name.
@@ -4494,7 +4493,7 @@ def rename(
----------
axis : {0 or "index"}
Unused. Accepted for compatibility with DataFrame method only.
- index : scalar, hashable sequence, dict-like or function, optional
+ mapper : scalar, hashable sequence, dict-like or function, optional
Functions or dict-like are transformations to apply to
the index.
Scalar or hashable sequence-like will alter the ``Series.name``
@@ -4542,12 +4541,16 @@ def rename(
# Make sure we raise if an invalid 'axis' is passed.
axis = self._get_axis_number(axis)
- if callable(index) or is_dict_like(index):
+ if index is not None and mapper is not None:
+ raise TypeError("Cannot specify both 'mapper' and 'index'")
+ if mapper is None:
+ mapper = index
+ if callable(mapper) or is_dict_like(mapper):
return super().rename(
- index, copy=copy, inplace=inplace, level=level, errors=errors
+ mapper, copy=copy, inplace=inplace, level=level, errors=errors
)
else:
- return self._set_name(index, inplace=inplace)
+ return self._set_name(mapper, inplace=inplace)
@overload
def set_axis(
@@ -4604,8 +4607,17 @@ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
optional_labels=_shared_doc_kwargs["optional_labels"],
optional_axis=_shared_doc_kwargs["optional_axis"],
)
- def reindex(self, index=None, **kwargs):
- return super().reindex(index=index, **kwargs)
+ def reindex(self, *args, **kwargs) -> Series:
+ if len(args) > 1:
+ raise TypeError("Only one positional argument ('index') is allowed")
+ if args:
+ (index,) = args
+ if "index" in kwargs:
+ raise TypeError(
+ "'index' passed as both positional and keyword argument"
+ )
+ kwargs.update({"index": index})
+ return super().reindex(**kwargs)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
def drop(
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index bc4f4d657b859..9f56885eefc3c 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -94,10 +94,12 @@
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
- aligned; see ``.align()`` method). If an ndarray is passed, the
- values are used as-is to determine the groups. A label or list of
- labels may be passed to group by the columns in ``self``. Notice
- that a tuple is interpreted as a (single) key.
+ aligned; see ``.align()`` method). If a list or ndarray of length
+ equal to the selected axis is passed (see the `groupby user guide
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#splitting-an-object-into-groups>`_),
+ the values are used as-is to determine the groups. A label or list
+ of labels may be passed to group by the columns in ``self``.
+ Notice that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
@@ -126,7 +128,7 @@
dropna : bool, default True
If True, and if group keys contain NA values, NA values together
with row/column will be dropped.
- If False, NA values will also be treated as the key in groups
+ If False, NA values will also be treated as the key in groups.
.. versionadded:: 1.1.0
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index f82e1aa5d188c..b5f3af5af8e38 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -1427,7 +1427,7 @@ def replace(
" In addition, single character regular expressions will "
"*not* be treated as literal strings when regex=True."
)
- warnings.warn(msg, FutureWarning, stacklevel=3)
+ warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
# Check whether repl is valid (GH 13438, GH 15055)
if not (isinstance(repl, str) or callable(repl)):
@@ -1977,7 +1977,7 @@ def rstrip(self, to_strip=None):
Parameters
----------
%(side)s : str
- %(side)s to remove.
+ Remove the %(side)s of the string.
Returns
-------
diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py
index ba2f56c79bdfe..2ce5c0cbea272 100644
--- a/pandas/core/strings/object_array.py
+++ b/pandas/core/strings/object_array.py
@@ -12,7 +12,7 @@
import pandas._libs.missing as libmissing
import pandas._libs.ops as libops
from pandas._typing import (
- Dtype,
+ NpDtype,
Scalar,
)
@@ -37,7 +37,7 @@ def __len__(self):
raise NotImplementedError
def _str_map(
- self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
+ self, f, na_value=None, dtype: NpDtype | None = None, convert: bool = True
):
"""
Map a callable over valid elements of the array.
@@ -62,9 +62,7 @@ def _str_map(
na_value = self._str_na_value
if not len(self):
- # error: Argument 1 to "ndarray" has incompatible type "int";
- # expected "Sequence[int]"
- return np.ndarray(0, dtype=dtype) # type: ignore[arg-type]
+ return np.ndarray(0, dtype=dtype)
arr = np.asarray(self, dtype=object)
mask = isna(arr)
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 67a6975c21fdd..f40f227259998 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -28,7 +28,7 @@
nat_strings,
parsing,
)
-from pandas._libs.tslibs.parsing import ( # noqa
+from pandas._libs.tslibs.parsing import ( # noqa:F401
DateParseError,
format_is_iso,
guess_datetime_format,
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index 5277a3514b423..81b2be4e10e62 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -68,8 +68,13 @@ def to_timedelta(arg, unit=None, errors="raise"):
Returns
-------
- timedelta64 or numpy.array of timedelta64
- Output type returned if parsing succeeded.
+ timedelta
+ If parsing succeeded.
+ Return type depends on input:
+
+ - list-like: TimedeltaIndex of timedelta64 dtype
+ - Series: Series of timedelta64 dtype
+ - scalar: Timedelta
See Also
--------
diff --git a/pandas/core/window/doc.py b/pandas/core/window/doc.py
index 2cc7962c6bd7b..930c12841e4e4 100644
--- a/pandas/core/window/doc.py
+++ b/pandas/core/window/doc.py
@@ -11,7 +11,7 @@ def create_section_header(header: str) -> str:
return "\n".join((header, "-" * len(header))) + "\n"
-template_header = "Calculate the {window_method} {aggregation_description}.\n\n"
+template_header = "\nCalculate the {window_method} {aggregation_description}.\n\n"
template_returns = dedent(
"""
@@ -98,14 +98,17 @@ def create_section_header(header: str) -> str:
"extended documentation and performance considerations for the Numba engine.\n\n"
)
-window_agg_numba_parameters = dedent(
- """
+
+def window_agg_numba_parameters(version: str = "1.3") -> str:
+ return (
+ dedent(
+ """
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
- .. versionadded:: 1.3.0
+ .. versionadded:: {version}.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
@@ -114,6 +117,9 @@ def create_section_header(header: str) -> str:
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}``
- .. versionadded:: 1.3.0\n
+ .. versionadded:: {version}.0\n
"""
-).replace("\n", "", 1)
+ )
+ .replace("\n", "", 1)
+ .replace("{version}", version)
+ )
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index f5f681d9de797..4bebc56273805 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -417,6 +417,13 @@ def __init__(
self.alpha,
)
+ def _check_window_bounds(
+ self, start: np.ndarray, end: np.ndarray, num_vals: int
+ ) -> None:
+ # emw algorithms are iterative with each point
+ # ExponentialMovingWindowIndexer "bounds" are the entire window
+ pass
+
def _get_window_indexer(self) -> BaseIndexer:
"""
Return an indexer class that will compute the window start and end bounds
@@ -504,7 +511,7 @@ def aggregate(self, func, *args, **kwargs):
template_header,
create_section_header("Parameters"),
args_compat,
- window_agg_numba_parameters,
+ window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -558,7 +565,7 @@ def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
template_header,
create_section_header("Parameters"),
args_compat,
- window_agg_numba_parameters,
+ window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py
index 796849e622ff2..8c8b7a8284684 100644
--- a/pandas/core/window/expanding.py
+++ b/pandas/core/window/expanding.py
@@ -227,7 +227,7 @@ def apply(
template_header,
create_section_header("Parameters"),
args_compat,
- window_agg_numba_parameters,
+ window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -253,7 +253,7 @@ def sum(
template_header,
create_section_header("Parameters"),
args_compat,
- window_agg_numba_parameters,
+ window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -279,7 +279,7 @@ def max(
template_header,
create_section_header("Parameters"),
args_compat,
- window_agg_numba_parameters,
+ window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -305,7 +305,7 @@ def min(
template_header,
create_section_header("Parameters"),
args_compat,
- window_agg_numba_parameters,
+ window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -330,7 +330,7 @@ def mean(
@doc(
template_header,
create_section_header("Parameters"),
- window_agg_numba_parameters,
+ window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -361,6 +361,7 @@ def median(
"""
).replace("\n", "", 1),
args_compat,
+ window_agg_numba_parameters("1.4"),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -396,9 +397,18 @@ def median(
aggregation_description="standard deviation",
agg_method="std",
)
- def std(self, ddof: int = 1, *args, **kwargs):
+ def std(
+ self,
+ ddof: int = 1,
+ *args,
+ engine: str | None = None,
+ engine_kwargs: dict[str, bool] | None = None,
+ **kwargs,
+ ):
nv.validate_expanding_func("std", args, kwargs)
- return super().std(ddof=ddof, **kwargs)
+ return super().std(
+ ddof=ddof, engine=engine, engine_kwargs=engine_kwargs, **kwargs
+ )
@doc(
template_header,
@@ -411,6 +421,7 @@ def std(self, ddof: int = 1, *args, **kwargs):
"""
).replace("\n", "", 1),
args_compat,
+ window_agg_numba_parameters("1.4"),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -446,9 +457,18 @@ def std(self, ddof: int = 1, *args, **kwargs):
aggregation_description="variance",
agg_method="var",
)
- def var(self, ddof: int = 1, *args, **kwargs):
+ def var(
+ self,
+ ddof: int = 1,
+ *args,
+ engine: str | None = None,
+ engine_kwargs: dict[str, bool] | None = None,
+ **kwargs,
+ ):
nv.validate_expanding_func("var", args, kwargs)
- return super().var(ddof=ddof, **kwargs)
+ return super().var(
+ ddof=ddof, engine=engine, engine_kwargs=engine_kwargs, **kwargs
+ )
@doc(
template_header,
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index f7799912937b7..fc3390ee6db03 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -227,6 +227,20 @@ def _validate(self) -> None:
if self.method not in ["table", "single"]:
raise ValueError("method must be 'table' or 'single")
+ def _check_window_bounds(
+ self, start: np.ndarray, end: np.ndarray, num_vals: int
+ ) -> None:
+ if len(start) != len(end):
+ raise ValueError(
+ f"start ({len(start)}) and end ({len(end)}) bounds must be the "
+ f"same length"
+ )
+ elif len(start) != num_vals:
+ raise ValueError(
+ f"start and end bounds ({len(start)}) must be the same length "
+ f"as the object ({num_vals})"
+ )
+
def _create_data(self, obj: NDFrameT) -> NDFrameT:
"""
Split data into blocks & return conformed data.
@@ -311,10 +325,7 @@ def __iter__(self):
center=self.center,
closed=self.closed,
)
-
- assert len(start) == len(
- end
- ), "these should be equal in length from get_window_bounds"
+ self._check_window_bounds(start, end, len(obj))
for s, e in zip(start, end):
result = obj.iloc[slice(s, e)]
@@ -565,9 +576,7 @@ def calc(x):
center=self.center,
closed=self.closed,
)
- assert len(start) == len(
- end
- ), "these should be equal in length from get_window_bounds"
+ self._check_window_bounds(start, end, len(x))
return func(x, start, end, min_periods, *numba_args)
@@ -589,6 +598,7 @@ def _numba_apply(
func: Callable[..., Any],
numba_cache_key_str: str,
engine_kwargs: dict[str, bool] | None = None,
+ *func_args,
):
window_indexer = self._get_window_indexer()
min_periods = (
@@ -608,10 +618,11 @@ def _numba_apply(
center=self.center,
closed=self.closed,
)
+ self._check_window_bounds(start, end, len(values))
aggregator = executor.generate_shared_aggregator(
func, engine_kwargs, numba_cache_key_str
)
- result = aggregator(values, start, end, min_periods)
+ result = aggregator(values, start, end, min_periods, *func_args)
NUMBA_FUNC_CACHE[(func, numba_cache_key_str)] = aggregator
result = result.T if self.axis == 1 else result
if obj.ndim == 1:
@@ -902,7 +913,7 @@ class Window(BaseWindow):
If ``'neither'``, the first and last points in the window are excluded
from calculations.
- Default ``None`` (``'right'``)
+ Default ``None`` (``'right'``).
.. versionchanged:: 1.2.0
@@ -1449,8 +1460,24 @@ def median(
window_func = window_aggregations.roll_median_c
return self._apply(window_func, name="median", **kwargs)
- def std(self, ddof: int = 1, *args, **kwargs):
+ def std(
+ self,
+ ddof: int = 1,
+ *args,
+ engine: str | None = None,
+ engine_kwargs: dict[str, bool] | None = None,
+ **kwargs,
+ ):
nv.validate_window_func("std", args, kwargs)
+ if maybe_use_numba(engine):
+ if self.method == "table":
+ raise NotImplementedError("std not supported with method='table'")
+ else:
+ from pandas.core._numba.kernels import sliding_var
+
+ return zsqrt(
+ self._numba_apply(sliding_var, "rolling_std", engine_kwargs, ddof)
+ )
window_func = window_aggregations.roll_var
def zsqrt_func(values, begin, end, min_periods):
@@ -1462,8 +1489,24 @@ def zsqrt_func(values, begin, end, min_periods):
**kwargs,
)
- def var(self, ddof: int = 1, *args, **kwargs):
+ def var(
+ self,
+ ddof: int = 1,
+ *args,
+ engine: str | None = None,
+ engine_kwargs: dict[str, bool] | None = None,
+ **kwargs,
+ ):
nv.validate_window_func("var", args, kwargs)
+ if maybe_use_numba(engine):
+ if self.method == "table":
+ raise NotImplementedError("var not supported with method='table'")
+ else:
+ from pandas.core._numba.kernels import sliding_var
+
+ return self._numba_apply(
+ sliding_var, "rolling_var", engine_kwargs, ddof
+ )
window_func = partial(window_aggregations.roll_var, ddof=ddof)
return self._apply(
window_func,
@@ -1544,10 +1587,7 @@ def cov_func(x, y):
center=self.center,
closed=self.closed,
)
-
- assert len(start) == len(
- end
- ), "these should be equal in length from get_window_bounds"
+ self._check_window_bounds(start, end, len(x_array))
with np.errstate(all="ignore"):
mean_x_y = window_aggregations.roll_mean(
@@ -1588,10 +1628,7 @@ def corr_func(x, y):
center=self.center,
closed=self.closed,
)
-
- assert len(start) == len(
- end
- ), "these should be equal in length from get_window_bounds"
+ self._check_window_bounds(start, end, len(x_array))
with np.errstate(all="ignore"):
mean_x_y = window_aggregations.roll_mean(
@@ -1806,7 +1843,7 @@ def apply(
template_header,
create_section_header("Parameters"),
args_compat,
- window_agg_numba_parameters,
+ window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -1880,7 +1917,7 @@ def sum(
template_header,
create_section_header("Parameters"),
args_compat,
- window_agg_numba_parameters,
+ window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -1906,7 +1943,7 @@ def max(
template_header,
create_section_header("Parameters"),
args_compat,
- window_agg_numba_parameters,
+ window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -1947,7 +1984,7 @@ def min(
template_header,
create_section_header("Parameters"),
args_compat,
- window_agg_numba_parameters,
+ window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -1994,7 +2031,7 @@ def mean(
@doc(
template_header,
create_section_header("Parameters"),
- window_agg_numba_parameters,
+ window_agg_numba_parameters(),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -2040,6 +2077,7 @@ def median(
"""
).replace("\n", "", 1),
args_compat,
+ window_agg_numba_parameters("1.4"),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -2077,9 +2115,18 @@ def median(
aggregation_description="standard deviation",
agg_method="std",
)
- def std(self, ddof: int = 1, *args, **kwargs):
+ def std(
+ self,
+ ddof: int = 1,
+ *args,
+ engine: str | None = None,
+ engine_kwargs: dict[str, bool] | None = None,
+ **kwargs,
+ ):
nv.validate_rolling_func("std", args, kwargs)
- return super().std(ddof=ddof, **kwargs)
+ return super().std(
+ ddof=ddof, engine=engine, engine_kwargs=engine_kwargs, **kwargs
+ )
@doc(
template_header,
@@ -2092,6 +2139,7 @@ def std(self, ddof: int = 1, *args, **kwargs):
"""
).replace("\n", "", 1),
args_compat,
+ window_agg_numba_parameters("1.4"),
kwargs_compat,
create_section_header("Returns"),
template_returns,
@@ -2129,9 +2177,18 @@ def std(self, ddof: int = 1, *args, **kwargs):
aggregation_description="variance",
agg_method="var",
)
- def var(self, ddof: int = 1, *args, **kwargs):
+ def var(
+ self,
+ ddof: int = 1,
+ *args,
+ engine: str | None = None,
+ engine_kwargs: dict[str, bool] | None = None,
+ **kwargs,
+ ):
nv.validate_rolling_func("var", args, kwargs)
- return super().var(ddof=ddof, **kwargs)
+ return super().var(
+ ddof=ddof, engine=engine, engine_kwargs=engine_kwargs, **kwargs
+ )
@doc(
template_header,
diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py
index 0417529999890..1f97b1af81b6e 100644
--- a/pandas/io/clipboard/__init__.py
+++ b/pandas/io/clipboard/__init__.py
@@ -111,12 +111,16 @@ def _stringifyText(text) -> str:
def init_osx_pbcopy_clipboard():
def copy_osx_pbcopy(text):
text = _stringifyText(text) # Converts non-str values to str.
- p = subprocess.Popen(["pbcopy", "w"], stdin=subprocess.PIPE, close_fds=True)
- p.communicate(input=text.encode(ENCODING))
+ with subprocess.Popen(
+ ["pbcopy", "w"], stdin=subprocess.PIPE, close_fds=True
+ ) as p:
+ p.communicate(input=text.encode(ENCODING))
def paste_osx_pbcopy():
- p = subprocess.Popen(["pbpaste", "r"], stdout=subprocess.PIPE, close_fds=True)
- stdout = p.communicate()[0]
+ with subprocess.Popen(
+ ["pbpaste", "r"], stdout=subprocess.PIPE, close_fds=True
+ ) as p:
+ stdout = p.communicate()[0]
return stdout.decode(ENCODING)
return copy_osx_pbcopy, paste_osx_pbcopy
@@ -179,22 +183,22 @@ def copy_xclip(text, primary=False):
selection = DEFAULT_SELECTION
if primary:
selection = PRIMARY_SELECTION
- p = subprocess.Popen(
+ with subprocess.Popen(
["xclip", "-selection", selection], stdin=subprocess.PIPE, close_fds=True
- )
- p.communicate(input=text.encode(ENCODING))
+ ) as p:
+ p.communicate(input=text.encode(ENCODING))
def paste_xclip(primary=False):
selection = DEFAULT_SELECTION
if primary:
selection = PRIMARY_SELECTION
- p = subprocess.Popen(
+ with subprocess.Popen(
["xclip", "-selection", selection, "-o"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
- )
- stdout = p.communicate()[0]
+ ) as p:
+ stdout = p.communicate()[0]
# Intentionally ignore extraneous output on stderr when clipboard is empty
return stdout.decode(ENCODING)
@@ -210,19 +214,19 @@ def copy_xsel(text, primary=False):
selection_flag = DEFAULT_SELECTION
if primary:
selection_flag = PRIMARY_SELECTION
- p = subprocess.Popen(
+ with subprocess.Popen(
["xsel", selection_flag, "-i"], stdin=subprocess.PIPE, close_fds=True
- )
- p.communicate(input=text.encode(ENCODING))
+ ) as p:
+ p.communicate(input=text.encode(ENCODING))
def paste_xsel(primary=False):
selection_flag = DEFAULT_SELECTION
if primary:
selection_flag = PRIMARY_SELECTION
- p = subprocess.Popen(
+ with subprocess.Popen(
["xsel", selection_flag, "-o"], stdout=subprocess.PIPE, close_fds=True
- )
- stdout = p.communicate()[0]
+ ) as p:
+ stdout = p.communicate()[0]
return stdout.decode(ENCODING)
return copy_xsel, paste_xsel
@@ -231,7 +235,7 @@ def paste_xsel(primary=False):
def init_klipper_clipboard():
def copy_klipper(text):
text = _stringifyText(text) # Converts non-str values to str.
- p = subprocess.Popen(
+ with subprocess.Popen(
[
"qdbus",
"org.kde.klipper",
@@ -241,16 +245,16 @@ def copy_klipper(text):
],
stdin=subprocess.PIPE,
close_fds=True,
- )
- p.communicate(input=None)
+ ) as p:
+ p.communicate(input=None)
def paste_klipper():
- p = subprocess.Popen(
+ with subprocess.Popen(
["qdbus", "org.kde.klipper", "/klipper", "getClipboardContents"],
stdout=subprocess.PIPE,
close_fds=True,
- )
- stdout = p.communicate()[0]
+ ) as p:
+ stdout = p.communicate()[0]
# Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
# TODO: https://github.com/asweigart/pyperclip/issues/43
@@ -483,17 +487,17 @@ def paste_windows():
def init_wsl_clipboard():
def copy_wsl(text):
text = _stringifyText(text) # Converts non-str values to str.
- p = subprocess.Popen(["clip.exe"], stdin=subprocess.PIPE, close_fds=True)
- p.communicate(input=text.encode(ENCODING))
+ with subprocess.Popen(["clip.exe"], stdin=subprocess.PIPE, close_fds=True) as p:
+ p.communicate(input=text.encode(ENCODING))
def paste_wsl():
- p = subprocess.Popen(
+ with subprocess.Popen(
["powershell.exe", "-command", "Get-Clipboard"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
- )
- stdout = p.communicate()[0]
+ ) as p:
+ stdout = p.communicate()[0]
# WSL appends "\r\n" to the contents.
return stdout[:-2].decode(ENCODING)
diff --git a/pandas/io/common.py b/pandas/io/common.py
index be6577e646ac3..2102e67f06d36 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -23,8 +23,12 @@
IO,
Any,
AnyStr,
+ Generic,
+ Literal,
Mapping,
+ TypeVar,
cast,
+ overload,
)
from urllib.parse import (
urljoin,
@@ -37,18 +41,20 @@
import zipfile
from pandas._typing import (
- Buffer,
+ BaseBuffer,
CompressionDict,
CompressionOptions,
- FileOrBuffer,
- FilePathOrBuffer,
+ FilePath,
+ ReadBuffer,
StorageOptions,
+ WriteBuffer,
)
from pandas.compat import (
get_lzma_file,
import_lzma,
)
from pandas.compat._optional import import_optional_dependency
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import is_file_like
@@ -57,19 +63,16 @@
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard("")
+BaseBufferT = TypeVar("BaseBufferT", bound=BaseBuffer)
+
@dataclasses.dataclass
class IOArgs:
"""
Return value of io/common.py:_get_filepath_or_buffer.
-
- Note (copy&past from io/parsers):
- filepath_or_buffer can be Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
- though mypy handling of conditional imports is difficult.
- See https://github.com/python/mypy/issues/1297
"""
- filepath_or_buffer: FileOrBuffer
+ filepath_or_buffer: str | BaseBuffer
encoding: str
mode: str
compression: CompressionDict
@@ -77,7 +80,7 @@ class IOArgs:
@dataclasses.dataclass
-class IOHandles:
+class IOHandles(Generic[AnyStr]):
"""
Return value of io/common.py:get_handle
@@ -91,9 +94,10 @@ class IOHandles:
is_wrapped: Whether a TextIOWrapper needs to be detached.
"""
- handle: Buffer
+ # handle might not implement the IO-interface
+ handle: IO[AnyStr]
compression: CompressionDict
- created_handles: list[Buffer] = dataclasses.field(default_factory=list)
+ created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list)
is_wrapped: bool = False
is_mmap: bool = False
@@ -117,14 +121,14 @@ def close(self) -> None:
self.created_handles = []
self.is_wrapped = False
- def __enter__(self) -> IOHandles:
+ def __enter__(self) -> IOHandles[AnyStr]:
return self
def __exit__(self, *args: Any) -> None:
self.close()
-def is_url(url) -> bool:
+def is_url(url: object) -> bool:
"""
Check to see if a URL has a valid protocol.
@@ -142,7 +146,17 @@ def is_url(url) -> bool:
return parse_url(url).scheme in _VALID_URLS
-def _expand_user(filepath_or_buffer: FileOrBuffer[AnyStr]) -> FileOrBuffer[AnyStr]:
+@overload
+def _expand_user(filepath_or_buffer: str) -> str:
+ ...
+
+
+@overload
+def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT:
+ ...
+
+
+def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT:
"""
Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
@@ -170,10 +184,22 @@ def validate_header_arg(header) -> None:
)
+@overload
+def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str:
+ ...
+
+
+@overload
+def stringify_path(
+ filepath_or_buffer: BaseBufferT, convert_file_like: bool = ...
+) -> BaseBufferT:
+ ...
+
+
def stringify_path(
- filepath_or_buffer: FilePathOrBuffer[AnyStr],
+ filepath_or_buffer: FilePath | BaseBufferT,
convert_file_like: bool = False,
-) -> FileOrBuffer[AnyStr]:
+) -> str | BaseBufferT:
"""
Attempt to convert a path-like object to a string.
@@ -197,7 +223,7 @@ def stringify_path(
# GH 38125: some fsspec objects implement os.PathLike but have already opened a
# file. This prevents opening the file a second time. infer_compression calls
# this function with convert_file_like=True to infer the compression.
- return cast(FileOrBuffer[AnyStr], filepath_or_buffer)
+ return cast(BaseBufferT, filepath_or_buffer)
if isinstance(filepath_or_buffer, os.PathLike):
filepath_or_buffer = filepath_or_buffer.__fspath__()
@@ -214,7 +240,7 @@ def urlopen(*args, **kwargs):
return urllib.request.urlopen(*args, **kwargs)
-def is_fsspec_url(url: FilePathOrBuffer) -> bool:
+def is_fsspec_url(url: FilePath | BaseBuffer) -> bool:
"""
Returns true if the given URL looks like
something fsspec can handle
@@ -227,7 +253,7 @@ def is_fsspec_url(url: FilePathOrBuffer) -> bool:
def _get_filepath_or_buffer(
- filepath_or_buffer: FilePathOrBuffer,
+ filepath_or_buffer: FilePath | BaseBuffer,
encoding: str = "utf-8",
compression: CompressionOptions = None,
mode: str = "r",
@@ -270,7 +296,7 @@ def _get_filepath_or_buffer(
warnings.warn(
"compression has no effect when passing a non-binary object as input.",
RuntimeWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
compression_method = None
@@ -389,7 +415,11 @@ def _get_filepath_or_buffer(
mode=mode,
)
- if not is_file_like(filepath_or_buffer):
+ # is_file_like requires (read | write) & __iter__ but __iter__ is only
+ # needed for read_csv(engine=python)
+ if not (
+ hasattr(filepath_or_buffer, "read") or hasattr(filepath_or_buffer, "write")
+ ):
msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
raise ValueError(msg)
@@ -459,7 +489,7 @@ def get_compression_method(
def infer_compression(
- filepath_or_buffer: FilePathOrBuffer, compression: str | None
+ filepath_or_buffer: FilePath | BaseBuffer, compression: str | None
) -> str | None:
"""
Get the compression method for filepath_or_buffer. If compression='infer',
@@ -532,16 +562,47 @@ def check_parent_directory(path: Path | str) -> None:
raise OSError(fr"Cannot save file into a non-existent directory: '{parent}'")
+@overload
def get_handle(
- path_or_buf: FilePathOrBuffer,
+ path_or_buf: FilePath | BaseBuffer,
mode: str,
+ *,
+ encoding: str | None = ...,
+ compression: CompressionOptions = ...,
+ memory_map: bool = ...,
+ is_text: Literal[False],
+ errors: str | None = ...,
+ storage_options: StorageOptions = ...,
+) -> IOHandles[bytes]:
+ ...
+
+
+@overload
+def get_handle(
+ path_or_buf: FilePath | BaseBuffer,
+ mode: str,
+ *,
+ encoding: str | None = ...,
+ compression: CompressionOptions = ...,
+ memory_map: bool = ...,
+ is_text: Literal[True] = ...,
+ errors: str | None = ...,
+ storage_options: StorageOptions = ...,
+) -> IOHandles[str]:
+ ...
+
+
+def get_handle(
+ path_or_buf: FilePath | BaseBuffer,
+ mode: str,
+ *,
encoding: str | None = None,
compression: CompressionOptions = None,
memory_map: bool = False,
is_text: bool = True,
errors: str | None = None,
storage_options: StorageOptions = None,
-) -> IOHandles:
+) -> IOHandles[str] | IOHandles[bytes]:
"""
Get file handle for given path/buffer and mode.
@@ -614,7 +675,7 @@ def get_handle(
)
handle = ioargs.filepath_or_buffer
- handles: list[Buffer]
+ handles: list[BaseBuffer]
# memory mapping needs to be the first step
handle, memory_map, handles = _maybe_memory_map(
@@ -642,17 +703,18 @@ def get_handle(
if compression == "gzip":
if is_path:
assert isinstance(handle, str)
- handle = gzip.GzipFile(
+ # error: Incompatible types in assignment (expression has type
+ # "GzipFile", variable has type "Union[str, BaseBuffer]")
+ handle = gzip.GzipFile( # type: ignore[assignment]
filename=handle,
mode=ioargs.mode,
**compression_args,
)
else:
handle = gzip.GzipFile(
- # error: Argument "fileobj" to "GzipFile" has incompatible type
- # "Union[str, Union[IO[Any], RawIOBase, BufferedIOBase, TextIOBase,
- # TextIOWrapper, mmap]]"; expected "Optional[IO[bytes]]"
- fileobj=handle, # type: ignore[arg-type]
+ # No overload variant of "GzipFile" matches argument types
+ # "Union[str, BaseBuffer]", "str", "Dict[str, Any]"
+ fileobj=handle, # type: ignore[call-overload]
mode=ioargs.mode,
**compression_args,
)
@@ -671,7 +733,12 @@ def get_handle(
# ZIP Compression
elif compression == "zip":
- handle = _BytesZipFile(handle, ioargs.mode, **compression_args)
+ # error: Argument 1 to "_BytesZipFile" has incompatible type "Union[str,
+ # BaseBuffer]"; expected "Union[Union[str, PathLike[str]],
+ # ReadBuffer[bytes], WriteBuffer[bytes]]"
+ handle = _BytesZipFile(
+ handle, ioargs.mode, **compression_args # type: ignore[arg-type]
+ )
if handle.mode == "r":
handles.append(handle)
zip_names = handle.namelist()
@@ -752,10 +819,14 @@ def get_handle(
assert not isinstance(ioargs.filepath_or_buffer, str)
handles.append(ioargs.filepath_or_buffer)
- assert not isinstance(handle, str)
return IOHandles(
- handle=handle,
- created_handles=handles,
+ # error: Argument "handle" to "IOHandles" has incompatible type
+ # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes],
+ # typing.IO[Any]]"; expected "pandas._typing.IO[Any]"
+ handle=handle, # type: ignore[arg-type]
+ # error: Argument "created_handles" to "IOHandles" has incompatible type
+ # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]"
+ created_handles=handles, # type: ignore[arg-type]
is_wrapped=is_wrapped,
is_mmap=memory_map,
compression=ioargs.compression,
@@ -786,7 +857,7 @@ class _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore[misc]
# GH 17778
def __init__(
self,
- file: FilePathOrBuffer,
+ file: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],
mode: str,
archive_name: str | None = None,
**kwargs,
@@ -804,6 +875,18 @@ def __init__(
# _PathLike[str]], IO[bytes]]"
super().__init__(file, mode, **kwargs_zip) # type: ignore[arg-type]
+ def infer_filename(self):
+ """
+ If an explicit archive_name is not given, we still want the file inside the zip
+ file not to be named something.zip, because that causes confusion (GH39465).
+ """
+ if isinstance(self.filename, (os.PathLike, str)):
+ filename = Path(self.filename)
+ if filename.suffix == ".zip":
+ return filename.with_suffix("").name
+ return filename.name
+ return None
+
def write(self, data):
# buffer multiple write calls, write on flush
if self.multiple_write_buffer is None:
@@ -818,7 +901,7 @@ def flush(self) -> None:
return
# ZipFile needs a non-empty string
- archive_name = self.archive_name or self.filename or "zip"
+ archive_name = self.archive_name or self.infer_filename() or "zip"
with self.multiple_write_buffer:
super().writestr(archive_name, self.multiple_write_buffer.getvalue())
@@ -939,15 +1022,15 @@ def detach(self):
def _maybe_memory_map(
- handle: FileOrBuffer,
+ handle: str | BaseBuffer,
memory_map: bool,
encoding: str,
mode: str,
errors: str | None,
decode: bool,
-) -> tuple[FileOrBuffer, bool, list[Buffer]]:
+) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]:
"""Try to memory map file/buffer."""
- handles: list[Buffer] = []
+ handles: list[BaseBuffer] = []
memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)
if not memory_map:
return handle, memory_map, handles
@@ -966,10 +1049,11 @@ def _maybe_memory_map(
# error: Argument 1 to "_MMapWrapper" has incompatible type "Union[IO[Any],
# RawIOBase, BufferedIOBase, TextIOBase, mmap]"; expected "IO[Any]"
wrapped = cast(
- mmap.mmap,
+ BaseBuffer,
_MMapWrapper(handle, encoding, errors, decode), # type: ignore[arg-type]
)
- handle.close()
+ # error: "BaseBuffer" has no attribute "close"
+ handle.close() # type: ignore[attr-defined]
handles.remove(handle)
handles.append(wrapped)
handle = wrapped
@@ -983,7 +1067,7 @@ def _maybe_memory_map(
return handle, memory_map, handles
-def file_exists(filepath_or_buffer: FilePathOrBuffer) -> bool:
+def file_exists(filepath_or_buffer: FilePath | BaseBuffer) -> bool:
"""Test whether file exists."""
exists = False
filepath_or_buffer = stringify_path(filepath_or_buffer)
@@ -997,7 +1081,7 @@ def file_exists(filepath_or_buffer: FilePathOrBuffer) -> bool:
return exists
-def _is_binary_mode(handle: FilePathOrBuffer, mode: str) -> bool:
+def _is_binary_mode(handle: FilePath | BaseBuffer, mode: str) -> bool:
"""Whether the handle is opened in binary mode"""
# specified by user
if "t" in mode or "b" in mode:
diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py
index f079a25f69fec..ef60afa195234 100644
--- a/pandas/io/date_converters.py
+++ b/pandas/io/date_converters.py
@@ -4,6 +4,7 @@
import numpy as np
from pandas._libs.tslibs import parsing
+from pandas.util._exceptions import find_stack_level
def parse_date_time(date_col, time_col):
@@ -18,7 +19,7 @@ def parse_date_time(date_col, time_col):
Use pd.to_datetime(date_col + " " + time_col).to_pydatetime() instead to get a Numpy array.
""", # noqa: E501
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
date_col = _maybe_cast(date_col)
time_col = _maybe_cast(time_col)
@@ -38,7 +39,7 @@ def parse_date_fields(year_col, month_col, day_col):
np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array.
""", # noqa: E501
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
year_col = _maybe_cast(year_col)
@@ -63,7 +64,7 @@ def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col, second_
np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array.
""", # noqa: E501
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
year_col = _maybe_cast(year_col)
@@ -89,7 +90,7 @@ def generic_parser(parse_func, *cols):
Use pd.to_datetime instead.
""",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
N = _check_columns(cols)
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index e543c9161a26e..9eb98195d9a88 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -6,6 +6,7 @@
import os
from textwrap import fill
from typing import (
+ IO,
Any,
Mapping,
cast,
@@ -17,10 +18,11 @@
from pandas._libs.parsers import STR_NA_VALUES
from pandas._typing import (
- Buffer,
DtypeArg,
- FilePathOrBuffer,
+ FilePath,
+ ReadBuffer,
StorageOptions,
+ WriteExcelBuffer,
)
from pandas.compat._optional import (
get_version,
@@ -519,11 +521,10 @@ def parse(
if convert_float is None:
convert_float = True
else:
- stacklevel = find_stack_level()
warnings.warn(
"convert_float is deprecated and will be removed in a future version.",
FutureWarning,
- stacklevel=stacklevel,
+ stacklevel=find_stack_level(),
)
validate_header_arg(header)
@@ -702,17 +703,31 @@ class ExcelWriter(metaclass=abc.ABCMeta):
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://".
.. versionadded:: 1.2.0
- if_sheet_exists : {'error', 'new', 'replace'}, default 'error'
+
+ if_sheet_exists : {'error', 'new', 'replace', 'overlay'}, default 'error'
How to behave when trying to write to a sheet that already
exists (append mode only).
* error: raise a ValueError.
* new: Create a new sheet, with a name determined by the engine.
* replace: Delete the contents of the sheet before writing to it.
+ * overlay: Write contents to the existing sheet without removing the old
+ contents.
.. versionadded:: 1.3.0
+
+ .. versionchanged:: 1.4.0
+
+ Added ``overlay`` option
+
engine_kwargs : dict, optional
- Keyword arguments to be passed into the engine.
+ Keyword arguments to be passed into the engine. These will be passed to
+ the following functions of the respective engines:
+
+ * xlsxwriter: ``xlsxwriter.Workbook(file, **engine_kwargs)``
+ * openpyxl (write mode): ``openpyxl.Workbook(**engine_kwargs)``
+ * openpyxl (append mode): ``openpyxl.load_workbook(file, **engine_kwargs)``
+ * odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)``
.. versionadded:: 1.3.0
**kwargs : dict, optional
@@ -776,6 +791,28 @@ class ExcelWriter(metaclass=abc.ABCMeta):
>>> with pd.ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer:
... df.to_excel(writer, sheet_name="Sheet3")
+ Here, the `if_sheet_exists` parameter can be set to replace a sheet if it
+ already exists:
+
+ >>> with ExcelWriter(
+ ... "path_to_file.xlsx",
+ ... mode="a",
+ ... engine="openpyxl",
+ ... if_sheet_exists="replace",
+ ... ) as writer:
+ ... df.to_excel(writer, sheet_name="Sheet1")
+
+ You can also write multiple DataFrames to a single sheet. Note that the
+ ``if_sheet_exists`` parameter needs to be set to ``overlay``:
+
+ >>> with ExcelWriter("path_to_file.xlsx",
+ ... mode="a",
+ ... engine="openpyxl",
+ ... if_sheet_exists="overlay",
+ ... ) as writer:
+ ... df1.to_excel(writer, sheet_name="Sheet1")
+ ... df2.to_excel(writer, sheet_name="Sheet1", startcol=3)
+
You can store Excel file in RAM:
>>> import io
@@ -792,6 +829,26 @@ class ExcelWriter(metaclass=abc.ABCMeta):
... with zf.open("filename.xlsx", "w") as buffer:
... with pd.ExcelWriter(buffer) as writer:
... df.to_excel(writer)
+
+ You can specify additional arguments to the underlying engine:
+
+ >>> with pd.ExcelWriter(
+ ... "path_to_file.xlsx",
+ ... engine="xlsxwriter",
+ ... engine_kwargs={"options": {"nan_inf_to_errors": True}}
+ ... ) as writer:
+ ... df.to_excel(writer)
+
+ In append mode, ``engine_kwargs`` are passed through to
+ openpyxl's ``load_workbook``:
+
+ >>> with pd.ExcelWriter(
+ ... "path_to_file.xlsx",
+ ... engine="openpyxl",
+ ... mode="a",
+ ... engine_kwargs={"keep_vba": True}
+ ... ) as writer:
+ ... df.to_excel(writer, sheet_name="Sheet2")
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
@@ -817,7 +874,7 @@ class ExcelWriter(metaclass=abc.ABCMeta):
# ExcelWriter.
def __new__(
cls,
- path: FilePathOrBuffer | ExcelWriter,
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
engine=None,
date_format=None,
datetime_format=None,
@@ -833,7 +890,7 @@ def __new__(
warnings.warn(
"Use of **kwargs is deprecated, use engine_kwargs instead.",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
# only switch class if generic(ExcelWriter)
@@ -868,7 +925,7 @@ def __new__(
"deprecated and will also raise a warning, it can "
"be globally set and the warning suppressed.",
FutureWarning,
- stacklevel=4,
+ stacklevel=find_stack_level(),
)
cls = get_writer(engine)
@@ -919,7 +976,7 @@ def save(self):
def __init__(
self,
- path: FilePathOrBuffer | ExcelWriter,
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
engine=None,
date_format=None,
datetime_format=None,
@@ -942,7 +999,9 @@ def __init__(
mode = mode.replace("a", "r+")
# cast ExcelWriter to avoid adding 'if self.handles is not None'
- self.handles = IOHandles(cast(Buffer, path), compression={"copression": None})
+ self.handles = IOHandles(
+ cast(IO[bytes], path), compression={"copression": None}
+ )
if not isinstance(path, ExcelWriter):
self.handles = get_handle(
path, mode, storage_options=storage_options, is_text=False
@@ -961,10 +1020,10 @@ def __init__(
self.mode = mode
- if if_sheet_exists not in [None, "error", "new", "replace"]:
+ if if_sheet_exists not in (None, "error", "new", "replace", "overlay"):
raise ValueError(
f"'{if_sheet_exists}' is not valid for if_sheet_exists. "
- "Valid options are 'error', 'new' and 'replace'."
+ "Valid options are 'error', 'new', 'replace' and 'overlay'."
)
if if_sheet_exists and "r+" not in mode:
raise ValueError("if_sheet_exists is only valid in append mode (mode='a')")
@@ -1060,7 +1119,7 @@ def close(self):
@doc(storage_options=_shared_docs["storage_options"])
def inspect_excel_format(
- content_or_path: FilePathOrBuffer,
+ content_or_path: FilePath | ReadBuffer[bytes],
storage_options: StorageOptions = None,
) -> str | None:
"""
@@ -1107,9 +1166,7 @@ def inspect_excel_format(
elif not peek.startswith(ZIP_SIGNATURE):
return None
- # ZipFile typing is overly-strict
- # https://github.com/python/typeshed/issues/4212
- zf = zipfile.ZipFile(stream) # type: ignore[arg-type]
+ zf = zipfile.ZipFile(stream)
# Workaround for some third party files that use forward slashes and
# lower case names.
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index e0c5a2c6a7ff9..952ad72b480b7 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -3,7 +3,8 @@
import numpy as np
from pandas._typing import (
- FilePathOrBuffer,
+ FilePath,
+ ReadBuffer,
Scalar,
StorageOptions,
)
@@ -28,7 +29,7 @@ class ODFReader(BaseExcelReader):
def __init__(
self,
- filepath_or_buffer: FilePathOrBuffer,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
storage_options: StorageOptions = None,
):
import_optional_dependency("odf")
@@ -40,7 +41,7 @@ def _workbook_class(self):
return OpenDocument
- def load_workbook(self, filepath_or_buffer: FilePathOrBuffer):
+ def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]):
from odf.opendocument import load
return load(filepath_or_buffer)
diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py
index fa2779b01d681..add95c58cd809 100644
--- a/pandas/io/excel/_odswriter.py
+++ b/pandas/io/excel/_odswriter.py
@@ -11,7 +11,10 @@
from pandas._typing import StorageOptions
from pandas.io.excel._base import ExcelWriter
-from pandas.io.excel._util import validate_freeze_panes
+from pandas.io.excel._util import (
+ combine_kwargs,
+ validate_freeze_panes,
+)
from pandas.io.formats.excel import ExcelCell
@@ -44,7 +47,9 @@ def __init__(
engine_kwargs=engine_kwargs,
)
- self.book = OpenDocumentSpreadsheet()
+ engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
+
+ self.book = OpenDocumentSpreadsheet(**engine_kwargs)
self._style_dict: dict[str, str] = {}
def save(self) -> None:
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index d499f1a5ea89f..27c03d4a74bc1 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -9,7 +9,8 @@
import numpy as np
from pandas._typing import (
- FilePathOrBuffer,
+ FilePath,
+ ReadBuffer,
Scalar,
StorageOptions,
)
@@ -62,13 +63,13 @@ def __init__(
if "r+" in self.mode: # Load from existing workbook
from openpyxl import load_workbook
- self.book = load_workbook(self.handles.handle)
+ self.book = load_workbook(self.handles.handle, **engine_kwargs)
self.handles.handle.seek(0)
self.sheets = {name: self.book[name] for name in self.book.sheetnames}
else:
# Create workbook object with default optimized_write=True.
- self.book = Workbook()
+ self.book = Workbook(**engine_kwargs)
if self.book.worksheets:
self.book.remove(self.book.worksheets[0])
@@ -437,10 +438,12 @@ def write_cells(
f"Sheet '{sheet_name}' already exists and "
f"if_sheet_exists is set to 'error'."
)
+ elif self.if_sheet_exists == "overlay":
+ wks = self.sheets[sheet_name]
else:
raise ValueError(
f"'{self.if_sheet_exists}' is not valid for if_sheet_exists. "
- "Valid options are 'error', 'new' and 'replace'."
+ "Valid options are 'error', 'new', 'replace' and 'overlay'."
)
else:
wks = self.sheets[sheet_name]
@@ -505,7 +508,7 @@ def write_cells(
class OpenpyxlReader(BaseExcelReader):
def __init__(
self,
- filepath_or_buffer: FilePathOrBuffer,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
storage_options: StorageOptions = None,
) -> None:
"""
@@ -527,7 +530,7 @@ def _workbook_class(self):
return Workbook
- def load_workbook(self, filepath_or_buffer: FilePathOrBuffer):
+ def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]):
from openpyxl import load_workbook
return load_workbook(
diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py
index 4b2b9f7a3a678..9284cf917a48c 100644
--- a/pandas/io/excel/_pyxlsb.py
+++ b/pandas/io/excel/_pyxlsb.py
@@ -2,7 +2,8 @@
from __future__ import annotations
from pandas._typing import (
- FilePathOrBuffer,
+ FilePath,
+ ReadBuffer,
Scalar,
StorageOptions,
)
@@ -14,7 +15,7 @@
class PyxlsbReader(BaseExcelReader):
def __init__(
self,
- filepath_or_buffer: FilePathOrBuffer,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
storage_options: StorageOptions = None,
):
"""
@@ -38,7 +39,7 @@ def _workbook_class(self):
return Workbook
- def load_workbook(self, filepath_or_buffer: FilePathOrBuffer):
+ def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]):
from pyxlsb import open_workbook
# TODO: hack in buffer capability
diff --git a/pandas/io/excel/_xlwt.py b/pandas/io/excel/_xlwt.py
index 4dadf64b44515..a74c03f330cd9 100644
--- a/pandas/io/excel/_xlwt.py
+++ b/pandas/io/excel/_xlwt.py
@@ -53,7 +53,7 @@ def __init__(
if encoding is None:
encoding = "ascii"
- self.book = xlwt.Workbook(encoding=encoding)
+ self.book = xlwt.Workbook(encoding=encoding, **engine_kwargs)
self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format)
self.fm_date = xlwt.easyxf(num_format_str=self.date_format)
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index 145cbe182eadb..e4547b527a6b9 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -7,8 +7,10 @@
)
from pandas._typing import (
- FilePathOrBuffer,
+ FilePath,
+ ReadBuffer,
StorageOptions,
+ WriteBuffer,
)
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc
@@ -26,7 +28,7 @@
@doc(storage_options=generic._shared_docs["storage_options"])
def to_feather(
df: DataFrame,
- path: FilePathOrBuffer[bytes],
+ path: FilePath | WriteBuffer[bytes],
storage_options: StorageOptions = None,
**kwargs,
):
@@ -36,7 +38,7 @@ def to_feather(
Parameters
----------
df : DataFrame
- path : string file path, or file-like object
+ path : str, path object, or file-like object
{storage_options}
.. versionadded:: 1.2.0
@@ -93,7 +95,7 @@ def to_feather(
@doc(storage_options=generic._shared_docs["storage_options"])
def read_feather(
- path: FilePathOrBuffer[bytes],
+ path: FilePath | ReadBuffer[bytes],
columns: Sequence[Hashable] | None = None,
use_threads: bool = True,
storage_options: StorageOptions = None,
@@ -103,18 +105,11 @@ def read_feather(
Parameters
----------
- path : str, path object or file-like object
- Any valid string path is acceptable. The string could be a URL. Valid
- URL schemes include http, ftp, s3, and file. For file URLs, a host is
- expected. A local file could be:
- ``file://localhost/path/to/table.feather``.
-
- If you want to pass in a path object, pandas accepts any
- ``os.PathLike``.
-
- By file-like object, we refer to objects with a ``read()`` method,
- such as a file handle (e.g. via builtin ``open`` function)
- or ``StringIO``.
+ path : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``read()`` function. The string could be a URL.
+ Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ expected. A local file could be: ``file://localhost/path/to/table.feather``.
columns : sequence, default None
If not provided, all columns are read.
use_threads : bool, default True
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index f078975e4b85a..18228a93b5285 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -20,10 +20,11 @@
from pandas._libs import writers as libwriters
from pandas._typing import (
CompressionOptions,
- FilePathOrBuffer,
+ FilePath,
FloatFormatType,
IndexLabel,
StorageOptions,
+ WriteBuffer,
)
from pandas.core.dtypes.generic import (
@@ -48,7 +49,7 @@ class CSVFormatter:
def __init__(
self,
formatter: DataFrameFormatter,
- path_or_buf: FilePathOrBuffer[str] | FilePathOrBuffer[bytes] = "",
+ path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] = "",
sep: str = ",",
cols: Sequence[Hashable] | None = None,
index_label: IndexLabel | None = None,
@@ -57,7 +58,7 @@ def __init__(
errors: str = "strict",
compression: CompressionOptions = "infer",
quoting: int | None = None,
- line_terminator="\n",
+ line_terminator: str | None = "\n",
chunksize: int | None = None,
quotechar: str | None = '"',
date_format: str | None = None,
@@ -245,7 +246,7 @@ def save(self) -> None:
# Note: self.encoding is irrelevant here
self.writer = csvlib.writer(
- handles.handle, # type: ignore[arg-type]
+ handles.handle,
lineterminator=self.line_terminator,
delimiter=self.sep,
quoting=self.quoting,
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 7f2905d9a63b9..af167964a48f4 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -325,12 +325,6 @@ def build_font(
"color": self.color_to_excel(props.get("color")),
# shadow if nonzero digit before shadow color
"shadow": self._get_shadow(props),
- # FIXME: dont leave commented-out
- # 'vertAlign':,
- # 'charset': ,
- # 'scheme': ,
- # 'outline': ,
- # 'condense': ,
}
def _get_is_bold(self, props: Mapping[str, str]) -> bool | None:
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index ba85a1b340d05..616331bf80a44 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -19,7 +19,6 @@
IO,
TYPE_CHECKING,
Any,
- AnyStr,
Callable,
Hashable,
Iterable,
@@ -51,11 +50,12 @@
ColspaceArgType,
ColspaceType,
CompressionOptions,
- FilePathOrBuffer,
+ FilePath,
FloatFormatType,
FormattersType,
IndexLabel,
StorageOptions,
+ WriteBuffer,
)
from pandas.core.dtypes.common import (
@@ -164,9 +164,6 @@
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
- min_rows : int, optional
- The number of rows to display in the console in a truncated repr
- (when number of rows is above `max_rows`).
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
@@ -1024,7 +1021,7 @@ def __init__(self, fmt: DataFrameFormatter):
def to_latex(
self,
- buf: FilePathOrBuffer[str] | None = None,
+ buf: FilePath | WriteBuffer[str] | None = None,
column_format: str | None = None,
longtable: bool = False,
encoding: str | None = None,
@@ -1056,7 +1053,7 @@ def to_latex(
def to_html(
self,
- buf: FilePathOrBuffer[str] | None = None,
+ buf: FilePath | WriteBuffer[str] | None = None,
encoding: str | None = None,
classes: str | list | tuple | None = None,
notebook: bool = False,
@@ -1069,8 +1066,10 @@ def to_html(
Parameters
----------
- buf : str, Path or StringIO-like, optional, default None
- Buffer to write to. If None, the output is returned as a string.
+ buf : str, path object, file-like object, or None, default None
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a string ``write()`` function. If None, the result is
+ returned as a string.
encoding : str, default “utf-8”
Set character encoding.
classes : str or list-like
@@ -1105,7 +1104,7 @@ def to_html(
def to_string(
self,
- buf: FilePathOrBuffer[str] | None = None,
+ buf: FilePath | WriteBuffer[str] | None = None,
encoding: str | None = None,
line_width: int | None = None,
) -> str | None:
@@ -1114,8 +1113,10 @@ def to_string(
Parameters
----------
- buf : str, Path or StringIO-like, optional, default None
- Buffer to write to. If None, the output is returned as a string.
+ buf : str, path object, file-like object, or None, default None
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a string ``write()`` function. If None, the result is
+ returned as a string.
encoding: str, default “utf-8”
Set character encoding.
line_width : int, optional
@@ -1129,7 +1130,7 @@ def to_string(
def to_csv(
self,
- path_or_buf: FilePathOrBuffer[AnyStr] | None = None,
+ path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
encoding: str | None = None,
sep: str = ",",
columns: Sequence[Hashable] | None = None,
@@ -1189,7 +1190,7 @@ def to_csv(
def save_to_buffer(
string: str,
- buf: FilePathOrBuffer[str] | None = None,
+ buf: FilePath | WriteBuffer[str] | None = None,
encoding: str | None = None,
) -> str | None:
"""
@@ -1203,7 +1204,7 @@ def save_to_buffer(
@contextmanager
-def get_buffer(buf: FilePathOrBuffer[str] | None, encoding: str | None = None):
+def get_buffer(buf: FilePath | WriteBuffer[str] | None, encoding: str | None = None):
"""
Context manager to open, yield and close buffer for filenames or Path-like
objects, otherwise yield buf unchanged.
@@ -2145,7 +2146,7 @@ def get_level_lengths(
return result
-def buffer_put_lines(buf: IO[str], lines: list[str]) -> None:
+def buffer_put_lines(buf: WriteBuffer[str], lines: list[str]) -> None:
"""
Appends lines to a buffer.
diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py
index 64a59778a54f3..ddd2420731028 100644
--- a/pandas/io/formats/info.py
+++ b/pandas/io/formats/info.py
@@ -6,7 +6,6 @@
)
import sys
from typing import (
- IO,
TYPE_CHECKING,
Iterable,
Iterator,
@@ -16,7 +15,10 @@
from pandas._config import get_option
-from pandas._typing import Dtype
+from pandas._typing import (
+ Dtype,
+ WriteBuffer,
+)
from pandas.core.indexes.api import Index
@@ -171,7 +173,7 @@ def size_qualifier(self) -> str:
def render(
self,
*,
- buf: IO[str] | None,
+ buf: WriteBuffer[str] | None,
max_cols: int | None,
verbose: bool | None,
show_counts: bool | None,
@@ -287,7 +289,7 @@ def memory_usage_bytes(self) -> int:
def render(
self,
*,
- buf: IO[str] | None,
+ buf: WriteBuffer[str] | None,
max_cols: int | None,
verbose: bool | None,
show_counts: bool | None,
@@ -306,7 +308,7 @@ class InfoPrinterAbstract:
Class for printing dataframe or series info.
"""
- def to_buffer(self, buf: IO[str] | None = None) -> None:
+ def to_buffer(self, buf: WriteBuffer[str] | None = None) -> None:
"""Save dataframe info into buffer."""
table_builder = self._create_table_builder()
lines = table_builder.get_lines()
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index d91c0bb54f8dc..d9550f0940376 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -21,13 +21,18 @@
from pandas._typing import (
Axis,
- FilePathOrBuffer,
+ FilePath,
IndexLabel,
Level,
Scalar,
+ WriteBuffer,
)
from pandas.compat._optional import import_optional_dependency
-from pandas.util._decorators import doc
+from pandas.util._decorators import (
+ Substitution,
+ doc,
+)
+from pandas.util._exceptions import find_stack_level
import pandas as pd
from pandas import (
@@ -77,6 +82,26 @@ def _mpl(func: Callable):
raise ImportError(no_mpl_message.format(func.__name__))
+####
+# Shared Doc Strings
+
+subset = """
+ subset : label, array-like, IndexSlice, optional
+ A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
+ or single key, to `DataFrame.loc[:, <subset>]` where the columns are
+ prioritised, to limit ``data`` to *before* applying the function.
+"""
+
+props = """
+ props : str, default None
+ CSS properties to use for highlighting. If ``props`` is given, ``color``
+ is not used.
+"""
+
+#
+###
+
+
class Styler(StylerRenderer):
r"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
@@ -134,7 +159,7 @@ class Styler(StylerRenderer):
in cell display string with HTML-safe sequences.
Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
- LaTeX-safe sequences. If not given uses ``pandas.options.styler.format.escape``
+ LaTeX-safe sequences. If not given uses ``pandas.options.styler.format.escape``.
.. versionadded:: 1.3.0
formatter : str, callable, dict, optional
@@ -310,7 +335,7 @@ def render(
warnings.warn(
"this method is deprecated in favour of `Styler.to_html()`",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
if sparse_index is None:
sparse_index = get_option("styler.sparse.index")
@@ -463,7 +488,7 @@ def to_excel(
def to_latex(
self,
- buf: FilePathOrBuffer[str] | None = None,
+ buf: FilePath | WriteBuffer[str] | None = None,
*,
column_format: str | None = None,
position: str | None = None,
@@ -487,8 +512,10 @@ def to_latex(
Parameters
----------
- buf : str, Path, or StringIO-like, optional, default None
- Buffer to write to. If `None`, the output is returned as a string.
+ buf : str, path object, file-like object, or None, default None
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a string ``write()`` function. If None, the result is
+ returned as a string.
column_format : str, optional
The LaTeX column specification placed in location:
@@ -500,7 +527,7 @@ def to_latex(
position : str, optional
The LaTeX positional argument (e.g. 'h!') for tables, placed in location:
- \\begin{table}[<position>]
+ ``\\begin{table}[<position>]``.
position_float : {"centering", "raggedleft", "raggedright"}, optional
The LaTeX float command placed in location:
@@ -892,7 +919,7 @@ def to_latex(
def to_html(
self,
- buf: FilePathOrBuffer[str] | None = None,
+ buf: FilePath | WriteBuffer[str] | None = None,
*,
table_uuid: str | None = None,
table_attributes: str | None = None,
@@ -914,8 +941,10 @@ def to_html(
Parameters
----------
- buf : str, Path, or StringIO-like, optional, default None
- Buffer to write to. If ``None``, the output is returned as a string.
+ buf : str, path object, file-like object, or None, default None
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a string ``write()`` function. If None, the result is
+ returned as a string.
table_uuid : str, optional
Id attribute assigned to the <table> HTML element in the format:
@@ -1301,6 +1330,7 @@ def _apply(
self._update_ctx(result)
return self
+ @Substitution(subset=subset)
def apply(
self,
func: Callable,
@@ -1331,10 +1361,7 @@ def apply(
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
- subset : label, array-like, IndexSlice, optional
- A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
- or single key, to `DataFrame.loc[:, <subset>]` where the columns are
- prioritised, to limit ``data`` to *before* applying the function.
+ %(subset)s
**kwargs : dict
Pass along to ``func``.
@@ -1544,6 +1571,7 @@ def _applymap(
self._update_ctx(result)
return self
+ @Substitution(subset=subset)
def applymap(
self, func: Callable, subset: Subset | None = None, **kwargs
) -> Styler:
@@ -1556,10 +1584,7 @@ def applymap(
----------
func : function
``func`` should take a scalar and return a string.
- subset : label, array-like, IndexSlice, optional
- A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
- or single key, to `DataFrame.loc[:, <subset>]` where the columns are
- prioritised, to limit ``data`` to *before* applying the function.
+ %(subset)s
**kwargs : dict
Pass along to ``func``.
@@ -1608,6 +1633,7 @@ def applymap(
)
return self
+ @Substitution(subset=subset)
def where(
self,
cond: Callable,
@@ -1633,10 +1659,7 @@ def where(
Applied when ``cond`` returns true.
other : str
Applied when ``cond`` returns false.
- subset : label, array-like, IndexSlice, optional
- A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
- or single key, to `DataFrame.loc[:, <subset>]` where the columns are
- prioritised, to limit ``data`` to *before* applying the function.
+ %(subset)s
**kwargs : dict
Pass along to ``cond``.
@@ -1675,7 +1698,7 @@ def where(
warnings.warn(
"this method is deprecated in favour of `Styler.applymap()`",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
if other is None:
@@ -1707,7 +1730,7 @@ def set_precision(self, precision: int) -> StylerRenderer:
warnings.warn(
"this method is deprecated in favour of `Styler.format(precision=..)`",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
self.precision = precision
return self.format(precision=precision, na_rep=self.na_rep)
@@ -2217,7 +2240,7 @@ def set_na_rep(self, na_rep: str) -> StylerRenderer:
warnings.warn(
"this method is deprecated in favour of `Styler.format(na_rep=..)`",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
self.na_rep = na_rep
return self.format(na_rep=na_rep, precision=self.precision)
@@ -2271,7 +2294,7 @@ def hide_index(
warnings.warn(
"this method is deprecated in favour of `Styler.hide(axis='index')`",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
return self.hide(axis=0, level=level, subset=subset, names=names)
@@ -2324,7 +2347,7 @@ def hide_columns(
warnings.warn(
"this method is deprecated in favour of `Styler.hide(axis='columns')`",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
return self.hide(axis=1, level=level, subset=subset, names=names)
@@ -2526,6 +2549,7 @@ def hide(
axis="{0 or 'index', 1 or 'columns', None}",
text_threshold="",
)
+ @Substitution(subset=subset)
def background_gradient(
self,
cmap="PuBu",
@@ -2561,10 +2585,7 @@ def background_gradient(
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
- subset : label, array-like, IndexSlice, optional
- A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
- or single key, to `DataFrame.loc[:, <subset>]` where the columns are
- prioritised, to limit ``data`` to *before* applying the function.
+ %(subset)s
text_color_threshold : float or int
{text_threshold}
Luminance threshold for determining text color in [0, 1]. Facilitates text
@@ -2716,6 +2737,7 @@ def text_gradient(
text_only=True,
)
+ @Substitution(subset=subset)
def set_properties(self, subset: Subset | None = None, **kwargs) -> Styler:
"""
Set defined CSS-properties to each ``<td>`` HTML element within the given
@@ -2723,10 +2745,7 @@ def set_properties(self, subset: Subset | None = None, **kwargs) -> Styler:
Parameters
----------
- subset : label, array-like, IndexSlice, optional
- A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
- or single key, to `DataFrame.loc[:, <subset>]` where the columns are
- prioritised, to limit ``data`` to *before* applying the function.
+ %(subset)s
**kwargs : dict
A dictionary of property, value pairs to be set for each cell.
@@ -2751,6 +2770,7 @@ def set_properties(self, subset: Subset | None = None, **kwargs) -> Styler:
values = "".join([f"{p}: {v};" for p, v in kwargs.items()])
return self.applymap(lambda x: values, subset=subset)
+ @Substitution(subset=subset)
def bar(
self,
subset: Subset | None = None,
@@ -2772,10 +2792,7 @@ def bar(
Parameters
----------
- subset : label, array-like, IndexSlice, optional
- A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
- or single key, to `DataFrame.loc[:, <subset>]` where the columns are
- prioritised, to limit ``data`` to *before* applying the function.
+ %(subset)s
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
@@ -2825,7 +2842,7 @@ def bar(
When None (default): the maximum value of the data will be used.
props : str, optional
The base CSS of the cell that is extended to add the bar chart. Defaults to
- `"width: 10em;"`
+ `"width: 10em;"`.
.. versionadded:: 1.4.0
@@ -2876,6 +2893,7 @@ def bar(
return self
+ @Substitution(subset=subset, props=props)
def highlight_null(
self,
null_color: str = "red",
@@ -2888,17 +2906,9 @@ def highlight_null(
Parameters
----------
null_color : str, default 'red'
- subset : label, array-like, IndexSlice, optional
- A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
- or single key, to `DataFrame.loc[:, <subset>]` where the columns are
- prioritised, to limit ``data`` to *before* applying the function.
-
+ %(subset)s
.. versionadded:: 1.1.0
-
- props : str, default None
- CSS properties to use for highlighting. If ``props`` is given, ``color``
- is not used.
-
+ %(props)s
.. versionadded:: 1.3.0
Returns
@@ -2920,6 +2930,7 @@ def f(data: DataFrame, props: str) -> np.ndarray:
props = f"background-color: {null_color};"
return self.apply(f, axis=None, subset=subset, props=props)
+ @Substitution(subset=subset, props=props)
def highlight_max(
self,
subset: Subset | None = None,
@@ -2932,20 +2943,14 @@ def highlight_max(
Parameters
----------
- subset : label, array-like, IndexSlice, optional
- A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
- or single key, to `DataFrame.loc[:, <subset>]` where the columns are
- prioritised, to limit ``data`` to *before* applying the function.
+ %(subset)s
color : str, default 'yellow'
Background color to use for highlighting.
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
- props : str, default None
- CSS properties to use for highlighting. If ``props`` is given, ``color``
- is not used.
-
+ %(props)s
.. versionadded:: 1.3.0
Returns
@@ -2969,6 +2974,7 @@ def highlight_max(
props=props,
)
+ @Substitution(subset=subset, props=props)
def highlight_min(
self,
subset: Subset | None = None,
@@ -2981,20 +2987,14 @@ def highlight_min(
Parameters
----------
- subset : label, array-like, IndexSlice, optional
- A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
- or single key, to `DataFrame.loc[:, <subset>]` where the columns are
- prioritised, to limit ``data`` to *before* applying the function.
+ %(subset)s
color : str, default 'yellow'
Background color to use for highlighting.
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
- props : str, default None
- CSS properties to use for highlighting. If ``props`` is given, ``color``
- is not used.
-
+ %(props)s
.. versionadded:: 1.3.0
Returns
@@ -3018,6 +3018,7 @@ def highlight_min(
props=props,
)
+ @Substitution(subset=subset, props=props)
def highlight_between(
self,
subset: Subset | None = None,
@@ -3035,10 +3036,7 @@ def highlight_between(
Parameters
----------
- subset : label, array-like, IndexSlice, optional
- A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
- or single key, to `DataFrame.loc[:, <subset>]` where the columns are
- prioritised, to limit ``data`` to *before* applying the function.
+ %(subset)s
color : str, default 'yellow'
Background color to use for highlighting.
axis : {0 or 'index', 1 or 'columns', None}, default 0
@@ -3050,10 +3048,7 @@ def highlight_between(
Right bound for defining the range.
inclusive : {'both', 'neither', 'left', 'right'}
Identify whether bounds are closed or open.
- props : str, default None
- CSS properties to use for highlighting. If ``props`` is given, ``color``
- is not used.
-
+ %(props)s
Returns
-------
self : Styler
@@ -3127,6 +3122,7 @@ def highlight_between(
inclusive=inclusive,
)
+ @Substitution(subset=subset, props=props)
def highlight_quantile(
self,
subset: Subset | None = None,
@@ -3145,12 +3141,9 @@ def highlight_quantile(
Parameters
----------
- subset : label, array-like, IndexSlice, optional
- A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
- or single key, to `DataFrame.loc[:, <subset>]` where the columns are
- prioritised, to limit ``data`` to *before* applying the function.
+ %(subset)s
color : str, default 'yellow'
- Background color to use for highlighting
+ Background color to use for highlighting.
axis : {0 or 'index', 1 or 'columns', None}, default 0
Axis along which to determine and highlight quantiles. If ``None`` quantiles
are measured over the entire DataFrame. See examples.
@@ -3163,10 +3156,7 @@ def highlight_quantile(
quantile estimation.
inclusive : {'both', 'neither', 'left', 'right'}
Identify whether quantile bounds are closed or open.
- props : str, default None
- CSS properties to use for highlighting. If ``props`` is given, ``color``
- is not used.
-
+ %(props)s
Returns
-------
self : Styler
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index ae4e05160e70a..de475d145f3a0 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -57,17 +57,23 @@ class CSSDict(TypedDict):
Subset = Union[slice, Sequence, Index]
+def _gl01_adjust(obj: Any) -> Any:
+ """Adjust docstrings for Numpydoc GLO1."""
+ obj.__doc__ = "\n" + obj.__doc__
+ return obj
+
+
class StylerRenderer:
"""
Base class to process rendering a Styler with a specified jinja2 template.
"""
- loader = jinja2.PackageLoader("pandas", "io/formats/templates")
- env = jinja2.Environment(loader=loader, trim_blocks=True)
- template_html = env.get_template("html.tpl")
- template_html_table = env.get_template("html_table.tpl")
- template_html_style = env.get_template("html_style.tpl")
- template_latex = env.get_template("latex.tpl")
+ loader = _gl01_adjust(jinja2.PackageLoader("pandas", "io/formats/templates"))
+ env = _gl01_adjust(jinja2.Environment(loader=loader, trim_blocks=True))
+ template_html = _gl01_adjust(env.get_template("html.tpl"))
+ template_html_table = _gl01_adjust(env.get_template("html_table.tpl"))
+ template_html_style = _gl01_adjust(env.get_template("html_style.tpl"))
+ template_latex = _gl01_adjust(env.get_template("latex.tpl"))
def __init__(
self,
@@ -817,12 +823,12 @@ def format(
.. versionadded:: 1.3.0
decimal : str, default "."
- Character used as decimal separator for floats, complex and integers
+ Character used as decimal separator for floats, complex and integers.
.. versionadded:: 1.3.0
thousands : str, optional, default None
- Character used as thousands separator for floats, complex and integers
+ Character used as thousands separator for floats, complex and integers.
.. versionadded:: 1.3.0
@@ -1011,9 +1017,9 @@ def format_index(
Floating point precision to use for display purposes, if not determined by
the specified ``formatter``.
decimal : str, default "."
- Character used as decimal separator for floats, complex and integers
+ Character used as decimal separator for floats, complex and integers.
thousands : str, optional, default None
- Character used as thousands separator for floats, complex and integers
+ Character used as thousands separator for floats, complex and integers.
escape : str, optional
Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
in cell display string with HTML-safe sequences.
diff --git a/pandas/io/formats/xml.py b/pandas/io/formats/xml.py
index ea7d1dfa1645e..b997cd9bddd1e 100644
--- a/pandas/io/formats/xml.py
+++ b/pandas/io/formats/xml.py
@@ -9,8 +9,10 @@
from pandas._typing import (
CompressionOptions,
- FilePathOrBuffer,
+ FilePath,
+ ReadBuffer,
StorageOptions,
+ WriteBuffer,
)
from pandas.errors import AbstractMethodError
@@ -90,7 +92,7 @@ class BaseXMLFormatter:
def __init__(
self,
frame: DataFrame,
- path_or_buffer: FilePathOrBuffer | None = None,
+ path_or_buffer: FilePath | WriteBuffer[bytes] | None = None,
index: bool | None = True,
root_name: str | None = "data",
row_name: str | None = "row",
@@ -102,7 +104,7 @@ def __init__(
encoding: str = "utf-8",
xml_declaration: bool | None = True,
pretty_print: bool | None = True,
- stylesheet: FilePathOrBuffer | None = None,
+ stylesheet: FilePath | ReadBuffer[str] | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> None:
@@ -272,7 +274,7 @@ def write_output(self) -> str | None:
storage_options=self.storage_options,
is_text=False,
) as handles:
- handles.handle.write(xml_doc) # type: ignore[arg-type]
+ handles.handle.write(xml_doc)
return None
else:
@@ -582,7 +584,6 @@ def transform_doc(self) -> bytes:
conditionally by its specific object type, then transforms
original tree with XSLT script.
"""
-
from lxml.etree import (
XSLT,
XMLParser,
@@ -591,6 +592,7 @@ def transform_doc(self) -> bytes:
)
style_doc = self.stylesheet
+ assert style_doc is not None # is ensured by caller
handle_data = get_data_from_filepath(
filepath_or_buffer=style_doc,
diff --git a/pandas/io/html.py b/pandas/io/html.py
index cffe910f1c8ff..7985dcbec9672 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -14,7 +14,10 @@
Sequence,
)
-from pandas._typing import FilePathOrBuffer
+from pandas._typing import (
+ FilePath,
+ ReadBuffer,
+)
from pandas.compat._optional import import_optional_dependency
from pandas.errors import (
AbstractMethodError,
@@ -119,18 +122,21 @@ def _get_skiprows(skiprows: int | Sequence[int] | slice | None):
raise TypeError(f"{type(skiprows).__name__} is not a valid type for skipping rows")
-def _read(obj: bytes | FilePathOrBuffer, encoding: str | None) -> str | bytes:
+def _read(
+ obj: bytes | FilePath | ReadBuffer[str] | ReadBuffer[bytes], encoding: str | None
+) -> str | bytes:
"""
Try to read from a url, file or string.
Parameters
----------
- obj : str, unicode, or file-like
+ obj : str, unicode, path object, or file-like object
Returns
-------
raw_text : str
"""
+ text: str | bytes
if (
is_url(obj)
or hasattr(obj, "read")
@@ -148,9 +154,7 @@ def _read(obj: bytes | FilePathOrBuffer, encoding: str | None) -> str | bytes:
text = obj
else:
raise TypeError(f"Cannot read object of type '{type(obj).__name__}'")
- # error: Incompatible return value type (got "Union[Any, bytes, None, str]",
- # expected "Union[str, bytes]")
- return text # type: ignore[return-value]
+ return text
class _HtmlFrameParser:
@@ -211,7 +215,7 @@ class _HtmlFrameParser:
def __init__(
self,
- io: FilePathOrBuffer,
+ io: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
match: str | Pattern,
attrs: dict[str, str] | None,
encoding: str,
@@ -944,7 +948,7 @@ def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs):
@deprecate_nonkeyword_arguments(version="2.0")
def read_html(
- io: FilePathOrBuffer,
+ io: FilePath | ReadBuffer[str],
match: str | Pattern = ".+",
flavor: str | None = None,
header: int | Sequence[int] | None = None,
@@ -965,8 +969,10 @@ def read_html(
Parameters
----------
- io : str, path object or file-like object
- A URL, a file-like object, or a raw string containing HTML. Note that
+ io : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a string ``read()`` function.
+ The string can represent a URL or the HTML itself. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you might try removing the ``'s'``.
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index b9bdfb91ca154..8c44b54e75a3f 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -661,7 +661,7 @@ def __init__(
self.nrows_seen = 0
self.nrows = nrows
self.encoding_errors = encoding_errors
- self.handles: IOHandles | None = None
+ self.handles: IOHandles[str] | None = None
if self.chunksize is not None:
self.chunksize = validate_integer("chunksize", self.chunksize, 1)
diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py
index 90fd5d077d031..36a7949a9f1e3 100644
--- a/pandas/io/json/_normalize.py
+++ b/pandas/io/json/_normalize.py
@@ -389,6 +389,8 @@ def _pull_field(
try:
if isinstance(spec, list):
for field in spec:
+ if result is None:
+ raise KeyError(field)
result = result[field]
else:
result = result[spec]
@@ -515,7 +517,11 @@ def _recursive_extract(data, path, seen_meta, level=0):
result = DataFrame(records)
if record_prefix is not None:
- result = result.rename(columns=lambda x: f"{record_prefix}{x}")
+ # Incompatible types in assignment (expression has type "Optional[DataFrame]",
+ # variable has type "DataFrame")
+ result = result.rename( # type: ignore[assignment]
+ columns=lambda x: f"{record_prefix}{x}"
+ )
# Data types, a problem
for k, v in meta_vals.items():
diff --git a/pandas/io/orc.py b/pandas/io/orc.py
index 6bdb4df806b5c..6dd4de597c29d 100644
--- a/pandas/io/orc.py
+++ b/pandas/io/orc.py
@@ -3,7 +3,10 @@
from typing import TYPE_CHECKING
-from pandas._typing import FilePathOrBuffer
+from pandas._typing import (
+ FilePath,
+ ReadBuffer,
+)
from pandas.compat._optional import import_optional_dependency
from pandas.io.common import get_handle
@@ -13,7 +16,7 @@
def read_orc(
- path: FilePathOrBuffer, columns: list[str] | None = None, **kwargs
+ path: FilePath | ReadBuffer[bytes], columns: list[str] | None = None, **kwargs
) -> DataFrame:
"""
Load an ORC object from the file path, returning a DataFrame.
@@ -22,18 +25,12 @@ def read_orc(
Parameters
----------
- path : str, path object or file-like object
- Any valid string path is acceptable. The string could be a URL. Valid
- URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ path : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``read()`` function. The string could be a URL.
+ Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.orc``.
-
- If you want to pass in a path object, pandas accepts any
- ``os.PathLike``.
-
- By file-like object, we refer to objects with a ``read()`` method,
- such as a file handle (e.g. via builtin ``open`` function)
- or ``StringIO``.
columns : list, default None
If not None, only these columns will be read from the file.
**kwargs
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index e92afd4e35ca1..56131d000b176 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -3,15 +3,14 @@
import io
import os
-from typing import (
- Any,
- AnyStr,
-)
+from typing import Any
from warnings import catch_warnings
from pandas._typing import (
- FilePathOrBuffer,
+ FilePath,
+ ReadBuffer,
StorageOptions,
+ WriteBuffer,
)
from pandas.compat._optional import import_optional_dependency
from pandas.errors import AbstractMethodError
@@ -69,12 +68,14 @@ def get_engine(engine: str) -> BaseImpl:
def _get_path_or_handle(
- path: FilePathOrBuffer,
+ path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],
fs: Any,
storage_options: StorageOptions = None,
mode: str = "rb",
is_dir: bool = False,
-) -> tuple[FilePathOrBuffer, IOHandles | None, Any]:
+) -> tuple[
+ FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any
+]:
"""File handling for PyArrow."""
path_or_handle = stringify_path(path)
if is_fsspec_url(path_or_handle) and fs is None:
@@ -150,14 +151,14 @@ def __init__(self):
import pyarrow.parquet
# import utils to register the pyarrow extension types
- import pandas.core.arrays._arrow_utils # noqa
+ import pandas.core.arrays._arrow_utils # noqa:F401
self.api = pyarrow
def write(
self,
df: DataFrame,
- path: FilePathOrBuffer[AnyStr],
+ path: FilePath | WriteBuffer[bytes],
compression: str | None = "snappy",
index: bool | None = None,
storage_options: StorageOptions = None,
@@ -353,7 +354,7 @@ def read(
@doc(storage_options=generic._shared_docs["storage_options"])
def to_parquet(
df: DataFrame,
- path: FilePathOrBuffer | None = None,
+ path: FilePath | WriteBuffer[bytes] | None = None,
engine: str = "auto",
compression: str | None = "snappy",
index: bool | None = None,
@@ -367,13 +368,12 @@ def to_parquet(
Parameters
----------
df : DataFrame
- path : str or file-like object, default None
- If a string, it will be used as Root Directory path
- when writing a partitioned dataset. By file-like object,
- we refer to objects with a write() method, such as a file handle
- (e.g. via builtin open function) or io.BytesIO. The engine
- fastparquet does not accept file-like objects. If path is None,
- a bytes object is returned.
+ path : str, path object, file-like object, or None, default None
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``write()`` function. If None, the result is
+ returned as bytes. If a string, it will be used as Root Directory path
+ when writing a partitioned dataset. The engine fastparquet does not
+ accept file-like objects.
.. versionchanged:: 1.2.0
@@ -415,7 +415,7 @@ def to_parquet(
partition_cols = [partition_cols]
impl = get_engine(engine)
- path_or_buf: FilePathOrBuffer = io.BytesIO() if path is None else path
+ path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path
impl.write(
df,
@@ -449,21 +449,15 @@ def read_parquet(
Parameters
----------
path : str, path object or file-like object
- Any valid string path is acceptable. The string could be a URL. Valid
- URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
- expected. A local file could be:
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``read()`` function.
+ The string could be a URL. Valid URL schemes include http, ftp, s3,
+ gs, and file. For file URLs, a host is expected. A local file could be:
``file://localhost/path/to/table.parquet``.
A file URL can also be a path to a directory that contains multiple
partitioned parquet files. Both pyarrow and fastparquet support
paths to directories as well as file URLs. A directory path could be:
- ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``
-
- If you want to pass in a path object, pandas accepts any
- ``os.PathLike``.
-
- By file-like object, we refer to objects with a ``read()`` method,
- such as a file handle (e.g. via builtin ``open`` function)
- or ``StringIO``.
+ ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``.
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
diff --git a/pandas/io/parsers/arrow_parser_wrapper.py b/pandas/io/parsers/arrow_parser_wrapper.py
index 5b1b178c4f610..9fbeeb74901ef 100644
--- a/pandas/io/parsers/arrow_parser_wrapper.py
+++ b/pandas/io/parsers/arrow_parser_wrapper.py
@@ -1,6 +1,9 @@
from __future__ import annotations
-from pandas._typing import FilePathOrBuffer
+from pandas._typing import (
+ FilePath,
+ ReadBuffer,
+)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.inference import is_integer
@@ -16,7 +19,7 @@ class ArrowParserWrapper(ParserBase):
Wrapper for the pyarrow engine for read_csv()
"""
- def __init__(self, src: FilePathOrBuffer, **kwds):
+ def __init__(self, src: FilePath | ReadBuffer[bytes], **kwds):
self.kwds = kwds
self.src = src
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py
index 339585810bec1..4779efc342487 100644
--- a/pandas/io/parsers/base_parser.py
+++ b/pandas/io/parsers/base_parser.py
@@ -1,6 +1,7 @@
from __future__ import annotations
from collections import defaultdict
+from copy import copy
import csv
import datetime
from enum import Enum
@@ -26,12 +27,14 @@
from pandas._typing import (
ArrayLike,
DtypeArg,
- FilePathOrBuffer,
+ FilePath,
+ ReadCsvBuffer,
)
from pandas.errors import (
ParserError,
ParserWarning,
)
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
@@ -137,6 +140,7 @@ def __init__(self, kwds):
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
+ self._parse_date_cols: Iterable = []
self.date_parser = kwds.pop("date_parser", None)
self.dayfirst = kwds.pop("dayfirst", False)
self.keep_date_col = kwds.pop("keep_date_col", False)
@@ -146,6 +150,8 @@ def __init__(self, kwds):
self.na_filter = kwds.get("na_filter", False)
self.keep_default_na = kwds.get("keep_default_na", True)
+ self.dtype = copy(kwds.get("dtype", None))
+
self.true_values = kwds.get("true_values")
self.false_values = kwds.get("false_values")
self.mangle_dupe_cols = kwds.get("mangle_dupe_cols", True)
@@ -211,13 +217,17 @@ def __init__(self, kwds):
self.usecols, self.usecols_dtype = self._validate_usecols_arg(kwds["usecols"])
- self.handles: IOHandles | None = None
+ self.handles: IOHandles[str] | None = None
# Fallback to error to pass a sketchy test(test_override_set_noconvert_columns)
# Normally, this arg would get pre-processed earlier on
self.on_bad_lines = kwds.get("on_bad_lines", self.BadLineHandleMethod.ERROR)
- def _open_handles(self, src: FilePathOrBuffer, kwds: dict[str, Any]) -> None:
+ def _open_handles(
+ self,
+ src: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ kwds: dict[str, Any],
+ ) -> None:
"""
Let the readers open IOHandles after they are done with their potential raises.
"""
@@ -231,7 +241,7 @@ def _open_handles(self, src: FilePathOrBuffer, kwds: dict[str, Any]) -> None:
errors=kwds.get("encoding_errors", "strict"),
)
- def _validate_parse_dates_presence(self, columns: list[str]) -> None:
+ def _validate_parse_dates_presence(self, columns: list[str]) -> Iterable:
"""
Check if parse_dates are in columns.
@@ -243,6 +253,11 @@ def _validate_parse_dates_presence(self, columns: list[str]) -> None:
columns : list
List of names of the dataframe.
+ Returns
+ -------
+ The names of the columns which will get parsed later if a dict or list
+ is given as specification.
+
Raises
------
ValueError
@@ -259,11 +274,14 @@ def _validate_parse_dates_presence(self, columns: list[str]) -> None:
# ParseDates = Union[DateGroups, List[DateGroups],
# Dict[ColReference, DateGroups]]
cols_needed = itertools.chain.from_iterable(
- col if is_list_like(col) else [col] for col in self.parse_dates
+ col if is_list_like(col) and not isinstance(col, tuple) else [col]
+ for col in self.parse_dates
)
else:
cols_needed = []
+ cols_needed = list(cols_needed)
+
# get only columns that are references using names (str), not by index
missing_cols = ", ".join(
sorted(
@@ -278,6 +296,11 @@ def _validate_parse_dates_presence(self, columns: list[str]) -> None:
raise ValueError(
f"Missing column provided to 'parse_dates': '{missing_cols}'"
)
+ # Convert positions to actual column names
+ return [
+ col if (isinstance(col, str) or col in columns) else columns[col]
+ for col in cols_needed
+ ]
def close(self):
if self.handles is not None:
@@ -491,6 +514,19 @@ def _get_name(icol):
return index
+ def _clean_mapping(self, mapping):
+ """converts col numbers to names"""
+ if not isinstance(mapping, dict):
+ return mapping
+ clean = {}
+ for col, v in mapping.items():
+ # for mypy
+ assert self.orig_names is not None
+ if isinstance(col, int) and col not in self.orig_names:
+ col = self.orig_names[col]
+ clean[col] = v
+ return clean
+
@final
def _agg_index(self, index, try_parse_dates: bool = True) -> Index:
arrays = []
@@ -515,7 +551,17 @@ def _agg_index(self, index, try_parse_dates: bool = True) -> Index:
col_name, self.na_values, self.na_fvalues, self.keep_default_na
)
- arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
+ clean_dtypes = self._clean_mapping(self.dtype)
+
+ cast_type = None
+ if isinstance(clean_dtypes, dict) and self.index_names is not None:
+ cast_type = clean_dtypes.get(self.index_names[i], None)
+
+ try_num_bool = not (cast_type and is_string_dtype(cast_type))
+
+ arr, _ = self._infer_types(
+ arr, col_na_values | col_na_fvalues, try_num_bool
+ )
arrays.append(arr)
names = self.index_names
@@ -549,6 +595,14 @@ def _convert_to_ndarrays(
else:
col_na_values, col_na_fvalues = set(), set()
+ if c in self._parse_date_cols:
+ # GH#26203 Do not convert columns which get converted to dates
+ # but replace nans to ensure to_datetime works
+ mask = algorithms.isin(values, set(col_na_values) | col_na_fvalues)
+ np.putmask(values, mask, np.nan)
+ result[c] = values
+ continue
+
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
@@ -558,7 +612,7 @@ def _convert_to_ndarrays(
f"for column {c} - only the converter will be used."
),
ParserWarning,
- stacklevel=7,
+ stacklevel=find_stack_level(),
)
try:
@@ -695,10 +749,10 @@ def _infer_types(self, values, na_values, try_num_bool=True):
"""
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
- # error: Argument 2 to "isin" has incompatible type "List[Any]"; expected
- # "Union[Union[ExtensionArray, ndarray], Index, Series]"
- mask = algorithms.isin(values, list(na_values)) # type: ignore[arg-type]
- na_count = mask.sum()
+ # If our array has numeric dtype, we don't have to check for strings in isin
+ na_values = np.array([val for val in na_values if not isinstance(val, str)])
+ mask = algorithms.isin(values, na_values)
+ na_count = mask.astype("uint8", copy=False).sum()
if na_count > 0:
if is_integer_dtype(values):
values = values.astype(np.float64)
@@ -830,7 +884,7 @@ def _check_data_length(self, columns: list[str], data: list[ArrayLike]) -> None:
"Length of header or names does not match length of data. This leads "
"to a loss of data with index_col=False.",
ParserWarning,
- stacklevel=6,
+ stacklevel=find_stack_level(),
)
def _evaluate_usecols(self, usecols, names):
@@ -1091,7 +1145,7 @@ def _isindex(colspec):
if isinstance(parse_spec, list):
# list of column lists
for colspec in parse_spec:
- if is_scalar(colspec):
+ if is_scalar(colspec) or isinstance(colspec, tuple):
if isinstance(colspec, int) and colspec not in data_dict:
colspec = orig_names[colspec]
if _isindex(colspec):
@@ -1146,7 +1200,11 @@ def _try_convert_dates(parser: Callable, colspec, data_dict, columns):
else:
colnames.append(c)
- new_name = "_".join([str(x) for x in colnames])
+ new_name: tuple | str
+ if all(isinstance(x, tuple) for x in colnames):
+ new_name = tuple(map("_".join, zip(*colnames)))
+ else:
+ new_name = "_".join([str(x) for x in colnames])
to_parse = [np.asarray(data_dict[c]) for c in colnames if c in data_dict]
new_col = parser(*to_parse)
diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py
index 352dd998dda0f..e96df3b3f3782 100644
--- a/pandas/io/parsers/c_parser_wrapper.py
+++ b/pandas/io/parsers/c_parser_wrapper.py
@@ -7,9 +7,11 @@
import pandas._libs.parsers as parsers
from pandas._typing import (
ArrayLike,
- FilePathOrBuffer,
+ FilePath,
+ ReadCsvBuffer,
)
from pandas.errors import DtypeWarning
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_categorical_dtype,
@@ -30,7 +32,9 @@ class CParserWrapper(ParserBase):
low_memory: bool
_reader: parsers.TextReader
- def __init__(self, src: FilePathOrBuffer, **kwds):
+ def __init__(
+ self, src: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], **kwds
+ ):
self.kwds = kwds
kwds = kwds.copy()
ParserBase.__init__(self, kwds)
@@ -387,7 +391,7 @@ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
f"Specify dtype option on import or set low_memory=False."
]
)
- warnings.warn(warning_message, DtypeWarning, stacklevel=8)
+ warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level())
return result
diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py
index b0e868b260369..9c2e7ddb2d397 100644
--- a/pandas/io/parsers/python_parser.py
+++ b/pandas/io/parsers/python_parser.py
@@ -4,7 +4,6 @@
abc,
defaultdict,
)
-from copy import copy
import csv
from io import StringIO
import re
@@ -19,11 +18,16 @@
import numpy as np
import pandas._libs.lib as lib
-from pandas._typing import FilePathOrBuffer
+from pandas._typing import (
+ FilePath,
+ ReadCsvBuffer,
+ Scalar,
+)
from pandas.errors import (
EmptyDataError,
ParserError,
)
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import is_integer
from pandas.core.dtypes.inference import is_dict_like
@@ -41,7 +45,9 @@
class PythonParser(ParserBase):
- def __init__(self, f: FilePathOrBuffer | list, **kwds):
+ def __init__(
+ self, f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list, **kwds
+ ):
"""
Workhorse function for processing nested list into DataFrame
"""
@@ -82,7 +88,6 @@ def __init__(self, f: FilePathOrBuffer | list, **kwds):
self.verbose = kwds["verbose"]
self.converters = kwds["converters"]
- self.dtype = copy(kwds["dtype"])
self.thousands = kwds["thousands"]
self.decimal = kwds["decimal"]
@@ -145,7 +150,7 @@ def __init__(self, f: FilePathOrBuffer | list, **kwds):
if self._col_indices is None:
self._col_indices = list(range(len(self.columns)))
- self._validate_parse_dates_presence(self.columns)
+ self._parse_date_cols = self._validate_parse_dates_presence(self.columns)
no_thousands_columns: set[int] | None = None
if self.parse_dates:
no_thousands_columns = self._set_noconvert_dtype_columns(
@@ -270,9 +275,9 @@ def read(self, rows=None):
alldata = self._rows_to_cols(content)
data, columns = self._exclude_implicit_index(alldata)
+ data = self._convert_data(data)
columns, data = self._do_date_conversions(columns, data)
- data = self._convert_data(data)
index, columns = self._make_index(data, alldata, columns, indexnamerow)
return index, columns, data
@@ -301,21 +306,8 @@ def get_chunk(self, size=None):
def _convert_data(self, data):
# apply converters
- def _clean_mapping(mapping):
- """converts col numbers to names"""
- clean = {}
- for col, v in mapping.items():
- if isinstance(col, int) and col not in self.orig_names:
- col = self.orig_names[col]
- clean[col] = v
- return clean
-
- clean_conv = _clean_mapping(self.converters)
- if not isinstance(self.dtype, dict):
- # handles single dtype applied to all columns
- clean_dtypes = self.dtype
- else:
- clean_dtypes = _clean_mapping(self.dtype)
+ clean_conv = self._clean_mapping(self.converters)
+ clean_dtypes = self._clean_mapping(self.dtype)
# Apply NA values.
clean_na_values = {}
@@ -555,7 +547,7 @@ def _handle_usecols(
"Defining usecols with out of bounds indices is deprecated "
"and will raise a ParserError in a future version.",
FutureWarning,
- stacklevel=8,
+ stacklevel=find_stack_level(),
)
col_indices = self.usecols
@@ -1019,14 +1011,7 @@ def _get_lines(self, rows=None):
new_rows = self.data[self.pos : self.pos + rows]
new_pos = self.pos + rows
- # Check for stop rows. n.b.: self.skiprows is a set.
- if self.skiprows:
- new_rows = [
- row
- for i, row in enumerate(new_rows)
- if not self.skipfunc(i + self.pos)
- ]
-
+ new_rows = self._remove_skipped_rows(new_rows)
lines.extend(new_rows)
self.pos = new_pos
@@ -1034,11 +1019,22 @@ def _get_lines(self, rows=None):
new_rows = []
try:
if rows is not None:
- for _ in range(rows):
+
+ rows_to_skip = 0
+ if self.skiprows is not None and self.pos is not None:
+ # Only read additional rows if pos is in skiprows
+ rows_to_skip = len(
+ set(self.skiprows) - set(range(self.pos))
+ )
+
+ for _ in range(rows + rows_to_skip):
# assert for mypy, data is Iterator[str] or None, would
# error in next
assert self.data is not None
new_rows.append(next(self.data))
+
+ len_new_rows = len(new_rows)
+ new_rows = self._remove_skipped_rows(new_rows)
lines.extend(new_rows)
else:
rows = 0
@@ -1049,18 +1045,15 @@ def _get_lines(self, rows=None):
if new_row is not None:
new_rows.append(new_row)
+ len_new_rows = len(new_rows)
except StopIteration:
- if self.skiprows:
- new_rows = [
- row
- for i, row in enumerate(new_rows)
- if not self.skipfunc(i + self.pos)
- ]
+ len_new_rows = len(new_rows)
+ new_rows = self._remove_skipped_rows(new_rows)
lines.extend(new_rows)
if len(lines) == 0:
raise
- self.pos += len(new_rows)
+ self.pos += len_new_rows
self.buf = []
else:
@@ -1075,6 +1068,13 @@ def _get_lines(self, rows=None):
lines = self._check_thousands(lines)
return self._check_decimal(lines)
+ def _remove_skipped_rows(self, new_rows: list[list[Scalar]]) -> list[list[Scalar]]:
+ if self.skiprows:
+ return [
+ row for i, row in enumerate(new_rows) if not self.skipfunc(i + self.pos)
+ ]
+ return new_rows
+
class FixedWidthReader(abc.Iterator):
"""
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index 6d3cc84a31d05..2ca9be3ec097a 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -17,7 +17,8 @@
from pandas._typing import (
ArrayLike,
DtypeArg,
- FilePathOrBuffer,
+ FilePath,
+ ReadCsvBuffer,
StorageOptions,
)
from pandas.errors import (
@@ -104,7 +105,7 @@
List of column names to use. If the file contains a header row,
then you should explicitly pass ``header=0`` to override the column names.
Duplicates in this list are not allowed.
-index_col : int, str, sequence of int / str, or False, default ``None``
+index_col : int, str, sequence of int / str, or False, optional, default ``None``
Column(s) to use as the row labels of the ``DataFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
@@ -116,7 +117,8 @@
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
- inferred from the document header row(s). For example, a valid list-like
+ inferred from the document header row(s). If ``names`` are given, the document
+ header row(s) are not taken into account. For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a DataFrame from ``data`` with element order preserved use
@@ -331,7 +333,7 @@
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more details.
-error_bad_lines : bool, default ``None``
+error_bad_lines : bool, optional, default ``None``
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
If False, then these "bad lines" will be dropped from the DataFrame that is
@@ -340,7 +342,7 @@
.. deprecated:: 1.3.0
The ``on_bad_lines`` parameter should be used instead to specify behavior upon
encountering a bad line instead.
-warn_bad_lines : bool, default ``None``
+warn_bad_lines : bool, optional, default ``None``
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
@@ -504,11 +506,19 @@ def _validate_names(names):
raise ValueError("Names should be an ordered collection.")
-def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
+def _read(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], kwds
+):
"""Generic reader of line files."""
- if kwds.get("date_parser", None) is not None:
- if isinstance(kwds["parse_dates"], bool):
- kwds["parse_dates"] = True
+ # if we pass a date_parser and parse_dates=False, we should not parse the
+ # dates GH#44366
+ if (
+ kwds.get("date_parser", None) is not None
+ and kwds.get("parse_dates", None) is None
+ ):
+ kwds["parse_dates"] = True
+ elif kwds.get("parse_dates", None) is None:
+ kwds["parse_dates"] = False
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get("iterator", False)
@@ -553,7 +563,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
)
)
def read_csv(
- filepath_or_buffer: FilePathOrBuffer,
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
@@ -581,7 +591,7 @@ def read_csv(
verbose=False,
skip_blank_lines=True,
# Datetime Handling
- parse_dates=False,
+ parse_dates=None,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
@@ -651,7 +661,7 @@ def read_csv(
)
)
def read_table(
- filepath_or_buffer: FilePathOrBuffer,
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
@@ -738,7 +748,7 @@ def read_table(
def read_fwf(
- filepath_or_buffer: FilePathOrBuffer,
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
colspecs="infer",
widths=None,
infer_nrows=100,
@@ -755,18 +765,12 @@ def read_fwf(
Parameters
----------
- filepath_or_buffer : str, path object or file-like object
- Any valid string path is acceptable. The string could be a URL. Valid
- URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ filepath_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a text ``read()`` function.The string could be a URL.
+ Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.csv``.
-
- If you want to pass in a path object, pandas accepts any
- ``os.PathLike``.
-
- By file-like object, we refer to objects with a ``read()`` method,
- such as a file handle (e.g. via builtin ``open`` function)
- or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer'. optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
@@ -941,10 +945,10 @@ def _get_options_with_defaults(self, engine):
def _check_file_or_buffer(self, f, engine):
# see gh-16530
- if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
- # The C engine doesn't need the file-like to have the "__next__"
- # attribute. However, the Python engine explicitly calls
- # "__next__(...)" when iterating through such an object, meaning it
+ if is_file_like(f) and engine != "c" and not hasattr(f, "__iter__"):
+ # The C engine doesn't need the file-like to have the "__iter__"
+ # attribute. However, the Python engine needs "__iter__(...)"
+ # when iterating through such an object, meaning it
# needs to have that attribute
raise ValueError(
"The 'python' engine cannot iterate through this file buffer."
@@ -1041,7 +1045,7 @@ def _clean_options(self, options, engine):
"engine='python'."
),
ParserWarning,
- stacklevel=5,
+ stacklevel=find_stack_level(),
)
index_col = options["index_col"]
@@ -1573,7 +1577,9 @@ def _merge_with_dialect_properties(
conflict_msgs.append(msg)
if conflict_msgs:
- warnings.warn("\n\n".join(conflict_msgs), ParserWarning, stacklevel=2)
+ warnings.warn(
+ "\n\n".join(conflict_msgs), ParserWarning, stacklevel=find_stack_level()
+ )
kwds[param] = dialect_val
return kwds
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 6a91c12ee286e..8bd0942550e6e 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -1,12 +1,16 @@
""" pickle compat """
+from __future__ import annotations
+
import pickle
from typing import Any
import warnings
from pandas._typing import (
CompressionOptions,
- FilePathOrBuffer,
+ FilePath,
+ ReadPickleBuffer,
StorageOptions,
+ WriteBuffer,
)
from pandas.compat import pickle_compat as pc
from pandas.util._decorators import doc
@@ -19,7 +23,7 @@
@doc(storage_options=generic._shared_docs["storage_options"])
def to_pickle(
obj: Any,
- filepath_or_buffer: FilePathOrBuffer,
+ filepath_or_buffer: FilePath | WriteBuffer[bytes],
compression: CompressionOptions = "infer",
protocol: int = pickle.HIGHEST_PROTOCOL,
storage_options: StorageOptions = None,
@@ -31,8 +35,9 @@ def to_pickle(
----------
obj : any object
Any python object.
- filepath_or_buffer : str, path object or file-like object
- File path, URL, or buffer where the pickled object will be stored.
+ filepath_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``write()`` function.
.. versionchanged:: 1.0.0
Accept URL. URL has to be of S3 or GCS.
@@ -103,26 +108,15 @@ def to_pickle(
# pickle create the entire object and then write it to the buffer.
# "zip" would also be here if pandas.io.common._BytesZipFile
# wouldn't buffer write calls
- handles.handle.write(
- # error: Argument 1 to "write" of "TextIOBase" has incompatible type
- # "bytes"; expected "str"
- pickle.dumps(obj, protocol=protocol) # type: ignore[arg-type]
- )
+ handles.handle.write(pickle.dumps(obj, protocol=protocol))
else:
# letting pickle write directly to the buffer is more memory-efficient
- pickle.dump(
- # error: Argument 2 to "dump" has incompatible type "Union[IO[Any],
- # RawIOBase, BufferedIOBase, TextIOBase, TextIOWrapper, mmap]"; expected
- # "IO[bytes]"
- obj,
- handles.handle, # type: ignore[arg-type]
- protocol=protocol,
- )
+ pickle.dump(obj, handles.handle, protocol=protocol)
@doc(storage_options=generic._shared_docs["storage_options"])
def read_pickle(
- filepath_or_buffer: FilePathOrBuffer,
+ filepath_or_buffer: FilePath | ReadPickleBuffer,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
):
@@ -136,8 +130,9 @@ def read_pickle(
Parameters
----------
- filepath_or_buffer : str, path object or file-like object
- File path, URL, or buffer where the pickled object will be loaded from.
+ filepath_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``readlines()`` function.
.. versionchanged:: 1.0.0
Accept URL. URL is not limited to S3 and GCS.
@@ -211,10 +206,7 @@ def read_pickle(
with warnings.catch_warnings(record=True):
# We want to silence any warnings about, e.g. moved modules.
warnings.simplefilter("ignore", Warning)
- # error: Argument 1 to "load" has incompatible type "Union[IO[Any],
- # RawIOBase, BufferedIOBase, TextIOBase, TextIOWrapper, mmap]";
- # expected "IO[bytes]"
- return pickle.load(handles.handle) # type: ignore[arg-type]
+ return pickle.load(handles.handle)
except excs_to_catch:
# e.g.
# "No module named 'pandas.core.sparse.series'"
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 8c8e9b9feeb80..0e886befb5f2f 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -45,6 +45,7 @@
from pandas.compat.pickle_compat import patch_pickle
from pandas.errors import PerformanceWarning
from pandas.util._decorators import cache_readonly
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
ensure_object,
@@ -2190,7 +2191,9 @@ def update_info(self, info):
# frequency/name just warn
if key in ["freq", "index_name"]:
ws = attribute_conflict_doc % (key, existing_value, value)
- warnings.warn(ws, AttributeConflictWarning, stacklevel=6)
+ warnings.warn(
+ ws, AttributeConflictWarning, stacklevel=find_stack_level()
+ )
# reset
idx[key] = None
@@ -3080,7 +3083,7 @@ def write_array(
pass
else:
ws = performance_doc % (inferred_type, key, items)
- warnings.warn(ws, PerformanceWarning, stacklevel=7)
+ warnings.warn(ws, PerformanceWarning, stacklevel=find_stack_level())
vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom())
vlarr.append(value)
diff --git a/pandas/io/sas/__init__.py b/pandas/io/sas/__init__.py
index 8f81352e6aecb..71027fd064f3d 100644
--- a/pandas/io/sas/__init__.py
+++ b/pandas/io/sas/__init__.py
@@ -1 +1 @@
-from pandas.io.sas.sasreader import read_sas # noqa
+from pandas.io.sas.sasreader import read_sas # noqa:F401
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index 300df9728cd75..cf5cbdcdd94f0 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -21,14 +21,14 @@
timedelta,
)
import struct
-from typing import (
- IO,
- Any,
- cast,
-)
+from typing import cast
import numpy as np
+from pandas._typing import (
+ FilePath,
+ ReadBuffer,
+)
from pandas.errors import (
EmptyDataError,
OutOfBoundsDatetime,
@@ -103,13 +103,14 @@ class _Column:
col_id: int
name: str | bytes
label: str | bytes
- format: str | bytes # TODO: i think allowing bytes is from py2 days
+ format: str | bytes
ctype: bytes
length: int
def __init__(
self,
col_id: int,
+ # These can be bytes when convert_header_text is False
name: str | bytes,
label: str | bytes,
format: str | bytes,
@@ -159,7 +160,7 @@ class SAS7BDATReader(ReaderBase, abc.Iterator):
def __init__(
self,
- path_or_buf,
+ path_or_buf: FilePath | ReadBuffer[bytes],
index=None,
convert_dates=True,
blank_missing=True,
@@ -179,16 +180,16 @@ def __init__(
self.default_encoding = "latin-1"
self.compression = b""
- self.column_names_strings = []
- self.column_names = []
- self.column_formats = []
- self.columns = []
+ self.column_names_strings: list[str] = []
+ self.column_names: list[str] = []
+ self.column_formats: list[str] = []
+ self.columns: list[_Column] = []
- self._current_page_data_subheader_pointers = []
+ self._current_page_data_subheader_pointers: list[_SubheaderPointer] = []
self._cached_page = None
- self._column_data_lengths = []
- self._column_data_offsets = []
- self._column_types = []
+ self._column_data_lengths: list[int] = []
+ self._column_data_offsets: list[int] = []
+ self._column_types: list[bytes] = []
self._current_row_in_file_index = 0
self._current_row_on_page_index = 0
@@ -196,7 +197,7 @@ def __init__(
self.handles = get_handle(path_or_buf, "rb", is_text=False)
- self._path_or_buf = cast(IO[Any], self.handles.handle)
+ self._path_or_buf = self.handles.handle
try:
self._get_properties()
@@ -227,7 +228,7 @@ def _get_properties(self) -> None:
# Check magic number
self._path_or_buf.seek(0)
- self._cached_page = cast(bytes, self._path_or_buf.read(288))
+ self._cached_page = self._path_or_buf.read(288)
if self._cached_page[0 : len(const.magic)] != const.magic:
raise ValueError("magic number mismatch (not a SAS file?)")
@@ -301,7 +302,7 @@ def _get_properties(self) -> None:
)
# Read the rest of the header into cached_page.
- buf = cast(bytes, self._path_or_buf.read(self.header_length - 288))
+ buf = self._path_or_buf.read(self.header_length - 288)
self._cached_page += buf
# error: Argument 1 to "len" has incompatible type "Optional[bytes]";
# expected "Sized"
@@ -400,7 +401,7 @@ def _read_bytes(self, offset: int, length: int):
def _parse_metadata(self) -> None:
done = False
while not done:
- self._cached_page = cast(bytes, self._path_or_buf.read(self._page_length))
+ self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
break
if len(self._cached_page) != self._page_length:
@@ -761,7 +762,7 @@ def read(self, nrows: int | None = None) -> DataFrame | None:
def _read_next_page(self):
self._current_page_data_subheader_pointers = []
- self._cached_page = cast(bytes, self._path_or_buf.read(self._page_length))
+ self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
return True
elif len(self._cached_page) != self._page_length:
@@ -817,7 +818,7 @@ def _chunk_to_dataframe(self) -> DataFrame:
js += 1
else:
self.close()
- raise ValueError(f"unknown column type {self._column_types[j]}")
+ raise ValueError(f"unknown column type {repr(self._column_types[j])}")
df = DataFrame(rslt, columns=self.column_names, index=ix, copy=False)
return df
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 3f9bf6662e99f..d8a3412e05d05 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -7,17 +7,19 @@
https://support.sas.com/techsup/technote/ts140.pdf
"""
+from __future__ import annotations
+
from collections import abc
from datetime import datetime
import struct
-from typing import (
- IO,
- cast,
-)
import warnings
import numpy as np
+from pandas._typing import (
+ FilePath,
+ ReadBuffer,
+)
from pandas.util._decorators import Appender
import pandas as pd
@@ -248,7 +250,11 @@ class XportReader(ReaderBase, abc.Iterator):
__doc__ = _xport_reader_doc
def __init__(
- self, filepath_or_buffer, index=None, encoding="ISO-8859-1", chunksize=None
+ self,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ index=None,
+ encoding: str | None = "ISO-8859-1",
+ chunksize=None,
):
self._encoding = encoding
@@ -259,7 +265,7 @@ def __init__(
self.handles = get_handle(
filepath_or_buffer, "rb", encoding=encoding, is_text=False
)
- self.filepath_or_buffer = cast(IO[bytes], self.handles.handle)
+ self.filepath_or_buffer = self.handles.handle
try:
self._read_header()
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index b323ce39763a1..f50fc777f55e9 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -13,7 +13,10 @@
overload,
)
-from pandas._typing import FilePathOrBuffer
+from pandas._typing import (
+ FilePath,
+ ReadBuffer,
+)
from pandas.io.common import stringify_path
@@ -44,7 +47,7 @@ def __exit__(self, exc_type, exc_value, traceback):
@overload
def read_sas(
- filepath_or_buffer: FilePathOrBuffer,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
format: str | None = ...,
index: Hashable | None = ...,
encoding: str | None = ...,
@@ -56,7 +59,7 @@ def read_sas(
@overload
def read_sas(
- filepath_or_buffer: FilePathOrBuffer,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
format: str | None = ...,
index: Hashable | None = ...,
encoding: str | None = ...,
@@ -67,7 +70,7 @@ def read_sas(
def read_sas(
- filepath_or_buffer: FilePathOrBuffer,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
format: str | None = None,
index: Hashable | None = None,
encoding: str | None = None,
@@ -79,18 +82,12 @@ def read_sas(
Parameters
----------
- filepath_or_buffer : str, path object or file-like object
- Any valid string path is acceptable. The string could be a URL. Valid
- URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ filepath_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``read()`` function. The string could be a URL.
+ Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.sas``.
-
- If you want to pass in a path object, pandas accepts any
- ``os.PathLike``.
-
- By file-like object, we refer to objects with a ``read()`` method,
- such as a file handle (e.g. via builtin ``open`` function)
- or ``StringIO``.
format : str {'xport', 'sas7bdat'} or None
If None, file format is inferred from file extension. If 'xport' or
'sas7bdat', uses the corresponding format.
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index ec5262ee3a04c..26869a660f4b4 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -28,6 +28,7 @@
from pandas._typing import DtypeArg
from pandas.compat._optional import import_optional_dependency
from pandas.errors import AbstractMethodError
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
@@ -190,12 +191,12 @@ def execute(sql, con, params=None):
def read_sql_table(
table_name,
con,
- schema=None,
- index_col=None,
- coerce_float=True,
- parse_dates=None,
- columns=None,
- chunksize: None = None,
+ schema=...,
+ index_col=...,
+ coerce_float=...,
+ parse_dates=...,
+ columns=...,
+ chunksize: None = ...,
) -> DataFrame:
...
@@ -204,12 +205,12 @@ def read_sql_table(
def read_sql_table(
table_name,
con,
- schema=None,
- index_col=None,
- coerce_float=True,
- parse_dates=None,
- columns=None,
- chunksize: int = 1,
+ schema=...,
+ index_col=...,
+ coerce_float=...,
+ parse_dates=...,
+ columns=...,
+ chunksize: int = ...,
) -> Iterator[DataFrame]:
...
@@ -302,12 +303,12 @@ def read_sql_table(
def read_sql_query(
sql,
con,
- index_col=None,
- coerce_float=True,
- params=None,
- parse_dates=None,
- chunksize: None = None,
- dtype: DtypeArg | None = None,
+ index_col=...,
+ coerce_float=...,
+ params=...,
+ parse_dates=...,
+ chunksize: None = ...,
+ dtype: DtypeArg | None = ...,
) -> DataFrame:
...
@@ -316,12 +317,12 @@ def read_sql_query(
def read_sql_query(
sql,
con,
- index_col=None,
- coerce_float=True,
- params=None,
- parse_dates=None,
- chunksize: int = 1,
- dtype: DtypeArg | None = None,
+ index_col=...,
+ coerce_float=...,
+ params=...,
+ parse_dates=...,
+ chunksize: int = ...,
+ dtype: DtypeArg | None = ...,
) -> Iterator[DataFrame]:
...
@@ -375,7 +376,7 @@ def read_sql_query(
rows to include in each chunk.
dtype : Type name or dict of columns
Data type for data or columns. E.g. np.float64 or
- {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}
+ {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}.
.. versionadded:: 1.3.0
@@ -409,12 +410,12 @@ def read_sql_query(
def read_sql(
sql,
con,
- index_col=None,
- coerce_float=True,
- params=None,
- parse_dates=None,
- columns=None,
- chunksize: None = None,
+ index_col=...,
+ coerce_float=...,
+ params=...,
+ parse_dates=...,
+ columns=...,
+ chunksize: None = ...,
) -> DataFrame:
...
@@ -423,12 +424,12 @@ def read_sql(
def read_sql(
sql,
con,
- index_col=None,
- coerce_float=True,
- params=None,
- parse_dates=None,
- columns=None,
- chunksize: int = 1,
+ index_col=...,
+ coerce_float=...,
+ params=...,
+ parse_dates=...,
+ columns=...,
+ chunksize: int = ...,
) -> Iterator[DataFrame]:
...
@@ -1159,7 +1160,7 @@ def _sqlalchemy_type(self, col):
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
- stacklevel=8,
+ stacklevel=find_stack_level(),
)
return BigInteger
elif col_type == "floating":
@@ -1817,12 +1818,6 @@ def _get_valid_sqlite_name(name):
return '"' + uname.replace('"', '""') + '"'
-_SAFE_NAMES_WARNING = (
- "The spaces in these column names will not be changed. "
- "In pandas versions < 0.14, spaces were converted to underscores."
-)
-
-
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
@@ -1882,12 +1877,6 @@ def _create_table_setup(self):
statement while the rest will be CREATE INDEX statements.
"""
column_names_and_types = self._get_column_names_and_types(self._sql_type_name)
-
- pat = re.compile(r"\s+")
- column_names = [col_name for col_name, _, _ in column_names_and_types]
- if any(map(pat.search, column_names)):
- warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
-
escape = _get_valid_sqlite_name
create_tbl_stmts = [
@@ -1948,7 +1937,7 @@ def _sql_type_name(self, col):
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
- stacklevel=8,
+ stacklevel=find_stack_level(),
)
col_type = "integer"
@@ -2169,9 +2158,6 @@ def to_sql(
table.insert(chunksize, method)
def has_table(self, name: str, schema: str | None = None):
- # TODO(wesm): unused?
- # escape = _get_valid_sqlite_name
- # esc_name = escape(name)
wld = "?"
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 9803a2e4e3309..ff9d8a1be3d1e 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -18,6 +18,7 @@
import struct
import sys
from typing import (
+ IO,
TYPE_CHECKING,
Any,
AnyStr,
@@ -33,10 +34,11 @@
from pandas._libs.lib import infer_dtype
from pandas._libs.writers import max_len_string_array
from pandas._typing import (
- Buffer,
CompressionOptions,
- FilePathOrBuffer,
+ FilePath,
+ ReadBuffer,
StorageOptions,
+ WriteBuffer,
)
from pandas.util._decorators import (
Appender,
@@ -1117,7 +1119,7 @@ class StataReader(StataParser, abc.Iterator):
def __init__(
self,
- path_or_buf: FilePathOrBuffer,
+ path_or_buf: FilePath | ReadBuffer[bytes],
convert_dates: bool = True,
convert_categoricals: bool = True,
index_col: str | None = None,
@@ -1168,10 +1170,7 @@ def __init__(
compression=compression,
) as handles:
# Copy to BytesIO, and ensure no encoding
-
- # Argument 1 to "BytesIO" has incompatible type "Union[Any, bytes, None,
- # str]"; expected "bytes"
- self.path_or_buf = BytesIO(handles.handle.read()) # type: ignore[arg-type]
+ self.path_or_buf = BytesIO(handles.handle.read())
self._read_header()
self._setup_dtype()
@@ -2002,7 +2001,7 @@ def value_labels(self) -> dict[str, dict[float | int, str]]:
@Appender(_read_stata_doc)
def read_stata(
- filepath_or_buffer: FilePathOrBuffer,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
convert_dates: bool = True,
convert_categoricals: bool = True,
index_col: str | None = None,
@@ -2270,7 +2269,7 @@ class StataWriter(StataParser):
def __init__(
self,
- fname: FilePathOrBuffer,
+ fname: FilePath | WriteBuffer[bytes],
data: DataFrame,
convert_dates: dict[Hashable, str] | None = None,
write_index: bool = True,
@@ -2294,7 +2293,7 @@ def __init__(
self._value_labels: list[StataValueLabel] = []
self._has_value_labels = np.array([], dtype=bool)
self._compression = compression
- self._output_file: Buffer | None = None
+ self._output_file: IO[bytes] | None = None
self._converted_names: dict[Hashable, str] = {}
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
@@ -2310,15 +2309,13 @@ def _write(self, to_write: str) -> None:
"""
Helper to call encode before writing to file for Python 3 compat.
"""
- self.handles.handle.write(
- to_write.encode(self._encoding) # type: ignore[arg-type]
- )
+ self.handles.handle.write(to_write.encode(self._encoding))
def _write_bytes(self, value: bytes) -> None:
"""
Helper to assert file is open before writing.
"""
- self.handles.handle.write(value) # type: ignore[arg-type]
+ self.handles.handle.write(value)
def _prepare_non_cat_value_labels(
self, data: DataFrame
@@ -2686,7 +2683,7 @@ def _close(self) -> None:
if self._output_file is not None:
assert isinstance(self.handles.handle, BytesIO)
bio, self.handles.handle = self.handles.handle, self._output_file
- self.handles.handle.write(bio.getvalue()) # type: ignore[arg-type]
+ self.handles.handle.write(bio.getvalue())
def _write_map(self) -> None:
"""No-op, future compatibility"""
@@ -3203,7 +3200,7 @@ class StataWriter117(StataWriter):
def __init__(
self,
- fname: FilePathOrBuffer,
+ fname: FilePath | WriteBuffer[bytes],
data: DataFrame,
convert_dates: dict[Hashable, str] | None = None,
write_index: bool = True,
@@ -3605,7 +3602,7 @@ class StataWriterUTF8(StataWriter117):
def __init__(
self,
- fname: FilePathOrBuffer,
+ fname: FilePath | WriteBuffer[bytes],
data: DataFrame,
convert_dates: dict[Hashable, str] | None = None,
write_index: bool = True,
diff --git a/pandas/io/xml.py b/pandas/io/xml.py
index bc3436861f1a8..3c3b4afa2c57d 100644
--- a/pandas/io/xml.py
+++ b/pandas/io/xml.py
@@ -7,9 +7,9 @@
import io
from pandas._typing import (
- Buffer,
CompressionOptions,
- FilePathOrBuffer,
+ FilePath,
+ ReadBuffer,
StorageOptions,
)
from pandas.compat._optional import import_optional_dependency
@@ -199,9 +199,6 @@ class _EtreeFrameParser(_XMLFrameParser):
standard library XML module: `xml.etree.ElementTree`.
"""
- def __init__(self, *args, **kwargs) -> None:
- super().__init__(*args, **kwargs)
-
def parse_data(self) -> list[dict[str, str | None]]:
from xml.etree.ElementTree import XML
@@ -571,11 +568,11 @@ def _transform_doc(self) -> bytes:
def get_data_from_filepath(
- filepath_or_buffer,
+ filepath_or_buffer: FilePath | bytes | ReadBuffer[bytes] | ReadBuffer[str],
encoding,
compression,
storage_options,
-) -> str | bytes | Buffer:
+) -> str | bytes | ReadBuffer[bytes] | ReadBuffer[str]:
"""
Extract raw XML data.
@@ -587,7 +584,8 @@ def get_data_from_filepath(
This method turns (1) into (2) to simplify the rest of the processing.
It returns input types (2) and (3) unchanged.
"""
- filepath_or_buffer = stringify_path(filepath_or_buffer)
+ if not isinstance(filepath_or_buffer, bytes):
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
if (
isinstance(filepath_or_buffer, str)
@@ -606,7 +604,10 @@ def get_data_from_filepath(
storage_options=storage_options,
) as handle_obj:
filepath_or_buffer = (
- handle_obj.handle.read()
+ # error: Incompatible types in assignment (expression has type
+ # "Union[str, IO[str]]", variable has type "Union[Union[str,
+ # PathLike[str]], bytes, ReadBuffer[bytes], ReadBuffer[str]]")
+ handle_obj.handle.read() # type: ignore[assignment]
if hasattr(handle_obj.handle, "read")
else handle_obj.handle
)
@@ -728,7 +729,7 @@ def _parse(
@doc(storage_options=_shared_docs["storage_options"])
def read_xml(
- path_or_buffer: FilePathOrBuffer,
+ path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str],
xpath: str | None = "./*",
namespaces: dict | list[dict] | None = None,
elems_only: bool | None = False,
@@ -736,7 +737,7 @@ def read_xml(
names: list[str] | None = None,
encoding: str | None = "utf-8",
parser: str | None = "lxml",
- stylesheet: FilePathOrBuffer | None = None,
+ stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> DataFrame:
@@ -748,8 +749,10 @@ def read_xml(
Parameters
----------
path_or_buffer : str, path object, or file-like object
- Any valid XML string or path is acceptable. The string could be a URL.
- Valid URL schemes include http, ftp, s3, and file.
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a ``read()`` function. The string can be any valid XML
+ string or a path. The string can further be a URL. Valid URL schemes
+ include http, ftp, s3, and file.
xpath : str, optional, default './\*'
The XPath to parse required set of nodes for migration to DataFrame.
diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py
index 1308a83f61443..a2089de294e22 100644
--- a/pandas/plotting/_matplotlib/boxplot.py
+++ b/pandas/plotting/_matplotlib/boxplot.py
@@ -391,6 +391,11 @@ def plot_group(keys, values, ax: Axes):
with plt.rc_context(rc):
ax = plt.gca()
data = data._get_numeric_data()
+ naxes = len(data.columns)
+ if naxes == 0:
+ raise ValueError(
+ "boxplot method requires numerical columns, nothing to plot."
+ )
if columns is None:
columns = data.columns
else:
diff --git a/pandas/plotting/_matplotlib/compat.py b/pandas/plotting/_matplotlib/compat.py
index 70ddd1ca09c7e..5569b1f2979b0 100644
--- a/pandas/plotting/_matplotlib/compat.py
+++ b/pandas/plotting/_matplotlib/compat.py
@@ -24,3 +24,4 @@ def inner():
mpl_ge_3_2_0 = _mpl_version("3.2.0", operator.ge)
mpl_ge_3_3_0 = _mpl_version("3.3.0", operator.ge)
mpl_ge_3_4_0 = _mpl_version("3.4.0", operator.ge)
+mpl_ge_3_5_0 = _mpl_version("3.5.0", operator.ge)
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index ead0a2129d29f..90d3f8d9836bf 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -357,8 +357,8 @@ def get_locator(self, dmin, dmax):
locator = MilliSecondLocator(self.tz)
locator.set_axis(self.axis)
- locator.set_view_interval(*self.axis.get_view_interval())
- locator.set_data_interval(*self.axis.get_data_interval())
+ locator.axis.set_view_interval(*self.axis.get_view_interval())
+ locator.axis.set_data_interval(*self.axis.get_data_interval())
return locator
return dates.AutoDateLocator.get_locator(self, dmin, dmax)
diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index 9679e79d8c4ba..5314a61191d78 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -13,6 +13,8 @@
import matplotlib.ticker as ticker
import numpy as np
+from pandas.util._exceptions import find_stack_level
+
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import (
ABCDataFrame,
@@ -233,7 +235,7 @@ def create_subplots(
"When passing multiple axes, sharex and sharey "
"are ignored. These settings must be specified when creating axes.",
UserWarning,
- stacklevel=4,
+ stacklevel=find_stack_level(),
)
if ax.size == naxes:
fig = ax.flat[0].get_figure()
@@ -256,7 +258,7 @@ def create_subplots(
"To output multiple subplots, the figure containing "
"the passed axes is being cleared.",
UserWarning,
- stacklevel=4,
+ stacklevel=find_stack_level(),
)
fig.clear()
diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py
index f8c945bb496a8..6cfcfa778b105 100644
--- a/pandas/tests/apply/test_frame_apply.py
+++ b/pandas/tests/apply/test_frame_apply.py
@@ -1198,9 +1198,7 @@ def test_nuiscance_columns():
)
tm.assert_frame_equal(result, expected)
- with tm.assert_produces_warning(
- FutureWarning, match="Select only valid", check_stacklevel=False
- ):
+ with tm.assert_produces_warning(FutureWarning, match="Select only valid"):
result = df.agg("sum")
expected = Series([6, 6.0, "foobarbaz"], index=["A", "B", "C"])
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py
index 1d0b64c1835df..b7084e2bc6dc7 100644
--- a/pandas/tests/apply/test_series_apply.py
+++ b/pandas/tests/apply/test_series_apply.py
@@ -794,18 +794,15 @@ def test_apply_to_timedelta():
list_of_valid_strings = ["00:00:01", "00:00:02"]
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
- # FIXME: dont leave commented-out
- # Can't compare until apply on a Series gives the correct dtype
- # assert_series_equal(a, b)
+ tm.assert_series_equal(Series(a), b)
list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT]
- a = pd.to_timedelta(list_of_strings) # noqa
+ a = pd.to_timedelta(list_of_strings)
with tm.assert_produces_warning(FutureWarning, match="Inferring timedelta64"):
ser = Series(list_of_strings)
- b = ser.apply(pd.to_timedelta) # noqa
- # Can't compare until apply on a Series gives the correct dtype
- # assert_series_equal(a, b)
+ b = ser.apply(pd.to_timedelta)
+ tm.assert_series_equal(Series(a), b)
@pytest.mark.parametrize(
diff --git a/pandas/tests/apply/test_str.py b/pandas/tests/apply/test_str.py
index 6c6b674ef6aab..a292b05ee444d 100644
--- a/pandas/tests/apply/test_str.py
+++ b/pandas/tests/apply/test_str.py
@@ -26,7 +26,7 @@
pytest.param([1], {}, id="axis_from_args"),
pytest.param([], {"axis": 1}, id="axis_from_kwds"),
pytest.param([], {"numeric_only": True}, id="optional_kwds"),
- pytest.param([1, None], {"numeric_only": True}, id="args_and_kwds"),
+ pytest.param([1, True], {"numeric_only": True}, id="args_and_kwds"),
],
)
@pytest.mark.parametrize("how", ["agg", "apply"])
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index 87bbdfb3c808f..49585f3d37924 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -150,7 +150,7 @@ def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
box = box_with_array
- ts = Timestamp.now(tz)
+ ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 3bf5fdb257c2a..a33febbfbe960 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -85,9 +85,9 @@ def test_operator_series_comparison_zerorank(self):
expected = 0.0 > Series([1, 2, 3])
tm.assert_series_equal(result, expected)
- def test_df_numeric_cmp_dt64_raises(self, box_with_array):
+ def test_df_numeric_cmp_dt64_raises(self, box_with_array, fixed_now_ts):
# GH#8932, GH#22163
- ts = pd.Timestamp.now()
+ ts = fixed_now_ts
obj = np.array(range(5))
obj = tm.box_expected(obj, box_with_array)
@@ -281,9 +281,9 @@ def test_add_sub_timedeltalike_invalid(self, numeric_idx, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
- pd.Timestamp.now().to_pydatetime(),
- pd.Timestamp.now(tz="UTC").to_pydatetime(),
- pd.Timestamp.now().to_datetime64(),
+ pd.Timestamp("2021-01-01").to_pydatetime(),
+ pd.Timestamp("2021-01-01", tz="UTC").to_pydatetime(),
+ pd.Timestamp("2021-01-01").to_datetime64(),
pd.NaT,
],
)
@@ -873,7 +873,7 @@ def test_add_frames(self, first, second, expected):
tm.assert_frame_equal(second + first, expected)
# TODO: This came from series.test.test_operators, needs cleanup
- def test_series_frame_radd_bug(self):
+ def test_series_frame_radd_bug(self, fixed_now_ts):
# GH#353
vals = Series(tm.rands_array(5, 10))
result = "foo_" + vals
@@ -889,7 +889,7 @@ def test_series_frame_radd_bug(self):
ts.name = "ts"
# really raise this time
- now = pd.Timestamp.now().to_pydatetime()
+ fix_now = fixed_now_ts.to_pydatetime()
msg = "|".join(
[
"unsupported operand type",
@@ -898,10 +898,10 @@ def test_series_frame_radd_bug(self):
]
)
with pytest.raises(TypeError, match=msg):
- now + ts
+ fix_now + ts
with pytest.raises(TypeError, match=msg):
- ts + now
+ ts + fix_now
# TODO: This came from series.test.test_operators, needs cleanup
def test_datetime64_with_index(self):
diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py
index 3069868ebb677..c96d7c01ec97f 100644
--- a/pandas/tests/arithmetic/test_object.py
+++ b/pandas/tests/arithmetic/test_object.py
@@ -315,7 +315,7 @@ def test_sub_object(self):
with pytest.raises(TypeError, match=msg):
index - np.array([2, "foo"], dtype=object)
- def test_rsub_object(self):
+ def test_rsub_object(self, fixed_now_ts):
# GH#19369
index = pd.Index([Decimal(1), Decimal(2)])
expected = pd.Index([Decimal(1), Decimal(0)])
@@ -331,7 +331,7 @@ def test_rsub_object(self):
"foo" - index
with pytest.raises(TypeError, match=msg):
- np.array([True, Timestamp.now()]) - index
+ np.array([True, fixed_now_ts]) - index
class MyIndex(pd.Index):
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py
index f4404a3483e6f..a231e52d4b027 100644
--- a/pandas/tests/arithmetic/test_period.py
+++ b/pandas/tests/arithmetic/test_period.py
@@ -72,7 +72,7 @@ def test_compare_zerodim(self, box_with_array):
"scalar",
[
"foo",
- Timestamp.now(),
+ Timestamp("2021-01-01"),
Timedelta(days=4),
9,
9.5,
@@ -693,9 +693,9 @@ def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
"other",
[
# datetime scalars
- Timestamp.now(),
- Timestamp.now().to_pydatetime(),
- Timestamp.now().to_datetime64(),
+ Timestamp("2016-01-01"),
+ Timestamp("2016-01-01").to_pydatetime(),
+ Timestamp("2016-01-01").to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 8078e8c90a2bf..29c01e45ed28d 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -110,11 +110,11 @@ def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar):
[
345600000000000,
"a",
- Timestamp.now(),
- Timestamp.now("UTC"),
- Timestamp.now().to_datetime64(),
- Timestamp.now().to_pydatetime(),
- Timestamp.now().date(),
+ Timestamp("2021-01-01"),
+ Timestamp("2021-01-01").now("UTC"),
+ Timestamp("2021-01-01").now().to_datetime64(),
+ Timestamp("2021-01-01").now().to_pydatetime(),
+ Timestamp("2021-01-01").date(),
np.array(4), # zero-dim mismatched dtype
],
)
@@ -152,7 +152,7 @@ def test_td64arr_cmp_arraylike_invalid(self, other, box_with_array):
def test_td64arr_cmp_mixed_invalid(self):
rng = timedelta_range("1 days", periods=5)._data
- other = np.array([0, 1, 2, rng[3], Timestamp.now()])
+ other = np.array([0, 1, 2, rng[3], Timestamp("2021-01-01")])
result = rng == other
expected = np.array([False, False, False, True, False])
@@ -1981,6 +1981,20 @@ def test_td64arr_div_numeric_scalar(self, box_with_array, two):
with pytest.raises(TypeError, match="Cannot divide"):
two / tdser
+ @pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)])
+ def test_td64arr_floordiv_numeric_scalar(self, box_with_array, two):
+ tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
+ expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]")
+
+ tdser = tm.box_expected(tdser, box_with_array)
+ expected = tm.box_expected(expected, box_with_array)
+
+ result = tdser // two
+ tm.assert_equal(result, expected)
+
+ with pytest.raises(TypeError, match="Cannot divide"):
+ two // tdser
+
@pytest.mark.parametrize(
"vector",
[np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
@@ -2160,7 +2174,7 @@ def test_td64arr_pow_invalid(self, scalar_td, box_with_array):
def test_add_timestamp_to_timedelta():
# GH: 35897
- timestamp = Timestamp.now()
+ timestamp = Timestamp("2021-01-01")
result = timestamp + timedelta_range("0s", "1s", periods=31)
expected = DatetimeIndex(
[
diff --git a/pandas/tests/arrays/boolean/test_function.py b/pandas/tests/arrays/boolean/test_function.py
index d90655b6e2820..2f1a3121cdf5b 100644
--- a/pandas/tests/arrays/boolean/test_function.py
+++ b/pandas/tests/arrays/boolean/test_function.py
@@ -59,8 +59,8 @@ def test_ufuncs_unary(ufunc):
expected[a._mask] = np.nan
tm.assert_extension_array_equal(result, expected)
- s = pd.Series(a)
- result = ufunc(s)
+ ser = pd.Series(a)
+ result = ufunc(ser)
expected = pd.Series(ufunc(a._data), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_series_equal(result, expected)
@@ -86,8 +86,8 @@ def test_value_counts_na():
def test_value_counts_with_normalize():
- s = pd.Series([True, False, pd.NA], dtype="boolean")
- result = s.value_counts(normalize=True)
+ ser = pd.Series([True, False, pd.NA], dtype="boolean")
+ result = ser.value_counts(normalize=True)
expected = pd.Series([1, 1], index=[True, False], dtype="Float64") / 2
tm.assert_series_equal(result, expected)
@@ -102,7 +102,7 @@ def test_diff():
)
tm.assert_extension_array_equal(result, expected)
- s = pd.Series(a)
- result = s.diff()
+ ser = pd.Series(a)
+ result = ser.diff()
expected = pd.Series(expected)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/arrays/boolean/test_logical.py b/pandas/tests/arrays/boolean/test_logical.py
index afcbe36e165c9..938fa8f1a5d6a 100644
--- a/pandas/tests/arrays/boolean/test_logical.py
+++ b/pandas/tests/arrays/boolean/test_logical.py
@@ -41,6 +41,20 @@ def test_empty_ok(self, all_logical_operators):
result = getattr(a, op_name)(pd.NA)
tm.assert_extension_array_equal(a, result)
+ @pytest.mark.parametrize(
+ "other", ["a", pd.Timestamp(2017, 1, 1, 12), np.timedelta64(4)]
+ )
+ def test_eq_mismatched_type(self, other):
+ # GH-44499
+ arr = pd.array([True, False])
+ result = arr == other
+ expected = pd.array([False, False])
+ tm.assert_extension_array_equal(result, expected)
+
+ result = arr != other
+ expected = pd.array([True, True])
+ tm.assert_extension_array_equal(result, expected)
+
def test_logical_length_mismatch_raises(self, all_logical_operators):
op_name = all_logical_operators
a = pd.array([True, False, None], dtype="boolean")
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index ee24ecb4964ec..c144c82486be9 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -236,21 +236,19 @@ def test_constructor(self):
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(None):
- c_old = Categorical([0, 1, 2, 0, 1, 2], categories=["a", "b", "c"])
+ Categorical([0, 1, 2, 0, 1, 2], categories=["a", "b", "c"])
with tm.assert_produces_warning(None):
- c_old = Categorical([0, 1, 2, 0, 1, 2], categories=[3, 4, 5]) # noqa
+ Categorical([0, 1, 2, 0, 1, 2], categories=[3, 4, 5])
# the next one are from the old docs
with tm.assert_produces_warning(None):
- c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
+ Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
- c = Categorical( # noqa
- np.array([], dtype="int64"), categories=[3, 2, 1], ordered=True
- )
+ Categorical(np.array([], dtype="int64"), categories=[3, 2, 1], ordered=True)
def test_constructor_with_existing_categories(self):
# GH25318: constructing with pd.Series used to bogusly skip recoding
diff --git a/pandas/tests/arrays/categorical/test_repr.py b/pandas/tests/arrays/categorical/test_repr.py
index e23fbb16190ea..678109b2c2497 100644
--- a/pandas/tests/arrays/categorical/test_repr.py
+++ b/pandas/tests/arrays/categorical/test_repr.py
@@ -77,7 +77,7 @@ def test_unicode_print(self):
expected = """\
['ああああ', 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', ..., 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', 'ううううううう']
Length: 60
-Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa
+Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa:E501
assert repr(c) == expected
@@ -88,7 +88,7 @@ def test_unicode_print(self):
c = Categorical(["ああああ", "いいいいい", "ううううううう"] * 20)
expected = """['ああああ', 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', ..., 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', 'ううううううう']
Length: 60
-Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa
+Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa:E501
assert repr(c) == expected
@@ -213,14 +213,14 @@ def test_categorical_repr_datetime_ordered(self):
c = Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
- 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
+ 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa:E501
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
- 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
+ 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa:E501
assert repr(c) == exp
@@ -229,7 +229,7 @@ def test_categorical_repr_datetime_ordered(self):
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
- 2011-01-01 13:00:00-05:00]""" # noqa
+ 2011-01-01 13:00:00-05:00]""" # noqa:E501
assert repr(c) == exp
@@ -237,7 +237,7 @@ def test_categorical_repr_datetime_ordered(self):
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
- 2011-01-01 13:00:00-05:00]""" # noqa
+ 2011-01-01 13:00:00-05:00]""" # noqa:E501
assert repr(c) == exp
@@ -257,14 +257,14 @@ def test_categorical_repr_period(self):
c = Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
- 2011-01-01 13:00]""" # noqa
+ 2011-01-01 13:00]""" # noqa:E501
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
- 2011-01-01 13:00]""" # noqa
+ 2011-01-01 13:00]""" # noqa:E501
assert repr(c) == exp
@@ -277,7 +277,7 @@ def test_categorical_repr_period(self):
c = Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
-Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" # noqa
+Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" # noqa:E501
assert repr(c) == exp
@@ -286,14 +286,14 @@ def test_categorical_repr_period_ordered(self):
c = Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
- 2011-01-01 13:00]""" # noqa
+ 2011-01-01 13:00]""" # noqa:E501
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
- 2011-01-01 13:00]""" # noqa
+ 2011-01-01 13:00]""" # noqa:E501
assert repr(c) == exp
@@ -306,7 +306,7 @@ def test_categorical_repr_period_ordered(self):
c = Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
-Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" # noqa
+Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" # noqa:E501
assert repr(c) == exp
@@ -330,7 +330,7 @@ def test_categorical_repr_timedelta(self):
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
- 18 days 01:00:00, 19 days 01:00:00]""" # noqa
+ 18 days 01:00:00, 19 days 01:00:00]""" # noqa:E501
assert repr(c) == exp
@@ -339,7 +339,7 @@ def test_categorical_repr_timedelta(self):
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
- 18 days 01:00:00, 19 days 01:00:00]""" # noqa
+ 18 days 01:00:00, 19 days 01:00:00]""" # noqa:E501
assert repr(c) == exp
@@ -363,7 +363,7 @@ def test_categorical_repr_timedelta_ordered(self):
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
- 18 days 01:00:00 < 19 days 01:00:00]""" # noqa
+ 18 days 01:00:00 < 19 days 01:00:00]""" # noqa:E501
assert repr(c) == exp
@@ -372,26 +372,26 @@ def test_categorical_repr_timedelta_ordered(self):
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
- 18 days 01:00:00 < 19 days 01:00:00]""" # noqa
+ 18 days 01:00:00 < 19 days 01:00:00]""" # noqa:E501
assert repr(c) == exp
def test_categorical_index_repr(self):
idx = CategoricalIndex(Categorical([1, 2, 3]))
- exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" # noqa
+ exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" # noqa:E501
assert repr(idx) == exp
i = CategoricalIndex(Categorical(np.arange(10)))
- exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')""" # noqa
+ exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')""" # noqa:E501
assert repr(i) == exp
def test_categorical_index_repr_ordered(self):
i = CategoricalIndex(Categorical([1, 2, 3], ordered=True))
- exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" # noqa
+ exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" # noqa:E501
assert repr(i) == exp
i = CategoricalIndex(Categorical(np.arange(10), ordered=True))
- exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')""" # noqa
+ exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')""" # noqa:E501
assert repr(i) == exp
def test_categorical_index_repr_datetime(self):
@@ -400,7 +400,7 @@ def test_categorical_index_repr_datetime(self):
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
- categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" # noqa
+ categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" # noqa:E501
assert repr(i) == exp
@@ -409,7 +409,7 @@ def test_categorical_index_repr_datetime(self):
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
- categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" # noqa
+ categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" # noqa:E501
assert repr(i) == exp
@@ -419,7 +419,7 @@ def test_categorical_index_repr_datetime_ordered(self):
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
- categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" # noqa
+ categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" # noqa:E501
assert repr(i) == exp
@@ -428,7 +428,7 @@ def test_categorical_index_repr_datetime_ordered(self):
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
- categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa
+ categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa:E501
assert repr(i) == exp
@@ -438,7 +438,7 @@ def test_categorical_index_repr_datetime_ordered(self):
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
- categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa
+ categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa:E501
assert repr(i) == exp
@@ -446,24 +446,24 @@ def test_categorical_index_repr_period(self):
# test all length
idx = period_range("2011-01-01 09:00", freq="H", periods=1)
i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" # noqa
+ exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" # noqa:E501
assert repr(i) == exp
idx = period_range("2011-01-01 09:00", freq="H", periods=2)
i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" # noqa
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" # noqa:E501
assert repr(i) == exp
idx = period_range("2011-01-01 09:00", freq="H", periods=3)
i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" # noqa
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" # noqa:E501
assert repr(i) == exp
idx = period_range("2011-01-01 09:00", freq="H", periods=5)
i = CategoricalIndex(Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
- categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa
+ categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa:E501
assert repr(i) == exp
@@ -472,13 +472,13 @@ def test_categorical_index_repr_period(self):
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
- categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa
+ categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa:E501
assert repr(i) == exp
idx = period_range("2011-01", freq="M", periods=5)
i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')""" # noqa
+ exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')""" # noqa:E501
assert repr(i) == exp
def test_categorical_index_repr_period_ordered(self):
@@ -486,19 +486,19 @@ def test_categorical_index_repr_period_ordered(self):
i = CategoricalIndex(Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
- categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" # noqa
+ categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" # noqa:E501
assert repr(i) == exp
idx = period_range("2011-01", freq="M", periods=5)
i = CategoricalIndex(Categorical(idx, ordered=True))
- exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" # noqa
+ exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" # noqa:E501
assert repr(i) == exp
def test_categorical_index_repr_timedelta(self):
idx = timedelta_range("1 days", periods=5)
i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')""" # noqa
+ exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')""" # noqa:E501
assert repr(i) == exp
idx = timedelta_range("1 hours", periods=10)
@@ -507,14 +507,14 @@ def test_categorical_index_repr_timedelta(self):
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
- categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')""" # noqa
+ categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')""" # noqa:E501
assert repr(i) == exp
def test_categorical_index_repr_timedelta_ordered(self):
idx = timedelta_range("1 days", periods=5)
i = CategoricalIndex(Categorical(idx, ordered=True))
- exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')""" # noqa
+ exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')""" # noqa:E501
assert repr(i) == exp
idx = timedelta_range("1 hours", periods=10)
@@ -523,7 +523,7 @@ def test_categorical_index_repr_timedelta_ordered(self):
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
- categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')""" # noqa
+ categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')""" # noqa:E501
assert repr(i) == exp
diff --git a/pandas/tests/arrays/datetimes/test_constructors.py b/pandas/tests/arrays/datetimes/test_constructors.py
index cd7d9a479ab38..e6c65499f6fcc 100644
--- a/pandas/tests/arrays/datetimes/test_constructors.py
+++ b/pandas/tests/arrays/datetimes/test_constructors.py
@@ -6,7 +6,7 @@
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
-from pandas.core.arrays.datetimes import sequence_to_dt64ns
+from pandas.core.arrays.datetimes import _sequence_to_dt64ns
class TestDatetimeArrayConstructor:
@@ -42,7 +42,7 @@ def test_freq_validation(self):
"meth",
[
DatetimeArray._from_sequence,
- sequence_to_dt64ns,
+ _sequence_to_dt64ns,
pd.to_datetime,
pd.DatetimeIndex,
],
@@ -97,7 +97,7 @@ def test_bool_dtype_raises(self):
DatetimeArray._from_sequence(arr)
with pytest.raises(TypeError, match=msg):
- sequence_to_dt64ns(arr)
+ _sequence_to_dt64ns(arr)
with pytest.raises(TypeError, match=msg):
pd.DatetimeIndex(arr)
@@ -128,13 +128,13 @@ def test_tz_dtype_mismatch_raises(self):
["2000"], dtype=DatetimeTZDtype(tz="US/Central")
)
with pytest.raises(TypeError, match="data is already tz-aware"):
- sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="UTC"))
+ _sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="UTC"))
def test_tz_dtype_matches(self):
arr = DatetimeArray._from_sequence(
["2000"], dtype=DatetimeTZDtype(tz="US/Central")
)
- result, _, _ = sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="US/Central"))
+ result, _, _ = _sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="US/Central"))
tm.assert_numpy_array_equal(arr._data, result)
@pytest.mark.parametrize("order", ["F", "C"])
@@ -144,8 +144,8 @@ def test_2d(self, order):
if order == "F":
arr = arr.T
- res = sequence_to_dt64ns(arr)
- expected = sequence_to_dt64ns(arr.ravel())
+ res = _sequence_to_dt64ns(arr)
+ expected = _sequence_to_dt64ns(arr.ravel())
tm.assert_numpy_array_equal(res[0].ravel(), expected[0])
assert res[1] == expected[1]
diff --git a/pandas/tests/arrays/floating/test_arithmetic.py b/pandas/tests/arrays/floating/test_arithmetic.py
index e674b49a99bd4..e4c79592b9c9f 100644
--- a/pandas/tests/arrays/floating/test_arithmetic.py
+++ b/pandas/tests/arrays/floating/test_arithmetic.py
@@ -142,17 +142,16 @@ def test_error_invalid_values(data, all_arithmetic_operators):
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
- if op != "__rpow__":
- # TODO(extension)
- # rpow with a datetimelike coerces the integer array incorrectly
- msg = (
- "can only perform ops with numeric values|"
- "cannot perform .* with this index type: DatetimeArray|"
+ msg = "|".join(
+ [
+ "can only perform ops with numeric values",
+ "cannot perform .* with this index type: DatetimeArray",
"Addition/subtraction of integers and integer-arrays "
- "with DatetimeArray is no longer supported. *"
- )
- with pytest.raises(TypeError, match=msg):
- ops(pd.Series(pd.date_range("20180101", periods=len(s))))
+ "with DatetimeArray is no longer supported. *",
+ ]
+ )
+ with pytest.raises(TypeError, match=msg):
+ ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# Various
@@ -200,4 +199,6 @@ def test_unary_float_operators(float_ea_dtype, source, neg_target, abs_target):
tm.assert_extension_array_equal(neg_result, neg_target)
tm.assert_extension_array_equal(pos_result, arr)
+ assert not np.shares_memory(pos_result._data, arr._data)
+ assert not np.shares_memory(pos_result._mask, arr._mask)
tm.assert_extension_array_equal(abs_result, abs_target)
diff --git a/pandas/tests/arrays/floating/test_comparison.py b/pandas/tests/arrays/floating/test_comparison.py
index c4163c25ae74d..a429649f1ce1d 100644
--- a/pandas/tests/arrays/floating/test_comparison.py
+++ b/pandas/tests/arrays/floating/test_comparison.py
@@ -1,7 +1,9 @@
+import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
+from pandas.core.arrays import FloatingArray
from pandas.tests.arrays.masked_shared import (
ComparisonOps,
NumericOps,
@@ -34,3 +36,30 @@ def test_equals():
a1 = pd.array([1, 2, None], dtype="Float64")
a2 = pd.array([1, 2, None], dtype="Float32")
assert a1.equals(a2) is False
+
+
+def test_equals_nan_vs_na():
+ # GH#44382
+
+ mask = np.zeros(3, dtype=bool)
+ data = np.array([1.0, np.nan, 3.0], dtype=np.float64)
+
+ left = FloatingArray(data, mask)
+ assert left.equals(left)
+ tm.assert_extension_array_equal(left, left)
+
+ assert left.equals(left.copy())
+ assert left.equals(FloatingArray(data.copy(), mask.copy()))
+
+ mask2 = np.array([False, True, False], dtype=bool)
+ data2 = np.array([1.0, 2.0, 3.0], dtype=np.float64)
+ right = FloatingArray(data2, mask2)
+ assert right.equals(right)
+ tm.assert_extension_array_equal(right, right)
+
+ assert not left.equals(right)
+
+ # with mask[1] = True, the only difference is data[1], which should
+ # not matter for equals
+ mask[1] = True
+ assert left.equals(right)
diff --git a/pandas/tests/arrays/floating/test_function.py b/pandas/tests/arrays/floating/test_function.py
index ef95eac316397..ff84116fa1b18 100644
--- a/pandas/tests/arrays/floating/test_function.py
+++ b/pandas/tests/arrays/floating/test_function.py
@@ -106,16 +106,16 @@ def test_value_counts_na():
def test_value_counts_empty():
- s = pd.Series([], dtype="Float64")
- result = s.value_counts()
+ ser = pd.Series([], dtype="Float64")
+ result = ser.value_counts()
idx = pd.Index([], dtype="object")
expected = pd.Series([], index=idx, dtype="Int64")
tm.assert_series_equal(result, expected)
def test_value_counts_with_normalize():
- s = pd.Series([0.1, 0.2, 0.1, pd.NA], dtype="Float64")
- result = s.value_counts(normalize=True)
+ ser = pd.Series([0.1, 0.2, 0.1, pd.NA], dtype="Float64")
+ result = ser.value_counts(normalize=True)
expected = pd.Series([2, 1], index=[0.1, 0.2], dtype="Float64") / 3
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/arrays/integer/test_arithmetic.py b/pandas/tests/arrays/integer/test_arithmetic.py
index 4f66e2ecfd355..894d7697c62d3 100644
--- a/pandas/tests/arrays/integer/test_arithmetic.py
+++ b/pandas/tests/arrays/integer/test_arithmetic.py
@@ -179,17 +179,16 @@ def test_error_invalid_values(data, all_arithmetic_operators):
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
- if op != "__rpow__":
- # TODO(extension)
- # rpow with a datetimelike coerces the integer array incorrectly
- msg = (
- "can only perform ops with numeric values|"
- "cannot perform .* with this index type: DatetimeArray|"
+ msg = "|".join(
+ [
+ "can only perform ops with numeric values",
+ "cannot perform .* with this index type: DatetimeArray",
"Addition/subtraction of integers and integer-arrays "
- "with DatetimeArray is no longer supported. *"
- )
- with pytest.raises(TypeError, match=msg):
- ops(pd.Series(pd.date_range("20180101", periods=len(s))))
+ "with DatetimeArray is no longer supported. *",
+ ]
+ )
+ with pytest.raises(TypeError, match=msg):
+ ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# Various
@@ -300,4 +299,6 @@ def test_unary_int_operators(any_signed_int_ea_dtype, source, neg_target, abs_ta
tm.assert_extension_array_equal(neg_result, neg_target)
tm.assert_extension_array_equal(pos_result, arr)
+ assert not np.shares_memory(pos_result._data, arr._data)
+ assert not np.shares_memory(pos_result._mask, arr._mask)
tm.assert_extension_array_equal(abs_result, abs_target)
diff --git a/pandas/tests/arrays/integer/test_function.py b/pandas/tests/arrays/integer/test_function.py
index 6f53b44776900..3d8c93fbd507f 100644
--- a/pandas/tests/arrays/integer/test_function.py
+++ b/pandas/tests/arrays/integer/test_function.py
@@ -118,8 +118,8 @@ def test_value_counts_na():
def test_value_counts_empty():
# https://github.com/pandas-dev/pandas/issues/33317
- s = pd.Series([], dtype="Int64")
- result = s.value_counts()
+ ser = pd.Series([], dtype="Int64")
+ result = ser.value_counts()
# TODO: The dtype of the index seems wrong (it's int64 for non-empty)
idx = pd.Index([], dtype="object")
expected = pd.Series([], index=idx, dtype="Int64")
@@ -128,8 +128,8 @@ def test_value_counts_empty():
def test_value_counts_with_normalize():
# GH 33172
- s = pd.Series([1, 2, 1, pd.NA], dtype="Int64")
- result = s.value_counts(normalize=True)
+ ser = pd.Series([1, 2, 1, pd.NA], dtype="Int64")
+ result = ser.value_counts(normalize=True)
expected = pd.Series([2, 1], index=[1, 2], dtype="Float64") / 3
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/arrays/masked/test_arrow_compat.py b/pandas/tests/arrays/masked/test_arrow_compat.py
index d66a603ad568c..3f0a1b5d0eaf3 100644
--- a/pandas/tests/arrays/masked/test_arrow_compat.py
+++ b/pandas/tests/arrays/masked/test_arrow_compat.py
@@ -1,12 +1,10 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
import pandas._testing as tm
-pa = pytest.importorskip("pyarrow", minversion="0.17.0")
+pa = pytest.importorskip("pyarrow", minversion="1.0.1")
from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask
@@ -29,7 +27,6 @@ def test_arrow_array(data):
assert arr.equals(expected)
-@td.skip_if_no("pyarrow")
def test_arrow_roundtrip(data):
df = pd.DataFrame({"a": data})
table = pa.table(df)
@@ -39,7 +36,6 @@ def test_arrow_roundtrip(data):
tm.assert_frame_equal(result, df)
-@td.skip_if_no("pyarrow")
def test_arrow_load_from_zero_chunks(data):
# GH-41040
@@ -54,7 +50,6 @@ def test_arrow_load_from_zero_chunks(data):
tm.assert_frame_equal(result, df)
-@td.skip_if_no("pyarrow")
def test_arrow_from_arrow_uint():
# https://github.com/pandas-dev/pandas/issues/31896
# possible mismatch in types
@@ -66,7 +61,6 @@ def test_arrow_from_arrow_uint():
tm.assert_extension_array_equal(result, expected)
-@td.skip_if_no("pyarrow")
def test_arrow_sliced(data):
# https://github.com/pandas-dev/pandas/issues/38525
@@ -161,7 +155,6 @@ def test_pyarrow_array_to_numpy_and_mask(np_dtype_to_arrays):
tm.assert_numpy_array_equal(mask, mask_expected_empty)
-@td.skip_if_no("pyarrow")
def test_from_arrow_type_error(request, data):
# ensure that __from_arrow__ returns a TypeError when getting a wrong
# array type
diff --git a/pandas/tests/arrays/period/test_arrow_compat.py b/pandas/tests/arrays/period/test_arrow_compat.py
index 5211397f20c36..560299a4a47f5 100644
--- a/pandas/tests/arrays/period/test_arrow_compat.py
+++ b/pandas/tests/arrays/period/test_arrow_compat.py
@@ -1,7 +1,5 @@
import pytest
-import pandas.util._test_decorators as td
-
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
@@ -11,10 +9,9 @@
period_array,
)
-pyarrow_skip = td.skip_if_no("pyarrow", min_version="0.17.0")
+pa = pytest.importorskip("pyarrow", minversion="1.0.1")
-@pyarrow_skip
def test_arrow_extension_type():
from pandas.core.arrays._arrow_utils import ArrowPeriodType
@@ -29,7 +26,6 @@ def test_arrow_extension_type():
assert not hash(p1) == hash(p3)
-@pyarrow_skip
@pytest.mark.parametrize(
"data, freq",
[
@@ -38,8 +34,6 @@ def test_arrow_extension_type():
],
)
def test_arrow_array(data, freq):
- import pyarrow as pa
-
from pandas.core.arrays._arrow_utils import ArrowPeriodType
periods = period_array(data, freq=freq)
@@ -62,10 +56,7 @@ def test_arrow_array(data, freq):
pa.array(periods, type=ArrowPeriodType("T"))
-@pyarrow_skip
def test_arrow_array_missing():
- import pyarrow as pa
-
from pandas.core.arrays._arrow_utils import ArrowPeriodType
arr = PeriodArray([1, 2, 3], freq="D")
@@ -78,10 +69,7 @@ def test_arrow_array_missing():
assert result.storage.equals(expected)
-@pyarrow_skip
def test_arrow_table_roundtrip():
- import pyarrow as pa
-
from pandas.core.arrays._arrow_utils import ArrowPeriodType
arr = PeriodArray([1, 2, 3], freq="D")
@@ -100,10 +88,8 @@ def test_arrow_table_roundtrip():
tm.assert_frame_equal(result, expected)
-@pyarrow_skip
def test_arrow_load_from_zero_chunks():
# GH-41040
- import pyarrow as pa
from pandas.core.arrays._arrow_utils import ArrowPeriodType
@@ -120,10 +106,7 @@ def test_arrow_load_from_zero_chunks():
tm.assert_frame_equal(result, df)
-@pyarrow_skip
def test_arrow_table_roundtrip_without_metadata():
- import pyarrow as pa
-
arr = PeriodArray([1, 2, 3], freq="H")
arr[1] = pd.NaT
df = pd.DataFrame({"a": arr})
diff --git a/pandas/tests/arrays/period/test_constructors.py b/pandas/tests/arrays/period/test_constructors.py
index 52543d91e8f2a..cf9749058d1d1 100644
--- a/pandas/tests/arrays/period/test_constructors.py
+++ b/pandas/tests/arrays/period/test_constructors.py
@@ -96,3 +96,28 @@ def test_from_sequence_disallows_i8():
with pytest.raises(TypeError, match=msg):
PeriodArray._from_sequence(list(arr.asi8), dtype=arr.dtype)
+
+
+def test_from_td64nat_sequence_raises():
+ # GH#44507
+ td = pd.NaT.to_numpy("m8[ns]")
+
+ dtype = pd.period_range("2005-01-01", periods=3, freq="D").dtype
+
+ arr = np.array([None], dtype=object)
+ arr[0] = td
+
+ msg = "Value must be Period, string, integer, or datetime"
+ with pytest.raises(ValueError, match=msg):
+ PeriodArray._from_sequence(arr, dtype=dtype)
+
+ with pytest.raises(ValueError, match=msg):
+ pd.PeriodIndex(arr, dtype=dtype)
+ with pytest.raises(ValueError, match=msg):
+ pd.Index(arr, dtype=dtype)
+ with pytest.raises(ValueError, match=msg):
+ pd.array(arr, dtype=dtype)
+ with pytest.raises(ValueError, match=msg):
+ pd.Series(arr, dtype=dtype)
+ with pytest.raises(ValueError, match=msg):
+ pd.DataFrame(arr, dtype=dtype)
diff --git a/pandas/tests/arrays/sparse/test_libsparse.py b/pandas/tests/arrays/sparse/test_libsparse.py
index c1466882b8443..db63bba4d4eaf 100644
--- a/pandas/tests/arrays/sparse/test_libsparse.py
+++ b/pandas/tests/arrays/sparse/test_libsparse.py
@@ -460,11 +460,10 @@ def test_check_integrity(self):
lengths = []
# 0-length OK
- # TODO: index variables are not used...is that right?
- index = BlockIndex(0, locs, lengths)
+ BlockIndex(0, locs, lengths)
# also OK even though empty
- index = BlockIndex(1, locs, lengths) # noqa
+ BlockIndex(1, locs, lengths)
msg = "Block 0 extends beyond end"
with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index 501a79a8bc5ed..c330e959ad5bf 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -475,8 +475,8 @@ def test_value_counts_na(dtype):
def test_value_counts_with_normalize(dtype):
- s = pd.Series(["a", "b", "a", pd.NA], dtype=dtype)
- result = s.value_counts(normalize=True)
+ ser = pd.Series(["a", "b", "a", pd.NA], dtype=dtype)
+ result = ser.value_counts(normalize=True)
expected = pd.Series([2, 1], index=["a", "b"], dtype="Float64") / 3
tm.assert_series_equal(result, expected)
@@ -518,8 +518,8 @@ def test_memory_usage(dtype):
@pytest.mark.parametrize("float_dtype", [np.float16, np.float32, np.float64])
def test_astype_from_float_dtype(float_dtype, dtype):
# https://github.com/pandas-dev/pandas/issues/36451
- s = pd.Series([0.1], dtype=float_dtype)
- result = s.astype(dtype)
+ ser = pd.Series([0.1], dtype=float_dtype)
+ result = ser.astype(dtype)
expected = pd.Series(["0.1"], dtype=dtype)
tm.assert_series_equal(result, expected)
@@ -539,7 +539,7 @@ def test_to_numpy_na_value(dtype, nulls_fixture):
tm.assert_numpy_array_equal(result, expected)
-def test_isin(dtype, request):
+def test_isin(dtype, request, fixed_now_ts):
s = pd.Series(["a", "b", None], dtype=dtype)
result = s.isin(["a", "c"])
@@ -554,6 +554,6 @@ def test_isin(dtype, request):
expected = pd.Series([False, False, False])
tm.assert_series_equal(result, expected)
- result = s.isin(["a", pd.Timestamp.now()])
+ result = s.isin(["a", fixed_now_ts])
expected = pd.Series([True, False, False])
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py
index c3f951adf7f89..265afa89d6530 100644
--- a/pandas/tests/arrays/string_/test_string_arrow.py
+++ b/pandas/tests/arrays/string_/test_string_arrow.py
@@ -3,7 +3,7 @@
import numpy as np
import pytest
-from pandas.compat import pa_version_under1p0
+from pandas.compat import pa_version_under1p01
import pandas as pd
import pandas._testing as tm
@@ -14,7 +14,7 @@
from pandas.core.arrays.string_arrow import ArrowStringArray
skip_if_no_pyarrow = pytest.mark.skipif(
- pa_version_under1p0,
+ pa_version_under1p01,
reason="pyarrow>=1.0.0 is required for PyArrow backed StringArray",
)
@@ -118,7 +118,7 @@ def test_from_sequence_wrong_dtype_raises():
@pytest.mark.skipif(
- not pa_version_under1p0,
+ not pa_version_under1p01,
reason="pyarrow is installed",
)
def test_pyarrow_not_installed_raises():
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 13fe3c2d427c5..9d61f57bc1f62 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -26,7 +26,7 @@
PeriodArray,
TimedeltaArray,
)
-from pandas.core.arrays.datetimes import sequence_to_dt64ns
+from pandas.core.arrays.datetimes import _sequence_to_dt64ns
from pandas.core.arrays.timedeltas import sequence_to_td64ns
@@ -168,7 +168,7 @@ def test_take(self):
tm.assert_index_equal(self.index_cls(result), expected)
- @pytest.mark.parametrize("fill_value", [2, 2.0, Timestamp.now().time])
+ @pytest.mark.parametrize("fill_value", [2, 2.0, Timestamp(2021, 1, 1, 12).time])
def test_take_fill_raises(self, fill_value):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
@@ -783,9 +783,7 @@ def test_to_perioddelta(self, datetime_index, freqstr):
with tm.assert_produces_warning(FutureWarning, match=msg):
# Deprecation GH#34853
expected = dti.to_perioddelta(freq=freqstr)
- with tm.assert_produces_warning(
- FutureWarning, match=msg, check_stacklevel=False
- ):
+ with tm.assert_produces_warning(FutureWarning, match=msg):
# stacklevel is chosen to be "correct" for DatetimeIndex, not
# DatetimeArray
result = arr.to_perioddelta(freq=freqstr)
@@ -841,11 +839,11 @@ def test_int_properties(self, arr1d, propname):
tm.assert_numpy_array_equal(result, expected)
- def test_take_fill_valid(self, arr1d):
+ def test_take_fill_valid(self, arr1d, fixed_now_ts):
arr = arr1d
dti = self.index_cls(arr1d)
- now = Timestamp.now().tz_localize(dti.tz)
+ now = fixed_now_ts.tz_localize(dti.tz)
result = arr.take([-1, 1], allow_fill=True, fill_value=now)
assert result[0] == now
@@ -859,7 +857,7 @@ def test_take_fill_valid(self, arr1d):
arr.take([-1, 1], allow_fill=True, fill_value=Period("2014Q1"))
tz = None if dti.tz is not None else "US/Eastern"
- now = Timestamp.now().tz_localize(tz)
+ now = fixed_now_ts.tz_localize(tz)
msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
with pytest.raises(TypeError, match=msg):
# Timestamp with mismatched tz-awareness
@@ -879,7 +877,7 @@ def test_take_fill_valid(self, arr1d):
if arr.tz is not None:
# GH#37356
# Assuming here that arr1d fixture does not include Australia/Melbourne
- value = Timestamp.now().tz_localize("Australia/Melbourne")
+ value = fixed_now_ts.tz_localize("Australia/Melbourne")
msg = "Timezones don't match. .* != 'Australia/Melbourne'"
with pytest.raises(ValueError, match=msg):
# require tz match, not just tzawareness match
@@ -1033,7 +1031,7 @@ def test_array_interface(self, timedelta_index):
expected = np.asarray(arr).astype(dtype)
tm.assert_numpy_array_equal(result, expected)
- def test_take_fill_valid(self, timedelta_index):
+ def test_take_fill_valid(self, timedelta_index, fixed_now_ts):
tdi = timedelta_index
arr = TimedeltaArray(tdi)
@@ -1041,14 +1039,13 @@ def test_take_fill_valid(self, timedelta_index):
result = arr.take([-1, 1], allow_fill=True, fill_value=td1)
assert result[0] == td1
- now = Timestamp.now()
- value = now
+ value = fixed_now_ts
msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. Got"
with pytest.raises(TypeError, match=msg):
# fill_value Timestamp invalid
arr.take([0, 1], allow_fill=True, fill_value=value)
- value = now.to_period("D")
+ value = fixed_now_ts.to_period("D")
with pytest.raises(TypeError, match=msg):
# fill_value Period invalid
arr.take([0, 1], allow_fill=True, fill_value=value)
@@ -1362,7 +1359,7 @@ def test_from_pandas_array(dtype):
expected = cls._from_sequence(data)
tm.assert_extension_array_equal(result, expected)
- func = {"M8[ns]": sequence_to_dt64ns, "m8[ns]": sequence_to_td64ns}[dtype]
+ func = {"M8[ns]": _sequence_to_dt64ns, "m8[ns]": sequence_to_td64ns}[dtype]
result = func(arr)[0]
expected = func(data)[0]
tm.assert_equal(result, expected)
@@ -1425,16 +1422,18 @@ def test_from_obscure_array(dtype, array_likes):
result = cls._from_sequence(data)
tm.assert_extension_array_equal(result, expected)
- func = {"M8[ns]": sequence_to_dt64ns, "m8[ns]": sequence_to_td64ns}[dtype]
+ func = {"M8[ns]": _sequence_to_dt64ns, "m8[ns]": sequence_to_td64ns}[dtype]
result = func(arr)[0]
expected = func(data)[0]
tm.assert_equal(result, expected)
- # FIXME: dask and memoryview both break on these
- # func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype]
- # result = func(arr).array
- # expected = func(data).array
- # tm.assert_equal(result, expected)
+ if not isinstance(data, memoryview):
+ # FIXME(GH#44431) these raise on memoryview and attempted fix
+ # fails on py3.10
+ func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype]
+ result = func(arr).array
+ expected = func(data).array
+ tm.assert_equal(result, expected)
# Let's check the Indexes while we're here
idx_cls = {"M8[ns]": DatetimeIndex, "m8[ns]": TimedeltaIndex}[dtype]
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 5b9df44f5b565..d905c55c4553a 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -33,9 +33,13 @@ def test_cmp_dt64_arraylike_tznaive(self, comparison_op):
result = op(arr, arr)
tm.assert_numpy_array_equal(result, expected)
- for other in [right, np.array(right)]:
- # TODO: add list and tuple, and object-dtype once those
- # are fixed in the constructor
+ for other in [
+ right,
+ np.array(right),
+ list(right),
+ tuple(right),
+ right.astype(object),
+ ]:
result = op(arr, other)
tm.assert_numpy_array_equal(result, expected)
@@ -142,9 +146,9 @@ def test_setitem_clears_freq(self):
@pytest.mark.parametrize(
"obj",
[
- pd.Timestamp.now(),
- pd.Timestamp.now().to_datetime64(),
- pd.Timestamp.now().to_pydatetime(),
+ pd.Timestamp("2021-01-01"),
+ pd.Timestamp("2021-01-01").to_datetime64(),
+ pd.Timestamp("2021-01-01").to_pydatetime(),
],
)
def test_setitem_objects(self, obj):
@@ -325,7 +329,7 @@ def test_searchsorted_tzawareness_compat(self, index):
"invalid",
np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9,
np.arange(10).view("timedelta64[ns]") * 24 * 3600 * 10 ** 9,
- pd.Timestamp.now().to_period("D"),
+ pd.Timestamp("2021-01-01").to_period("D"),
],
)
@pytest.mark.parametrize("index", [True, False])
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index 98329776242f1..b9ddac92c0a47 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -55,11 +55,11 @@ def test_setitem_objects(self, obj):
np.int64(1),
1.0,
np.datetime64("NaT"),
- pd.Timestamp.now(),
+ pd.Timestamp("2021-01-01"),
"invalid",
np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9,
(np.arange(10) * 24 * 3600 * 10 ** 9).view("datetime64[ns]"),
- pd.Timestamp.now().to_period("D"),
+ pd.Timestamp("2021-01-01").to_period("D"),
],
)
@pytest.mark.parametrize("index", [True, False])
@@ -99,9 +99,11 @@ def test_pos(self):
result = +arr
tm.assert_timedelta_array_equal(result, arr)
+ assert not np.shares_memory(result._ndarray, arr._ndarray)
result2 = np.positive(arr)
tm.assert_timedelta_array_equal(result2, arr)
+ assert not np.shares_memory(result2._ndarray, arr._ndarray)
def test_neg(self):
vals = np.array([-3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]")
diff --git a/pandas/tests/arrays/timedeltas/test_reductions.py b/pandas/tests/arrays/timedeltas/test_reductions.py
index 9e854577f7e3c..586a9187fc169 100644
--- a/pandas/tests/arrays/timedeltas/test_reductions.py
+++ b/pandas/tests/arrays/timedeltas/test_reductions.py
@@ -127,9 +127,9 @@ def test_sum_2d_skipna_false(self):
"add",
[
Timedelta(0),
- pd.Timestamp.now(),
- pd.Timestamp.now("UTC"),
- pd.Timestamp.now("Asia/Tokyo"),
+ pd.Timestamp("2021-01-01"),
+ pd.Timestamp("2021-01-01", tz="UTC"),
+ pd.Timestamp("2021-01-01", tz="Asia/Tokyo"),
],
)
def test_std(self, add):
diff --git a/pandas/tests/base/test_unique.py b/pandas/tests/base/test_unique.py
index 95e07583bab66..31f2aebcba4ba 100644
--- a/pandas/tests/base/test_unique.py
+++ b/pandas/tests/base/test_unique.py
@@ -1,12 +1,7 @@
import numpy as np
import pytest
-from pandas._libs import iNaT
-
-from pandas.core.dtypes.common import (
- is_datetime64tz_dtype,
- needs_i8_conversion,
-)
+from pandas.core.dtypes.common import is_datetime64tz_dtype
import pandas as pd
from pandas import NumericIndex
@@ -49,11 +44,8 @@ def test_unique_null(null_obj, index_or_series_obj):
elif isinstance(obj, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
- values = obj.values
- if needs_i8_conversion(obj.dtype):
- values[0:2] = iNaT
- else:
- values[0:2] = null_obj
+ values = obj._values
+ values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
diff --git a/pandas/tests/base/test_value_counts.py b/pandas/tests/base/test_value_counts.py
index 23bb4c5d2670c..ddb21408a1a04 100644
--- a/pandas/tests/base/test_value_counts.py
+++ b/pandas/tests/base/test_value_counts.py
@@ -1,6 +1,5 @@
import collections
from datetime import timedelta
-from io import StringIO
import numpy as np
import pytest
@@ -190,19 +189,21 @@ def test_value_counts_datetime64(index_or_series):
# GH 3002, datetime64[ns]
# don't test names though
- txt = "\n".join(
- [
- "xxyyzz20100101PIE",
- "xxyyzz20100101GUM",
- "xxyyzz20100101EGG",
- "xxyyww20090101EGG",
- "foofoo20080909PIE",
- "foofoo20080909GUM",
- ]
- )
- f = StringIO(txt)
- df = pd.read_fwf(
- f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
+ df = pd.DataFrame(
+ {
+ "person_id": ["xxyyzz", "xxyyzz", "xxyyzz", "xxyyww", "foofoo", "foofoo"],
+ "dt": pd.to_datetime(
+ [
+ "2010-01-01",
+ "2010-01-01",
+ "2010-01-01",
+ "2009-01-01",
+ "2008-09-09",
+ "2008-09-09",
+ ]
+ ),
+ "food": ["PIE", "GUM", "EGG", "EGG", "PIE", "GUM"],
+ }
)
s = klass(df["dt"].copy())
diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py
index 6d6aa08204c3f..5e76fa4cbdb4f 100644
--- a/pandas/tests/computation/test_compat.py
+++ b/pandas/tests/computation/test_compat.py
@@ -29,13 +29,13 @@ def test_compat():
@pytest.mark.parametrize("parser", expr.PARSERS)
def test_invalid_numexpr_version(engine, parser):
def testit():
- a, b = 1, 2 # noqa
+ a, b = 1, 2 # noqa:F841
res = pd.eval("a + b", engine=engine, parser=parser)
assert res == 3
if engine == "numexpr":
try:
- import numexpr as ne # noqa F401
+ import numexpr as ne # noqa:F401
except ImportError:
pytest.skip("no numexpr")
else:
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index dfdb9dabf6bed..5c614dac2bcb9 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -704,7 +704,7 @@ def test_identical(self):
tm.assert_numpy_array_equal(result, np.array([1.5]))
assert result.shape == (1,)
- x = np.array([False]) # noqa
+ x = np.array([False]) # noqa:F841
result = pd.eval("x", engine=self.engine, parser=self.parser)
tm.assert_numpy_array_equal(result, np.array([False]))
assert result.shape == (1,)
@@ -1239,7 +1239,7 @@ def test_truediv(self):
assert res == expec
def test_failing_subscript_with_name_error(self):
- df = DataFrame(np.random.randn(5, 3)) # noqa
+ df = DataFrame(np.random.randn(5, 3)) # noqa:F841
with pytest.raises(NameError, match="name 'x' is not defined"):
self.eval("df[x > 2] > 2")
@@ -1304,7 +1304,7 @@ def test_assignment_column(self):
# with a local name overlap
def f():
df = orig_df.copy()
- a = 1 # noqa
+ a = 1 # noqa:F841
df.eval("a = 1 + b", inplace=True)
return df
@@ -1316,7 +1316,7 @@ def f():
df = orig_df.copy()
def f():
- a = 1 # noqa
+ a = 1 # noqa:F841
old_a = df.a.copy()
df.eval("a = a + b", inplace=True)
result = old_a + df.b
@@ -1629,7 +1629,7 @@ class TestOperationsNumExprPython(TestOperationsNumExprPandas):
parser = "python"
def test_check_many_exprs(self):
- a = 1 # noqa
+ a = 1 # noqa:F841
expr = " * ".join("a" * 33)
expected = 1
res = pd.eval(expr, engine=self.engine, parser=self.parser)
@@ -1669,14 +1669,14 @@ def test_fails_not(self):
)
def test_fails_ampersand(self):
- df = DataFrame(np.random.randn(5, 3)) # noqa
+ df = DataFrame(np.random.randn(5, 3)) # noqa:F841
ex = "(df + 2)[df > 1] > 0 & (df > 0)"
msg = "cannot evaluate scalar only bool ops"
with pytest.raises(NotImplementedError, match=msg):
pd.eval(ex, parser=self.parser, engine=self.engine)
def test_fails_pipe(self):
- df = DataFrame(np.random.randn(5, 3)) # noqa
+ df = DataFrame(np.random.randn(5, 3)) # noqa:F841
ex = "(df + 2)[df > 1] > 0 | (df > 0)"
msg = "cannot evaluate scalar only bool ops"
with pytest.raises(NotImplementedError, match=msg):
@@ -1851,7 +1851,7 @@ def test_no_new_locals(self, engine, parser):
assert lcls == lcls2
def test_no_new_globals(self, engine, parser):
- x = 1 # noqa
+ x = 1 # noqa:F841
gbls = globals().copy()
pd.eval("x + 1", engine=engine, parser=parser)
gbls2 = globals().copy()
@@ -1936,7 +1936,7 @@ def test_name_error_exprs(engine, parser):
@pytest.mark.parametrize("express", ["a + @b", "@a + b", "@a + @b"])
def test_invalid_local_variable_reference(engine, parser, express):
- a, b = 1, 2 # noqa
+ a, b = 1, 2 # noqa:F841
if parser != "pandas":
with pytest.raises(SyntaxError, match="The '@' prefix is only"):
@@ -1980,7 +1980,7 @@ def test_more_than_one_expression_raises(engine, parser):
def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser):
gen = {int: lambda: np.random.randint(10), float: np.random.randn}
- mid = gen[lhs]() # noqa
+ mid = gen[lhs]() # noqa:F841
lhs = gen[lhs]()
rhs = gen[rhs]()
diff --git a/pandas/tests/dtypes/cast/test_construct_from_scalar.py b/pandas/tests/dtypes/cast/test_construct_from_scalar.py
index eccd838a11331..0ce04ce2e64cd 100644
--- a/pandas/tests/dtypes/cast/test_construct_from_scalar.py
+++ b/pandas/tests/dtypes/cast/test_construct_from_scalar.py
@@ -7,7 +7,6 @@
from pandas import (
Categorical,
Timedelta,
- Timestamp,
)
import pandas._testing as tm
@@ -25,9 +24,9 @@ def test_cast_1d_array_like_from_scalar_categorical():
tm.assert_categorical_equal(result, expected)
-def test_cast_1d_array_like_from_timestamp():
+def test_cast_1d_array_like_from_timestamp(fixed_now_ts):
# check we dont lose nanoseconds
- ts = Timestamp.now() + Timedelta(1)
+ ts = fixed_now_ts + Timedelta(1)
res = construct_1d_arraylike_from_scalar(ts, 2, np.dtype("M8[ns]"))
assert res[0] == ts
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index e8a3b5d28ee63..a514a9ce9b0e4 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -411,7 +411,7 @@ def test_maybe_promote_any_with_datetime64(
# Casting date to dt64 is deprecated
warn = FutureWarning
- with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
+ with tm.assert_produces_warning(warn, match=msg):
# stacklevel is chosen to make sense when called from higher-level functions
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
diff --git a/pandas/tests/dtypes/test_concat.py b/pandas/tests/dtypes/test_concat.py
index a749955d35494..f624c56b54001 100644
--- a/pandas/tests/dtypes/test_concat.py
+++ b/pandas/tests/dtypes/test_concat.py
@@ -26,3 +26,21 @@ def test_concat_single_dataframe_tz_aware(copy):
expected = df.copy()
result = pd.concat([df], copy=copy)
tm.assert_frame_equal(result, expected)
+
+
+def test_concat_periodarray_2d():
+ pi = pd.period_range("2016-01-01", periods=36, freq="D")
+ arr = pi._data.reshape(6, 6)
+
+ result = _concat.concat_compat([arr[:2], arr[2:]], axis=0)
+ tm.assert_period_array_equal(result, arr)
+
+ result = _concat.concat_compat([arr[:, :2], arr[:, 2:]], axis=1)
+ tm.assert_period_array_equal(result, arr)
+
+ msg = "all the input array dimensions for the concatenation axis must match exactly"
+ with pytest.raises(ValueError, match=msg):
+ _concat.concat_compat([arr[:, :2], arr[:, 2:]], axis=0)
+
+ with pytest.raises(ValueError, match=msg):
+ _concat.concat_compat([arr[:2], arr[2:]], axis=1)
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 64a635707c2ff..5936248456ca7 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -1429,9 +1429,11 @@ def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(["foo", "bar"])
assert not func(arr)
+ assert not func(arr.reshape(2, 1))
arr = np.array([1, 2])
assert not func(arr)
+ assert not func(arr.reshape(2, 1))
def test_date(self):
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index 55d0e5e73418e..1917fc615118a 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -8,10 +8,7 @@
from pandas._config import config as cf
from pandas._libs import missing as libmissing
-from pandas._libs.tslibs import (
- iNaT,
- is_null_datetimelike,
-)
+from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.common import (
is_float,
@@ -44,8 +41,8 @@
import pandas._testing as tm
from pandas.core.api import Float64Index
-now = pd.Timestamp.now()
-utcnow = pd.Timestamp.now("UTC")
+fix_now = pd.Timestamp("2021-01-01")
+fix_utcnow = pd.Timestamp("2021-01-01", tz="UTC")
@pytest.mark.parametrize("notna_f", [notna, notnull])
@@ -467,12 +464,12 @@ def test_array_equivalent_different_dtype_but_equal():
# There are 3 variants for each of lvalue and rvalue. We include all
# three for the tz-naive `now` and exclude the datetim64 variant
# for utcnow because it drops tzinfo.
- (now, utcnow),
- (now.to_datetime64(), utcnow),
- (now.to_pydatetime(), utcnow),
- (now, utcnow),
- (now.to_datetime64(), utcnow.to_pydatetime()),
- (now.to_pydatetime(), utcnow.to_pydatetime()),
+ (fix_now, fix_utcnow),
+ (fix_now.to_datetime64(), fix_utcnow),
+ (fix_now.to_pydatetime(), fix_utcnow),
+ (fix_now, fix_utcnow),
+ (fix_now.to_datetime64(), fix_utcnow.to_pydatetime()),
+ (fix_now.to_pydatetime(), fix_utcnow.to_pydatetime()),
],
)
def test_array_equivalent_tzawareness(lvalue, rvalue):
@@ -565,21 +562,19 @@ def test_na_value_for_dtype(dtype, na_value):
class TestNAObj:
-
- _1d_methods = ["isnaobj", "isnaobj_old"]
- _2d_methods = ["isnaobj2d", "isnaobj2d_old"]
-
def _check_behavior(self, arr, expected):
- for method in TestNAObj._1d_methods:
- result = getattr(libmissing, method)(arr)
- tm.assert_numpy_array_equal(result, expected)
+ result = libmissing.isnaobj(arr)
+ tm.assert_numpy_array_equal(result, expected)
+ result = libmissing.isnaobj(arr, inf_as_na=True)
+ tm.assert_numpy_array_equal(result, expected)
arr = np.atleast_2d(arr)
expected = np.atleast_2d(expected)
- for method in TestNAObj._2d_methods:
- result = getattr(libmissing, method)(arr)
- tm.assert_numpy_array_equal(result, expected)
+ result = libmissing.isnaobj2d(arr)
+ tm.assert_numpy_array_equal(result, expected)
+ result = libmissing.isnaobj2d(arr, inf_as_na=True)
+ tm.assert_numpy_array_equal(result, expected)
def test_basic(self):
arr = np.array([1, None, "foo", -5.1, NaT, np.nan])
@@ -676,36 +671,16 @@ def test_checknull(self, func):
def test_checknull_old(self):
for value in na_vals + sometimes_na_vals:
- assert libmissing.checknull_old(value)
+ assert libmissing.checknull(value, inf_as_na=True)
for value in inf_vals:
- assert libmissing.checknull_old(value)
+ assert libmissing.checknull(value, inf_as_na=True)
for value in int_na_vals:
- assert not libmissing.checknull_old(value)
-
- for value in never_na_vals:
- assert not libmissing.checknull_old(value)
-
- def test_is_null_datetimelike(self):
- for value in na_vals:
- assert is_null_datetimelike(value)
- assert is_null_datetimelike(value, False)
-
- for value in inf_vals:
- assert not is_null_datetimelike(value)
- assert not is_null_datetimelike(value, False)
-
- for value in int_na_vals:
- assert is_null_datetimelike(value)
- assert not is_null_datetimelike(value, False)
-
- for value in sometimes_na_vals:
- assert not is_null_datetimelike(value)
- assert not is_null_datetimelike(value, False)
+ assert not libmissing.checknull(value, inf_as_na=True)
for value in never_na_vals:
- assert not is_null_datetimelike(value)
+ assert not libmissing.checknull(value, inf_as_na=True)
def test_is_matching_na(self, nulls_fixture, nulls_fixture2):
left = nulls_fixture
diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py
index d262f09182a9c..320bfc13f7032 100644
--- a/pandas/tests/extension/arrow/test_bool.py
+++ b/pandas/tests/extension/arrow/test_bool.py
@@ -6,7 +6,7 @@
from pandas.api.types import is_bool_dtype
from pandas.tests.extension import base
-pytest.importorskip("pyarrow", minversion="0.13.0")
+pytest.importorskip("pyarrow", minversion="1.0.1")
from pandas.tests.extension.arrow.arrays import ( # isort:skip
ArrowBoolArray,
diff --git a/pandas/tests/extension/arrow/test_timestamp.py b/pandas/tests/extension/arrow/test_timestamp.py
index c61cc30950a23..fe2c484731019 100644
--- a/pandas/tests/extension/arrow/test_timestamp.py
+++ b/pandas/tests/extension/arrow/test_timestamp.py
@@ -12,7 +12,7 @@
register_extension_dtype,
)
-pytest.importorskip("pyarrow", minversion="0.13.0")
+pytest.importorskip("pyarrow", minversion="1.0.1")
import pyarrow as pa # isort:skip
diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py
index 6e4ed7b77cad8..4ba315eeaeb15 100644
--- a/pandas/tests/extension/base/constructors.py
+++ b/pandas/tests/extension/base/constructors.py
@@ -3,10 +3,7 @@
import pandas as pd
from pandas.api.extensions import ExtensionArray
-from pandas.core.internals.blocks import (
- DatetimeTZBlock,
- ExtensionBlock,
-)
+from pandas.core.internals.blocks import EABackedBlock
from pandas.tests.extension.base.base import BaseExtensionTests
@@ -29,14 +26,14 @@ def test_series_constructor(self, data):
assert result.dtype == data.dtype
assert len(result) == len(data)
if hasattr(result._mgr, "blocks"):
- assert isinstance(result._mgr.blocks[0], (ExtensionBlock, DatetimeTZBlock))
+ assert isinstance(result._mgr.blocks[0], EABackedBlock)
assert result._mgr.array is data
# Series[EA] is unboxed / boxed correctly
result2 = pd.Series(result)
assert result2.dtype == data.dtype
if hasattr(result._mgr, "blocks"):
- assert isinstance(result2._mgr.blocks[0], (ExtensionBlock, DatetimeTZBlock))
+ assert isinstance(result2._mgr.blocks[0], EABackedBlock)
def test_series_constructor_no_data_with_index(self, dtype, na_value):
result = pd.Series(index=[1, 2, 3], dtype=dtype)
@@ -71,7 +68,7 @@ def test_dataframe_constructor_from_dict(self, data, from_series):
assert result.dtypes["A"] == data.dtype
assert result.shape == (len(data), 1)
if hasattr(result._mgr, "blocks"):
- assert isinstance(result._mgr.blocks[0], (ExtensionBlock, DatetimeTZBlock))
+ assert isinstance(result._mgr.blocks[0], EABackedBlock)
assert isinstance(result._mgr.arrays[0], ExtensionArray)
def test_dataframe_from_series(self, data):
@@ -79,7 +76,7 @@ def test_dataframe_from_series(self, data):
assert result.dtypes[0] == data.dtype
assert result.shape == (len(data), 1)
if hasattr(result._mgr, "blocks"):
- assert isinstance(result._mgr.blocks[0], (ExtensionBlock, DatetimeTZBlock))
+ assert isinstance(result._mgr.blocks[0], EABackedBlock)
assert isinstance(result._mgr.arrays[0], ExtensionArray)
def test_series_given_mismatched_index_raises(self, data):
diff --git a/pandas/tests/extension/base/dim2.py b/pandas/tests/extension/base/dim2.py
index b4a817cbc37ec..a86c07c604320 100644
--- a/pandas/tests/extension/base/dim2.py
+++ b/pandas/tests/extension/base/dim2.py
@@ -4,6 +4,7 @@
import numpy as np
import pytest
+from pandas._libs.missing import is_matching_na
from pandas.compat import (
IS64,
is_platform_windows,
@@ -14,6 +15,13 @@
class Dim2CompatTests(BaseExtensionTests):
+ def test_frame_from_2d_array(self, data):
+ arr2d = data.repeat(2).reshape(-1, 2)
+
+ df = pd.DataFrame(arr2d)
+ expected = pd.DataFrame({0: arr2d[:, 0], 1: arr2d[:, 1]})
+ self.assert_frame_equal(df, expected)
+
def test_swapaxes(self, data):
arr2d = data.repeat(2).reshape(-1, 2)
@@ -114,21 +122,23 @@ def test_tolist_2d(self, data):
assert result == expected
def test_concat_2d(self, data):
- left = data.reshape(-1, 1)
+ left = type(data)._concat_same_type([data, data]).reshape(-1, 2)
right = left.copy()
# axis=0
result = left._concat_same_type([left, right], axis=0)
- expected = data._concat_same_type([data, data]).reshape(-1, 1)
+ expected = data._concat_same_type([data] * 4).reshape(-1, 2)
self.assert_extension_array_equal(result, expected)
# axis=1
result = left._concat_same_type([left, right], axis=1)
- expected = data.repeat(2).reshape(-1, 2)
- self.assert_extension_array_equal(result, expected)
+ assert result.shape == (len(data), 4)
+ self.assert_extension_array_equal(result[:, :2], left)
+ self.assert_extension_array_equal(result[:, 2:], right)
# axis > 1 -> invalid
- with pytest.raises(ValueError):
+ msg = "axis 2 is out of bounds for array of dimension 2"
+ with pytest.raises(ValueError, match=msg):
left._concat_same_type([left, right], axis=2)
@pytest.mark.parametrize("method", ["backfill", "pad"])
@@ -168,7 +178,7 @@ def test_reductions_2d_axis_none(self, data, method, request):
assert type(err_result) == type(err_expected)
return
- assert result == expected # TODO: or matching NA
+ assert is_matching_na(result, expected) or result == expected
@pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
def test_reductions_2d_axis0(self, data, method, request):
@@ -247,8 +257,5 @@ def test_reductions_2d_axis1(self, data, method, request):
# not necessarily type/dtype-preserving, so weaker assertions
assert result.shape == (1,)
expected_scalar = getattr(data, method)()
- if pd.isna(result[0]):
- # TODO: require matching NA
- assert pd.isna(expected_scalar), expected_scalar
- else:
- assert result[0] == expected_scalar
+ res = result[0]
+ assert is_matching_na(res, expected_scalar) or res == expected_scalar
diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py
index 8f241679d5108..b7bb4c95372cc 100644
--- a/pandas/tests/extension/base/reshaping.py
+++ b/pandas/tests/extension/base/reshaping.py
@@ -3,12 +3,6 @@
import numpy as np
import pytest
-from pandas.core.dtypes.common import (
- is_datetime64tz_dtype,
- is_interval_dtype,
- is_period_dtype,
-)
-
import pandas as pd
from pandas.api.extensions import ExtensionArray
from pandas.core.internals import ExtensionBlock
@@ -327,17 +321,11 @@ def test_unstack(self, data, index, obj):
expected = ser.astype(object).unstack(
level=level, fill_value=data.dtype.na_value
)
- if obj == "series":
- # TODO: special cases belong in dtype-specific tests
- if is_datetime64tz_dtype(data.dtype):
- assert expected.dtypes.apply(is_datetime64tz_dtype).all()
- expected = expected.astype(object)
- if is_period_dtype(data.dtype):
- assert expected.dtypes.apply(is_period_dtype).all()
- expected = expected.astype(object)
- if is_interval_dtype(data.dtype):
- assert expected.dtypes.apply(is_interval_dtype).all()
- expected = expected.astype(object)
+ if obj == "series" and not isinstance(ser.dtype, pd.SparseDtype):
+ # GH#34457 SparseArray.astype(object) gives Sparse[object]
+ # instead of np.dtype(object)
+ assert (expected.dtypes == object).all()
+
result = result.astype(object)
self.assert_frame_equal(result, expected)
diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py
index de5a6b7a5bb06..f13b24fabaf34 100644
--- a/pandas/tests/extension/test_datetime.py
+++ b/pandas/tests/extension/test_datetime.py
@@ -108,10 +108,6 @@ class TestGetitem(BaseDatetimeTests, base.BaseGetitemTests):
class TestMethods(BaseDatetimeTests, base.BaseMethodsTests):
- @pytest.mark.skip(reason="Incorrect expected")
- def test_value_counts(self, all_data, dropna):
- pass
-
def test_combine_add(self, data_repeated):
# Timestamp.__add__(Timestamp) not defined
pass
@@ -140,23 +136,23 @@ def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
if all_arithmetic_operators in self.implements:
- s = pd.Series(data)
- self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None)
+ ser = pd.Series(data)
+ self.check_opname(ser, all_arithmetic_operators, ser.iloc[0], exc=None)
else:
# ... but not the rest.
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
def test_add_series_with_extension_array(self, data):
# Datetime + Datetime not implemented
- s = pd.Series(data)
+ ser = pd.Series(data)
msg = "cannot add DatetimeArray and DatetimeArray"
with pytest.raises(TypeError, match=msg):
- s + data
+ ser + data
def test_arith_series_with_array(self, data, all_arithmetic_operators):
if all_arithmetic_operators in self.implements:
- s = pd.Series(data)
- self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None)
+ ser = pd.Series(data)
+ self.check_opname(ser, all_arithmetic_operators, ser.iloc[0], exc=None)
else:
# ... but not the rest.
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index 012a3fbb12cac..1f22feff018ec 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -179,12 +179,12 @@ def test_transpose(self, data):
class TestGetitem(BaseSparseTests, base.BaseGetitemTests):
def test_get(self, data):
- s = pd.Series(data, index=[2 * i for i in range(len(data))])
- if np.isnan(s.values.fill_value):
- assert np.isnan(s.get(4)) and np.isnan(s.iloc[2])
+ ser = pd.Series(data, index=[2 * i for i in range(len(data))])
+ if np.isnan(ser.values.fill_value):
+ assert np.isnan(ser.get(4)) and np.isnan(ser.iloc[2])
else:
- assert s.get(4) == s.iloc[2]
- assert s.get(2) == s.iloc[1]
+ assert ser.get(4) == ser.iloc[2]
+ assert ser.get(2) == ser.iloc[1]
def test_reindex(self, data, na_value):
self._check_unsupported(data)
@@ -454,8 +454,8 @@ def _compare_other(self, s, data, comparison_op, other):
tm.assert_series_equal(result, expected)
# series
- s = pd.Series(data)
- result = op(s, other)
+ ser = pd.Series(data)
+ result = op(ser, other)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py
index 3729f921f59ad..b512664b57ade 100644
--- a/pandas/tests/frame/conftest.py
+++ b/pandas/tests/frame/conftest.py
@@ -259,3 +259,26 @@ def frame_of_index_cols():
}
)
return df
+
+
+@pytest.fixture(
+ params=[
+ "any",
+ "all",
+ "count",
+ "sum",
+ "prod",
+ "max",
+ "min",
+ "mean",
+ "median",
+ "skew",
+ "kurt",
+ "sem",
+ "var",
+ "std",
+ "mad",
+ ]
+)
+def reduction_functions(request):
+ return request.param
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 942da38dc5a26..40e6500fce64b 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -800,7 +800,7 @@ def test_setitem_single_column_mixed_datetime(self):
assert df["timestamp"].dtype == np.object_
assert df.loc["b", "timestamp"] == iNaT
- # allow this syntax
+ # allow this syntax (as of GH#3216)
df.loc["c", "timestamp"] = np.nan
assert isna(df.loc["c", "timestamp"])
@@ -808,12 +808,6 @@ def test_setitem_single_column_mixed_datetime(self):
df.loc["d", :] = np.nan
assert not isna(df.loc["c", :]).all()
- # FIXME: don't leave commented-out
- # as of GH 3216 this will now work!
- # try to set with a list like item
- # pytest.raises(
- # Exception, df.loc.__setitem__, ('d', 'timestamp'), [np.nan])
-
def test_setitem_mixed_datetime(self):
# GH 9336
expected = DataFrame(
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 389bf56ab6035..597216f55e444 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -44,6 +44,19 @@
class TestDataFrameSetItem:
+ def test_setitem_str_subclass(self):
+ # GH#37366
+ class mystring(str):
+ pass
+
+ data = ["2020-10-22 01:21:00+00:00"]
+ index = DatetimeIndex(data)
+ df = DataFrame({"a": [1]}, index=index)
+ df["b"] = 2
+ df[mystring("c")] = 3
+ expected = DataFrame({"a": [1], "b": [2], mystring("c"): [3]}, index=index)
+ tm.assert_equal(df, expected)
+
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
@@ -715,8 +728,6 @@ def test_setitem_object_array_of_tzaware_datetimes(self, idx, expected):
class TestDataFrameSetItemWithExpansion:
- # TODO(ArrayManager) update parent (_maybe_update_cacher)
- @td.skip_array_manager_not_yet_implemented
def test_setitem_listlike_views(self):
# GH#38148
df = DataFrame({"a": [1, 2, 3], "b": [4, 4, 6]})
@@ -936,7 +947,7 @@ def test_setitem_mask_categorical(self):
df = DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
# issue #37643 inplace kwarg deprecated
return_value = exp_fancy["cats"].cat.set_categories(
["a", "b", "c"], inplace=True
diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py
index b675e9d703f44..0906186418c0a 100644
--- a/pandas/tests/frame/indexing/test_where.py
+++ b/pandas/tests/frame/indexing/test_where.py
@@ -688,6 +688,16 @@ def test_where_ea_other(self):
result = df.where(mask, ser2, axis=1)
tm.assert_frame_equal(result, expected)
+ def test_where_interval_noop(self):
+ # GH#44181
+ df = DataFrame([pd.Interval(0, 0)])
+ res = df.where(df.notna())
+ tm.assert_frame_equal(res, df)
+
+ ser = df[0]
+ res = ser.where(ser.notna())
+ tm.assert_series_equal(res, ser)
+
def test_where_try_cast_deprecated(frame_or_series):
obj = DataFrame(np.random.randn(4, 3))
diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py
index 9f1f953cecc7e..feee03bbb91a2 100644
--- a/pandas/tests/frame/methods/test_astype.py
+++ b/pandas/tests/frame/methods/test_astype.py
@@ -222,10 +222,13 @@ def test_astype_dict_like(self, dtype_class):
# in the keys of the dtype dict
dt4 = dtype_class({"b": str, 2: str})
dt5 = dtype_class({"e": str})
- msg = "Only a column name can be used for the key in a dtype mappings argument"
- with pytest.raises(KeyError, match=msg):
+ msg_frame = (
+ "Only a column name can be used for the key in a dtype mappings argument. "
+ "'{}' not found in columns."
+ )
+ with pytest.raises(KeyError, match=msg_frame.format(2)):
df.astype(dt4)
- with pytest.raises(KeyError, match=msg):
+ with pytest.raises(KeyError, match=msg_frame.format("e")):
df.astype(dt5)
tm.assert_frame_equal(df, original)
@@ -261,6 +264,26 @@ def test_astype_duplicate_col(self):
expected = concat([a1_str, b, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
+ def test_astype_duplicate_col_series_arg(self):
+ # GH#44417
+ vals = np.random.randn(3, 4)
+ df = DataFrame(vals, columns=["A", "B", "C", "A"])
+ dtypes = df.dtypes
+ dtypes.iloc[0] = str
+ dtypes.iloc[2] = "Float64"
+
+ result = df.astype(dtypes)
+ expected = DataFrame(
+ {
+ 0: vals[:, 0].astype(str),
+ 1: vals[:, 1],
+ 2: pd.array(vals[:, 2], dtype="Float64"),
+ 3: vals[:, 3],
+ }
+ )
+ expected.columns = df.columns
+ tm.assert_frame_equal(result, expected)
+
@pytest.mark.parametrize(
"dtype",
[
diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py
index 382c11f23a517..fc485f14a4820 100644
--- a/pandas/tests/frame/methods/test_combine_first.py
+++ b/pandas/tests/frame/methods/test_combine_first.py
@@ -209,15 +209,15 @@ def test_combine_first_align_nan(self):
)
tm.assert_frame_equal(res, exp)
assert res["a"].dtype == "datetime64[ns]"
- # ToDo: this must be int64
+ # TODO: this must be int64
assert res["b"].dtype == "int64"
res = dfa.iloc[:0].combine_first(dfb)
exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
tm.assert_frame_equal(res, exp)
- # ToDo: this must be datetime64
+ # TODO: this must be datetime64
assert res["a"].dtype == "float64"
- # ToDo: this must be int64
+ # TODO: this must be int64
assert res["b"].dtype == "int64"
def test_combine_first_timezone(self):
diff --git a/pandas/tests/frame/methods/test_diff.py b/pandas/tests/frame/methods/test_diff.py
index 5fd6928f11f44..f61529659e9d5 100644
--- a/pandas/tests/frame/methods/test_diff.py
+++ b/pandas/tests/frame/methods/test_diff.py
@@ -17,26 +17,31 @@ def test_diff_requires_integer(self):
with pytest.raises(ValueError, match="periods must be an integer"):
df.diff(1.5)
- def test_diff(self, datetime_frame):
- the_diff = datetime_frame.diff(1)
+ # GH#44572 np.int64 is accepted
+ @pytest.mark.parametrize("num", [1, np.int64(1)])
+ def test_diff(self, datetime_frame, num):
+ df = datetime_frame
+ the_diff = df.diff(num)
- tm.assert_series_equal(
- the_diff["A"], datetime_frame["A"] - datetime_frame["A"].shift(1)
- )
+ expected = df["A"] - df["A"].shift(num)
+ tm.assert_series_equal(the_diff["A"], expected)
+ def test_diff_int_dtype(self):
# int dtype
a = 10_000_000_000_000_000
b = a + 1
- s = Series([a, b])
+ ser = Series([a, b])
- rs = DataFrame({"s": s}).diff()
+ rs = DataFrame({"s": ser}).diff()
assert rs.s[1] == 1
+ def test_diff_mixed_numeric(self, datetime_frame):
# mixed numeric
tf = datetime_frame.astype("float32")
the_diff = tf.diff(1)
tm.assert_series_equal(the_diff["A"], tf["A"] - tf["A"].shift(1))
+ def test_diff_axis1_nonconsolidated(self):
# GH#10907
df = DataFrame({"y": Series([2]), "z": Series([3])})
df.insert(0, "x", 1)
diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py
index 7e486f9fac083..45a3bf9b145b9 100644
--- a/pandas/tests/frame/methods/test_fillna.py
+++ b/pandas/tests/frame/methods/test_fillna.py
@@ -232,7 +232,6 @@ def test_fillna_categorical_nan(self):
df = DataFrame({"a": Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=NaT), df)
- @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) implement downcast
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
@@ -258,7 +257,6 @@ def test_fillna_dictlike_value_duplicate_colnames(self, columns):
expected["A"] = 0.0
tm.assert_frame_equal(result, expected)
- @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) object upcasting
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
@@ -276,7 +274,6 @@ def test_fillna_dtype_conversion(self):
expected = DataFrame("nan", index=range(3), columns=["A", "B"])
tm.assert_frame_equal(result, expected)
- @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) object upcasting
@pytest.mark.parametrize("val", ["", 1, np.nan, 1.0])
def test_fillna_dtype_conversion_equiv_replace(self, val):
df = DataFrame({"A": [1, np.nan], "B": [1.0, 2.0]})
diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py
index 7a749f3705e35..93225ff1050a3 100644
--- a/pandas/tests/frame/methods/test_interpolate.py
+++ b/pandas/tests/frame/methods/test_interpolate.py
@@ -328,10 +328,13 @@ def test_interp_string_axis(self, axis_name, axis_number):
expected = df.interpolate(method="linear", axis=axis_number)
tm.assert_frame_equal(result, expected)
- @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) support axis=1
@pytest.mark.parametrize("method", ["ffill", "bfill", "pad"])
- def test_interp_fillna_methods(self, axis, method):
+ def test_interp_fillna_methods(self, request, axis, method, using_array_manager):
# GH 12918
+ if using_array_manager and (axis == 1 or axis == "columns"):
+ # TODO(ArrayManager) support axis=1
+ td.mark_array_manager_not_yet_implemented(request)
+
df = DataFrame(
{
"A": [1.0, 2.0, 3.0, 4.0, np.nan, 5.0],
diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py
index 30118d20f67a9..c6bfd94b84908 100644
--- a/pandas/tests/frame/methods/test_join.py
+++ b/pandas/tests/frame/methods/test_join.py
@@ -344,9 +344,7 @@ def test_merge_join_different_levels(self):
columns = ["a", "b", ("a", ""), ("c", "c1")]
expected = DataFrame(columns=columns, data=[[1, 11, 0, 44], [0, 22, 1, 33]])
msg = "merging between different levels is deprecated"
- with tm.assert_produces_warning(
- FutureWarning, match=msg, check_stacklevel=False
- ):
+ with tm.assert_produces_warning(FutureWarning, match=msg):
# stacklevel is chosen to be correct for pd.merge, not DataFrame.join
result = df1.join(df2, on="a")
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py
index 2e6318955e119..ed1623cd87aac 100644
--- a/pandas/tests/frame/methods/test_quantile.py
+++ b/pandas/tests/frame/methods/test_quantile.py
@@ -1,6 +1,8 @@
import numpy as np
import pytest
+from pandas.compat.numpy import np_percentile_argname
+
import pandas as pd
from pandas import (
DataFrame,
@@ -153,7 +155,10 @@ def test_quantile_interpolation(self):
# cross-check interpolation=nearest results in original dtype
exp = np.percentile(
- np.array([[1, 2, 3], [2, 3, 4]]), 0.5, axis=0, interpolation="nearest"
+ np.array([[1, 2, 3], [2, 3, 4]]),
+ 0.5,
+ axis=0,
+ **{np_percentile_argname: "nearest"},
)
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="int64")
tm.assert_series_equal(result, expected)
@@ -167,7 +172,7 @@ def test_quantile_interpolation(self):
np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]]),
0.5,
axis=0,
- interpolation="nearest",
+ **{np_percentile_argname: "nearest"},
)
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="float64")
tm.assert_series_equal(result, expected)
@@ -280,9 +285,13 @@ def test_quantile_datetime(self):
tm.assert_frame_equal(result, expected)
# empty when numeric_only=True
- # FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
- # result = df[['a', 'c']].quantile(.5)
- # result = df[['a', 'c']].quantile([.5])
+ result = df[["a", "c"]].quantile(0.5)
+ expected = Series([], index=[], dtype=np.float64, name=0.5)
+ tm.assert_series_equal(result, expected)
+
+ result = df[["a", "c"]].quantile([0.5])
+ expected = DataFrame(index=[0.5])
+ tm.assert_frame_equal(result, expected)
def test_quantile_invalid(self, datetime_frame):
msg = "percentiles should all be in the interval \\[0, 1\\]"
@@ -481,7 +490,7 @@ def test_quantile_nat(self):
)
tm.assert_frame_equal(res, exp)
- def test_quantile_empty_no_rows(self):
+ def test_quantile_empty_no_rows_floats(self):
# floats
df = DataFrame(columns=["a", "b"], dtype="float64")
@@ -494,21 +503,43 @@ def test_quantile_empty_no_rows(self):
exp = DataFrame([[np.nan, np.nan]], columns=["a", "b"], index=[0.5])
tm.assert_frame_equal(res, exp)
- # FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
- # res = df.quantile(0.5, axis=1)
- # res = df.quantile([0.5], axis=1)
+ res = df.quantile(0.5, axis=1)
+ exp = Series([], index=[], dtype="float64", name=0.5)
+ tm.assert_series_equal(res, exp)
+
+ res = df.quantile([0.5], axis=1)
+ exp = DataFrame(columns=[], index=[0.5])
+ tm.assert_frame_equal(res, exp)
+ def test_quantile_empty_no_rows_ints(self):
# ints
df = DataFrame(columns=["a", "b"], dtype="int64")
- # FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
- # res = df.quantile(0.5)
+ res = df.quantile(0.5)
+ exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5)
+ tm.assert_series_equal(res, exp)
+ def test_quantile_empty_no_rows_dt64(self):
# datetimes
df = DataFrame(columns=["a", "b"], dtype="datetime64[ns]")
- # FIXME (gives NaNs instead of NaT in 0.18.1 or 0.19.0)
- # res = df.quantile(0.5, numeric_only=False)
+ res = df.quantile(0.5, numeric_only=False)
+ exp = Series(
+ [pd.NaT, pd.NaT], index=["a", "b"], dtype="datetime64[ns]", name=0.5
+ )
+ tm.assert_series_equal(res, exp)
+
+ # Mixed dt64/dt64tz
+ df["a"] = df["a"].dt.tz_localize("US/Central")
+ res = df.quantile(0.5, numeric_only=False)
+ exp = exp.astype(object)
+ tm.assert_series_equal(res, exp)
+
+ # both dt64tz
+ df["b"] = df["b"].dt.tz_localize("US/Central")
+ res = df.quantile(0.5, numeric_only=False)
+ exp = exp.astype(df["b"].dtype)
+ tm.assert_series_equal(res, exp)
def test_quantile_empty_no_columns(self):
# GH#23925 _get_numeric_data may drop all columns
diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py
index 26ecf1356a946..1581bc8a0c70b 100644
--- a/pandas/tests/frame/methods/test_rename.py
+++ b/pandas/tests/frame/methods/test_rename.py
@@ -365,7 +365,6 @@ def test_rename_mapper_and_positional_arguments_raises(self):
with pytest.raises(TypeError, match=msg):
df.rename({}, columns={}, index={})
- @td.skip_array_manager_not_yet_implemented
def test_rename_with_duplicate_columns(self):
# GH#4403
df4 = DataFrame(
@@ -406,3 +405,14 @@ def test_rename_with_duplicate_columns(self):
],
).set_index(["STK_ID", "RPT_Date"], drop=False)
tm.assert_frame_equal(result, expected)
+
+ def test_rename_boolean_index(self):
+ df = DataFrame(np.arange(15).reshape(3, 5), columns=[False, True, 2, 3, 4])
+ mapper = {0: "foo", 1: "bar", 2: "bah"}
+ res = df.rename(index=mapper)
+ exp = DataFrame(
+ np.arange(15).reshape(3, 5),
+ columns=[False, True, 2, 3, 4],
+ index=["foo", "bar", "bah"],
+ )
+ tm.assert_frame_equal(res, exp)
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index 5e321ad33a2bb..d6ecdcd155295 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1,7 +1,6 @@
from __future__ import annotations
from datetime import datetime
-from io import StringIO
import re
import numpy as np
@@ -625,6 +624,15 @@ def test_replace_mixed3(self):
expected.iloc[1, 1] = m[1]
tm.assert_frame_equal(result, expected)
+ @pytest.mark.parametrize("dtype", ["boolean", "Int64", "Float64"])
+ def test_replace_with_nullable_column(self, dtype):
+ # GH-44499
+ nullable_ser = Series([1, 0, 1], dtype=dtype)
+ df = DataFrame({"A": ["A", "B", "x"], "B": nullable_ser})
+ result = df.replace("x", "X")
+ expected = DataFrame({"A": ["A", "B", "X"], "B": nullable_ser})
+ tm.assert_frame_equal(result, expected)
+
def test_replace_simple_nested_dict(self):
df = DataFrame({"col": range(1, 5)})
expected = DataFrame({"col": ["a", 2, 3, "b"]})
@@ -912,12 +920,14 @@ def test_replace_dict_tuple_list_ordering_remains_the_same(self):
tm.assert_frame_equal(res3, expected)
def test_replace_doesnt_replace_without_regex(self):
- raw = """fol T_opp T_Dir T_Enh
- 0 1 0 0 vo
- 1 2 vr 0 0
- 2 2 0 0 0
- 3 3 0 bt 0"""
- df = pd.read_csv(StringIO(raw), sep=r"\s+")
+ df = DataFrame(
+ {
+ "fol": [1, 2, 2, 3],
+ "T_opp": ["0", "vr", "0", "0"],
+ "T_Dir": ["0", "0", "0", "bt"],
+ "T_Enh": ["vo", "0", "0", "0"],
+ }
+ )
res = df.replace({r"\D": 1})
tm.assert_frame_equal(df, res)
diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py
index d8511581f0e94..9cd0b8bb5b315 100644
--- a/pandas/tests/frame/methods/test_shift.py
+++ b/pandas/tests/frame/methods/test_shift.py
@@ -331,6 +331,83 @@ def test_shift_dt64values_int_fill_deprecated(self):
expected = DataFrame({"A": [pd.Timestamp(0), pd.Timestamp(0)], "B": df2["A"]})
tm.assert_frame_equal(result, expected)
+ # same thing but not consolidated
+ # This isn't great that we get different behavior, but
+ # that will go away when the deprecation is enforced
+ df3 = DataFrame({"A": ser})
+ df3["B"] = ser
+ assert len(df3._mgr.arrays) == 2
+ result = df3.shift(1, axis=1, fill_value=0)
+ expected = DataFrame({"A": [0, 0], "B": df2["A"]})
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "as_cat",
+ [
+ pytest.param(
+ True,
+ marks=pytest.mark.xfail(
+ reason="_can_hold_element incorrectly always returns True"
+ ),
+ ),
+ False,
+ ],
+ )
+ @pytest.mark.parametrize(
+ "vals",
+ [
+ date_range("2020-01-01", periods=2),
+ date_range("2020-01-01", periods=2, tz="US/Pacific"),
+ pd.period_range("2020-01-01", periods=2, freq="D"),
+ pd.timedelta_range("2020 Days", periods=2, freq="D"),
+ pd.interval_range(0, 3, periods=2),
+ pytest.param(
+ pd.array([1, 2], dtype="Int64"),
+ marks=pytest.mark.xfail(
+ reason="_can_hold_element incorrectly always returns True"
+ ),
+ ),
+ pytest.param(
+ pd.array([1, 2], dtype="Float32"),
+ marks=pytest.mark.xfail(
+ reason="_can_hold_element incorrectly always returns True"
+ ),
+ ),
+ ],
+ ids=lambda x: str(x.dtype),
+ )
+ def test_shift_dt64values_axis1_invalid_fill(
+ self, vals, as_cat, using_array_manager, request
+ ):
+ # GH#44564
+ if using_array_manager:
+ mark = pytest.mark.xfail(raises=NotImplementedError)
+ request.node.add_marker(mark)
+
+ ser = Series(vals)
+ if as_cat:
+ ser = ser.astype("category")
+
+ df = DataFrame({"A": ser})
+ result = df.shift(-1, axis=1, fill_value="foo")
+ expected = DataFrame({"A": ["foo", "foo"]})
+ tm.assert_frame_equal(result, expected)
+
+ # same thing but multiple blocks
+ df2 = DataFrame({"A": ser, "B": ser})
+ df2._consolidate_inplace()
+
+ result = df2.shift(-1, axis=1, fill_value="foo")
+ expected = DataFrame({"A": df2["B"], "B": ["foo", "foo"]})
+ tm.assert_frame_equal(result, expected)
+
+ # same thing but not consolidated
+ df3 = DataFrame({"A": ser})
+ df3["B"] = ser
+ assert len(df3._mgr.arrays) == 2
+ result = df3.shift(-1, axis=1, fill_value="foo")
+ tm.assert_frame_equal(result, expected)
+
def test_shift_axis1_categorical_columns(self):
# GH#38434
ci = CategoricalIndex(["a", "b", "c"])
diff --git a/pandas/tests/frame/methods/test_transpose.py b/pandas/tests/frame/methods/test_transpose.py
index 59de1ab0c1ce9..7fca752f2a21e 100644
--- a/pandas/tests/frame/methods/test_transpose.py
+++ b/pandas/tests/frame/methods/test_transpose.py
@@ -115,4 +115,4 @@ def test_transpose_get_view_dt64tzget_view(self):
assert result._mgr.nblocks == 1
rtrip = result._mgr.blocks[0].values
- assert np.shares_memory(arr._data, rtrip._data)
+ assert np.shares_memory(arr._ndarray, rtrip._ndarray)
diff --git a/pandas/tests/frame/methods/test_truncate.py b/pandas/tests/frame/methods/test_truncate.py
index 54b87a330f67a..33ba29c65cebb 100644
--- a/pandas/tests/frame/methods/test_truncate.py
+++ b/pandas/tests/frame/methods/test_truncate.py
@@ -85,18 +85,16 @@ def test_truncate_nonsortedindex(self, frame_or_series):
obj.truncate(before=3, after=9)
def test_sort_values_nonsortedindex(self):
- # TODO: belongs elsewhere?
-
rng = date_range("2011-01-01", "2012-01-01", freq="W")
ts = DataFrame(
{"A": np.random.randn(len(rng)), "B": np.random.randn(len(rng))}, index=rng
)
+ decreasing = ts.sort_values("A", ascending=False)
+
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
- ts.sort_values("A", ascending=False).truncate(
- before="2011-11", after="2011-12"
- )
+ decreasing.truncate(before="2011-11", after="2011-12")
def test_truncate_nonsortedindex_axis1(self):
# GH#17935
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 49649c1487f13..3adc4ebceaad5 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -5,6 +5,8 @@
import numpy as np
import pytest
+from pandas._config.config import option_context
+
import pandas.util._test_decorators as td
from pandas.util._test_decorators import (
async_mark,
@@ -87,6 +89,25 @@ def test_tab_completion(self):
assert key not in dir(df)
assert isinstance(df.__getitem__("A"), DataFrame)
+ def test_display_max_dir_items(self):
+ # display.max_dir_items increaes the number of columns that are in __dir__.
+ columns = ["a" + str(i) for i in range(420)]
+ values = [range(420), range(420)]
+ df = DataFrame(values, columns=columns)
+
+ # The default value for display.max_dir_items is 100
+ assert "a99" in dir(df)
+ assert "a100" not in dir(df)
+
+ with option_context("display.max_dir_items", 300):
+ df = DataFrame(values, columns=columns)
+ assert "a299" in dir(df)
+ assert "a300" not in dir(df)
+
+ with option_context("display.max_dir_items", None):
+ df = DataFrame(values, columns=columns)
+ assert "a419" in dir(df)
+
def test_not_hashable(self):
empty_frame = DataFrame()
@@ -296,7 +317,6 @@ def test_attrs(self):
result = df.rename(columns=str)
assert result.attrs == {"version": 1}
- @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) setitem (no copy)
@pytest.mark.parametrize("allows_duplicate_labels", [True, False, None])
def test_set_flags(self, allows_duplicate_labels, frame_or_series):
obj = DataFrame({"A": [1, 2]})
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 1ddb18c218cc6..0e6b36a484c47 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -722,11 +722,16 @@ def test_df_add_2d_array_collike_broadcasts(self):
result = collike + df
tm.assert_frame_equal(result, expected)
- @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) decide on dtypes
- def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
+ def test_df_arith_2d_array_rowlike_broadcasts(
+ self, request, all_arithmetic_operators, using_array_manager
+ ):
# GH#23000
opname = all_arithmetic_operators
+ if using_array_manager and opname in ("__rmod__", "__rfloordiv__"):
+ # TODO(ArrayManager) decide on dtypes
+ td.mark_array_manager_not_yet_implemented(request)
+
arr = np.arange(6).reshape(3, 2)
df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
@@ -744,11 +749,16 @@ def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
- @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) decide on dtypes
- def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
+ def test_df_arith_2d_array_collike_broadcasts(
+ self, request, all_arithmetic_operators, using_array_manager
+ ):
# GH#23000
opname = all_arithmetic_operators
+ if using_array_manager and opname in ("__rmod__", "__rfloordiv__"):
+ # TODO(ArrayManager) decide on dtypes
+ td.mark_array_manager_not_yet_implemented(request)
+
arr = np.arange(6).reshape(3, 2)
df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
@@ -925,8 +935,8 @@ def test_binop_other(self, op, value, dtype, switch_numexpr_min_elements):
(operator.mul, "bool"),
}
- e = DummyElement(value, dtype)
- s = DataFrame({"A": [e.value, e.value]}, dtype=e.dtype)
+ elem = DummyElement(value, dtype)
+ df = DataFrame({"A": [elem.value, elem.value]}, dtype=elem.dtype)
invalid = {
(operator.pow, "<M8[ns]"),
@@ -960,7 +970,7 @@ def test_binop_other(self, op, value, dtype, switch_numexpr_min_elements):
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(warn):
- op(s, e.value)
+ op(df, elem.value)
elif (op, dtype) in skip:
@@ -971,19 +981,17 @@ def test_binop_other(self, op, value, dtype, switch_numexpr_min_elements):
else:
warn = None
with tm.assert_produces_warning(warn):
- op(s, e.value)
+ op(df, elem.value)
else:
msg = "operator '.*' not implemented for .* dtypes"
with pytest.raises(NotImplementedError, match=msg):
- op(s, e.value)
+ op(df, elem.value)
else:
- # FIXME: Since dispatching to Series, this test no longer
- # asserts anything meaningful
with tm.assert_produces_warning(None):
- result = op(s, e.value).dtypes
- expected = op(s, value).dtypes
+ result = op(df, elem.value).dtypes
+ expected = op(df, value).dtypes
tm.assert_series_equal(result, expected)
@@ -1240,9 +1248,7 @@ def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype="float64")
- def test_combine_series(
- self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
- ):
+ def test_combine_series(self, float_frame, mixed_float_frame, mixed_int_frame):
# Series
series = float_frame.xs(float_frame.index[0])
@@ -1272,17 +1278,18 @@ def test_combine_series(
added = mixed_float_frame + series.astype("float16")
_check_mixed_float(added, dtype={"C": None})
- # FIXME: don't leave commented-out
- # these raise with numexpr.....as we are adding an int64 to an
- # uint64....weird vs int
-
- # added = mixed_int_frame + (100*series).astype('int64')
- # _check_mixed_int(added, dtype = {"A": 'int64', "B": 'float64', "C":
- # 'int64', "D": 'int64'})
- # added = mixed_int_frame + (100*series).astype('int32')
- # _check_mixed_int(added, dtype = {"A": 'int32', "B": 'float64', "C":
- # 'int32', "D": 'int64'})
+ # these used to raise with numexpr as we are adding an int64 to an
+ # uint64....weird vs int
+ added = mixed_int_frame + (100 * series).astype("int64")
+ _check_mixed_int(
+ added, dtype={"A": "int64", "B": "float64", "C": "int64", "D": "int64"}
+ )
+ added = mixed_int_frame + (100 * series).astype("int32")
+ _check_mixed_int(
+ added, dtype={"A": "int32", "B": "float64", "C": "int32", "D": "int64"}
+ )
+ def test_combine_timeseries(self, datetime_frame):
# TimeSeries
ts = datetime_frame["A"]
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index 34854be29ad1f..01a8982c5fe16 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -2,7 +2,6 @@
datetime,
timedelta,
)
-from io import StringIO
import itertools
import numpy as np
@@ -289,15 +288,29 @@ def test_pickle(self, float_string_frame, timezone_frame):
def test_consolidate_datetime64(self):
# numpy vstack bug
- data = (
- "starting,ending,measure\n"
- "2012-06-21 00:00,2012-06-23 07:00,77\n"
- "2012-06-23 07:00,2012-06-23 16:30,65\n"
- "2012-06-23 16:30,2012-06-25 08:00,77\n"
- "2012-06-25 08:00,2012-06-26 12:00,0\n"
- "2012-06-26 12:00,2012-06-27 08:00,77\n"
+ df = DataFrame(
+ {
+ "starting": pd.to_datetime(
+ [
+ "2012-06-21 00:00",
+ "2012-06-23 07:00",
+ "2012-06-23 16:30",
+ "2012-06-25 08:00",
+ "2012-06-26 12:00",
+ ]
+ ),
+ "ending": pd.to_datetime(
+ [
+ "2012-06-23 07:00",
+ "2012-06-23 16:30",
+ "2012-06-25 08:00",
+ "2012-06-26 12:00",
+ "2012-06-27 08:00",
+ ]
+ ),
+ "measure": [77, 65, 77, 0, 77],
+ }
)
- df = pd.read_csv(StringIO(data), parse_dates=[0, 1])
ser_starting = df.starting
ser_starting.index = ser_starting.values
@@ -339,8 +352,7 @@ def test_stale_cached_series_bug_473(self):
assert pd.isna(Y["g"]["c"])
def test_strange_column_corruption_issue(self):
- # FIXME: dont leave commented-out
- # (wesm) Unclear how exactly this is related to internal matters
+ # TODO(wesm): Unclear how exactly this is related to internal matters
df = DataFrame(index=[0, 1])
df[0] = np.nan
wasCol = {}
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index f92bbe1c718ab..7347640fc05a7 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -70,6 +70,27 @@
class TestDataFrameConstructors:
+ def test_constructor_from_2d_datetimearray(self):
+ dti = date_range("2016-01-01", periods=6, tz="US/Pacific")
+ dta = dti._data.reshape(3, 2)
+
+ df = DataFrame(dta)
+ expected = DataFrame({0: dta[:, 0], 1: dta[:, 1]})
+ tm.assert_frame_equal(df, expected)
+
+ def test_constructor_dict_with_tzaware_scalar(self):
+ # GH#42505
+ dt = Timestamp("2019-11-03 01:00:00-0700").tz_convert("America/Los_Angeles")
+
+ df = DataFrame({"dt": dt}, index=[0])
+ expected = DataFrame({"dt": [dt]})
+ tm.assert_frame_equal(df, expected)
+
+ # Non-homogeneous
+ df = DataFrame({"dt": dt, "value": [1]})
+ expected = DataFrame({"dt": [dt], "value": [1]})
+ tm.assert_frame_equal(df, expected)
+
def test_construct_ndarray_with_nas_and_int_dtype(self):
# GH#26919 match Series by not casting np.nan to meaningless int
arr = np.array([[1, np.nan], [2, 3]])
@@ -2274,16 +2295,18 @@ def test_check_dtype_empty_numeric_column(self, dtype):
assert data.b.dtype == dtype
- # TODO(ArrayManager) astype to bytes dtypes does not yet give object dtype
- @td.skip_array_manager_not_yet_implemented
@pytest.mark.parametrize(
"dtype", tm.STRING_DTYPES + tm.BYTES_DTYPES + tm.OBJECT_DTYPES
)
- def test_check_dtype_empty_string_column(self, dtype):
+ def test_check_dtype_empty_string_column(self, request, dtype, using_array_manager):
# GH24386: Ensure dtypes are set correctly for an empty DataFrame.
# Empty DataFrame is generated via dictionary data with non-overlapping columns.
data = DataFrame({"a": [1, 2]}, columns=["b"], dtype=dtype)
+ if using_array_manager and dtype in tm.BYTES_DTYPES:
+ # TODO(ArrayManager) astype to bytes dtypes does not yet give object dtype
+ td.mark_array_manager_not_yet_implemented(request)
+
assert data.b.dtype.name == "object"
def test_to_frame_with_falsey_names(self):
@@ -2453,8 +2476,20 @@ def test_constructor_list_str_na(self, string_dtype):
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("copy", [False, True])
- @td.skip_array_manager_not_yet_implemented
- def test_dict_nocopy(self, copy, any_numeric_ea_dtype, any_numpy_dtype):
+ def test_dict_nocopy(
+ self, request, copy, any_numeric_ea_dtype, any_numpy_dtype, using_array_manager
+ ):
+ if using_array_manager and not (
+ (any_numpy_dtype in (tm.STRING_DTYPES + tm.BYTES_DTYPES))
+ or (
+ any_numpy_dtype
+ in (tm.DATETIME64_DTYPES + tm.TIMEDELTA64_DTYPES + tm.BOOL_DTYPES)
+ and copy
+ )
+ ):
+ # TODO(ArrayManager) properly honor copy keyword for dict input
+ td.mark_array_manager_not_yet_implemented(request)
+
a = np.array([1, 2], dtype=any_numpy_dtype)
b = np.array([3, 4], dtype=any_numpy_dtype)
if b.dtype.kind in ["S", "U"]:
@@ -2664,15 +2699,15 @@ def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture, pydt):
expected = DataFrame({0: [ts_naive]})
tm.assert_frame_equal(result, expected)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
result = DataFrame({0: ts}, index=[0], dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
result = DataFrame([ts], dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
result = DataFrame(np.array([ts], dtype=object), dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
@@ -2680,11 +2715,11 @@ def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture, pydt):
result = DataFrame(ts, index=[0], columns=[0], dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
df = DataFrame([Series([ts])], dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
df = DataFrame([[ts]], columns=[0], dtype="datetime64[ns]")
tm.assert_equal(df, expected)
@@ -2888,8 +2923,8 @@ def test_from_timedelta_scalar_preserves_nanos(self, constructor):
obj = constructor(td, dtype="m8[ns]")
assert get1(obj) == td
- def test_from_timestamp_scalar_preserves_nanos(self, constructor):
- ts = Timestamp.now() + Timedelta(1)
+ def test_from_timestamp_scalar_preserves_nanos(self, constructor, fixed_now_ts):
+ ts = fixed_now_ts + Timedelta(1)
obj = constructor(ts, dtype="M8[ns]")
assert get1(obj) == ts
@@ -2903,14 +2938,7 @@ def test_from_timedelta64_scalar_object(self, constructor):
assert isinstance(get1(obj), np.timedelta64)
@pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64])
- def test_from_scalar_datetimelike_mismatched(self, constructor, cls, request):
- node = request.node
- params = node.callspec.params
- if params["frame_or_series"] is DataFrame and params["constructor"] is dict:
- mark = pytest.mark.xfail(
- reason="DataFrame incorrectly allows mismatched datetimelike"
- )
- node.add_marker(mark)
+ def test_from_scalar_datetimelike_mismatched(self, constructor, cls):
scalar = cls("NaT", "ns")
dtype = {np.datetime64: "m8[ns]", np.timedelta64: "M8[ns]"}[cls]
@@ -2953,9 +2981,7 @@ def test_tzaware_data_tznaive_dtype(self, constructor):
ts = Timestamp("2019", tz=tz)
ts_naive = Timestamp("2019")
- with tm.assert_produces_warning(
- FutureWarning, match="Data is timezone-aware", check_stacklevel=False
- ):
+ with tm.assert_produces_warning(FutureWarning, match="Data is timezone-aware"):
result = constructor(ts, dtype="M8[ns]")
assert np.all(result.dtypes == "M8[ns]")
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index bc71b0c2048f8..331c21de8e4bd 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -1,4 +1,3 @@
-from io import StringIO
import operator
import numpy as np
@@ -110,7 +109,7 @@ def test_ops(self, op_str, op, rop, n):
df.iloc[0] = 2
m = df.mean()
- base = DataFrame( # noqa
+ base = DataFrame( # noqa:F841
np.tile(m.values, n).reshape(n, -1), columns=list("abcd")
)
@@ -493,7 +492,7 @@ def test_query_scope(self):
df = DataFrame(np.random.randn(20, 2), columns=list("ab"))
- a, b = 1, 2 # noqa
+ a, b = 1, 2 # noqa:F841
res = df.query("a > b", engine=engine, parser=parser)
expected = df[df.a > df.b]
tm.assert_frame_equal(res, expected)
@@ -662,7 +661,7 @@ def test_local_variable_with_in(self):
def test_at_inside_string(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
- c = 1 # noqa
+ c = 1 # noqa:F841
df = DataFrame({"a": ["a", "a", "b", "b", "@c", "@c"]})
result = df.query('a == "@c"', engine=engine, parser=parser)
expected = df[df.a == "@c"]
@@ -681,7 +680,7 @@ def test_query_undefined_local(self):
df.query("a == @c", engine=engine, parser=parser)
def test_index_resolvers_come_after_columns_with_the_same_name(self):
- n = 1 # noqa
+ n = 1 # noqa:F841
a = np.r_[20:101:20]
df = DataFrame({"index": a, "b": np.random.randn(a.size)})
@@ -835,7 +834,7 @@ def test_nested_scope(self):
engine = self.engine
parser = self.parser
# smoke test
- x = 1 # noqa
+ x = 1 # noqa:F841
result = pd.eval("x + 1", engine=engine, parser=parser)
assert result == 2
@@ -1020,23 +1019,19 @@ def test_object_array_eq_ne(self, parser, engine):
def test_query_with_nested_strings(self, parser, engine):
skip_if_no_pandas_parser(parser)
- raw = """id event timestamp
- 1 "page 1 load" 1/1/2014 0:00:01
- 1 "page 1 exit" 1/1/2014 0:00:31
- 2 "page 2 load" 1/1/2014 0:01:01
- 2 "page 2 exit" 1/1/2014 0:01:31
- 3 "page 3 load" 1/1/2014 0:02:01
- 3 "page 3 exit" 1/1/2014 0:02:31
- 4 "page 1 load" 2/1/2014 1:00:01
- 4 "page 1 exit" 2/1/2014 1:00:31
- 5 "page 2 load" 2/1/2014 1:01:01
- 5 "page 2 exit" 2/1/2014 1:01:31
- 6 "page 3 load" 2/1/2014 1:02:01
- 6 "page 3 exit" 2/1/2014 1:02:31
- """
- df = pd.read_csv(
- StringIO(raw), sep=r"\s{2,}", engine="python", parse_dates=["timestamp"]
+ events = [
+ f"page {n} {act}" for n in range(1, 4) for act in ["load", "exit"]
+ ] * 2
+ stamps1 = date_range("2014-01-01 0:00:01", freq="30s", periods=6)
+ stamps2 = date_range("2014-02-01 1:00:01", freq="30s", periods=6)
+ df = DataFrame(
+ {
+ "id": np.arange(1, 7).repeat(2),
+ "event": events,
+ "timestamp": stamps1.append(stamps2),
+ }
)
+
expected = df[df.event == '"page 1 load"']
res = df.query("""'"page 1 load"' in event""", parser=parser, engine=engine)
tm.assert_frame_equal(expected, res)
@@ -1078,7 +1073,7 @@ def test_query_string_scalar_variable(self, parser, engine):
}
)
e = df[df.Symbol == "BUD US"]
- symb = "BUD US" # noqa
+ symb = "BUD US" # noqa:F841
r = df.query("Symbol == @symb", parser=parser, engine=engine)
tm.assert_frame_equal(e, r)
@@ -1260,7 +1255,7 @@ def test_call_non_named_expression(self, df):
def func(*_):
return 1
- funcs = [func] # noqa
+ funcs = [func] # noqa:F841
df.eval("@func()")
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index fc2c138538ac9..0161acd8b52cf 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -1478,33 +1478,29 @@ def test_frame_any_with_timedelta(self):
expected = Series(data=[False, True])
tm.assert_series_equal(result, expected)
- @pytest.mark.parametrize(
- "func",
- [
- "any",
- "all",
- "count",
- "sum",
- "prod",
- "max",
- "min",
- "mean",
- "median",
- "skew",
- "kurt",
- "sem",
- "var",
- "std",
- "mad",
- ],
- )
- def test_reductions_deprecation_level_argument(self, frame_or_series, func):
+ def test_reductions_deprecation_skipna_none(self, frame_or_series):
+ # GH#44580
+ obj = frame_or_series([1, 2, 3])
+ with tm.assert_produces_warning(FutureWarning, match="skipna"):
+ obj.mad(skipna=None)
+
+ def test_reductions_deprecation_level_argument(
+ self, frame_or_series, reduction_functions
+ ):
# GH#39983
obj = frame_or_series(
[1, 2, 3], index=MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]])
)
with tm.assert_produces_warning(FutureWarning, match="level"):
- getattr(obj, func)(level=0)
+ getattr(obj, reduction_functions)(level=0)
+
+ def test_reductions_skipna_none_raises(self, frame_or_series, reduction_functions):
+ if reduction_functions in ["count", "mad"]:
+ pytest.skip("Count does not accept skipna. Mad needs a depreaction cycle.")
+ obj = frame_or_series([1, 2, 3])
+ msg = 'For argument "skipna" expected type bool, received type NoneType.'
+ with pytest.raises(ValueError, match=msg):
+ getattr(obj, reduction_functions)(skipna=None)
class TestNuisanceColumns:
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index b288fafd8f7f6..bb80bd12c1958 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -265,6 +265,7 @@ def test_repr_column_name_unicode_truncation_bug(self):
with option_context("display.max_columns", 20):
assert "StringCol" in repr(df)
+ @pytest.mark.filterwarnings("ignore::FutureWarning")
def test_latex_repr(self):
result = r"""\begin{tabular}{llll}
\toprule
diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py
index 62512249dabfc..689c54b03b507 100644
--- a/pandas/tests/frame/test_stack_unstack.py
+++ b/pandas/tests/frame/test_stack_unstack.py
@@ -5,8 +5,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
from pandas import (
DataFrame,
@@ -949,7 +947,6 @@ def test_unstack_nan_index4(self):
left = df.loc[17264:].copy().set_index(["s_id", "dosage", "agent"])
tm.assert_frame_equal(left.unstack(), right)
- @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) MultiIndex bug
def test_unstack_nan_index5(self):
# GH9497 - multiple unstack with nulls
df = DataFrame(
diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py
index 42474ff00ad6d..8d9957b24300f 100644
--- a/pandas/tests/frame/test_subclass.py
+++ b/pandas/tests/frame/test_subclass.py
@@ -13,6 +13,16 @@
import pandas._testing as tm
+@pytest.fixture()
+def gpd_style_subclass_df():
+ class SubclassedDataFrame(DataFrame):
+ @property
+ def _constructor(self):
+ return SubclassedDataFrame
+
+ return SubclassedDataFrame({"a": [1, 2, 3]})
+
+
class TestDataFrameSubclassing:
def test_frame_subclassing_and_slicing(self):
# Subclass frame and ensure it returns the right class on slicing it
@@ -704,6 +714,15 @@ def test_idxmax_preserves_subclass(self):
result = df.idxmax()
assert isinstance(result, tm.SubclassedSeries)
+ def test_convert_dtypes_preserves_subclass(self, gpd_style_subclass_df):
+ # GH 43668
+ df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
+ result = df.convert_dtypes()
+ assert isinstance(result, tm.SubclassedDataFrame)
+
+ result = gpd_style_subclass_df.convert_dtypes()
+ assert isinstance(result, type(gpd_style_subclass_df))
+
def test_equals_subclass(self):
# https://github.com/pandas-dev/pandas/pull/34402
# allow subclass in both directions
diff --git a/pandas/tests/generic/test_duplicate_labels.py b/pandas/tests/generic/test_duplicate_labels.py
index 1b32675ec2d35..c942945029c8e 100644
--- a/pandas/tests/generic/test_duplicate_labels.py
+++ b/pandas/tests/generic/test_duplicate_labels.py
@@ -63,7 +63,6 @@ def test_preserved_frame(self):
assert df.loc[["a"]].flags.allows_duplicate_labels is False
assert df.loc[:, ["A", "B"]].flags.allows_duplicate_labels is False
- @not_implemented
def test_to_frame(self):
s = pd.Series(dtype=float).set_flags(allows_duplicate_labels=False)
assert s.to_frame().flags.allows_duplicate_labels is False
@@ -85,7 +84,6 @@ def test_binops(self, func, other, frame):
assert df.flags.allows_duplicate_labels is False
assert func(df).flags.allows_duplicate_labels is False
- @not_implemented
def test_preserve_getitem(self):
df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False)
assert df[["A"]].flags.allows_duplicate_labels is False
@@ -309,18 +307,12 @@ def test_series_raises(self, func):
(operator.itemgetter(["A", "A"]), None),
# loc
(operator.itemgetter(["a", "a"]), "loc"),
- pytest.param(
- operator.itemgetter(("a", ["A", "A"])), "loc", marks=not_implemented
- ),
+ pytest.param(operator.itemgetter(("a", ["A", "A"])), "loc"),
(operator.itemgetter((["a", "a"], "A")), "loc"),
# iloc
(operator.itemgetter([0, 0]), "iloc"),
- pytest.param(
- operator.itemgetter((0, [0, 0])), "iloc", marks=not_implemented
- ),
- pytest.param(
- operator.itemgetter(([0, 0], 0)), "iloc", marks=not_implemented
- ),
+ pytest.param(operator.itemgetter((0, [0, 0])), "iloc"),
+ pytest.param(operator.itemgetter(([0, 0], 0)), "iloc"),
],
)
def test_getitem_raises(self, getter, target):
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py
index c1f8b5dd7cf41..920301dc2b9c5 100644
--- a/pandas/tests/generic/test_finalize.py
+++ b/pandas/tests/generic/test_finalize.py
@@ -6,7 +6,7 @@
import numpy as np
import pytest
-
+import pandas._testing as tm
import pandas as pd
# TODO:
@@ -38,14 +38,9 @@
(pd.Series, ([0],), operator.methodcaller("take", [])),
(pd.Series, ([0],), operator.methodcaller("__getitem__", [True])),
(pd.Series, ([0],), operator.methodcaller("repeat", 2)),
- pytest.param(
- (pd.Series, ([0],), operator.methodcaller("reset_index")),
- marks=pytest.mark.xfail,
- ),
+ (pd.Series, ([0],), operator.methodcaller("reset_index")),
(pd.Series, ([0],), operator.methodcaller("reset_index", drop=True)),
- pytest.param(
- (pd.Series, ([0],), operator.methodcaller("to_frame")), marks=pytest.mark.xfail
- ),
+ (pd.Series, ([0],), operator.methodcaller("to_frame")),
(pd.Series, ([0, 0],), operator.methodcaller("drop_duplicates")),
(pd.Series, ([0, 0],), operator.methodcaller("duplicated")),
(pd.Series, ([0, 0],), operator.methodcaller("round")),
@@ -347,10 +342,7 @@
operator.methodcaller("infer_objects"),
),
(pd.Series, ([1, 2],), operator.methodcaller("convert_dtypes")),
- pytest.param(
- (pd.DataFrame, frame_data, operator.methodcaller("convert_dtypes")),
- marks=not_implemented_mark,
- ),
+ (pd.DataFrame, frame_data, operator.methodcaller("convert_dtypes")),
(pd.Series, ([1, None, 3],), operator.methodcaller("interpolate")),
(pd.DataFrame, ({"A": [1, None, 3]},), operator.methodcaller("interpolate")),
(pd.Series, ([1, 2],), operator.methodcaller("clip", lower=1)),
@@ -783,4 +775,4 @@ def test_finalize_frame_series_name():
# ensure we don't copy the column `name` to the Series.
df = pd.DataFrame({"name": [1, 2]})
result = pd.Series([1, 2]).__finalize__(df)
- assert result.name is None
+ assert result.name is None
\ No newline at end of file
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index f178f85154319..9ebe6f8d8c97e 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -57,8 +57,6 @@ def test_agg_must_agg(df):
def test_agg_ser_multi_key(df):
- # TODO(wesm): unused
- ser = df.C # noqa
f = lambda x: x.sum()
results = df.C.groupby([df.A, df.B]).aggregate(f)
diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py
index 694f843ec138f..d9372ba5cbb50 100644
--- a/pandas/tests/groupby/aggregate/test_cython.py
+++ b/pandas/tests/groupby/aggregate/test_cython.py
@@ -97,7 +97,7 @@ def test_cython_agg_nothing_to_agg():
frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25})
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
result = frame[["b"]].groupby(frame["a"]).mean()
expected = DataFrame([], index=frame["a"].sort_values().drop_duplicates())
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/aggregate/test_numba.py b/pandas/tests/groupby/aggregate/test_numba.py
index 8e9df8a6da958..e7fa2e0690066 100644
--- a/pandas/tests/groupby/aggregate/test_numba.py
+++ b/pandas/tests/groupby/aggregate/test_numba.py
@@ -48,7 +48,7 @@ def incorrect_function(x, **kwargs):
@td.skip_if_no("numba")
-@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
+@pytest.mark.filterwarnings("ignore:\n")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
@pytest.mark.parametrize("jit", [True, False])
@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
@@ -77,7 +77,7 @@ def func_numba(values, index):
@td.skip_if_no("numba")
-@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
+@pytest.mark.filterwarnings("ignore:\n")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
@pytest.mark.parametrize("jit", [True, False])
@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py
index 66b968e01eef1..06044ddd3f4b8 100644
--- a/pandas/tests/groupby/aggregate/test_other.py
+++ b/pandas/tests/groupby/aggregate/test_other.py
@@ -8,8 +8,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
from pandas import (
DataFrame,
@@ -424,7 +422,6 @@ def __call__(self, x):
tm.assert_frame_equal(result, expected)
-@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) columns with ndarrays
def test_agg_over_numpy_arrays():
# GH 3788
df = DataFrame(
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 339bb2c30736d..28128dee9da0f 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -3,8 +3,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
from pandas import (
Categorical,
@@ -301,9 +299,7 @@ def test_apply(ordered):
tm.assert_series_equal(result, expected)
-# TODO(ArrayManager) incorrect dtype for mean()
-@td.skip_array_manager_not_yet_implemented
-def test_observed(observed, using_array_manager):
+def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index e5870a206f419..c462db526b36d 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -154,9 +154,7 @@ def test_averages(self, df, method):
],
)
- with tm.assert_produces_warning(
- FutureWarning, match="Dropping invalid", check_stacklevel=False
- ):
+ with tm.assert_produces_warning(FutureWarning, match="Dropping invalid"):
result = getattr(gb, method)(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 203d8abb465d0..8004bb9e97343 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1,6 +1,5 @@
from datetime import datetime
from decimal import Decimal
-from io import StringIO
import numpy as np
import pytest
@@ -20,7 +19,6 @@
Timedelta,
Timestamp,
date_range,
- read_csv,
to_datetime,
)
import pandas._testing as tm
@@ -904,9 +902,7 @@ def test_omit_nuisance_agg(df, agg_function):
def test_omit_nuisance_warnings(df):
# GH 38815
- with tm.assert_produces_warning(
- FutureWarning, filter_level="always", check_stacklevel=False
- ):
+ with tm.assert_produces_warning(FutureWarning, filter_level="always"):
grouped = df.groupby("A")
result = grouped.skew()
expected = df.loc[:, ["A", "C", "D"]].groupby("A").skew()
@@ -1134,14 +1130,18 @@ def test_grouping_ndarray(df):
def test_groupby_wrong_multi_labels():
- data = """index,foo,bar,baz,spam,data
-0,foo1,bar1,baz1,spam2,20
-1,foo1,bar2,baz1,spam3,30
-2,foo2,bar2,baz1,spam2,40
-3,foo1,bar1,baz2,spam1,50
-4,foo3,bar1,baz2,spam1,60"""
- data = read_csv(StringIO(data), index_col=0)
+ index = Index([0, 1, 2, 3, 4], name="index")
+ data = DataFrame(
+ {
+ "foo": ["foo1", "foo1", "foo2", "foo1", "foo3"],
+ "bar": ["bar1", "bar2", "bar2", "bar1", "bar1"],
+ "baz": ["baz1", "baz1", "baz1", "baz2", "baz2"],
+ "spam": ["spam2", "spam3", "spam2", "spam1", "spam1"],
+ "data": [20, 30, 40, 50, 60],
+ },
+ index=index,
+ )
grouped = data.groupby(["foo", "bar", "baz", "spam"])
@@ -2031,6 +2031,16 @@ def get_result():
tm.assert_equal(result, expected)
+def test_empty_groupby_apply_nonunique_columns():
+ # GH#44417
+ df = DataFrame(np.random.randn(0, 4))
+ df[3] = df[3].astype(np.int64)
+ df.columns = [0, 1, 2, 0]
+ gb = df.groupby(df[1])
+ res = gb.apply(lambda x: x)
+ assert (res.dtypes == df.dtypes).all()
+
+
def test_tuple_as_grouping():
# https://github.com/pandas-dev/pandas/issues/18314
df = DataFrame(
diff --git a/pandas/tests/groupby/test_numba.py b/pandas/tests/groupby/test_numba.py
index 98156ae6a63ca..20fd02b21a744 100644
--- a/pandas/tests/groupby/test_numba.py
+++ b/pandas/tests/groupby/test_numba.py
@@ -10,7 +10,7 @@
@td.skip_if_no("numba")
-@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
+@pytest.mark.filterwarnings("ignore:\n")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
class TestEngine:
def test_cython_vs_numba_frame(self, sort, nogil, parallel, nopython):
diff --git a/pandas/tests/groupby/transform/test_numba.py b/pandas/tests/groupby/transform/test_numba.py
index 503de5ebd2330..4e1b777296d5b 100644
--- a/pandas/tests/groupby/transform/test_numba.py
+++ b/pandas/tests/groupby/transform/test_numba.py
@@ -45,7 +45,7 @@ def incorrect_function(x, **kwargs):
@td.skip_if_no("numba")
-@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
+@pytest.mark.filterwarnings("ignore:\n")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
@pytest.mark.parametrize("jit", [True, False])
@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
@@ -74,7 +74,7 @@ def func(values, index):
@td.skip_if_no("numba")
-@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
+@pytest.mark.filterwarnings("ignore:\n")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
@pytest.mark.parametrize("jit", [True, False])
@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
diff --git a/pandas/tests/indexes/base_class/test_formats.py b/pandas/tests/indexes/base_class/test_formats.py
index f07b06acbfbdb..9053d45dee623 100644
--- a/pandas/tests/indexes/base_class/test_formats.py
+++ b/pandas/tests/indexes/base_class/test_formats.py
@@ -122,6 +122,14 @@ def test_repr_summary(self):
assert len(result) < 200
assert "..." in result
+ def test_summary_bug(self):
+ # GH#3869
+ ind = Index(["{other}%s", "~:{range}:0"], name="A")
+ result = ind._summary()
+ # shouldn't be formatted accidentally.
+ assert "~:{range}:0" in result
+ assert "{other}%s" in result
+
def test_index_repr_bool_nan(self):
# GH32146
arr = Index([True, False, np.nan], dtype=object)
@@ -132,3 +140,9 @@ def test_index_repr_bool_nan(self):
exp2 = repr(arr)
out2 = "Index([True, False, nan], dtype='object')"
assert out2 == exp2
+
+ def test_format_different_scalar_lengths(self):
+ # GH#35439
+ idx = Index(["aaaaaaaaa", "b"])
+ expected = ["aaaaaaaaa", "b"]
+ assert idx.format() == expected
diff --git a/pandas/tests/indexes/base_class/test_reshape.py b/pandas/tests/indexes/base_class/test_reshape.py
index acb6936f70d0f..547d62669943c 100644
--- a/pandas/tests/indexes/base_class/test_reshape.py
+++ b/pandas/tests/indexes/base_class/test_reshape.py
@@ -1,6 +1,7 @@
"""
Tests for ndarray-like method on the base Index class
"""
+import numpy as np
import pytest
from pandas import Index
@@ -42,6 +43,18 @@ def test_insert_missing(self, nulls_fixture):
result = Index(list("abc")).insert(1, nulls_fixture)
tm.assert_index_equal(result, expected)
+ @pytest.mark.parametrize(
+ "val", [(1, 2), np.datetime64("2019-12-31"), np.timedelta64(1, "D")]
+ )
+ @pytest.mark.parametrize("loc", [-1, 2])
+ def test_insert_datetime_into_object(self, loc, val):
+ # GH#44509
+ idx = Index(["1", "2", "3"])
+ result = idx.insert(loc, val)
+ expected = Index(["1", "2", val, "3"])
+ tm.assert_index_equal(result, expected)
+ assert type(expected[2]) is type(val)
+
@pytest.mark.parametrize(
"pos,expected",
[
diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index 6a9f7c2a80922..2ae6ce99b4ee8 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -264,13 +264,10 @@ def test_ensure_copied_data(self, index):
#
# Must be tested separately from other indexes because
# self.values is not an ndarray.
- # GH#29918 Index.base has been removed
- # FIXME: is this test still meaningful?
- _base = lambda ar: ar if getattr(ar, "base", None) is None else ar.base
result = CategoricalIndex(index.values, copy=True)
tm.assert_index_equal(index, result)
- assert _base(index.values) is not _base(result.values)
+ assert not np.shares_memory(result._data._codes, index._data._codes)
result = CategoricalIndex(index.values, copy=False)
assert result._data._codes is index._data._codes
diff --git a/pandas/tests/indexes/categorical/test_formats.py b/pandas/tests/indexes/categorical/test_formats.py
index 98948c2113bbe..044b03579d535 100644
--- a/pandas/tests/indexes/categorical/test_formats.py
+++ b/pandas/tests/indexes/categorical/test_formats.py
@@ -16,7 +16,7 @@ def test_format_different_scalar_lengths(self):
def test_string_categorical_index_repr(self):
# short
idx = CategoricalIndex(["a", "bb", "ccc"])
- expected = """CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa
+ expected = """CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa:E501
assert repr(idx) == expected
# multiple lines
@@ -33,7 +33,7 @@ def test_string_categorical_index_repr(self):
expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
- categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa
+ categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa:E501
assert repr(idx) == expected
@@ -41,13 +41,13 @@ def test_string_categorical_index_repr(self):
idx = CategoricalIndex(list("abcdefghijklmmo"))
expected = """CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm', 'm', 'o'],
- categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')""" # noqa
+ categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')""" # noqa:E501
assert repr(idx) == expected
# short
idx = CategoricalIndex(["あ", "いい", "ううう"])
- expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
+ expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa:E501
assert repr(idx) == expected
# multiple lines
@@ -64,7 +64,7 @@ def test_string_categorical_index_repr(self):
expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
- categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa
+ categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa:E501
assert repr(idx) == expected
@@ -72,7 +72,7 @@ def test_string_categorical_index_repr(self):
idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ"))
expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し',
'す', 'せ', 'そ'],
- categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa
+ categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa:E501
assert repr(idx) == expected
@@ -81,7 +81,7 @@ def test_string_categorical_index_repr(self):
# short
idx = CategoricalIndex(["あ", "いい", "ううう"])
- expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
+ expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa:E501
assert repr(idx) == expected
# multiple lines
@@ -101,7 +101,7 @@ def test_string_categorical_index_repr(self):
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
'あ', 'いい', 'ううう'],
- categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa
+ categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa:E501
assert repr(idx) == expected
@@ -109,6 +109,6 @@ def test_string_categorical_index_repr(self):
idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ"))
expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ',
'さ', 'し', 'す', 'せ', 'そ'],
- categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa
+ categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa:E501
assert repr(idx) == expected
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 33d2558613baf..a5ee743b5cd9a 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -69,26 +69,6 @@ def test_pickle_compat_construction(self):
with pytest.raises(TypeError, match=msg):
self._index_cls()
- @pytest.mark.parametrize("name", [None, "new_name"])
- def test_to_frame(self, name, simple_index):
- # see GH-15230, GH-22580
- idx = simple_index
-
- if name:
- idx_name = name
- else:
- idx_name = idx.name or 0
-
- df = idx.to_frame(name=idx_name)
-
- assert df.index is idx
- assert len(df.columns) == 1
- assert df.columns[0] == idx_name
- assert df[idx_name].values is not idx.values
-
- df = idx.to_frame(index=False, name=idx_name)
- assert df.index is not idx
-
def test_shift(self, simple_index):
# GH8083 test the base class for shift
@@ -226,46 +206,6 @@ def test_repr_max_seq_item_setting(self, simple_index):
repr(idx)
assert "..." not in str(idx)
- def test_copy_name(self, index):
- # gh-12309: Check that the "name" argument
- # passed at initialization is honored.
- if isinstance(index, MultiIndex):
- return
-
- first = type(index)(index, copy=True, name="mario")
- second = type(first)(first, copy=False)
-
- # Even though "copy=False", we want a new object.
- assert first is not second
-
- # Not using tm.assert_index_equal() since names differ.
- assert index.equals(first)
-
- assert first.name == "mario"
- assert second.name == "mario"
-
- s1 = Series(2, index=first)
- s2 = Series(3, index=second[:-1])
-
- if not isinstance(index, CategoricalIndex):
- # See gh-13365
- s3 = s1 * s2
- assert s3.index.name == "mario"
-
- def test_copy_name2(self, index):
- # gh-35592
- if isinstance(index, MultiIndex):
- return
-
- assert index.copy(name="mario").name == "mario"
-
- with pytest.raises(ValueError, match="Length of new names must be 1, got 2"):
- index.copy(name=["mario", "luigi"])
-
- msg = f"{type(index).__name__}.name must be a hashable type"
- with pytest.raises(TypeError, match=msg):
- index.copy(name=[["mario"]])
-
def test_ensure_copied_data(self, index):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
diff --git a/pandas/tests/indexes/datetimes/methods/test_insert.py b/pandas/tests/indexes/datetimes/methods/test_insert.py
index 016a29e4cc266..592f4240ee750 100644
--- a/pandas/tests/indexes/datetimes/methods/test_insert.py
+++ b/pandas/tests/indexes/datetimes/methods/test_insert.py
@@ -236,7 +236,6 @@ def test_insert_mismatched_types_raises(self, tz_aware_fixture, item):
result = dti.insert(1, item)
if isinstance(item, np.ndarray):
- # FIXME: without doing .item() here this segfaults
assert item.item() == 0
expected = Index([dti[0], 0] + list(dti[1:]), dtype=object, name=9)
else:
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index a99d2f590be97..377974a918ad9 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -94,7 +94,7 @@ def test_date_range_timestamp_equiv_explicit_pytz(self):
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"))
assert ts == stamp
- @td.skip_if_windows_python_3
+ @td.skip_if_windows
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py
index 36046aaeacaae..197038dbadaf7 100644
--- a/pandas/tests/indexes/datetimes/test_formats.py
+++ b/pandas/tests/indexes/datetimes/test_formats.py
@@ -254,3 +254,20 @@ def test_dti_custom_business_summary_dateutil(self):
pd.bdate_range(
"1/1/2005", "1/1/2009", freq="C", tz=dateutil.tz.tzutc()
)._summary()
+
+
+class TestFormat:
+ def test_format_with_name_time_info(self):
+ # bug I fixed 12/20/2011
+ dates = pd.date_range("2011-01-01 04:00:00", periods=10, name="something")
+
+ formatted = dates.format(name=True)
+ assert formatted[0] == "something"
+
+ def test_format_datetime_with_time(self):
+ dti = DatetimeIndex([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
+
+ result = dti.format()
+ expected = ["2012-02-07 00:00:00", "2012-02-07 23:00:00"]
+ assert len(result) == 2
+ assert result == expected
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index c3152b77d39df..9db6567ca1b56 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -21,25 +21,12 @@
)
import pandas._testing as tm
-from pandas.tseries.offsets import (
- BDay,
- CDay,
-)
+from pandas.tseries.frequencies import to_offset
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestGetItem:
- def test_ellipsis(self):
- # GH#21282
- idx = date_range(
- "2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx"
- )
-
- result = idx[...]
- assert result.equals(idx)
- assert result is not idx
-
def test_getitem_slice_keeps_name(self):
# GH4226
st = Timestamp("2013-07-01 00:00:00", tz="America/Los_Angeles")
@@ -88,17 +75,17 @@ def test_getitem(self):
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
- def test_dti_business_getitem(self):
- rng = bdate_range(START, END)
+ @pytest.mark.parametrize("freq", ["B", "C"])
+ def test_dti_business_getitem(self, freq):
+ rng = bdate_range(START, END, freq=freq)
smaller = rng[:5]
- exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="B")
+ exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq=freq)
tm.assert_index_equal(smaller, exp)
assert smaller.freq == exp.freq
-
assert smaller.freq == rng.freq
sliced = rng[::5]
- assert sliced.freq == BDay() * 5
+ assert sliced.freq == to_offset(freq) * 5
fancy_indexed = rng[[4, 3, 2, 1, 0]]
assert len(fancy_indexed) == 5
@@ -108,35 +95,9 @@ def test_dti_business_getitem(self):
# 32-bit vs. 64-bit platforms
assert rng[4] == rng[np.int_(4)]
- def test_dti_business_getitem_matplotlib_hackaround(self):
- rng = bdate_range(START, END)
- with tm.assert_produces_warning(FutureWarning):
- # GH#30588 multi-dimensional indexing deprecated
- values = rng[:, None]
- expected = rng.values[:, None]
- tm.assert_numpy_array_equal(values, expected)
-
- def test_dti_custom_getitem(self):
- rng = bdate_range(START, END, freq="C")
- smaller = rng[:5]
- exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="C")
- tm.assert_index_equal(smaller, exp)
- assert smaller.freq == exp.freq
- assert smaller.freq == rng.freq
-
- sliced = rng[::5]
- assert sliced.freq == CDay() * 5
-
- fancy_indexed = rng[[4, 3, 2, 1, 0]]
- assert len(fancy_indexed) == 5
- assert isinstance(fancy_indexed, DatetimeIndex)
- assert fancy_indexed.freq is None
-
- # 32-bit vs. 64-bit platforms
- assert rng[4] == rng[np.int_(4)]
-
- def test_dti_custom_getitem_matplotlib_hackaround(self):
- rng = bdate_range(START, END, freq="C")
+ @pytest.mark.parametrize("freq", ["B", "C"])
+ def test_dti_business_getitem_matplotlib_hackaround(self, freq):
+ rng = bdate_range(START, END, freq=freq)
with tm.assert_produces_warning(FutureWarning):
# GH#30588 multi-dimensional indexing deprecated
values = rng[:, None]
@@ -255,6 +216,12 @@ def test_where_tz(self):
class TestTake:
+ def test_take_nan_first_datetime(self):
+ index = DatetimeIndex([pd.NaT, Timestamp("20130101"), Timestamp("20130102")])
+ result = index.take([-1, 0, 1])
+ expected = DatetimeIndex([index[-1], index[0], index[1]])
+ tm.assert_index_equal(result, expected)
+
def test_take(self):
# GH#10295
idx1 = date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
@@ -549,9 +516,7 @@ def test_get_loc_tz_aware(self):
freq="5s",
)
key = Timestamp("2019-12-12 10:19:25", tz="US/Eastern")
- with tm.assert_produces_warning(
- FutureWarning, match="deprecated", check_stacklevel=False
- ):
+ with tm.assert_produces_warning(FutureWarning, match="deprecated"):
result = dti.get_loc(key, method="nearest")
assert result == 7433
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index 44c353315562a..76b5b835754aa 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -142,9 +142,7 @@ def test_datetimeindex_accessors4(self):
assert dti.is_month_start[0] == 1
def test_datetimeindex_accessors5(self):
- with tm.assert_produces_warning(
- FutureWarning, match="The 'freq' argument", check_stacklevel=False
- ):
+ with tm.assert_produces_warning(FutureWarning, match="The 'freq' argument"):
tests = [
(Timestamp("2013-06-01", freq="M").is_month_start, 1),
(Timestamp("2013-06-01", freq="BM").is_month_start, 0),
diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py
index fda67e7c0a058..c60e56875bfcd 100644
--- a/pandas/tests/indexes/datetimes/test_scalar_compat.py
+++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py
@@ -65,9 +65,7 @@ def test_dti_timestamp_fields(self, field):
expected = getattr(idx, field)[-1]
warn = FutureWarning if field.startswith("is_") else None
- with tm.assert_produces_warning(
- warn, match="Timestamp.freq is deprecated", check_stacklevel=False
- ):
+ with tm.assert_produces_warning(warn, match="Timestamp.freq is deprecated"):
result = getattr(Timestamp(idx[-1]), field)
assert result == expected
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index 3201b038c624b..ae4ed04f8adac 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -515,7 +515,7 @@ def test_month_range_union_tz_pytz(self, sort):
early_dr.union(late_dr, sort=sort)
- @td.skip_if_windows_python_3
+ @td.skip_if_windows
def test_month_range_union_tz_dateutil(self, sort):
from pandas._libs.tslibs.timezones import dateutil_gettz
diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py
index 8df8eef69e9c9..7c00b23dc9ac4 100644
--- a/pandas/tests/indexes/interval/test_indexing.py
+++ b/pandas/tests/indexes/interval/test_indexing.py
@@ -8,9 +8,12 @@
from pandas import (
NA,
CategoricalIndex,
+ Index,
Interval,
IntervalIndex,
+ MultiIndex,
NaT,
+ Series,
Timedelta,
date_range,
timedelta_range,
@@ -373,6 +376,31 @@ def test_get_indexer_with_nans(self):
expected = np.array([0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
+ def test_get_index_non_unique_non_monotonic(self):
+ # GH#44084 (root cause)
+ index = IntervalIndex.from_tuples(
+ [(0.0, 1.0), (1.0, 2.0), (0.0, 1.0), (1.0, 2.0)]
+ )
+
+ result, _ = index.get_indexer_non_unique([Interval(1.0, 2.0)])
+ expected = np.array([1, 3], dtype=np.intp)
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_get_indexer_multiindex_with_intervals(self):
+ # GH#44084 (MultiIndex case as reported)
+ interval_index = IntervalIndex.from_tuples(
+ [(2.0, 3.0), (0.0, 1.0), (1.0, 2.0)], name="interval"
+ )
+ foo_index = Index([1, 2, 3], name="foo")
+
+ multi_index = MultiIndex.from_product([foo_index, interval_index])
+
+ result = multi_index.get_level_values("interval").get_indexer_for(
+ [Interval(0.0, 1.0)]
+ )
+ expected = np.array([1, 4, 7], dtype=np.intp)
+ tm.assert_numpy_array_equal(result, expected)
+
class TestSliceLocs:
def test_slice_locs_with_interval(self):
@@ -523,3 +551,37 @@ def test_putmask_td64(self):
result = idx.putmask(mask, idx[-1])
expected = IntervalIndex([idx[-1]] * 3 + list(idx[3:]))
tm.assert_index_equal(result, expected)
+
+
+class TestGetValue:
+ @pytest.mark.parametrize("key", [[5], (2, 3)])
+ def test_get_value_non_scalar_errors(self, key):
+ # GH#31117
+ idx = IntervalIndex.from_tuples([(1, 3), (2, 4), (3, 5), (7, 10), (3, 10)])
+ ser = Series(range(len(idx)), index=idx)
+
+ msg = str(key)
+ with pytest.raises(InvalidIndexError, match=msg):
+ with tm.assert_produces_warning(FutureWarning):
+ idx.get_value(ser, key)
+
+
+class TestContains:
+ # .__contains__, not .contains
+
+ def test_contains_dunder(self):
+
+ index = IntervalIndex.from_arrays([0, 1], [1, 2], closed="right")
+
+ # __contains__ requires perfect matches to intervals.
+ assert 0 not in index
+ assert 1 not in index
+ assert 2 not in index
+
+ assert Interval(0, 1, closed="right") in index
+ assert Interval(0, 2, closed="right") not in index
+ assert Interval(0, 0.5, closed="right") not in index
+ assert Interval(3, 5, closed="right") not in index
+ assert Interval(-1, 0, closed="left") not in index
+ assert Interval(0, 1, closed="left") not in index
+ assert Interval(0, 1, closed="both") not in index
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 321d1aa34b9af..843885832690f 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -4,8 +4,6 @@
import numpy as np
import pytest
-from pandas.errors import InvalidIndexError
-
import pandas as pd
from pandas import (
Index,
@@ -500,23 +498,6 @@ def test_contains_method(self):
):
i.contains(Interval(0, 1))
- def test_contains_dunder(self):
-
- index = IntervalIndex.from_arrays([0, 1], [1, 2], closed="right")
-
- # __contains__ requires perfect matches to intervals.
- assert 0 not in index
- assert 1 not in index
- assert 2 not in index
-
- assert Interval(0, 1, closed="right") in index
- assert Interval(0, 2, closed="right") not in index
- assert Interval(0, 0.5, closed="right") not in index
- assert Interval(3, 5, closed="right") not in index
- assert Interval(-1, 0, closed="left") not in index
- assert Interval(0, 1, closed="left") not in index
- assert Interval(0, 1, closed="both") not in index
-
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)], closed=closed)
@@ -908,24 +889,6 @@ def test_is_all_dates(self):
year_2017_index = IntervalIndex([year_2017])
assert not year_2017_index._is_all_dates
- @pytest.mark.parametrize("key", [[5], (2, 3)])
- def test_get_value_non_scalar_errors(self, key):
- # GH 31117
- idx = IntervalIndex.from_tuples([(1, 3), (2, 4), (3, 5), (7, 10), (3, 10)])
- s = pd.Series(range(len(idx)), index=idx)
-
- msg = str(key)
- with pytest.raises(InvalidIndexError, match=msg):
- with tm.assert_produces_warning(FutureWarning):
- idx.get_value(s, key)
-
- @pytest.mark.parametrize("closed", ["left", "right", "both"])
- def test_pickle_round_trip_closed(self, closed):
- # https://github.com/pandas-dev/pandas/issues/35658
- idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed)
- result = tm.round_trip_pickle(idx)
- tm.assert_index_equal(result, idx)
-
def test_dir():
# GH#27571 dir(interval_index) should not raise
diff --git a/pandas/tests/indexes/interval/test_pickle.py b/pandas/tests/indexes/interval/test_pickle.py
new file mode 100644
index 0000000000000..308a90e72eab5
--- /dev/null
+++ b/pandas/tests/indexes/interval/test_pickle.py
@@ -0,0 +1,13 @@
+import pytest
+
+from pandas import IntervalIndex
+import pandas._testing as tm
+
+
+class TestPickle:
+ @pytest.mark.parametrize("closed", ["left", "right", "both"])
+ def test_pickle_round_trip_closed(self, closed):
+ # https://github.com/pandas-dev/pandas/issues/35658
+ idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed)
+ result = tm.round_trip_pickle(idx)
+ tm.assert_index_equal(result, idx)
diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py
index d2b5a595b8454..cbb4ae0b0d09b 100644
--- a/pandas/tests/indexes/multi/test_compat.py
+++ b/pandas/tests/indexes/multi/test_compat.py
@@ -96,10 +96,3 @@ def test_inplace_mutation_resets_values():
assert "_values" not in mi2._cache
tm.assert_almost_equal(mi2.values, new_values)
assert "_values" in mi2._cache
-
-
-def test_pickle_compat_construction():
- # this is testing for pickle compat
- # need an object to create with
- with pytest.raises(TypeError, match="Must pass both levels and codes"):
- MultiIndex()
diff --git a/pandas/tests/indexes/multi/test_formats.py b/pandas/tests/indexes/multi/test_formats.py
index 17699aa32929e..a6dadd42f7bf0 100644
--- a/pandas/tests/indexes/multi/test_formats.py
+++ b/pandas/tests/indexes/multi/test_formats.py
@@ -87,10 +87,7 @@ def test_unicode_repr_issues(self):
index = MultiIndex(levels=levels, codes=codes)
repr(index.levels)
-
- # FIXME: dont leave commented-out
- # NumPy bug
- # repr(index.get_level_values(1))
+ repr(index.get_level_values(1))
def test_repr_max_seq_items_equal_to_n(self, idx):
# display.max_seq_items == n
diff --git a/pandas/tests/indexes/multi/test_join.py b/pandas/tests/indexes/multi/test_join.py
index 3aa0ac1676acc..e6bec97aedb38 100644
--- a/pandas/tests/indexes/multi/test_join.py
+++ b/pandas/tests/indexes/multi/test_join.py
@@ -3,6 +3,7 @@
from pandas import (
Index,
+ Interval,
MultiIndex,
)
import pandas._testing as tm
@@ -115,3 +116,45 @@ def test_join_multi_return_indexers():
result = midx1.join(midx2, return_indexers=False)
tm.assert_index_equal(result, midx1)
+
+
+def test_join_overlapping_interval_level():
+ # GH 44096
+ idx_1 = MultiIndex.from_tuples(
+ [
+ (1, Interval(0.0, 1.0)),
+ (1, Interval(1.0, 2.0)),
+ (1, Interval(2.0, 5.0)),
+ (2, Interval(0.0, 1.0)),
+ (2, Interval(1.0, 3.0)), # interval limit is here at 3.0, not at 2.0
+ (2, Interval(3.0, 5.0)),
+ ],
+ names=["num", "interval"],
+ )
+
+ idx_2 = MultiIndex.from_tuples(
+ [
+ (1, Interval(2.0, 5.0)),
+ (1, Interval(0.0, 1.0)),
+ (1, Interval(1.0, 2.0)),
+ (2, Interval(3.0, 5.0)),
+ (2, Interval(0.0, 1.0)),
+ (2, Interval(1.0, 3.0)),
+ ],
+ names=["num", "interval"],
+ )
+
+ expected = MultiIndex.from_tuples(
+ [
+ (1, Interval(0.0, 1.0)),
+ (1, Interval(1.0, 2.0)),
+ (1, Interval(2.0, 5.0)),
+ (2, Interval(0.0, 1.0)),
+ (2, Interval(1.0, 3.0)),
+ (2, Interval(3.0, 5.0)),
+ ],
+ names=["num", "interval"],
+ )
+ result = idx_1.join(idx_2, how="outer")
+
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/multi/test_pickle.py b/pandas/tests/indexes/multi/test_pickle.py
new file mode 100644
index 0000000000000..1d8b721404421
--- /dev/null
+++ b/pandas/tests/indexes/multi/test_pickle.py
@@ -0,0 +1,10 @@
+import pytest
+
+from pandas import MultiIndex
+
+
+def test_pickle_compat_construction():
+ # this is testing for pickle compat
+ # need an object to create with
+ with pytest.raises(TypeError, match="Must pass both levels and codes"):
+ MultiIndex()
diff --git a/pandas/tests/indexes/period/methods/test_astype.py b/pandas/tests/indexes/period/methods/test_astype.py
index e2340a2db02f7..c44f2efed1fcc 100644
--- a/pandas/tests/indexes/period/methods/test_astype.py
+++ b/pandas/tests/indexes/period/methods/test_astype.py
@@ -164,7 +164,10 @@ def test_period_astype_to_timestamp(self):
assert res.freq == exp.freq
exp = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], tz="US/Eastern")
- res = pi.astype("datetime64[ns, US/Eastern]")
+ msg = "Use `obj.to_timestamp"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ # GH#44398
+ res = pi.astype("datetime64[ns, US/Eastern]")
tm.assert_index_equal(res, exp)
assert res.freq == exp.freq
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index 1b5e64bca03a0..df2f114e73df2 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -52,14 +52,6 @@ def non_comparable_idx(request):
class TestGetItem:
- def test_ellipsis(self):
- # GH#21282
- idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
-
- result = idx[...]
- assert result.equals(idx)
- assert result is not idx
-
def test_getitem_slice_keeps_name(self):
idx = period_range("20010101", periods=10, freq="D", name="bob")
assert idx.name == idx[1:].name
diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py
index f7dafd78a801f..91679959e7979 100644
--- a/pandas/tests/indexes/test_any_index.py
+++ b/pandas/tests/indexes/test_any_index.py
@@ -137,6 +137,12 @@ def test_pickle_preserves_name(self, index):
class TestIndexing:
+ def test_getitem_ellipsis(self, index):
+ # GH#21282
+ result = index[...]
+ assert result.equals(index)
+ assert result is not index
+
def test_slice_keeps_name(self, index):
assert index.name == index[1:].name
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 7f9a5c0b50595..5df1f83028133 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1,8 +1,5 @@
from collections import defaultdict
-from datetime import (
- datetime,
- timedelta,
-)
+from datetime import datetime
from io import StringIO
import math
import re
@@ -10,10 +7,7 @@
import numpy as np
import pytest
-from pandas.compat import (
- IS64,
- np_datetime64_compat,
-)
+from pandas.compat import IS64
from pandas.util._test_decorators import async_mark
import pandas as pd
@@ -27,7 +21,6 @@
RangeIndex,
Series,
TimedeltaIndex,
- Timestamp,
date_range,
period_range,
)
@@ -89,11 +82,6 @@ def test_constructor_copy(self, index):
arr[0] = "SOMEBIGLONGSTRING"
assert new_index[0] != "SOMEBIGLONGSTRING"
- # FIXME: dont leave commented-out
- # what to do here?
- # arr = np.array(5.)
- # pytest.raises(Exception, arr.view, Index)
-
@pytest.mark.parametrize("cast_as_obj", [True, False])
@pytest.mark.parametrize(
"index",
@@ -219,91 +207,6 @@ def test_constructor_simple_new(self, vals, dtype):
result = index._simple_new(index.values, dtype)
tm.assert_index_equal(result, index)
- @pytest.mark.parametrize(
- "vals",
- [
- [1, 2, 3],
- np.array([1, 2, 3]),
- np.array([1, 2, 3], dtype=int),
- # below should coerce
- [1.0, 2.0, 3.0],
- np.array([1.0, 2.0, 3.0], dtype=float),
- ],
- )
- def test_constructor_dtypes_to_int64(self, vals):
- index = Index(vals, dtype=int)
- assert isinstance(index, Int64Index)
-
- @pytest.mark.parametrize(
- "vals",
- [
- [1, 2, 3],
- [1.0, 2.0, 3.0],
- np.array([1.0, 2.0, 3.0]),
- np.array([1, 2, 3], dtype=int),
- np.array([1.0, 2.0, 3.0], dtype=float),
- ],
- )
- def test_constructor_dtypes_to_float64(self, vals):
- index = Index(vals, dtype=float)
- assert isinstance(index, Float64Index)
-
- @pytest.mark.parametrize(
- "vals",
- [
- [1, 2, 3],
- np.array([1, 2, 3], dtype=int),
- np.array(
- [np_datetime64_compat("2011-01-01"), np_datetime64_compat("2011-01-02")]
- ),
- [datetime(2011, 1, 1), datetime(2011, 1, 2)],
- ],
- )
- def test_constructor_dtypes_to_categorical(self, vals):
- index = Index(vals, dtype="category")
- assert isinstance(index, CategoricalIndex)
-
- @pytest.mark.parametrize("cast_index", [True, False])
- @pytest.mark.parametrize(
- "vals",
- [
- Index(
- np.array(
- [
- np_datetime64_compat("2011-01-01"),
- np_datetime64_compat("2011-01-02"),
- ]
- )
- ),
- Index([datetime(2011, 1, 1), datetime(2011, 1, 2)]),
- ],
- )
- def test_constructor_dtypes_to_datetime(self, cast_index, vals):
- if cast_index:
- index = Index(vals, dtype=object)
- assert isinstance(index, Index)
- assert index.dtype == object
- else:
- index = Index(vals)
- assert isinstance(index, DatetimeIndex)
-
- @pytest.mark.parametrize("cast_index", [True, False])
- @pytest.mark.parametrize(
- "vals",
- [
- np.array([np.timedelta64(1, "D"), np.timedelta64(1, "D")]),
- [timedelta(1), timedelta(1)],
- ],
- )
- def test_constructor_dtypes_to_timedelta(self, cast_index, vals):
- if cast_index:
- index = Index(vals, dtype=object)
- assert isinstance(index, Index)
- assert index.dtype == object
- else:
- index = Index(vals)
- assert isinstance(index, TimedeltaIndex)
-
@pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning")
@pytest.mark.parametrize("attr", ["values", "asi8"])
@pytest.mark.parametrize("klass", [Index, DatetimeIndex])
@@ -726,20 +629,6 @@ def test_is_all_dates(self, index, expected):
def test_summary(self, index):
index._summary()
- def test_summary_bug(self):
- # GH3869`
- ind = Index(["{other}%s", "~:{range}:0"], name="A")
- result = ind._summary()
- # shouldn't be formatted accidentally.
- assert "~:{range}:0" in result
- assert "{other}%s" in result
-
- def test_format_different_scalar_lengths(self):
- # GH35439
- idx = Index(["aaaaaaaaa", "b"])
- expected = ["aaaaaaaaa", "b"]
- assert idx.format() == expected
-
def test_format_bug(self):
# GH 14626
# windows has different precision on datetime.datetime.now (it doesn't
@@ -767,21 +656,6 @@ def test_format_missing(self, vals, nulls_fixture):
assert formatted == expected
assert index[3] is nulls_fixture
- def test_format_with_name_time_info(self):
- # bug I fixed 12/20/2011
- dates = date_range("2011-01-01 04:00:00", periods=10, name="something")
-
- formatted = dates.format(name=True)
- assert formatted[0] == "something"
-
- def test_format_datetime_with_time(self):
- t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
-
- result = t.format()
- expected = ["2012-02-07 00:00:00", "2012-02-07 23:00:00"]
- assert len(result) == 2
- assert result == expected
-
@pytest.mark.parametrize("op", ["any", "all"])
def test_logical_compat(self, op, simple_index):
index = simple_index
@@ -1129,12 +1003,6 @@ def test_outer_join_sort(self):
tm.assert_index_equal(result, expected)
- def test_nan_first_take_datetime(self):
- index = Index([pd.NaT, Timestamp("20130101"), Timestamp("20130102")])
- result = index.take([-1, 0, 1])
- expected = Index([index[-1], index[0], index[1]])
- tm.assert_index_equal(result, expected)
-
def test_take_fill_value(self):
# GH 12631
index = Index(list("ABC"), name="xxx")
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index ed9243a5ba8d0..80edaf77fe960 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -1,7 +1,7 @@
"""
Collection of tests asserting things that should be true for
-any index subclass. Makes use of the `indices` fixture defined
-in pandas/tests/indexes/conftest.py.
+any index subclass except for MultiIndex. Makes use of the `index_flat`
+fixture defined in pandas/conftest.py.
"""
import re
@@ -29,6 +29,26 @@
class TestCommon:
+ @pytest.mark.parametrize("name", [None, "new_name"])
+ def test_to_frame(self, name, index_flat):
+ # see GH#15230, GH#22580
+ idx = index_flat
+
+ if name:
+ idx_name = name
+ else:
+ idx_name = idx.name or 0
+
+ df = idx.to_frame(name=idx_name)
+
+ assert df.index is idx
+ assert len(df.columns) == 1
+ assert df.columns[0] == idx_name
+ assert df[idx_name].values is not idx.values
+
+ df = idx.to_frame(index=False, name=idx_name)
+ assert df.index is not idx
+
def test_droplevel(self, index):
# GH 21115
if isinstance(index, MultiIndex):
@@ -126,6 +146,46 @@ def test_copy_and_deepcopy(self, index_flat):
new_copy = index.copy(deep=True, name="banana")
assert new_copy.name == "banana"
+ def test_copy_name(self, index_flat):
+ # GH#12309: Check that the "name" argument
+ # passed at initialization is honored.
+ index = index_flat
+
+ first = type(index)(index, copy=True, name="mario")
+ second = type(first)(first, copy=False)
+
+ # Even though "copy=False", we want a new object.
+ assert first is not second
+ tm.assert_index_equal(first, second)
+
+ # Not using tm.assert_index_equal() since names differ.
+ assert index.equals(first)
+
+ assert first.name == "mario"
+ assert second.name == "mario"
+
+ # TODO: belongs in series arithmetic tests?
+ s1 = pd.Series(2, index=first)
+ s2 = pd.Series(3, index=second[:-1])
+ # See GH#13365
+ s3 = s1 * s2
+ assert s3.index.name == "mario"
+
+ def test_copy_name2(self, index_flat):
+ # GH#35592
+ index = index_flat
+ if isinstance(index, MultiIndex):
+ return
+
+ assert index.copy(name="mario").name == "mario"
+
+ with pytest.raises(ValueError, match="Length of new names must be 1, got 2"):
+ index.copy(name=["mario", "luigi"])
+
+ msg = f"{type(index).__name__}.name must be a hashable type"
+ with pytest.raises(TypeError, match=msg):
+ index.copy(name=[["mario"]])
+
def test_unique_level(self, index_flat):
# don't test a MultiIndex here (as its tested separated)
index = index_flat
@@ -332,6 +392,9 @@ def test_astype_preserves_name(self, index, dtype):
):
# This astype is deprecated in favor of tz_localize
warn = FutureWarning
+ elif isinstance(index, PeriodIndex) and dtype == "datetime64[ns]":
+ # Deprecated in favor of to_timestamp GH#44398
+ warn = FutureWarning
try:
# Some of these conversions cannot succeed so we use a try / except
with tm.assert_produces_warning(warn):
diff --git a/pandas/tests/indexes/test_index_new.py b/pandas/tests/indexes/test_index_new.py
index 5c5ec7219d2d7..deeaffaf5b9cc 100644
--- a/pandas/tests/indexes/test_index_new.py
+++ b/pandas/tests/indexes/test_index_new.py
@@ -1,11 +1,17 @@
"""
Tests for the Index constructor conducting inference.
"""
+from datetime import (
+ datetime,
+ timedelta,
+)
from decimal import Decimal
import numpy as np
import pytest
+from pandas.compat import np_datetime64_compat
+
from pandas.core.dtypes.common import is_unsigned_integer_dtype
from pandas import (
@@ -27,6 +33,7 @@
)
import pandas._testing as tm
from pandas.core.api import (
+ Float64Index,
Int64Index,
UInt64Index,
)
@@ -232,6 +239,91 @@ def test_constructor_int_dtype_nan_raises(self, dtype):
with pytest.raises(ValueError, match=msg):
Index(data, dtype=dtype)
+ @pytest.mark.parametrize(
+ "vals",
+ [
+ [1, 2, 3],
+ np.array([1, 2, 3]),
+ np.array([1, 2, 3], dtype=int),
+ # below should coerce
+ [1.0, 2.0, 3.0],
+ np.array([1.0, 2.0, 3.0], dtype=float),
+ ],
+ )
+ def test_constructor_dtypes_to_int64(self, vals):
+ index = Index(vals, dtype=int)
+ assert isinstance(index, Int64Index)
+
+ @pytest.mark.parametrize(
+ "vals",
+ [
+ [1, 2, 3],
+ [1.0, 2.0, 3.0],
+ np.array([1.0, 2.0, 3.0]),
+ np.array([1, 2, 3], dtype=int),
+ np.array([1.0, 2.0, 3.0], dtype=float),
+ ],
+ )
+ def test_constructor_dtypes_to_float64(self, vals):
+ index = Index(vals, dtype=float)
+ assert isinstance(index, Float64Index)
+
+ @pytest.mark.parametrize(
+ "vals",
+ [
+ [1, 2, 3],
+ np.array([1, 2, 3], dtype=int),
+ np.array(
+ [np_datetime64_compat("2011-01-01"), np_datetime64_compat("2011-01-02")]
+ ),
+ [datetime(2011, 1, 1), datetime(2011, 1, 2)],
+ ],
+ )
+ def test_constructor_dtypes_to_categorical(self, vals):
+ index = Index(vals, dtype="category")
+ assert isinstance(index, CategoricalIndex)
+
+ @pytest.mark.parametrize("cast_index", [True, False])
+ @pytest.mark.parametrize(
+ "vals",
+ [
+ Index(
+ np.array(
+ [
+ np_datetime64_compat("2011-01-01"),
+ np_datetime64_compat("2011-01-02"),
+ ]
+ )
+ ),
+ Index([datetime(2011, 1, 1), datetime(2011, 1, 2)]),
+ ],
+ )
+ def test_constructor_dtypes_to_datetime(self, cast_index, vals):
+ if cast_index:
+ index = Index(vals, dtype=object)
+ assert isinstance(index, Index)
+ assert index.dtype == object
+ else:
+ index = Index(vals)
+ assert isinstance(index, DatetimeIndex)
+
+ @pytest.mark.parametrize("cast_index", [True, False])
+ @pytest.mark.parametrize(
+ "vals",
+ [
+ np.array([np.timedelta64(1, "D"), np.timedelta64(1, "D")]),
+ [timedelta(1), timedelta(1)],
+ ],
+ )
+ def test_constructor_dtypes_to_timedelta(self, cast_index, vals):
+ if cast_index:
+ index = Index(vals, dtype=object)
+ assert isinstance(index, Index)
+ assert index.dtype == object
+ else:
+ index = Index(vals)
+ assert isinstance(index, TimedeltaIndex)
+
class TestIndexConstructorUnwrapping:
# Test passing different arraylike values to pd.Index
diff --git a/pandas/tests/indexes/test_indexing.py b/pandas/tests/indexes/test_indexing.py
index f7cffe48d1722..9acdd52178e0e 100644
--- a/pandas/tests/indexes/test_indexing.py
+++ b/pandas/tests/indexes/test_indexing.py
@@ -332,3 +332,12 @@ def test_get_indexer_non_unique_multiple_nans(idx, target, expected):
axis = Index(idx)
actual = axis.get_indexer_for(target)
tm.assert_numpy_array_equal(actual, expected)
+
+
+def test_get_indexer_non_unique_nans_in_object_dtype_target(nulls_fixture):
+ idx = Index([1.0, 2.0])
+ target = Index([1, nulls_fixture], dtype="object")
+
+ result_idx, result_missing = idx.get_indexer_non_unique(target)
+ tm.assert_numpy_array_equal(result_idx, np.array([0, -1], dtype=np.intp))
+ tm.assert_numpy_array_equal(result_missing, np.array([1], dtype=np.intp))
diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py
index 66fdaa2778600..b618f12e9f6c9 100644
--- a/pandas/tests/indexes/timedeltas/test_indexing.py
+++ b/pandas/tests/indexes/timedeltas/test_indexing.py
@@ -21,14 +21,6 @@
class TestGetItem:
- def test_ellipsis(self):
- # GH#21282
- idx = timedelta_range("1 day", "31 day", freq="D", name="idx")
-
- result = idx[...]
- assert result.equals(idx)
- assert result is not idx
-
def test_getitem_slice_keeps_name(self):
# GH#4226
tdi = timedelta_range("1d", "5d", freq="H", name="timebucket")
@@ -157,7 +149,7 @@ def test_where_doesnt_retain_freq(self):
result = tdi.where(cond, tdi[::-1])
tm.assert_index_equal(result, expected)
- def test_where_invalid_dtypes(self):
+ def test_where_invalid_dtypes(self, fixed_now_ts):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
tail = tdi[2:].tolist()
@@ -169,17 +161,17 @@ def test_where_invalid_dtypes(self):
result = tdi.where(mask, i2.asi8)
tm.assert_index_equal(result, expected)
- ts = i2 + Timestamp.now()
+ ts = i2 + fixed_now_ts
expected = Index([ts[0], ts[1]] + tail, dtype=object, name="idx")
result = tdi.where(mask, ts)
tm.assert_index_equal(result, expected)
- per = (i2 + Timestamp.now()).to_period("D")
+ per = (i2 + fixed_now_ts).to_period("D")
expected = Index([per[0], per[1]] + tail, dtype=object, name="idx")
result = tdi.where(mask, per)
tm.assert_index_equal(result, expected)
- ts = Timestamp.now()
+ ts = fixed_now_ts
expected = Index([ts, ts] + tail, dtype=object, name="idx")
result = tdi.where(mask, ts)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py
index b97aaf6c551d8..2a12d690ff0bd 100644
--- a/pandas/tests/indexing/multiindex/test_setitem.py
+++ b/pandas/tests/indexing/multiindex/test_setitem.py
@@ -368,8 +368,7 @@ def test_frame_setitem_multi_column2(self):
assert sliced_a2.name == ("A", "2")
assert sliced_b1.name == ("B", "1")
- # TODO: no setitem here?
- def test_getitem_setitem_tuple_plus_columns(
+ def test_loc_getitem_tuple_plus_columns(
self, multiindex_year_month_day_dataframe_random_data
):
# GH #1013
diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py
index 42edaa2fe6c3a..55d45a21d643a 100644
--- a/pandas/tests/indexing/multiindex/test_slice.py
+++ b/pandas/tests/indexing/multiindex/test_slice.py
@@ -702,32 +702,30 @@ def test_per_axis_per_level_setitem(self):
tm.assert_frame_equal(df, expected)
def test_multiindex_label_slicing_with_negative_step(self):
- s = Series(
+ ser = Series(
np.arange(20), MultiIndex.from_product([list("abcde"), np.arange(4)])
)
SLC = pd.IndexSlice
- def assert_slices_equivalent(l_slc, i_slc):
- tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
- tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
+ tm.assert_indexing_slices_equivalent(ser, SLC[::-1], SLC[::-1])
- assert_slices_equivalent(SLC[::-1], SLC[::-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC["d"::-1], SLC[15::-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC[("d",)::-1], SLC[15::-1])
- assert_slices_equivalent(SLC["d"::-1], SLC[15::-1])
- assert_slices_equivalent(SLC[("d",)::-1], SLC[15::-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC[:"d":-1], SLC[:11:-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC[:("d",):-1], SLC[:11:-1])
- assert_slices_equivalent(SLC[:"d":-1], SLC[:11:-1])
- assert_slices_equivalent(SLC[:("d",):-1], SLC[:11:-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC["d":"b":-1], SLC[15:3:-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC[("d",):"b":-1], SLC[15:3:-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC["d":("b",):-1], SLC[15:3:-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC[("d",):("b",):-1], SLC[15:3:-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC["b":"d":-1], SLC[:0])
- assert_slices_equivalent(SLC["d":"b":-1], SLC[15:3:-1])
- assert_slices_equivalent(SLC[("d",):"b":-1], SLC[15:3:-1])
- assert_slices_equivalent(SLC["d":("b",):-1], SLC[15:3:-1])
- assert_slices_equivalent(SLC[("d",):("b",):-1], SLC[15:3:-1])
- assert_slices_equivalent(SLC["b":"d":-1], SLC[:0])
-
- assert_slices_equivalent(SLC[("c", 2)::-1], SLC[10::-1])
- assert_slices_equivalent(SLC[:("c", 2):-1], SLC[:9:-1])
- assert_slices_equivalent(SLC[("e", 0):("c", 2):-1], SLC[16:9:-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC[("c", 2)::-1], SLC[10::-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC[:("c", 2):-1], SLC[:9:-1])
+ tm.assert_indexing_slices_equivalent(
+ ser, SLC[("e", 0):("c", 2):-1], SLC[16:9:-1]
+ )
def test_multiindex_slice_first_level(self):
# GH 12697
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 27aeb411e36f0..0174219892d92 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -780,7 +780,6 @@ def test_where_index_datetime(self, fill_val):
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
- @pytest.mark.xfail(reason="GH 22839: do not ignore timezone, must be object")
def test_where_index_datetime64tz(self):
fill_val = pd.Timestamp("2012-01-01", tz="US/Eastern")
exp_dtype = object
@@ -795,9 +794,9 @@ def test_where_index_datetime64tz(self):
assert obj.dtype == "datetime64[ns]"
cond = pd.Index([True, False, True, False])
- msg = "Index\\(\\.\\.\\.\\) must be called with a collection of some kind"
- with pytest.raises(TypeError, match=msg):
- obj.where(cond, fill_val)
+ res = obj.where(cond, fill_val)
+ expected = pd.Index([obj[0], fill_val, obj[2], fill_val], dtype=object)
+ tm.assert_index_equal(res, expected)
values = pd.Index(pd.date_range(fill_val, periods=4))
exp = pd.Index(
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index e46eed05caa86..332ab02255911 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -130,7 +130,7 @@ def test_nanosecond_getitem_setitem_with_tz(self):
expected = DataFrame(-1, index=index, columns=["a"])
tm.assert_frame_equal(result, expected)
- def test_getitem_millisecond_resolution(self, frame_or_series):
+ def test_getitem_str_slice_millisecond_resolution(self, frame_or_series):
# GH#33589
keys = [
@@ -152,16 +152,3 @@ def test_getitem_millisecond_resolution(self, frame_or_series):
],
)
tm.assert_equal(result, expected)
-
- def test_str_subclass(self):
- # GH 37366
- class mystring(str):
- pass
-
- data = ["2020-10-22 01:21:00+00:00"]
- index = pd.DatetimeIndex(data)
- df = DataFrame({"a": [1]}, index=index)
- df["b"] = 2
- df[mystring("c")] = 3
- expected = DataFrame({"a": [1], "b": [2], mystring("c"): [3]}, index=index)
- tm.assert_equal(df, expected)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 7d2f68b00d95f..e088f1ce87a6a 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -106,8 +106,6 @@ def test_iloc_setitem_fullcol_categorical(self, indexer, key, using_array_manage
expected = DataFrame({0: cat, 1: range(3)})
tm.assert_frame_equal(df, expected)
- # TODO(ArrayManager) does not yet update parent
- @td.skip_array_manager_not_yet_implemented
@pytest.mark.parametrize("box", [array, Series])
def test_iloc_setitem_ea_inplace(self, frame_or_series, box, using_array_manager):
# GH#38952 Case with not setting a full column
@@ -1148,6 +1146,18 @@ def test_loc_setitem_boolean_list(self, rhs_func, indexing_func):
expected = DataFrame({"a": [5, 1, 10]})
tm.assert_frame_equal(df, expected)
+ def test_iloc_getitem_slice_negative_step_ea_block(self):
+ # GH#44551
+ df = DataFrame({"A": [1, 2, 3]}, dtype="Int64")
+
+ res = df.iloc[:, ::-1]
+ tm.assert_frame_equal(res, df)
+
+ df["B"] = "foo"
+ res = df.iloc[:, ::-1]
+ expected = DataFrame({"B": df["B"], "A": df["A"]})
+ tm.assert_frame_equal(res, expected)
+
class TestILocErrors:
# NB: this test should work for _any_ Series we can pass as
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 7c7e9f79a77ae..0f9612fa5c96c 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -277,7 +277,7 @@ def test_dups_fancy_indexing_only_missing_label(self):
):
dfnu.loc[["E"]]
- # ToDo: check_index_type can be True after GH 11497
+ # TODO: check_index_type can be True after GH 11497
@pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")])
def test_dups_fancy_indexing_missing_label(self, vals):
@@ -323,9 +323,9 @@ def test_dups_fancy_indexing3(self):
def test_duplicate_int_indexing(self, indexer_sl):
# GH 17347
- s = Series(range(3), index=[1, 1, 3])
- expected = s[1]
- result = indexer_sl(s)[[1]]
+ ser = Series(range(3), index=[1, 1, 3])
+ expected = Series(range(2), index=[1, 1])
+ result = indexer_sl(ser)[[1]]
tm.assert_series_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
@@ -653,13 +653,6 @@ def test_loc_setitem_fullindex_views(self):
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
- def test_float_index_at_iat(self):
- s = Series([1, 2, 3], index=[0.1, 0.2, 0.3])
- for el, item in s.items():
- assert s.at[el] == item
- for i in range(len(s)):
- assert s.iat[i] == i + 1
-
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
@@ -709,21 +702,17 @@ def run_tests(df, rhs, right_loc, right_iloc):
def test_str_label_slicing_with_negative_step(self):
SLC = pd.IndexSlice
- def assert_slices_equivalent(l_slc, i_slc):
- tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
-
- if not idx.is_integer:
- # For integer indices, .loc and plain getitem are position-based.
- tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
- tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
-
for idx in [_mklbl("A", 20), np.arange(20) + 100, np.linspace(100, 150, 20)]:
idx = Index(idx)
- s = Series(np.arange(20), index=idx)
- assert_slices_equivalent(SLC[idx[9] :: -1], SLC[9::-1])
- assert_slices_equivalent(SLC[: idx[9] : -1], SLC[:8:-1])
- assert_slices_equivalent(SLC[idx[13] : idx[9] : -1], SLC[13:8:-1])
- assert_slices_equivalent(SLC[idx[9] : idx[13] : -1], SLC[:0])
+ ser = Series(np.arange(20), index=idx)
+ tm.assert_indexing_slices_equivalent(ser, SLC[idx[9] :: -1], SLC[9::-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC[: idx[9] : -1], SLC[:8:-1])
+ tm.assert_indexing_slices_equivalent(
+ ser, SLC[idx[13] : idx[9] : -1], SLC[13:8:-1]
+ )
+ tm.assert_indexing_slices_equivalent(
+ ser, SLC[idx[9] : idx[13] : -1], SLC[:0]
+ )
def test_slice_with_zero_step_raises(self, indexer_sl, frame_or_series):
obj = frame_or_series(np.arange(20), index=_mklbl("A", 20))
@@ -967,7 +956,11 @@ def test_extension_array_cross_section():
def test_extension_array_cross_section_converts():
# all numeric columns -> numeric series
df = DataFrame(
- {"A": pd.array([1, 2], dtype="Int64"), "B": np.array([1, 2])}, index=["a", "b"]
+ {
+ "A": pd.array([1, 2], dtype="Int64"),
+ "B": np.array([1, 2], dtype="int64"),
+ },
+ index=["a", "b"],
)
result = df.loc["a"]
expected = Series([1, 1], dtype="Int64", index=["A", "B"], name="a")
@@ -987,10 +980,3 @@ def test_extension_array_cross_section_converts():
result = df.iloc[0]
tm.assert_series_equal(result, expected)
-
-
-def test_getitem_object_index_float_string():
- # GH 17286
- s = Series([1] * 4, index=Index(["a", "b", "c", 1.0]))
- assert s["a"] == 1
- assert s[1.0] == 1
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index ed9b5cc0850b9..2a9ee81b7a23a 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -178,6 +178,26 @@ def test_column_types_consistent(self):
)
tm.assert_frame_equal(df, expected)
+ @pytest.mark.parametrize(
+ "obj, key, exp",
+ [
+ (
+ DataFrame([[1]], columns=Index([False])),
+ IndexSlice[:, False],
+ Series([1], name=False),
+ ),
+ (Series([1], index=Index([False])), False, [1]),
+ (DataFrame([[1]], index=Index([False])), False, Series([1], name=False)),
+ ],
+ )
+ def test_loc_getitem_single_boolean_arg(self, obj, key, exp):
+ # GH 44322
+ res = obj.loc[key]
+ if isinstance(exp, (DataFrame, Series)):
+ tm.assert_equal(res, exp)
+ else:
+ assert res == exp
+
class TestLoc2:
# TODO: better name, just separating out things that rely on base class
@@ -1123,9 +1143,6 @@ def test_loc_setitem_empty_append_single_value(self):
df.loc[0, "x"] = expected.loc[0, "x"]
tm.assert_frame_equal(df, expected)
- # TODO(ArrayManager) "split" path doesn't handle this case and gives wrong
- # error message
- @td.skip_array_manager_not_yet_implemented
def test_loc_setitem_empty_append_raises(self):
# GH6173, various appends to an empty dataframe
@@ -1374,7 +1391,7 @@ def test_loc_setitem_datetimeindex_tz(self, idxer, tz_naive_fixture):
tz = tz_naive_fixture
idx = date_range(start="2015-07-12", periods=3, freq="H", tz=tz)
expected = DataFrame(1.2, index=idx, columns=["var"])
- # if result started off with object dtype, tehn the .loc.__setitem__
+ # if result started off with object dtype, then the .loc.__setitem__
# below would retain object dtype
result = DataFrame(index=idx, columns=["var"], dtype=np.float64)
result.loc[:, idxer] = expected
@@ -2630,7 +2647,7 @@ def test_loc_slice_disallows_positional():
with pytest.raises(TypeError, match=msg):
df.loc[1:3, 1]
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
# GH#31840 deprecated incorrect behavior
df.loc[1:3, 1] = 2
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 82d55a7bf7189..95a9fd227c685 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -7,8 +7,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
from pandas import (
DataFrame,
@@ -355,10 +353,6 @@ def test_partial_setting2(self):
df.at[dates[-1] + dates.freq, 0] = 7
tm.assert_frame_equal(df, expected)
- # TODO(ArrayManager)
- # df.loc[0] = Series(1, index=range(4)) case creates float columns
- # instead of object dtype
- @td.skip_array_manager_not_yet_implemented
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index bf262e6755289..bcb76fb078e74 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -77,6 +77,13 @@ def _check(f, func, values=False):
class TestAtAndiAT:
# at and iat tests that don't need Base class
+ def test_float_index_at_iat(self):
+ ser = Series([1, 2, 3], index=[0.1, 0.2, 0.3])
+ for el, item in ser.items():
+ assert ser.at[el] == item
+ for i in range(len(ser)):
+ assert ser.iat[i] == i + 1
+
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py
index 5d4705dbe7d77..48e8bfe461764 100644
--- a/pandas/tests/io/conftest.py
+++ b/pandas/tests/io/conftest.py
@@ -69,27 +69,26 @@ def s3_base(worker_id):
endpoint_uri = f"http://127.0.0.1:{endpoint_port}/"
# pipe to null to avoid logging in terminal
- proc = subprocess.Popen(
+ with subprocess.Popen(
shlex.split(f"moto_server s3 -p {endpoint_port}"),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
- )
+ ) as proc:
- timeout = 5
- while timeout > 0:
- try:
- # OK to go once server is accepting connections
- r = requests.get(endpoint_uri)
- if r.ok:
- break
- except Exception:
- pass
- timeout -= 0.1
- time.sleep(0.1)
- yield endpoint_uri
+ timeout = 5
+ while timeout > 0:
+ try:
+ # OK to go once server is accepting connections
+ r = requests.get(endpoint_uri)
+ if r.ok:
+ break
+ except Exception:
+ pass
+ timeout -= 0.1
+ time.sleep(0.1)
+ yield endpoint_uri
- proc.terminate()
- proc.wait()
+ proc.terminate()
@pytest.fixture()
@@ -131,12 +130,12 @@ def add_tips_files(bucket_name):
try:
cli.create_bucket(Bucket=bucket)
- except: # noqa
+ except Exception:
# OK is bucket already exists
pass
try:
cli.create_bucket(Bucket="cant_get_it", ACL="private")
- except: # noqa
+ except Exception:
# OK is bucket already exists
pass
timeout = 2
@@ -153,11 +152,11 @@ def add_tips_files(bucket_name):
try:
s3.rm(bucket, recursive=True)
- except: # noqa
+ except Exception:
pass
try:
s3.rm("cant_get_it", recursive=True)
- except: # noqa
+ except Exception:
pass
timeout = 2
while cli.list_buckets()["Buckets"] and timeout > 0:
diff --git a/pandas/tests/io/excel/test_odswriter.py b/pandas/tests/io/excel/test_odswriter.py
index 4bf6051fd36ef..0e6d1dac55506 100644
--- a/pandas/tests/io/excel/test_odswriter.py
+++ b/pandas/tests/io/excel/test_odswriter.py
@@ -19,23 +19,40 @@ def test_write_append_mode_raises(ext):
ExcelWriter(f, engine="odf", mode="a")
-@pytest.mark.parametrize("nan_inf_to_errors", [True, False])
-def test_kwargs(ext, nan_inf_to_errors):
+def test_kwargs(ext):
# GH 42286
- # odswriter doesn't utilize kwargs, nothing to check except that it works
- kwargs = {"options": {"nan_inf_to_errors": nan_inf_to_errors}}
+ # GH 43445
+ # test for error: OpenDocumentSpreadsheet does not accept any arguments
+ kwargs = {"kwarg": 1}
with tm.ensure_clean(ext) as f:
msg = re.escape("Use of **kwargs is deprecated")
- with tm.assert_produces_warning(FutureWarning, match=msg):
- with ExcelWriter(f, engine="odf", **kwargs) as _:
- pass
-
-
-@pytest.mark.parametrize("nan_inf_to_errors", [True, False])
-def test_engine_kwargs(ext, nan_inf_to_errors):
+ error = re.escape(
+ "OpenDocumentSpreadsheet() got an unexpected keyword argument 'kwarg'"
+ )
+ with pytest.raises(
+ TypeError,
+ match=error,
+ ):
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ with ExcelWriter(f, engine="odf", **kwargs) as _:
+ pass
+
+
+@pytest.mark.parametrize("engine_kwargs", [None, {"kwarg": 1}])
+def test_engine_kwargs(ext, engine_kwargs):
# GH 42286
- # odswriter doesn't utilize engine_kwargs, nothing to check except that it works
- engine_kwargs = {"options": {"nan_inf_to_errors": nan_inf_to_errors}}
+ # GH 43445
+ # test for error: OpenDocumentSpreadsheet does not accept any arguments
with tm.ensure_clean(ext) as f:
- with ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs) as _:
- pass
+ if engine_kwargs is not None:
+ error = re.escape(
+ "OpenDocumentSpreadsheet() got an unexpected keyword argument 'kwarg'"
+ )
+ with pytest.raises(
+ TypeError,
+ match=error,
+ ):
+ ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs)
+ else:
+ with ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs) as _:
+ pass
diff --git a/pandas/tests/io/excel/test_openpyxl.py b/pandas/tests/io/excel/test_openpyxl.py
index cd773957c9043..e0d4a0c12ecdf 100644
--- a/pandas/tests/io/excel/test_openpyxl.py
+++ b/pandas/tests/io/excel/test_openpyxl.py
@@ -85,30 +85,63 @@ def test_write_cells_merge_styled(ext):
assert xcell_a2.font == openpyxl_sty_merged
-@pytest.mark.parametrize("write_only", [True, False])
-def test_kwargs(ext, write_only):
- # GH 42286
- # openpyxl doesn't utilize kwargs, only test that supplying a kwarg works
- kwargs = {"write_only": write_only}
+@pytest.mark.parametrize("iso_dates", [True, False])
+def test_kwargs(ext, iso_dates):
+ # GH 42286 GH 43445
+ kwargs = {"iso_dates": iso_dates}
with tm.ensure_clean(ext) as f:
msg = re.escape("Use of **kwargs is deprecated")
with tm.assert_produces_warning(FutureWarning, match=msg):
with ExcelWriter(f, engine="openpyxl", **kwargs) as writer:
+ assert writer.book.iso_dates == iso_dates
# ExcelWriter won't allow us to close without writing something
DataFrame().to_excel(writer)
-@pytest.mark.parametrize("write_only", [True, False])
-def test_engine_kwargs(ext, write_only):
- # GH 42286
- # openpyxl doesn't utilize kwargs, only test that supplying a engine_kwarg works
- engine_kwargs = {"write_only": write_only}
+@pytest.mark.parametrize("iso_dates", [True, False])
+def test_engine_kwargs_write(ext, iso_dates):
+ # GH 42286 GH 43445
+ engine_kwargs = {"iso_dates": iso_dates}
with tm.ensure_clean(ext) as f:
with ExcelWriter(f, engine="openpyxl", engine_kwargs=engine_kwargs) as writer:
+ assert writer.book.iso_dates == iso_dates
# ExcelWriter won't allow us to close without writing something
DataFrame().to_excel(writer)
+def test_engine_kwargs_append_invalid(ext):
+ # GH 43445
+ # test whether an invalid engine kwargs actually raises
+ with tm.ensure_clean(ext) as f:
+ DataFrame(["hello", "world"]).to_excel(f)
+ with pytest.raises(
+ TypeError,
+ match=re.escape(
+ "load_workbook() got an unexpected keyword argument 'apple_banana'"
+ ),
+ ):
+ with ExcelWriter(
+ f, engine="openpyxl", mode="a", engine_kwargs={"apple_banana": "fruit"}
+ ) as writer:
+ # ExcelWriter needs us to write something to close properly
+ DataFrame(["good"]).to_excel(writer, sheet_name="Sheet2")
+
+
+@pytest.mark.parametrize("data_only, expected", [(True, 0), (False, "=1+1")])
+def test_engine_kwargs_append_data_only(ext, data_only, expected):
+ # GH 43445
+ # tests whether the data_only engine_kwarg actually works well for
+ # openpyxl's load_workbook
+ with tm.ensure_clean(ext) as f:
+ DataFrame(["=1+1"]).to_excel(f)
+ with ExcelWriter(
+ f, engine="openpyxl", mode="a", engine_kwargs={"data_only": data_only}
+ ) as writer:
+ assert writer.sheets["Sheet1"]["B2"].value == expected
+ # ExcelWriter needs us to writer something to close properly?
+ DataFrame().to_excel(writer, sheet_name="Sheet2")
+
+
@pytest.mark.parametrize(
"mode,expected", [("w", ["baz"]), ("a", ["foo", "bar", "baz"])]
)
@@ -139,6 +172,7 @@ def test_write_append_mode(ext, mode, expected):
[
("new", 2, ["apple", "banana"]),
("replace", 1, ["pear"]),
+ ("overlay", 1, ["pear", "banana"]),
],
)
def test_if_sheet_exists_append_modes(ext, if_sheet_exists, num_sheets, expected):
@@ -164,13 +198,46 @@ def test_if_sheet_exists_append_modes(ext, if_sheet_exists, num_sheets, expected
wb.close()
+@pytest.mark.parametrize(
+ "startrow, startcol, greeting, goodbye",
+ [
+ (0, 0, ["poop", "world"], ["goodbye", "people"]),
+ (0, 1, ["hello", "world"], ["poop", "people"]),
+ (1, 0, ["hello", "poop"], ["goodbye", "people"]),
+ (1, 1, ["hello", "world"], ["goodbye", "poop"]),
+ ],
+)
+def test_append_overlay_startrow_startcol(ext, startrow, startcol, greeting, goodbye):
+ df1 = DataFrame({"greeting": ["hello", "world"], "goodbye": ["goodbye", "people"]})
+ df2 = DataFrame(["poop"])
+
+ with tm.ensure_clean(ext) as f:
+ df1.to_excel(f, engine="openpyxl", sheet_name="poo", index=False)
+ with ExcelWriter(
+ f, engine="openpyxl", mode="a", if_sheet_exists="overlay"
+ ) as writer:
+ # use startrow+1 because we don't have a header
+ df2.to_excel(
+ writer,
+ index=False,
+ header=False,
+ startrow=startrow + 1,
+ startcol=startcol,
+ sheet_name="poo",
+ )
+
+ result = pd.read_excel(f, sheet_name="poo", engine="openpyxl")
+ expected = DataFrame({"greeting": greeting, "goodbye": goodbye})
+ tm.assert_frame_equal(result, expected)
+
+
@pytest.mark.parametrize(
"if_sheet_exists,msg",
[
(
"invalid",
"'invalid' is not valid for if_sheet_exists. Valid options "
- "are 'error', 'new' and 'replace'.",
+ "are 'error', 'new', 'replace' and 'overlay'.",
),
(
"error",
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 508e767a47004..6f1431c6e410d 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -891,107 +891,6 @@ def test_to_excel_unicode_filename(self, ext, path):
)
tm.assert_frame_equal(result, expected)
- # FIXME: dont leave commented-out
- # def test_to_excel_header_styling_xls(self, engine, ext):
-
- # import StringIO
- # s = StringIO(
- # """Date,ticker,type,value
- # 2001-01-01,x,close,12.2
- # 2001-01-01,x,open ,12.1
- # 2001-01-01,y,close,12.2
- # 2001-01-01,y,open ,12.1
- # 2001-02-01,x,close,12.2
- # 2001-02-01,x,open ,12.1
- # 2001-02-01,y,close,12.2
- # 2001-02-01,y,open ,12.1
- # 2001-03-01,x,close,12.2
- # 2001-03-01,x,open ,12.1
- # 2001-03-01,y,close,12.2
- # 2001-03-01,y,open ,12.1""")
- # df = read_csv(s, parse_dates=["Date"])
- # pdf = df.pivot_table(values="value", rows=["ticker"],
- # cols=["Date", "type"])
-
- # try:
- # import xlwt
- # import xlrd
- # except ImportError:
- # pytest.skip
-
- # filename = '__tmp_to_excel_header_styling_xls__.xls'
- # pdf.to_excel(filename, 'test1')
-
- # wbk = xlrd.open_workbook(filename,
- # formatting_info=True)
- # assert ["test1"] == wbk.sheet_names()
- # ws = wbk.sheet_by_name('test1')
- # assert [(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)] == ws.merged_cells
- # for i in range(0, 2):
- # for j in range(0, 7):
- # xfx = ws.cell_xf_index(0, 0)
- # cell_xf = wbk.xf_list[xfx]
- # font = wbk.font_list
- # assert 1 == font[cell_xf.font_index].bold
- # assert 1 == cell_xf.border.top_line_style
- # assert 1 == cell_xf.border.right_line_style
- # assert 1 == cell_xf.border.bottom_line_style
- # assert 1 == cell_xf.border.left_line_style
- # assert 2 == cell_xf.alignment.hor_align
- # os.remove(filename)
- # def test_to_excel_header_styling_xlsx(self, engine, ext):
- # import StringIO
- # s = StringIO(
- # """Date,ticker,type,value
- # 2001-01-01,x,close,12.2
- # 2001-01-01,x,open ,12.1
- # 2001-01-01,y,close,12.2
- # 2001-01-01,y,open ,12.1
- # 2001-02-01,x,close,12.2
- # 2001-02-01,x,open ,12.1
- # 2001-02-01,y,close,12.2
- # 2001-02-01,y,open ,12.1
- # 2001-03-01,x,close,12.2
- # 2001-03-01,x,open ,12.1
- # 2001-03-01,y,close,12.2
- # 2001-03-01,y,open ,12.1""")
- # df = read_csv(s, parse_dates=["Date"])
- # pdf = df.pivot_table(values="value", rows=["ticker"],
- # cols=["Date", "type"])
- # try:
- # import openpyxl
- # from openpyxl.cell import get_column_letter
- # except ImportError:
- # pytest.skip
- # if openpyxl.__version__ < '1.6.1':
- # pytest.skip
- # # test xlsx_styling
- # filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'
- # pdf.to_excel(filename, 'test1')
- # wbk = openpyxl.load_workbook(filename)
- # assert ["test1"] == wbk.get_sheet_names()
- # ws = wbk.get_sheet_by_name('test1')
- # xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
- # xlsaddrs += ["A%s" % i for i in range(1, 6)]
- # xlsaddrs += ["B1", "D1", "F1"]
- # for xlsaddr in xlsaddrs:
- # cell = ws.cell(xlsaddr)
- # assert cell.style.font.bold
- # assert (openpyxl.style.Border.BORDER_THIN ==
- # cell.style.borders.top.border_style)
- # assert (openpyxl.style.Border.BORDER_THIN ==
- # cell.style.borders.right.border_style)
- # assert (openpyxl.style.Border.BORDER_THIN ==
- # cell.style.borders.bottom.border_style)
- # assert (openpyxl.style.Border.BORDER_THIN ==
- # cell.style.borders.left.border_style)
- # assert (openpyxl.style.Alignment.HORIZONTAL_CENTER ==
- # cell.style.alignment.horizontal)
- # mergedcells_addrs = ["C1", "E1", "G1"]
- # for maddr in mergedcells_addrs:
- # assert ws.cell(maddr).merged
- # os.remove(filename)
-
@pytest.mark.parametrize("use_headers", [True, False])
@pytest.mark.parametrize("r_idx_nlevels", [1, 2, 3])
@pytest.mark.parametrize("c_idx_nlevels", [1, 2, 3])
diff --git a/pandas/tests/io/excel/test_xlwt.py b/pandas/tests/io/excel/test_xlwt.py
index c58b9763f9618..ec333defd85ac 100644
--- a/pandas/tests/io/excel/test_xlwt.py
+++ b/pandas/tests/io/excel/test_xlwt.py
@@ -101,25 +101,27 @@ def test_option_xls_writer_deprecated(ext):
options.io.excel.xls.writer = "xlwt"
-@pytest.mark.parametrize("write_only", [True, False])
-def test_kwargs(ext, write_only):
+@pytest.mark.parametrize("style_compression", [0, 2])
+def test_kwargs(ext, style_compression):
# GH 42286
- # xlwt doesn't utilize kwargs, only test that supplying a kwarg works
- kwargs = {"write_only": write_only}
+ kwargs = {"style_compression": style_compression}
with tm.ensure_clean(ext) as f:
msg = re.escape("Use of **kwargs is deprecated")
with tm.assert_produces_warning(FutureWarning, match=msg):
- with ExcelWriter(f, engine="openpyxl", **kwargs) as writer:
+ with ExcelWriter(f, engine="xlwt", **kwargs) as writer:
+ assert (
+ writer.book._Workbook__styles.style_compression == style_compression
+ )
# xlwt won't allow us to close without writing something
DataFrame().to_excel(writer)
-@pytest.mark.parametrize("write_only", [True, False])
-def test_engine_kwargs(ext, write_only):
+@pytest.mark.parametrize("style_compression", [0, 2])
+def test_engine_kwargs(ext, style_compression):
# GH 42286
- # xlwt doesn't utilize kwargs, only test that supplying a engine_kwarg works
- engine_kwargs = {"write_only": write_only}
+ engine_kwargs = {"style_compression": style_compression}
with tm.ensure_clean(ext) as f:
- with ExcelWriter(f, engine="openpyxl", engine_kwargs=engine_kwargs) as writer:
+ with ExcelWriter(f, engine="xlwt", engine_kwargs=engine_kwargs) as writer:
+ assert writer.book._Workbook__styles.style_compression == style_compression
# xlwt won't allow us to close without writing something
DataFrame().to_excel(writer)
diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py
index cccfd87f6312b..e793857989ac1 100644
--- a/pandas/tests/io/formats/style/test_style.py
+++ b/pandas/tests/io/formats/style/test_style.py
@@ -319,7 +319,12 @@ def test_clear(mi_styler_comp):
# tests vars are not same vals on obj and clean copy before clear (except for excl)
for attr in [a for a in styler.__dict__ if not (callable(a) or a in excl)]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
- assert not (all(res) if (hasattr(res, "__iter__") and len(res) > 0) else res)
+ if hasattr(res, "__iter__") and len(res) > 0:
+ assert not all(res) # some element in iterable differs
+ elif hasattr(res, "__iter__") and len(res) == 0:
+ pass # empty array
+ else:
+ assert not res # explicit var differs
# test vars have same vales on obj and clean copy after clearing
styler.clear()
@@ -749,7 +754,7 @@ def test_applymap_subset_multiindex(self, slice_):
col = MultiIndex.from_product([["x", "y"], ["A", "B"]])
df = DataFrame(np.random.rand(4, 4), columns=col, index=idx)
- with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
+ with tm.assert_produces_warning(warn, match=msg):
df.style.applymap(lambda x: "color: red;", subset=slice_).to_html()
def test_applymap_subset_multiindex_code(self):
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index d9bd8f6809c73..ab0199dca3f24 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -3298,6 +3298,7 @@ def test_repr_html_ipython_config(ip):
assert not result.error_in_exec
+@pytest.mark.filterwarnings("ignore:In future versions `DataFrame.to_latex`")
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
@pytest.mark.parametrize(
"encoding, data",
@@ -3319,7 +3320,8 @@ def test_filepath_or_buffer_arg(
):
getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
elif encoding == "foo":
- with tm.assert_produces_warning(None):
+ expected_warning = FutureWarning if method == "to_latex" else None
+ with tm.assert_produces_warning(expected_warning):
with pytest.raises(LookupError, match="unknown encoding"):
getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
else:
@@ -3328,6 +3330,7 @@ def test_filepath_or_buffer_arg(
assert_filepath_or_buffer_equals(expected)
+@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
def test_filepath_or_buffer_bad_arg_raises(float_frame, method):
msg = "buf is not a file name and it has no write method"
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index 4c482bafa6c9c..059fd96db43ad 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -1,6 +1,8 @@
import io
import os
+from pathlib import Path
import sys
+from zipfile import ZipFile
import numpy as np
import pytest
@@ -541,23 +543,38 @@ def test_to_csv_compression_dict_no_method_raises(self):
df.to_csv(path, compression=compression)
@pytest.mark.parametrize("compression", ["zip", "infer"])
- @pytest.mark.parametrize(
- "archive_name", [None, "test_to_csv.csv", "test_to_csv.zip"]
- )
+ @pytest.mark.parametrize("archive_name", ["test_to_csv.csv", "test_to_csv.zip"])
def test_to_csv_zip_arguments(self, compression, archive_name):
# GH 26023
- from zipfile import ZipFile
-
df = DataFrame({"ABC": [1]})
with tm.ensure_clean("to_csv_archive_name.zip") as path:
df.to_csv(
path, compression={"method": compression, "archive_name": archive_name}
)
with ZipFile(path) as zp:
- expected_arcname = path if archive_name is None else archive_name
- expected_arcname = os.path.basename(expected_arcname)
assert len(zp.filelist) == 1
- archived_file = os.path.basename(zp.filelist[0].filename)
+ archived_file = zp.filelist[0].filename
+ assert archived_file == archive_name
+
+ @pytest.mark.parametrize(
+ "filename,expected_arcname",
+ [
+ ("archive.csv", "archive.csv"),
+ ("archive.tsv", "archive.tsv"),
+ ("archive.csv.zip", "archive.csv"),
+ ("archive.tsv.zip", "archive.tsv"),
+ ("archive.zip", "archive"),
+ ],
+ )
+ def test_to_csv_zip_infer_name(self, filename, expected_arcname):
+ # GH 39465
+ df = DataFrame({"ABC": [1]})
+ with tm.ensure_clean_dir() as dir:
+ path = Path(dir, filename)
+ df.to_csv(path, compression="zip")
+ with ZipFile(path) as zp:
+ assert len(zp.filelist) == 1
+ archived_file = zp.filelist[0].filename
assert archived_file == expected_arcname
@pytest.mark.parametrize("df_new_type", ["Int64"])
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index 10c8ccae67fb2..01bc94bf594d9 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -19,6 +19,8 @@
RowStringConverter,
)
+pytestmark = pytest.mark.filterwarnings("ignore::FutureWarning")
+
def _dedent(string):
"""Dedent without new line in the beginning.
@@ -1514,3 +1516,15 @@ def test_get_strrow_multindex_multicolumn(self, row_num, expected):
)
assert row_string_converter.get_strrow(row_num=row_num) == expected
+
+ def test_future_warning(self):
+ df = DataFrame([[1]])
+ msg = (
+ "In future versions `DataFrame.to_latex` is expected to utilise the base "
+ "implementation of `Styler.to_latex` for formatting and rendering. "
+ "The arguments signature may therefore change. It is recommended instead "
+ "to use `DataFrame.style.to_latex` which also contains additional "
+ "functionality."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ df.to_latex()
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py
index a2b90f607e918..231228ef6c0af 100644
--- a/pandas/tests/io/json/test_normalize.py
+++ b/pandas/tests/io/json/test_normalize.py
@@ -3,8 +3,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
from pandas import (
DataFrame,
Index,
@@ -153,8 +151,6 @@ def test_simple_records(self):
tm.assert_frame_equal(result, expected)
- # TODO(ArrayManager) sanitize S/U numpy dtypes to object
- @td.skip_array_manager_not_yet_implemented
def test_simple_normalize(self, state_data):
result = json_normalize(state_data[0], "counties")
expected = DataFrame(state_data[0]["counties"])
@@ -381,8 +377,6 @@ def test_meta_parameter_not_modified(self):
for val in ["metafoo", "metabar", "foo", "bar"]:
assert val in result
- # TODO(ArrayManager) sanitize S/U numpy dtypes to object
- @td.skip_array_manager_not_yet_implemented
def test_record_prefix(self, state_data):
result = json_normalize(state_data[0], "counties")
expected = DataFrame(state_data[0]["counties"])
@@ -634,6 +628,33 @@ def test_missing_meta(self, missing_metadata):
expected = DataFrame(ex_data, columns=columns)
tm.assert_frame_equal(result, expected)
+ def test_missing_nested_meta(self):
+ # GH44312
+ # If errors="ignore" and nested metadata is null, we should return nan
+ data = {"meta": "foo", "nested_meta": None, "value": [{"rec": 1}, {"rec": 2}]}
+ result = json_normalize(
+ data,
+ record_path="value",
+ meta=["meta", ["nested_meta", "leaf"]],
+ errors="ignore",
+ )
+ ex_data = [[1, "foo", np.nan], [2, "foo", np.nan]]
+ columns = ["rec", "meta", "nested_meta.leaf"]
+ expected = DataFrame(ex_data, columns=columns).astype(
+ {"nested_meta.leaf": object}
+ )
+ tm.assert_frame_equal(result, expected)
+
+ # If errors="raise" and nested metadata is null, we should raise with the
+ # key of the first missing level
+ with pytest.raises(KeyError, match="'leaf' not found"):
+ json_normalize(
+ data,
+ record_path="value",
+ meta=["meta", ["nested_meta", "leaf"]],
+ errors="raise",
+ )
+
def test_missing_meta_multilevel_record_path_errors_raise(self, missing_metadata):
# GH41876
# Ensure errors='raise' works as intended even when a record_path of length
diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py
index 6d958f46a49dd..96c3709fdb3d8 100644
--- a/pandas/tests/io/parser/common/test_common_basic.py
+++ b/pandas/tests/io/parser/common/test_common_basic.py
@@ -385,7 +385,7 @@ def test_escapechar(all_parsers):
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv board","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
-"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
+"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa:E501
parser = all_parsers
result = parser.read_csv(
@@ -491,7 +491,7 @@ def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
],
)
def test_trailing_spaces(all_parsers, kwargs, expected):
- data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
+ data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa:E501
parser = all_parsers
result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
diff --git a/pandas/tests/io/parser/test_compression.py b/pandas/tests/io/parser/test_compression.py
index e0799df8d7a4c..5aa0edfd8b46a 100644
--- a/pandas/tests/io/parser/test_compression.py
+++ b/pandas/tests/io/parser/test_compression.py
@@ -103,8 +103,6 @@ def test_compression(parser_and_data, compression_only, buffer, filename):
tm.write_to_compressed(compress_type, path, data)
compression = "infer" if filename else compress_type
- if ext == "bz2":
- pytest.xfail("pyarrow wheels don't have bz2 codec support")
if buffer:
with open(path, "rb") as f:
result = parser.read_csv(f, compression=compression)
diff --git a/pandas/tests/io/parser/test_index_col.py b/pandas/tests/io/parser/test_index_col.py
index 646cb2029919d..58b5eebbec344 100644
--- a/pandas/tests/io/parser/test_index_col.py
+++ b/pandas/tests/io/parser/test_index_col.py
@@ -297,3 +297,38 @@ def test_multiindex_columns_index_col_with_data(all_parsers):
index=Index(["data"]),
)
tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+def test_infer_types_boolean_sum(all_parsers):
+ # GH#44079
+ parser = all_parsers
+ result = parser.read_csv(
+ StringIO("0,1"),
+ names=["a", "b"],
+ index_col=["a"],
+ dtype={"a": "UInt8"},
+ )
+ expected = DataFrame(
+ data={
+ "a": [
+ 0,
+ ],
+ "b": [1],
+ }
+ ).set_index("a")
+ # Not checking index type now, because the C parser will return a
+ # index column of dtype 'object', and the Python parser will return a
+ # index column of dtype 'int64'.
+ tm.assert_frame_equal(result, expected, check_index_type=False)
+
+
+@skip_pyarrow
+@pytest.mark.parametrize("dtype, val", [(object, "01"), ("int64", 1)])
+def test_specify_dtype_for_index_col(all_parsers, dtype, val):
+ # GH#9435
+ data = "a,b\n01,2"
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), index_col="a", dtype={"a": dtype})
+ expected = DataFrame({"b": [2]}, index=Index([val], name="a"))
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py
index c8bea9592e82a..a61f3bc03028c 100644
--- a/pandas/tests/io/parser/test_parse_dates.py
+++ b/pandas/tests/io/parser/test_parse_dates.py
@@ -97,6 +97,39 @@ def __custom_date_parser(time):
tm.assert_frame_equal(result, expected)
+@xfail_pyarrow
+def test_read_csv_with_custom_date_parser_parse_dates_false(all_parsers):
+ # GH44366
+ def __custom_date_parser(time):
+ time = time.astype(np.float_)
+ time = time.astype(np.int_) # convert float seconds to int type
+ return pd.to_timedelta(time, unit="s")
+
+ testdata = StringIO(
+ """time e
+ 41047.00 -93.77
+ 41048.00 -95.79
+ 41049.00 -98.73
+ 41050.00 -93.99
+ 41051.00 -97.72
+ """
+ )
+ result = all_parsers.read_csv(
+ testdata,
+ delim_whitespace=True,
+ parse_dates=False,
+ date_parser=__custom_date_parser,
+ index_col="time",
+ )
+ time = Series([41047.00, 41048.00, 41049.00, 41050.00, 41051.00], name="time")
+ expected = DataFrame(
+ {"e": [-93.77, -95.79, -98.73, -93.99, -97.72]},
+ index=time,
+ )
+
+ tm.assert_frame_equal(result, expected)
+
+
@xfail_pyarrow
def test_separator_date_conflict(all_parsers):
# Regression test for gh-4678
@@ -253,8 +286,6 @@ def date_parser(*date_cols):
if not keep_date_col:
expected = expected.drop(["X1", "X2", "X3"], axis=1)
- elif parser.engine == "python":
- expected["X1"] = expected["X1"].astype(np.int64)
# Python can sometimes be flaky about how
# the aggregated columns are entered, so
@@ -392,8 +423,6 @@ def test_multiple_date_col(all_parsers, keep_date_col):
if not keep_date_col:
expected = expected.drop(["X1", "X2", "X3"], axis=1)
- elif parser.engine == "python":
- expected["X1"] = expected["X1"].astype(np.int64)
tm.assert_frame_equal(result, expected)
@@ -1732,6 +1761,39 @@ def test_date_parser_and_names(all_parsers):
tm.assert_frame_equal(result, expected)
+@skip_pyarrow
+def test_date_parser_multiindex_columns(all_parsers):
+ parser = all_parsers
+ data = """a,b
+1,2
+2019-12-31,6"""
+ result = parser.read_csv(StringIO(data), parse_dates=[("a", "1")], header=[0, 1])
+ expected = DataFrame({("a", "1"): Timestamp("2019-12-31"), ("b", "2"): [6]})
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+@pytest.mark.parametrize(
+ "parse_spec, col_name",
+ [
+ ([[("a", "1"), ("b", "2")]], ("a_b", "1_2")),
+ ({("foo", "1"): [("a", "1"), ("b", "2")]}, ("foo", "1")),
+ ],
+)
+def test_date_parser_multiindex_columns_combine_cols(all_parsers, parse_spec, col_name):
+ parser = all_parsers
+ data = """a,b,c
+1,2,3
+2019-12,-31,6"""
+ result = parser.read_csv(
+ StringIO(data),
+ parse_dates=parse_spec,
+ header=[0, 1],
+ )
+ expected = DataFrame({col_name: Timestamp("2019-12-31"), ("c", "3"): [6]})
+ tm.assert_frame_equal(result, expected)
+
+
@skip_pyarrow
def test_date_parser_usecols_thousands(all_parsers):
# GH#39365
@@ -1841,3 +1903,44 @@ def test_dayfirst_warnings():
index_col="date",
).index
tm.assert_index_equal(expected, res8)
+
+
+@skip_pyarrow
+def test_infer_first_column_as_index(all_parsers):
+ # GH#11019
+ parser = all_parsers
+ data = "a,b,c\n1970-01-01,2,3,4"
+ result = parser.read_csv(StringIO(data), parse_dates=["a"])
+ expected = DataFrame({"a": "2", "b": 3, "c": 4}, index=["1970-01-01"])
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+def test_replace_nans_before_parsing_dates(all_parsers):
+ # GH#26203
+ parser = all_parsers
+ data = """Test
+2012-10-01
+0
+2015-05-15
+#
+2017-09-09
+"""
+ result = parser.read_csv(
+ StringIO(data),
+ na_values={"Test": ["#", "0"]},
+ parse_dates=["Test"],
+ date_parser=lambda x: pd.to_datetime(x, format="%Y-%m-%d"),
+ )
+ expected = DataFrame(
+ {
+ "Test": [
+ Timestamp("2012-10-01"),
+ pd.NaT,
+ Timestamp("2015-05-15"),
+ pd.NaT,
+ Timestamp("2017-09-09"),
+ ]
+ }
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py
index f62c9fd1349bf..df8be721ec871 100644
--- a/pandas/tests/io/parser/test_python_parser_only.py
+++ b/pandas/tests/io/parser/test_python_parser_only.py
@@ -310,3 +310,22 @@ def test_malformed_skipfooter(python_parser_only):
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#", skipfooter=1)
+
+
+def test_python_engine_file_no_next(python_parser_only):
+ parser = python_parser_only
+
+ class NoNextBuffer:
+ def __init__(self, csv_data):
+ self.data = csv_data
+
+ def __iter__(self):
+ return self.data.__iter__()
+
+ def read(self):
+ return self.data
+
+ def readline(self):
+ return self.data
+
+ parser.read_csv(NoNextBuffer("a\n1"))
diff --git a/pandas/tests/io/parser/test_quoting.py b/pandas/tests/io/parser/test_quoting.py
index 6995965467d05..456dd049d2f4a 100644
--- a/pandas/tests/io/parser/test_quoting.py
+++ b/pandas/tests/io/parser/test_quoting.py
@@ -24,7 +24,7 @@
{"quotechar": None, "quoting": csv.QUOTE_MINIMAL},
"quotechar must be set if quoting enabled",
),
- ({"quotechar": 2}, '"quotechar" must be string, not int'),
+ ({"quotechar": 2}, '"quotechar" must be string( or None)?, not int'),
],
)
def test_bad_quote_char(all_parsers, kwargs, msg):
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index 8d1fa97f9f8bb..3f43ea0b8a12d 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -311,7 +311,7 @@ def test_fwf_regression():
def test_fwf_for_uint8():
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
-1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa
+1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa:E501
df = read_fwf(
StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37), (49, 51), (58, 62), (63, 1000)],
@@ -862,3 +862,48 @@ def test_colspecs_with_comment():
)
expected = DataFrame([[1, "K"]], columns=[0, 1])
tm.assert_frame_equal(result, expected)
+
+
+def test_skip_rows_and_n_rows():
+ # GH#44021
+ data = """a\tb
+1\t a
+2\t b
+3\t c
+4\t d
+5\t e
+6\t f
+ """
+ result = read_fwf(StringIO(data), nrows=4, skiprows=[2, 4])
+ expected = DataFrame({"a": [1, 3, 5, 6], "b": ["a", "c", "e", "f"]})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_skiprows_with_iterator():
+ # GH#10261
+ data = """0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+ """
+ df_iter = read_fwf(
+ StringIO(data),
+ colspecs=[(0, 2)],
+ names=["a"],
+ iterator=True,
+ chunksize=2,
+ skiprows=[0, 1, 2, 6, 9],
+ )
+ expected_frames = [
+ DataFrame({"a": [3, 4]}),
+ DataFrame({"a": [5, 7, 8]}, index=[2, 3, 4]),
+ DataFrame({"a": []}, index=[], dtype="object"),
+ ]
+ for i, result in enumerate(df_iter):
+ tm.assert_frame_equal(result, expected_frames[i])
diff --git a/pandas/tests/io/parser/test_skiprows.py b/pandas/tests/io/parser/test_skiprows.py
index 9df6bf42c55d2..627bda44016e9 100644
--- a/pandas/tests/io/parser/test_skiprows.py
+++ b/pandas/tests/io/parser/test_skiprows.py
@@ -256,3 +256,21 @@ def test_skip_rows_bad_callable(all_parsers):
with pytest.raises(ZeroDivisionError, match=msg):
parser.read_csv(StringIO(data), skiprows=lambda x: 1 / 0)
+
+
+def test_skip_rows_and_n_rows(all_parsers):
+ # GH#44021
+ data = """a,b
+1,a
+2,b
+3,c
+4,d
+5,e
+6,f
+7,g
+8,h
+"""
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), nrows=5, skiprows=[2, 4, 6])
+ expected = DataFrame({"a": [1, 3, 5, 7, 8], "b": ["a", "c", "e", "g", "h"]})
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py
index 1e5cf49ce24ae..89d35499fd597 100644
--- a/pandas/tests/io/parser/test_unsupported.py
+++ b/pandas/tests/io/parser/test_unsupported.py
@@ -104,22 +104,25 @@ def test_python_engine(self, python_engine):
with pytest.raises(ValueError, match=msg):
read_csv(StringIO(data), engine=python_engine, **kwargs)
- def test_python_engine_file_no_next(self, python_engine):
+ def test_python_engine_file_no_iter(self, python_engine):
# see gh-16530
class NoNextBuffer:
def __init__(self, csv_data):
self.data = csv_data
- def __iter__(self):
- return self
+ def __next__(self):
+ return self.data.__next__()
def read(self):
return self.data
+ def readline(self):
+ return self.data
+
data = "a\n1"
- msg = "The 'python' engine cannot iterate"
+ msg = "'NoNextBuffer' object is not iterable|argument 1 must be an iterator"
- with pytest.raises(ValueError, match=msg):
+ with pytest.raises(TypeError, match=msg):
read_csv(NoNextBuffer(data), engine=python_engine)
def test_pyarrow_engine(self):
diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py
index b5f9e6e74ece9..6cfe8148ad034 100644
--- a/pandas/tests/io/pytables/test_append.py
+++ b/pandas/tests/io/pytables/test_append.py
@@ -896,9 +896,6 @@ def test_append_to_multiple_dropna(setup_path):
tm.assert_index_equal(store.select("df1").index, store.select("df2").index)
-@pytest.mark.xfail(
- run=False, reason="append_to_multiple_dropna_false is not raising as failed"
-)
def test_append_to_multiple_dropna_false(setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
@@ -912,8 +909,7 @@ def test_append_to_multiple_dropna_false(setup_path):
{"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
)
- # TODO Update error message to desired message for this case
- msg = "Cannot select as multiple after appending with dropna=False"
+ msg = "all tables must have exactly the same nrows!"
with pytest.raises(ValueError, match=msg):
store.select_as_multiple(["df1a", "df2a"])
diff --git a/pandas/tests/io/pytables/test_put.py b/pandas/tests/io/pytables/test_put.py
index f4b70bc6f238a..41addc5023436 100644
--- a/pandas/tests/io/pytables/test_put.py
+++ b/pandas/tests/io/pytables/test_put.py
@@ -176,7 +176,7 @@ def test_put_compression(setup_path):
store.put("b", df, format="fixed", complib="zlib")
-@td.skip_if_windows_python_3
+@td.skip_if_windows
def test_put_compression_blosc(setup_path):
df = tm.makeTimeDataFrame()
diff --git a/pandas/tests/io/pytables/test_round_trip.py b/pandas/tests/io/pytables/test_round_trip.py
index 97edc3cdffdf7..eba21bd94444a 100644
--- a/pandas/tests/io/pytables/test_round_trip.py
+++ b/pandas/tests/io/pytables/test_round_trip.py
@@ -354,7 +354,7 @@ def test_timeseries_preepoch(setup_path):
@pytest.mark.parametrize(
- "compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
+ "compression", [False, pytest.param(True, marks=td.skip_if_windows)]
)
def test_frame(compression, setup_path):
@@ -435,7 +435,7 @@ def test_store_hierarchical(setup_path):
@pytest.mark.parametrize(
- "compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
+ "compression", [False, pytest.param(True, marks=td.skip_if_windows)]
)
def test_store_mixed(compression, setup_path):
def _make_one():
diff --git a/pandas/tests/io/pytables/test_select.py b/pandas/tests/io/pytables/test_select.py
index 56d48945d5852..fc8d4506abda0 100644
--- a/pandas/tests/io/pytables/test_select.py
+++ b/pandas/tests/io/pytables/test_select.py
@@ -265,7 +265,7 @@ def test_select_dtypes(setup_path):
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
- np_zero = np.float64(0) # noqa
+ np_zero = np.float64(0) # noqa:F841
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
@@ -683,17 +683,17 @@ def test_frame_select_complex2(setup_path):
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
- l = selection.index.tolist() # noqa
+ l0 = selection.index.tolist() # noqa:F841
store = HDFStore(hh)
- result = store.select("df", where="l1=l")
+ result = store.select("df", where="l1=l0")
tm.assert_frame_equal(result, expected)
store.close()
- result = read_hdf(hh, "df", where="l1=l")
+ result = read_hdf(hh, "df", where="l1=l0")
tm.assert_frame_equal(result, expected)
# index
- index = selection.index # noqa
+ index = selection.index # noqa:F841
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
@@ -928,7 +928,7 @@ def test_query_compare_column_type(setup_path):
with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
- ts = Timestamp("2014-01-01") # noqa
+ ts = Timestamp("2014-01-01") # noqa:F841
result = store.select("test", where="real_date > ts")
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 856a2ca15ec4a..83c86d4da05e6 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -533,7 +533,9 @@ def test_same_name_scoping(setup_path):
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
- from datetime import datetime # noqa
+ # changes what 'datetime' points to in the namespace where
+ # 'select' does the lookup
+ from datetime import datetime # noqa:F401
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index 699459ab3666d..a782f8dbbc76d 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -348,6 +348,7 @@ def test_read_fspath_all(self, reader, module, path, datapath):
else:
tm.assert_frame_equal(result, expected)
+ @pytest.mark.filterwarnings("ignore:In future versions `DataFrame.to_latex`")
@pytest.mark.parametrize(
"writer_name, writer_kwargs, module",
[
@@ -384,7 +385,6 @@ def test_write_fspath_all(self, writer_name, writer_kwargs, module):
@pytest.mark.filterwarnings( # pytables np.object usage
"ignore:`np.object` is a deprecated alias:DeprecationWarning"
)
- @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) IO HDF5
def test_write_fspath_hdf5(self):
# Same test as write_fspath_all, except HDF5 files aren't
# necessarily byte-for-byte identical for a given dataframe, so we'll
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index 97ebb3a0d39ba..59c7abc4a4cb8 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -2,14 +2,12 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
import pandas._testing as tm
from pandas.io.feather_format import read_feather, to_feather # isort:skip
-pyarrow = pytest.importorskip("pyarrow")
+pyarrow = pytest.importorskip("pyarrow", minversion="1.0.1")
filter_sparse = pytest.mark.filterwarnings("ignore:The Sparse")
@@ -120,7 +118,6 @@ def test_read_columns(self):
columns = ["col1", "col3"]
self.check_round_trip(df, expected=df[columns], columns=columns)
- @td.skip_if_no("pyarrow", min_version="0.17.1")
def read_columns_different_order(self):
# GH 33878
df = pd.DataFrame({"A": [1, 2], "B": ["x", "y"], "C": [True, False]})
@@ -180,12 +177,10 @@ def test_path_localpath(self):
result = tm.round_trip_localpath(df.to_feather, read_feather)
tm.assert_frame_equal(df, result)
- @td.skip_if_no("pyarrow", min_version="0.17.0")
def test_passthrough_keywords(self):
df = tm.makeDataFrame().reset_index()
self.check_round_trip(df, write_kwargs={"version": 1})
- @td.skip_if_no("pyarrow")
@tm.network
def test_http_path(self, feather_file):
# GH 29055
diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py
index 887889bce1eaa..2e8e4a9017dbc 100644
--- a/pandas/tests/io/test_gcs.py
+++ b/pandas/tests/io/test_gcs.py
@@ -1,5 +1,6 @@
from io import BytesIO
import os
+import zipfile
import numpy as np
import pytest
@@ -88,16 +89,23 @@ def test_to_read_gcs(gcs_buffer, format):
tm.assert_frame_equal(df1, df2)
-def assert_equal_zip_safe(result: bytes, expected: bytes):
+def assert_equal_zip_safe(result: bytes, expected: bytes, compression: str):
"""
- We would like to assert these are equal, but the 10th and 11th bytes are a
- last-modified timestamp, which in some builds is off-by-one, so we check around
- that.
+ For zip compression, only compare the CRC-32 checksum of the file contents
+ to avoid checking the time-dependent last-modified timestamp which
+ in some CI builds is off-by-one
See https://en.wikipedia.org/wiki/ZIP_(file_format)#File_headers
"""
- assert result[:9] == expected[:9]
- assert result[11:] == expected[11:]
+ if compression == "zip":
+ # Only compare the CRC checksum of the file contents
+ with zipfile.ZipFile(BytesIO(result)) as exp, zipfile.ZipFile(
+ BytesIO(expected)
+ ) as res:
+ for res_info, exp_info in zip(res.infolist(), exp.infolist()):
+ assert res_info.CRC == exp_info.CRC
+ else:
+ assert result == expected
@td.skip_if_no("gcsfs")
@@ -126,7 +134,7 @@ def test_to_csv_compression_encoding_gcs(gcs_buffer, compression_only, encoding)
df.to_csv(path_gcs, compression=compression, encoding=encoding)
res = gcs_buffer.getvalue()
expected = buffer.getvalue()
- assert_equal_zip_safe(res, expected)
+ assert_equal_zip_safe(res, expected, compression_only)
read_df = read_csv(
path_gcs, index_col=0, compression=compression_only, encoding=encoding
@@ -142,7 +150,7 @@ def test_to_csv_compression_encoding_gcs(gcs_buffer, compression_only, encoding)
res = gcs_buffer.getvalue()
expected = buffer.getvalue()
- assert_equal_zip_safe(res, expected)
+ assert_equal_zip_safe(res, expected, compression_only)
read_df = read_csv(path_gcs, index_col=0, compression="infer", encoding=encoding)
tm.assert_frame_equal(df, read_df)
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index ec724602c5249..270cb402483bf 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -15,7 +15,6 @@
from pandas.compat import is_platform_windows
from pandas.compat.pyarrow import (
- pa_version_under1p0,
pa_version_under2p0,
pa_version_under5p0,
)
@@ -784,11 +783,7 @@ def test_s3_roundtrip_for_dir(
# only used if partition field is string, but this changed again to use
# category dtype for all types (not only strings) in pyarrow 2.0.0
if partition_col:
- partition_col_type = (
- "int32"
- if (not pa_version_under1p0) and pa_version_under2p0
- else "category"
- )
+ partition_col_type = "int32" if pa_version_under2p0 else "category"
expected_df[partition_col] = expected_df[partition_col].astype(
partition_col_type
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 9253e5ae700c7..d656c56b0ee10 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -54,11 +54,9 @@
lzma = import_lzma()
-# TODO(ArrayManager) pickling
-pytestmark = [
- td.skip_array_manager_not_yet_implemented,
- pytest.mark.filterwarnings("ignore:Timestamp.freq is deprecated:FutureWarning"),
-]
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:Timestamp.freq is deprecated:FutureWarning"
+)
@pytest.fixture(scope="module")
@@ -612,6 +610,7 @@ def test_pickle_strings(string_series):
tm.assert_series_equal(unp_series, string_series)
+@td.skip_array_manager_invalid_test
def test_pickle_preserves_block_ndim():
# GH#37631
ser = Series(list("abc")).astype("category").iloc[[0]]
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 386f11b3dd794..92a53a443b217 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -1251,7 +1251,7 @@ def test_database_uri_string(self, test_frame1):
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
- import pg8000 # noqa
+ import pg8000 # noqa:F401
pytest.skip("pg8000 is installed")
except ImportError:
@@ -1366,13 +1366,6 @@ def test_read_sql_delegate(self):
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
- def test_safe_names_warning(self):
- # GH 6798
- df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
- # warns on create table with spaces in names
- with tm.assert_produces_warning(UserWarning):
- sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
-
def test_get_schema2(self, test_frame1):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(test_frame1, "test")
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index f4f79c915b317..fa2305d11f901 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -1135,7 +1135,7 @@ def test_read_chunks_117(
):
fname = getattr(self, file)
- with warnings.catch_warnings(record=True) as w:
+ with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
parsed = read_stata(
fname,
@@ -1151,7 +1151,7 @@ def test_read_chunks_117(
pos = 0
for j in range(5):
- with warnings.catch_warnings(record=True) as w: # noqa
+ with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
@@ -1232,7 +1232,7 @@ def test_read_chunks_115(
fname = getattr(self, file)
# Read the whole file
- with warnings.catch_warnings(record=True) as w:
+ with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
parsed = read_stata(
fname,
@@ -1249,7 +1249,7 @@ def test_read_chunks_115(
)
pos = 0
for j in range(5):
- with warnings.catch_warnings(record=True) as w: # noqa
+ with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
diff --git a/pandas/tests/libs/test_join.py b/pandas/tests/libs/test_join.py
index 17601d30739e3..ba2e6e7130929 100644
--- a/pandas/tests/libs/test_join.py
+++ b/pandas/tests/libs/test_join.py
@@ -112,8 +112,8 @@ def test_cython_right_outer_join(self):
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
- tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
- tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
+ tm.assert_numpy_array_equal(ls, exp_ls)
+ tm.assert_numpy_array_equal(rs, exp_rs)
def test_cython_inner_join(self):
left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.intp)
@@ -134,8 +134,8 @@ def test_cython_inner_join(self):
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
- tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
- tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
+ tm.assert_numpy_array_equal(ls, exp_ls)
+ tm.assert_numpy_array_equal(rs, exp_rs)
@pytest.mark.parametrize("readonly", [True, False])
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index e2b6b5ab3319c..ae9db5e728efe 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -45,6 +45,8 @@ def setup_method(self, method):
from pandas.plotting._matplotlib import compat
+ self.compat = compat
+
mpl.rcdefaults()
self.start_date_to_int64 = 812419200000000000
@@ -550,7 +552,7 @@ def is_grid_on():
obj.plot(kind=kind, grid=False, **kws)
assert not is_grid_on()
- if kind != "pie":
+ if kind not in ["pie", "hexbin", "scatter"]:
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc("axes", grid=True)
@@ -569,6 +571,12 @@ def _unpack_cycler(self, rcParams, field="color"):
"""
return [v[field] for v in rcParams["axes.prop_cycle"]]
+ def get_x_axis(self, ax):
+ return ax._shared_axes["x"] if self.compat.mpl_ge_3_5_0() else ax._shared_x_axes
+
+ def get_y_axis(self, ax):
+ return ax._shared_axes["y"] if self.compat.mpl_ge_3_5_0() else ax._shared_y_axes
+
def _check_plot_works(f, filterwarnings="always", default_axes=False, **kwargs):
"""
diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
index ccd0bc3d16896..6c07366e402d6 100644
--- a/pandas/tests/plotting/frame/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -525,8 +525,8 @@ def test_area_sharey_dont_overwrite(self):
df.plot(ax=ax1, kind="area")
df.plot(ax=ax2, kind="area")
- assert ax1._shared_y_axes.joined(ax1, ax2)
- assert ax2._shared_y_axes.joined(ax1, ax2)
+ assert self.get_y_axis(ax1).joined(ax1, ax2)
+ assert self.get_y_axis(ax2).joined(ax1, ax2)
def test_bar_linewidth(self):
df = DataFrame(np.random.randn(5, 5))
diff --git a/pandas/tests/plotting/frame/test_hist_box_by.py b/pandas/tests/plotting/frame/test_hist_box_by.py
index ba6d232733762..c92d952587967 100644
--- a/pandas/tests/plotting/frame/test_hist_box_by.py
+++ b/pandas/tests/plotting/frame/test_hist_box_by.py
@@ -195,16 +195,16 @@ def test_axis_share_x_with_by(self):
ax1, ax2, ax3 = self.hist_df.plot.hist(column="A", by="C", sharex=True)
# share x
- assert ax1._shared_x_axes.joined(ax1, ax2)
- assert ax2._shared_x_axes.joined(ax1, ax2)
- assert ax3._shared_x_axes.joined(ax1, ax3)
- assert ax3._shared_x_axes.joined(ax2, ax3)
+ assert self.get_x_axis(ax1).joined(ax1, ax2)
+ assert self.get_x_axis(ax2).joined(ax1, ax2)
+ assert self.get_x_axis(ax3).joined(ax1, ax3)
+ assert self.get_x_axis(ax3).joined(ax2, ax3)
# don't share y
- assert not ax1._shared_y_axes.joined(ax1, ax2)
- assert not ax2._shared_y_axes.joined(ax1, ax2)
- assert not ax3._shared_y_axes.joined(ax1, ax3)
- assert not ax3._shared_y_axes.joined(ax2, ax3)
+ assert not self.get_y_axis(ax1).joined(ax1, ax2)
+ assert not self.get_y_axis(ax2).joined(ax1, ax2)
+ assert not self.get_y_axis(ax3).joined(ax1, ax3)
+ assert not self.get_y_axis(ax3).joined(ax2, ax3)
@pytest.mark.slow
def test_axis_share_y_with_by(self):
@@ -212,16 +212,16 @@ def test_axis_share_y_with_by(self):
ax1, ax2, ax3 = self.hist_df.plot.hist(column="A", by="C", sharey=True)
# share y
- assert ax1._shared_y_axes.joined(ax1, ax2)
- assert ax2._shared_y_axes.joined(ax1, ax2)
- assert ax3._shared_y_axes.joined(ax1, ax3)
- assert ax3._shared_y_axes.joined(ax2, ax3)
+ assert self.get_y_axis(ax1).joined(ax1, ax2)
+ assert self.get_y_axis(ax2).joined(ax1, ax2)
+ assert self.get_y_axis(ax3).joined(ax1, ax3)
+ assert self.get_y_axis(ax3).joined(ax2, ax3)
# don't share x
- assert not ax1._shared_x_axes.joined(ax1, ax2)
- assert not ax2._shared_x_axes.joined(ax1, ax2)
- assert not ax3._shared_x_axes.joined(ax1, ax3)
- assert not ax3._shared_x_axes.joined(ax2, ax3)
+ assert not self.get_x_axis(ax1).joined(ax1, ax2)
+ assert not self.get_x_axis(ax2).joined(ax1, ax2)
+ assert not self.get_x_axis(ax3).joined(ax1, ax3)
+ assert not self.get_x_axis(ax3).joined(ax2, ax3)
@pytest.mark.parametrize("figsize", [(12, 8), (20, 10)])
def test_figure_shape_hist_with_by(self, figsize):
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index dbceeae44a493..5c543f96cb55f 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -93,7 +93,6 @@ def test_boxplot_return_type_none(self):
def test_boxplot_return_type_legacy(self):
# API change in https://github.com/pandas-dev/pandas/pull/7096
- import matplotlib as mpl # noqa
df = DataFrame(
np.random.randn(6, 4),
@@ -543,6 +542,14 @@ def test_groupby_boxplot_subplots_false(self, col, expected_xticklabel):
result_xticklabel = [x.get_text() for x in axes.get_xticklabels()]
assert expected_xticklabel == result_xticklabel
+ def test_groupby_boxplot_object(self):
+ # GH 43480
+ df = self.hist_df.astype("object")
+ grouped = df.groupby("gender")
+ msg = "boxplot method requires numerical columns, nothing to plot"
+ with pytest.raises(ValueError, match=msg):
+ _check_plot_works(grouped.boxplot, subplots=False)
+
def test_boxplot_multiindex_column(self):
# GH 16748
arrays = [
diff --git a/pandas/tests/plotting/test_common.py b/pandas/tests/plotting/test_common.py
index 4674fc1bb2c18..6eebf0c01ae52 100644
--- a/pandas/tests/plotting/test_common.py
+++ b/pandas/tests/plotting/test_common.py
@@ -39,4 +39,6 @@ def test__gen_two_subplots_with_ax(self):
next(gen)
axes = fig.get_axes()
assert len(axes) == 1
- assert axes[0].get_geometry() == (2, 1, 2)
+ subplot_geometry = list(axes[0].get_subplotspec().get_geometry()[:-1])
+ subplot_geometry[-1] += 1
+ assert subplot_geometry == [2, 1, 2]
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index 96fdcebc9b8f7..403f4a2c06df1 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -728,35 +728,35 @@ def test_axis_share_x(self):
ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True)
# share x
- assert ax1._shared_x_axes.joined(ax1, ax2)
- assert ax2._shared_x_axes.joined(ax1, ax2)
+ assert self.get_x_axis(ax1).joined(ax1, ax2)
+ assert self.get_x_axis(ax2).joined(ax1, ax2)
# don't share y
- assert not ax1._shared_y_axes.joined(ax1, ax2)
- assert not ax2._shared_y_axes.joined(ax1, ax2)
+ assert not self.get_y_axis(ax1).joined(ax1, ax2)
+ assert not self.get_y_axis(ax2).joined(ax1, ax2)
def test_axis_share_y(self):
df = self.hist_df
ax1, ax2 = df.hist(column="height", by=df.gender, sharey=True)
# share y
- assert ax1._shared_y_axes.joined(ax1, ax2)
- assert ax2._shared_y_axes.joined(ax1, ax2)
+ assert self.get_y_axis(ax1).joined(ax1, ax2)
+ assert self.get_y_axis(ax2).joined(ax1, ax2)
# don't share x
- assert not ax1._shared_x_axes.joined(ax1, ax2)
- assert not ax2._shared_x_axes.joined(ax1, ax2)
+ assert not self.get_x_axis(ax1).joined(ax1, ax2)
+ assert not self.get_x_axis(ax2).joined(ax1, ax2)
def test_axis_share_xy(self):
df = self.hist_df
ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True, sharey=True)
# share both x and y
- assert ax1._shared_x_axes.joined(ax1, ax2)
- assert ax2._shared_x_axes.joined(ax1, ax2)
+ assert self.get_x_axis(ax1).joined(ax1, ax2)
+ assert self.get_x_axis(ax2).joined(ax1, ax2)
- assert ax1._shared_y_axes.joined(ax1, ax2)
- assert ax2._shared_y_axes.joined(ax1, ax2)
+ assert self.get_y_axis(ax1).joined(ax1, ax2)
+ assert self.get_y_axis(ax2).joined(ax1, ax2)
@pytest.mark.parametrize(
"histtype, expected",
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 812aae8d97151..5a80df8d6c779 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -154,8 +154,8 @@ def test_area_sharey_dont_overwrite(self):
abs(self.ts).plot(ax=ax1, kind="area")
abs(self.ts).plot(ax=ax2, kind="area")
- assert ax1._shared_y_axes.joined(ax1, ax2)
- assert ax2._shared_y_axes.joined(ax1, ax2)
+ assert self.get_y_axis(ax1).joined(ax1, ax2)
+ assert self.get_y_axis(ax2).joined(ax1, ax2)
def test_label(self):
s = Series([1, 2])
@@ -720,7 +720,7 @@ def test_custom_business_day_freq(self):
_check_plot_works(s.plot)
- @pytest.mark.xfail
+ @pytest.mark.xfail(reason="TODO: reason?")
def test_plot_accessor_updates_on_inplace(self):
s = Series([1, 2, 3, 4])
_, ax = self.plt.subplots()
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index 450bd8b05ea43..2dae9ee48a90a 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -20,7 +20,7 @@
# a fixture value can be overridden by the test parameter value. Note that the
# value of the fixture can be overridden this way even if the test doesn't use
# it directly (doesn't mention it in the function prototype).
-# see https://docs.pytest.org/en/latest/fixture.html#override-a-fixture-with-direct-test-parametrization # noqa
+# see https://docs.pytest.org/en/latest/fixture.html#override-a-fixture-with-direct-test-parametrization # noqa:E501
# in this module we override the fixture values defined in conftest.py
# tuples of '_index_factory,_series_name,_index_start,_index_end'
DATE_RANGE = (date_range, "dti", datetime(2005, 1, 1), datetime(2005, 1, 10))
diff --git a/pandas/tests/resample/test_deprecated.py b/pandas/tests/resample/test_deprecated.py
index 359c3cea62f9c..3aac7a961fa19 100644
--- a/pandas/tests/resample/test_deprecated.py
+++ b/pandas/tests/resample/test_deprecated.py
@@ -63,12 +63,12 @@ def test_deprecating_on_loffset_and_base():
# not checking the stacklevel for .groupby().resample() because it's complicated to
# reconcile it with the stacklevel for Series.resample() and DataFrame.resample();
# see GH #37603
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
df.groupby("a").resample("3T", base=0).sum()
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
df.groupby("a").resample("3T", loffset="0s").sum()
msg = "'offset' and 'base' cannot be present at the same time"
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
with pytest.raises(ValueError, match=msg):
df.groupby("a").resample("3T", base=0, offset=0).sum()
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 10fabe234d218..bb49450b8414e 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -316,7 +316,7 @@ def test_agg_consistency_int_str_column_mix():
r.agg({2: "mean", "b": "sum"})
-# TODO: once GH 14008 is fixed, move these tests into
+# TODO(GH#14008): once GH 14008 is fixed, move these tests into
# `Base` test class
diff --git a/pandas/tests/reshape/concat/test_invalid.py b/pandas/tests/reshape/concat/test_invalid.py
index cd2a7ca33a267..920d31d1bc43a 100644
--- a/pandas/tests/reshape/concat/test_invalid.py
+++ b/pandas/tests/reshape/concat/test_invalid.py
@@ -34,9 +34,11 @@ def test_concat_invalid_first_argument(self):
with pytest.raises(TypeError, match=msg):
concat(df1)
+ def test_concat_generator_obj(self):
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
+ def test_concat_textreader_obj(self):
# text reader ok
# GH6583
data = """index,A,B,C,D
diff --git a/pandas/tests/reshape/concat/test_series.py b/pandas/tests/reshape/concat/test_series.py
index 34bba581b31c7..f53974d142bec 100644
--- a/pandas/tests/reshape/concat/test_series.py
+++ b/pandas/tests/reshape/concat/test_series.py
@@ -50,7 +50,7 @@ def test_concat_empty_and_non_empty_series_regression(self):
result = concat([s1, s2])
tm.assert_series_equal(result, expected)
- def test_concat_series_axis1(self, sort=sort):
+ def test_concat_series_axis1(self, sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
@@ -79,7 +79,9 @@ def test_concat_series_axis1(self, sort=sort):
s = Series(np.random.randn(3), index=["c", "a", "b"], name="A")
s2 = Series(np.random.randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
- expected = DataFrame({"A": s, "B": s2})
+ expected = DataFrame({"A": s, "B": s2}, index=["c", "a", "b", "d"])
+ if sort:
+ expected = expected.sort_index()
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
diff --git a/pandas/tests/reshape/concat/test_sort.py b/pandas/tests/reshape/concat/test_sort.py
index 3d362ef42d276..a789dc0f8dc83 100644
--- a/pandas/tests/reshape/concat/test_sort.py
+++ b/pandas/tests/reshape/concat/test_sort.py
@@ -92,3 +92,9 @@ def test_concat_frame_with_sort_false(self):
expected = DataFrame([[2, np.nan], [np.nan, 1]], index=[2, 1], columns=[2, 1])
tm.assert_frame_equal(result, expected)
+
+ def test_concat_sort_none_warning(self):
+ # GH#41518
+ df = DataFrame({1: [1, 2], "a": [3, 4]})
+ with tm.assert_produces_warning(FutureWarning, match="sort"):
+ pd.concat([df, df], sort=None)
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 48a55022aa484..6533cbb1f70cd 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -630,7 +630,7 @@ def test_join_dups(self):
dta = x.merge(y, left_index=True, right_index=True).merge(
z, left_index=True, right_index=True, how="outer"
)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x, y, z, w], axis=1)
expected.columns = ["x_x", "y_x", "x_y", "y_y", "x_x", "y_x", "x_y", "y_y"]
diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py
index 5bb9f56adb8d5..f9310db3123f6 100644
--- a/pandas/tests/reshape/merge/test_merge_asof.py
+++ b/pandas/tests/reshape/merge/test_merge_asof.py
@@ -1484,3 +1484,44 @@ def test_merge_asof_numeri_column_in_index_object_dtype():
match=r"Incompatible merge dtype, .*, both sides must have numeric dtype",
):
merge_asof(left, right, left_on="a", right_on="a")
+
+
+def test_merge_asof_array_as_on():
+ # GH#42844
+ right = pd.DataFrame(
+ {
+ "a": [2, 6],
+ "ts": [pd.Timestamp("2021/01/01 00:37"), pd.Timestamp("2021/01/01 01:40")],
+ }
+ )
+ ts_merge = pd.date_range(
+ start=pd.Timestamp("2021/01/01 00:00"), periods=3, freq="1h"
+ )
+ left = pd.DataFrame({"b": [4, 8, 7]})
+ result = merge_asof(
+ left,
+ right,
+ left_on=ts_merge,
+ right_on="ts",
+ allow_exact_matches=False,
+ direction="backward",
+ )
+ expected = pd.DataFrame({"b": [4, 8, 7], "a": [np.nan, 2, 6], "ts": ts_merge})
+ tm.assert_frame_equal(result, expected)
+
+ result = merge_asof(
+ right,
+ left,
+ left_on="ts",
+ right_on=ts_merge,
+ allow_exact_matches=False,
+ direction="backward",
+ )
+ expected = pd.DataFrame(
+ {
+ "a": [2, 6],
+ "ts": [pd.Timestamp("2021/01/01 00:37"), pd.Timestamp("2021/01/01 01:40")],
+ "b": [4, 8],
+ }
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index cd1bf21753249..f35033115d2fc 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -40,6 +40,17 @@
class TestPeriodConstruction:
+ def test_from_td64nat_raises(self):
+ # GH#44507
+ td = NaT.to_numpy("m8[ns]")
+
+ msg = "Value must be Period, string, integer, or datetime"
+ with pytest.raises(ValueError, match=msg):
+ Period(td)
+
+ with pytest.raises(ValueError, match=msg):
+ Period(td, freq="D")
+
def test_construction(self):
i1 = Period("1/1/2005", freq="M")
i2 = Period("Jan 2005")
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index 21ed57813b60d..f2c2985827a4f 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -182,6 +182,7 @@ def test_nat_methods_nat(method):
def test_nat_iso_format(get_nat):
# see gh-12300
assert get_nat("NaT").isoformat() == "NaT"
+ assert get_nat("NaT").isoformat(timespec="nanoseconds") == "NaT"
@pytest.mark.parametrize(
@@ -325,6 +326,15 @@ def test_nat_doc_strings(compare):
klass, method = compare
klass_doc = getattr(klass, method).__doc__
+ # Ignore differences with Timestamp.isoformat() as they're intentional
+ if klass == Timestamp and method == "isoformat":
+ return
+
+ if method == "to_numpy":
+ # GH#44460 can return either dt64 or td64 depending on dtype,
+ # different docstring is intentional
+ return
+
nat_doc = getattr(NaT, method).__doc__
assert klass_doc == nat_doc
@@ -506,6 +516,22 @@ def test_to_numpy_alias():
assert isna(expected) and isna(result)
+ # GH#44460
+ result = NaT.to_numpy("M8[s]")
+ assert isinstance(result, np.datetime64)
+ assert result.dtype == "M8[s]"
+
+ result = NaT.to_numpy("m8[ns]")
+ assert isinstance(result, np.timedelta64)
+ assert result.dtype == "m8[ns]"
+
+ result = NaT.to_numpy("m8[s]")
+ assert isinstance(result, np.timedelta64)
+ assert result.dtype == "m8[s]"
+
+ with pytest.raises(ValueError, match="NaT.to_numpy dtype must be a "):
+ NaT.to_numpy(np.int64)
+
@pytest.mark.parametrize(
"other",
@@ -619,11 +645,11 @@ def test_nat_comparisons_invalid_ndarray(other):
op(other, NaT)
-def test_compare_date():
+def test_compare_date(fixed_now_ts):
# GH#39151 comparing NaT with date object is deprecated
# See also: tests.scalar.timestamps.test_comparisons::test_compare_date
- dt = Timestamp.now().to_pydatetime().date()
+ dt = fixed_now_ts.to_pydatetime().date()
for left, right in [(NaT, dt), (dt, NaT)]:
assert not left == right
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index 9c36d5777d60c..f2e6b91144898 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -202,6 +202,34 @@ def test_td_add_sub_numeric_raises(self):
with pytest.raises(TypeError, match=msg):
other - td
+ def test_td_add_sub_int_ndarray(self):
+ td = Timedelta("1 day")
+ other = np.array([1])
+
+ msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
+ with pytest.raises(TypeError, match=msg):
+ td + np.array([1])
+
+ msg = "|".join(
+ [
+ (
+ r"unsupported operand type\(s\) for \+: 'numpy.ndarray' "
+ "and 'Timedelta'"
+ ),
+ # This message goes on to say "Please do not rely on this error;
+ # it may not be given on all Python implementations"
+ "Concatenation operation is not implemented for NumPy arrays",
+ ]
+ )
+ with pytest.raises(TypeError, match=msg):
+ other + td
+ msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
+ with pytest.raises(TypeError, match=msg):
+ td - other
+ msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
+ with pytest.raises(TypeError, match=msg):
+ other - td
+
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
@@ -224,7 +252,7 @@ def test_td_sub_timedeltalike_object_dtype_array(self):
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
- now = Timestamp.now()
+ now = Timestamp("2021-11-09 09:54:00")
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
@@ -238,7 +266,7 @@ def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
- now = Timestamp.now()
+ now = Timestamp("2021-11-09 09:54:00")
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
msg = r"unsupported operand type\(s\) for \-: 'Timedelta' and 'Timestamp'"
with pytest.raises(TypeError, match=msg):
@@ -255,63 +283,32 @@ def test_td_add_timedeltalike_object_dtype_array(self, op):
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
# GH#21980
- now = Timestamp.now()
+ now = Timestamp("2021-11-09 09:54:00")
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
- # TODO: moved from index tests following #24365, may need de-duplication
- def test_ops_ndarray(self):
+ def test_td_add_sub_td64_ndarray(self):
td = Timedelta("1 day")
- # timedelta, timedelta
- other = pd.to_timedelta(["1 day"]).values
- expected = pd.to_timedelta(["2 days"]).values
- tm.assert_numpy_array_equal(td + other, expected)
- tm.assert_numpy_array_equal(other + td, expected)
- msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
- with pytest.raises(TypeError, match=msg):
- td + np.array([1])
- msg = "|".join(
- [
- (
- r"unsupported operand type\(s\) for \+: 'numpy.ndarray' "
- "and 'Timedelta'"
- ),
- "Concatenation operation is not implemented for NumPy arrays",
- ]
- )
- with pytest.raises(TypeError, match=msg):
- np.array([1]) + td
-
- expected = pd.to_timedelta(["0 days"]).values
- tm.assert_numpy_array_equal(td - other, expected)
- tm.assert_numpy_array_equal(-other + td, expected)
- msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
- with pytest.raises(TypeError, match=msg):
- td - np.array([1])
- msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
- with pytest.raises(TypeError, match=msg):
- np.array([1]) - td
+ other = np.array([td.to_timedelta64()])
+ expected = np.array([Timedelta("2 Days").to_timedelta64()])
- expected = pd.to_timedelta(["2 days"]).values
- tm.assert_numpy_array_equal(td * np.array([2]), expected)
- tm.assert_numpy_array_equal(np.array([2]) * td, expected)
- msg = (
- "ufunc '?multiply'? cannot use operands with types "
- r"dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
- )
- with pytest.raises(TypeError, match=msg):
- td * other
- with pytest.raises(TypeError, match=msg):
- other * td
+ result = td + other
+ tm.assert_numpy_array_equal(result, expected)
+ result = other + td
+ tm.assert_numpy_array_equal(result, expected)
- tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64))
- tm.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64))
+ result = td - other
+ tm.assert_numpy_array_equal(result, expected * 0)
+ result = other - td
+ tm.assert_numpy_array_equal(result, expected * 0)
- # timedelta, datetime
+ def test_td_add_sub_dt64_ndarray(self):
+ td = Timedelta("1 day")
other = pd.to_datetime(["2000-01-01"]).values
+
expected = pd.to_datetime(["2000-01-02"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
@@ -386,6 +383,30 @@ def test_td_mul_scalar(self, op):
# invalid multiply with another timedelta
op(td, td)
+ def test_td_mul_numeric_ndarray(self):
+ td = Timedelta("1 day")
+ other = np.array([2])
+ expected = np.array([Timedelta("2 Days").to_timedelta64()])
+
+ result = td * other
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = other * td
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_td_mul_td64_ndarray_invalid(self):
+ td = Timedelta("1 day")
+ other = np.array([Timedelta("2 Days").to_timedelta64()])
+
+ msg = (
+ "ufunc '?multiply'? cannot use operands with types "
+ r"dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
+ )
+ with pytest.raises(TypeError, match=msg):
+ td * other
+ with pytest.raises(TypeError, match=msg):
+ other * td
+
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
@@ -450,6 +471,18 @@ def test_td_div_nan(self, nan):
result = td // nan
assert result is NaT
+ def test_td_div_td64_ndarray(self):
+ td = Timedelta("1 day")
+
+ other = np.array([Timedelta("2 Days").to_timedelta64()])
+ expected = np.array([0.5])
+
+ result = td / other
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = other / td
+ tm.assert_numpy_array_equal(result, expected * 4)
+
# ---------------------------------------------------------------
# Timedelta.__rdiv__
@@ -873,7 +906,7 @@ def test_rdivmod_invalid(self):
"arr",
[
np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")]),
- np.array([Timestamp.now(), Timedelta("1D")]),
+ np.array([Timestamp("2021-11-09 09:54:00"), Timedelta("1D")]),
],
)
def test_td_op_timedelta_timedeltalike_array(self, op, arr):
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index 4aa2f62fe85a0..448ec4353d7e7 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -1,6 +1,10 @@
""" test the scalar Timedelta """
from datetime import timedelta
+from hypothesis import (
+ given,
+ strategies as st,
+)
import numpy as np
import pytest
@@ -317,6 +321,13 @@ def test_to_numpy_alias(self):
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
+ # GH#44460
+ msg = "dtype and copy arguments are ignored"
+ with pytest.raises(ValueError, match=msg):
+ td.to_numpy("m8[s]")
+ with pytest.raises(ValueError, match=msg):
+ td.to_numpy(copy=True)
+
@pytest.mark.parametrize(
"freq,s1,s2",
[
@@ -387,12 +398,12 @@ def test_round_implementation_bounds(self):
with pytest.raises(OverflowError, match=msg):
Timedelta.max.ceil("s")
- @pytest.mark.parametrize("n", range(100))
+ @given(val=st.integers(min_value=iNaT + 1, max_value=lib.i8max))
@pytest.mark.parametrize(
"method", [Timedelta.round, Timedelta.floor, Timedelta.ceil]
)
- def test_round_sanity(self, method, n, request):
- val = np.random.randint(iNaT + 1, lib.i8max, dtype=np.int64)
+ def test_round_sanity(self, val, method):
+ val = np.int64(val)
td = Timedelta(val)
assert method(td, "ns") == td
diff --git a/pandas/tests/scalar/timestamp/test_arithmetic.py b/pandas/tests/scalar/timestamp/test_arithmetic.py
index fd46954fd4c71..1a8fd2a8199a2 100644
--- a/pandas/tests/scalar/timestamp/test_arithmetic.py
+++ b/pandas/tests/scalar/timestamp/test_arithmetic.py
@@ -91,7 +91,7 @@ def test_delta_preserve_nanos(self):
def test_rsub_dtscalars(self, tz_naive_fixture):
# In particular, check that datetime64 - Timestamp works GH#28286
td = Timedelta(1235345642000)
- ts = Timestamp.now(tz_naive_fixture)
+ ts = Timestamp("2021-01-01", tz=tz_naive_fixture)
other = ts + td
assert other - ts == td
@@ -170,9 +170,9 @@ def test_addition_subtraction_preserve_frequency(self, freq, td, td64):
@pytest.mark.parametrize(
"td", [Timedelta(hours=3), np.timedelta64(3, "h"), timedelta(hours=3)]
)
- def test_radd_tdscalar(self, td):
+ def test_radd_tdscalar(self, td, fixed_now_ts):
# GH#24775 timedelta64+Timestamp should not raise
- ts = Timestamp.now()
+ ts = fixed_now_ts
assert td + ts == ts + td
@pytest.mark.parametrize(
diff --git a/pandas/tests/scalar/timestamp/test_comparisons.py b/pandas/tests/scalar/timestamp/test_comparisons.py
index b7cb7ca8d7069..7ed0a6aedebc1 100644
--- a/pandas/tests/scalar/timestamp/test_comparisons.py
+++ b/pandas/tests/scalar/timestamp/test_comparisons.py
@@ -12,8 +12,8 @@
class TestTimestampComparison:
- def test_comparison_dt64_ndarray(self):
- ts = Timestamp.now()
+ def test_comparison_dt64_ndarray(self, fixed_now_ts):
+ ts = Timestamp("2021-01-01")
ts2 = Timestamp("2019-04-05")
arr = np.array([[ts.asm8, ts2.asm8]], dtype="M8[ns]")
@@ -51,7 +51,7 @@ def test_comparison_dt64_ndarray(self):
@pytest.mark.parametrize("reverse", [True, False])
def test_comparison_dt64_ndarray_tzaware(self, reverse, comparison_op):
- ts = Timestamp.now("UTC")
+ ts = Timestamp("2021-01-01 00:00:00.00000", tz="UTC")
arr = np.array([ts.asm8, ts.asm8], dtype="M8[ns]")
left, right = ts, arr
@@ -147,7 +147,7 @@ def test_compare_invalid(self):
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_compare_date(self, tz):
# GH#36131 comparing Timestamp with date object is deprecated
- ts = Timestamp.now(tz)
+ ts = Timestamp("2021-01-01 00:00:00.00000", tz=tz)
dt = ts.to_pydatetime().date()
# These are incorrectly considered as equal because they
# dispatch to the date comparisons which truncates ts
@@ -278,9 +278,9 @@ def test_timestamp_compare_oob_dt64(self):
assert Timestamp.min > other
assert other < Timestamp.min
- def test_compare_zerodim_array(self):
+ def test_compare_zerodim_array(self, fixed_now_ts):
# GH#26916
- ts = Timestamp.now()
+ ts = fixed_now_ts
dt64 = np.datetime64("2016-01-01", "ns")
arr = np.array(dt64)
assert arr.ndim == 0
diff --git a/pandas/tests/scalar/timestamp/test_formats.py b/pandas/tests/scalar/timestamp/test_formats.py
new file mode 100644
index 0000000000000..71dbf3539bdb2
--- /dev/null
+++ b/pandas/tests/scalar/timestamp/test_formats.py
@@ -0,0 +1,71 @@
+import pytest
+
+from pandas import Timestamp
+
+ts_no_ns = Timestamp(
+ year=2019,
+ month=5,
+ day=18,
+ hour=15,
+ minute=17,
+ second=8,
+ microsecond=132263,
+)
+ts_ns = Timestamp(
+ year=2019,
+ month=5,
+ day=18,
+ hour=15,
+ minute=17,
+ second=8,
+ microsecond=132263,
+ nanosecond=123,
+)
+ts_ns_tz = Timestamp(
+ year=2019,
+ month=5,
+ day=18,
+ hour=15,
+ minute=17,
+ second=8,
+ microsecond=132263,
+ nanosecond=123,
+ tz="UTC",
+)
+ts_no_us = Timestamp(
+ year=2019,
+ month=5,
+ day=18,
+ hour=15,
+ minute=17,
+ second=8,
+ microsecond=0,
+ nanosecond=123,
+)
+
+
+@pytest.mark.parametrize(
+ "ts, timespec, expected_iso",
+ [
+ (ts_no_ns, "auto", "2019-05-18T15:17:08.132263"),
+ (ts_no_ns, "seconds", "2019-05-18T15:17:08"),
+ (ts_no_ns, "nanoseconds", "2019-05-18T15:17:08.132263000"),
+ (ts_ns, "auto", "2019-05-18T15:17:08.132263123"),
+ (ts_ns, "hours", "2019-05-18T15"),
+ (ts_ns, "minutes", "2019-05-18T15:17"),
+ (ts_ns, "seconds", "2019-05-18T15:17:08"),
+ (ts_ns, "milliseconds", "2019-05-18T15:17:08.132"),
+ (ts_ns, "microseconds", "2019-05-18T15:17:08.132263"),
+ (ts_ns, "nanoseconds", "2019-05-18T15:17:08.132263123"),
+ (ts_ns_tz, "auto", "2019-05-18T15:17:08.132263123+00:00"),
+ (ts_ns_tz, "hours", "2019-05-18T15+00:00"),
+ (ts_ns_tz, "minutes", "2019-05-18T15:17+00:00"),
+ (ts_ns_tz, "seconds", "2019-05-18T15:17:08+00:00"),
+ (ts_ns_tz, "milliseconds", "2019-05-18T15:17:08.132+00:00"),
+ (ts_ns_tz, "microseconds", "2019-05-18T15:17:08.132263+00:00"),
+ (ts_ns_tz, "nanoseconds", "2019-05-18T15:17:08.132263123+00:00"),
+ (ts_no_us, "auto", "2019-05-18T15:17:08.000000123"),
+ ],
+)
+def test_isoformat(ts, timespec, expected_iso):
+ assert ts.isoformat(timespec=timespec) == expected_iso
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index f2010b33538fb..b3abec6b9761f 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -579,7 +579,7 @@ def test_timestamp_to_datetime_explicit_pytz(self):
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
- @td.skip_if_windows_python_3
+ @td.skip_if_windows
def test_timestamp_to_datetime_explicit_dateutil(self):
stamp = Timestamp("20090415", tz=gettz("US/Eastern"))
dtval = stamp.to_pydatetime()
@@ -619,6 +619,13 @@ def test_to_numpy_alias(self):
ts = Timestamp(datetime.now())
assert ts.to_datetime64() == ts.to_numpy()
+ # GH#44460
+ msg = "dtype and copy arguments are ignored"
+ with pytest.raises(ValueError, match=msg):
+ ts.to_numpy("M8[s]")
+ with pytest.raises(ValueError, match=msg):
+ ts.to_numpy(copy=True)
+
class SubDatetime(datetime):
pass
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index 366c0f7cf2f74..5f07cabd51ca1 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -1,6 +1,10 @@
from datetime import datetime
from dateutil.tz import gettz
+from hypothesis import (
+ given,
+ strategies as st,
+)
import numpy as np
import pytest
import pytz
@@ -276,12 +280,12 @@ def test_round_implementation_bounds(self):
with pytest.raises(OverflowError, match=msg):
Timestamp.max.ceil("s")
- @pytest.mark.parametrize("n", range(100))
+ @given(val=st.integers(iNaT + 1, lib.i8max))
@pytest.mark.parametrize(
"method", [Timestamp.round, Timestamp.floor, Timestamp.ceil]
)
- def test_round_sanity(self, method, n):
- val = np.random.randint(iNaT + 1, lib.i8max, dtype=np.int64)
+ def test_round_sanity(self, val, method):
+ val = np.int64(val)
ts = Timestamp(val)
def checker(res, ts, nanos):
@@ -490,10 +494,10 @@ def test_normalize_pre_epoch_dates(self):
# --------------------------------------------------------------
@td.skip_if_windows
- def test_timestamp(self):
+ def test_timestamp(self, fixed_now_ts):
# GH#17329
# tz-naive --> treat it as if it were UTC for purposes of timestamp()
- ts = Timestamp.now()
+ ts = fixed_now_ts
uts = ts.replace(tzinfo=utc)
assert ts.timestamp() == uts.timestamp()
diff --git a/pandas/tests/series/accessors/test_cat_accessor.py b/pandas/tests/series/accessors/test_cat_accessor.py
index 289e4cfe9397d..fb2071ac9c3f6 100644
--- a/pandas/tests/series/accessors/test_cat_accessor.py
+++ b/pandas/tests/series/accessors/test_cat_accessor.py
@@ -51,7 +51,7 @@ def test_cat_accessor(self):
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
# issue #37643 inplace kwarg deprecated
return_value = ser.cat.set_categories(["b", "a"], inplace=True)
@@ -88,7 +88,7 @@ def test_cat_accessor_updates_on_inplace(self):
return_value = ser.drop(0, inplace=True)
assert return_value is None
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
return_value = ser.cat.remove_unused_categories(inplace=True)
assert return_value is None
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 03b1c512f9053..4c17917b949ca 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -36,6 +36,12 @@
class TestSeriesGetitemScalars:
+ def test_getitem_object_index_float_string(self):
+ # GH#17286
+ ser = Series([1] * 4, index=Index(["a", "b", "c", 1.0]))
+ assert ser["a"] == 1
+ assert ser[1.0] == 1
+
def test_getitem_float_keys_tuple_values(self):
# see GH#13509
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 6c3587c7eeada..8a34882b1e5d4 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -338,26 +338,19 @@ def test_slice_with_zero_step_raises(index, frame_or_series, indexer_sli):
],
)
def test_slice_with_negative_step(index):
- def assert_slices_equivalent(l_slc, i_slc):
- expected = ts.iloc[i_slc]
-
- tm.assert_series_equal(ts[l_slc], expected)
- tm.assert_series_equal(ts.loc[l_slc], expected)
-
keystr1 = str(index[9])
keystr2 = str(index[13])
- box = type(index[0])
- ts = Series(np.arange(20), index)
+ ser = Series(np.arange(20), index)
SLC = IndexSlice
- for key in [keystr1, box(keystr1)]:
- assert_slices_equivalent(SLC[key::-1], SLC[9::-1])
- assert_slices_equivalent(SLC[:key:-1], SLC[:8:-1])
+ for key in [keystr1, index[9]]:
+ tm.assert_indexing_slices_equivalent(ser, SLC[key::-1], SLC[9::-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC[:key:-1], SLC[:8:-1])
- for key2 in [keystr2, box(keystr2)]:
- assert_slices_equivalent(SLC[key2:key:-1], SLC[13:8:-1])
- assert_slices_equivalent(SLC[key:key2:-1], SLC[0:0:-1])
+ for key2 in [keystr2, index[13]]:
+ tm.assert_indexing_slices_equivalent(ser, SLC[key2:key:-1], SLC[13:8:-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC[key:key2:-1], SLC[0:0:-1])
def test_tuple_index():
diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py
index 88b75164d2f3e..bf4bee203a3a1 100644
--- a/pandas/tests/series/indexing/test_where.py
+++ b/pandas/tests/series/indexing/test_where.py
@@ -440,13 +440,15 @@ def test_where_categorical(frame_or_series):
tm.assert_equal(exp, res)
-# TODO(ArrayManager) DataFrame.values not yet correctly returning datetime array
-# for categorical with datetime categories
-@td.skip_array_manager_not_yet_implemented
-def test_where_datetimelike_categorical(tz_naive_fixture):
+def test_where_datetimelike_categorical(request, tz_naive_fixture, using_array_manager):
# GH#37682
tz = tz_naive_fixture
+ if using_array_manager and tz is None:
+ # TODO(ArrayManager) DataFrame.values not yet correctly returning datetime array
+ # for categorical with datetime categories
+ td.mark_array_manager_not_yet_implemented(request)
+
dr = date_range("2001-01-01", periods=3, tz=tz)._with_freq(None)
lvals = pd.DatetimeIndex([dr[0], dr[1], pd.NaT])
rvals = pd.Categorical([dr[0], pd.NaT, dr[2]])
diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py
index 732d375d136d0..a20667655590b 100644
--- a/pandas/tests/series/methods/test_astype.py
+++ b/pandas/tests/series/methods/test_astype.py
@@ -128,7 +128,7 @@ def test_astype_no_pandas_dtype(self):
def test_astype_generic_timestamp_no_frequency(self, dtype, request):
# see GH#15524, GH#15987
data = [1]
- s = Series(data)
+ ser = Series(data)
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
@@ -139,7 +139,7 @@ def test_astype_generic_timestamp_no_frequency(self, dtype, request):
fr"Please pass in '{dtype.__name__}\[ns\]' instead."
)
with pytest.raises(ValueError, match=msg):
- s.astype(dtype)
+ ser.astype(dtype)
def test_astype_dt64_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
diff --git a/pandas/tests/series/methods/test_convert.py b/pandas/tests/series/methods/test_convert.py
index b658929dfd0d5..178026c1efc09 100644
--- a/pandas/tests/series/methods/test_convert.py
+++ b/pandas/tests/series/methods/test_convert.py
@@ -108,15 +108,15 @@ def test_convert(self):
result = ser._convert(datetime=True)
tm.assert_series_equal(result, expected)
- # preserver if non-object
+ # preserve if non-object
ser = Series([1], dtype="float32")
result = ser._convert(datetime=True)
tm.assert_series_equal(result, ser)
# FIXME: dont leave commented-out
# res = ser.copy()
- # r[0] = np.nan
- # result = res._convert(convert_dates=True,convert_numeric=False)
+ # res[0] = np.nan
+ # result = res._convert(datetime=True, numeric=False)
# assert result.dtype == 'M8[ns]'
def test_convert_no_arg_error(self):
diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py
index 8b5557ab6e85f..c5cffa0c9fb0f 100644
--- a/pandas/tests/series/methods/test_drop_duplicates.py
+++ b/pandas/tests/series/methods/test_drop_duplicates.py
@@ -77,7 +77,7 @@ def dtype(self, request):
return request.param
@pytest.fixture
- def cat_series1(self, dtype, ordered):
+ def cat_series_unused_category(self, dtype, ordered):
# Test case 1
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
@@ -86,8 +86,8 @@ def cat_series1(self, dtype, ordered):
tc1 = Series(cat)
return tc1
- def test_drop_duplicates_categorical_non_bool(self, cat_series1):
- tc1 = cat_series1
+ def test_drop_duplicates_categorical_non_bool(self, cat_series_unused_category):
+ tc1 = cat_series_unused_category
expected = Series([False, False, False, True])
@@ -102,8 +102,10 @@ def test_drop_duplicates_categorical_non_bool(self, cat_series1):
assert return_value is None
tm.assert_series_equal(sc, tc1[~expected])
- def test_drop_duplicates_categorical_non_bool_keeplast(self, cat_series1):
- tc1 = cat_series1
+ def test_drop_duplicates_categorical_non_bool_keeplast(
+ self, cat_series_unused_category
+ ):
+ tc1 = cat_series_unused_category
expected = Series([False, False, True, False])
@@ -118,8 +120,10 @@ def test_drop_duplicates_categorical_non_bool_keeplast(self, cat_series1):
assert return_value is None
tm.assert_series_equal(sc, tc1[~expected])
- def test_drop_duplicates_categorical_non_bool_keepfalse(self, cat_series1):
- tc1 = cat_series1
+ def test_drop_duplicates_categorical_non_bool_keepfalse(
+ self, cat_series_unused_category
+ ):
+ tc1 = cat_series_unused_category
expected = Series([False, False, True, True])
@@ -135,8 +139,8 @@ def test_drop_duplicates_categorical_non_bool_keepfalse(self, cat_series1):
tm.assert_series_equal(sc, tc1[~expected])
@pytest.fixture
- def cat_series2(self, dtype, ordered):
- # Test case 2; TODO: better name
+ def cat_series(self, dtype, ordered):
+ # no unused categories, unlike cat_series_unused_category
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
@@ -144,9 +148,8 @@ def cat_series2(self, dtype, ordered):
tc2 = Series(cat)
return tc2
- def test_drop_duplicates_categorical_non_bool2(self, cat_series2):
- # Test case 2; TODO: better name
- tc2 = cat_series2
+ def test_drop_duplicates_categorical_non_bool2(self, cat_series):
+ tc2 = cat_series
expected = Series([False, False, False, False, True, True, False])
@@ -161,8 +164,8 @@ def test_drop_duplicates_categorical_non_bool2(self, cat_series2):
assert return_value is None
tm.assert_series_equal(sc, tc2[~expected])
- def test_drop_duplicates_categorical_non_bool2_keeplast(self, cat_series2):
- tc2 = cat_series2
+ def test_drop_duplicates_categorical_non_bool2_keeplast(self, cat_series):
+ tc2 = cat_series
expected = Series([False, True, True, False, False, False, False])
@@ -177,8 +180,8 @@ def test_drop_duplicates_categorical_non_bool2_keeplast(self, cat_series2):
assert return_value is None
tm.assert_series_equal(sc, tc2[~expected])
- def test_drop_duplicates_categorical_non_bool2_keepfalse(self, cat_series2):
- tc2 = cat_series2
+ def test_drop_duplicates_categorical_non_bool2_keepfalse(self, cat_series):
+ tc2 = cat_series
expected = Series([False, True, True, False, True, True, False])
diff --git a/pandas/tests/series/methods/test_reindex.py b/pandas/tests/series/methods/test_reindex.py
index be9f96c8b509a..4350a5d9ac989 100644
--- a/pandas/tests/series/methods/test_reindex.py
+++ b/pandas/tests/series/methods/test_reindex.py
@@ -348,6 +348,31 @@ def test_reindex_periodindex_with_object(p_values, o_values, values, expected_va
tm.assert_series_equal(result, expected)
+def test_reindex_too_many_args():
+ # GH 40980
+ ser = Series([1, 2])
+ with pytest.raises(
+ TypeError, match=r"Only one positional argument \('index'\) is allowed"
+ ):
+ ser.reindex([2, 3], False)
+
+
+def test_reindex_double_index():
+ # GH 40980
+ ser = Series([1, 2])
+ msg = r"'index' passed as both positional and keyword argument"
+ with pytest.raises(TypeError, match=msg):
+ ser.reindex([2, 3], index=[3, 4])
+
+
+def test_reindex_no_posargs():
+ # GH 40980
+ ser = Series([1, 2])
+ result = ser.reindex(index=[1, 0])
+ expected = Series([2, 1], index=[1, 0])
+ tm.assert_series_equal(result, expected)
+
+
@pytest.mark.parametrize("values", [[["a"], ["x"]], [[], []]])
def test_reindex_empty_with_level(values):
# GH41170
diff --git a/pandas/tests/series/methods/test_rename.py b/pandas/tests/series/methods/test_rename.py
index a78abfa63cff4..3425dd8f019e7 100644
--- a/pandas/tests/series/methods/test_rename.py
+++ b/pandas/tests/series/methods/test_rename.py
@@ -105,6 +105,19 @@ def test_rename_callable(self):
assert result.name == expected.name
+ def test_rename_method_and_index(self):
+ # GH 40977
+ ser = Series([1, 2])
+ with pytest.raises(TypeError, match="Cannot specify both 'mapper' and 'index'"):
+ ser.rename(str, index=str)
+
+ def test_rename_none(self):
+ # GH 40977
+ ser = Series([1, 2], name="foo")
+ result = ser.rename(None)
+ expected = Series([1, 2])
+ tm.assert_series_equal(result, expected)
+
def test_rename_series_with_multiindex(self):
# issue #43659
arrays = [
diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py
index 3a6cd4eb0face..8283604b99d32 100644
--- a/pandas/tests/series/methods/test_replace.py
+++ b/pandas/tests/series/methods/test_replace.py
@@ -419,10 +419,10 @@ def test_replace_empty_copy(self, frame):
tm.assert_equal(res, obj)
assert res is not obj
- def test_replace_only_one_dictlike_arg(self):
+ def test_replace_only_one_dictlike_arg(self, fixed_now_ts):
# GH#33340
- ser = pd.Series([1, 2, "A", pd.Timestamp.now(), True])
+ ser = pd.Series([1, 2, "A", fixed_now_ts, True])
to_replace = {0: 1, 2: "A"}
value = "foo"
msg = "Series.replace cannot use dict-like to_replace and non-None value"
diff --git a/pandas/tests/series/methods/test_reset_index.py b/pandas/tests/series/methods/test_reset_index.py
index b159317bf813b..f38491508cc23 100644
--- a/pandas/tests/series/methods/test_reset_index.py
+++ b/pandas/tests/series/methods/test_reset_index.py
@@ -160,6 +160,13 @@ def test_drop_pos_args_deprecation(self):
expected = DataFrame({"a": [1, 2, 3], 0: [1, 2, 3]})
tm.assert_frame_equal(result, expected)
+ def test_reset_index_inplace_and_drop_ignore_name(self):
+ # GH#44575
+ ser = Series(range(2), name="old")
+ ser.reset_index(name="new", drop=True, inplace=True)
+ expected = Series(range(2), name="old")
+ tm.assert_series_equal(ser, expected)
+
@pytest.mark.parametrize(
"array, dtype",
diff --git a/pandas/tests/series/methods/test_to_frame.py b/pandas/tests/series/methods/test_to_frame.py
index 55d49b8fbee70..de8db8cfe9b91 100644
--- a/pandas/tests/series/methods/test_to_frame.py
+++ b/pandas/tests/series/methods/test_to_frame.py
@@ -1,3 +1,5 @@
+import pytest
+
from pandas import (
DataFrame,
Index,
@@ -54,3 +56,13 @@ class SubclassedFrame(DataFrame):
assert isinstance(result, SubclassedFrame)
expected = SubclassedFrame({"X": [1, 2, 3]})
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("name", [None, "x"])
+ def test_to_frame_finalize(self, datetime_series, name):
+ # GH#28283 Call __finalize__
+ expected_attrs = {"a": 1}
+
+ datetime_series.attrs.update(expected_attrs)
+
+ df = datetime_series.to_frame(name=name)
+ assert df.attrs == expected_attrs
diff --git a/pandas/tests/series/methods/test_unstack.py b/pandas/tests/series/methods/test_unstack.py
index 6f8f6d638dd56..23b068214dd91 100644
--- a/pandas/tests/series/methods/test_unstack.py
+++ b/pandas/tests/series/methods/test_unstack.py
@@ -10,6 +10,18 @@
import pandas._testing as tm
+def test_unstack_preserves_object():
+ mi = MultiIndex.from_product([["bar", "foo"], ["one", "two"]])
+
+ ser = Series(np.arange(4.0), index=mi, dtype=object)
+
+ res1 = ser.unstack()
+ assert (res1.dtypes == object).all()
+
+ res2 = ser.unstack(level=0)
+ assert (res2.dtypes == object).all()
+
+
def test_unstack():
index = MultiIndex(
levels=[["bar", "foo"], ["one", "three", "two"]],
diff --git a/pandas/tests/series/methods/test_view.py b/pandas/tests/series/methods/test_view.py
index 818023c01e4e7..22902c8648fc5 100644
--- a/pandas/tests/series/methods/test_view.py
+++ b/pandas/tests/series/methods/test_view.py
@@ -11,6 +11,18 @@
class TestView:
+ def test_view_i8_to_datetimelike(self):
+ dti = date_range("2000", periods=4, tz="US/Central")
+ ser = Series(dti.asi8)
+
+ result = ser.view(dti.dtype)
+ tm.assert_datetime_array_equal(result._values, dti._data._with_freq(None))
+
+ pi = dti.tz_localize(None).to_period("D")
+ ser = Series(pi.asi8)
+ result = ser.view(pi.dtype)
+ tm.assert_period_array_equal(result._values, pi._data)
+
def test_view_tz(self):
# GH#24024
ser = Series(date_range("2000", periods=4, tz="US/Central"))
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index ed83377f31317..efb7b61534619 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -96,10 +96,10 @@ def _constructor(self):
def test_flex_add_scalar_fill_value(self):
# GH12723
- s = Series([0, 1, np.nan, 3, 4, 5])
+ ser = Series([0, 1, np.nan, 3, 4, 5])
- exp = s.fillna(0).add(2)
- res = s.add(2, fill_value=0)
+ exp = ser.fillna(0).add(2)
+ res = ser.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
@@ -226,12 +226,12 @@ def test_add_na_handling(self):
from datetime import date
from decimal import Decimal
- s = Series(
+ ser = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
)
- result = s + s.shift(1)
- result2 = s.shift(1) + s
+ result = ser + ser.shift(1)
+ result2 = ser.shift(1) + ser
assert isna(result[0])
assert isna(result2[0])
@@ -244,13 +244,6 @@ def test_add_corner_cases(self, datetime_series):
result = empty + empty.copy()
assert len(result) == 0
- # FIXME: dont leave commented-out
- # TODO: this returned NotImplemented earlier, what to do?
- # deltas = Series([timedelta(1)] * 5, index=np.arange(5))
- # sub_deltas = deltas[::2]
- # deltas5 = deltas * 5
- # deltas = deltas + sub_deltas
-
def test_add_float_plus_int(self, datetime_series):
# float + int
int_ts = datetime_series.astype(int)[:-5]
@@ -406,15 +399,12 @@ def test_ser_flex_cmp_return_dtypes_empty(self, opname):
expected = np.dtype("bool")
assert result == expected
- @pytest.mark.parametrize(
- "op",
- [operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt],
- )
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]
)
- def test_ser_cmp_result_names(self, names, op):
+ def test_ser_cmp_result_names(self, names, comparison_op):
# datetime64 dtype
+ op = comparison_op
dti = date_range("1949-06-07 03:00:00", freq="H", periods=5, name=names[0])
ser = Series(dti).rename(names[1])
result = op(ser, dti)
@@ -590,9 +580,10 @@ def test_comparison_tuples(self):
expected = Series([False, False])
tm.assert_series_equal(result, expected)
- s = Series([frozenset([1]), frozenset([1, 2])])
+ def test_comparison_frozenset(self):
+ ser = Series([frozenset([1]), frozenset([1, 2])])
- result = s == frozenset([1])
+ result = ser == frozenset([1])
expected = Series([True, False])
tm.assert_series_equal(result, expected)
@@ -613,11 +604,6 @@ def test_comparison_operators_with_nas(self, comparison_op):
tm.assert_series_equal(result, expected)
- # FIXME: dont leave commented-out
- # result = comparison_op(val, ser)
- # expected = comparison_op(val, ser.dropna()).reindex(ser.index)
- # tm.assert_series_equal(result, expected)
-
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
@@ -661,8 +647,8 @@ def test_comp_ops_df_compat(self, left, right, frame_or_series):
def test_compare_series_interval_keyword(self):
# GH#25338
- s = Series(["IntervalA", "IntervalB", "IntervalC"])
- result = s == "IntervalA"
+ ser = Series(["IntervalA", "IntervalB", "IntervalC"])
+ result = ser == "IntervalA"
expected = Series([True, False, False])
tm.assert_series_equal(result, expected)
@@ -674,19 +660,6 @@ def test_compare_series_interval_keyword(self):
class TestTimeSeriesArithmetic:
- # TODO: De-duplicate with test below
- def test_series_add_tz_mismatch_converts_to_utc_duplicate(self):
- rng = date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
- ser = Series(np.random.randn(len(rng)), index=rng)
-
- ts_moscow = ser.tz_convert("Europe/Moscow")
-
- result = ser + ts_moscow
- assert result.index.tz is pytz.utc
-
- result = ts_moscow + ser
- assert result.index.tz is pytz.utc
-
def test_series_add_tz_mismatch_converts_to_utc(self):
rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")
@@ -709,16 +682,6 @@ def test_series_add_tz_mismatch_converts_to_utc(self):
assert result.index.tz == pytz.UTC
tm.assert_series_equal(result, expected)
- # TODO: redundant with test_series_add_tz_mismatch_converts_to_utc?
- def test_series_arithmetic_mismatched_tzs_convert_to_utc(self):
- base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
- idx1 = base.tz_convert("Asia/Tokyo")[:2]
- idx2 = base.tz_convert("US/Eastern")[1:]
-
- res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2)
- expected = Series([np.nan, 3, np.nan], index=base)
- tm.assert_series_equal(res, expected)
-
def test_series_add_aware_naive_raises(self):
rng = date_range("1/1/2011", periods=10, freq="H")
ser = Series(np.random.randn(len(rng)), index=rng)
@@ -790,7 +753,7 @@ def test_series_ops_name_retention(
# GH#37374 logical ops behaving as set ops deprecated
warn = FutureWarning if is_rlogical and box is Index else None
msg = "operating as a set operation is deprecated"
- with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
+ with tm.assert_produces_warning(warn, match=msg):
# stacklevel is correct for Index op, not reversed op
result = op(left, right)
@@ -801,9 +764,9 @@ def test_series_ops_name_retention(
assert isinstance(result, Series)
if box in [Index, Series]:
- assert result.name == names[2]
+ assert result.name is names[2] or result.name == names[2]
else:
- assert result.name == names[0]
+ assert result.name is names[0] or result.name == names[0]
def test_binop_maybe_preserve_name(self, datetime_series):
# names match, preserve
@@ -883,20 +846,20 @@ def test_none_comparison(series_with_simple_index):
series.iloc[0] = np.nan
# noinspection PyComparisonWithNone
- result = series == None # noqa
+ result = series == None # noqa:E711
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
- result = series != None # noqa
+ result = series != None # noqa:E711
assert result.iat[0]
assert result.iat[1]
- result = None == series # noqa
+ result = None == series # noqa:E711
assert not result.iat[0]
assert not result.iat[1]
- result = None != series # noqa
+ result = None != series # noqa:E711
assert result.iat[0]
assert result.iat[1]
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 1b488b4cf0b77..8023713dfcf39 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1397,12 +1397,6 @@ def test_constructor_dtype_timedelta64(self):
td = Series([np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
- # FIXME: dont leave commented-out
- # these are frequency conversion astypes
- # for t in ['s', 'D', 'us', 'ms']:
- # with pytest.raises(TypeError):
- # td.astype('m8[%s]' % t)
-
# valid astype
with tm.assert_produces_warning(FutureWarning):
# astype(int64) deprecated
@@ -1645,12 +1639,12 @@ def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture, pydt):
ts = ts.to_pydatetime()
ts_naive = Timestamp("2019")
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
result = Series([ts], dtype="datetime64[ns]")
expected = Series([ts_naive])
tm.assert_series_equal(result, expected)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
result = Series(np.array([ts], dtype=object), dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/test_logical_ops.py b/pandas/tests/series/test_logical_ops.py
index 563c8f63df57d..9648b01492e02 100644
--- a/pandas/tests/series/test_logical_ops.py
+++ b/pandas/tests/series/test_logical_ops.py
@@ -279,9 +279,7 @@ def test_reversed_xor_with_index_returns_index(self):
tm.assert_index_equal(result, expected)
expected = Index.symmetric_difference(idx2, ser)
- with tm.assert_produces_warning(
- FutureWarning, match=msg, check_stacklevel=False
- ):
+ with tm.assert_produces_warning(FutureWarning, match=msg):
result = idx2 ^ ser
tm.assert_index_equal(result, expected)
@@ -337,9 +335,7 @@ def test_reverse_ops_with_index(self, op, expected):
idx = Index([False, True])
msg = "operating as a set operation"
- with tm.assert_produces_warning(
- FutureWarning, match=msg, check_stacklevel=False
- ):
+ with tm.assert_produces_warning(FutureWarning, match=msg):
# behaving as set ops is deprecated, will become logical ops
result = op(ser, idx)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/series/test_reductions.py b/pandas/tests/series/test_reductions.py
index ca30e8f1ee6fd..c5b0428131973 100644
--- a/pandas/tests/series/test_reductions.py
+++ b/pandas/tests/series/test_reductions.py
@@ -7,6 +7,28 @@
Series,
)
import pandas._testing as tm
+from pandas.core.algorithms import mode
+
+
+@pytest.mark.parametrize("as_period", [True, False])
+def test_mode_extension_dtype(as_period):
+ # GH#41927 preserve dt64tz dtype
+ ser = Series([pd.Timestamp(1979, 4, n) for n in range(1, 5)])
+
+ if as_period:
+ ser = ser.dt.to_period("D")
+ else:
+ ser = ser.dt.tz_localize("US/Central")
+
+ res = ser.mode()
+ assert res.dtype == ser.dtype
+ tm.assert_series_equal(res, ser)
+
+ res = mode(ser._values)
+ tm.assert_series_equal(res, ser)
+
+ res = mode(pd.Index(ser))
+ tm.assert_series_equal(res, ser)
def test_reductions_td64_with_nat():
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 555342dd39005..de34caa7b4387 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -196,6 +196,7 @@ def test_timeseries_repr_object_dtype(self):
ts2 = ts.iloc[np.random.randint(0, len(ts) - 1, 400)]
repr(ts2).splitlines()[-1]
+ @pytest.mark.filterwarnings("ignore::FutureWarning")
def test_latex_repr(self):
result = r"""\begin{tabular}{ll}
\toprule
@@ -355,7 +356,7 @@ def test_categorical_series_repr_datetime(self):
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
- 2011-01-01 12:00:00, 2011-01-01 13:00:00]""" # noqa
+ 2011-01-01 12:00:00, 2011-01-01 13:00:00]""" # noqa:E501
assert repr(s) == exp
@@ -369,7 +370,7 @@ def test_categorical_series_repr_datetime(self):
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
- 2011-01-01 13:00:00-05:00]""" # noqa
+ 2011-01-01 13:00:00-05:00]""" # noqa:E501
assert repr(s) == exp
@@ -383,7 +384,7 @@ def test_categorical_series_repr_datetime_ordered(self):
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
- 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
+ 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa:E501
assert repr(s) == exp
@@ -397,7 +398,7 @@ def test_categorical_series_repr_datetime_ordered(self):
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
- 2011-01-01 13:00:00-05:00]""" # noqa
+ 2011-01-01 13:00:00-05:00]""" # noqa:E501
assert repr(s) == exp
@@ -411,7 +412,7 @@ def test_categorical_series_repr_period(self):
4 2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
- 2011-01-01 13:00]""" # noqa
+ 2011-01-01 13:00]""" # noqa:E501
assert repr(s) == exp
@@ -437,7 +438,7 @@ def test_categorical_series_repr_period_ordered(self):
4 2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
- 2011-01-01 13:00]""" # noqa
+ 2011-01-01 13:00]""" # noqa:E501
assert repr(s) == exp
@@ -481,7 +482,7 @@ def test_categorical_series_repr_timedelta(self):
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
- 8 days 01:00:00, 9 days 01:00:00]""" # noqa
+ 8 days 01:00:00, 9 days 01:00:00]""" # noqa:E501
assert repr(s) == exp
@@ -513,6 +514,6 @@ def test_categorical_series_repr_timedelta_ordered(self):
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
- 8 days 01:00:00 < 9 days 01:00:00]""" # noqa
+ 8 days 01:00:00 < 9 days 01:00:00]""" # noqa:E501
assert repr(s) == exp
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 5e9a53f32e0b7..f81e3d61c8ba5 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -100,10 +100,34 @@ def test_random_state():
(Series([1], name="x"), Series([2]), None),
(Series([1], name="x"), [2], "x"),
([1], Series([2], name="y"), "y"),
+ # matching NAs
+ (Series([1], name=np.nan), pd.Index([], name=np.nan), np.nan),
+ (Series([1], name=np.nan), pd.Index([], name=pd.NaT), None),
+ (Series([1], name=pd.NA), pd.Index([], name=pd.NA), pd.NA),
+ # tuple name GH#39757
+ (
+ Series([1], name=np.int64(1)),
+ pd.Index([], name=(np.int64(1), np.int64(2))),
+ None,
+ ),
+ (
+ Series([1], name=(np.int64(1), np.int64(2))),
+ pd.Index([], name=(np.int64(1), np.int64(2))),
+ (np.int64(1), np.int64(2)),
+ ),
+ pytest.param(
+ Series([1], name=(np.float64("nan"), np.int64(2))),
+ pd.Index([], name=(np.float64("nan"), np.int64(2))),
+ (np.float64("nan"), np.int64(2)),
+ marks=pytest.mark.xfail(
+ reason="Not checking for matching NAs inside tuples."
+ ),
+ ),
],
)
def test_maybe_match_name(left, right, expected):
- assert ops.common._maybe_match_name(left, right) == expected
+ res = ops.common._maybe_match_name(left, right)
+ assert res is expected or res == expected
def test_standardize_mapping():
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index f927a0ec0927b..1972fbbe0f414 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -5,11 +5,12 @@
import subprocess
import sys
-import numpy as np # noqa
+import numpy as np # noqa:F401 needed in namespace for statsmodels
import pytest
import pandas.util._test_decorators as td
+import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
@@ -29,25 +30,29 @@ def df():
return DataFrame({"A": [1, 2, 3]})
-# TODO(ArrayManager) dask is still accessing the blocks
-# https://github.com/dask/dask/pull/7318
-@td.skip_array_manager_not_yet_implemented
@pytest.mark.filterwarnings("ignore:.*64Index is deprecated:FutureWarning")
def test_dask(df):
- toolz = import_module("toolz") # noqa
- dask = import_module("dask") # noqa
+ # dask sets "compute.use_numexpr" to False, so catch the current value
+ # and ensure to reset it afterwards to avoid impacting other tests
+ olduse = pd.get_option("compute.use_numexpr")
- import dask.dataframe as dd
+ try:
+ toolz = import_module("toolz") # noqa:F841
+ dask = import_module("dask") # noqa:F841
+
+ import dask.dataframe as dd
- ddf = dd.from_pandas(df, npartitions=3)
- assert ddf.A is not None
- assert ddf.compute() is not None
+ ddf = dd.from_pandas(df, npartitions=3)
+ assert ddf.A is not None
+ assert ddf.compute() is not None
+ finally:
+ pd.set_option("compute.use_numexpr", olduse)
def test_xarray(df):
- xarray = import_module("xarray") # noqa
+ xarray = import_module("xarray") # noqa:F841
assert df.to_xarray() is not None
@@ -100,7 +105,7 @@ def test_oo_optimized_datetime_index_unpickle():
)
def test_statsmodels():
- statsmodels = import_module("statsmodels") # noqa
+ statsmodels = import_module("statsmodels") # noqa:F841
import statsmodels.api as sm
import statsmodels.formula.api as smf
@@ -112,7 +117,7 @@ def test_statsmodels():
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_scikit_learn(df):
- sklearn = import_module("sklearn") # noqa
+ sklearn = import_module("sklearn") # noqa:F841
from sklearn import (
datasets,
svm,
@@ -136,10 +141,14 @@ def test_seaborn():
def test_pandas_gbq(df):
- pandas_gbq = import_module("pandas_gbq") # noqa
+ pandas_gbq = import_module("pandas_gbq") # noqa:F841
-@pytest.mark.xfail(reason="0.8.1 tries to import urlencode from pd.io.common")
+@pytest.mark.xfail(
+ raises=ValueError,
+ reason="The Quandl API key must be provided either through the api_key "
+ "variable or through the environmental variable QUANDL_API_KEY",
+)
@tm.network
def test_pandas_datareader():
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 1348e62148cb1..115b8d716dd3f 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -5,6 +5,7 @@
import numpy as np
import pytest
+from pandas import set_option
import pandas._testing as tm
from pandas.core.api import (
DataFrame,
@@ -65,9 +66,9 @@ def call_op(df, other, flex: bool, opname: str):
else:
op = getattr(operator, opname)
- expr.set_use_numexpr(False)
+ set_option("compute.use_numexpr", False)
expected = op(df, other)
- expr.set_use_numexpr(True)
+ set_option("compute.use_numexpr", True)
expr.get_test_result()
@@ -107,9 +108,9 @@ def run_binary(self, df, other, flex: bool):
def run_frame(self, df, other, flex: bool):
self.run_arithmetic(df, other, flex)
- expr.set_use_numexpr(False)
+ set_option("compute.use_numexpr", False)
binary_comp = other + 1
- expr.set_use_numexpr(True)
+ set_option("compute.use_numexpr", True)
self.run_binary(df, binary_comp, flex)
for i in range(len(df.columns)):
@@ -179,9 +180,9 @@ def testit():
result = expr._can_use_numexpr(op, op_str, right, right, "evaluate")
assert not result
- expr.set_use_numexpr(False)
+ set_option("compute.use_numexpr", False)
testit()
- expr.set_use_numexpr(True)
+ set_option("compute.use_numexpr", True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
@@ -215,9 +216,9 @@ def testit():
result = expr._can_use_numexpr(op, op_str, right, f22, "evaluate")
assert not result
- expr.set_use_numexpr(False)
+ set_option("compute.use_numexpr", False)
testit()
- expr.set_use_numexpr(True)
+ set_option("compute.use_numexpr", True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
@@ -233,9 +234,9 @@ def testit():
expected = np.where(c, df.values, df.values + 1)
tm.assert_numpy_array_equal(result, expected)
- expr.set_use_numexpr(False)
+ set_option("compute.use_numexpr", False)
testit()
- expr.set_use_numexpr(True)
+ set_option("compute.use_numexpr", True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
@@ -287,32 +288,32 @@ def test_bool_ops_warn_on_arithmetic(self, op_str, opname):
return
with tm.use_numexpr(True, min_elements=5):
- with tm.assert_produces_warning(check_stacklevel=False):
+ with tm.assert_produces_warning():
r = f(df, df)
e = fe(df, df)
tm.assert_frame_equal(r, e)
- with tm.assert_produces_warning(check_stacklevel=False):
+ with tm.assert_produces_warning():
r = f(df.a, df.b)
e = fe(df.a, df.b)
tm.assert_series_equal(r, e)
- with tm.assert_produces_warning(check_stacklevel=False):
+ with tm.assert_produces_warning():
r = f(df.a, True)
e = fe(df.a, True)
tm.assert_series_equal(r, e)
- with tm.assert_produces_warning(check_stacklevel=False):
+ with tm.assert_produces_warning():
r = f(False, df.a)
e = fe(False, df.a)
tm.assert_series_equal(r, e)
- with tm.assert_produces_warning(check_stacklevel=False):
+ with tm.assert_produces_warning():
r = f(False, df)
e = fe(False, df)
tm.assert_frame_equal(r, e)
- with tm.assert_produces_warning(check_stacklevel=False):
+ with tm.assert_produces_warning():
r = f(df, True)
e = fe(df, True)
tm.assert_frame_equal(r, e)
@@ -360,9 +361,9 @@ def test_frame_series_axis(self, axis, arith):
op_func = getattr(df, arith)
- expr.set_use_numexpr(False)
+ set_option("compute.use_numexpr", False)
expected = op_func(other, axis=axis)
- expr.set_use_numexpr(True)
+ set_option("compute.use_numexpr", True)
result = op_func(other, axis=axis)
tm.assert_frame_equal(expected, result)
@@ -387,9 +388,9 @@ def test_python_semantics_with_numexpr_installed(self, op, box, scalar):
result = method(scalar)
# compare result with numpy
- expr.set_use_numexpr(False)
+ set_option("compute.use_numexpr", False)
expected = method(scalar)
- expr.set_use_numexpr(True)
+ set_option("compute.use_numexpr", True)
tm.assert_equal(result, expected)
# compare result element-wise with Python
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 4867ba58838ef..3842e9a625b8b 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -90,13 +90,13 @@ def test_to_datetime_format(self, cache):
tm.assert_index_equal(result, expected)
def test_to_datetime_format_YYYYMMDD(self, cache):
- s = Series([19801222, 19801222] + [19810105] * 5)
- expected = Series([Timestamp(x) for x in s.apply(str)])
+ ser = Series([19801222, 19801222] + [19810105] * 5)
+ expected = Series([Timestamp(x) for x in ser.apply(str)])
- result = to_datetime(s, format="%Y%m%d", cache=cache)
+ result = to_datetime(ser, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
- result = to_datetime(s.apply(str), format="%Y%m%d", cache=cache)
+ result = to_datetime(ser.apply(str), format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# with NaT
@@ -104,15 +104,15 @@ def test_to_datetime_format_YYYYMMDD(self, cache):
[Timestamp("19801222"), Timestamp("19801222")] + [Timestamp("19810105")] * 5
)
expected[2] = np.nan
- s[2] = np.nan
+ ser[2] = np.nan
- result = to_datetime(s, format="%Y%m%d", cache=cache)
+ result = to_datetime(ser, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# string with NaT
- s = s.apply(str)
- s[2] = "nat"
- result = to_datetime(s, format="%Y%m%d", cache=cache)
+ ser2 = ser.apply(str)
+ ser2[2] = "nat"
+ result = to_datetime(ser2, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
def test_to_datetime_format_YYYYMMDD_coercion(self, cache):
@@ -208,16 +208,16 @@ def test_to_datetime_with_NA(self, data, format, expected):
def test_to_datetime_format_integer(self, cache):
# GH 10178
- s = Series([2000, 2001, 2002])
- expected = Series([Timestamp(x) for x in s.apply(str)])
+ ser = Series([2000, 2001, 2002])
+ expected = Series([Timestamp(x) for x in ser.apply(str)])
- result = to_datetime(s, format="%Y", cache=cache)
+ result = to_datetime(ser, format="%Y", cache=cache)
tm.assert_series_equal(result, expected)
- s = Series([200001, 200105, 200206])
- expected = Series([Timestamp(x[:4] + "-" + x[4:]) for x in s.apply(str)])
+ ser = Series([200001, 200105, 200206])
+ expected = Series([Timestamp(x[:4] + "-" + x[4:]) for x in ser.apply(str)])
- result = to_datetime(s, format="%Y%m", cache=cache)
+ result = to_datetime(ser, format="%Y%m", cache=cache)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
@@ -262,29 +262,36 @@ def test_to_datetime_format_time(self, cache):
"01/10/2010 13:56:01",
"%m/%d/%Y %H:%M:%S",
Timestamp("2010-01-10 13:56:01"),
- ] # ,
- # FIXME: don't leave commented-out
- # ['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p',
- # Timestamp('2010-01-10 20:14')],
- # ['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p',
- # Timestamp('2010-01-10 07:40')],
- # ['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p',
- # Timestamp('2010-01-10 09:12:56')]
+ ],
+ ]
+ locale_specific = [
+ ["01/10/2010 08:14 PM", "%m/%d/%Y %I:%M %p", Timestamp("2010-01-10 20:14")],
+ ["01/10/2010 07:40 AM", "%m/%d/%Y %I:%M %p", Timestamp("2010-01-10 07:40")],
+ [
+ "01/10/2010 09:12:56 AM",
+ "%m/%d/%Y %I:%M:%S %p",
+ Timestamp("2010-01-10 09:12:56"),
+ ],
]
- for s, format, dt in data:
- assert to_datetime(s, format=format, cache=cache) == dt
+ if locale.getlocale()[0] == "en_US":
+ # this fail on a CI build with LC_ALL=zh_CN.utf8, so en_US
+ # may be more specific than necessary.
+ data.extend(locale_specific)
+
+ for value, format, dt in data:
+ assert to_datetime(value, format=format, cache=cache) == dt
@td.skip_if_has_locale
def test_to_datetime_with_non_exact(self, cache):
# GH 10834
# 8904
# exact kw
- s = Series(
+ ser = Series(
["19MAY11", "foobar19MAY11", "19MAY11:00:00:00", "19MAY11 00:00:00Z"]
)
- result = to_datetime(s, format="%d%b%y", exact=False, cache=cache)
+ result = to_datetime(ser, format="%d%b%y", exact=False, cache=cache)
expected = to_datetime(
- s.str.extract(r"(\d+\w+\d+)", expand=False), format="%d%b%y", cache=cache
+ ser.str.extract(r"(\d+\w+\d+)", expand=False), format="%d%b%y", cache=cache
)
tm.assert_series_equal(result, expected)
@@ -543,8 +550,8 @@ def test_to_datetime_YYYYMMDD(self):
def test_to_datetime_unparseable_ignore(self):
# unparsable
- s = "Month 1, 1999"
- assert to_datetime(s, errors="ignore") == s
+ ser = "Month 1, 1999"
+ assert to_datetime(ser, errors="ignore") == ser
@td.skip_if_windows # `tm.set_timezone` does not work in windows
def test_to_datetime_now(self):
@@ -1356,8 +1363,8 @@ def test_to_datetime_unit_fractional_seconds(self):
# GH13834
epoch = 1370745748
- s = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float)
- result = to_datetime(s, unit="s")
+ ser = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float)
+ result = to_datetime(ser, unit="s")
expected = Series(
[
Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t)
@@ -1397,13 +1404,6 @@ def test_to_timestamp_unit_coerce(self):
class TestToDatetimeDataFrame:
- @pytest.fixture(params=[True, False])
- def cache(self, request):
- """
- cache keyword to pass to to_datetime.
- """
- return request.param
-
@pytest.fixture
def df(self):
return DataFrame(
@@ -1619,30 +1619,35 @@ def test_to_datetime_default(self, cache):
xp = datetime(2001, 1, 1)
assert rs == xp
+ @pytest.mark.xfail(reason="fails to enforce dayfirst=True, which would raise")
+ def test_to_datetime_respects_dayfirst(self, cache):
# dayfirst is essentially broken
- # FIXME: don't leave commented-out
- # to_datetime('01-13-2012', dayfirst=True)
- # pytest.raises(ValueError, to_datetime('01-13-2012',
- # dayfirst=True))
+
+ # The msg here is not important since it isn't actually raised yet.
+ msg = "Invalid date specified"
+ with pytest.raises(ValueError, match=msg):
+ # if dayfirst is respected, then this would parse as month=13, which
+ # would raise
+ to_datetime("01-13-2012", dayfirst=True, cache=cache)
def test_to_datetime_on_datetime64_series(self, cache):
# #2699
- s = Series(date_range("1/1/2000", periods=10))
+ ser = Series(date_range("1/1/2000", periods=10))
- result = to_datetime(s, cache=cache)
- assert result[0] == s[0]
+ result = to_datetime(ser, cache=cache)
+ assert result[0] == ser[0]
def test_to_datetime_with_space_in_series(self, cache):
# GH 6428
- s = Series(["10/18/2006", "10/18/2008", " "])
+ ser = Series(["10/18/2006", "10/18/2008", " "])
msg = r"(\(')?String does not contain a date(:', ' '\))?"
with pytest.raises(ValueError, match=msg):
- to_datetime(s, errors="raise", cache=cache)
- result_coerce = to_datetime(s, errors="coerce", cache=cache)
+ to_datetime(ser, errors="raise", cache=cache)
+ result_coerce = to_datetime(ser, errors="coerce", cache=cache)
expected_coerce = Series([datetime(2006, 10, 18), datetime(2008, 10, 18), NaT])
tm.assert_series_equal(result_coerce, expected_coerce)
- result_ignore = to_datetime(s, errors="ignore", cache=cache)
- tm.assert_series_equal(result_ignore, s)
+ result_ignore = to_datetime(ser, errors="ignore", cache=cache)
+ tm.assert_series_equal(result_ignore, ser)
@td.skip_if_has_locale
def test_to_datetime_with_apply(self, cache):
@@ -1681,23 +1686,22 @@ def test_to_datetime_types(self, cache):
expected = to_datetime(0, cache=cache)
assert result == expected
+ def test_to_datetime_strings(self, cache):
# GH 3888 (strings)
expected = to_datetime(["2012"], cache=cache)[0]
result = to_datetime("2012", cache=cache)
assert result == expected
- # FIXME: don't leave commented-out
- # array = ['2012','20120101','20120101 12:01:01']
- array = ["20120101", "20120101 12:01:01"]
+ array = ["2012", "20120101", "20120101 12:01:01"]
expected = list(to_datetime(array, cache=cache))
result = [Timestamp(date_str) for date_str in array]
tm.assert_almost_equal(result, expected)
- # FIXME: don't leave commented-out
- # currently fails ###
- # result = Timestamp('2012')
- # expected = to_datetime('2012')
- # assert result == expected
+ expected = Timestamp(2012, 1, 1)
+ result = Timestamp("2012")
+ assert result == expected
+ result = to_datetime("2012")
+ assert result == expected
def test_to_datetime_unprocessable_input(self, cache):
# GH 4928
@@ -1963,12 +1967,12 @@ def test_guess_datetime_format_for_array(self):
class TestToDatetimeInferFormat:
def test_to_datetime_infer_datetime_format_consistent_format(self, cache):
- s = Series(date_range("20000101", periods=50, freq="H"))
+ ser = Series(date_range("20000101", periods=50, freq="H"))
test_formats = ["%m-%d-%Y", "%m/%d/%Y %H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S.%f"]
for test_format in test_formats:
- s_as_dt_strings = s.apply(lambda x: x.strftime(test_format))
+ s_as_dt_strings = ser.apply(lambda x: x.strftime(test_format))
with_format = to_datetime(s_as_dt_strings, format=test_format, cache=cache)
no_infer = to_datetime(
@@ -1984,7 +1988,7 @@ def test_to_datetime_infer_datetime_format_consistent_format(self, cache):
tm.assert_series_equal(no_infer, yes_infer)
def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache):
- s = Series(
+ ser = Series(
np.array(
["01/01/2011 00:00:00", "01-02-2011 00:00:00", "2011-01-03T00:00:00"]
)
@@ -1993,31 +1997,31 @@ def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache):
# When the format is inconsistent, infer_datetime_format should just
# fallback to the default parsing
tm.assert_series_equal(
- to_datetime(s, infer_datetime_format=False, cache=cache),
- to_datetime(s, infer_datetime_format=True, cache=cache),
+ to_datetime(ser, infer_datetime_format=False, cache=cache),
+ to_datetime(ser, infer_datetime_format=True, cache=cache),
)
- s = Series(np.array(["Jan/01/2011", "Feb/01/2011", "Mar/01/2011"]))
+ ser = Series(np.array(["Jan/01/2011", "Feb/01/2011", "Mar/01/2011"]))
tm.assert_series_equal(
- to_datetime(s, infer_datetime_format=False, cache=cache),
- to_datetime(s, infer_datetime_format=True, cache=cache),
+ to_datetime(ser, infer_datetime_format=False, cache=cache),
+ to_datetime(ser, infer_datetime_format=True, cache=cache),
)
def test_to_datetime_infer_datetime_format_series_with_nans(self, cache):
- s = Series(
+ ser = Series(
np.array(
["01/01/2011 00:00:00", np.nan, "01/03/2011 00:00:00", np.nan],
dtype=object,
)
)
tm.assert_series_equal(
- to_datetime(s, infer_datetime_format=False, cache=cache),
- to_datetime(s, infer_datetime_format=True, cache=cache),
+ to_datetime(ser, infer_datetime_format=False, cache=cache),
+ to_datetime(ser, infer_datetime_format=True, cache=cache),
)
def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache):
- s = Series(
+ ser = Series(
np.array(
[
np.nan,
@@ -2031,8 +2035,8 @@ def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache):
)
tm.assert_series_equal(
- to_datetime(s, infer_datetime_format=False, cache=cache),
- to_datetime(s, infer_datetime_format=True, cache=cache),
+ to_datetime(ser, infer_datetime_format=False, cache=cache),
+ to_datetime(ser, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize(
@@ -2040,8 +2044,8 @@ def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache):
)
def test_infer_datetime_format_tz_name(self, tz_name, offset):
# GH 33133
- s = Series([f"2019-02-02 08:07:13 {tz_name}"])
- result = to_datetime(s, infer_datetime_format=True)
+ ser = Series([f"2019-02-02 08:07:13 {tz_name}"])
+ result = to_datetime(ser, infer_datetime_format=True)
expected = Series(
[Timestamp("2019-02-02 08:07:13").tz_localize(pytz.FixedOffset(offset))]
)
@@ -2058,15 +2062,15 @@ def test_infer_datetime_format_tz_name(self, tz_name, offset):
)
def test_infer_datetime_format_zero_tz(self, ts, zero_tz, is_utc):
# GH 41047
- s = Series([ts + zero_tz])
- result = to_datetime(s, infer_datetime_format=True)
+ ser = Series([ts + zero_tz])
+ result = to_datetime(ser, infer_datetime_format=True)
tz = pytz.utc if is_utc else None
expected = Series([Timestamp(ts, tz=tz)])
tm.assert_series_equal(result, expected)
def test_to_datetime_iso8601_noleading_0s(self, cache):
# GH 11871
- s = Series(["2014-1-1", "2014-2-2", "2015-3-3"])
+ ser = Series(["2014-1-1", "2014-2-2", "2015-3-3"])
expected = Series(
[
Timestamp("2014-01-01"),
@@ -2074,8 +2078,10 @@ def test_to_datetime_iso8601_noleading_0s(self, cache):
Timestamp("2015-03-03"),
]
)
- tm.assert_series_equal(to_datetime(s, cache=cache), expected)
- tm.assert_series_equal(to_datetime(s, format="%Y-%m-%d", cache=cache), expected)
+ tm.assert_series_equal(to_datetime(ser, cache=cache), expected)
+ tm.assert_series_equal(
+ to_datetime(ser, format="%Y-%m-%d", cache=cache), expected
+ )
class TestDaysInMonth:
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index 395fdea67f1bd..7b35e8d55c338 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -267,9 +267,9 @@ def test_to_timedelta_precision_over_nanos(self, input, expected, func):
result = func(input)
assert result == expected
- def test_to_timedelta_zerodim(self):
+ def test_to_timedelta_zerodim(self, fixed_now_ts):
# ndarray.item() incorrectly returns int for dt64[ns] and td64[ns]
- dt64 = pd.Timestamp.now().to_datetime64()
+ dt64 = fixed_now_ts.to_datetime64()
arg = np.array(dt64)
msg = (
diff --git a/pandas/tests/tseries/offsets/common.py b/pandas/tests/tseries/offsets/common.py
index 0227a07877db0..d8e98bb0c6876 100644
--- a/pandas/tests/tseries/offsets/common.py
+++ b/pandas/tests/tseries/offsets/common.py
@@ -28,7 +28,7 @@
def assert_offset_equal(offset, base, expected):
actual = offset + base
actual_swapped = base + offset
- actual_apply = offset.apply(base)
+ actual_apply = offset._apply(base)
try:
assert actual == expected
assert actual_swapped == expected
@@ -155,7 +155,7 @@ def test_rsub(self):
# i.e. skip for TestCommon and YQM subclasses that do not have
# offset2 attr
return
- assert self.d - self.offset2 == (-self.offset2).apply(self.d)
+ assert self.d - self.offset2 == (-self.offset2)._apply(self.d)
def test_radd(self):
if self._offset is None or not hasattr(self, "offset2"):
diff --git a/pandas/tests/tseries/offsets/test_business_day.py b/pandas/tests/tseries/offsets/test_business_day.py
index 92daafaf469cd..c40ae611687dd 100644
--- a/pandas/tests/tseries/offsets/test_business_day.py
+++ b/pandas/tests/tseries/offsets/test_business_day.py
@@ -14,9 +14,11 @@
BDay,
BMonthEnd,
)
+from pandas.compat import np_datetime64_compat
from pandas import (
DatetimeIndex,
+ Timedelta,
_testing as tm,
)
from pandas.tests.tseries.offsets.common import (
@@ -34,10 +36,11 @@ class TestBusinessDay(Base):
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
+ self.nd = np_datetime64_compat("2008-01-01 00:00:00Z")
- self.offset = BDay()
+ self.offset = self._offset()
self.offset1 = self.offset
- self.offset2 = BDay(2)
+ self.offset2 = self._offset(2)
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
@@ -57,11 +60,24 @@ def test_with_offset(self):
assert (self.d + offset) == datetime(2008, 1, 2, 2)
- def test_with_offset_index(self):
- dti = DatetimeIndex([self.d])
- result = dti + (self.offset + timedelta(hours=2))
+ @pytest.mark.parametrize(
+ "td",
+ [
+ Timedelta(hours=2),
+ Timedelta(hours=2).to_pytimedelta(),
+ Timedelta(hours=2).to_timedelta64(),
+ ],
+ ids=lambda x: type(x),
+ )
+ def test_with_offset_index(self, td):
+ dti = DatetimeIndex([self.d])
expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
+
+ result = dti + (td + self.offset)
+ tm.assert_index_equal(result, expected)
+
+ result = dti + (self.offset + td)
tm.assert_index_equal(result, expected)
def test_eq(self):
@@ -77,21 +93,24 @@ def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 1, 3)
+ assert self.offset2(self.nd) == datetime(2008, 1, 3)
def testRollback1(self):
- assert BDay(10).rollback(self.d) == self.d
+ assert self._offset(10).rollback(self.d) == self.d
def testRollback2(self):
- assert BDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
+ assert self._offset(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
def testRollforward1(self):
- assert BDay(10).rollforward(self.d) == self.d
+ assert self._offset(10).rollforward(self.d) == self.d
def testRollforward2(self):
- assert BDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
+ assert self._offset(10).rollforward(datetime(2008, 1, 5)) == datetime(
+ 2008, 1, 7
+ )
def test_roll_date_object(self):
- offset = BDay()
+ offset = self._offset()
dt = date(2012, 9, 15)
@@ -110,8 +129,8 @@ def test_roll_date_object(self):
def test_is_on_offset(self):
tests = [
- (BDay(), datetime(2008, 1, 1), True),
- (BDay(), datetime(2008, 1, 5), False),
+ (self._offset(), datetime(2008, 1, 1), True),
+ (self._offset(), datetime(2008, 1, 5), False),
]
for offset, d, expected in tests:
@@ -119,7 +138,7 @@ def test_is_on_offset(self):
apply_cases: _ApplyCases = [
(
- BDay(),
+ 1,
{
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
@@ -129,7 +148,7 @@ def test_is_on_offset(self):
},
),
(
- 2 * BDay(),
+ 2,
{
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
@@ -139,7 +158,7 @@ def test_is_on_offset(self):
},
),
(
- -BDay(),
+ -1,
{
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
@@ -150,7 +169,7 @@ def test_is_on_offset(self):
},
),
(
- -2 * BDay(),
+ -2,
{
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
@@ -162,7 +181,7 @@ def test_is_on_offset(self):
},
),
(
- BDay(0),
+ 0,
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
@@ -175,20 +194,21 @@ def test_is_on_offset(self):
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
- offset, cases = case
+ n, cases = case
+ offset = self._offset(n)
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
- result = dt + BDay(10)
+ result = dt + self._offset(10)
assert result == datetime(2012, 11, 6)
- result = dt + BDay(100) - BDay(100)
+ result = dt + self._offset(100) - self._offset(100)
assert result == dt
- off = BDay() * 6
+ off = self._offset() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
@@ -198,12 +218,18 @@ def test_apply_large_n(self):
xp = datetime(2011, 12, 26)
assert rs == xp
- off = BDay() * 10
+ off = self._offset() * 10
rs = datetime(2014, 1, 5) + off # see #5890
xp = datetime(2014, 1, 17)
assert rs == xp
def test_apply_corner(self):
- msg = "Only know how to combine business day with datetime or timedelta"
+ if self._offset is BDay:
+ msg = "Only know how to combine business day with datetime or timedelta"
+ else:
+ msg = (
+ "Only know how to combine trading day "
+ "with datetime, datetime64 or timedelta"
+ )
with pytest.raises(ApplyTypeError, match=msg):
- BDay().apply(BMonthEnd())
+ self._offset()._apply(BMonthEnd())
diff --git a/pandas/tests/tseries/offsets/test_business_hour.py b/pandas/tests/tseries/offsets/test_business_hour.py
index ee05eab5ec5ca..401bfe664a3a2 100644
--- a/pandas/tests/tseries/offsets/test_business_hour.py
+++ b/pandas/tests/tseries/offsets/test_business_hour.py
@@ -318,7 +318,7 @@ def test_roll_date_object(self):
def test_normalize(self, case):
offset, cases = case
for dt, expected in cases.items():
- assert offset.apply(dt) == expected
+ assert offset._apply(dt) == expected
on_offset_cases = []
on_offset_cases.append(
diff --git a/pandas/tests/tseries/offsets/test_custom_business_day.py b/pandas/tests/tseries/offsets/test_custom_business_day.py
index b8014f7112435..3bbbaa891709f 100644
--- a/pandas/tests/tseries/offsets/test_custom_business_day.py
+++ b/pandas/tests/tseries/offsets/test_custom_business_day.py
@@ -2,7 +2,6 @@
Tests for offsets.CustomBusinessDay / CDay
"""
from datetime import (
- date,
datetime,
timedelta,
)
@@ -10,46 +9,21 @@
import numpy as np
import pytest
-from pandas._libs.tslibs.offsets import (
- ApplyTypeError,
- BMonthEnd,
- CDay,
-)
-from pandas.compat import np_datetime64_compat
+from pandas._libs.tslibs.offsets import CDay
from pandas import (
- DatetimeIndex,
_testing as tm,
read_pickle,
)
-from pandas.tests.tseries.offsets.common import (
- Base,
- assert_is_on_offset,
- assert_offset_equal,
-)
-from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
+from pandas.tests.tseries.offsets.common import assert_offset_equal
+from pandas.tests.tseries.offsets.test_business_day import TestBusinessDay
-from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
-class TestCustomBusinessDay(Base):
+class TestCustomBusinessDay(TestBusinessDay):
_offset = CDay
- def setup_method(self, method):
- self.d = datetime(2008, 1, 1)
- self.nd = np_datetime64_compat("2008-01-01 00:00:00Z")
-
- self.offset = CDay()
- self.offset1 = self.offset
- self.offset2 = CDay(2)
-
- def test_different_normalize_equals(self):
- # GH#21404 changed __eq__ to return False when `normalize` does not match
- offset = self._offset()
- offset2 = self._offset(normalize=True)
- assert offset != offset2
-
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessDay>"
assert repr(self.offset2) == "<2 * CustomBusinessDays>"
@@ -57,162 +31,6 @@ def test_repr(self):
expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"
assert repr(self.offset + timedelta(1)) == expected
- def test_with_offset(self):
- offset = self.offset + timedelta(hours=2)
-
- assert (self.d + offset) == datetime(2008, 1, 2, 2)
-
- def test_with_offset_index(self):
- dti = DatetimeIndex([self.d])
- result = dti + (self.offset + timedelta(hours=2))
-
- expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
- tm.assert_index_equal(result, expected)
-
- def test_eq(self):
- assert self.offset2 == self.offset2
-
- def test_mul(self):
- pass
-
- def test_hash(self):
- assert hash(self.offset2) == hash(self.offset2)
-
- def test_call(self):
- with tm.assert_produces_warning(FutureWarning):
- # GH#34171 DateOffset.__call__ is deprecated
- assert self.offset2(self.d) == datetime(2008, 1, 3)
- assert self.offset2(self.nd) == datetime(2008, 1, 3)
-
- def testRollback1(self):
- assert CDay(10).rollback(self.d) == self.d
-
- def testRollback2(self):
- assert CDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
-
- def testRollforward1(self):
- assert CDay(10).rollforward(self.d) == self.d
-
- def testRollforward2(self):
- assert CDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
-
- def test_roll_date_object(self):
- offset = CDay()
-
- dt = date(2012, 9, 15)
-
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 14)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 17)
-
- offset = offsets.Day()
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 15)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 15)
-
- on_offset_cases = [
- (CDay(), datetime(2008, 1, 1), True),
- (CDay(), datetime(2008, 1, 5), False),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- offset, day, expected = case
- assert_is_on_offset(offset, day, expected)
-
- apply_cases: _ApplyCases = [
- (
- CDay(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 2),
- datetime(2008, 1, 4): datetime(2008, 1, 7),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 8),
- },
- ),
- (
- 2 * CDay(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 3),
- datetime(2008, 1, 4): datetime(2008, 1, 8),
- datetime(2008, 1, 5): datetime(2008, 1, 8),
- datetime(2008, 1, 6): datetime(2008, 1, 8),
- datetime(2008, 1, 7): datetime(2008, 1, 9),
- },
- ),
- (
- -CDay(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 31),
- datetime(2008, 1, 4): datetime(2008, 1, 3),
- datetime(2008, 1, 5): datetime(2008, 1, 4),
- datetime(2008, 1, 6): datetime(2008, 1, 4),
- datetime(2008, 1, 7): datetime(2008, 1, 4),
- datetime(2008, 1, 8): datetime(2008, 1, 7),
- },
- ),
- (
- -2 * CDay(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 28),
- datetime(2008, 1, 4): datetime(2008, 1, 2),
- datetime(2008, 1, 5): datetime(2008, 1, 3),
- datetime(2008, 1, 6): datetime(2008, 1, 3),
- datetime(2008, 1, 7): datetime(2008, 1, 3),
- datetime(2008, 1, 8): datetime(2008, 1, 4),
- datetime(2008, 1, 9): datetime(2008, 1, 7),
- },
- ),
- (
- CDay(0),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 4): datetime(2008, 1, 4),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 7),
- },
- ),
- ]
-
- @pytest.mark.parametrize("case", apply_cases)
- def test_apply(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- def test_apply_large_n(self):
- dt = datetime(2012, 10, 23)
-
- result = dt + CDay(10)
- assert result == datetime(2012, 11, 6)
-
- result = dt + CDay(100) - CDay(100)
- assert result == dt
-
- off = CDay() * 6
- rs = datetime(2012, 1, 1) - off
- xp = datetime(2011, 12, 23)
- assert rs == xp
-
- st = datetime(2011, 12, 18)
- rs = st + off
- xp = datetime(2011, 12, 26)
- assert rs == xp
-
- def test_apply_corner(self):
- msg = (
- "Only know how to combine trading day "
- "with datetime, datetime64 or timedelta"
- )
- with pytest.raises(ApplyTypeError, match=msg):
- CDay().apply(BMonthEnd())
-
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]
diff --git a/pandas/tests/tseries/offsets/test_custom_business_hour.py b/pandas/tests/tseries/offsets/test_custom_business_hour.py
index 8bc06cdd45a50..dbc0ff4371fd9 100644
--- a/pandas/tests/tseries/offsets/test_custom_business_hour.py
+++ b/pandas/tests/tseries/offsets/test_custom_business_hour.py
@@ -192,7 +192,7 @@ def test_roll_date_object(self):
def test_normalize(self, norm_cases):
offset, cases = norm_cases
for dt, expected in cases.items():
- assert offset.apply(dt) == expected
+ assert offset._apply(dt) == expected
def test_is_on_offset(self):
tests = [
diff --git a/pandas/tests/tseries/offsets/test_dst.py b/pandas/tests/tseries/offsets/test_dst.py
index 9721d7fbd9067..50c5a91fc2390 100644
--- a/pandas/tests/tseries/offsets/test_dst.py
+++ b/pandas/tests/tseries/offsets/test_dst.py
@@ -177,18 +177,20 @@ def test_all_offset_classes(self, tup):
assert first == second
-@pytest.mark.xfail(
- strict=False, reason="'Africa/Kinshasa' test case fails under pytz=2017.3"
-)
@pytest.mark.parametrize(
"original_dt, target_dt, offset, tz",
[
- (
+ pytest.param(
Timestamp("1900-01-01"),
Timestamp("1905-07-01"),
MonthBegin(66),
"Africa/Kinshasa",
- ), # GH41906
+ marks=pytest.mark.xfail(
+ # error: Module has no attribute "__version__"
+ float(pytz.__version__) <= 2020.1, # type: ignore[attr-defined]
+ reason="GH#41906",
+ ),
+ ),
(
Timestamp("2021-10-01 01:15"),
Timestamp("2021-10-31 01:15"),
diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py
index 1eee9e611e0f1..8df93102d4bd2 100644
--- a/pandas/tests/tseries/offsets/test_fiscal.py
+++ b/pandas/tests/tseries/offsets/test_fiscal.py
@@ -643,18 +643,18 @@ def test_bunched_yearends():
fy = FY5253(n=1, weekday=5, startingMonth=12, variation="nearest")
dt = Timestamp("2004-01-01")
assert fy.rollback(dt) == Timestamp("2002-12-28")
- assert (-fy).apply(dt) == Timestamp("2002-12-28")
+ assert (-fy)._apply(dt) == Timestamp("2002-12-28")
assert dt - fy == Timestamp("2002-12-28")
assert fy.rollforward(dt) == Timestamp("2004-01-03")
- assert fy.apply(dt) == Timestamp("2004-01-03")
+ assert fy._apply(dt) == Timestamp("2004-01-03")
assert fy + dt == Timestamp("2004-01-03")
assert dt + fy == Timestamp("2004-01-03")
# Same thing, but starting from a Timestamp in the previous year.
dt = Timestamp("2003-12-31")
assert fy.rollback(dt) == Timestamp("2002-12-28")
- assert (-fy).apply(dt) == Timestamp("2002-12-28")
+ assert (-fy)._apply(dt) == Timestamp("2002-12-28")
assert dt - fy == Timestamp("2002-12-28")
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 0c79c0b64f4cd..134ba79e7773d 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -125,7 +125,7 @@ def test_return_type(self, offset_types):
assert offset + NaT is NaT
assert NaT - offset is NaT
- assert (-offset).apply(NaT) is NaT
+ assert (-offset)._apply(NaT) is NaT
def test_offset_n(self, offset_types):
offset = self._get_offset(offset_types)
@@ -188,7 +188,7 @@ def _check_offsetfunc_works(self, offset, funcname, dt, expected, normalize=Fals
if (
type(offset_s).__name__ == "DateOffset"
- and (funcname == "apply" or normalize)
+ and (funcname in ["apply", "_apply"] or normalize)
and ts.nanosecond > 0
):
exp_warning = UserWarning
@@ -196,6 +196,17 @@ def _check_offsetfunc_works(self, offset, funcname, dt, expected, normalize=Fals
# test nanosecond is preserved
with tm.assert_produces_warning(exp_warning):
result = func(ts)
+
+ if exp_warning is None and funcname == "_apply":
+ # GH#44522
+ # Check in this particular case to avoid headaches with
+ # testing for multiple warnings produced by the same call.
+ with tm.assert_produces_warning(FutureWarning, match="apply is deprecated"):
+ res2 = offset_s.apply(ts)
+
+ assert type(res2) is type(result)
+ assert res2 == result
+
assert isinstance(result, Timestamp)
if normalize is False:
assert result == expected + Nano(5)
@@ -225,7 +236,7 @@ def _check_offsetfunc_works(self, offset, funcname, dt, expected, normalize=Fals
if (
type(offset_s).__name__ == "DateOffset"
- and (funcname == "apply" or normalize)
+ and (funcname in ["apply", "_apply"] or normalize)
and ts.nanosecond > 0
):
exp_warning = UserWarning
@@ -243,13 +254,14 @@ def test_apply(self, offset_types):
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat("2011-01-01 09:00Z")
+ expected = self.expecteds[offset_types.__name__]
+ expected_norm = Timestamp(expected.date())
+
for dt in [sdt, ndt]:
- expected = self.expecteds[offset_types.__name__]
- self._check_offsetfunc_works(offset_types, "apply", dt, expected)
+ self._check_offsetfunc_works(offset_types, "_apply", dt, expected)
- expected = Timestamp(expected.date())
self._check_offsetfunc_works(
- offset_types, "apply", dt, expected, normalize=True
+ offset_types, "_apply", dt, expected_norm, normalize=True
)
def test_rollforward(self, offset_types):
@@ -498,11 +510,11 @@ def test_pickle_dateoffset_odd_inputs(self):
base_dt = datetime(2020, 1, 1)
assert base_dt + off == base_dt + res
- def test_onOffset_deprecated(self, offset_types):
+ def test_onOffset_deprecated(self, offset_types, fixed_now_ts):
# GH#30340 use idiomatic naming
off = self._get_offset(offset_types)
- ts = Timestamp.now()
+ ts = fixed_now_ts
with tm.assert_produces_warning(FutureWarning):
result = off.onOffset(ts)
diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py
index 52a2f3aeee850..464eeaed1e725 100644
--- a/pandas/tests/tseries/offsets/test_ticks.py
+++ b/pandas/tests/tseries/offsets/test_ticks.py
@@ -45,7 +45,7 @@
def test_apply_ticks():
- result = offsets.Hour(3).apply(offsets.Hour(4))
+ result = offsets.Hour(3)._apply(offsets.Hour(4))
exp = offsets.Hour(7)
assert result == exp
@@ -76,7 +76,7 @@ def test_tick_add_sub(cls, n, m):
expected = cls(n + m)
assert left + right == expected
- assert left.apply(right) == expected
+ assert left._apply(right) == expected
expected = cls(n - m)
assert left - right == expected
@@ -230,9 +230,16 @@ def test_Nanosecond():
)
def test_tick_addition(kls, expected):
offset = kls(3)
- result = offset + Timedelta(hours=2)
- assert isinstance(result, Timedelta)
- assert result == expected
+ td = Timedelta(hours=2)
+
+ for other in [td, td.to_pytimedelta(), td.to_timedelta64()]:
+ result = offset + other
+ assert isinstance(result, Timedelta)
+ assert result == expected
+
+ result = other + offset
+ assert isinstance(result, Timedelta)
+ assert result == expected
@pytest.mark.parametrize("cls", tick_classes)
diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py
index 4ded555ed8f73..d7abb19530837 100644
--- a/pandas/tests/tslibs/test_api.py
+++ b/pandas/tests/tslibs/test_api.py
@@ -29,7 +29,6 @@ def test_namespace():
"NaT",
"NaTType",
"iNaT",
- "is_null_datetimelike",
"nat_strings",
"OutOfBoundsDatetime",
"OutOfBoundsTimedelta",
diff --git a/pandas/tests/tslibs/test_timezones.py b/pandas/tests/tslibs/test_timezones.py
index fbda5e8fda9dd..7ab0ad0856af0 100644
--- a/pandas/tests/tslibs/test_timezones.py
+++ b/pandas/tests/tslibs/test_timezones.py
@@ -143,7 +143,7 @@ def test_maybe_get_tz_invalid_types():
msg = "<class 'pandas._libs.tslibs.timestamps.Timestamp'>"
with pytest.raises(TypeError, match=msg):
- timezones.maybe_get_tz(Timestamp.now("UTC"))
+ timezones.maybe_get_tz(Timestamp("2021-01-01", tz="UTC"))
def test_maybe_get_tz_offset_only():
diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py
index faea0a54dc330..74f701d68ddd5 100644
--- a/pandas/tests/util/test_assert_frame_equal.py
+++ b/pandas/tests/util/test_assert_frame_equal.py
@@ -329,3 +329,21 @@ def test_assert_series_equal_check_like_different_indexes():
df2 = DataFrame(index=pd.RangeIndex(start=0, stop=0, step=1))
with pytest.raises(AssertionError, match="DataFrame.index are different"):
tm.assert_frame_equal(df1, df2, check_like=True)
+
+
+def test_assert_frame_equal_attrs():
+ # GH#28283
+
+ expected_attrs = {"a": 1}
+
+ left = DataFrame({"a": [1, 2], "b": [3, 4]})
+ left.attrs.update(expected_attrs)
+
+ right = DataFrame({"a": [1, 2], "b": [3, 4]})
+
+ msg = f"{expected_attrs} != {{}}"
+ with pytest.raises(AssertionError, match=msg):
+ tm.assert_frame_equal(left, right)
+
+ right.attrs.update(expected_attrs)
+ tm.assert_frame_equal(left, right)
diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py
index 2ebc6e17ba497..270efea156992 100644
--- a/pandas/tests/util/test_assert_series_equal.py
+++ b/pandas/tests/util/test_assert_series_equal.py
@@ -1,5 +1,7 @@
import pytest
+from pandas.core.dtypes.common import is_extension_array_dtype
+
import pandas as pd
from pandas import (
Categorical,
@@ -105,7 +107,7 @@ def test_series_not_equal_metadata_mismatch(kwargs):
@pytest.mark.parametrize("data1,data2", [(0.12345, 0.12346), (0.1235, 0.1236)])
-@pytest.mark.parametrize("dtype", ["float32", "float64"])
+@pytest.mark.parametrize("dtype", ["float32", "float64", "Float32"])
@pytest.mark.parametrize("decimals", [0, 1, 2, 3, 5, 10])
def test_less_precise(data1, data2, dtype, decimals):
rtol = 10 ** -decimals
@@ -115,7 +117,10 @@ def test_less_precise(data1, data2, dtype, decimals):
if (decimals == 5 or decimals == 10) or (
decimals >= 3 and abs(data1 - data2) >= 0.0005
):
- msg = "Series values are different"
+ if is_extension_array_dtype(dtype):
+ msg = "ExtensionArray are different"
+ else:
+ msg = "Series values are different"
with pytest.raises(AssertionError, match=msg):
tm.assert_series_equal(s1, s2, rtol=rtol)
else:
@@ -346,3 +351,21 @@ def test_assert_series_equal_identical_na(nulls_fixture):
# while we're here do Index too
idx = pd.Index(ser)
tm.assert_index_equal(idx, idx.copy(deep=True))
+
+
+def test_assert_frame_equal_attrs():
+ # GH#28283
+
+ expected_attrs = {"a": 1}
+
+ left = Series([1])
+ left.attrs.update(expected_attrs)
+
+ right = Series([1])
+
+ msg = f"{expected_attrs} != {{}}"
+ with pytest.raises(AssertionError, match=msg):
+ tm.assert_series_equal(left, right)
+
+ right.attrs.update(expected_attrs)
+ tm.assert_series_equal(left, right)
diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py
index 7b1aa93b5923a..bf1af0c83c93f 100644
--- a/pandas/tests/window/conftest.py
+++ b/pandas/tests/window/conftest.py
@@ -64,11 +64,15 @@ def arithmetic_win_operators(request):
@pytest.fixture(
params=[
- "sum",
- "mean",
- "median",
- "max",
- "min",
+ ["sum", {}],
+ ["mean", {}],
+ ["median", {}],
+ ["max", {}],
+ ["min", {}],
+ ["var", {}],
+ ["var", {"ddof": 0}],
+ ["std", {}],
+ ["std", {"ddof": 0}],
]
)
def arithmetic_numba_supported_operators(request):
diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
index df4666d16ace0..a7ad409683ec8 100644
--- a/pandas/tests/window/test_base_indexer.py
+++ b/pandas/tests/window/test_base_indexer.py
@@ -452,3 +452,46 @@ def test_rolling_groupby_with_fixed_forward_many(group_keys, window_size):
manual = manual.set_index(["a", "c"])["b"]
tm.assert_series_equal(result, manual)
+
+
+def test_unequal_start_end_bounds():
+ class CustomIndexer(BaseIndexer):
+ def get_window_bounds(self, num_values, min_periods, center, closed):
+ return np.array([1]), np.array([1, 2])
+
+ indexer = CustomIndexer()
+ roll = Series(1).rolling(indexer)
+ match = "start"
+ with pytest.raises(ValueError, match=match):
+ roll.mean()
+
+ with pytest.raises(ValueError, match=match):
+ next(iter(roll))
+
+ with pytest.raises(ValueError, match=match):
+ roll.corr(pairwise=True)
+
+ with pytest.raises(ValueError, match=match):
+ roll.cov(pairwise=True)
+
+
+def test_unequal_bounds_to_object():
+ # GH 44470
+ class CustomIndexer(BaseIndexer):
+ def get_window_bounds(self, num_values, min_periods, center, closed):
+ return np.array([1]), np.array([2])
+
+ indexer = CustomIndexer()
+ roll = Series([1, 1]).rolling(indexer)
+ match = "start and end"
+ with pytest.raises(ValueError, match=match):
+ roll.mean()
+
+ with pytest.raises(ValueError, match=match):
+ next(iter(roll))
+
+ with pytest.raises(ValueError, match=match):
+ roll.corr(pairwise=True)
+
+ with pytest.raises(ValueError, match=match):
+ roll.cov(pairwise=True)
diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py
index 9fd4bd422178a..8cae9c0182724 100644
--- a/pandas/tests/window/test_numba.py
+++ b/pandas/tests/window/test_numba.py
@@ -15,7 +15,7 @@
@td.skip_if_no("numba")
-@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
+@pytest.mark.filterwarnings("ignore:\n")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
class TestEngine:
@pytest.mark.parametrize("jit", [True, False])
@@ -50,16 +50,18 @@ def test_numba_vs_cython_rolling_methods(
self, data, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
- method = arithmetic_numba_supported_operators
+ method, kwargs = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
roll = data.rolling(2)
- result = getattr(roll, method)(engine="numba", engine_kwargs=engine_kwargs)
- expected = getattr(roll, method)(engine="cython")
+ result = getattr(roll, method)(
+ engine="numba", engine_kwargs=engine_kwargs, **kwargs
+ )
+ expected = getattr(roll, method)(engine="cython", **kwargs)
# Check the cache
- if method not in ("mean", "sum"):
+ if method not in ("mean", "sum", "var", "std"):
assert (
getattr(np, f"nan{method}"),
"Rolling_apply_single",
@@ -74,17 +76,19 @@ def test_numba_vs_cython_expanding_methods(
self, data, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
- method = arithmetic_numba_supported_operators
+ method, kwargs = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
data = DataFrame(np.eye(5))
expand = data.expanding()
- result = getattr(expand, method)(engine="numba", engine_kwargs=engine_kwargs)
- expected = getattr(expand, method)(engine="cython")
+ result = getattr(expand, method)(
+ engine="numba", engine_kwargs=engine_kwargs, **kwargs
+ )
+ expected = getattr(expand, method)(engine="cython", **kwargs)
# Check the cache
- if method not in ("mean", "sum"):
+ if method not in ("mean", "sum", "var", "std"):
assert (
getattr(np, f"nan{method}"),
"Expanding_apply_single",
@@ -265,7 +269,7 @@ def test_invalid_kwargs_nopython():
@td.skip_if_no("numba")
@pytest.mark.slow
-@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
+@pytest.mark.filterwarnings("ignore:\n")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
class TestTableMethod:
def test_table_series_valueerror(self):
@@ -282,19 +286,26 @@ def f(x):
def test_table_method_rolling_methods(
self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
- method = arithmetic_numba_supported_operators
+ method, kwargs = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
df = DataFrame(np.eye(3))
-
- result = getattr(
- df.rolling(2, method="table", axis=axis, min_periods=0), method
- )(engine_kwargs=engine_kwargs, engine="numba")
- expected = getattr(
- df.rolling(2, method="single", axis=axis, min_periods=0), method
- )(engine_kwargs=engine_kwargs, engine="numba")
- tm.assert_frame_equal(result, expected)
+ roll_table = df.rolling(2, method="table", axis=axis, min_periods=0)
+ if method in ("var", "std"):
+ with pytest.raises(NotImplementedError, match=f"{method} not supported"):
+ getattr(roll_table, method)(
+ engine_kwargs=engine_kwargs, engine="numba", **kwargs
+ )
+ else:
+ roll_single = df.rolling(2, method="single", axis=axis, min_periods=0)
+ result = getattr(roll_table, method)(
+ engine_kwargs=engine_kwargs, engine="numba", **kwargs
+ )
+ expected = getattr(roll_single, method)(
+ engine_kwargs=engine_kwargs, engine="numba", **kwargs
+ )
+ tm.assert_frame_equal(result, expected)
def test_table_method_rolling_apply(self, axis, nogil, parallel, nopython):
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
@@ -349,19 +360,26 @@ def f(x):
def test_table_method_expanding_methods(
self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
- method = arithmetic_numba_supported_operators
+ method, kwargs = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
df = DataFrame(np.eye(3))
-
- result = getattr(df.expanding(method="table", axis=axis), method)(
- engine_kwargs=engine_kwargs, engine="numba"
- )
- expected = getattr(df.expanding(method="single", axis=axis), method)(
- engine_kwargs=engine_kwargs, engine="numba"
- )
- tm.assert_frame_equal(result, expected)
+ expand_table = df.expanding(method="table", axis=axis)
+ if method in ("var", "std"):
+ with pytest.raises(NotImplementedError, match=f"{method} not supported"):
+ getattr(expand_table, method)(
+ engine_kwargs=engine_kwargs, engine="numba", **kwargs
+ )
+ else:
+ expand_single = df.expanding(method="single", axis=axis)
+ result = getattr(expand_table, method)(
+ engine_kwargs=engine_kwargs, engine="numba", **kwargs
+ )
+ expected = getattr(expand_single, method)(
+ engine_kwargs=engine_kwargs, engine="numba", **kwargs
+ )
+ tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data", [np.eye(3), np.ones((2, 3)), np.ones((3, 2))])
@pytest.mark.parametrize("method", ["mean", "sum"])
diff --git a/pandas/tests/window/test_online.py b/pandas/tests/window/test_online.py
index a21be0b8be049..80cf1c55958ee 100644
--- a/pandas/tests/window/test_online.py
+++ b/pandas/tests/window/test_online.py
@@ -11,7 +11,7 @@
@td.skip_if_no("numba")
-@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
+@pytest.mark.filterwarnings("ignore:\n")
class TestEWM:
def test_invalid_update(self):
df = DataFrame({"a": range(5), "b": range(5)})
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 27b06e78d8ce2..b60f2e60e1035 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -697,7 +697,7 @@ def test_rolling_count_default_min_periods_with_null_values(frame_or_series):
expected_counts = [1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0]
# GH 31302
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
result = frame_or_series(values).rolling(3).count()
expected = frame_or_series(expected_counts)
tm.assert_equal(result, expected)
@@ -1634,7 +1634,6 @@ def test_rolling_quantile_np_percentile():
tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
-@pytest.mark.xfail(reason="GH#44343", strict=False)
@pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1])
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
diff --git a/pandas/tseries/api.py b/pandas/tseries/api.py
index 2094791ecdc60..59666fa0048dd 100644
--- a/pandas/tseries/api.py
+++ b/pandas/tseries/api.py
@@ -2,7 +2,7 @@
Timeseries API
"""
-# flake8: noqa
+# flake8: noqa:F401
from pandas.tseries.frequencies import infer_freq
import pandas.tseries.offsets as offsets
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index c2d7f7b3f716c..415af96a29aa3 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -29,6 +29,7 @@
from pandas._libs.tslibs.parsing import get_rule_month
from pandas._typing import npt
from pandas.util._decorators import cache_readonly
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_datetime64_dtype,
@@ -116,7 +117,7 @@ def get_offset(name: str) -> DateOffset:
"get_offset is deprecated and will be removed in a future version, "
"use to_offset instead.",
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
return _get_offset(name)
@@ -398,10 +399,12 @@ def _is_business_daily(self) -> bool:
shifts = np.diff(self.index.asi8)
shifts = np.floor_divide(shifts, _ONE_DAY)
weekdays = np.mod(first_weekday + np.cumsum(shifts), 7)
- # error: Incompatible return value type (got "bool_", expected "bool")
- return np.all( # type: ignore[return-value]
- ((weekdays == 0) & (shifts == 3))
- | ((weekdays > 0) & (weekdays <= 4) & (shifts == 1))
+
+ return bool(
+ np.all(
+ ((weekdays == 0) & (shifts == 3))
+ | ((weekdays > 0) & (weekdays <= 4) & (shifts == 1))
+ )
)
def _get_wom_rule(self) -> str | None:
diff --git a/pandas/util/__init__.py b/pandas/util/__init__.py
index 35a88a802003e..7adfca73c2f1e 100644
--- a/pandas/util/__init__.py
+++ b/pandas/util/__init__.py
@@ -1,10 +1,10 @@
-from pandas.util._decorators import ( # noqa
+from pandas.util._decorators import ( # noqa:F401
Appender,
Substitution,
cache_readonly,
)
-from pandas.core.util.hashing import ( # noqa
+from pandas.core.util.hashing import ( # noqa:F401
hash_array,
hash_pandas_object,
)
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index d98b0d24d22b9..a936b8d1f585c 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -11,7 +11,7 @@
)
import warnings
-from pandas._libs.properties import cache_readonly # noqa
+from pandas._libs.properties import cache_readonly # noqa:F401
from pandas._typing import F
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index b78f1652dc419..f63212c777048 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -122,7 +122,7 @@ def _skip_if_no_scipy() -> bool:
)
-# TODO: return type, _pytest.mark.structures.MarkDecorator is not public
+# TODO(pytest#7469): return type, _pytest.mark.structures.MarkDecorator is not public
# https://github.com/pytest-dev/pytest/issues/7469
def skip_if_installed(package: str):
"""
@@ -138,7 +138,7 @@ def skip_if_installed(package: str):
)
-# TODO: return type, _pytest.mark.structures.MarkDecorator is not public
+# TODO(pytest#7469): return type, _pytest.mark.structures.MarkDecorator is not public
# https://github.com/pytest-dev/pytest/issues/7469
def skip_if_no(package: str, min_version: str | None = None):
"""
@@ -184,9 +184,6 @@ def skip_if_no(package: str, min_version: str | None = None):
skip_if_mpl = pytest.mark.skipif(not _skip_if_no_mpl(), reason="matplotlib is present")
skip_if_32bit = pytest.mark.skipif(not IS64, reason="skipping for 32 bit")
skip_if_windows = pytest.mark.skipif(is_platform_windows(), reason="Running on Windows")
-skip_if_windows_python_3 = pytest.mark.skipif(
- is_platform_windows(), reason="not used on win32"
-)
skip_if_has_locale = pytest.mark.skipif(
_skip_if_has_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}"
)
@@ -202,7 +199,7 @@ def skip_if_no(package: str, min_version: str | None = None):
)
-# TODO: return type, _pytest.mark.structures.MarkDecorator is not public
+# TODO(pytest#7469): return type, _pytest.mark.structures.MarkDecorator is not public
# https://github.com/pytest-dev/pytest/issues/7469
def skip_if_np_lt(ver_str: str, *args, reason: str | None = None):
if reason is None:
@@ -285,7 +282,12 @@ def async_mark():
return async_mark
-skip_array_manager_not_yet_implemented = pytest.mark.skipif(
+def mark_array_manager_not_yet_implemented(request):
+ mark = pytest.mark.xfail(reason="Not yet implemented for ArrayManager")
+ request.node.add_marker(mark)
+
+
+skip_array_manager_not_yet_implemented = pytest.mark.xfail(
get_option("mode.data_manager") == "array",
reason="Not yet implemented for ArrayManager",
)
diff --git a/pandas/util/_tester.py b/pandas/util/_tester.py
index 1bdf0d8483c76..541776619a2d3 100644
--- a/pandas/util/_tester.py
+++ b/pandas/util/_tester.py
@@ -13,7 +13,7 @@ def test(extra_args=None):
except ImportError as err:
raise ImportError("Need pytest>=5.0.1 to run tests") from err
try:
- import hypothesis # noqa
+ import hypothesis # noqa:F401
except ImportError as err:
raise ImportError("Need hypothesis>=3.58 to run tests") from err
cmd = ["--skip-slow", "--skip-network", "--skip-db"]
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index f8bd1ec7bc96a..ee54b1b2074cb 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -12,6 +12,8 @@
import numpy as np
+from pandas.util._exceptions import find_stack_level
+
from pandas.core.dtypes.common import (
is_bool,
is_integer,
@@ -339,7 +341,7 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
"positional arguments for 'index' or 'columns' will raise "
"a 'TypeError'."
)
- warnings.warn(msg, FutureWarning, stacklevel=4)
+ warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
out[data._get_axis_name(0)] = args[0]
out[data._get_axis_name(1)] = args[1]
else:
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index af9fe4846b27d..db9bfc274cd78 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1,6 +1,8 @@
import warnings
-from pandas._testing import * # noqa
+from pandas.util._exceptions import find_stack_level
+
+from pandas._testing import * # noqa:F401,F403,PDF014
warnings.warn(
(
@@ -8,5 +10,5 @@
"public API at pandas.testing instead."
),
FutureWarning,
- stacklevel=2,
+ stacklevel=find_stack_level(),
)
diff --git a/pyproject.toml b/pyproject.toml
index 98ab112ab459a..0c3e078d8761a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@
requires = [
"setuptools>=51.0.0",
"wheel",
- "Cython>=0.29.24,<3", # Note: sync with setup.py
+ "Cython>=0.29.24,<3", # Note: sync with setup.py, environment.yml and asv.conf.json
"oldest-supported-numpy>=0.10"
]
# uncomment to enable pep517 after versioneer problem is fixed.
diff --git a/setup.py b/setup.py
index f5151621c9efe..ca71510c5f051 100755
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,8 @@ def is_platform_mac():
return sys.platform == "darwin"
-min_cython_ver = "0.29.24" # note: sync with pyproject.toml
+# note: sync with pyproject.toml, environment.yml and asv.conf.json
+min_cython_ver = "0.29.24"
try:
from Cython import (
| - [x] ~~closes~~ Incremental progress on #28283
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/44166 | 2021-10-24T16:36:46Z | 2021-11-28T18:18:31Z | null | 2021-11-28T18:18:31Z |
TST: Adding tests for checking Boolean series and df as indexes for series | diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 6c3587c7eeada..d77f831bee8bc 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -377,3 +377,17 @@ def test_frozenset_index():
assert s[idx1] == 2
s[idx1] = 3
assert s[idx1] == 3
+
+
+def test_boolean_index():
+ # GH18579
+ s1 = Series([1, 2, 3], index=[4, 5, 6])
+ s2 = Series([1, 3, 2], index=s1 == 2)
+ tm.assert_series_equal(Series([1, 3, 2], [False, True, False]), s2)
+
+
+def test_index_ndim_gt_1_raises():
+ # GH18579
+ df = DataFrame([[1, 2], [3, 4], [5, 6]], index=[3, 6, 9])
+ with pytest.raises(ValueError, match="Index data must be 1-dimensional"):
+ Series([1, 3, 2], index=df)
| - [ ] closes #18579
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] After discussion in #18579 the current tests were suggested
| https://api.github.com/repos/pandas-dev/pandas/pulls/44165 | 2021-10-24T15:37:53Z | 2021-10-24T22:07:43Z | 2021-10-24T22:07:43Z | 2021-10-24T22:07:47Z |
TST: Move some consistency rolling tests to misc rolling functions | diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py
index 7ec5846ef4acf..bda8ba05d4024 100644
--- a/pandas/tests/window/moments/test_moments_consistency_rolling.py
+++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py
@@ -1,17 +1,7 @@
-from datetime import datetime
-
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
-from pandas import (
- DataFrame,
- DatetimeIndex,
- Index,
- MultiIndex,
- Series,
-)
+from pandas import Series
import pandas._testing as tm
@@ -24,62 +14,6 @@ def _rolling_consistency_cases():
yield window, min_periods, center
-# binary moments
-def test_rolling_cov(series):
- A = series
- B = A + np.random.randn(len(A))
-
- result = A.rolling(window=50, min_periods=25).cov(B)
- tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
-
-
-def test_rolling_corr(series):
- A = series
- B = A + np.random.randn(len(A))
-
- result = A.rolling(window=50, min_periods=25).corr(B)
- tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
-
- # test for correct bias correction
- a = tm.makeTimeSeries()
- b = tm.makeTimeSeries()
- a[:5] = np.nan
- b[:10] = np.nan
-
- result = a.rolling(window=len(a), min_periods=1).corr(b)
- tm.assert_almost_equal(result[-1], a.corr(b))
-
-
-@pytest.mark.parametrize("func", ["cov", "corr"])
-def test_rolling_pairwise_cov_corr(func, frame):
- result = getattr(frame.rolling(window=10, min_periods=5), func)()
- result = result.loc[(slice(None), 1), 5]
- result.index = result.index.droplevel(1)
- expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
- tm.assert_series_equal(result, expected, check_names=False)
-
-
-@pytest.mark.parametrize("method", ["corr", "cov"])
-def test_flex_binary_frame(method, frame):
- series = frame[1]
-
- res = getattr(series.rolling(window=10), method)(frame)
- res2 = getattr(frame.rolling(window=10), method)(series)
- exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
-
- tm.assert_frame_equal(res, exp)
- tm.assert_frame_equal(res2, exp)
-
- frame2 = frame.copy()
- frame2.values[:] = np.random.randn(*frame2.shape)
-
- res3 = getattr(frame.rolling(window=10), method)(frame2)
- exp = DataFrame(
- {k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}
- )
- tm.assert_frame_equal(res3, exp)
-
-
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
@@ -123,375 +57,6 @@ def test_rolling_apply_consistency_sum_no_nans(
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
-@pytest.mark.parametrize("window", range(7))
-def test_rolling_corr_with_zero_variance(window):
- # GH 18430
- s = Series(np.zeros(20))
- other = Series(np.arange(20))
-
- assert s.rolling(window=window).corr(other=other).isna().all()
-
-
-def test_corr_sanity():
- # GH 3155
- df = DataFrame(
- np.array(
- [
- [0.87024726, 0.18505595],
- [0.64355431, 0.3091617],
- [0.92372966, 0.50552513],
- [0.00203756, 0.04520709],
- [0.84780328, 0.33394331],
- [0.78369152, 0.63919667],
- ]
- )
- )
-
- res = df[0].rolling(5, center=True).corr(df[1])
- assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
-
- df = DataFrame(np.random.rand(30, 2))
- res = df[0].rolling(5, center=True).corr(df[1])
- assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
-
-
-def test_rolling_cov_diff_length():
- # GH 7512
- s1 = Series([1, 2, 3], index=[0, 1, 2])
- s2 = Series([1, 3], index=[0, 2])
- result = s1.rolling(window=3, min_periods=2).cov(s2)
- expected = Series([None, None, 2.0])
- tm.assert_series_equal(result, expected)
-
- s2a = Series([1, None, 3], index=[0, 1, 2])
- result = s1.rolling(window=3, min_periods=2).cov(s2a)
- tm.assert_series_equal(result, expected)
-
-
-def test_rolling_corr_diff_length():
- # GH 7512
- s1 = Series([1, 2, 3], index=[0, 1, 2])
- s2 = Series([1, 3], index=[0, 2])
- result = s1.rolling(window=3, min_periods=2).corr(s2)
- expected = Series([None, None, 1.0])
- tm.assert_series_equal(result, expected)
-
- s2a = Series([1, None, 3], index=[0, 1, 2])
- result = s1.rolling(window=3, min_periods=2).corr(s2a)
- tm.assert_series_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "f",
- [
- lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
- lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
- lambda x: x.rolling(window=10, min_periods=5).max(),
- lambda x: x.rolling(window=10, min_periods=5).min(),
- lambda x: x.rolling(window=10, min_periods=5).sum(),
- lambda x: x.rolling(window=10, min_periods=5).mean(),
- lambda x: x.rolling(window=10, min_periods=5).std(),
- lambda x: x.rolling(window=10, min_periods=5).var(),
- lambda x: x.rolling(window=10, min_periods=5).skew(),
- lambda x: x.rolling(window=10, min_periods=5).kurt(),
- lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
- lambda x: x.rolling(window=10, min_periods=5).median(),
- lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
- lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
- pytest.param(
- lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
- marks=td.skip_if_no_scipy,
- ),
- ],
-)
-def test_rolling_functions_window_non_shrinkage(f):
- # GH 7764
- s = Series(range(4))
- s_expected = Series(np.nan, index=s.index)
- df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
- df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
-
- s_result = f(s)
- tm.assert_series_equal(s_result, s_expected)
-
- df_result = f(df)
- tm.assert_frame_equal(df_result, df_expected)
-
-
-@pytest.mark.parametrize(
- "f",
- [
- lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
- lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
- ],
-)
-def test_rolling_functions_window_non_shrinkage_binary(f):
-
- # corr/cov return a MI DataFrame
- df = DataFrame(
- [[1, 5], [3, 2], [3, 9], [-1, 0]],
- columns=Index(["A", "B"], name="foo"),
- index=Index(range(4), name="bar"),
- )
- df_expected = DataFrame(
- columns=Index(["A", "B"], name="foo"),
- index=MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),
- dtype="float64",
- )
- df_result = f(df)
- tm.assert_frame_equal(df_result, df_expected)
-
-
-def test_rolling_skew_edge_cases():
-
- all_nan = Series([np.NaN] * 5)
-
- # yields all NaN (0 variance)
- d = Series([1] * 5)
- x = d.rolling(window=5).skew()
- tm.assert_series_equal(all_nan, x)
-
- # yields all NaN (window too small)
- d = Series(np.random.randn(5))
- x = d.rolling(window=2).skew()
- tm.assert_series_equal(all_nan, x)
-
- # yields [NaN, NaN, NaN, 0.177994, 1.548824]
- d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
- expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
- x = d.rolling(window=4).skew()
- tm.assert_series_equal(expected, x)
-
-
-def test_rolling_kurt_edge_cases():
-
- all_nan = Series([np.NaN] * 5)
-
- # yields all NaN (0 variance)
- d = Series([1] * 5)
- x = d.rolling(window=5).kurt()
- tm.assert_series_equal(all_nan, x)
-
- # yields all NaN (window too small)
- d = Series(np.random.randn(5))
- x = d.rolling(window=3).kurt()
- tm.assert_series_equal(all_nan, x)
-
- # yields [NaN, NaN, NaN, 1.224307, 2.671499]
- d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
- expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])
- x = d.rolling(window=4).kurt()
- tm.assert_series_equal(expected, x)
-
-
-def test_rolling_skew_eq_value_fperr():
- # #18804 all rolling skew for all equal values should return Nan
- a = Series([1.1] * 15).rolling(window=10).skew()
- assert np.isnan(a).all()
-
-
-def test_rolling_kurt_eq_value_fperr():
- # #18804 all rolling kurt for all equal values should return Nan
- a = Series([1.1] * 15).rolling(window=10).kurt()
- assert np.isnan(a).all()
-
-
-def test_rolling_max_gh6297():
- """Replicate result expected in GH #6297"""
- indices = [datetime(1975, 1, i) for i in range(1, 6)]
- # So that we can have 2 datapoints on one of the days
- indices.append(datetime(1975, 1, 3, 6, 0))
- series = Series(range(1, 7), index=indices)
- # Use floats instead of ints as values
- series = series.map(lambda x: float(x))
- # Sort chronologically
- series = series.sort_index()
-
- expected = Series(
- [1.0, 2.0, 6.0, 4.0, 5.0],
- index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
- )
- x = series.resample("D").max().rolling(window=1).max()
- tm.assert_series_equal(expected, x)
-
-
-def test_rolling_max_resample():
-
- indices = [datetime(1975, 1, i) for i in range(1, 6)]
- # So that we can have 3 datapoints on last day (4, 10, and 20)
- indices.append(datetime(1975, 1, 5, 1))
- indices.append(datetime(1975, 1, 5, 2))
- series = Series(list(range(0, 5)) + [10, 20], index=indices)
- # Use floats instead of ints as values
- series = series.map(lambda x: float(x))
- # Sort chronologically
- series = series.sort_index()
-
- # Default how should be max
- expected = Series(
- [0.0, 1.0, 2.0, 3.0, 20.0],
- index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
- )
- x = series.resample("D").max().rolling(window=1).max()
- tm.assert_series_equal(expected, x)
-
- # Now specify median (10.0)
- expected = Series(
- [0.0, 1.0, 2.0, 3.0, 10.0],
- index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
- )
- x = series.resample("D").median().rolling(window=1).max()
- tm.assert_series_equal(expected, x)
-
- # Now specify mean (4+10+20)/3
- v = (4.0 + 10.0 + 20.0) / 3.0
- expected = Series(
- [0.0, 1.0, 2.0, 3.0, v],
- index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
- )
- x = series.resample("D").mean().rolling(window=1).max()
- tm.assert_series_equal(expected, x)
-
-
-def test_rolling_min_resample():
-
- indices = [datetime(1975, 1, i) for i in range(1, 6)]
- # So that we can have 3 datapoints on last day (4, 10, and 20)
- indices.append(datetime(1975, 1, 5, 1))
- indices.append(datetime(1975, 1, 5, 2))
- series = Series(list(range(0, 5)) + [10, 20], index=indices)
- # Use floats instead of ints as values
- series = series.map(lambda x: float(x))
- # Sort chronologically
- series = series.sort_index()
-
- # Default how should be min
- expected = Series(
- [0.0, 1.0, 2.0, 3.0, 4.0],
- index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
- )
- r = series.resample("D").min().rolling(window=1)
- tm.assert_series_equal(expected, r.min())
-
-
-def test_rolling_median_resample():
-
- indices = [datetime(1975, 1, i) for i in range(1, 6)]
- # So that we can have 3 datapoints on last day (4, 10, and 20)
- indices.append(datetime(1975, 1, 5, 1))
- indices.append(datetime(1975, 1, 5, 2))
- series = Series(list(range(0, 5)) + [10, 20], index=indices)
- # Use floats instead of ints as values
- series = series.map(lambda x: float(x))
- # Sort chronologically
- series = series.sort_index()
-
- # Default how should be median
- expected = Series(
- [0.0, 1.0, 2.0, 3.0, 10],
- index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
- )
- x = series.resample("D").median().rolling(window=1).median()
- tm.assert_series_equal(expected, x)
-
-
-def test_rolling_median_memory_error():
- # GH11722
- n = 20000
- Series(np.random.randn(n)).rolling(window=2, center=False).median()
- Series(np.random.randn(n)).rolling(window=2, center=False).median()
-
-
-@pytest.mark.parametrize(
- "data_type",
- [np.dtype(f"f{width}") for width in [4, 8]]
- + [np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"],
-)
-def test_rolling_min_max_numeric_types(data_type):
- # GH12373
-
- # Just testing that these don't throw exceptions and that
- # the return type is float64. Other tests will cover quantitative
- # correctness
- result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max()
- assert result.dtypes[0] == np.dtype("f8")
- result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min()
- assert result.dtypes[0] == np.dtype("f8")
-
-
-@pytest.mark.parametrize(
- "f",
- [
- lambda x: x.rolling(window=10, min_periods=0).count(),
- lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
- lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
- lambda x: x.rolling(window=10, min_periods=5).max(),
- lambda x: x.rolling(window=10, min_periods=5).min(),
- lambda x: x.rolling(window=10, min_periods=5).sum(),
- lambda x: x.rolling(window=10, min_periods=5).mean(),
- lambda x: x.rolling(window=10, min_periods=5).std(),
- lambda x: x.rolling(window=10, min_periods=5).var(),
- lambda x: x.rolling(window=10, min_periods=5).skew(),
- lambda x: x.rolling(window=10, min_periods=5).kurt(),
- lambda x: x.rolling(window=10, min_periods=5).quantile(0.5),
- lambda x: x.rolling(window=10, min_periods=5).median(),
- lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
- lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
- pytest.param(
- lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
- marks=td.skip_if_no_scipy,
- ),
- ],
-)
-def test_moment_functions_zero_length(f):
- # GH 8056
- s = Series(dtype=np.float64)
- s_expected = s
- df1 = DataFrame()
- df1_expected = df1
- df2 = DataFrame(columns=["a"])
- df2["a"] = df2["a"].astype("float64")
- df2_expected = df2
-
- s_result = f(s)
- tm.assert_series_equal(s_result, s_expected)
-
- df1_result = f(df1)
- tm.assert_frame_equal(df1_result, df1_expected)
-
- df2_result = f(df2)
- tm.assert_frame_equal(df2_result, df2_expected)
-
-
-@pytest.mark.parametrize(
- "f",
- [
- lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
- lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
- ],
-)
-def test_moment_functions_zero_length_pairwise(f):
-
- df1 = DataFrame()
- df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
- df2["a"] = df2["a"].astype("float64")
-
- df1_expected = DataFrame(
- index=MultiIndex.from_product([df1.index, df1.columns]), columns=Index([])
- )
- df2_expected = DataFrame(
- index=MultiIndex.from_product([df2.index, df2.columns], names=["bar", "foo"]),
- columns=Index(["a"], name="foo"),
- dtype="float64",
- )
-
- df1_result = f(df1)
- tm.assert_frame_equal(df1_result, df1_expected)
-
- df2_result = f(df2)
- tm.assert_frame_equal(df2_result, df2_expected)
-
-
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
diff --git a/pandas/tests/window/test_pairwise.py b/pandas/tests/window/test_pairwise.py
index f43d7ec99e312..77ff6ae03d836 100644
--- a/pandas/tests/window/test_pairwise.py
+++ b/pandas/tests/window/test_pairwise.py
@@ -5,6 +5,7 @@
from pandas import (
DataFrame,
+ Index,
MultiIndex,
Series,
date_range,
@@ -13,6 +14,172 @@
from pandas.core.algorithms import safe_sort
+def test_rolling_cov(series):
+ A = series
+ B = A + np.random.randn(len(A))
+
+ result = A.rolling(window=50, min_periods=25).cov(B)
+ tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
+
+
+def test_rolling_corr(series):
+ A = series
+ B = A + np.random.randn(len(A))
+
+ result = A.rolling(window=50, min_periods=25).corr(B)
+ tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
+
+ # test for correct bias correction
+ a = tm.makeTimeSeries()
+ b = tm.makeTimeSeries()
+ a[:5] = np.nan
+ b[:10] = np.nan
+
+ result = a.rolling(window=len(a), min_periods=1).corr(b)
+ tm.assert_almost_equal(result[-1], a.corr(b))
+
+
+@pytest.mark.parametrize("func", ["cov", "corr"])
+def test_rolling_pairwise_cov_corr(func, frame):
+ result = getattr(frame.rolling(window=10, min_periods=5), func)()
+ result = result.loc[(slice(None), 1), 5]
+ result.index = result.index.droplevel(1)
+ expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
+ tm.assert_series_equal(result, expected, check_names=False)
+
+
+@pytest.mark.parametrize("method", ["corr", "cov"])
+def test_flex_binary_frame(method, frame):
+ series = frame[1]
+
+ res = getattr(series.rolling(window=10), method)(frame)
+ res2 = getattr(frame.rolling(window=10), method)(series)
+ exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
+
+ tm.assert_frame_equal(res, exp)
+ tm.assert_frame_equal(res2, exp)
+
+ frame2 = frame.copy()
+ frame2.values[:] = np.random.randn(*frame2.shape)
+
+ res3 = getattr(frame.rolling(window=10), method)(frame2)
+ exp = DataFrame(
+ {k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}
+ )
+ tm.assert_frame_equal(res3, exp)
+
+
+@pytest.mark.parametrize("window", range(7))
+def test_rolling_corr_with_zero_variance(window):
+ # GH 18430
+ s = Series(np.zeros(20))
+ other = Series(np.arange(20))
+
+ assert s.rolling(window=window).corr(other=other).isna().all()
+
+
+def test_corr_sanity():
+ # GH 3155
+ df = DataFrame(
+ np.array(
+ [
+ [0.87024726, 0.18505595],
+ [0.64355431, 0.3091617],
+ [0.92372966, 0.50552513],
+ [0.00203756, 0.04520709],
+ [0.84780328, 0.33394331],
+ [0.78369152, 0.63919667],
+ ]
+ )
+ )
+
+ res = df[0].rolling(5, center=True).corr(df[1])
+ assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
+
+ df = DataFrame(np.random.rand(30, 2))
+ res = df[0].rolling(5, center=True).corr(df[1])
+ assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
+
+
+def test_rolling_cov_diff_length():
+ # GH 7512
+ s1 = Series([1, 2, 3], index=[0, 1, 2])
+ s2 = Series([1, 3], index=[0, 2])
+ result = s1.rolling(window=3, min_periods=2).cov(s2)
+ expected = Series([None, None, 2.0])
+ tm.assert_series_equal(result, expected)
+
+ s2a = Series([1, None, 3], index=[0, 1, 2])
+ result = s1.rolling(window=3, min_periods=2).cov(s2a)
+ tm.assert_series_equal(result, expected)
+
+
+def test_rolling_corr_diff_length():
+ # GH 7512
+ s1 = Series([1, 2, 3], index=[0, 1, 2])
+ s2 = Series([1, 3], index=[0, 2])
+ result = s1.rolling(window=3, min_periods=2).corr(s2)
+ expected = Series([None, None, 1.0])
+ tm.assert_series_equal(result, expected)
+
+ s2a = Series([1, None, 3], index=[0, 1, 2])
+ result = s1.rolling(window=3, min_periods=2).corr(s2a)
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "f",
+ [
+ lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
+ lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
+ ],
+)
+def test_rolling_functions_window_non_shrinkage_binary(f):
+
+ # corr/cov return a MI DataFrame
+ df = DataFrame(
+ [[1, 5], [3, 2], [3, 9], [-1, 0]],
+ columns=Index(["A", "B"], name="foo"),
+ index=Index(range(4), name="bar"),
+ )
+ df_expected = DataFrame(
+ columns=Index(["A", "B"], name="foo"),
+ index=MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),
+ dtype="float64",
+ )
+ df_result = f(df)
+ tm.assert_frame_equal(df_result, df_expected)
+
+
+@pytest.mark.parametrize(
+ "f",
+ [
+ lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
+ lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
+ ],
+)
+def test_moment_functions_zero_length_pairwise(f):
+
+ df1 = DataFrame()
+ df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
+ df2["a"] = df2["a"].astype("float64")
+
+ df1_expected = DataFrame(
+ index=MultiIndex.from_product([df1.index, df1.columns]), columns=Index([])
+ )
+ df2_expected = DataFrame(
+ index=MultiIndex.from_product([df2.index, df2.columns], names=["bar", "foo"]),
+ columns=Index(["a"], name="foo"),
+ dtype="float64",
+ )
+
+ df1_result = f(df1)
+ tm.assert_frame_equal(df1_result, df1_expected)
+
+ df2_result = f(df2)
+ tm.assert_frame_equal(df2_result, df2_expected)
+
+
class TestPairwise:
# GH 7738
diff --git a/pandas/tests/window/test_rolling_functions.py b/pandas/tests/window/test_rolling_functions.py
index b25b3c3b17637..c788b3d88cb63 100644
--- a/pandas/tests/window/test_rolling_functions.py
+++ b/pandas/tests/window/test_rolling_functions.py
@@ -1,8 +1,13 @@
+from datetime import datetime
+
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
from pandas import (
DataFrame,
+ DatetimeIndex,
Series,
concat,
isna,
@@ -316,3 +321,207 @@ def test_center_reindex_frame(frame, roll_func, kwargs, minp, fill_value):
if fill_value is not None:
frame_xp = frame_xp.fillna(fill_value)
tm.assert_frame_equal(frame_xp, frame_rs)
+
+
+@pytest.mark.parametrize(
+ "f",
+ [
+ lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
+ lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
+ lambda x: x.rolling(window=10, min_periods=5).max(),
+ lambda x: x.rolling(window=10, min_periods=5).min(),
+ lambda x: x.rolling(window=10, min_periods=5).sum(),
+ lambda x: x.rolling(window=10, min_periods=5).mean(),
+ lambda x: x.rolling(window=10, min_periods=5).std(),
+ lambda x: x.rolling(window=10, min_periods=5).var(),
+ lambda x: x.rolling(window=10, min_periods=5).skew(),
+ lambda x: x.rolling(window=10, min_periods=5).kurt(),
+ lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
+ lambda x: x.rolling(window=10, min_periods=5).median(),
+ lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
+ lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
+ pytest.param(
+ lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
+ marks=td.skip_if_no_scipy,
+ ),
+ ],
+)
+def test_rolling_functions_window_non_shrinkage(f):
+ # GH 7764
+ s = Series(range(4))
+ s_expected = Series(np.nan, index=s.index)
+ df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
+ df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
+
+ s_result = f(s)
+ tm.assert_series_equal(s_result, s_expected)
+
+ df_result = f(df)
+ tm.assert_frame_equal(df_result, df_expected)
+
+
+def test_rolling_max_gh6297():
+ """Replicate result expected in GH #6297"""
+ indices = [datetime(1975, 1, i) for i in range(1, 6)]
+ # So that we can have 2 datapoints on one of the days
+ indices.append(datetime(1975, 1, 3, 6, 0))
+ series = Series(range(1, 7), index=indices)
+ # Use floats instead of ints as values
+ series = series.map(lambda x: float(x))
+ # Sort chronologically
+ series = series.sort_index()
+
+ expected = Series(
+ [1.0, 2.0, 6.0, 4.0, 5.0],
+ index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
+ )
+ x = series.resample("D").max().rolling(window=1).max()
+ tm.assert_series_equal(expected, x)
+
+
+def test_rolling_max_resample():
+
+ indices = [datetime(1975, 1, i) for i in range(1, 6)]
+ # So that we can have 3 datapoints on last day (4, 10, and 20)
+ indices.append(datetime(1975, 1, 5, 1))
+ indices.append(datetime(1975, 1, 5, 2))
+ series = Series(list(range(0, 5)) + [10, 20], index=indices)
+ # Use floats instead of ints as values
+ series = series.map(lambda x: float(x))
+ # Sort chronologically
+ series = series.sort_index()
+
+ # Default how should be max
+ expected = Series(
+ [0.0, 1.0, 2.0, 3.0, 20.0],
+ index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
+ )
+ x = series.resample("D").max().rolling(window=1).max()
+ tm.assert_series_equal(expected, x)
+
+ # Now specify median (10.0)
+ expected = Series(
+ [0.0, 1.0, 2.0, 3.0, 10.0],
+ index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
+ )
+ x = series.resample("D").median().rolling(window=1).max()
+ tm.assert_series_equal(expected, x)
+
+ # Now specify mean (4+10+20)/3
+ v = (4.0 + 10.0 + 20.0) / 3.0
+ expected = Series(
+ [0.0, 1.0, 2.0, 3.0, v],
+ index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
+ )
+ x = series.resample("D").mean().rolling(window=1).max()
+ tm.assert_series_equal(expected, x)
+
+
+def test_rolling_min_resample():
+
+ indices = [datetime(1975, 1, i) for i in range(1, 6)]
+ # So that we can have 3 datapoints on last day (4, 10, and 20)
+ indices.append(datetime(1975, 1, 5, 1))
+ indices.append(datetime(1975, 1, 5, 2))
+ series = Series(list(range(0, 5)) + [10, 20], index=indices)
+ # Use floats instead of ints as values
+ series = series.map(lambda x: float(x))
+ # Sort chronologically
+ series = series.sort_index()
+
+ # Default how should be min
+ expected = Series(
+ [0.0, 1.0, 2.0, 3.0, 4.0],
+ index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
+ )
+ r = series.resample("D").min().rolling(window=1)
+ tm.assert_series_equal(expected, r.min())
+
+
+def test_rolling_median_resample():
+
+ indices = [datetime(1975, 1, i) for i in range(1, 6)]
+ # So that we can have 3 datapoints on last day (4, 10, and 20)
+ indices.append(datetime(1975, 1, 5, 1))
+ indices.append(datetime(1975, 1, 5, 2))
+ series = Series(list(range(0, 5)) + [10, 20], index=indices)
+ # Use floats instead of ints as values
+ series = series.map(lambda x: float(x))
+ # Sort chronologically
+ series = series.sort_index()
+
+ # Default how should be median
+ expected = Series(
+ [0.0, 1.0, 2.0, 3.0, 10],
+ index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
+ )
+ x = series.resample("D").median().rolling(window=1).median()
+ tm.assert_series_equal(expected, x)
+
+
+def test_rolling_median_memory_error():
+ # GH11722
+ n = 20000
+ Series(np.random.randn(n)).rolling(window=2, center=False).median()
+ Series(np.random.randn(n)).rolling(window=2, center=False).median()
+
+
+@pytest.mark.parametrize(
+ "data_type",
+ [np.dtype(f"f{width}") for width in [4, 8]]
+ + [np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"],
+)
+def test_rolling_min_max_numeric_types(data_type):
+ # GH12373
+
+ # Just testing that these don't throw exceptions and that
+ # the return type is float64. Other tests will cover quantitative
+ # correctness
+ result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max()
+ assert result.dtypes[0] == np.dtype("f8")
+ result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min()
+ assert result.dtypes[0] == np.dtype("f8")
+
+
+@pytest.mark.parametrize(
+ "f",
+ [
+ lambda x: x.rolling(window=10, min_periods=0).count(),
+ lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
+ lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
+ lambda x: x.rolling(window=10, min_periods=5).max(),
+ lambda x: x.rolling(window=10, min_periods=5).min(),
+ lambda x: x.rolling(window=10, min_periods=5).sum(),
+ lambda x: x.rolling(window=10, min_periods=5).mean(),
+ lambda x: x.rolling(window=10, min_periods=5).std(),
+ lambda x: x.rolling(window=10, min_periods=5).var(),
+ lambda x: x.rolling(window=10, min_periods=5).skew(),
+ lambda x: x.rolling(window=10, min_periods=5).kurt(),
+ lambda x: x.rolling(window=10, min_periods=5).quantile(0.5),
+ lambda x: x.rolling(window=10, min_periods=5).median(),
+ lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
+ lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
+ pytest.param(
+ lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
+ marks=td.skip_if_no_scipy,
+ ),
+ ],
+)
+def test_moment_functions_zero_length(f):
+ # GH 8056
+ s = Series(dtype=np.float64)
+ s_expected = s
+ df1 = DataFrame()
+ df1_expected = df1
+ df2 = DataFrame(columns=["a"])
+ df2["a"] = df2["a"].astype("float64")
+ df2_expected = df2
+
+ s_result = f(s)
+ tm.assert_series_equal(s_result, s_expected)
+
+ df1_result = f(df1)
+ tm.assert_frame_equal(df1_result, df1_expected)
+
+ df2_result = f(df2)
+ tm.assert_frame_equal(df2_result, df2_expected)
diff --git a/pandas/tests/window/test_rolling_skew_kurt.py b/pandas/tests/window/test_rolling_skew_kurt.py
index 34d5f686eb853..2c275ed6f4a28 100644
--- a/pandas/tests/window/test_rolling_skew_kurt.py
+++ b/pandas/tests/window/test_rolling_skew_kurt.py
@@ -168,3 +168,57 @@ def test_center_reindex_frame(frame, roll_func):
)
frame_rs = getattr(frame.rolling(window=25, center=True), roll_func)()
tm.assert_frame_equal(frame_xp, frame_rs)
+
+
+def test_rolling_skew_edge_cases():
+
+ all_nan = Series([np.NaN] * 5)
+
+ # yields all NaN (0 variance)
+ d = Series([1] * 5)
+ x = d.rolling(window=5).skew()
+ tm.assert_series_equal(all_nan, x)
+
+ # yields all NaN (window too small)
+ d = Series(np.random.randn(5))
+ x = d.rolling(window=2).skew()
+ tm.assert_series_equal(all_nan, x)
+
+ # yields [NaN, NaN, NaN, 0.177994, 1.548824]
+ d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
+ expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
+ x = d.rolling(window=4).skew()
+ tm.assert_series_equal(expected, x)
+
+
+def test_rolling_kurt_edge_cases():
+
+ all_nan = Series([np.NaN] * 5)
+
+ # yields all NaN (0 variance)
+ d = Series([1] * 5)
+ x = d.rolling(window=5).kurt()
+ tm.assert_series_equal(all_nan, x)
+
+ # yields all NaN (window too small)
+ d = Series(np.random.randn(5))
+ x = d.rolling(window=3).kurt()
+ tm.assert_series_equal(all_nan, x)
+
+ # yields [NaN, NaN, NaN, 1.224307, 2.671499]
+ d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
+ expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])
+ x = d.rolling(window=4).kurt()
+ tm.assert_series_equal(expected, x)
+
+
+def test_rolling_skew_eq_value_fperr():
+ # #18804 all rolling skew for all equal values should return Nan
+ a = Series([1.1] * 15).rolling(window=10).skew()
+ assert np.isnan(a).all()
+
+
+def test_rolling_kurt_eq_value_fperr():
+ # #18804 all rolling kurt for all equal values should return Nan
+ a = Series([1.1] * 15).rolling(window=10).kurt()
+ assert np.isnan(a).all()
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
Since some tests are not running on windows/moments (#37535), moving the "consistency" rolling tests to the relevant files.
| https://api.github.com/repos/pandas-dev/pandas/pulls/44164 | 2021-10-24T03:38:42Z | 2021-10-24T22:04:52Z | 2021-10-24T22:04:52Z | 2021-10-24T22:05:30Z |
TST: Move some consistency expanding tests to test_expanding | diff --git a/pandas/tests/window/moments/test_moments_consistency_expanding.py b/pandas/tests/window/moments/test_moments_consistency_expanding.py
index df3e79fb79eca..d0fe7bf9fc2d2 100644
--- a/pandas/tests/window/moments/test_moments_consistency_expanding.py
+++ b/pandas/tests/window/moments/test_moments_consistency_expanding.py
@@ -1,171 +1,10 @@
import numpy as np
import pytest
-from pandas import (
- DataFrame,
- Index,
- MultiIndex,
- Series,
- isna,
- notna,
-)
+from pandas import Series
import pandas._testing as tm
-def test_expanding_corr(series):
- A = series.dropna()
- B = (A + np.random.randn(len(A)))[:-5]
-
- result = A.expanding().corr(B)
-
- rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)
-
- tm.assert_almost_equal(rolling_result, result)
-
-
-def test_expanding_count(series):
- result = series.expanding(min_periods=0).count()
- tm.assert_almost_equal(
- result, series.rolling(window=len(series), min_periods=0).count()
- )
-
-
-def test_expanding_quantile(series):
- result = series.expanding().quantile(0.5)
-
- rolling_result = series.rolling(window=len(series), min_periods=1).quantile(0.5)
-
- tm.assert_almost_equal(result, rolling_result)
-
-
-def test_expanding_cov(series):
- A = series
- B = (A + np.random.randn(len(A)))[:-5]
-
- result = A.expanding().cov(B)
-
- rolling_result = A.rolling(window=len(A), min_periods=1).cov(B)
-
- tm.assert_almost_equal(rolling_result, result)
-
-
-def test_expanding_cov_pairwise(frame):
- result = frame.expanding().cov()
-
- rolling_result = frame.rolling(window=len(frame), min_periods=1).cov()
-
- tm.assert_frame_equal(result, rolling_result)
-
-
-def test_expanding_corr_pairwise(frame):
- result = frame.expanding().corr()
-
- rolling_result = frame.rolling(window=len(frame), min_periods=1).corr()
- tm.assert_frame_equal(result, rolling_result)
-
-
-@pytest.mark.parametrize(
- "func,static_comp",
- [("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
- ids=["sum", "mean", "max", "min"],
-)
-def test_expanding_func(func, static_comp, frame_or_series):
- data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
- result = getattr(data.expanding(min_periods=1, axis=0), func)()
- assert isinstance(result, frame_or_series)
-
- if frame_or_series is Series:
- tm.assert_almost_equal(result[10], static_comp(data[:11]))
- else:
- tm.assert_series_equal(
- result.iloc[10], static_comp(data[:11]), check_names=False
- )
-
-
-@pytest.mark.parametrize(
- "func,static_comp",
- [("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
- ids=["sum", "mean", "max", "min"],
-)
-def test_expanding_min_periods(func, static_comp):
- ser = Series(np.random.randn(50))
-
- result = getattr(ser.expanding(min_periods=30, axis=0), func)()
- assert result[:29].isna().all()
- tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
-
- # min_periods is working correctly
- result = getattr(ser.expanding(min_periods=15, axis=0), func)()
- assert isna(result.iloc[13])
- assert notna(result.iloc[14])
-
- ser2 = Series(np.random.randn(20))
- result = getattr(ser2.expanding(min_periods=5, axis=0), func)()
- assert isna(result[3])
- assert notna(result[4])
-
- # min_periods=0
- result0 = getattr(ser.expanding(min_periods=0, axis=0), func)()
- result1 = getattr(ser.expanding(min_periods=1, axis=0), func)()
- tm.assert_almost_equal(result0, result1)
-
- result = getattr(ser.expanding(min_periods=1, axis=0), func)()
- tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
-
-
-def test_expanding_apply(engine_and_raw, frame_or_series):
- engine, raw = engine_and_raw
- data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
- result = data.expanding(min_periods=1).apply(
- lambda x: x.mean(), raw=raw, engine=engine
- )
- assert isinstance(result, frame_or_series)
-
- if frame_or_series is Series:
- tm.assert_almost_equal(result[9], np.mean(data[:11]))
- else:
- tm.assert_series_equal(result.iloc[9], np.mean(data[:11]), check_names=False)
-
-
-def test_expanding_min_periods_apply(engine_and_raw):
- engine, raw = engine_and_raw
- ser = Series(np.random.randn(50))
-
- result = ser.expanding(min_periods=30).apply(
- lambda x: x.mean(), raw=raw, engine=engine
- )
- assert result[:29].isna().all()
- tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
-
- # min_periods is working correctly
- result = ser.expanding(min_periods=15).apply(
- lambda x: x.mean(), raw=raw, engine=engine
- )
- assert isna(result.iloc[13])
- assert notna(result.iloc[14])
-
- ser2 = Series(np.random.randn(20))
- result = ser2.expanding(min_periods=5).apply(
- lambda x: x.mean(), raw=raw, engine=engine
- )
- assert isna(result[3])
- assert notna(result[4])
-
- # min_periods=0
- result0 = ser.expanding(min_periods=0).apply(
- lambda x: x.mean(), raw=raw, engine=engine
- )
- result1 = ser.expanding(min_periods=1).apply(
- lambda x: x.mean(), raw=raw, engine=engine
- )
- tm.assert_almost_equal(result0, result1)
-
- result = ser.expanding(min_periods=1).apply(
- lambda x: x.mean(), raw=raw, engine=engine
- )
- tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
-
-
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
def test_expanding_apply_consistency_sum_nans(consistency_data, min_periods, f):
@@ -334,202 +173,3 @@ def test_expanding_consistency_var_debiasing_factors(consistency_data, min_perio
x.expanding().count() - 1.0
).replace(0.0, np.nan)
tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
-
-
-@pytest.mark.parametrize(
- "f",
- [
- lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)),
- lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)),
- ],
-)
-def test_moment_functions_zero_length_pairwise(f):
-
- df1 = DataFrame()
- df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
- df2["a"] = df2["a"].astype("float64")
-
- df1_expected = DataFrame(
- index=MultiIndex.from_product([df1.index, df1.columns]), columns=Index([])
- )
- df2_expected = DataFrame(
- index=MultiIndex.from_product([df2.index, df2.columns], names=["bar", "foo"]),
- columns=Index(["a"], name="foo"),
- dtype="float64",
- )
-
- df1_result = f(df1)
- tm.assert_frame_equal(df1_result, df1_expected)
-
- df2_result = f(df2)
- tm.assert_frame_equal(df2_result, df2_expected)
-
-
-@pytest.mark.parametrize(
- "f",
- [
- lambda x: x.expanding().count(),
- lambda x: x.expanding(min_periods=5).cov(x, pairwise=False),
- lambda x: x.expanding(min_periods=5).corr(x, pairwise=False),
- lambda x: x.expanding(min_periods=5).max(),
- lambda x: x.expanding(min_periods=5).min(),
- lambda x: x.expanding(min_periods=5).sum(),
- lambda x: x.expanding(min_periods=5).mean(),
- lambda x: x.expanding(min_periods=5).std(),
- lambda x: x.expanding(min_periods=5).var(),
- lambda x: x.expanding(min_periods=5).skew(),
- lambda x: x.expanding(min_periods=5).kurt(),
- lambda x: x.expanding(min_periods=5).quantile(0.5),
- lambda x: x.expanding(min_periods=5).median(),
- lambda x: x.expanding(min_periods=5).apply(sum, raw=False),
- lambda x: x.expanding(min_periods=5).apply(sum, raw=True),
- ],
-)
-def test_moment_functions_zero_length(f):
- # GH 8056
- s = Series(dtype=np.float64)
- s_expected = s
- df1 = DataFrame()
- df1_expected = df1
- df2 = DataFrame(columns=["a"])
- df2["a"] = df2["a"].astype("float64")
- df2_expected = df2
-
- s_result = f(s)
- tm.assert_series_equal(s_result, s_expected)
-
- df1_result = f(df1)
- tm.assert_frame_equal(df1_result, df1_expected)
-
- df2_result = f(df2)
- tm.assert_frame_equal(df2_result, df2_expected)
-
-
-def test_expanding_apply_empty_series(engine_and_raw):
- engine, raw = engine_and_raw
- ser = Series([], dtype=np.float64)
- tm.assert_series_equal(
- ser, ser.expanding().apply(lambda x: x.mean(), raw=raw, engine=engine)
- )
-
-
-def test_expanding_apply_min_periods_0(engine_and_raw):
- # GH 8080
- engine, raw = engine_and_raw
- s = Series([None, None, None])
- result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw, engine=engine)
- expected = Series([1.0, 2.0, 3.0])
- tm.assert_series_equal(result, expected)
-
-
-def test_expanding_cov_diff_index():
- # GH 7512
- s1 = Series([1, 2, 3], index=[0, 1, 2])
- s2 = Series([1, 3], index=[0, 2])
- result = s1.expanding().cov(s2)
- expected = Series([None, None, 2.0])
- tm.assert_series_equal(result, expected)
-
- s2a = Series([1, None, 3], index=[0, 1, 2])
- result = s1.expanding().cov(s2a)
- tm.assert_series_equal(result, expected)
-
- s1 = Series([7, 8, 10], index=[0, 1, 3])
- s2 = Series([7, 9, 10], index=[0, 2, 3])
- result = s1.expanding().cov(s2)
- expected = Series([None, None, None, 4.5])
- tm.assert_series_equal(result, expected)
-
-
-def test_expanding_corr_diff_index():
- # GH 7512
- s1 = Series([1, 2, 3], index=[0, 1, 2])
- s2 = Series([1, 3], index=[0, 2])
- result = s1.expanding().corr(s2)
- expected = Series([None, None, 1.0])
- tm.assert_series_equal(result, expected)
-
- s2a = Series([1, None, 3], index=[0, 1, 2])
- result = s1.expanding().corr(s2a)
- tm.assert_series_equal(result, expected)
-
- s1 = Series([7, 8, 10], index=[0, 1, 3])
- s2 = Series([7, 9, 10], index=[0, 2, 3])
- result = s1.expanding().corr(s2)
- expected = Series([None, None, None, 1.0])
- tm.assert_series_equal(result, expected)
-
-
-def test_expanding_cov_pairwise_diff_length():
- # GH 7512
- df1 = DataFrame([[1, 5], [3, 2], [3, 9]], columns=Index(["A", "B"], name="foo"))
- df1a = DataFrame(
- [[1, 5], [3, 9]], index=[0, 2], columns=Index(["A", "B"], name="foo")
- )
- df2 = DataFrame(
- [[5, 6], [None, None], [2, 1]], columns=Index(["X", "Y"], name="foo")
- )
- df2a = DataFrame(
- [[5, 6], [2, 1]], index=[0, 2], columns=Index(["X", "Y"], name="foo")
- )
- # TODO: xref gh-15826
- # .loc is not preserving the names
- result1 = df1.expanding().cov(df2, pairwise=True).loc[2]
- result2 = df1.expanding().cov(df2a, pairwise=True).loc[2]
- result3 = df1a.expanding().cov(df2, pairwise=True).loc[2]
- result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2]
- expected = DataFrame(
- [[-3.0, -6.0], [-5.0, -10.0]],
- columns=Index(["A", "B"], name="foo"),
- index=Index(["X", "Y"], name="foo"),
- )
- tm.assert_frame_equal(result1, expected)
- tm.assert_frame_equal(result2, expected)
- tm.assert_frame_equal(result3, expected)
- tm.assert_frame_equal(result4, expected)
-
-
-def test_expanding_corr_pairwise_diff_length():
- # GH 7512
- df1 = DataFrame(
- [[1, 2], [3, 2], [3, 4]], columns=["A", "B"], index=Index(range(3), name="bar")
- )
- df1a = DataFrame(
- [[1, 2], [3, 4]], index=Index([0, 2], name="bar"), columns=["A", "B"]
- )
- df2 = DataFrame(
- [[5, 6], [None, None], [2, 1]],
- columns=["X", "Y"],
- index=Index(range(3), name="bar"),
- )
- df2a = DataFrame(
- [[5, 6], [2, 1]], index=Index([0, 2], name="bar"), columns=["X", "Y"]
- )
- result1 = df1.expanding().corr(df2, pairwise=True).loc[2]
- result2 = df1.expanding().corr(df2a, pairwise=True).loc[2]
- result3 = df1a.expanding().corr(df2, pairwise=True).loc[2]
- result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2]
- expected = DataFrame(
- [[-1.0, -1.0], [-1.0, -1.0]], columns=["A", "B"], index=Index(["X", "Y"])
- )
- tm.assert_frame_equal(result1, expected)
- tm.assert_frame_equal(result2, expected)
- tm.assert_frame_equal(result3, expected)
- tm.assert_frame_equal(result4, expected)
-
-
-def test_expanding_apply_args_kwargs(engine_and_raw):
- def mean_w_arg(x, const):
- return np.mean(x) + const
-
- engine, raw = engine_and_raw
-
- df = DataFrame(np.random.rand(20, 3))
-
- expected = df.expanding().apply(np.mean, engine=engine, raw=raw) + 20.0
-
- result = df.expanding().apply(mean_w_arg, engine=engine, raw=raw, args=(20,))
- tm.assert_frame_equal(result, expected)
-
- result = df.expanding().apply(mean_w_arg, raw=raw, kwargs={"const": 20})
- tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py
index 680ac3654222a..ad43a02724960 100644
--- a/pandas/tests/window/test_expanding.py
+++ b/pandas/tests/window/test_expanding.py
@@ -6,7 +6,11 @@
from pandas import (
DataFrame,
DatetimeIndex,
+ Index,
+ MultiIndex,
Series,
+ isna,
+ notna,
)
import pandas._testing as tm
from pandas.core.window import Expanding
@@ -288,3 +292,356 @@ def test_rank(window, method, pct, ascending, test_data):
result = ser.expanding(window).rank(method=method, pct=pct, ascending=ascending)
tm.assert_series_equal(result, expected)
+
+
+def test_expanding_corr(series):
+ A = series.dropna()
+ B = (A + np.random.randn(len(A)))[:-5]
+
+ result = A.expanding().corr(B)
+
+ rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)
+
+ tm.assert_almost_equal(rolling_result, result)
+
+
+def test_expanding_count(series):
+ result = series.expanding(min_periods=0).count()
+ tm.assert_almost_equal(
+ result, series.rolling(window=len(series), min_periods=0).count()
+ )
+
+
+def test_expanding_quantile(series):
+ result = series.expanding().quantile(0.5)
+
+ rolling_result = series.rolling(window=len(series), min_periods=1).quantile(0.5)
+
+ tm.assert_almost_equal(result, rolling_result)
+
+
+def test_expanding_cov(series):
+ A = series
+ B = (A + np.random.randn(len(A)))[:-5]
+
+ result = A.expanding().cov(B)
+
+ rolling_result = A.rolling(window=len(A), min_periods=1).cov(B)
+
+ tm.assert_almost_equal(rolling_result, result)
+
+
+def test_expanding_cov_pairwise(frame):
+ result = frame.expanding().cov()
+
+ rolling_result = frame.rolling(window=len(frame), min_periods=1).cov()
+
+ tm.assert_frame_equal(result, rolling_result)
+
+
+def test_expanding_corr_pairwise(frame):
+ result = frame.expanding().corr()
+
+ rolling_result = frame.rolling(window=len(frame), min_periods=1).corr()
+ tm.assert_frame_equal(result, rolling_result)
+
+
+@pytest.mark.parametrize(
+ "func,static_comp",
+ [("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
+ ids=["sum", "mean", "max", "min"],
+)
+def test_expanding_func(func, static_comp, frame_or_series):
+ data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
+ result = getattr(data.expanding(min_periods=1, axis=0), func)()
+ assert isinstance(result, frame_or_series)
+
+ if frame_or_series is Series:
+ tm.assert_almost_equal(result[10], static_comp(data[:11]))
+ else:
+ tm.assert_series_equal(
+ result.iloc[10], static_comp(data[:11]), check_names=False
+ )
+
+
+@pytest.mark.parametrize(
+ "func,static_comp",
+ [("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
+ ids=["sum", "mean", "max", "min"],
+)
+def test_expanding_min_periods(func, static_comp):
+ ser = Series(np.random.randn(50))
+
+ result = getattr(ser.expanding(min_periods=30, axis=0), func)()
+ assert result[:29].isna().all()
+ tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
+
+ # min_periods is working correctly
+ result = getattr(ser.expanding(min_periods=15, axis=0), func)()
+ assert isna(result.iloc[13])
+ assert notna(result.iloc[14])
+
+ ser2 = Series(np.random.randn(20))
+ result = getattr(ser2.expanding(min_periods=5, axis=0), func)()
+ assert isna(result[3])
+ assert notna(result[4])
+
+ # min_periods=0
+ result0 = getattr(ser.expanding(min_periods=0, axis=0), func)()
+ result1 = getattr(ser.expanding(min_periods=1, axis=0), func)()
+ tm.assert_almost_equal(result0, result1)
+
+ result = getattr(ser.expanding(min_periods=1, axis=0), func)()
+ tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
+
+
+def test_expanding_apply(engine_and_raw, frame_or_series):
+ engine, raw = engine_and_raw
+ data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
+ result = data.expanding(min_periods=1).apply(
+ lambda x: x.mean(), raw=raw, engine=engine
+ )
+ assert isinstance(result, frame_or_series)
+
+ if frame_or_series is Series:
+ tm.assert_almost_equal(result[9], np.mean(data[:11]))
+ else:
+ tm.assert_series_equal(result.iloc[9], np.mean(data[:11]), check_names=False)
+
+
+def test_expanding_min_periods_apply(engine_and_raw):
+ engine, raw = engine_and_raw
+ ser = Series(np.random.randn(50))
+
+ result = ser.expanding(min_periods=30).apply(
+ lambda x: x.mean(), raw=raw, engine=engine
+ )
+ assert result[:29].isna().all()
+ tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
+
+ # min_periods is working correctly
+ result = ser.expanding(min_periods=15).apply(
+ lambda x: x.mean(), raw=raw, engine=engine
+ )
+ assert isna(result.iloc[13])
+ assert notna(result.iloc[14])
+
+ ser2 = Series(np.random.randn(20))
+ result = ser2.expanding(min_periods=5).apply(
+ lambda x: x.mean(), raw=raw, engine=engine
+ )
+ assert isna(result[3])
+ assert notna(result[4])
+
+ # min_periods=0
+ result0 = ser.expanding(min_periods=0).apply(
+ lambda x: x.mean(), raw=raw, engine=engine
+ )
+ result1 = ser.expanding(min_periods=1).apply(
+ lambda x: x.mean(), raw=raw, engine=engine
+ )
+ tm.assert_almost_equal(result0, result1)
+
+ result = ser.expanding(min_periods=1).apply(
+ lambda x: x.mean(), raw=raw, engine=engine
+ )
+ tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
+
+
+@pytest.mark.parametrize(
+ "f",
+ [
+ lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)),
+ lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)),
+ ],
+)
+def test_moment_functions_zero_length_pairwise(f):
+
+ df1 = DataFrame()
+ df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
+ df2["a"] = df2["a"].astype("float64")
+
+ df1_expected = DataFrame(
+ index=MultiIndex.from_product([df1.index, df1.columns]), columns=Index([])
+ )
+ df2_expected = DataFrame(
+ index=MultiIndex.from_product([df2.index, df2.columns], names=["bar", "foo"]),
+ columns=Index(["a"], name="foo"),
+ dtype="float64",
+ )
+
+ df1_result = f(df1)
+ tm.assert_frame_equal(df1_result, df1_expected)
+
+ df2_result = f(df2)
+ tm.assert_frame_equal(df2_result, df2_expected)
+
+
+@pytest.mark.parametrize(
+ "f",
+ [
+ lambda x: x.expanding().count(),
+ lambda x: x.expanding(min_periods=5).cov(x, pairwise=False),
+ lambda x: x.expanding(min_periods=5).corr(x, pairwise=False),
+ lambda x: x.expanding(min_periods=5).max(),
+ lambda x: x.expanding(min_periods=5).min(),
+ lambda x: x.expanding(min_periods=5).sum(),
+ lambda x: x.expanding(min_periods=5).mean(),
+ lambda x: x.expanding(min_periods=5).std(),
+ lambda x: x.expanding(min_periods=5).var(),
+ lambda x: x.expanding(min_periods=5).skew(),
+ lambda x: x.expanding(min_periods=5).kurt(),
+ lambda x: x.expanding(min_periods=5).quantile(0.5),
+ lambda x: x.expanding(min_periods=5).median(),
+ lambda x: x.expanding(min_periods=5).apply(sum, raw=False),
+ lambda x: x.expanding(min_periods=5).apply(sum, raw=True),
+ ],
+)
+def test_moment_functions_zero_length(f):
+ # GH 8056
+ s = Series(dtype=np.float64)
+ s_expected = s
+ df1 = DataFrame()
+ df1_expected = df1
+ df2 = DataFrame(columns=["a"])
+ df2["a"] = df2["a"].astype("float64")
+ df2_expected = df2
+
+ s_result = f(s)
+ tm.assert_series_equal(s_result, s_expected)
+
+ df1_result = f(df1)
+ tm.assert_frame_equal(df1_result, df1_expected)
+
+ df2_result = f(df2)
+ tm.assert_frame_equal(df2_result, df2_expected)
+
+
+def test_expanding_apply_empty_series(engine_and_raw):
+ engine, raw = engine_and_raw
+ ser = Series([], dtype=np.float64)
+ tm.assert_series_equal(
+ ser, ser.expanding().apply(lambda x: x.mean(), raw=raw, engine=engine)
+ )
+
+
+def test_expanding_apply_min_periods_0(engine_and_raw):
+ # GH 8080
+ engine, raw = engine_and_raw
+ s = Series([None, None, None])
+ result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw, engine=engine)
+ expected = Series([1.0, 2.0, 3.0])
+ tm.assert_series_equal(result, expected)
+
+
+def test_expanding_cov_diff_index():
+ # GH 7512
+ s1 = Series([1, 2, 3], index=[0, 1, 2])
+ s2 = Series([1, 3], index=[0, 2])
+ result = s1.expanding().cov(s2)
+ expected = Series([None, None, 2.0])
+ tm.assert_series_equal(result, expected)
+
+ s2a = Series([1, None, 3], index=[0, 1, 2])
+ result = s1.expanding().cov(s2a)
+ tm.assert_series_equal(result, expected)
+
+ s1 = Series([7, 8, 10], index=[0, 1, 3])
+ s2 = Series([7, 9, 10], index=[0, 2, 3])
+ result = s1.expanding().cov(s2)
+ expected = Series([None, None, None, 4.5])
+ tm.assert_series_equal(result, expected)
+
+
+def test_expanding_corr_diff_index():
+ # GH 7512
+ s1 = Series([1, 2, 3], index=[0, 1, 2])
+ s2 = Series([1, 3], index=[0, 2])
+ result = s1.expanding().corr(s2)
+ expected = Series([None, None, 1.0])
+ tm.assert_series_equal(result, expected)
+
+ s2a = Series([1, None, 3], index=[0, 1, 2])
+ result = s1.expanding().corr(s2a)
+ tm.assert_series_equal(result, expected)
+
+ s1 = Series([7, 8, 10], index=[0, 1, 3])
+ s2 = Series([7, 9, 10], index=[0, 2, 3])
+ result = s1.expanding().corr(s2)
+ expected = Series([None, None, None, 1.0])
+ tm.assert_series_equal(result, expected)
+
+
+def test_expanding_cov_pairwise_diff_length():
+ # GH 7512
+ df1 = DataFrame([[1, 5], [3, 2], [3, 9]], columns=Index(["A", "B"], name="foo"))
+ df1a = DataFrame(
+ [[1, 5], [3, 9]], index=[0, 2], columns=Index(["A", "B"], name="foo")
+ )
+ df2 = DataFrame(
+ [[5, 6], [None, None], [2, 1]], columns=Index(["X", "Y"], name="foo")
+ )
+ df2a = DataFrame(
+ [[5, 6], [2, 1]], index=[0, 2], columns=Index(["X", "Y"], name="foo")
+ )
+ # TODO: xref gh-15826
+ # .loc is not preserving the names
+ result1 = df1.expanding().cov(df2, pairwise=True).loc[2]
+ result2 = df1.expanding().cov(df2a, pairwise=True).loc[2]
+ result3 = df1a.expanding().cov(df2, pairwise=True).loc[2]
+ result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2]
+ expected = DataFrame(
+ [[-3.0, -6.0], [-5.0, -10.0]],
+ columns=Index(["A", "B"], name="foo"),
+ index=Index(["X", "Y"], name="foo"),
+ )
+ tm.assert_frame_equal(result1, expected)
+ tm.assert_frame_equal(result2, expected)
+ tm.assert_frame_equal(result3, expected)
+ tm.assert_frame_equal(result4, expected)
+
+
+def test_expanding_corr_pairwise_diff_length():
+ # GH 7512
+ df1 = DataFrame(
+ [[1, 2], [3, 2], [3, 4]], columns=["A", "B"], index=Index(range(3), name="bar")
+ )
+ df1a = DataFrame(
+ [[1, 2], [3, 4]], index=Index([0, 2], name="bar"), columns=["A", "B"]
+ )
+ df2 = DataFrame(
+ [[5, 6], [None, None], [2, 1]],
+ columns=["X", "Y"],
+ index=Index(range(3), name="bar"),
+ )
+ df2a = DataFrame(
+ [[5, 6], [2, 1]], index=Index([0, 2], name="bar"), columns=["X", "Y"]
+ )
+ result1 = df1.expanding().corr(df2, pairwise=True).loc[2]
+ result2 = df1.expanding().corr(df2a, pairwise=True).loc[2]
+ result3 = df1a.expanding().corr(df2, pairwise=True).loc[2]
+ result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2]
+ expected = DataFrame(
+ [[-1.0, -1.0], [-1.0, -1.0]], columns=["A", "B"], index=Index(["X", "Y"])
+ )
+ tm.assert_frame_equal(result1, expected)
+ tm.assert_frame_equal(result2, expected)
+ tm.assert_frame_equal(result3, expected)
+ tm.assert_frame_equal(result4, expected)
+
+
+def test_expanding_apply_args_kwargs(engine_and_raw):
+ def mean_w_arg(x, const):
+ return np.mean(x) + const
+
+ engine, raw = engine_and_raw
+
+ df = DataFrame(np.random.rand(20, 3))
+
+ expected = df.expanding().apply(np.mean, engine=engine, raw=raw) + 20.0
+
+ result = df.expanding().apply(mean_w_arg, engine=engine, raw=raw, args=(20,))
+ tm.assert_frame_equal(result, expected)
+
+ result = df.expanding().apply(mean_w_arg, raw=raw, kwargs={"const": 20})
+ tm.assert_frame_equal(result, expected)
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
Since some tests are not running on windows/moments (#37535), moving the "consistency" expanding tests to `test_expanding.py`
| https://api.github.com/repos/pandas-dev/pandas/pulls/44163 | 2021-10-24T03:03:08Z | 2021-10-24T14:43:41Z | 2021-10-24T14:43:41Z | 2021-10-24T17:20:55Z |
TST: Move remaining ewm tests to window/ | diff --git a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py
deleted file mode 100644
index f87ff654e554a..0000000000000
--- a/pandas/tests/window/moments/test_moments_ewm.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import pytest
-
-from pandas import (
- DataFrame,
- Series,
-)
-import pandas._testing as tm
-
-
-@pytest.mark.parametrize("name", ["var", "std", "mean"])
-def test_ewma_series(series, name):
- series_result = getattr(series.ewm(com=10), name)()
- assert isinstance(series_result, Series)
-
-
-@pytest.mark.parametrize("name", ["var", "std", "mean"])
-def test_ewma_frame(frame, name):
- frame_result = getattr(frame.ewm(com=10), name)()
- assert isinstance(frame_result, DataFrame)
-
-
-def test_ewma_span_com_args(series):
- A = series.ewm(com=9.5).mean()
- B = series.ewm(span=20).mean()
- tm.assert_almost_equal(A, B)
- msg = "comass, span, halflife, and alpha are mutually exclusive"
- with pytest.raises(ValueError, match=msg):
- series.ewm(com=9.5, span=20)
-
- msg = "Must pass one of comass, span, halflife, or alpha"
- with pytest.raises(ValueError, match=msg):
- series.ewm().mean()
-
-
-def test_ewma_halflife_arg(series):
- A = series.ewm(com=13.932726172912965).mean()
- B = series.ewm(halflife=10.0).mean()
- tm.assert_almost_equal(A, B)
- msg = "comass, span, halflife, and alpha are mutually exclusive"
- with pytest.raises(ValueError, match=msg):
- series.ewm(span=20, halflife=50)
- with pytest.raises(ValueError, match=msg):
- series.ewm(com=9.5, halflife=50)
- with pytest.raises(ValueError, match=msg):
- series.ewm(com=9.5, span=20, halflife=50)
- msg = "Must pass one of comass, span, halflife, or alpha"
- with pytest.raises(ValueError, match=msg):
- series.ewm()
-
-
-def test_ewm_alpha_arg(series):
- # GH 10789
- s = series
- msg = "Must pass one of comass, span, halflife, or alpha"
- with pytest.raises(ValueError, match=msg):
- s.ewm()
-
- msg = "comass, span, halflife, and alpha are mutually exclusive"
- with pytest.raises(ValueError, match=msg):
- s.ewm(com=10.0, alpha=0.5)
- with pytest.raises(ValueError, match=msg):
- s.ewm(span=10.0, alpha=0.5)
- with pytest.raises(ValueError, match=msg):
- s.ewm(halflife=10.0, alpha=0.5)
diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py
index 4ca090fba4955..21c0099bbc0e6 100644
--- a/pandas/tests/window/test_ewm.py
+++ b/pandas/tests/window/test_ewm.py
@@ -600,3 +600,60 @@ def test_different_input_array_raise_exception(name):
# exception raised is Exception
with pytest.raises(ValueError, match=msg):
getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50))
+
+
+@pytest.mark.parametrize("name", ["var", "std", "mean"])
+def test_ewma_series(series, name):
+ series_result = getattr(series.ewm(com=10), name)()
+ assert isinstance(series_result, Series)
+
+
+@pytest.mark.parametrize("name", ["var", "std", "mean"])
+def test_ewma_frame(frame, name):
+ frame_result = getattr(frame.ewm(com=10), name)()
+ assert isinstance(frame_result, DataFrame)
+
+
+def test_ewma_span_com_args(series):
+ A = series.ewm(com=9.5).mean()
+ B = series.ewm(span=20).mean()
+ tm.assert_almost_equal(A, B)
+ msg = "comass, span, halflife, and alpha are mutually exclusive"
+ with pytest.raises(ValueError, match=msg):
+ series.ewm(com=9.5, span=20)
+
+ msg = "Must pass one of comass, span, halflife, or alpha"
+ with pytest.raises(ValueError, match=msg):
+ series.ewm().mean()
+
+
+def test_ewma_halflife_arg(series):
+ A = series.ewm(com=13.932726172912965).mean()
+ B = series.ewm(halflife=10.0).mean()
+ tm.assert_almost_equal(A, B)
+ msg = "comass, span, halflife, and alpha are mutually exclusive"
+ with pytest.raises(ValueError, match=msg):
+ series.ewm(span=20, halflife=50)
+ with pytest.raises(ValueError, match=msg):
+ series.ewm(com=9.5, halflife=50)
+ with pytest.raises(ValueError, match=msg):
+ series.ewm(com=9.5, span=20, halflife=50)
+ msg = "Must pass one of comass, span, halflife, or alpha"
+ with pytest.raises(ValueError, match=msg):
+ series.ewm()
+
+
+def test_ewm_alpha_arg(series):
+ # GH 10789
+ s = series
+ msg = "Must pass one of comass, span, halflife, or alpha"
+ with pytest.raises(ValueError, match=msg):
+ s.ewm()
+
+ msg = "comass, span, halflife, and alpha are mutually exclusive"
+ with pytest.raises(ValueError, match=msg):
+ s.ewm(com=10.0, alpha=0.5)
+ with pytest.raises(ValueError, match=msg):
+ s.ewm(span=10.0, alpha=0.5)
+ with pytest.raises(ValueError, match=msg):
+ s.ewm(halflife=10.0, alpha=0.5)
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
Since some tests are not running on windows/moments (#37535), moving the remaining tests to test_ewm.py
| https://api.github.com/repos/pandas-dev/pandas/pulls/44161 | 2021-10-24T02:02:33Z | 2021-10-24T14:42:33Z | 2021-10-24T14:42:33Z | 2021-10-24T17:20:46Z |
IO ExcelFile New Method | https://github.com/pandas-dev/pandas/pull/44160.diff | …frame access
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew Added method to ExcelFile class to read all the sheets. This allows the users to access a dictionary of all pd.DataFrames using the .sheets() method and access them more easily. Before, you would need to then call a pd.read_excel(ExcelFile, sheet_name) to access the data in that sheet.
| https://api.github.com/repos/pandas-dev/pandas/pulls/44160 | 2021-10-23T23:52:47Z | 2021-10-24T00:29:44Z | null | 2021-10-24T21:17:28Z |
TST: enable interval dtype feather test again | diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index ba8a9ed070236..97ebb3a0d39ba 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -89,9 +89,7 @@ def test_basic(self):
)
df["periods"] = pd.period_range("2013", freq="M", periods=3)
df["timedeltas"] = pd.timedelta_range("1 day", periods=3)
- # TODO temporary disable due to regression in pyarrow 0.17.1
- # https://github.com/pandas-dev/pandas/issues/34255
- # df["intervals"] = pd.interval_range(0, 3, 3)
+ df["intervals"] = pd.interval_range(0, 3, 3)
assert df.dttz.dtype.tz.zone == "US/Eastern"
self.check_round_trip(df)
| Closes #34255 | https://api.github.com/repos/pandas-dev/pandas/pulls/44155 | 2021-10-23T14:57:40Z | 2021-10-23T17:50:44Z | 2021-10-23T17:50:44Z | 2021-10-23T17:54:37Z |
CLN/TST: address/annotate TODOs | diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index d112b44c51c07..68c09f83e1cdf 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -947,7 +947,7 @@ def rank_1d(
N = len(values)
if labels is not None:
- # TODO Cython 3.0: cast won't be necessary (#2992)
+ # TODO(cython3): cast won't be necessary (#2992)
assert <Py_ssize_t>len(labels) == N
out = np.empty(N)
grp_sizes = np.ones(N, dtype=np.int64)
@@ -1086,7 +1086,7 @@ cdef void rank_sorted_1d(
# array that we sorted previously, which gives us the location of
# that sorted value for retrieval back from the original
# values / masked_vals arrays
- # TODO: de-duplicate once cython supports conditional nogil
+ # TODO(cython3): de-duplicate once cython supports conditional nogil
if iu_64_floating_obj_t is object:
with gil:
for i in range(N):
@@ -1413,7 +1413,7 @@ ctypedef fused out_t:
@cython.boundscheck(False)
@cython.wraparound(False)
def diff_2d(
- ndarray[diff_t, ndim=2] arr, # TODO(cython 3) update to "const diff_t[:, :] arr"
+ ndarray[diff_t, ndim=2] arr, # TODO(cython3) update to "const diff_t[:, :] arr"
ndarray[out_t, ndim=2] out,
Py_ssize_t periods,
int axis,
@@ -1422,7 +1422,7 @@ def diff_2d(
cdef:
Py_ssize_t i, j, sx, sy, start, stop
bint f_contig = arr.flags.f_contiguous
- # bint f_contig = arr.is_f_contig() # TODO(cython 3)
+ # bint f_contig = arr.is_f_contig() # TODO(cython3)
diff_t left, right
# Disable for unsupported dtype combinations,
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index c229c67519a66..078cb8e02e824 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -963,7 +963,7 @@ def group_last(iu_64_floating_obj_t[:, ::1] out,
ndarray[int64_t, ndim=2] nobs
bint runtime_error = False
- # TODO(cython 3.0):
+ # TODO(cython3):
# Instead of `labels.shape[0]` use `len(labels)`
if not len(values) == labels.shape[0]:
raise AssertionError("len(index) != len(labels)")
@@ -978,7 +978,7 @@ def group_last(iu_64_floating_obj_t[:, ::1] out,
N, K = (<object>values).shape
if iu_64_floating_obj_t is object:
- # TODO: De-duplicate once conditional-nogil is available
+ # TODO(cython3): De-duplicate once conditional-nogil is available
for i in range(N):
lab = labels[i]
if lab < 0:
@@ -1057,7 +1057,7 @@ def group_nth(iu_64_floating_obj_t[:, ::1] out,
ndarray[int64_t, ndim=2] nobs
bint runtime_error = False
- # TODO(cython 3.0):
+ # TODO(cython3):
# Instead of `labels.shape[0]` use `len(labels)`
if not len(values) == labels.shape[0]:
raise AssertionError("len(index) != len(labels)")
@@ -1072,7 +1072,7 @@ def group_nth(iu_64_floating_obj_t[:, ::1] out,
N, K = (<object>values).shape
if iu_64_floating_obj_t is object:
- # TODO: De-duplicate once conditional-nogil is available
+ # TODO(cython3): De-duplicate once conditional-nogil is available
for i in range(N):
lab = labels[i]
if lab < 0:
@@ -1255,7 +1255,7 @@ cdef group_min_max(iu_64_floating_t[:, ::1] out,
bint uses_mask = mask is not None
bint isna_entry
- # TODO(cython 3.0):
+ # TODO(cython3):
# Instead of `labels.shape[0]` use `len(labels)`
if not len(values) == labels.shape[0]:
raise AssertionError("len(index) != len(labels)")
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 23094bdb90483..2aebf75ba35d4 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -166,7 +166,6 @@ cdef class _NaT(datetime):
elif util.is_integer_object(other):
# For Period compat
- # TODO: the integer behavior is deprecated, remove it
return c_NaT
elif util.is_array(other):
@@ -201,7 +200,6 @@ cdef class _NaT(datetime):
elif util.is_integer_object(other):
# For Period compat
- # TODO: the integer behavior is deprecated, remove it
return c_NaT
elif util.is_array(other):
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 6cf1fdbfa4585..b8f957a4c2ea8 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -671,7 +671,7 @@ def try_parse_date_and_time(
object[:] result
n = len(dates)
- # TODO(cython 3.0): Use len instead of `shape[0]`
+ # TODO(cython3): Use len instead of `shape[0]`
if times.shape[0] != n:
raise ValueError('Length of dates and times must be equal')
result = np.empty(n, dtype='O')
@@ -709,7 +709,7 @@ def try_parse_year_month_day(
object[:] result
n = len(years)
- # TODO(cython 3.0): Use len instead of `shape[0]`
+ # TODO(cython3): Use len instead of `shape[0]`
if months.shape[0] != n or days.shape[0] != n:
raise ValueError('Length of years/months/days must all be equal')
result = np.empty(n, dtype='O')
@@ -735,7 +735,7 @@ def try_parse_datetime_components(object[:] years,
double micros
n = len(years)
- # TODO(cython 3.0): Use len instead of `shape[0]`
+ # TODO(cython3): Use len instead of `shape[0]`
if (
months.shape[0] != n
or days.shape[0] != n
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 39aa5da95cc29..274c78c30aec4 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -136,7 +136,7 @@ def __init__(
self.window = window
self.min_periods = min_periods
self.center = center
- # TODO: Change this back to self.win_type once deprecation is enforced
+ # TODO(2.0): Change this back to self.win_type once deprecation is enforced
self._win_type = win_type
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.method = method
@@ -262,7 +262,7 @@ def _gotitem(self, key, ndim, subset=None):
# we need to make a shallow copy of ourselves
# with the same groupby
with warnings.catch_warnings():
- # TODO: Remove once win_type deprecation is enforced
+ # TODO(2.0): Remove once win_type deprecation is enforced
warnings.filterwarnings("ignore", "win_type", FutureWarning)
kwargs = {attr: getattr(self, attr) for attr in self._attributes}
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index afbe37b2ef44c..49c2b28207ed5 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -606,7 +606,7 @@ def read_csv(
# Error Handling
error_bad_lines=None,
warn_bad_lines=None,
- # TODO (2.0): set on_bad_lines to "error".
+ # TODO(2.0): set on_bad_lines to "error".
# See _refine_defaults_read comment for why we do this.
on_bad_lines=None,
# Internal
@@ -704,7 +704,7 @@ def read_table(
# Error Handling
error_bad_lines=None,
warn_bad_lines=None,
- # TODO (2.0): set on_bad_lines to "error".
+ # TODO(2.0): set on_bad_lines to "error".
# See _refine_defaults_read comment for why we do this.
on_bad_lines=None,
# Internal
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index ef34aa1e34596..34bcc6687e902 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -338,8 +338,8 @@ def convert_delta_safe(base, deltas, unit) -> Series:
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
- # TODO: If/when pandas supports more than datetime64[ns], this should be
- # improved to use correct range, e.g. datetime[Y] for yearly
+ # TODO(non-nano): If/when pandas supports more than datetime64[ns], this
+ # should be improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index 3a3103b095e45..07ae7511bb333 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -1203,7 +1203,7 @@ def test_from_coo(self):
row = [0, 3, 1, 0]
col = [0, 3, 1, 2]
data = [4, 5, 7, 9]
- # TODO: Remove dtype when scipy is fixed
+ # TODO(scipy#13585): Remove dtype when scipy is fixed
# https://github.com/scipy/scipy/issues/13585
sp_array = scipy.sparse.coo_matrix((data, (row, col)), dtype="int")
result = pd.Series.sparse.from_coo(sp_array)
diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py
index 989a9be181a3f..30118d20f67a9 100644
--- a/pandas/tests/frame/methods/test_join.py
+++ b/pandas/tests/frame/methods/test_join.py
@@ -143,15 +143,14 @@ def test_join_index_more(float_frame):
def test_join_index_series(float_frame):
df = float_frame.copy()
- s = df.pop(float_frame.columns[-1])
- joined = df.join(s)
+ ser = df.pop(float_frame.columns[-1])
+ joined = df.join(ser)
- # TODO should this check_names ?
- tm.assert_frame_equal(joined, float_frame, check_names=False)
+ tm.assert_frame_equal(joined, float_frame)
- s.name = None
+ ser.name = None
with pytest.raises(ValueError, match="must have a name"):
- df.join(s)
+ df.join(ser)
def test_join_overlap(float_frame):
@@ -241,8 +240,7 @@ def test_join(self, multiindex_dataframe_random_data):
assert not np.isnan(joined.values).all()
- # TODO what should join do with names ?
- tm.assert_frame_equal(joined, expected, check_names=False)
+ tm.assert_frame_equal(joined, expected)
def test_join_segfault(self):
# GH#1532
diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py
index 76d259707787d..43af48cf4a654 100644
--- a/pandas/tests/frame/methods/test_reset_index.py
+++ b/pandas/tests/frame/methods/test_reset_index.py
@@ -144,32 +144,31 @@ def test_reset_index(self, float_frame):
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index(["A", "B"])
- # TODO should reset_index check_names ?
- tm.assert_frame_equal(rs, float_frame, check_names=False)
+ tm.assert_frame_equal(rs, float_frame)
rs = df.reset_index(["index", "A", "B"])
- tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
+ tm.assert_frame_equal(rs, float_frame.reset_index())
rs = df.reset_index(["index", "A", "B"])
- tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
+ tm.assert_frame_equal(rs, float_frame.reset_index())
rs = df.reset_index("A")
xp = float_frame.reset_index().set_index(["index", "B"])
- tm.assert_frame_equal(rs, xp, check_names=False)
+ tm.assert_frame_equal(rs, xp)
# test resetting in place
df = float_frame.copy()
reset = float_frame.reset_index()
return_value = df.reset_index(inplace=True)
assert return_value is None
- tm.assert_frame_equal(df, reset, check_names=False)
+ tm.assert_frame_equal(df, reset)
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index("A", drop=True)
xp = float_frame.copy()
del xp["A"]
xp = xp.set_index(["B"], append=True)
- tm.assert_frame_equal(rs, xp, check_names=False)
+ tm.assert_frame_equal(rs, xp)
def test_reset_index_name(self):
df = DataFrame(
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 02ff93bf67a4f..704af61ee2390 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2528,7 +2528,7 @@ def check_views():
else:
# TODO: we can call check_views if we stop consolidating
# in setitem_with_indexer
- # FIXME: enable after GH#35417
+ # FIXME(GH#35417): enable after GH#35417
# assert b[0] == 0
assert df.iloc[0, 2] == 0
diff --git a/pandas/tests/groupby/test_allowlist.py b/pandas/tests/groupby/test_allowlist.py
index 8be721c13eea8..59ccd93e44af7 100644
--- a/pandas/tests/groupby/test_allowlist.py
+++ b/pandas/tests/groupby/test_allowlist.py
@@ -8,8 +8,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
from pandas import (
DataFrame,
Index,
@@ -359,8 +357,7 @@ def test_groupby_function_rename(mframe):
"cummax",
"cummin",
"cumprod",
- # TODO(ArrayManager) quantile
- pytest.param("describe", marks=td.skip_array_manager_not_yet_implemented),
+ "describe",
"rank",
"quantile",
"diff",
diff --git a/pandas/tests/indexes/test_indexing.py b/pandas/tests/indexes/test_indexing.py
index 0a001008c2f1b..f7cffe48d1722 100644
--- a/pandas/tests/indexes/test_indexing.py
+++ b/pandas/tests/indexes/test_indexing.py
@@ -172,7 +172,7 @@ class TestGetValue:
"index", ["string", "int", "datetime", "timedelta"], indirect=True
)
def test_get_value(self, index):
- # TODO: Remove function? GH#19728
+ # TODO(2.0): can remove once get_value deprecation is enforced GH#19728
values = np.random.randn(100)
value = index[67]
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index ba8a9ed070236..97ebb3a0d39ba 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -89,9 +89,7 @@ def test_basic(self):
)
df["periods"] = pd.period_range("2013", freq="M", periods=3)
df["timedeltas"] = pd.timedelta_range("1 day", periods=3)
- # TODO temporary disable due to regression in pyarrow 0.17.1
- # https://github.com/pandas-dev/pandas/issues/34255
- # df["intervals"] = pd.interval_range(0, 3, 3)
+ df["intervals"] = pd.interval_range(0, 3, 3)
assert df.dttz.dtype.tz.zone == "US/Eastern"
self.check_round_trip(df)
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index eb3097618e158..386f11b3dd794 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -2042,7 +2042,7 @@ def test_to_sql_with_negative_npinf(self, input, request):
# GH 36465
# The input {"foo": [-np.inf], "infe0": ["bar"]} does not raise any error
# for pymysql version >= 0.10
- # TODO: remove this version check after GH 36465 is fixed
+ # TODO(GH#36465): remove this version check after GH 36465 is fixed
import pymysql
if pymysql.VERSION[0:3] >= (0, 10, 0) and "infe0" in df.columns:
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/44154 | 2021-10-23T02:34:37Z | 2021-10-24T14:41:57Z | 2021-10-24T14:41:57Z | 2021-10-24T16:34:52Z |
TST: fix FIXMEs | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 408e58e23aaed..04e9864299dd1 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -978,7 +978,7 @@ def astype_dt64_to_dt64tz(
stacklevel=level,
)
- # FIXME: GH#33401 this doesn't match DatetimeArray.astype, which
+ # GH#33401 this doesn't match DatetimeArray.astype, which
# goes through the `not via_utc` path
return values.tz_localize("UTC").tz_convert(dtype.tz)
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index bd76f214e0261..3116f2b40900a 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -384,7 +384,7 @@ def fillna(self, value, downcast=None):
cat = self._data.fillna(value)
except (ValueError, TypeError):
# invalid fill_value
- if not self.isna().any():
+ if not self.hasnans:
# nothing to fill, we can get away without casting
return self.copy()
return self.astype(object).fillna(value, downcast=downcast)
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index ca100a60a81b6..5277a3514b423 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -145,7 +145,7 @@ def to_timedelta(arg, unit=None, errors="raise"):
def _coerce_scalar_to_timedelta_type(r, unit="ns", errors="raise"):
"""Convert string 'r' to a timedelta object."""
- result: Timedelta | NaTType # TODO: alias?
+ result: Timedelta | NaTType
try:
result = Timedelta(r, unit)
diff --git a/pandas/tests/arrays/boolean/test_logical.py b/pandas/tests/arrays/boolean/test_logical.py
index 8ed1c27087b02..afcbe36e165c9 100644
--- a/pandas/tests/arrays/boolean/test_logical.py
+++ b/pandas/tests/arrays/boolean/test_logical.py
@@ -38,10 +38,8 @@ def test_empty_ok(self, all_logical_operators):
result = getattr(a, op_name)(False)
tm.assert_extension_array_equal(a, result)
- # FIXME: dont leave commented-out
- # TODO: pd.NA
- # result = getattr(a, op_name)(pd.NA)
- # tm.assert_extension_array_equal(a, result)
+ result = getattr(a, op_name)(pd.NA)
+ tm.assert_extension_array_equal(a, result)
def test_logical_length_mismatch_raises(self, all_logical_operators):
op_name = all_logical_operators
diff --git a/pandas/tests/extension/base/casting.py b/pandas/tests/extension/base/casting.py
index 9c59c79f677de..4987751f31dac 100644
--- a/pandas/tests/extension/base/casting.py
+++ b/pandas/tests/extension/base/casting.py
@@ -1,6 +1,7 @@
import numpy as np
import pytest
+from pandas.compat import np_version_under1p20
import pandas.util._test_decorators as td
import pandas as pd
@@ -30,10 +31,12 @@ def test_astype_object_frame(self, all_data):
assert isinstance(result._mgr.arrays[0], np.ndarray)
assert result._mgr.arrays[0].dtype == np.dtype(object)
- # FIXME: these currently fail; dont leave commented-out
- # check that we can compare the dtypes
- # cmp = result.dtypes.equals(df.dtypes)
- # assert not cmp.any()
+ # earlier numpy raises TypeError on e.g. np.dtype(np.int64) == "Int64"
+ # instead of returning False
+ if not np_version_under1p20:
+ # check that we can compare the dtypes
+ comp = result.dtypes == df.dtypes
+ assert not comp.any()
def test_tolist(self, data):
result = pd.Series(data).tolist()
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index 9c21f717573c1..6358b2fe27ef3 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -17,6 +17,7 @@
import numpy as np
import pytest
+from pandas.compat import np_version_under1p20
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.common import is_object_dtype
@@ -374,10 +375,12 @@ def test_astype_object_frame(self, all_data):
result = df.astype(object)
assert is_object_dtype(result._mgr.arrays[0].dtype)
- # FIXME: these currently fail; dont leave commented-out
- # check that we can compare the dtypes
- # comp = result.dtypes.equals(df.dtypes)
- # assert not comp.any()
+ # earlier numpy raises TypeError on e.g. np.dtype(np.int64) == "Int64"
+ # instead of returning False
+ if not np_version_under1p20:
+ # check that we can compare the dtypes
+ comp = result.dtypes == df.dtypes
+ assert not comp.any()
def test_astype_str(self, data):
result = pd.Series(data[:5]).astype(str)
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 07111af598599..1ea436520bf20 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -576,13 +576,11 @@ def test_ix_multi_take(self):
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
- # FIXME: dont leave commented-out
- """ #1321
+ # GH#1321
df = DataFrame(np.random.randn(3, 2))
- rs = df.loc[df.index==0, df.columns==1]
- xp = df.reindex([0], [1])
+ rs = df.loc[df.index == 0, df.columns == 1]
+ xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
- """
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py
index 49a1dc8bbb21c..3588cd56d1060 100644
--- a/pandas/tests/generic/test_frame.py
+++ b/pandas/tests/generic/test_frame.py
@@ -87,18 +87,9 @@ def test_metadata_propagation_indiv_resample(self):
result = df.resample("1T")
self.check_metadata(df, result)
- def test_metadata_propagation_indiv(self):
+ def test_metadata_propagation_indiv(self, monkeypatch):
# merging with override
# GH 6923
- _metadata = DataFrame._metadata
- _finalize = DataFrame.__finalize__
-
- np.random.seed(10)
- df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["a", "b"])
- df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["c", "d"])
- DataFrame._metadata = ["filename"]
- df1.filename = "fname1.csv"
- df2.filename = "fname2.csv"
def finalize(self, other, method=None, **kwargs):
@@ -107,41 +98,37 @@ def finalize(self, other, method=None, **kwargs):
left, right = other.left, other.right
value = getattr(left, name, "") + "|" + getattr(right, name, "")
object.__setattr__(self, name, value)
- else:
- object.__setattr__(self, name, getattr(other, name, ""))
-
- return self
-
- DataFrame.__finalize__ = finalize
- result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner")
- assert result.filename == "fname1.csv|fname2.csv"
-
- # concat
- # GH 6927
- DataFrame._metadata = ["filename"]
- df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list("ab"))
- df1.filename = "foo"
-
- def finalize(self, other, method=None, **kwargs):
- for name in self._metadata:
- if method == "concat":
+ elif method == "concat":
value = "+".join(
[getattr(o, name) for o in other.objs if getattr(o, name, None)]
)
object.__setattr__(self, name, value)
else:
- object.__setattr__(self, name, getattr(other, name, None))
+ object.__setattr__(self, name, getattr(other, name, ""))
return self
- DataFrame.__finalize__ = finalize
+ with monkeypatch.context() as m:
+ m.setattr(DataFrame, "_metadata", ["filename"])
+ m.setattr(DataFrame, "__finalize__", finalize)
+
+ np.random.seed(10)
+ df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["a", "b"])
+ df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["c", "d"])
+ DataFrame._metadata = ["filename"]
+ df1.filename = "fname1.csv"
+ df2.filename = "fname2.csv"
+
+ result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner")
+ assert result.filename == "fname1.csv|fname2.csv"
- result = pd.concat([df1, df1])
- assert result.filename == "foo+foo"
+ # concat
+ # GH#6927
+ df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list("ab"))
+ df1.filename = "foo"
- # reset
- DataFrame._metadata = _metadata
- DataFrame.__finalize__ = _finalize # FIXME: use monkeypatch
+ result = pd.concat([df1, df1])
+ assert result.filename == "foo+foo"
def test_set_attribute(self):
# Test for consistent setattr behavior when an attribute and a column
diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py
index b733957cb9170..784ced96286a6 100644
--- a/pandas/tests/generic/test_series.py
+++ b/pandas/tests/generic/test_series.py
@@ -109,7 +109,7 @@ def test_metadata_propagation_indiv_resample(self):
result = ts.resample("1T").apply(lambda x: x.sum())
self.check_metadata(ts, result)
- def test_metadata_propagation_indiv(self):
+ def test_metadata_propagation_indiv(self, monkeypatch):
# check that the metadata matches up on the resulting ops
ser = Series(range(3), range(3))
@@ -120,12 +120,6 @@ def test_metadata_propagation_indiv(self):
result = ser.T
self.check_metadata(ser, result)
- _metadata = Series._metadata
- _finalize = Series.__finalize__
- Series._metadata = ["name", "filename"]
- ser.filename = "foo"
- ser2.filename = "bar"
-
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == "concat" and name == "filename":
@@ -142,12 +136,13 @@ def finalize(self, other, method=None, **kwargs):
return self
- Series.__finalize__ = finalize
+ with monkeypatch.context() as m:
+ m.setattr(Series, "_metadata", ["name", "filename"])
+ m.setattr(Series, "__finalize__", finalize)
- result = pd.concat([ser, ser2])
- assert result.filename == "foo+bar"
- assert result.name is None
+ ser.filename = "foo"
+ ser2.filename = "bar"
- # reset
- Series._metadata = _metadata
- Series.__finalize__ = _finalize # FIXME: use monkeypatch
+ result = pd.concat([ser, ser2])
+ assert result.filename == "foo+bar"
+ assert result.name is None
diff --git a/pandas/tests/indexes/timedeltas/methods/test_insert.py b/pandas/tests/indexes/timedeltas/methods/test_insert.py
index 809d21db805e0..d35404ab09492 100644
--- a/pandas/tests/indexes/timedeltas/methods/test_insert.py
+++ b/pandas/tests/indexes/timedeltas/methods/test_insert.py
@@ -87,14 +87,17 @@ def test_insert_nat(self, null):
def test_insert_invalid_na(self):
idx = TimedeltaIndex(["4day", "1day", "2day"], name="idx")
- # FIXME: assert_index_equal fails if we pass a different
- # instance of np.datetime64("NaT")
item = np.datetime64("NaT")
result = idx.insert(0, item)
expected = Index([item] + list(idx), dtype=object, name="idx")
tm.assert_index_equal(result, expected)
+ # Also works if we pass a different dt64nat object
+ item2 = np.datetime64("NaT")
+ result = idx.insert(0, item2)
+ tm.assert_index_equal(result, expected)
+
@pytest.mark.parametrize(
"item", [0, np.int64(0), np.float64(0), np.array(0), np.datetime64(456, "us")]
)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44151 | 2021-10-22T22:01:50Z | 2021-10-24T14:41:02Z | 2021-10-24T14:41:02Z | 2021-10-24T16:39:02Z |
CLN: remove unused fields.pyx code/asvs | diff --git a/asv_bench/benchmarks/tslibs/fields.py b/asv_bench/benchmarks/tslibs/fields.py
index 0607a799ec707..23ae73811204c 100644
--- a/asv_bench/benchmarks/tslibs/fields.py
+++ b/asv_bench/benchmarks/tslibs/fields.py
@@ -12,7 +12,7 @@
class TimeGetTimedeltaField:
params = [
_sizes,
- ["days", "h", "s", "seconds", "ms", "microseconds", "us", "ns", "nanoseconds"],
+ ["days", "seconds", "microseconds", "nanoseconds"],
]
param_names = ["size", "field"]
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 4d55967c1e135..2c4b420656259 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -487,28 +487,6 @@ def get_timedelta_field(const int64_t[:] tdindex, str field):
out[i] = tds.days
return out
- elif field == 'h':
- with nogil:
- for i in range(count):
- if tdindex[i] == NPY_NAT:
- out[i] = -1
- continue
-
- td64_to_tdstruct(tdindex[i], &tds)
- out[i] = tds.hrs
- return out
-
- elif field == 's':
- with nogil:
- for i in range(count):
- if tdindex[i] == NPY_NAT:
- out[i] = -1
- continue
-
- td64_to_tdstruct(tdindex[i], &tds)
- out[i] = tds.sec
- return out
-
elif field == 'seconds':
with nogil:
for i in range(count):
@@ -520,17 +498,6 @@ def get_timedelta_field(const int64_t[:] tdindex, str field):
out[i] = tds.seconds
return out
- elif field == 'ms':
- with nogil:
- for i in range(count):
- if tdindex[i] == NPY_NAT:
- out[i] = -1
- continue
-
- td64_to_tdstruct(tdindex[i], &tds)
- out[i] = tds.ms
- return out
-
elif field == 'microseconds':
with nogil:
for i in range(count):
@@ -542,28 +509,6 @@ def get_timedelta_field(const int64_t[:] tdindex, str field):
out[i] = tds.microseconds
return out
- elif field == 'us':
- with nogil:
- for i in range(count):
- if tdindex[i] == NPY_NAT:
- out[i] = -1
- continue
-
- td64_to_tdstruct(tdindex[i], &tds)
- out[i] = tds.us
- return out
-
- elif field == 'ns':
- with nogil:
- for i in range(count):
- if tdindex[i] == NPY_NAT:
- out[i] = -1
- continue
-
- td64_to_tdstruct(tdindex[i], &tds)
- out[i] = tds.ns
- return out
-
elif field == 'nanoseconds':
with nogil:
for i in range(count):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44150 | 2021-10-22T19:48:46Z | 2021-10-23T19:04:08Z | 2021-10-23T19:04:08Z | 2021-10-23T20:26:43Z |
Backport PR #44144 on branch 1.3.x (CI: Fix mypy failures) | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 49f31ac82ff8d..4a518b0c6d5bf 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -908,9 +908,9 @@ def maybe_upcast(
"""
new_dtype, fill_value = maybe_promote(values.dtype, fill_value)
# We get a copy in all cases _except_ (values.dtype == new_dtype and not copy)
- values = values.astype(new_dtype, copy=copy)
+ upcast_values = values.astype(new_dtype, copy=copy)
- return values, fill_value
+ return upcast_values, fill_value # type: ignore[return-value]
def invalidate_string_dtypes(dtype_set: set[DtypeObj]):
diff --git a/pandas/tests/indexes/test_engines.py b/pandas/tests/indexes/test_engines.py
index 9f41c68909f6e..52af29d999fcc 100644
--- a/pandas/tests/indexes/test_engines.py
+++ b/pandas/tests/indexes/test_engines.py
@@ -61,13 +61,7 @@ class TestTimedeltaEngine:
@pytest.mark.parametrize(
"scalar",
[
- # error: Argument 1 to "Timestamp" has incompatible type "timedelta64";
- # expected "Union[integer[Any], float, str, date, datetime64]"
- pd.Timestamp(
- pd.Timedelta(days=42).asm8.view(
- "datetime64[ns]"
- ) # type: ignore[arg-type]
- ),
+ pd.Timestamp(pd.Timedelta(days=42).asm8.view("datetime64[ns]")),
pd.Timedelta(days=42).value,
pd.Timedelta(days=42).to_pytimedelta(),
pd.Timedelta(days=42).to_timedelta64(),
| Backport PR #44144: CI: Fix mypy failures | https://api.github.com/repos/pandas-dev/pandas/pulls/44149 | 2021-10-22T18:50:04Z | 2021-10-22T21:44:51Z | 2021-10-22T21:44:51Z | 2021-10-22T21:44:51Z |
CLN: tests | diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py
index 3dbf49df72558..60d5d8c8ccaca 100644
--- a/pandas/tests/frame/methods/test_cov_corr.py
+++ b/pandas/tests/frame/methods/test_cov_corr.py
@@ -189,15 +189,14 @@ def test_corr_nullable_integer(self, nullable_column, other_column, method):
expected = DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"])
tm.assert_frame_equal(result, expected)
- def test_corr_item_cache(self, using_array_manager):
+ def test_corr_item_cache(self):
# Check that corr does not lead to incorrect entries in item_cache
df = DataFrame({"A": range(10)})
df["B"] = range(10)[::-1]
ser = df["A"] # populate item_cache
- if not using_array_manager:
- assert len(df._mgr.blocks) == 2
+ assert len(df._mgr.arrays) == 2 # i.e. 2 blocks
_ = df.corr()
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index e2cfc50510173..b288fafd8f7f6 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -26,16 +26,14 @@
class TestDataFrameReprInfoEtc:
- def test_repr_bytes_61_lines(self, using_array_manager):
+ def test_repr_bytes_61_lines(self):
# GH#12857
lets = list("ACDEFGHIJKLMNOP")
slen = 50
nseqs = 1000
words = [[np.random.choice(lets) for x in range(slen)] for _ in range(nseqs)]
df = DataFrame(words).astype("U1")
- # TODO(Arraymanager) astype("U1") actually gives this dtype instead of object
- if not using_array_manager:
- assert (df.dtypes == object).all()
+ assert (df.dtypes == object).all()
# smoke tests; at one point this raised with 61 but not 60
repr(df)
diff --git a/pandas/tests/groupby/test_allowlist.py b/pandas/tests/groupby/test_allowlist.py
index 8be721c13eea8..f1fc40ee7aba0 100644
--- a/pandas/tests/groupby/test_allowlist.py
+++ b/pandas/tests/groupby/test_allowlist.py
@@ -406,6 +406,7 @@ def test_groupby_selection_tshift_raises(df):
def test_groupby_selection_other_methods(df):
# some methods which require DatetimeIndex
rng = date_range("2014", periods=len(df))
+ df.columns.name = "foo"
df.index = rng
g = df.groupby(["A"])[["C"]]
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 94ecfe81abd45..c7c575b479988 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -838,7 +838,7 @@ def test_loc_setitem_missing_columns(self, index, box, expected):
def test_loc_coercion(self):
- # 12411
+ # GH#12411
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
expected = df.dtypes
@@ -848,7 +848,8 @@ def test_loc_coercion(self):
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
- # 12045
+ def test_loc_coercion2(self):
+ # GH#12045
import datetime
df = DataFrame(
@@ -862,7 +863,8 @@ def test_loc_coercion(self):
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
- # 11594
+ def test_loc_coercion3(self):
+ # GH#11594
df = DataFrame({"text": ["some words"] + [None] * 9})
expected = df.dtypes
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 7b2713ad274c6..c487777fc339e 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -122,6 +122,7 @@ def test_partial_setting(self):
df.loc[:, "C"] = df.loc[:, "A"]
tm.assert_frame_equal(df, expected)
+ def test_partial_setting2(self):
# GH 8473
dates = date_range("1/1/2000", periods=8)
df_orig = DataFrame(
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index be99eb0bf0a69..8436c2db445ee 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -575,12 +575,14 @@ def test_resample_ohlc_dataframe():
}
)
).reindex(["VOLUME", "PRICE"], axis=1)
+ df.columns.name = "Cols"
res = df.resample("H").ohlc()
exp = pd.concat(
[df["VOLUME"].resample("H").ohlc(), df["PRICE"].resample("H").ohlc()],
axis=1,
- keys=["VOLUME", "PRICE"],
+ keys=df.columns,
)
+ assert exp.columns.names[0] == "Cols"
tm.assert_frame_equal(exp, res)
df.columns = [["a", "b"], ["c", "d"]]
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index 7e6c2a452f1a0..9b2e0cac5de84 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -1312,6 +1312,13 @@ def test_add_offset(self):
assert p + offsets.MonthEnd(12) == exp
assert offsets.MonthEnd(12) + p == exp
+ msg = "|".join(
+ [
+ "Input has different freq",
+ "Input cannot be converted to Period",
+ ]
+ )
+
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
@@ -1319,21 +1326,15 @@ def test_add_offset(self):
np.timedelta64(365, "D"),
timedelta(365),
]:
- msg = "Input has different freq|Input cannot be converted to Period"
+
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
- msg = "cannot use operands with types"
- with pytest.raises(TypeError, match=msg):
+ td_msg = "cannot use operands with types"
+ with pytest.raises(TypeError, match=td_msg):
o + p
else:
- msg = "|".join(
- [
- "Input has different freq",
- "Input cannot be converted to Period",
- ]
- )
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
@@ -1368,6 +1369,13 @@ def test_add_offset(self):
assert p + timedelta(hours=48) == exp
assert timedelta(hours=48) + p == exp
+ msg = "|".join(
+ [
+ "Input has different freq",
+ "Input cannot be converted to Period",
+ ]
+ )
+
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
@@ -1375,21 +1383,14 @@ def test_add_offset(self):
np.timedelta64(4, "h"),
timedelta(hours=23),
]:
- msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
- msg = "cannot use operands with types"
- with pytest.raises(TypeError, match=msg):
+ td_msg = "cannot use operands with types"
+ with pytest.raises(TypeError, match=td_msg):
o + p
else:
- msg = "|".join(
- [
- "Input has different freq",
- "Input cannot be converted to Period",
- ]
- )
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
@@ -1423,6 +1424,13 @@ def test_add_offset(self):
assert p + timedelta(days=4, minutes=180) == exp
assert timedelta(days=4, minutes=180) + p == exp
+ msg = "|".join(
+ [
+ "Input has different freq",
+ "Input cannot be converted to Period",
+ ]
+ )
+
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
@@ -1430,27 +1438,26 @@ def test_add_offset(self):
np.timedelta64(3200, "s"),
timedelta(hours=23, minutes=30),
]:
- msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
- msg = "cannot use operands with types"
- with pytest.raises(TypeError, match=msg):
+ td_msg = "cannot use operands with types"
+ with pytest.raises(TypeError, match=td_msg):
o + p
else:
- msg = "|".join(
- [
- "Input has different freq",
- "Input cannot be converted to Period",
- ]
- )
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
def test_sub_offset(self):
# freq is DateOffset
- msg = "Input has different freq|Input cannot be converted to Period"
+ msg = "|".join(
+ [
+ "Input has different freq",
+ "Input cannot be converted to Period",
+ ]
+ )
+
for freq in ["A", "2A", "3A"]:
p = Period("2011", freq=freq)
assert p - offsets.YearEnd(2) == Period("2009", freq=freq)
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index 7dfda0463ecaf..9c36d5777d60c 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -273,9 +273,14 @@ def test_ops_ndarray(self):
msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td + np.array([1])
- msg = (
- r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and 'Timedelta'|"
- "Concatenation operation is not implemented for NumPy arrays"
+ msg = "|".join(
+ [
+ (
+ r"unsupported operand type\(s\) for \+: 'numpy.ndarray' "
+ "and 'Timedelta'"
+ ),
+ "Concatenation operation is not implemented for NumPy arrays",
+ ]
)
with pytest.raises(TypeError, match=msg):
np.array([1]) + td
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index da8de5c553f53..03b1c512f9053 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -234,11 +234,12 @@ def test_getitem_partial_str_slice_high_reso_with_timedeltaindex(self):
result = ser["1 days, 10:11:12.001001"]
assert result == ser.iloc[1001]
- def test_getitem_slice_2d(self, datetime_series, using_array_manager):
+ # TODO: redundant with test_getitem_ndim_deprecated?
+ def test_getitem_slice_2d(self, datetime_series):
# GH#30588 multi-dimensional indexing deprecated
with tm.assert_produces_warning(
- FutureWarning, check_stacklevel=not using_array_manager
+ FutureWarning, match="Support for multi-dimensional indexing"
):
# GH#30867 Don't want to support this long-term, but
# for now ensure that the warning from Index
@@ -520,11 +521,10 @@ def test_getitem_generator(string_series):
Series(date_range("2012-01-01", periods=2, tz="CET")),
],
)
-def test_getitem_ndim_deprecated(series, using_array_manager):
+def test_getitem_ndim_deprecated(series):
with tm.assert_produces_warning(
FutureWarning,
match="Support for multi-dimensional indexing",
- check_stacklevel=not using_array_manager,
):
result = series[:, None]
diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py
index 8361ec6c6b5fa..a28da1d856cf9 100644
--- a/pandas/tests/series/methods/test_fillna.py
+++ b/pandas/tests/series/methods/test_fillna.py
@@ -742,10 +742,12 @@ def test_fillna_method_and_limit_invalid(self):
# related GH#9217, make sure limit is an int and greater than 0
ser = Series([1, 2, 3, None])
- msg = (
- r"Cannot specify both 'value' and 'method'\.|"
- r"Limit must be greater than 0|"
- "Limit must be an integer"
+ msg = "|".join(
+ [
+ r"Cannot specify both 'value' and 'method'\.",
+ "Limit must be greater than 0",
+ "Limit must be an integer",
+ ]
)
for limit in [-1, 0, 1.0, 2.0]:
for method in ["backfill", "bfill", "pad", "ffill", None]:
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 8f0991eb98bb5..779d6e6b6bb0f 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -241,11 +241,7 @@ def test_complex_sorting(self):
# gh 12666 - check no segfault
x17 = np.array([complex(i) for i in range(17)], dtype=object)
- msg = (
- "unorderable types: .* [<>] .*"
- "|" # the above case happens for numpy < 1.14
- "'[<>]' not supported between instances of .*"
- )
+ msg = "'[<>]' not supported between instances of .*"
with pytest.raises(TypeError, match=msg):
algos.factorize(x17[::-1], sort=True)
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index a49b7c2b7f86e..6a39638af9c87 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -434,11 +434,7 @@ def test_mixed_integer_from_list(self):
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
- msg = (
- "unorderable types: .* [<>] .*"
- "|" # the above case happens for numpy < 1.14
- "'[<>]' not supported between instances of .*"
- )
+ msg = "'[<>]' not supported between instances of .*"
with pytest.raises(TypeError, match=msg):
safe_sort(arr)
diff --git a/pandas/tests/test_take.py b/pandas/tests/test_take.py
index 4a2e3f971670e..f0737f7dc4cce 100644
--- a/pandas/tests/test_take.py
+++ b/pandas/tests/test_take.py
@@ -313,9 +313,11 @@ def test_take_empty(self, allow_fill):
result = algos.take(arr, [], allow_fill=allow_fill)
tm.assert_numpy_array_equal(arr, result)
- msg = (
- "cannot do a non-empty take from an empty axes.|"
- "indices are out-of-bounds"
+ msg = "|".join(
+ [
+ "cannot do a non-empty take from an empty axes.",
+ "indices are out-of-bounds",
+ ]
)
with pytest.raises(IndexError, match=msg):
algos.take(arr, [0], allow_fill=allow_fill)
diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py
index e500517d82d4c..cbbe29fb6cf9a 100644
--- a/pandas/tests/tseries/frequencies/test_inference.py
+++ b/pandas/tests/tseries/frequencies/test_inference.py
@@ -399,9 +399,11 @@ def test_non_datetime_index2():
"idx", [tm.makeIntIndex(10), tm.makeFloatIndex(10), tm.makePeriodIndex(10)]
)
def test_invalid_index_types(idx):
- msg = (
- "(cannot infer freq from a non-convertible)|"
- "(Check the `freq` attribute instead of using infer_freq)"
+ msg = "|".join(
+ [
+ "cannot infer freq from a non-convertible",
+ "Check the `freq` attribute instead of using infer_freq",
+ ]
)
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index d58eeaa7cbcb1..74ee991b040d1 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -63,9 +63,11 @@ def test_invalid_constructor(frame_or_series, w):
c = frame_or_series(range(5)).rolling
- msg = (
- "window must be an integer|"
- "passed window foo is not compatible with a datetimelike index"
+ msg = "|".join(
+ [
+ "window must be an integer",
+ "passed window foo is not compatible with a datetimelike index",
+ ]
)
with pytest.raises(ValueError, match=msg):
c(window=w)
| Includes de-special-casing some using_array_manager tests | https://api.github.com/repos/pandas-dev/pandas/pulls/44148 | 2021-10-22T17:46:49Z | 2021-10-24T14:39:32Z | 2021-10-24T14:39:32Z | 2021-10-24T16:37:13Z |
CI: Fix mypy failures | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 408e58e23aaed..db3d764fb97a2 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -910,9 +910,9 @@ def maybe_upcast(
"""
new_dtype, fill_value = maybe_promote(values.dtype, fill_value)
# We get a copy in all cases _except_ (values.dtype == new_dtype and not copy)
- values = values.astype(new_dtype, copy=copy)
+ upcast_values = values.astype(new_dtype, copy=copy)
- return values, fill_value
+ return upcast_values, fill_value # type: ignore[return-value]
def invalidate_string_dtypes(dtype_set: set[DtypeObj]):
diff --git a/pandas/tests/indexes/test_engines.py b/pandas/tests/indexes/test_engines.py
index 663ba7332c864..02d8c5b2a6a22 100644
--- a/pandas/tests/indexes/test_engines.py
+++ b/pandas/tests/indexes/test_engines.py
@@ -57,13 +57,7 @@ class TestTimedeltaEngine:
@pytest.mark.parametrize(
"scalar",
[
- # error: Argument 1 to "Timestamp" has incompatible type "timedelta64";
- # expected "Union[integer[Any], float, str, date, datetime64]"
- pd.Timestamp(
- pd.Timedelta(days=42).asm8.view(
- "datetime64[ns]"
- ) # type: ignore[arg-type]
- ),
+ pd.Timestamp(pd.Timedelta(days=42).asm8.view("datetime64[ns]")),
pd.Timedelta(days=42).value,
pd.Timedelta(days=42).to_pytimedelta(),
pd.Timedelta(days=42).to_timedelta64(),
| - [x] closes #44143
| https://api.github.com/repos/pandas-dev/pandas/pulls/44144 | 2021-10-22T13:28:21Z | 2021-10-22T18:49:38Z | 2021-10-22T18:49:38Z | 2021-10-22T18:49:38Z |
Fixes Issues#44132, #40148, #29033, #22275, #18869: groupby | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c3ad87082c8ed..ffbf74437289b 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8072,7 +8072,7 @@ def resample(
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
- >>> df.resample('M', on='week_starting').mean()
+ >>> df.resample('M', on='week_starting')['price', 'volume'].mean()
price volume
week_starting
2018-01-31 10.75 62.5
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 07cef290c8919..8ff531fe2b5b9 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1224,7 +1224,7 @@ def _resolve_numeric_only(self, numeric_only: bool | lib.NoDefault) -> bool:
else:
obj = self._obj_with_exclusions
check = obj._get_numeric_data()
- if len(obj.columns) and not len(check.columns) and not obj.empty:
+ if len(obj.columns) > len(check.columns) and not obj.empty:
numeric_only = False
# TODO: v1.4+ Add FutureWarning
diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py
index db0190d488d42..56dcf3f9ca3f8 100644
--- a/pandas/tests/extension/base/groupby.py
+++ b/pandas/tests/extension/base/groupby.py
@@ -96,7 +96,7 @@ def test_in_numeric_groupby(self, data_for_grouping):
"C": [1, 1, 1, 1, 1, 1, 1, 1],
}
)
- result = df.groupby("A").sum().columns
+ result = df.groupby("A").sum(numeric_only=True).columns
if data_for_grouping.dtype._is_numeric:
expected = pd.Index(["B", "C"])
diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py
index 905b33b285625..d25a3bc8d82c4 100644
--- a/pandas/tests/frame/test_stack_unstack.py
+++ b/pandas/tests/frame/test_stack_unstack.py
@@ -1749,7 +1749,7 @@ def test_stack_multiple_bug(self):
multi = df.set_index(["DATE", "ID"])
multi.columns.name = "Params"
unst = multi.unstack("ID")
- down = unst.resample("W-THU").mean()
+ down = unst.loc[:, ["VAR1"]].resample("W-THU").mean()
rs = down.stack("ID")
xp = unst.loc[:, ["VAR1"]].resample("W-THU").mean().stack("ID")
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 339bb2c30736d..7536e1ed10425 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -333,7 +333,7 @@ def test_observed(observed, using_array_manager):
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
- result = gb.sum()
+ result = gb.sum(numeric_only=True)
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 3ae11847cc06b..383e71e4eebdd 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -125,9 +125,17 @@ def df(self):
@pytest.mark.parametrize("method", ["mean", "median"])
def test_averages(self, df, method):
# mean / median
- expected_columns_numeric = Index(["int", "float", "category_int"])
-
- gb = df.groupby("group")
+ expected_column_names = [
+ "int",
+ "float",
+ "category_int",
+ "datetime",
+ "datetimetz",
+ "timedelta",
+ ]
+ expected_columns_numeric = Index(expected_column_names)
+
+ gb = df[["group", *expected_column_names]].groupby("group")
expected = DataFrame(
{
"category_int": [7.5, 9],
@@ -154,10 +162,7 @@ def test_averages(self, df, method):
],
)
- with tm.assert_produces_warning(
- FutureWarning, match="Dropping invalid", check_stacklevel=False
- ):
- result = getattr(gb, method)(numeric_only=False)
+ result = getattr(gb, method)()
tm.assert_frame_equal(result.reindex_like(expected), expected)
expected_columns = expected.columns
@@ -205,14 +210,9 @@ def test_first_last(self, df, method):
@pytest.mark.parametrize("method", ["sum", "cumsum"])
def test_sum_cumsum(self, df, method):
+ expected_columns = Index(["int", "float", "category_int"])
- expected_columns_numeric = Index(["int", "float", "category_int"])
- expected_columns = Index(
- ["int", "float", "string", "category_int", "timedelta"]
- )
- if method == "cumsum":
- # cumsum loses string
- expected_columns = Index(["int", "float", "category_int", "timedelta"])
+ expected_columns_numeric = expected_columns
self._check(df, method, expected_columns, expected_columns_numeric)
@@ -231,26 +231,30 @@ def test_cummin_cummax(self, df, method):
["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
)
- # GH#15561: numeric_only=False set by default like min/max
expected_columns_numeric = expected_columns
self._check(df, method, expected_columns, expected_columns_numeric)
def _check(self, df, method, expected_columns, expected_columns_numeric):
- gb = df.groupby("group")
# cummin, cummax dont have numeric_only kwarg, always use False
warn = None
- if method in ["cummin", "cummax"]:
+ if method in ["cummin", "cummax", "min", "max"]:
# these dont have numeric_only kwarg, always use False
warn = FutureWarning
- elif method in ["min", "max"]:
- # these have numeric_only kwarg, but default to False
- warn = FutureWarning
+ df["object"] = [
+ None,
+ "y",
+ "z",
+ ] # add a column that is non numeric and will be dropped
+ gb = df[["group", "object", *list(expected_columns_numeric)]].groupby(
+ "group"
+ )
+ else:
+ gb = df[["group", *list(expected_columns_numeric)]].groupby("group")
with tm.assert_produces_warning(warn, match="Dropping invalid columns"):
result = getattr(gb, method)()
-
tm.assert_index_equal(result.columns, expected_columns_numeric)
# GH#41475 deprecated silently ignoring nuisance columns
@@ -258,7 +262,7 @@ def _check(self, df, method, expected_columns, expected_columns_numeric):
if len(expected_columns) < len(gb._obj_with_exclusions.columns):
warn = FutureWarning
with tm.assert_produces_warning(warn, match="Dropping invalid columns"):
- result = getattr(gb, method)(numeric_only=False)
+ result = getattr(gb, method)()
tm.assert_index_equal(result.columns, expected_columns)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 203d8abb465d0..450fa5e570fc0 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -896,7 +896,7 @@ def test_keep_nuisance_agg(df, agg_function):
)
def test_omit_nuisance_agg(df, agg_function):
# GH 38774, GH 38815
- grouped = df.groupby("A")
+ grouped = df.groupby("A")["C", "D"]
result = getattr(grouped, agg_function)()
expected = getattr(df.loc[:, ["A", "C", "D"]].groupby("A"), agg_function)()
tm.assert_frame_equal(result, expected)
@@ -1126,8 +1126,8 @@ def test_groupby_with_hier_columns():
def test_grouping_ndarray(df):
grouped = df.groupby(df["A"].values)
- result = grouped.sum()
- expected = df.groupby("A").sum()
+ result = grouped.sum(numeric_only=True)
+ expected = df.groupby("A").sum(numeric_only=True)
tm.assert_frame_equal(
result, expected, check_names=False
) # Note: no names when grouping by value
@@ -2549,7 +2549,7 @@ def test_groupby_aggregation_numeric_with_non_numeric_dtype():
)
gb = df.groupby(by=["x"])
- result = gb.sum()
+ result = gb.sum(numeric_only=True)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index d4b21633309db..57219c1c32d5b 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -105,14 +105,14 @@ def test_groupby_with_timegrouper(self):
)
expected.iloc[[0, 6, 18], 0] = np.array([24, 6, 9], dtype="int64")
- result1 = df.resample("5D").sum()
+ result1 = df.resample("5D").sum()["Quantity"].to_frame()
tm.assert_frame_equal(result1, expected)
df_sorted = df.sort_index()
- result2 = df_sorted.groupby(Grouper(freq="5D")).sum()
+ result2 = df_sorted.groupby(Grouper(freq="5D")).sum()["Quantity"].to_frame()
tm.assert_frame_equal(result2, expected)
- result3 = df.groupby(Grouper(freq="5D")).sum()
+ result3 = df.groupby(Grouper(freq="5D")).sum()["Quantity"].to_frame()
tm.assert_frame_equal(result3, expected)
@pytest.mark.parametrize("should_sort", [True, False])
@@ -185,8 +185,7 @@ def test_timegrouper_with_reg_groups(self):
],
}
).set_index(["Date", "Buyer"])
-
- result = df.groupby([Grouper(freq="A"), "Buyer"]).sum()
+ result = df.groupby([Grouper(freq="A"), "Buyer"]).sum(numeric_only=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
@@ -201,7 +200,7 @@ def test_timegrouper_with_reg_groups(self):
],
}
).set_index(["Date", "Buyer"])
- result = df.groupby([Grouper(freq="6MS"), "Buyer"]).sum()
+ result = df.groupby([Grouper(freq="6MS"), "Buyer"]).sum(numeric_only=True)
tm.assert_frame_equal(result, expected)
df_original = DataFrame(
@@ -239,10 +238,10 @@ def test_timegrouper_with_reg_groups(self):
}
).set_index(["Date", "Buyer"])
- result = df.groupby([Grouper(freq="1D"), "Buyer"]).sum()
+ result = df.groupby([Grouper(freq="1D"), "Buyer"]).sum(numeric_only=True)
tm.assert_frame_equal(result, expected)
- result = df.groupby([Grouper(freq="1M"), "Buyer"]).sum()
+ result = df.groupby([Grouper(freq="1M"), "Buyer"]).sum(numeric_only=True)
expected = DataFrame(
{
"Buyer": "Carl Joe Mark".split(),
@@ -258,7 +257,9 @@ def test_timegrouper_with_reg_groups(self):
# passing the name
df = df.reset_index()
- result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum()
+ result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum(
+ numeric_only=True
+ )
tm.assert_frame_equal(result, expected)
with pytest.raises(KeyError, match="'The grouper name foo is not found'"):
@@ -266,9 +267,13 @@ def test_timegrouper_with_reg_groups(self):
# passing the level
df = df.set_index("Date")
- result = df.groupby([Grouper(freq="1M", level="Date"), "Buyer"]).sum()
+ result = df.groupby([Grouper(freq="1M", level="Date"), "Buyer"]).sum(
+ numeric_only=True
+ )
tm.assert_frame_equal(result, expected)
- result = df.groupby([Grouper(freq="1M", level=0), "Buyer"]).sum()
+ result = df.groupby([Grouper(freq="1M", level=0), "Buyer"]).sum(
+ numeric_only=True
+ )
tm.assert_frame_equal(result, expected)
with pytest.raises(ValueError, match="The level foo is not valid"):
@@ -277,7 +282,9 @@ def test_timegrouper_with_reg_groups(self):
# multi names
df = df.copy()
df["Date"] = df.index + offsets.MonthEnd(2)
- result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum()
+ result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum(
+ numeric_only=True
+ )
expected = DataFrame(
{
"Buyer": "Carl Joe Mark".split(),
@@ -306,18 +313,18 @@ def test_timegrouper_with_reg_groups(self):
[datetime(2013, 10, 31, 0, 0)], freq=offsets.MonthEnd(), name="Date"
),
)
- result = df.groupby(Grouper(freq="1M")).sum()
+ result = df.groupby(Grouper(freq="1M")).sum(numeric_only=True)
tm.assert_frame_equal(result, expected)
- result = df.groupby([Grouper(freq="1M")]).sum()
+ result = df.groupby([Grouper(freq="1M")]).sum(numeric_only=True)
tm.assert_frame_equal(result, expected)
expected.index = expected.index.shift(1)
assert expected.index.freq == offsets.MonthEnd()
- result = df.groupby(Grouper(freq="1M", key="Date")).sum()
+ result = df.groupby(Grouper(freq="1M", key="Date")).sum(numeric_only=True)
tm.assert_frame_equal(result, expected)
- result = df.groupby([Grouper(freq="1M", key="Date")]).sum()
+ result = df.groupby([Grouper(freq="1M", key="Date")]).sum(numeric_only=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("freq", ["D", "M", "A", "Q-APR"])
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 10fabe234d218..77a9118097bf2 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -90,7 +90,7 @@ def test_groupby_resample_on_api():
expected = df.set_index("dates").groupby("key").resample("D").mean()
- result = df.groupby("key").resample("D", on="dates").mean()
+ result = df.groupby("key").resample("D", on="dates").mean()["values"].to_frame()
tm.assert_frame_equal(result, expected)
@@ -169,7 +169,7 @@ def tests_skip_nuisance(test_frame):
tm.assert_frame_equal(result, expected)
expected = r[["A", "B", "C"]].sum()
- result = r.sum()
+ result = r.sum()[["A", "B", "C"]]
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 594b6b44aafa1..ef9a6c566322a 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -401,7 +401,7 @@ def test_resample_groupby_agg():
df["date"] = pd.to_datetime(df["date"])
resampled = df.groupby("cat").resample("Y", on="date")
- expected = resampled.sum()
+ expected = resampled.sum()["num"].to_frame()
result = resampled.agg({"num": "sum"})
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 48a55022aa484..147ffadf69b09 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -559,7 +559,7 @@ def test_mixed_type_join_with_suffix(self):
df.insert(5, "dt", "foo")
grouped = df.groupby("id")
- mn = grouped.mean()
+ mn = grouped.mean(numeric_only=True)
cn = grouped.count()
# it works!
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 88607f4b036a0..ab7d58a3fc72f 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -898,14 +898,18 @@ def _check_output(
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
- table = self.data.pivot_table(index=["AA", "BB"], margins=True, aggfunc=np.mean)
+ table = self.data[["AA", "DD", "EE", "FF"]].pivot_table(
+ index=["AA"], margins=True, aggfunc=np.mean
+ )
for value_col in table.columns:
- totals = table.loc[("All", ""), value_col]
+ totals = table.loc[("All"), value_col]
assert totals == self.data[value_col].mean()
- table = self.data.pivot_table(index=["AA", "BB"], margins=True, aggfunc="mean")
+ table = self.data[["AA", "DD", "EE", "FF"]].pivot_table(
+ index=["AA"], margins=True, aggfunc="mean"
+ )
for item in ["DD", "EE", "FF"]:
- totals = table.loc[("All", ""), item]
+ totals = table.loc[("All"), item]
assert totals == self.data[item].mean()
@pytest.mark.parametrize(
@@ -959,7 +963,9 @@ def test_margin_with_only_columns_defined(
}
)
- result = df.pivot_table(columns=columns, margins=True, aggfunc=aggfunc)
+ result = df[["A", "B", "D", "E"]].pivot_table(
+ columns=columns, margins=True, aggfunc=aggfunc
+ )
expected = DataFrame(values, index=Index(["D", "E"]), columns=expected_columns)
tm.assert_frame_equal(result, expected)
@@ -1984,8 +1990,12 @@ def test_pivot_string_as_func(self):
def test_pivot_string_func_vs_func(self, f, f_numpy):
# GH #18713
# for consistency purposes
- result = pivot_table(self.data, index="A", columns="B", aggfunc=f)
- expected = pivot_table(self.data, index="A", columns="B", aggfunc=f_numpy)
+ result = pivot_table(
+ self.data[["D", "E", "F"]], index="D", columns="E", aggfunc=f
+ )
+ expected = pivot_table(
+ self.data[["D", "E", "F"]], index="D", columns="E", aggfunc=f_numpy
+ )
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
| - [x] closes #44132
- [x] closes #40148
- [x] closes #29033
- [x] closes #22275
- [x] closes #18869
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44142 | 2021-10-22T07:17:43Z | 2022-01-16T18:08:33Z | null | 2022-01-16T18:08:34Z |
Move moments/test_moments_rolling_*.py tests to windows/test_rolling_*.py | diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py
index fd8c11bbace6e..7b1aa93b5923a 100644
--- a/pandas/tests/window/conftest.py
+++ b/pandas/tests/window/conftest.py
@@ -1,11 +1,17 @@
-from datetime import timedelta
+from datetime import (
+ datetime,
+ timedelta,
+)
+import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
+ Series,
+ bdate_range,
to_datetime,
)
@@ -235,3 +241,23 @@ def pairwise_other_frame():
[[None, 1, 1], [None, 1, 2], [None, 3, 2], [None, 8, 1]],
columns=["Y", "Z", "X"],
)
+
+
+@pytest.fixture
+def series():
+ """Make mocked series as fixture."""
+ arr = np.random.randn(100)
+ locs = np.arange(20, 40)
+ arr[locs] = np.NaN
+ series = Series(arr, index=bdate_range(datetime(2009, 1, 1), periods=100))
+ return series
+
+
+@pytest.fixture
+def frame():
+ """Make mocked frame as fixture."""
+ return DataFrame(
+ np.random.randn(100, 10),
+ index=bdate_range(datetime(2009, 1, 1), periods=100),
+ columns=np.arange(10),
+ )
diff --git a/pandas/tests/window/moments/conftest.py b/pandas/tests/window/moments/conftest.py
index 829df1f3bfe2f..e843caa48f6d7 100644
--- a/pandas/tests/window/moments/conftest.py
+++ b/pandas/tests/window/moments/conftest.py
@@ -1,36 +1,13 @@
-from datetime import datetime
-
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
- bdate_range,
notna,
)
-@pytest.fixture
-def series():
- """Make mocked series as fixture."""
- arr = np.random.randn(100)
- locs = np.arange(20, 40)
- arr[locs] = np.NaN
- series = Series(arr, index=bdate_range(datetime(2009, 1, 1), periods=100))
- return series
-
-
-@pytest.fixture
-def frame():
- """Make mocked frame as fixture."""
- return DataFrame(
- np.random.randn(100, 10),
- index=bdate_range(datetime(2009, 1, 1), periods=100),
- columns=np.arange(10),
- )
-
-
# create the data only once as we are not setting it
def _create_consistency_data():
def create_series():
diff --git a/pandas/tests/window/moments/test_moments_rolling_functions.py b/pandas/tests/window/test_rolling_functions.py
similarity index 100%
rename from pandas/tests/window/moments/test_moments_rolling_functions.py
rename to pandas/tests/window/test_rolling_functions.py
diff --git a/pandas/tests/window/moments/test_moments_rolling_quantile.py b/pandas/tests/window/test_rolling_quantile.py
similarity index 100%
rename from pandas/tests/window/moments/test_moments_rolling_quantile.py
rename to pandas/tests/window/test_rolling_quantile.py
diff --git a/pandas/tests/window/moments/test_moments_rolling_skew_kurt.py b/pandas/tests/window/test_rolling_skew_kurt.py
similarity index 100%
rename from pandas/tests/window/moments/test_moments_rolling_skew_kurt.py
rename to pandas/tests/window/test_rolling_skew_kurt.py
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
A part of addressing (#37535), moving some tests under the window directory.
| https://api.github.com/repos/pandas-dev/pandas/pulls/44141 | 2021-10-22T03:49:04Z | 2021-10-24T01:45:19Z | 2021-10-24T01:45:19Z | 2021-10-24T01:45:25Z |
Move moments/test_moments_rolling_apply.py to window/test_apply.py | diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py
index fd8c11bbace6e..7b1aa93b5923a 100644
--- a/pandas/tests/window/conftest.py
+++ b/pandas/tests/window/conftest.py
@@ -1,11 +1,17 @@
-from datetime import timedelta
+from datetime import (
+ datetime,
+ timedelta,
+)
+import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
+ Series,
+ bdate_range,
to_datetime,
)
@@ -235,3 +241,23 @@ def pairwise_other_frame():
[[None, 1, 1], [None, 1, 2], [None, 3, 2], [None, 8, 1]],
columns=["Y", "Z", "X"],
)
+
+
+@pytest.fixture
+def series():
+ """Make mocked series as fixture."""
+ arr = np.random.randn(100)
+ locs = np.arange(20, 40)
+ arr[locs] = np.NaN
+ series = Series(arr, index=bdate_range(datetime(2009, 1, 1), periods=100))
+ return series
+
+
+@pytest.fixture
+def frame():
+ """Make mocked frame as fixture."""
+ return DataFrame(
+ np.random.randn(100, 10),
+ index=bdate_range(datetime(2009, 1, 1), periods=100),
+ columns=np.arange(10),
+ )
diff --git a/pandas/tests/window/moments/test_moments_rolling_apply.py b/pandas/tests/window/moments/test_moments_rolling_apply.py
deleted file mode 100644
index d7ce1c92bcd83..0000000000000
--- a/pandas/tests/window/moments/test_moments_rolling_apply.py
+++ /dev/null
@@ -1,157 +0,0 @@
-import warnings
-
-import numpy as np
-import pytest
-
-from pandas import (
- DataFrame,
- Series,
- concat,
- isna,
- notna,
-)
-import pandas._testing as tm
-
-import pandas.tseries.offsets as offsets
-
-
-def f(x):
- # suppress warnings about empty slices, as we are deliberately testing
- # with a 0-length Series
- with warnings.catch_warnings():
- warnings.filterwarnings(
- "ignore",
- message=".*(empty slice|0 for slice).*",
- category=RuntimeWarning,
- )
- return x[np.isfinite(x)].mean()
-
-
-def test_series(raw, series):
- result = series.rolling(50).apply(f, raw=raw)
- assert isinstance(result, Series)
- tm.assert_almost_equal(result.iloc[-1], np.mean(series[-50:]))
-
-
-def test_frame(raw, frame):
- result = frame.rolling(50).apply(f, raw=raw)
- assert isinstance(result, DataFrame)
- tm.assert_series_equal(
- result.iloc[-1, :],
- frame.iloc[-50:, :].apply(np.mean, axis=0, raw=raw),
- check_names=False,
- )
-
-
-def test_time_rule_series(raw, series):
- win = 25
- minp = 10
- ser = series[::2].resample("B").mean()
- series_result = ser.rolling(window=win, min_periods=minp).apply(f, raw=raw)
- last_date = series_result.index[-1]
- prev_date = last_date - 24 * offsets.BDay()
-
- trunc_series = series[::2].truncate(prev_date, last_date)
- tm.assert_almost_equal(series_result[-1], np.mean(trunc_series))
-
-
-def test_time_rule_frame(raw, frame):
- win = 25
- minp = 10
- frm = frame[::2].resample("B").mean()
- frame_result = frm.rolling(window=win, min_periods=minp).apply(f, raw=raw)
- last_date = frame_result.index[-1]
- prev_date = last_date - 24 * offsets.BDay()
-
- trunc_frame = frame[::2].truncate(prev_date, last_date)
- tm.assert_series_equal(
- frame_result.xs(last_date),
- trunc_frame.apply(np.mean, raw=raw),
- check_names=False,
- )
-
-
-def test_nans(raw):
- obj = Series(np.random.randn(50))
- obj[:10] = np.NaN
- obj[-10:] = np.NaN
-
- result = obj.rolling(50, min_periods=30).apply(f, raw=raw)
- tm.assert_almost_equal(result.iloc[-1], np.mean(obj[10:-10]))
-
- # min_periods is working correctly
- result = obj.rolling(20, min_periods=15).apply(f, raw=raw)
- assert isna(result.iloc[23])
- assert not isna(result.iloc[24])
-
- assert not isna(result.iloc[-6])
- assert isna(result.iloc[-5])
-
- obj2 = Series(np.random.randn(20))
- result = obj2.rolling(10, min_periods=5).apply(f, raw=raw)
- assert isna(result.iloc[3])
- assert notna(result.iloc[4])
-
- result0 = obj.rolling(20, min_periods=0).apply(f, raw=raw)
- result1 = obj.rolling(20, min_periods=1).apply(f, raw=raw)
- tm.assert_almost_equal(result0, result1)
-
-
-@pytest.mark.parametrize("minp", [0, 99, 100])
-def test_min_periods(raw, series, minp):
- result = series.rolling(len(series) + 1, min_periods=minp).apply(f, raw=raw)
- expected = series.rolling(len(series), min_periods=minp).apply(f, raw=raw)
- nan_mask = isna(result)
- tm.assert_series_equal(nan_mask, isna(expected))
-
- nan_mask = ~nan_mask
- tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
-
-
-def test_center(raw):
- obj = Series(np.random.randn(50))
- obj[:10] = np.NaN
- obj[-10:] = np.NaN
-
- result = obj.rolling(20, min_periods=15, center=True).apply(f, raw=raw)
- expected = (
- concat([obj, Series([np.NaN] * 9)])
- .rolling(20, min_periods=15)
- .apply(f, raw=raw)[9:]
- .reset_index(drop=True)
- )
- tm.assert_series_equal(result, expected)
-
-
-def test_center_reindex_series(raw, series):
- # shifter index
- s = [f"x{x:d}" for x in range(12)]
- minp = 10
-
- series_xp = (
- series.reindex(list(series.index) + s)
- .rolling(window=25, min_periods=minp)
- .apply(f, raw=raw)
- .shift(-12)
- .reindex(series.index)
- )
- series_rs = series.rolling(window=25, min_periods=minp, center=True).apply(
- f, raw=raw
- )
- tm.assert_series_equal(series_xp, series_rs)
-
-
-def test_center_reindex_frame(raw, frame):
- # shifter index
- s = [f"x{x:d}" for x in range(12)]
- minp = 10
-
- frame_xp = (
- frame.reindex(list(frame.index) + s)
- .rolling(window=25, min_periods=minp)
- .apply(f, raw=raw)
- .shift(-12)
- .reindex(frame.index)
- )
- frame_rs = frame.rolling(window=25, min_periods=minp, center=True).apply(f, raw=raw)
- tm.assert_frame_equal(frame_xp, frame_rs)
diff --git a/pandas/tests/window/test_apply.py b/pandas/tests/window/test_apply.py
index baab562b4d177..886ed676dabb2 100644
--- a/pandas/tests/window/test_apply.py
+++ b/pandas/tests/window/test_apply.py
@@ -1,3 +1,5 @@
+import warnings
+
import numpy as np
import pytest
@@ -7,10 +9,27 @@
MultiIndex,
Series,
Timestamp,
+ concat,
date_range,
+ isna,
+ notna,
)
import pandas._testing as tm
+import pandas.tseries.offsets as offsets
+
+
+def f(x):
+ # suppress warnings about empty slices, as we are deliberately testing
+ # with a 0-length Series
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ message=".*(empty slice|0 for slice).*",
+ category=RuntimeWarning,
+ )
+ return x[np.isfinite(x)].mean()
+
@pytest.mark.parametrize("bad_raw", [None, 1, 0])
def test_rolling_apply_invalid_raw(bad_raw):
@@ -158,3 +177,133 @@ def foo(x, par):
result = gb_rolling.apply(foo, args=args_kwargs[0], kwargs=args_kwargs[1])
tm.assert_series_equal(result, expected)
+
+
+def test_nans(raw):
+ obj = Series(np.random.randn(50))
+ obj[:10] = np.NaN
+ obj[-10:] = np.NaN
+
+ result = obj.rolling(50, min_periods=30).apply(f, raw=raw)
+ tm.assert_almost_equal(result.iloc[-1], np.mean(obj[10:-10]))
+
+ # min_periods is working correctly
+ result = obj.rolling(20, min_periods=15).apply(f, raw=raw)
+ assert isna(result.iloc[23])
+ assert not isna(result.iloc[24])
+
+ assert not isna(result.iloc[-6])
+ assert isna(result.iloc[-5])
+
+ obj2 = Series(np.random.randn(20))
+ result = obj2.rolling(10, min_periods=5).apply(f, raw=raw)
+ assert isna(result.iloc[3])
+ assert notna(result.iloc[4])
+
+ result0 = obj.rolling(20, min_periods=0).apply(f, raw=raw)
+ result1 = obj.rolling(20, min_periods=1).apply(f, raw=raw)
+ tm.assert_almost_equal(result0, result1)
+
+
+def test_center(raw):
+ obj = Series(np.random.randn(50))
+ obj[:10] = np.NaN
+ obj[-10:] = np.NaN
+
+ result = obj.rolling(20, min_periods=15, center=True).apply(f, raw=raw)
+ expected = (
+ concat([obj, Series([np.NaN] * 9)])
+ .rolling(20, min_periods=15)
+ .apply(f, raw=raw)[9:]
+ .reset_index(drop=True)
+ )
+ tm.assert_series_equal(result, expected)
+
+
+def test_series(raw, series):
+ result = series.rolling(50).apply(f, raw=raw)
+ assert isinstance(result, Series)
+ tm.assert_almost_equal(result.iloc[-1], np.mean(series[-50:]))
+
+
+def test_frame(raw, frame):
+ result = frame.rolling(50).apply(f, raw=raw)
+ assert isinstance(result, DataFrame)
+ tm.assert_series_equal(
+ result.iloc[-1, :],
+ frame.iloc[-50:, :].apply(np.mean, axis=0, raw=raw),
+ check_names=False,
+ )
+
+
+def test_time_rule_series(raw, series):
+ win = 25
+ minp = 10
+ ser = series[::2].resample("B").mean()
+ series_result = ser.rolling(window=win, min_periods=minp).apply(f, raw=raw)
+ last_date = series_result.index[-1]
+ prev_date = last_date - 24 * offsets.BDay()
+
+ trunc_series = series[::2].truncate(prev_date, last_date)
+ tm.assert_almost_equal(series_result[-1], np.mean(trunc_series))
+
+
+def test_time_rule_frame(raw, frame):
+ win = 25
+ minp = 10
+ frm = frame[::2].resample("B").mean()
+ frame_result = frm.rolling(window=win, min_periods=minp).apply(f, raw=raw)
+ last_date = frame_result.index[-1]
+ prev_date = last_date - 24 * offsets.BDay()
+
+ trunc_frame = frame[::2].truncate(prev_date, last_date)
+ tm.assert_series_equal(
+ frame_result.xs(last_date),
+ trunc_frame.apply(np.mean, raw=raw),
+ check_names=False,
+ )
+
+
+@pytest.mark.parametrize("minp", [0, 99, 100])
+def test_min_periods(raw, series, minp):
+ result = series.rolling(len(series) + 1, min_periods=minp).apply(f, raw=raw)
+ expected = series.rolling(len(series), min_periods=minp).apply(f, raw=raw)
+ nan_mask = isna(result)
+ tm.assert_series_equal(nan_mask, isna(expected))
+
+ nan_mask = ~nan_mask
+ tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
+
+
+def test_center_reindex_series(raw, series):
+ # shifter index
+ s = [f"x{x:d}" for x in range(12)]
+ minp = 10
+
+ series_xp = (
+ series.reindex(list(series.index) + s)
+ .rolling(window=25, min_periods=minp)
+ .apply(f, raw=raw)
+ .shift(-12)
+ .reindex(series.index)
+ )
+ series_rs = series.rolling(window=25, min_periods=minp, center=True).apply(
+ f, raw=raw
+ )
+ tm.assert_series_equal(series_xp, series_rs)
+
+
+def test_center_reindex_frame(raw, frame):
+ # shifter index
+ s = [f"x{x:d}" for x in range(12)]
+ minp = 10
+
+ frame_xp = (
+ frame.reindex(list(frame.index) + s)
+ .rolling(window=25, min_periods=minp)
+ .apply(f, raw=raw)
+ .shift(-12)
+ .reindex(frame.index)
+ )
+ frame_rs = frame.rolling(window=25, min_periods=minp, center=True).apply(f, raw=raw)
+ tm.assert_frame_equal(frame_xp, frame_rs)
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
Since some tests are not running on `windows/moments` (https://github.com/pandas-dev/pandas/issues/37535), moving some lighter apply tests to `test_apply.py`
| https://api.github.com/repos/pandas-dev/pandas/pulls/44140 | 2021-10-22T03:24:00Z | 2021-10-24T18:16:53Z | 2021-10-24T18:16:53Z | 2021-10-24T19:05:42Z |
CLN: remove pyarrow<1 compat | diff --git a/ci/deps/actions-38-locale.yaml b/ci/deps/actions-38-locale.yaml
index 13b132109effb..b7043735d9457 100644
--- a/ci/deps/actions-38-locale.yaml
+++ b/ci/deps/actions-38-locale.yaml
@@ -35,7 +35,7 @@ dependencies:
- xlsxwriter
- xlwt
- moto
- - pyarrow=1.0.0
+ - pyarrow=1.0.1
- pip
- pip:
- pyxlsb
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index fd5c46f7a6d5a..57b13fef9ad8a 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -20,7 +20,7 @@
np_version_under1p20,
)
from pandas.compat.pyarrow import (
- pa_version_under1p0,
+ pa_version_under1p01,
pa_version_under2p0,
pa_version_under3p0,
pa_version_under4p0,
@@ -153,7 +153,7 @@ def get_lzma_file(lzma):
"np_datetime64_compat",
"np_version_under1p19",
"np_version_under1p20",
- "pa_version_under1p0",
+ "pa_version_under1p01",
"pa_version_under2p0",
"pa_version_under3p0",
"pa_version_under4p0",
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index adf20f3322a79..1cf57404bbe01 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -21,7 +21,7 @@
"odfpy": "1.4.1",
"openpyxl": "3.0.2",
"pandas_gbq": "0.14.0",
- "pyarrow": "0.17.0",
+ "pyarrow": "1.0.1",
"pytest": "6.0",
"pyxlsb": "1.0.6",
"s3fs": "0.4.0",
diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py
index f9b9409317774..e6ac0c59e789a 100644
--- a/pandas/compat/pyarrow.py
+++ b/pandas/compat/pyarrow.py
@@ -7,14 +7,14 @@
_pa_version = pa.__version__
_palv = Version(_pa_version)
- pa_version_under1p0 = _palv < Version("1.0.0")
+ pa_version_under1p01 = _palv < Version("1.0.1")
pa_version_under2p0 = _palv < Version("2.0.0")
pa_version_under3p0 = _palv < Version("3.0.0")
pa_version_under4p0 = _palv < Version("4.0.0")
pa_version_under5p0 = _palv < Version("5.0.0")
pa_version_under6p0 = _palv < Version("6.0.0")
except ImportError:
- pa_version_under1p0 = True
+ pa_version_under1p01 = True
pa_version_under2p0 = True
pa_version_under3p0 = True
pa_version_under4p0 = True
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index e9fb5bdf80045..5a8e5f488fbf2 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -19,7 +19,7 @@
Scalar,
type_t,
)
-from pandas.compat import pa_version_under1p0
+from pandas.compat import pa_version_under1p01
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.base import (
@@ -104,11 +104,10 @@ def __init__(self, storage=None):
raise ValueError(
f"Storage must be 'python' or 'pyarrow'. Got {storage} instead."
)
- if storage == "pyarrow" and pa_version_under1p0:
+ if storage == "pyarrow" and pa_version_under1p01:
raise ImportError(
"pyarrow>=1.0.0 is required for PyArrow backed StringArray."
)
-
self.storage = storage
@property
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index b3278a81e93b7..b1daf0e393ef0 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -27,7 +27,7 @@
npt,
)
from pandas.compat import (
- pa_version_under1p0,
+ pa_version_under1p01,
pa_version_under2p0,
pa_version_under3p0,
pa_version_under4p0,
@@ -63,10 +63,7 @@
)
from pandas.core.strings.object_array import ObjectStringArrayMixin
-# PyArrow backed StringArrays are available starting at 1.0.0, but this
-# file is imported from even if pyarrow is < 1.0.0, before pyarrow.compute
-# and its compute functions existed. GH38801
-if not pa_version_under1p0:
+if not pa_version_under1p01:
import pyarrow as pa
import pyarrow.compute as pc
@@ -87,7 +84,7 @@
def _chk_pyarrow_available() -> None:
- if pa_version_under1p0:
+ if pa_version_under1p01:
msg = "pyarrow>=1.0.0 is required for PyArrow backed StringArray."
raise ImportError(msg)
diff --git a/pandas/tests/arrays/masked/test_arrow_compat.py b/pandas/tests/arrays/masked/test_arrow_compat.py
index d66a603ad568c..3f0a1b5d0eaf3 100644
--- a/pandas/tests/arrays/masked/test_arrow_compat.py
+++ b/pandas/tests/arrays/masked/test_arrow_compat.py
@@ -1,12 +1,10 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
import pandas._testing as tm
-pa = pytest.importorskip("pyarrow", minversion="0.17.0")
+pa = pytest.importorskip("pyarrow", minversion="1.0.1")
from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask
@@ -29,7 +27,6 @@ def test_arrow_array(data):
assert arr.equals(expected)
-@td.skip_if_no("pyarrow")
def test_arrow_roundtrip(data):
df = pd.DataFrame({"a": data})
table = pa.table(df)
@@ -39,7 +36,6 @@ def test_arrow_roundtrip(data):
tm.assert_frame_equal(result, df)
-@td.skip_if_no("pyarrow")
def test_arrow_load_from_zero_chunks(data):
# GH-41040
@@ -54,7 +50,6 @@ def test_arrow_load_from_zero_chunks(data):
tm.assert_frame_equal(result, df)
-@td.skip_if_no("pyarrow")
def test_arrow_from_arrow_uint():
# https://github.com/pandas-dev/pandas/issues/31896
# possible mismatch in types
@@ -66,7 +61,6 @@ def test_arrow_from_arrow_uint():
tm.assert_extension_array_equal(result, expected)
-@td.skip_if_no("pyarrow")
def test_arrow_sliced(data):
# https://github.com/pandas-dev/pandas/issues/38525
@@ -161,7 +155,6 @@ def test_pyarrow_array_to_numpy_and_mask(np_dtype_to_arrays):
tm.assert_numpy_array_equal(mask, mask_expected_empty)
-@td.skip_if_no("pyarrow")
def test_from_arrow_type_error(request, data):
# ensure that __from_arrow__ returns a TypeError when getting a wrong
# array type
diff --git a/pandas/tests/arrays/period/test_arrow_compat.py b/pandas/tests/arrays/period/test_arrow_compat.py
index 5211397f20c36..560299a4a47f5 100644
--- a/pandas/tests/arrays/period/test_arrow_compat.py
+++ b/pandas/tests/arrays/period/test_arrow_compat.py
@@ -1,7 +1,5 @@
import pytest
-import pandas.util._test_decorators as td
-
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
@@ -11,10 +9,9 @@
period_array,
)
-pyarrow_skip = td.skip_if_no("pyarrow", min_version="0.17.0")
+pa = pytest.importorskip("pyarrow", minversion="1.0.1")
-@pyarrow_skip
def test_arrow_extension_type():
from pandas.core.arrays._arrow_utils import ArrowPeriodType
@@ -29,7 +26,6 @@ def test_arrow_extension_type():
assert not hash(p1) == hash(p3)
-@pyarrow_skip
@pytest.mark.parametrize(
"data, freq",
[
@@ -38,8 +34,6 @@ def test_arrow_extension_type():
],
)
def test_arrow_array(data, freq):
- import pyarrow as pa
-
from pandas.core.arrays._arrow_utils import ArrowPeriodType
periods = period_array(data, freq=freq)
@@ -62,10 +56,7 @@ def test_arrow_array(data, freq):
pa.array(periods, type=ArrowPeriodType("T"))
-@pyarrow_skip
def test_arrow_array_missing():
- import pyarrow as pa
-
from pandas.core.arrays._arrow_utils import ArrowPeriodType
arr = PeriodArray([1, 2, 3], freq="D")
@@ -78,10 +69,7 @@ def test_arrow_array_missing():
assert result.storage.equals(expected)
-@pyarrow_skip
def test_arrow_table_roundtrip():
- import pyarrow as pa
-
from pandas.core.arrays._arrow_utils import ArrowPeriodType
arr = PeriodArray([1, 2, 3], freq="D")
@@ -100,10 +88,8 @@ def test_arrow_table_roundtrip():
tm.assert_frame_equal(result, expected)
-@pyarrow_skip
def test_arrow_load_from_zero_chunks():
# GH-41040
- import pyarrow as pa
from pandas.core.arrays._arrow_utils import ArrowPeriodType
@@ -120,10 +106,7 @@ def test_arrow_load_from_zero_chunks():
tm.assert_frame_equal(result, df)
-@pyarrow_skip
def test_arrow_table_roundtrip_without_metadata():
- import pyarrow as pa
-
arr = PeriodArray([1, 2, 3], freq="H")
arr[1] = pd.NaT
df = pd.DataFrame({"a": arr})
diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py
index c3f951adf7f89..265afa89d6530 100644
--- a/pandas/tests/arrays/string_/test_string_arrow.py
+++ b/pandas/tests/arrays/string_/test_string_arrow.py
@@ -3,7 +3,7 @@
import numpy as np
import pytest
-from pandas.compat import pa_version_under1p0
+from pandas.compat import pa_version_under1p01
import pandas as pd
import pandas._testing as tm
@@ -14,7 +14,7 @@
from pandas.core.arrays.string_arrow import ArrowStringArray
skip_if_no_pyarrow = pytest.mark.skipif(
- pa_version_under1p0,
+ pa_version_under1p01,
reason="pyarrow>=1.0.0 is required for PyArrow backed StringArray",
)
@@ -118,7 +118,7 @@ def test_from_sequence_wrong_dtype_raises():
@pytest.mark.skipif(
- not pa_version_under1p0,
+ not pa_version_under1p01,
reason="pyarrow is installed",
)
def test_pyarrow_not_installed_raises():
diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py
index d262f09182a9c..320bfc13f7032 100644
--- a/pandas/tests/extension/arrow/test_bool.py
+++ b/pandas/tests/extension/arrow/test_bool.py
@@ -6,7 +6,7 @@
from pandas.api.types import is_bool_dtype
from pandas.tests.extension import base
-pytest.importorskip("pyarrow", minversion="0.13.0")
+pytest.importorskip("pyarrow", minversion="1.0.1")
from pandas.tests.extension.arrow.arrays import ( # isort:skip
ArrowBoolArray,
diff --git a/pandas/tests/extension/arrow/test_timestamp.py b/pandas/tests/extension/arrow/test_timestamp.py
index c61cc30950a23..fe2c484731019 100644
--- a/pandas/tests/extension/arrow/test_timestamp.py
+++ b/pandas/tests/extension/arrow/test_timestamp.py
@@ -12,7 +12,7 @@
register_extension_dtype,
)
-pytest.importorskip("pyarrow", minversion="0.13.0")
+pytest.importorskip("pyarrow", minversion="1.0.1")
import pyarrow as pa # isort:skip
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index 97ebb3a0d39ba..59c7abc4a4cb8 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -2,14 +2,12 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
import pandas._testing as tm
from pandas.io.feather_format import read_feather, to_feather # isort:skip
-pyarrow = pytest.importorskip("pyarrow")
+pyarrow = pytest.importorskip("pyarrow", minversion="1.0.1")
filter_sparse = pytest.mark.filterwarnings("ignore:The Sparse")
@@ -120,7 +118,6 @@ def test_read_columns(self):
columns = ["col1", "col3"]
self.check_round_trip(df, expected=df[columns], columns=columns)
- @td.skip_if_no("pyarrow", min_version="0.17.1")
def read_columns_different_order(self):
# GH 33878
df = pd.DataFrame({"A": [1, 2], "B": ["x", "y"], "C": [True, False]})
@@ -180,12 +177,10 @@ def test_path_localpath(self):
result = tm.round_trip_localpath(df.to_feather, read_feather)
tm.assert_frame_equal(df, result)
- @td.skip_if_no("pyarrow", min_version="0.17.0")
def test_passthrough_keywords(self):
df = tm.makeDataFrame().reset_index()
self.check_round_trip(df, write_kwargs={"version": 1})
- @td.skip_if_no("pyarrow")
@tm.network
def test_http_path(self, feather_file):
# GH 29055
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index ec724602c5249..270cb402483bf 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -15,7 +15,6 @@
from pandas.compat import is_platform_windows
from pandas.compat.pyarrow import (
- pa_version_under1p0,
pa_version_under2p0,
pa_version_under5p0,
)
@@ -784,11 +783,7 @@ def test_s3_roundtrip_for_dir(
# only used if partition field is string, but this changed again to use
# category dtype for all types (not only strings) in pyarrow 2.0.0
if partition_col:
- partition_col_type = (
- "int32"
- if (not pa_version_under1p0) and pa_version_under2p0
- else "category"
- )
+ partition_col_type = "int32" if pa_version_under2p0 else "category"
expected_df[partition_col] = expected_df[partition_col].astype(
partition_col_type
| xref #41329 #44064 | https://api.github.com/repos/pandas-dev/pandas/pulls/44139 | 2021-10-22T02:22:34Z | 2021-11-15T14:00:16Z | 2021-11-15T14:00:16Z | 2022-11-18T02:20:00Z |
ENH: ExtensionArray.insert | diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst
index e2e8c94ef8fc6..ce8d8d5c2ca10 100644
--- a/doc/source/reference/extensions.rst
+++ b/doc/source/reference/extensions.rst
@@ -48,6 +48,7 @@ objects.
api.extensions.ExtensionArray.equals
api.extensions.ExtensionArray.factorize
api.extensions.ExtensionArray.fillna
+ api.extensions.ExtensionArray.insert
api.extensions.ExtensionArray.isin
api.extensions.ExtensionArray.isna
api.extensions.ExtensionArray.ravel
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 3769c686da029..cf9820c3aa8f8 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -31,6 +31,7 @@
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
+ validate_insert_loc,
)
from pandas.core.dtypes.common import is_dtype_equal
@@ -359,6 +360,8 @@ def insert(
-------
type(self)
"""
+ loc = validate_insert_loc(loc, len(self))
+
code = self._validate_scalar(item)
new_vals = np.concatenate(
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index bf54f7166e14d..9b25a1b5abccd 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -47,6 +47,7 @@
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
+ validate_insert_loc,
)
from pandas.core.dtypes.cast import maybe_cast_to_extension_array
@@ -123,6 +124,7 @@ class ExtensionArray:
factorize
fillna
equals
+ insert
isin
isna
ravel
@@ -1388,6 +1390,34 @@ def delete(self: ExtensionArrayT, loc: PositionalIndexer) -> ExtensionArrayT:
indexer = np.delete(np.arange(len(self)), loc)
return self.take(indexer)
+ def insert(self: ExtensionArrayT, loc: int, item) -> ExtensionArrayT:
+ """
+ Insert an item at the given position.
+
+ Parameters
+ ----------
+ loc : int
+ item : scalar-like
+
+ Returns
+ -------
+ same type as self
+
+ Notes
+ -----
+ This method should be both type and dtype-preserving. If the item
+ cannot be held in an array of this type/dtype, either ValueError or
+ TypeError should be raised.
+
+ The default implementation relies on _from_sequence to raise on invalid
+ items.
+ """
+ loc = validate_insert_loc(loc, len(self))
+
+ item_arr = type(self)._from_sequence([item], dtype=self.dtype)
+
+ return type(self)._concat_same_type([self[:loc], item_arr, self[loc:]])
+
@classmethod
def _empty(cls, shape: Shape, dtype: ExtensionDtype):
"""
diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py
index 12220e825aed4..88b26dcc4d707 100644
--- a/pandas/tests/arithmetic/test_interval.py
+++ b/pandas/tests/arithmetic/test_interval.py
@@ -28,16 +28,16 @@
(Index([0, 2, 4, 4]), Index([1, 3, 5, 8])),
(Index([0.0, 1.0, 2.0, np.nan]), Index([1.0, 2.0, 3.0, np.nan])),
(
- timedelta_range("0 days", periods=3).insert(4, pd.NaT),
- timedelta_range("1 day", periods=3).insert(4, pd.NaT),
+ timedelta_range("0 days", periods=3).insert(3, pd.NaT),
+ timedelta_range("1 day", periods=3).insert(3, pd.NaT),
),
(
- date_range("20170101", periods=3).insert(4, pd.NaT),
- date_range("20170102", periods=3).insert(4, pd.NaT),
+ date_range("20170101", periods=3).insert(3, pd.NaT),
+ date_range("20170102", periods=3).insert(3, pd.NaT),
),
(
- date_range("20170101", periods=3, tz="US/Eastern").insert(4, pd.NaT),
- date_range("20170102", periods=3, tz="US/Eastern").insert(4, pd.NaT),
+ date_range("20170101", periods=3, tz="US/Eastern").insert(3, pd.NaT),
+ date_range("20170102", periods=3, tz="US/Eastern").insert(3, pd.NaT),
),
],
ids=lambda x: str(x[0].dtype),
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index d390d4b5d8143..c96e2fb49e397 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -511,6 +511,48 @@ def test_delete(self, data):
expected = data._concat_same_type([data[[0]], data[[2]], data[4:]])
self.assert_extension_array_equal(result, expected)
+ def test_insert(self, data):
+ # insert at the beginning
+ result = data[1:].insert(0, data[0])
+ self.assert_extension_array_equal(result, data)
+
+ result = data[1:].insert(-len(data[1:]), data[0])
+ self.assert_extension_array_equal(result, data)
+
+ # insert at the middle
+ result = data[:-1].insert(4, data[-1])
+
+ taker = np.arange(len(data))
+ taker[5:] = taker[4:-1]
+ taker[4] = len(data) - 1
+ expected = data.take(taker)
+ self.assert_extension_array_equal(result, expected)
+
+ def test_insert_invalid(self, data, invalid_scalar):
+ item = invalid_scalar
+
+ with pytest.raises((TypeError, ValueError)):
+ data.insert(0, item)
+
+ with pytest.raises((TypeError, ValueError)):
+ data.insert(4, item)
+
+ with pytest.raises((TypeError, ValueError)):
+ data.insert(len(data) - 1, item)
+
+ def test_insert_invalid_loc(self, data):
+ ub = len(data)
+
+ with pytest.raises(IndexError):
+ data.insert(ub + 1, data[0])
+
+ with pytest.raises(IndexError):
+ data.insert(-ub - 1, data[0])
+
+ with pytest.raises(TypeError):
+ # we expect TypeError here instead of IndexError to match np.insert
+ data.insert(1.5, data[0])
+
@pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])
def test_equals(self, data, na_value, as_series, box):
data2 = type(data)._from_sequence([data[0]] * len(data), dtype=data.dtype)
diff --git a/pandas/tests/extension/conftest.py b/pandas/tests/extension/conftest.py
index 1942d737780da..3827ba234cfd8 100644
--- a/pandas/tests/extension/conftest.py
+++ b/pandas/tests/extension/conftest.py
@@ -181,3 +181,15 @@ def as_array(request):
Boolean fixture to support ExtensionDtype _from_sequence method testing.
"""
return request.param
+
+
+@pytest.fixture
+def invalid_scalar(data):
+ """
+ A scalar that *cannot* be held by this ExtensionArray.
+
+ The default should work for most subclasses, but is not guaranteed.
+
+ If the array can hold any item (i.e. object dtype), then use pytest.skip.
+ """
+ return object.__new__(object)
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 7be776819e399..0e3e26e7e9500 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -265,6 +265,18 @@ def test_searchsorted(self, data_for_sorting, as_series):
def test_diff(self, data, periods):
return super().test_diff(data, periods)
+ def test_insert(self, data, request):
+ if data.dtype.numpy_dtype == object:
+ mark = pytest.mark.xfail(reason="Dimension mismatch in np.concatenate")
+ request.node.add_marker(mark)
+
+ super().test_insert(data)
+
+ @skip_nested
+ def test_insert_invalid(self, data, invalid_scalar):
+ # PandasArray[object] can hold anything, so skip
+ super().test_insert_invalid(data, invalid_scalar)
+
class TestArithmetics(BaseNumPyTests, base.BaseArithmeticOpsTests):
divmod_exc = None
diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py
index af86c359c4c00..06b07968f949e 100644
--- a/pandas/tests/extension/test_string.py
+++ b/pandas/tests/extension/test_string.py
@@ -160,6 +160,13 @@ def test_value_counts(self, all_data, dropna):
def test_value_counts_with_normalize(self, data):
pass
+ def test_insert_invalid(self, data, invalid_scalar, request):
+ if data.dtype.storage == "pyarrow":
+ mark = pytest.mark.xfail(reason="casts invalid_scalar to string")
+ request.node.add_marker(mark)
+
+ super().test_insert_invalid(data, invalid_scalar)
+
class TestCasting(base.BaseCastingTests):
pass
diff --git a/pandas/tests/indexes/timedeltas/methods/test_insert.py b/pandas/tests/indexes/timedeltas/methods/test_insert.py
index 809d21db805e0..3af4b6b47fa2f 100644
--- a/pandas/tests/indexes/timedeltas/methods/test_insert.py
+++ b/pandas/tests/indexes/timedeltas/methods/test_insert.py
@@ -136,8 +136,8 @@ def test_insert_empty(self):
result = idx[:0].insert(0, td)
assert result.freq == "D"
- result = idx[:0].insert(1, td)
- assert result.freq == "D"
+ with pytest.raises(IndexError, match="loc must be an integer between"):
+ result = idx[:0].insert(1, td)
- result = idx[:0].insert(-1, td)
- assert result.freq == "D"
+ with pytest.raises(IndexError, match="loc must be an integer between"):
+ result = idx[:0].insert(-1, td)
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index 7e03e3ceea11d..f8bd1ec7bc96a 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -12,7 +12,10 @@
import numpy as np
-from pandas.core.dtypes.common import is_bool
+from pandas.core.dtypes.common import (
+ is_bool,
+ is_integer,
+)
def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
@@ -494,3 +497,21 @@ def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]:
)
return left_right_inclusive
+
+
+def validate_insert_loc(loc: int, length: int) -> int:
+ """
+ Check that we have an integer between -length and length, inclusive.
+
+ Standardize negative loc to within [0, length].
+
+ The exceptions we raise on failure match np.insert.
+ """
+ if not is_integer(loc):
+ raise TypeError(f"loc must be an integer between -{length} and {length}")
+
+ if loc < 0:
+ loc += length
+ if not 0 <= loc <= length:
+ raise IndexError(f"loc must be an integer between -{length} and {length}")
+ return loc
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
Needed for #43930
| https://api.github.com/repos/pandas-dev/pandas/pulls/44138 | 2021-10-22T02:02:02Z | 2021-10-24T14:48:49Z | 2021-10-24T14:48:49Z | 2021-10-24T16:25:52Z |
BUG: yaml.dump(DataFrame) | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index d6ad5eb2003ce..a9d697adc301f 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -528,6 +528,7 @@ I/O
- Bug in :func:`read_json` not handling non-numpy dtypes correctly (especially ``category``) (:issue:`21892`, :issue:`33205`)
- Bug in :func:`json_normalize` where multi-character ``sep`` parameter is incorrectly prefixed to every key (:issue:`43831`)
- Bug in :func:`read_csv` with :code:`float_precision="round_trip"` which did not skip initial/trailing whitespace (:issue:`43713`)
+- Bug in dumping/loading a :class:`DataFrame` with ``yaml.dump(frame)`` (:issue:`42748`)
-
Period
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 78853ce6e41dc..957432df20395 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -565,6 +565,14 @@ cpdef update_blklocs_and_blknos(
return new_blklocs, new_blknos
+def _unpickle_block(values, placement, ndim):
+ # We have to do some gymnastics b/c "ndim" is keyword-only
+
+ from pandas.core.internals.blocks import new_block
+
+ return new_block(values, placement, ndim=ndim)
+
+
@cython.freelist(64)
cdef class SharedBlock:
"""
@@ -588,14 +596,8 @@ cdef class SharedBlock:
self.ndim = ndim
cpdef __reduce__(self):
- # We have to do some gymnastics b/c "ndim" is keyword-only
- from functools import partial
-
- from pandas.core.internals.blocks import new_block
-
- args = (self.values, self.mgr_locs.indexer)
- func = partial(new_block, ndim=self.ndim)
- return func, args
+ args = (self.values, self.mgr_locs.indexer, self.ndim)
+ return _unpickle_block, args
cpdef __setstate__(self, state):
from pandas.core.construction import extract_array
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py
index 583a22d09b110..9344aea8221d5 100644
--- a/pandas/core/internals/array_manager.py
+++ b/pandas/core/internals/array_manager.py
@@ -170,7 +170,12 @@ def set_axis(self, axis: int, new_labels: Index) -> None:
def get_dtypes(self):
return np.array([arr.dtype for arr in self.arrays], dtype="object")
- # TODO setstate getstate
+ def __getstate__(self):
+ return self.arrays, self._axes
+
+ def __setstate__(self, state):
+ self.arrays = state[0]
+ self._axes = state[1]
def __repr__(self) -> str:
output = type(self).__name__
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index a8b05e3178197..f927a0ec0927b 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -167,6 +167,19 @@ def test_pyarrow(df):
tm.assert_frame_equal(result, df)
+def test_yaml_dump(df):
+ # GH#42748
+ yaml = import_module("yaml")
+
+ dumped = yaml.dump(df)
+
+ loaded = yaml.load(dumped, Loader=yaml.Loader)
+ tm.assert_frame_equal(df, loaded)
+
+ loaded2 = yaml.load(dumped, Loader=yaml.UnsafeLoader)
+ tm.assert_frame_equal(df, loaded2)
+
+
def test_missing_required_dependency():
# GH 23868
# To ensure proper isolation, we pass these flags
| - [x] closes #42748
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44137 | 2021-10-21T19:18:31Z | 2021-10-24T14:45:43Z | 2021-10-24T14:45:42Z | 2021-10-24T16:29:30Z |
Backport PR #44134 on branch 1.3.x (CI: Pin pre-commit python version) | diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
index 51b52105b483a..adbe8f403c10b 100644
--- a/.github/workflows/pre-commit.yml
+++ b/.github/workflows/pre-commit.yml
@@ -13,4 +13,6 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
+ with:
+ python-version: '3.9.7'
- uses: pre-commit/action@v2.0.0
| Backport PR #44134: CI: Pin pre-commit python version | https://api.github.com/repos/pandas-dev/pandas/pulls/44135 | 2021-10-21T17:45:57Z | 2021-10-21T19:55:42Z | 2021-10-21T19:55:42Z | 2021-10-21T19:55:42Z |
CI: Pin pre-commit python version | diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
index 0609755678d78..93e30e4d551af 100644
--- a/.github/workflows/pre-commit.yml
+++ b/.github/workflows/pre-commit.yml
@@ -16,4 +16,6 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
+ with:
+ python-version: '3.9.7'
- uses: pre-commit/action@v2.0.0
| - [x] closes #44116
Looks like GH actions/setup-python seems to have randomly jumped from python 3.9.7 to python 3.10.0
Working:
https://github.com/pandas-dev/pandas/runs/3947277622?check_suite_focus=true#step:3:5
Failing:
https://github.com/pandas-dev/pandas/runs/3960125988?check_suite_focus=true#step:3:5
We can pin to a specific python version as per:
https://github.com/actions/setup-python#usage
Needs backport to 1.3.x also failing there to: https://github.com/pandas-dev/pandas/runs/3959049188?check_suite_focus=true | https://api.github.com/repos/pandas-dev/pandas/pulls/44134 | 2021-10-21T16:29:28Z | 2021-10-21T17:45:26Z | 2021-10-21T17:45:26Z | 2021-10-21T17:47:11Z |
REF: simplify EA.astype | diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index bf54f7166e14d..8365e58632c70 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -561,11 +561,9 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
Returns
-------
array : np.ndarray or ExtensionArray
- An ExtensionArray if dtype is StringDtype,
- or same as that of underlying array.
+ An ExtensionArray if dtype is ExtensionDtype,
Otherwise a NumPy ndarray with 'dtype' for its dtype.
"""
- from pandas.core.arrays.string_ import StringDtype
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self.dtype):
@@ -574,16 +572,11 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
else:
return self.copy()
- # FIXME: Really hard-code here?
- if isinstance(dtype, StringDtype):
- # allow conversion to StringArrays
- return dtype.construct_array_type()._from_sequence(self, copy=False)
+ if isinstance(dtype, ExtensionDtype):
+ cls = dtype.construct_array_type()
+ return cls._from_sequence(self, dtype=dtype, copy=copy)
- # error: Argument "dtype" to "array" has incompatible type
- # "Union[ExtensionDtype, dtype[Any]]"; expected "Union[dtype[Any], None, type,
- # _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int,
- # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"
- return np.array(self, dtype=dtype, copy=copy) # type: ignore[arg-type]
+ return np.array(self, dtype=dtype, copy=copy)
def isna(self) -> np.ndarray | ExtensionArraySupportsAnyAll:
"""
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index a4d6c0f3cd832..35e2dd25678e5 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -110,7 +110,6 @@
)
import pandas.core.common as com
from pandas.core.construction import (
- array as pd_array,
extract_array,
sanitize_array,
)
@@ -527,7 +526,7 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
# TODO: consolidate with ndarray case?
elif isinstance(dtype, ExtensionDtype):
- result = pd_array(self, dtype=dtype, copy=copy)
+ return super().astype(dtype, copy=copy)
elif is_integer_dtype(dtype) and self.isna().any():
raise ValueError("Cannot convert float NaN to integer")
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index d1b926bd25055..72c00dfe7c65a 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -74,7 +74,6 @@
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
- is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like,
@@ -87,6 +86,7 @@
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
+ ExtensionDtype,
PeriodDtype,
)
from pandas.core.dtypes.missing import (
@@ -407,12 +407,11 @@ def astype(self, dtype, copy: bool = True):
if is_object_dtype(dtype):
return self._box_values(self.asi8.ravel()).reshape(self.shape)
- elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
- if is_extension_array_dtype(dtype):
- arr_cls = dtype.construct_array_type()
- return arr_cls._from_sequence(self, dtype=dtype, copy=copy)
- else:
- return self._format_native_types()
+
+ elif isinstance(dtype, ExtensionDtype):
+ return super().astype(dtype, copy=copy)
+ elif is_string_dtype(dtype):
+ return self._format_native_types()
elif is_integer_dtype(dtype):
# we deliberately ignore int32 vs. int64 here.
# See https://github.com/pandas-dev/pandas/issues/24381 for more.
@@ -442,9 +441,6 @@ def astype(self, dtype, copy: bool = True):
# and conversions for any datetimelike to float
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
- elif is_categorical_dtype(dtype):
- arr_cls = dtype.construct_array_type()
- return arr_cls(self, dtype=dtype)
else:
return np.asarray(self, dtype=dtype)
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 68365613c8c77..d5718d59bf8b0 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -79,7 +79,6 @@
ExtensionArray,
_extension_array_shared_docs,
)
-from pandas.core.arrays.categorical import Categorical
import pandas.core.common as com
from pandas.core.construction import (
array as pd_array,
@@ -850,7 +849,6 @@ def astype(self, dtype, copy: bool = True):
ExtensionArray or NumPy ndarray with 'dtype' for its dtype.
"""
from pandas import Index
- from pandas.core.arrays.string_ import StringDtype
if dtype is not None:
dtype = pandas_dtype(dtype)
@@ -871,17 +869,12 @@ def astype(self, dtype, copy: bool = True):
)
raise TypeError(msg) from err
return self._shallow_copy(new_left, new_right)
- elif is_categorical_dtype(dtype):
- return Categorical(np.asarray(self), dtype=dtype)
- elif isinstance(dtype, StringDtype):
- return dtype.construct_array_type()._from_sequence(self, copy=False)
-
- # TODO: This try/except will be repeated.
- try:
- return np.asarray(self).astype(dtype, copy=copy)
- except (TypeError, ValueError) as err:
- msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
- raise TypeError(msg) from err
+ else:
+ try:
+ return super().astype(dtype, copy=copy)
+ except (TypeError, ValueError) as err:
+ msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
+ raise TypeError(msg) from err
def equals(self, other) -> bool:
if type(self) != type(other):
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index d93fa4bbdd7fc..e9fb5bdf80045 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -437,8 +437,7 @@ def astype(self, dtype, copy=True):
values = arr.astype(dtype.numpy_dtype)
return FloatingArray(values, mask, copy=False)
elif isinstance(dtype, ExtensionDtype):
- cls = dtype.construct_array_type()
- return cls._from_sequence(self, dtype=dtype, copy=copy)
+ return super().astype(dtype, copy=copy)
elif np.issubdtype(dtype, np.floating):
arr = self._ndarray.copy()
mask = self.isna()
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index c7d08f7873c09..a83cfa89c4728 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -35,7 +35,6 @@
from pandas.util._decorators import doc
from pandas.util._validators import validate_fillna_kwargs
-from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
@@ -685,11 +684,7 @@ def astype(self, dtype, copy=True):
data = self._data.cast(pa.from_numpy_dtype(dtype.numpy_dtype))
return dtype.__from_arrow__(data)
- elif isinstance(dtype, ExtensionDtype):
- cls = dtype.construct_array_type()
- return cls._from_sequence(self, dtype=dtype, copy=copy)
-
- return super().astype(dtype, copy)
+ return super().astype(dtype, copy=copy)
# ------------------------------------------------------------------------
# String methods interface
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/44133 | 2021-10-21T16:23:27Z | 2021-10-24T14:47:02Z | 2021-10-24T14:47:02Z | 2021-10-24T14:47:02Z |
BUG: make `mean()` raise an exception for strings | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 47a087d38d146..a1d31e39e4a57 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -405,6 +405,39 @@ raise a ``ValueError`` if the operation could produce a result with more than
.. ---------------------------------------------------------------------------
+.. _whatsnew_140.notable_bug_fixes.mean_implicit_conversion_to_numeric:
+
+Implicit conversion of string to numeric type in mean
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When computing the ``mean`` of a :class:`Series` or :class:`DataFrame` with a string-type value, the elements are concatenated
+to a single string then coerced to a numeric type implicitly before computing the mean. This can lead to unexpected results:
+
+.. code-block:: ipython
+
+ In [5]: df = DataFrame({
+ "A": ["1", "2", "3"],
+ "B": ["0", "1", "J"],
+ })
+ In [6]: df.mean(numeric_only=False)
+ Out[6]:
+ A 41.00000+0.00000j
+ C 0.000000+0.333333j
+ dtype: complex128
+
+Now, an exception will be raised whenever ``mean`` is called on a string-type column or :class:`Series`.
+
+.. code-block:: ipython
+
+ In [7]: df = DataFrame({
+ "A": ["1", "2", "3"],
+ "B": ["0", "1", "J"],
+ })
+ In [8]: df.mean(numeric_only=False)
+ Out[8]:
+ ...
+ TypeError: cannot find the mean of type 'str'
+
.. _whatsnew_140.notable_bug_fixes.groupby_apply_mutation:
groupby.apply consistent transform detection
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 40664f178993e..ac2ff4fe00467 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -20,6 +20,7 @@
iNaT,
lib,
)
+from pandas._libs.lib import infer_dtype
from pandas._typing import (
ArrayLike,
Dtype,
@@ -695,7 +696,10 @@ def nanmean(
dtype_count = dtype
count = _get_counts(values.shape, mask, axis, dtype=dtype_count)
- the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))
+ the_sum = values.sum(axis, dtype=dtype_sum)
+ if infer_dtype(the_sum) in ("string", "byte", "mixed-integer", "mixed"):
+ raise TypeError("cannot find the mean of type 'str'")
+ the_sum = _ensure_numeric(the_sum)
if axis is not None and getattr(the_sum, "ndim", False):
count = cast(np.ndarray, count)
diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py
index 410a8f6bf3965..2a1944e52a5b8 100644
--- a/pandas/tests/apply/test_invalid_arg.py
+++ b/pandas/tests/apply/test_invalid_arg.py
@@ -257,7 +257,10 @@ def test_agg_cython_table_raises_frame(df, func, expected, axis):
)
def test_agg_cython_table_raises_series(series, func, expected):
# GH21224
- msg = r"[Cc]ould not convert|can't multiply sequence by non-int of type"
+ msg = (
+ r"[Cc]ould not convert|can't multiply sequence by non-int of type"
+ r"|cannot find the mean of type 'str'"
+ )
with pytest.raises(expected, match=msg):
# e.g. Series('a b'.split()).cumprod() will raise
series.agg(func)
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 43cf8cccb1784..4c01e9e7d6d7c 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -513,6 +513,33 @@ def test_mean_mixed_string_decimal(self):
expected = Series([2.7, 681.6], index=["A", "C"])
tm.assert_series_equal(result, expected)
+ def test_mean_string(self):
+ # https://github.com/pandas-dev/pandas/issues/44008
+ # https://github.com/pandas-dev/pandas/issues/34671
+ # https://github.com/pandas-dev/pandas/issues/22642
+ # https://github.com/pandas-dev/pandas/issues/26927
+ # https://github.com/pandas-dev/pandas/issues/13916
+ # https://github.com/pandas-dev/pandas/issues/36703
+
+ df = DataFrame(
+ {
+ "A": ["1", "2", "3"],
+ "B": ["a", "b", "c"],
+ "C": [1, 2, 3],
+ "D": ["0", "1", "J"],
+ }
+ )
+ with tm.assert_produces_warning(FutureWarning, match="Dropping of nuisance"):
+ result = df.mean()
+ expected = Series([2.0], index=["C"])
+ tm.assert_series_equal(result, expected)
+ msg = "cannot find the mean of type 'str'"
+ with pytest.raises(TypeError, match=msg):
+ df.mean(numeric_only=False)
+ result = df.sum()
+ expected = Series(["123", "abc", 6, "01J"], index=["A", "B", "C", "D"])
+ tm.assert_series_equal(result, expected)
+
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py
index d9372ba5cbb50..9a4daad61b97a 100644
--- a/pandas/tests/groupby/aggregate/test_cython.py
+++ b/pandas/tests/groupby/aggregate/test_cython.py
@@ -92,7 +92,7 @@ def test_cython_agg_nothing_to_agg():
with pytest.raises(NotImplementedError, match="does not implement"):
frame.groupby("a")["b"].mean(numeric_only=True)
- with pytest.raises(TypeError, match="Could not convert (foo|bar)*"):
+ with pytest.raises(TypeError, match="cannot find the mean of*"):
frame.groupby("a")["b"].mean()
frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25})
| - [x] closes #44008
- [x] closes #36703
- [x] closes #34671
- [x] closes #22642
- [x] closes #26927
- [x] closes #13916
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
- [x] whatsnew entry
This PR makes `mean()` raise an exception when `sum()` returns a string. Sum performs as expected since it concatenates. However `_ensure_numeric` causes implicit conversion of `str` to a numeric datatype
https://github.com/pandas-dev/pandas/blob/f3f90c33966e3ba334c459ad89d607f820caa1f8/pandas/core/nanops.py#L694
Instead of this, the return value of `sum()` will be checked and if it is a string, an exception will be raised.
This is a breaking change. | https://api.github.com/repos/pandas-dev/pandas/pulls/44131 | 2021-10-21T11:22:00Z | 2022-05-07T02:44:29Z | null | 2022-05-07T02:44:30Z |
disambiguate documentation for pandas.Series replace | diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index 9c9e70789390d..bc4f4d657b859 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -535,13 +535,13 @@
**Scalar `to_replace` and `value`**
- >>> s = pd.Series([0, 1, 2, 3, 4])
- >>> s.replace(0, 5)
+ >>> s = pd.Series([1, 2, 3, 4, 5])
+ >>> s.replace(1, 5)
0 5
- 1 1
- 2 2
- 3 3
- 4 4
+ 1 2
+ 2 3
+ 3 4
+ 4 5
dtype: int64
>>> df = pd.DataFrame({{'A': [0, 1, 2, 3, 4],
@@ -574,11 +574,11 @@
4 4 9 e
>>> s.replace([1, 2], method='bfill')
- 0 0
+ 0 3
1 3
2 3
- 3 3
- 4 4
+ 3 4
+ 4 5
dtype: int64
**dict-like `to_replace`**
| Currently, the `pandas.Series` that is used for the example for `pandas.Series.replace` has an index which is identical to its values (i.e. `pandas.Series([0, 1, 2, 3, 4])`). This can be quite confusing at a first glance (as it was for me), as I assumed that `pandas.Series.replace` was using the index of the series (whereas it is actually using the values). Updated the documentation to use `pandas.Series([1, 2, 3, 4, 5])` to remove this ambiguity.
| https://api.github.com/repos/pandas-dev/pandas/pulls/44130 | 2021-10-21T06:31:37Z | 2021-10-27T12:12:44Z | 2021-10-27T12:12:44Z | 2021-10-27T12:12:48Z |
TST: Move test_moments_rolling.py to approproate files | diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py
deleted file mode 100644
index b2e53a676b039..0000000000000
--- a/pandas/tests/window/moments/test_moments_rolling.py
+++ /dev/null
@@ -1,648 +0,0 @@
-import numpy as np
-import pytest
-
-import pandas.util._test_decorators as td
-
-from pandas import (
- DataFrame,
- Series,
- date_range,
-)
-import pandas._testing as tm
-
-
-def test_centered_axis_validation():
-
- # ok
- Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
-
- # bad axis
- msg = "No axis named 1 for object type Series"
- with pytest.raises(ValueError, match=msg):
- Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
-
- # ok ok
- DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean()
- DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
-
- # bad axis
- msg = "No axis named 2 for object type DataFrame"
- with pytest.raises(ValueError, match=msg):
- (DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
-
-
-@td.skip_if_no_scipy
-def test_cmov_mean():
- # GH 8238
- vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
- result = Series(vals).rolling(5, center=True).mean()
- expected_values = [
- np.nan,
- np.nan,
- 9.962,
- 11.27,
- 11.564,
- 12.516,
- 12.818,
- 12.952,
- np.nan,
- np.nan,
- ]
- expected = Series(expected_values)
- tm.assert_series_equal(expected, result)
-
-
-@td.skip_if_no_scipy
-def test_cmov_window():
- # GH 8238
- vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
- result = Series(vals).rolling(5, win_type="boxcar", center=True).mean()
- expected_values = [
- np.nan,
- np.nan,
- 9.962,
- 11.27,
- 11.564,
- 12.516,
- 12.818,
- 12.952,
- np.nan,
- np.nan,
- ]
- expected = Series(expected_values)
- tm.assert_series_equal(expected, result)
-
-
-@td.skip_if_no_scipy
-def test_cmov_window_corner():
- # GH 8238
- # all nan
- vals = Series([np.nan] * 10)
- result = vals.rolling(5, center=True, win_type="boxcar").mean()
- assert np.isnan(result).all()
-
- # empty
- vals = Series([], dtype=object)
- result = vals.rolling(5, center=True, win_type="boxcar").mean()
- assert len(result) == 0
-
- # shorter than window
- vals = Series(np.random.randn(5))
- result = vals.rolling(10, win_type="boxcar").mean()
- assert np.isnan(result).all()
- assert len(result) == 5
-
-
-@td.skip_if_no_scipy
-@pytest.mark.parametrize(
- "f,xp",
- [
- (
- "mean",
- [
- [np.nan, np.nan],
- [np.nan, np.nan],
- [9.252, 9.392],
- [8.644, 9.906],
- [8.87, 10.208],
- [6.81, 8.588],
- [7.792, 8.644],
- [9.05, 7.824],
- [np.nan, np.nan],
- [np.nan, np.nan],
- ],
- ),
- (
- "std",
- [
- [np.nan, np.nan],
- [np.nan, np.nan],
- [3.789706, 4.068313],
- [3.429232, 3.237411],
- [3.589269, 3.220810],
- [3.405195, 2.380655],
- [3.281839, 2.369869],
- [3.676846, 1.801799],
- [np.nan, np.nan],
- [np.nan, np.nan],
- ],
- ),
- (
- "var",
- [
- [np.nan, np.nan],
- [np.nan, np.nan],
- [14.36187, 16.55117],
- [11.75963, 10.48083],
- [12.88285, 10.37362],
- [11.59535, 5.66752],
- [10.77047, 5.61628],
- [13.51920, 3.24648],
- [np.nan, np.nan],
- [np.nan, np.nan],
- ],
- ),
- (
- "sum",
- [
- [np.nan, np.nan],
- [np.nan, np.nan],
- [46.26, 46.96],
- [43.22, 49.53],
- [44.35, 51.04],
- [34.05, 42.94],
- [38.96, 43.22],
- [45.25, 39.12],
- [np.nan, np.nan],
- [np.nan, np.nan],
- ],
- ),
- ],
-)
-def test_cmov_window_frame(f, xp):
- # Gh 8238
- df = DataFrame(
- np.array(
- [
- [12.18, 3.64],
- [10.18, 9.16],
- [13.24, 14.61],
- [4.51, 8.11],
- [6.15, 11.44],
- [9.14, 6.21],
- [11.31, 10.67],
- [2.94, 6.51],
- [9.42, 8.39],
- [12.44, 7.34],
- ]
- )
- )
- xp = DataFrame(np.array(xp))
-
- roll = df.rolling(5, win_type="boxcar", center=True)
- rs = getattr(roll, f)()
-
- tm.assert_frame_equal(xp, rs)
-
-
-@td.skip_if_no_scipy
-def test_cmov_window_na_min_periods():
- # min_periods
- vals = Series(np.random.randn(10))
- vals[4] = np.nan
- vals[8] = np.nan
-
- xp = vals.rolling(5, min_periods=4, center=True).mean()
- rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean()
- tm.assert_series_equal(xp, rs)
-
-
-@td.skip_if_no_scipy
-def test_cmov_window_regular(win_types):
- # GH 8238
- vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
- xps = {
- "hamming": [
- np.nan,
- np.nan,
- 8.71384,
- 9.56348,
- 12.38009,
- 14.03687,
- 13.8567,
- 11.81473,
- np.nan,
- np.nan,
- ],
- "triang": [
- np.nan,
- np.nan,
- 9.28667,
- 10.34667,
- 12.00556,
- 13.33889,
- 13.38,
- 12.33667,
- np.nan,
- np.nan,
- ],
- "barthann": [
- np.nan,
- np.nan,
- 8.4425,
- 9.1925,
- 12.5575,
- 14.3675,
- 14.0825,
- 11.5675,
- np.nan,
- np.nan,
- ],
- "bohman": [
- np.nan,
- np.nan,
- 7.61599,
- 9.1764,
- 12.83559,
- 14.17267,
- 14.65923,
- 11.10401,
- np.nan,
- np.nan,
- ],
- "blackmanharris": [
- np.nan,
- np.nan,
- 6.97691,
- 9.16438,
- 13.05052,
- 14.02156,
- 15.10512,
- 10.74574,
- np.nan,
- np.nan,
- ],
- "nuttall": [
- np.nan,
- np.nan,
- 7.04618,
- 9.16786,
- 13.02671,
- 14.03559,
- 15.05657,
- 10.78514,
- np.nan,
- np.nan,
- ],
- "blackman": [
- np.nan,
- np.nan,
- 7.73345,
- 9.17869,
- 12.79607,
- 14.20036,
- 14.57726,
- 11.16988,
- np.nan,
- np.nan,
- ],
- "bartlett": [
- np.nan,
- np.nan,
- 8.4425,
- 9.1925,
- 12.5575,
- 14.3675,
- 14.0825,
- 11.5675,
- np.nan,
- np.nan,
- ],
- }
-
- xp = Series(xps[win_types])
- rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
- tm.assert_series_equal(xp, rs)
-
-
-@td.skip_if_no_scipy
-def test_cmov_window_regular_linear_range(win_types):
- # GH 8238
- vals = np.array(range(10), dtype=float)
- xp = vals.copy()
- xp[:2] = np.nan
- xp[-2:] = np.nan
- xp = Series(xp)
-
- rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
- tm.assert_series_equal(xp, rs)
-
-
-@td.skip_if_no_scipy
-def test_cmov_window_regular_missing_data(win_types):
- # GH 8238
- vals = np.array(
- [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
- )
- xps = {
- "bartlett": [
- np.nan,
- np.nan,
- 9.70333,
- 10.5225,
- 8.4425,
- 9.1925,
- 12.5575,
- 14.3675,
- 15.61667,
- 13.655,
- ],
- "blackman": [
- np.nan,
- np.nan,
- 9.04582,
- 11.41536,
- 7.73345,
- 9.17869,
- 12.79607,
- 14.20036,
- 15.8706,
- 13.655,
- ],
- "barthann": [
- np.nan,
- np.nan,
- 9.70333,
- 10.5225,
- 8.4425,
- 9.1925,
- 12.5575,
- 14.3675,
- 15.61667,
- 13.655,
- ],
- "bohman": [
- np.nan,
- np.nan,
- 8.9444,
- 11.56327,
- 7.61599,
- 9.1764,
- 12.83559,
- 14.17267,
- 15.90976,
- 13.655,
- ],
- "hamming": [
- np.nan,
- np.nan,
- 9.59321,
- 10.29694,
- 8.71384,
- 9.56348,
- 12.38009,
- 14.20565,
- 15.24694,
- 13.69758,
- ],
- "nuttall": [
- np.nan,
- np.nan,
- 8.47693,
- 12.2821,
- 7.04618,
- 9.16786,
- 13.02671,
- 14.03673,
- 16.08759,
- 13.65553,
- ],
- "triang": [
- np.nan,
- np.nan,
- 9.33167,
- 9.76125,
- 9.28667,
- 10.34667,
- 12.00556,
- 13.82125,
- 14.49429,
- 13.765,
- ],
- "blackmanharris": [
- np.nan,
- np.nan,
- 8.42526,
- 12.36824,
- 6.97691,
- 9.16438,
- 13.05052,
- 14.02175,
- 16.1098,
- 13.65509,
- ],
- }
-
- xp = Series(xps[win_types])
- rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
- tm.assert_series_equal(xp, rs)
-
-
-@td.skip_if_no_scipy
-def test_cmov_window_special(win_types_special):
- # GH 8238
- kwds = {
- "kaiser": {"beta": 1.0},
- "gaussian": {"std": 1.0},
- "general_gaussian": {"p": 2.0, "sig": 2.0},
- "exponential": {"tau": 10},
- }
-
- vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
-
- xps = {
- "gaussian": [
- np.nan,
- np.nan,
- 8.97297,
- 9.76077,
- 12.24763,
- 13.89053,
- 13.65671,
- 12.01002,
- np.nan,
- np.nan,
- ],
- "general_gaussian": [
- np.nan,
- np.nan,
- 9.85011,
- 10.71589,
- 11.73161,
- 13.08516,
- 12.95111,
- 12.74577,
- np.nan,
- np.nan,
- ],
- "kaiser": [
- np.nan,
- np.nan,
- 9.86851,
- 11.02969,
- 11.65161,
- 12.75129,
- 12.90702,
- 12.83757,
- np.nan,
- np.nan,
- ],
- "exponential": [
- np.nan,
- np.nan,
- 9.83364,
- 11.10472,
- 11.64551,
- 12.66138,
- 12.92379,
- 12.83770,
- np.nan,
- np.nan,
- ],
- }
-
- xp = Series(xps[win_types_special])
- rs = (
- Series(vals)
- .rolling(5, win_type=win_types_special, center=True)
- .mean(**kwds[win_types_special])
- )
- tm.assert_series_equal(xp, rs)
-
-
-@td.skip_if_no_scipy
-def test_cmov_window_special_linear_range(win_types_special):
- # GH 8238
- kwds = {
- "kaiser": {"beta": 1.0},
- "gaussian": {"std": 1.0},
- "general_gaussian": {"p": 2.0, "sig": 2.0},
- "slepian": {"width": 0.5},
- "exponential": {"tau": 10},
- }
-
- vals = np.array(range(10), dtype=float)
- xp = vals.copy()
- xp[:2] = np.nan
- xp[-2:] = np.nan
- xp = Series(xp)
-
- rs = (
- Series(vals)
- .rolling(5, win_type=win_types_special, center=True)
- .mean(**kwds[win_types_special])
- )
- tm.assert_series_equal(xp, rs)
-
-
-def test_rolling_min_min_periods():
- a = Series([1, 2, 3, 4, 5])
- result = a.rolling(window=100, min_periods=1).min()
- expected = Series(np.ones(len(a)))
- tm.assert_series_equal(result, expected)
- msg = "min_periods 5 must be <= window 3"
- with pytest.raises(ValueError, match=msg):
- Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
-
-
-def test_rolling_max_min_periods():
- a = Series([1, 2, 3, 4, 5], dtype=np.float64)
- b = a.rolling(window=100, min_periods=1).max()
- tm.assert_almost_equal(a, b)
- msg = "min_periods 5 must be <= window 3"
- with pytest.raises(ValueError, match=msg):
- Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
-
-
-def test_rolling_quantile_np_percentile():
- # #9413: Tests that rolling window's quantile default behavior
- # is analogous to Numpy's percentile
- row = 10
- col = 5
- idx = date_range("20100101", periods=row, freq="B")
- df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
-
- df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
- np_percentile = np.percentile(df, [25, 50, 75], axis=0)
-
- tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
-
-
-@pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1])
-@pytest.mark.parametrize(
- "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
-)
-@pytest.mark.parametrize(
- "data",
- [
- [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
- [8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0],
- [0.0, np.nan, 0.2, np.nan, 0.4],
- [np.nan, np.nan, np.nan, np.nan],
- [np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],
- [0.5],
- [np.nan, 0.7, 0.6],
- ],
-)
-def test_rolling_quantile_interpolation_options(quantile, interpolation, data):
- # Tests that rolling window's quantile behavior is analogous to
- # Series' quantile for each interpolation option
- s = Series(data)
-
- q1 = s.quantile(quantile, interpolation)
- q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1]
-
- if np.isnan(q1):
- assert np.isnan(q2)
- else:
- assert q1 == q2
-
-
-def test_invalid_quantile_value():
- data = np.arange(5)
- s = Series(data)
-
- msg = "Interpolation 'invalid' is not supported"
- with pytest.raises(ValueError, match=msg):
- s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid")
-
-
-def test_rolling_quantile_param():
- ser = Series([0.0, 0.1, 0.5, 0.9, 1.0])
- msg = "quantile value -0.1 not in \\[0, 1\\]"
- with pytest.raises(ValueError, match=msg):
- ser.rolling(3).quantile(-0.1)
-
- msg = "quantile value 10.0 not in \\[0, 1\\]"
- with pytest.raises(ValueError, match=msg):
- ser.rolling(3).quantile(10.0)
-
- msg = "must be real number, not str"
- with pytest.raises(TypeError, match=msg):
- ser.rolling(3).quantile("foo")
-
-
-def test_rolling_std_1obs():
- vals = Series([1.0, 2.0, 3.0, 4.0, 5.0])
-
- result = vals.rolling(1, min_periods=1).std()
- expected = Series([np.nan] * 5)
- tm.assert_series_equal(result, expected)
-
- result = vals.rolling(1, min_periods=1).std(ddof=0)
- expected = Series([0.0] * 5)
- tm.assert_series_equal(result, expected)
-
- result = Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std()
- assert np.isnan(result[2])
-
-
-def test_rolling_std_neg_sqrt():
- # unit test from Bottleneck
-
- # Test move_nanstd for neg sqrt.
-
- a = Series(
- [
- 0.0011448196318903589,
- 0.00028718669878572767,
- 0.00028718669878572767,
- 0.00028718669878572767,
- 0.00028718669878572767,
- ]
- )
- b = a.rolling(window=3).std()
- assert np.isfinite(b[2:]).all()
-
- b = a.ewm(span=3).std()
- assert np.isfinite(b[2:]).all()
diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py
index 5cc22249c26f0..f84a579247630 100644
--- a/pandas/tests/window/test_api.py
+++ b/pandas/tests/window/test_api.py
@@ -349,3 +349,42 @@ def test_dont_modify_attributes_after_methods(
getattr(roll_obj, arithmetic_win_operators)()
result = {attr: getattr(roll_obj, attr) for attr in roll_obj._attributes}
assert result == expected
+
+
+def test_centered_axis_validation():
+
+ # ok
+ Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
+
+ # bad axis
+ msg = "No axis named 1 for object type Series"
+ with pytest.raises(ValueError, match=msg):
+ Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
+
+ # ok ok
+ DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean()
+ DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
+
+ # bad axis
+ msg = "No axis named 2 for object type DataFrame"
+ with pytest.raises(ValueError, match=msg):
+ (DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
+
+
+def test_rolling_min_min_periods():
+ a = Series([1, 2, 3, 4, 5])
+ result = a.rolling(window=100, min_periods=1).min()
+ expected = Series(np.ones(len(a)))
+ tm.assert_series_equal(result, expected)
+ msg = "min_periods 5 must be <= window 3"
+ with pytest.raises(ValueError, match=msg):
+ Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
+
+
+def test_rolling_max_min_periods():
+ a = Series([1, 2, 3, 4, 5], dtype=np.float64)
+ b = a.rolling(window=100, min_periods=1).max()
+ tm.assert_almost_equal(a, b)
+ msg = "min_periods 5 must be <= window 3"
+ with pytest.raises(ValueError, match=msg):
+ Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index d58eeaa7cbcb1..f908839229532 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -1616,3 +1616,107 @@ def test_rank(window, method, pct, ascending, test_data):
result = ser.rolling(window).rank(method=method, pct=pct, ascending=ascending)
tm.assert_series_equal(result, expected)
+
+
+def test_rolling_quantile_np_percentile():
+ # #9413: Tests that rolling window's quantile default behavior
+ # is analogous to Numpy's percentile
+ row = 10
+ col = 5
+ idx = date_range("20100101", periods=row, freq="B")
+ df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
+
+ df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
+ np_percentile = np.percentile(df, [25, 50, 75], axis=0)
+
+ tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
+
+
+@pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1])
+@pytest.mark.parametrize(
+ "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
+)
+@pytest.mark.parametrize(
+ "data",
+ [
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
+ [8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0],
+ [0.0, np.nan, 0.2, np.nan, 0.4],
+ [np.nan, np.nan, np.nan, np.nan],
+ [np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],
+ [0.5],
+ [np.nan, 0.7, 0.6],
+ ],
+)
+def test_rolling_quantile_interpolation_options(quantile, interpolation, data):
+ # Tests that rolling window's quantile behavior is analogous to
+ # Series' quantile for each interpolation option
+ s = Series(data)
+
+ q1 = s.quantile(quantile, interpolation)
+ q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1]
+
+ if np.isnan(q1):
+ assert np.isnan(q2)
+ else:
+ assert q1 == q2
+
+
+def test_invalid_quantile_value():
+ data = np.arange(5)
+ s = Series(data)
+
+ msg = "Interpolation 'invalid' is not supported"
+ with pytest.raises(ValueError, match=msg):
+ s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid")
+
+
+def test_rolling_quantile_param():
+ ser = Series([0.0, 0.1, 0.5, 0.9, 1.0])
+ msg = "quantile value -0.1 not in \\[0, 1\\]"
+ with pytest.raises(ValueError, match=msg):
+ ser.rolling(3).quantile(-0.1)
+
+ msg = "quantile value 10.0 not in \\[0, 1\\]"
+ with pytest.raises(ValueError, match=msg):
+ ser.rolling(3).quantile(10.0)
+
+ msg = "must be real number, not str"
+ with pytest.raises(TypeError, match=msg):
+ ser.rolling(3).quantile("foo")
+
+
+def test_rolling_std_1obs():
+ vals = Series([1.0, 2.0, 3.0, 4.0, 5.0])
+
+ result = vals.rolling(1, min_periods=1).std()
+ expected = Series([np.nan] * 5)
+ tm.assert_series_equal(result, expected)
+
+ result = vals.rolling(1, min_periods=1).std(ddof=0)
+ expected = Series([0.0] * 5)
+ tm.assert_series_equal(result, expected)
+
+ result = Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std()
+ assert np.isnan(result[2])
+
+
+def test_rolling_std_neg_sqrt():
+ # unit test from Bottleneck
+
+ # Test move_nanstd for neg sqrt.
+
+ a = Series(
+ [
+ 0.0011448196318903589,
+ 0.00028718669878572767,
+ 0.00028718669878572767,
+ 0.00028718669878572767,
+ 0.00028718669878572767,
+ ]
+ )
+ b = a.rolling(window=3).std()
+ assert np.isfinite(b[2:]).all()
+
+ b = a.ewm(span=3).std()
+ assert np.isfinite(b[2:]).all()
diff --git a/pandas/tests/window/test_win_type.py b/pandas/tests/window/test_win_type.py
index a1f388b1eb5d9..6e0edc9e8500c 100644
--- a/pandas/tests/window/test_win_type.py
+++ b/pandas/tests/window/test_win_type.py
@@ -159,3 +159,497 @@ def get_window_bounds(self, num_values, min_periods, center, closed):
indexer = CustomIndexer()
with pytest.raises(NotImplementedError, match="BaseIndexer subclasses not"):
df.rolling(indexer, win_type="boxcar")
+
+
+@td.skip_if_no_scipy
+def test_cmov_mean():
+ # GH 8238
+ vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
+ result = Series(vals).rolling(5, center=True).mean()
+ expected_values = [
+ np.nan,
+ np.nan,
+ 9.962,
+ 11.27,
+ 11.564,
+ 12.516,
+ 12.818,
+ 12.952,
+ np.nan,
+ np.nan,
+ ]
+ expected = Series(expected_values)
+ tm.assert_series_equal(expected, result)
+
+
+@td.skip_if_no_scipy
+def test_cmov_window():
+ # GH 8238
+ vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
+ result = Series(vals).rolling(5, win_type="boxcar", center=True).mean()
+ expected_values = [
+ np.nan,
+ np.nan,
+ 9.962,
+ 11.27,
+ 11.564,
+ 12.516,
+ 12.818,
+ 12.952,
+ np.nan,
+ np.nan,
+ ]
+ expected = Series(expected_values)
+ tm.assert_series_equal(expected, result)
+
+
+@td.skip_if_no_scipy
+def test_cmov_window_corner():
+ # GH 8238
+ # all nan
+ vals = Series([np.nan] * 10)
+ result = vals.rolling(5, center=True, win_type="boxcar").mean()
+ assert np.isnan(result).all()
+
+ # empty
+ vals = Series([], dtype=object)
+ result = vals.rolling(5, center=True, win_type="boxcar").mean()
+ assert len(result) == 0
+
+ # shorter than window
+ vals = Series(np.random.randn(5))
+ result = vals.rolling(10, win_type="boxcar").mean()
+ assert np.isnan(result).all()
+ assert len(result) == 5
+
+
+@td.skip_if_no_scipy
+@pytest.mark.parametrize(
+ "f,xp",
+ [
+ (
+ "mean",
+ [
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ [9.252, 9.392],
+ [8.644, 9.906],
+ [8.87, 10.208],
+ [6.81, 8.588],
+ [7.792, 8.644],
+ [9.05, 7.824],
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ ],
+ ),
+ (
+ "std",
+ [
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ [3.789706, 4.068313],
+ [3.429232, 3.237411],
+ [3.589269, 3.220810],
+ [3.405195, 2.380655],
+ [3.281839, 2.369869],
+ [3.676846, 1.801799],
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ ],
+ ),
+ (
+ "var",
+ [
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ [14.36187, 16.55117],
+ [11.75963, 10.48083],
+ [12.88285, 10.37362],
+ [11.59535, 5.66752],
+ [10.77047, 5.61628],
+ [13.51920, 3.24648],
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ ],
+ ),
+ (
+ "sum",
+ [
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ [46.26, 46.96],
+ [43.22, 49.53],
+ [44.35, 51.04],
+ [34.05, 42.94],
+ [38.96, 43.22],
+ [45.25, 39.12],
+ [np.nan, np.nan],
+ [np.nan, np.nan],
+ ],
+ ),
+ ],
+)
+def test_cmov_window_frame(f, xp):
+ # Gh 8238
+ df = DataFrame(
+ np.array(
+ [
+ [12.18, 3.64],
+ [10.18, 9.16],
+ [13.24, 14.61],
+ [4.51, 8.11],
+ [6.15, 11.44],
+ [9.14, 6.21],
+ [11.31, 10.67],
+ [2.94, 6.51],
+ [9.42, 8.39],
+ [12.44, 7.34],
+ ]
+ )
+ )
+ xp = DataFrame(np.array(xp))
+
+ roll = df.rolling(5, win_type="boxcar", center=True)
+ rs = getattr(roll, f)()
+
+ tm.assert_frame_equal(xp, rs)
+
+
+@td.skip_if_no_scipy
+def test_cmov_window_na_min_periods():
+ # min_periods
+ vals = Series(np.random.randn(10))
+ vals[4] = np.nan
+ vals[8] = np.nan
+
+ xp = vals.rolling(5, min_periods=4, center=True).mean()
+ rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean()
+ tm.assert_series_equal(xp, rs)
+
+
+@td.skip_if_no_scipy
+def test_cmov_window_regular(win_types):
+ # GH 8238
+ vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
+ xps = {
+ "hamming": [
+ np.nan,
+ np.nan,
+ 8.71384,
+ 9.56348,
+ 12.38009,
+ 14.03687,
+ 13.8567,
+ 11.81473,
+ np.nan,
+ np.nan,
+ ],
+ "triang": [
+ np.nan,
+ np.nan,
+ 9.28667,
+ 10.34667,
+ 12.00556,
+ 13.33889,
+ 13.38,
+ 12.33667,
+ np.nan,
+ np.nan,
+ ],
+ "barthann": [
+ np.nan,
+ np.nan,
+ 8.4425,
+ 9.1925,
+ 12.5575,
+ 14.3675,
+ 14.0825,
+ 11.5675,
+ np.nan,
+ np.nan,
+ ],
+ "bohman": [
+ np.nan,
+ np.nan,
+ 7.61599,
+ 9.1764,
+ 12.83559,
+ 14.17267,
+ 14.65923,
+ 11.10401,
+ np.nan,
+ np.nan,
+ ],
+ "blackmanharris": [
+ np.nan,
+ np.nan,
+ 6.97691,
+ 9.16438,
+ 13.05052,
+ 14.02156,
+ 15.10512,
+ 10.74574,
+ np.nan,
+ np.nan,
+ ],
+ "nuttall": [
+ np.nan,
+ np.nan,
+ 7.04618,
+ 9.16786,
+ 13.02671,
+ 14.03559,
+ 15.05657,
+ 10.78514,
+ np.nan,
+ np.nan,
+ ],
+ "blackman": [
+ np.nan,
+ np.nan,
+ 7.73345,
+ 9.17869,
+ 12.79607,
+ 14.20036,
+ 14.57726,
+ 11.16988,
+ np.nan,
+ np.nan,
+ ],
+ "bartlett": [
+ np.nan,
+ np.nan,
+ 8.4425,
+ 9.1925,
+ 12.5575,
+ 14.3675,
+ 14.0825,
+ 11.5675,
+ np.nan,
+ np.nan,
+ ],
+ }
+
+ xp = Series(xps[win_types])
+ rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
+ tm.assert_series_equal(xp, rs)
+
+
+@td.skip_if_no_scipy
+def test_cmov_window_regular_linear_range(win_types):
+ # GH 8238
+ vals = np.array(range(10), dtype=float)
+ xp = vals.copy()
+ xp[:2] = np.nan
+ xp[-2:] = np.nan
+ xp = Series(xp)
+
+ rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
+ tm.assert_series_equal(xp, rs)
+
+
+@td.skip_if_no_scipy
+def test_cmov_window_regular_missing_data(win_types):
+ # GH 8238
+ vals = np.array(
+ [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
+ )
+ xps = {
+ "bartlett": [
+ np.nan,
+ np.nan,
+ 9.70333,
+ 10.5225,
+ 8.4425,
+ 9.1925,
+ 12.5575,
+ 14.3675,
+ 15.61667,
+ 13.655,
+ ],
+ "blackman": [
+ np.nan,
+ np.nan,
+ 9.04582,
+ 11.41536,
+ 7.73345,
+ 9.17869,
+ 12.79607,
+ 14.20036,
+ 15.8706,
+ 13.655,
+ ],
+ "barthann": [
+ np.nan,
+ np.nan,
+ 9.70333,
+ 10.5225,
+ 8.4425,
+ 9.1925,
+ 12.5575,
+ 14.3675,
+ 15.61667,
+ 13.655,
+ ],
+ "bohman": [
+ np.nan,
+ np.nan,
+ 8.9444,
+ 11.56327,
+ 7.61599,
+ 9.1764,
+ 12.83559,
+ 14.17267,
+ 15.90976,
+ 13.655,
+ ],
+ "hamming": [
+ np.nan,
+ np.nan,
+ 9.59321,
+ 10.29694,
+ 8.71384,
+ 9.56348,
+ 12.38009,
+ 14.20565,
+ 15.24694,
+ 13.69758,
+ ],
+ "nuttall": [
+ np.nan,
+ np.nan,
+ 8.47693,
+ 12.2821,
+ 7.04618,
+ 9.16786,
+ 13.02671,
+ 14.03673,
+ 16.08759,
+ 13.65553,
+ ],
+ "triang": [
+ np.nan,
+ np.nan,
+ 9.33167,
+ 9.76125,
+ 9.28667,
+ 10.34667,
+ 12.00556,
+ 13.82125,
+ 14.49429,
+ 13.765,
+ ],
+ "blackmanharris": [
+ np.nan,
+ np.nan,
+ 8.42526,
+ 12.36824,
+ 6.97691,
+ 9.16438,
+ 13.05052,
+ 14.02175,
+ 16.1098,
+ 13.65509,
+ ],
+ }
+
+ xp = Series(xps[win_types])
+ rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
+ tm.assert_series_equal(xp, rs)
+
+
+@td.skip_if_no_scipy
+def test_cmov_window_special(win_types_special):
+ # GH 8238
+ kwds = {
+ "kaiser": {"beta": 1.0},
+ "gaussian": {"std": 1.0},
+ "general_gaussian": {"p": 2.0, "sig": 2.0},
+ "exponential": {"tau": 10},
+ }
+
+ vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
+
+ xps = {
+ "gaussian": [
+ np.nan,
+ np.nan,
+ 8.97297,
+ 9.76077,
+ 12.24763,
+ 13.89053,
+ 13.65671,
+ 12.01002,
+ np.nan,
+ np.nan,
+ ],
+ "general_gaussian": [
+ np.nan,
+ np.nan,
+ 9.85011,
+ 10.71589,
+ 11.73161,
+ 13.08516,
+ 12.95111,
+ 12.74577,
+ np.nan,
+ np.nan,
+ ],
+ "kaiser": [
+ np.nan,
+ np.nan,
+ 9.86851,
+ 11.02969,
+ 11.65161,
+ 12.75129,
+ 12.90702,
+ 12.83757,
+ np.nan,
+ np.nan,
+ ],
+ "exponential": [
+ np.nan,
+ np.nan,
+ 9.83364,
+ 11.10472,
+ 11.64551,
+ 12.66138,
+ 12.92379,
+ 12.83770,
+ np.nan,
+ np.nan,
+ ],
+ }
+
+ xp = Series(xps[win_types_special])
+ rs = (
+ Series(vals)
+ .rolling(5, win_type=win_types_special, center=True)
+ .mean(**kwds[win_types_special])
+ )
+ tm.assert_series_equal(xp, rs)
+
+
+@td.skip_if_no_scipy
+def test_cmov_window_special_linear_range(win_types_special):
+ # GH 8238
+ kwds = {
+ "kaiser": {"beta": 1.0},
+ "gaussian": {"std": 1.0},
+ "general_gaussian": {"p": 2.0, "sig": 2.0},
+ "slepian": {"width": 0.5},
+ "exponential": {"tau": 10},
+ }
+
+ vals = np.array(range(10), dtype=float)
+ xp = vals.copy()
+ xp[:2] = np.nan
+ xp[-2:] = np.nan
+ xp = Series(xp)
+
+ rs = (
+ Series(vals)
+ .rolling(5, win_type=win_types_special, center=True)
+ .mean(**kwds[win_types_special])
+ )
+ tm.assert_series_equal(xp, rs)
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
Since some tests are not running on `windows/moments` (https://github.com/pandas-dev/pandas/issues/37535), moving some lighter rolling test to appropriate locations | https://api.github.com/repos/pandas-dev/pandas/pulls/44129 | 2021-10-21T05:36:20Z | 2021-10-24T01:45:48Z | 2021-10-24T01:45:48Z | 2021-10-24T01:45:50Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.