title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
STY: enable PLR5501 | diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py
index 422436d376f69..72cb410c9068d 100644
--- a/pandas/tests/copy_view/test_indexing.py
+++ b/pandas/tests/copy_view/test_indexing.py
@@ -941,15 +941,14 @@ def test_column_as_series(backend, using_copy_on_write, warn_copy_on_write):
if using_copy_on_write:
s[0] = 0
+ elif warn_copy_on_write:
+ with tm.assert_cow_warning():
+ s[0] = 0
else:
- if warn_copy_on_write:
- with tm.assert_cow_warning():
+ warn = SettingWithCopyWarning if dtype_backend == "numpy" else None
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(warn):
s[0] = 0
- else:
- warn = SettingWithCopyWarning if dtype_backend == "numpy" else None
- with pd.option_context("chained_assignment", "warn"):
- with tm.assert_produces_warning(warn):
- s[0] = 0
expected = Series([0, 2, 3], name="a")
tm.assert_series_equal(s, expected)
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 2a3a0a54d0767..0ca6bf0de94dd 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -1693,12 +1693,11 @@ def test_rolling_quantile_interpolation_options(quantile, interpolation, data):
if np.isnan(q1):
assert np.isnan(q2)
+ elif not IS64:
+ # Less precision on 32-bit
+ assert np.allclose([q1], [q2], rtol=1e-07, atol=0)
else:
- if not IS64:
- # Less precision on 32-bit
- assert np.allclose([q1], [q2], rtol=1e-07, atol=0)
- else:
- assert q1 == q2
+ assert q1 == q2
def test_invalid_quantile_value():
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index cb0b4d549f49e..4e542d1b7f04a 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -335,9 +335,8 @@ def validate_percentile(q: float | Iterable[float]) -> np.ndarray:
if q_arr.ndim == 0:
if not 0 <= q_arr <= 1:
raise ValueError(msg)
- else:
- if not all(0 <= qs <= 1 for qs in q_arr):
- raise ValueError(msg)
+ elif not all(0 <= qs <= 1 for qs in q_arr):
+ raise ValueError(msg)
return q_arr
diff --git a/pyproject.toml b/pyproject.toml
index ebdf9deb034b5..4f11cfb17edd1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -337,8 +337,6 @@ ignore = [
"PLR2004",
# comparison-with-itself
"PLR0124",
- # Consider `elif` instead of `else` then `if` to remove indentation level
- "PLR5501",
# collection-literal-concatenation
"RUF005",
# pairwise-over-zipped (>=PY310 only)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56895 | 2024-01-15T20:42:04Z | 2024-01-16T16:06:50Z | 2024-01-16T16:06:50Z | 2024-01-16T16:18:44Z |
STY: Remove black-specific rule | diff --git a/pyproject.toml b/pyproject.toml
index ebdf9deb034b5..89965b4f6765d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -246,8 +246,6 @@ select = [
ignore = [
### Intentionally disabled
- # space before : (needed for how black formats slicing)
- "E203",
# module level import not at top of file
"E402",
# do not assign a lambda expression, use a def
| As `pandas` uses `ruff` as the formatter now, I think that we can safely enable this rule.
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56894 | 2024-01-15T20:30:57Z | 2024-01-16T16:07:24Z | 2024-01-16T16:07:24Z | 2024-01-16T16:18:27Z |
Backport PR #56891 on branch 2.2.x (DOC: Add deprecated markers for downcast keyword) | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index de25a02c6b37c..f8728c61e46fc 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7187,6 +7187,8 @@ def fillna(
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
+ .. deprecated:: 2.2.0
+
Returns
-------
{klass} or None
@@ -7522,6 +7524,8 @@ def ffill(
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
+ .. deprecated:: 2.2.0
+
Returns
-------
{klass} or None
@@ -7713,6 +7717,8 @@ def bfill(
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
+ .. deprecated:: 2.2.0
+
Returns
-------
{klass} or None
| Backport PR #56891: DOC: Add deprecated markers for downcast keyword | https://api.github.com/repos/pandas-dev/pandas/pulls/56893 | 2024-01-15T19:38:00Z | 2024-01-15T19:57:42Z | 2024-01-15T19:57:41Z | 2024-01-15T19:57:42Z |
TYP: Update pyright | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 73ac14f1ed5ce..fc71d785da8bc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -132,7 +132,7 @@ repos:
types: [python]
stages: [manual]
additional_dependencies: &pyright_dependencies
- - pyright@1.1.339
+ - pyright@1.1.347
- id: pyright
# note: assumes python env is setup and activated
name: pyright reportGeneralTypeIssues
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index 73d69105541d8..5b114ff1f2111 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -88,7 +88,7 @@ class DeprecatedOption(NamedTuple):
class RegisteredOption(NamedTuple):
key: str
- defval: object
+ defval: Any
doc: str
validator: Callable[[object], Any] | None
cb: Callable[[str], Any] | None
diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py
index 5c1a0ff139533..69a56d3911316 100644
--- a/pandas/_config/localization.py
+++ b/pandas/_config/localization.py
@@ -10,7 +10,10 @@
import platform
import re
import subprocess
-from typing import TYPE_CHECKING
+from typing import (
+ TYPE_CHECKING,
+ cast,
+)
from pandas._config.config import options
@@ -152,7 +155,7 @@ def get_locales(
out_locales = []
for x in split_raw_locales:
try:
- out_locales.append(str(x, encoding=options.display.encoding))
+ out_locales.append(str(x, encoding=cast(str, options.display.encoding)))
except UnicodeError:
# 'locale -a' is used to populated 'raw_locales' and on
# Redhat 7 Linux (and maybe others) prints locale names
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi
index 32ecd264262d6..c8befabbf86de 100644
--- a/pandas/_libs/lib.pyi
+++ b/pandas/_libs/lib.pyi
@@ -69,16 +69,26 @@ def fast_multiget(
mapping: dict,
keys: np.ndarray, # object[:]
default=...,
-) -> np.ndarray: ...
+) -> ArrayLike: ...
def fast_unique_multiple_list_gen(gen: Generator, sort: bool = ...) -> list: ...
def fast_unique_multiple_list(lists: list, sort: bool | None = ...) -> list: ...
+@overload
def map_infer(
arr: np.ndarray,
f: Callable[[Any], Any],
- convert: bool = ...,
+ *,
+ convert: Literal[False],
ignore_na: bool = ...,
) -> np.ndarray: ...
@overload
+def map_infer(
+ arr: np.ndarray,
+ f: Callable[[Any], Any],
+ *,
+ convert: bool = ...,
+ ignore_na: bool = ...,
+) -> ArrayLike: ...
+@overload
def maybe_convert_objects(
objects: npt.NDArray[np.object_],
*,
@@ -164,14 +174,26 @@ def is_all_arraylike(obj: list) -> bool: ...
# Functions which in reality take memoryviews
def memory_usage_of_objects(arr: np.ndarray) -> int: ... # object[:] # np.int64
+@overload
def map_infer_mask(
arr: np.ndarray,
f: Callable[[Any], Any],
mask: np.ndarray, # const uint8_t[:]
- convert: bool = ...,
+ *,
+ convert: Literal[False],
na_value: Any = ...,
dtype: np.dtype = ...,
) -> np.ndarray: ...
+@overload
+def map_infer_mask(
+ arr: np.ndarray,
+ f: Callable[[Any], Any],
+ mask: np.ndarray, # const uint8_t[:]
+ *,
+ convert: bool = ...,
+ na_value: Any = ...,
+ dtype: np.dtype = ...,
+) -> ArrayLike: ...
def indices_fast(
index: npt.NDArray[np.intp],
labels: np.ndarray, # const int64_t[:]
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index c483f35513a40..5eb20960f0e3d 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -2864,10 +2864,11 @@ def map_infer_mask(
ndarray[object] arr,
object f,
const uint8_t[:] mask,
+ *,
bint convert=True,
object na_value=no_default,
cnp.dtype dtype=np.dtype(object)
-) -> np.ndarray:
+) -> "ArrayLike":
"""
Substitute for np.vectorize with pandas-friendly dtype inference.
@@ -2887,7 +2888,7 @@ def map_infer_mask(
Returns
-------
- np.ndarray
+ np.ndarray or an ExtensionArray
"""
cdef Py_ssize_t n = len(arr)
result = np.empty(n, dtype=dtype)
@@ -2941,8 +2942,8 @@ def _map_infer_mask(
@cython.boundscheck(False)
@cython.wraparound(False)
def map_infer(
- ndarray arr, object f, bint convert=True, bint ignore_na=False
-) -> np.ndarray:
+ ndarray arr, object f, *, bint convert=True, bint ignore_na=False
+) -> "ArrayLike":
"""
Substitute for np.vectorize with pandas-friendly dtype inference.
@@ -2956,7 +2957,7 @@ def map_infer(
Returns
-------
- np.ndarray
+ np.ndarray or an ExtensionArray
"""
cdef:
Py_ssize_t i, n
@@ -3091,7 +3092,7 @@ def to_object_array_tuples(rows: object) -> np.ndarray:
@cython.wraparound(False)
@cython.boundscheck(False)
-def fast_multiget(dict mapping, object[:] keys, default=np.nan) -> np.ndarray:
+def fast_multiget(dict mapping, object[:] keys, default=np.nan) -> "ArrayLike":
cdef:
Py_ssize_t i, n = len(keys)
object val
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index d4da5840689de..b73b49eca3e18 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -4,6 +4,7 @@
TYPE_CHECKING,
ClassVar,
Literal,
+ cast,
)
import numpy as np
@@ -637,7 +638,7 @@ def _str_map(
# error: Argument 1 to "dtype" has incompatible type
# "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected
# "Type[object]"
- dtype=np.dtype(dtype), # type: ignore[arg-type]
+ dtype=np.dtype(cast(type, dtype)),
)
if not na_value_is_na:
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 8c8787d15c8fe..a76eef8095695 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -7,6 +7,7 @@
TYPE_CHECKING,
Callable,
Union,
+ cast,
)
import warnings
@@ -327,7 +328,7 @@ def _str_map(
# error: Argument 1 to "dtype" has incompatible type
# "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected
# "Type[object]"
- dtype=np.dtype(dtype), # type: ignore[arg-type]
+ dtype=np.dtype(cast(type, dtype)),
)
if not na_value_is_na:
@@ -640,7 +641,7 @@ def _str_map(
mask.view("uint8"),
convert=False,
na_value=na_value,
- dtype=np.dtype(dtype), # type: ignore[arg-type]
+ dtype=np.dtype(cast(type, dtype)),
)
return result
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 6313c2e2c98de..55421090d4202 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -4,7 +4,10 @@
from __future__ import annotations
import tokenize
-from typing import TYPE_CHECKING
+from typing import (
+ TYPE_CHECKING,
+ Any,
+)
import warnings
from pandas.util._exceptions import find_stack_level
@@ -177,7 +180,7 @@ def eval(
level: int = 0,
target=None,
inplace: bool = False,
-):
+) -> Any:
"""
Evaluate a Python expression as a string using various backends.
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 3e4227a8a2598..52ec4a0b012e3 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -258,7 +258,9 @@ def _use_inf_as_na(key) -> None:
globals()["INF_AS_NA"] = False
-def _isna_array(values: ArrayLike, inf_as_na: bool = False):
+def _isna_array(
+ values: ArrayLike, inf_as_na: bool = False
+) -> npt.NDArray[np.bool_] | NDFrame:
"""
Return an array indicating which values of the input array are NaN / NA.
@@ -275,6 +277,7 @@ def _isna_array(values: ArrayLike, inf_as_na: bool = False):
Array of boolean values denoting the NA status of each element.
"""
dtype = values.dtype
+ result: npt.NDArray[np.bool_] | NDFrame
if not isinstance(values, np.ndarray):
# i.e. ExtensionArray
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index c01e551b38c32..c2b809e5b6d3e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9802,7 +9802,9 @@ def explode(
return result.__finalize__(self, method="explode")
- def unstack(self, level: IndexLabel = -1, fill_value=None, sort: bool = True):
+ def unstack(
+ self, level: IndexLabel = -1, fill_value=None, sort: bool = True
+ ) -> DataFrame | Series:
"""
Pivot a level of the (necessarily hierarchical) index labels.
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 62afcf8badb50..02bbc73c05d70 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -491,7 +491,7 @@ def copy(self, name: Hashable | None = None, deep: bool = False) -> Self:
new_index = self._rename(name=name)
return new_index
- def _minmax(self, meth: str):
+ def _minmax(self, meth: str) -> int | float:
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
@@ -500,13 +500,13 @@ def _minmax(self, meth: str):
return self.start + self.step * no_steps
- def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
+ def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int | float:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")
- def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
+ def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int | float:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
diff --git a/pandas/core/ops/common.py b/pandas/core/ops/common.py
index 559977bacf881..fa085a1f0262b 100644
--- a/pandas/core/ops/common.py
+++ b/pandas/core/ops/common.py
@@ -40,7 +40,7 @@ def wrapper(method: F) -> F:
return wrapper
-def _unpack_zerodim_and_defer(method, name: str):
+def _unpack_zerodim_and_defer(method: F, name: str) -> F:
"""
Boilerplate for pandas conventions in arithmetic and comparison methods.
@@ -75,7 +75,9 @@ def new_method(self, other):
return method(self, other)
- return new_method
+ # error: Incompatible return value type (got "Callable[[Any, Any], Any]",
+ # expected "F")
+ return new_method # type: ignore[return-value]
def get_op_result_name(left, right):
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index ea74c17917279..ff973f6defc09 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -568,7 +568,8 @@ def pivot(
# error: Argument 1 to "unstack" of "DataFrame" has incompatible type "Union
# [List[Any], ExtensionArray, ndarray[Any, Any], Index, Series]"; expected
# "Hashable"
- result = indexed.unstack(columns_listlike) # type: ignore[arg-type]
+ # unstack with a MultiIndex returns a DataFrame
+ result = cast("DataFrame", indexed.unstack(columns_listlike)) # type: ignore[arg-type]
result.index.names = [
name if name is not lib.no_default else None for name in result.index.names
]
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 3493f1c78da91..39cd619715a91 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -4,6 +4,7 @@
from typing import (
TYPE_CHECKING,
cast,
+ overload,
)
import warnings
@@ -451,7 +452,11 @@ def _unstack_multiple(
result = data
while clocs:
val = clocs.pop(0)
- result = result.unstack(val, fill_value=fill_value, sort=sort)
+ # error: Incompatible types in assignment (expression has type
+ # "DataFrame | Series", variable has type "DataFrame")
+ result = result.unstack( # type: ignore[assignment]
+ val, fill_value=fill_value, sort=sort
+ )
clocs = [v if v < val else v - 1 for v in clocs]
return result
@@ -460,7 +465,9 @@ def _unstack_multiple(
dummy_df = data.copy(deep=False)
dummy_df.index = dummy_index
- unstacked = dummy_df.unstack(
+ # error: Incompatible types in assignment (expression has type "DataFrame |
+ # Series", variable has type "DataFrame")
+ unstacked = dummy_df.unstack( # type: ignore[assignment]
"__placeholder__", fill_value=fill_value, sort=sort
)
if isinstance(unstacked, Series):
@@ -486,7 +493,21 @@ def _unstack_multiple(
return unstacked
-def unstack(obj: Series | DataFrame, level, fill_value=None, sort: bool = True):
+@overload
+def unstack(obj: Series, level, fill_value=..., sort: bool = ...) -> DataFrame:
+ ...
+
+
+@overload
+def unstack(
+ obj: Series | DataFrame, level, fill_value=..., sort: bool = ...
+) -> Series | DataFrame:
+ ...
+
+
+def unstack(
+ obj: Series | DataFrame, level, fill_value=None, sort: bool = True
+) -> Series | DataFrame:
if isinstance(level, (tuple, list)):
if len(level) != 1:
# _unstack_multiple only handles MultiIndexes,
@@ -573,10 +594,14 @@ def _unstack_extension_series(
# equiv: result.droplevel(level=0, axis=1)
# but this avoids an extra copy
result.columns = result.columns._drop_level_numbers([0])
- return result
+ # error: Incompatible return value type (got "DataFrame | Series", expected
+ # "DataFrame")
+ return result # type: ignore[return-value]
-def stack(frame: DataFrame, level=-1, dropna: bool = True, sort: bool = True):
+def stack(
+ frame: DataFrame, level=-1, dropna: bool = True, sort: bool = True
+) -> Series | DataFrame:
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
@@ -659,7 +684,9 @@ def stack_multiple(frame: DataFrame, level, dropna: bool = True, sort: bool = Tr
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
- result = stack(result, lev, dropna=dropna, sort=sort)
+ # error: Incompatible types in assignment (expression has type
+ # "Series | DataFrame", variable has type "DataFrame")
+ result = stack(result, lev, dropna=dropna, sort=sort) # type: ignore[assignment]
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
@@ -672,7 +699,9 @@ def stack_multiple(frame: DataFrame, level, dropna: bool = True, sort: bool = Tr
while level:
lev = level.pop(0)
- result = stack(result, lev, dropna=dropna, sort=sort)
+ # error: Incompatible types in assignment (expression has type
+ # "Series | DataFrame", variable has type "DataFrame")
+ result = stack(result, lev, dropna=dropna, sort=sort) # type: ignore[assignment]
# Decrement all level numbers greater than current, as these
# have now shifted down by one
level = [v if v <= lev else v - 1 for v in level]
@@ -894,6 +923,7 @@ def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame:
if len(level) > 1:
# Arrange columns in the order we want to take them, e.g. level=[2, 0, 1]
sorter = np.argsort(level)
+ assert isinstance(stack_cols, MultiIndex)
ordered_stack_cols = stack_cols._reorder_ilevels(sorter)
else:
ordered_stack_cols = stack_cols
@@ -956,13 +986,15 @@ def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame:
codes, uniques = factorize(frame.index, use_na_sentinel=False)
index_levels = [uniques]
index_codes = list(np.tile(codes, (1, ratio)))
- if isinstance(stack_cols, MultiIndex):
+ if isinstance(ordered_stack_cols, MultiIndex):
column_levels = ordered_stack_cols.levels
column_codes = ordered_stack_cols.drop_duplicates().codes
else:
column_levels = [ordered_stack_cols.unique()]
column_codes = [factorize(ordered_stack_cols_unique, use_na_sentinel=False)[0]]
- column_codes = [np.repeat(codes, len(frame)) for codes in column_codes]
+ # error: Incompatible types in assignment (expression has type "list[ndarray[Any,
+ # dtype[Any]]]", variable has type "FrozenList")
+ column_codes = [np.repeat(codes, len(frame)) for codes in column_codes] # type: ignore[assignment]
result.index = MultiIndex(
levels=index_levels + column_levels,
codes=index_codes + column_codes,
diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py
index 29d17e7174ee9..bdcf55e61d2d1 100644
--- a/pandas/core/strings/object_array.py
+++ b/pandas/core/strings/object_array.py
@@ -29,8 +29,6 @@
Scalar,
)
- from pandas import Series
-
class ObjectStringArrayMixin(BaseStringArrayMethods):
"""
@@ -75,7 +73,9 @@ def _str_map(
mask = isna(arr)
map_convert = convert and not np.all(mask)
try:
- result = lib.map_infer_mask(arr, f, mask.view(np.uint8), map_convert)
+ result = lib.map_infer_mask(
+ arr, f, mask.view(np.uint8), convert=map_convert
+ )
except (TypeError, AttributeError) as err:
# Reraise the exception if callable `f` got wrong number of args.
# The user may want to be warned by this, instead of getting NaN
@@ -456,7 +456,7 @@ def _str_lstrip(self, to_strip=None):
def _str_rstrip(self, to_strip=None):
return self._str_map(lambda x: x.rstrip(to_strip))
- def _str_removeprefix(self, prefix: str) -> Series:
+ def _str_removeprefix(self, prefix: str):
# outstanding question on whether to use native methods for users on Python 3.9+
# https://github.com/pandas-dev/pandas/pull/39226#issuecomment-836719770,
# in which case we could do return self._str_map(str.removeprefix)
@@ -468,7 +468,7 @@ def removeprefix(text: str) -> str:
return self._str_map(removeprefix)
- def _str_removesuffix(self, suffix: str) -> Series:
+ def _str_removesuffix(self, suffix: str):
return self._str_map(lambda x: x.removesuffix(suffix))
def _str_extract(self, pat: str, flags: int = 0, expand: bool = True):
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index b80ed9ac50dce..fcf4f7606a594 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -5,6 +5,7 @@
from typing import (
TYPE_CHECKING,
+ Any,
overload,
)
import warnings
@@ -89,7 +90,8 @@ def to_timedelta(
| Series,
unit: UnitChoices | None = None,
errors: DateTimeErrorChoices = "raise",
-) -> Timedelta | TimedeltaIndex | Series | NaTType:
+ # returning Any for errors="ignore"
+) -> Timedelta | TimedeltaIndex | Series | NaTType | Any:
"""
Convert argument to timedelta.
diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py
index ab2dc20ccbd02..d521d91ab3b7b 100644
--- a/pandas/plotting/_matplotlib/hist.py
+++ b/pandas/plotting/_matplotlib/hist.py
@@ -41,7 +41,9 @@
if TYPE_CHECKING:
from matplotlib.axes import Axes
+ from matplotlib.container import BarContainer
from matplotlib.figure import Figure
+ from matplotlib.patches import Polygon
from pandas._typing import PlottingOrientation
@@ -112,7 +114,8 @@ def _plot( # type: ignore[override]
*,
bins,
**kwds,
- ):
+ # might return a subset from the possible return types of Axes.hist(...)[2]?
+ ) -> BarContainer | Polygon | list[BarContainer | Polygon]:
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
@@ -171,7 +174,8 @@ def _make_plot(self, fig: Figure) -> None:
if self.by is not None:
ax.set_title(pprint_thing(label))
- self._append_legend_handles_labels(artists[0], label)
+ # error: Value of type "Polygon" is not indexable
+ self._append_legend_handles_labels(artists[0], label) # type: ignore[index,arg-type]
def _make_plot_keywords(self, kwds: dict[str, Any], y: np.ndarray) -> None:
"""merge BoxPlot/KdePlot properties to passed kwds"""
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index 4e542d1b7f04a..178284c7d75b6 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -265,7 +265,7 @@ def validate_bool_kwarg(
f'For argument "{arg_name}" expected type bool, received '
f"type {type(value).__name__}."
)
- return value # pyright: ignore[reportGeneralTypeIssues]
+ return value
def validate_fillna_kwargs(value, method, validate_scalar_dict_value: bool = True):
diff --git a/pyright_reportGeneralTypeIssues.json b/pyright_reportGeneralTypeIssues.json
index da27906e041cf..1589988603506 100644
--- a/pyright_reportGeneralTypeIssues.json
+++ b/pyright_reportGeneralTypeIssues.json
@@ -41,7 +41,6 @@
"pandas/core/arrays/string_arrow.py",
"pandas/core/arrays/timedeltas.py",
"pandas/core/computation/align.py",
- "pandas/core/computation/ops.py",
"pandas/core/construction.py",
"pandas/core/dtypes/cast.py",
"pandas/core/dtypes/common.py",
| Newer releases of pyright infer return types of partially annotated functions, which uncovers quite a few difficult-to-fix type errors (some functions had no return types because it would create too many typing errors elsewhere).
(pyright can also try to infer return types for completely unannotated functions, but that is disabled for now in pyright_reportGeneralTypeIssues.json as it would trigger more difficult-to-fix cases.) | https://api.github.com/repos/pandas-dev/pandas/pulls/56892 | 2024-01-15T17:31:47Z | 2024-01-16T22:32:04Z | 2024-01-16T22:32:04Z | 2024-01-17T02:50:02Z |
DOC: Add deprecated markers for downcast keyword | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index caac11b6ab4f6..81ffb243cd302 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7208,6 +7208,8 @@ def fillna(
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
+ .. deprecated:: 2.2.0
+
Returns
-------
{klass} or None
@@ -7543,6 +7545,8 @@ def ffill(
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
+ .. deprecated:: 2.2.0
+
Returns
-------
{klass} or None
@@ -7734,6 +7738,8 @@ def bfill(
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
+ .. deprecated:: 2.2.0
+
Returns
-------
{klass} or None
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56891 | 2024-01-15T16:28:09Z | 2024-01-15T19:36:59Z | 2024-01-15T19:36:59Z | 2024-01-15T20:36:37Z |
TST: Fix setitem parametrizations | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 80c8a1e8ef5c7..bd19aed631659 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -115,7 +115,10 @@
ensure_wrapped_if_datetimelike,
extract_array,
)
-from pandas.core.indexers import check_setitem_lengths
+from pandas.core.indexers import (
+ check_array_indexer,
+ check_setitem_lengths,
+)
from pandas.core.indexes.base import get_values_for_csv
if TYPE_CHECKING:
@@ -1661,6 +1664,8 @@ def setitem(self, indexer, value):
# TODO(GH#45419): string[pyarrow] tests break if we transpose
# unconditionally
values = values.T
+
+ check_array_indexer(values, indexer)
check_setitem_lengths(indexer, value, values)
try:
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 3fb2fc09eaa79..4410b3d0441af 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -206,12 +206,9 @@ def test_setitem_integer_array(self, data, idx, box_in_series):
"idx, box_in_series",
[
([0, 1, 2, pd.NA], False),
- pytest.param(
- [0, 1, 2, pd.NA], True, marks=pytest.mark.xfail(reason="GH-31948")
- ),
+ ([0, 1, 2, pd.NA], True),
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
- # TODO: change False to True?
- (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False), # noqa: PT014
+ (pd.array([0, 1, 2, pd.NA], dtype="Int64"), True),
],
ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],
)
@@ -225,7 +222,10 @@ def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):
msg = "Cannot index with an integer indexer containing NA values"
with pytest.raises(ValueError, match=msg):
- arr[idx] = arr[0]
+ if type(arr) is pd.Series:
+ arr.iloc[idx] = arr.iloc[0]
+ else:
+ arr[idx] = arr[0]
@pytest.mark.parametrize("as_callable", [True, False])
@pytest.mark.parametrize("setter", ["loc", None])
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 6ecbf2063f203..9dd9fe5977f04 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -342,15 +342,24 @@ def test_setitem_integer_array(self, data, idx, box_in_series, request):
request.applymarker(mark)
super().test_setitem_integer_array(data, idx, box_in_series)
- @pytest.mark.xfail(reason="list indices must be integers or slices, not NAType")
@pytest.mark.parametrize(
"idx, box_in_series",
[
- ([0, 1, 2, pd.NA], False),
pytest.param(
- [0, 1, 2, pd.NA], True, marks=pytest.mark.xfail(reason="GH-31948")
+ [0, 1, 2, pd.NA],
+ False,
+ marks=pytest.mark.xfail(
+ reason="Cannot index with an integer indexer containing NA values"
+ ),
+ ),
+ ([0, 1, 2, pd.NA], True),
+ pytest.param(
+ pd.array([0, 1, 2, pd.NA], dtype="Int64"),
+ False,
+ marks=pytest.mark.xfail(
+ reason="Cannot index with an integer indexer containing NA values"
+ ),
),
- (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), True),
],
ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],
| I'm not sure if i am on the right track with this PR but it's a try. If not i will close it and probably someone more into the code has to dig into that issue.
- [x] closes #56727
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56890 | 2024-01-15T16:25:44Z | 2024-04-09T17:13:02Z | null | 2024-04-09T17:13:03Z |
TST: extension tests use its own fixtures | diff --git a/pandas/tests/extension/base/accumulate.py b/pandas/tests/extension/base/accumulate.py
index 9189ef7ec9aa5..9a41a3a582c4a 100644
--- a/pandas/tests/extension/base/accumulate.py
+++ b/pandas/tests/extension/base/accumulate.py
@@ -26,6 +26,7 @@ def check_accumulate(self, ser: pd.Series, op_name: str, skipna: bool):
expected = getattr(alt, op_name)(skipna=skipna)
tm.assert_series_equal(result, expected, check_dtype=False)
+ @pytest.mark.parametrize("skipna", [True, False])
def test_accumulate_series(self, data, all_numeric_accumulations, skipna):
op_name = all_numeric_accumulations
ser = pd.Series(data)
diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py
index 3fb116430861a..c7b768f6e3c88 100644
--- a/pandas/tests/extension/base/dtype.py
+++ b/pandas/tests/extension/base/dtype.py
@@ -114,6 +114,7 @@ def test_get_common_dtype(self, dtype):
# only case we can test in general)
assert dtype._get_common_dtype([dtype]) == dtype
+ @pytest.mark.parametrize("skipna", [True, False])
def test_infer_dtype(self, data, data_missing, skipna):
# only testing that this works without raising an error
res = infer_dtype(data, skipna=skipna)
diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py
index 9f38246d1a317..75628ea177fc2 100644
--- a/pandas/tests/extension/base/groupby.py
+++ b/pandas/tests/extension/base/groupby.py
@@ -34,6 +34,7 @@ def test_grouping_grouper(self, data_for_grouping):
tm.assert_numpy_array_equal(gr1.grouping_vector, df.A.values)
tm.assert_extension_array_equal(gr2.grouping_vector, data_for_grouping)
+ @pytest.mark.parametrize("as_index", [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index ba247f51e5f1b..c803a8113b4a4 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -37,6 +37,7 @@ def test_value_counts_default_dropna(self, data):
kwarg = sig.parameters["dropna"]
assert kwarg.default is True
+ @pytest.mark.parametrize("dropna", [True, False])
def test_value_counts(self, all_data, dropna):
all_data = all_data[:10]
if dropna:
@@ -96,6 +97,7 @@ def test_apply_simple_series(self, data):
result = pd.Series(data).apply(id)
assert isinstance(result, pd.Series)
+ @pytest.mark.parametrize("na_action", [None, "ignore"])
def test_map(self, data_missing, na_action):
result = data_missing.map(lambda x: x, na_action=na_action)
expected = data_missing.to_numpy()
@@ -211,6 +213,7 @@ def test_nargsort(self, data_missing_for_sorting, na_position, expected):
result = nargsort(data_missing_for_sorting, na_position=na_position)
tm.assert_numpy_array_equal(result, expected)
+ @pytest.mark.parametrize("ascending", [True, False])
def test_sort_values(self, data_for_sorting, ascending, sort_by_key):
ser = pd.Series(data_for_sorting)
result = ser.sort_values(ascending=ascending, key=sort_by_key)
@@ -224,6 +227,7 @@ def test_sort_values(self, data_for_sorting, ascending, sort_by_key):
tm.assert_series_equal(result, expected)
+ @pytest.mark.parametrize("ascending", [True, False])
def test_sort_values_missing(
self, data_missing_for_sorting, ascending, sort_by_key
):
@@ -235,6 +239,7 @@ def test_sort_values_missing(
expected = ser.iloc[[0, 2, 1]]
tm.assert_series_equal(result, expected)
+ @pytest.mark.parametrize("ascending", [True, False])
def test_sort_values_frame(self, data_for_sorting, ascending):
df = pd.DataFrame({"A": [1, 2, 1], "B": data_for_sorting})
result = df.sort_values(["A", "B"])
@@ -243,6 +248,7 @@ def test_sort_values_frame(self, data_for_sorting, ascending):
)
tm.assert_frame_equal(result, expected)
+ @pytest.mark.parametrize("keep", ["first", "last", False])
def test_duplicated(self, data, keep):
arr = data.take([0, 1, 0, 1])
result = arr.duplicated(keep=keep)
diff --git a/pandas/tests/extension/base/reduce.py b/pandas/tests/extension/base/reduce.py
index 2a443901fa41a..6ea1b3a6fbe9d 100644
--- a/pandas/tests/extension/base/reduce.py
+++ b/pandas/tests/extension/base/reduce.py
@@ -77,6 +77,7 @@ def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool):
tm.assert_extension_array_equal(result1, expected)
+ @pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
op_name = all_boolean_reductions
ser = pd.Series(data)
@@ -95,6 +96,7 @@ def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
self.check_reduce(ser, op_name, skipna)
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
+ @pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
op_name = all_numeric_reductions
ser = pd.Series(data)
@@ -113,6 +115,7 @@ def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
# min/max with empty produce numpy warnings
self.check_reduce(ser, op_name, skipna)
+ @pytest.mark.parametrize("skipna", [True, False])
def test_reduce_frame(self, data, all_numeric_reductions, skipna):
op_name = all_numeric_reductions
ser = pd.Series(data)
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index ba756b471eb8b..7ee2c23c5b23a 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -105,9 +105,10 @@ def test_setitem_sequence_broadcasts(self, data, box_in_series):
assert data[0] == data[2]
assert data[1] == data[2]
- def test_setitem_scalar(self, data, indexer_li):
+ @pytest.mark.parametrize("setter", ["loc", "iloc"])
+ def test_setitem_scalar(self, data, setter):
arr = pd.Series(data)
- setter = indexer_li(arr)
+ setter = getattr(arr, setter)
setter[0] = data[1]
assert arr[0] == data[1]
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 64b897d27a835..69958b51c9e47 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -257,6 +257,7 @@ def test_fillna_copy_series(self, data_missing, using_copy_on_write):
with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
super().test_fillna_copy_series(data_missing)
+ @pytest.mark.parametrize("dropna", [True, False])
def test_value_counts(self, all_data, dropna):
all_data = all_data[:10]
if dropna:
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 8d0bb85b2a01f..05a112e464677 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -271,6 +271,7 @@ def test_compare_scalar(self, data, comparison_op):
ser = pd.Series(data)
self._compare_other(ser, data, comparison_op, data[0])
+ @pytest.mark.parametrize("na_action", [None, "ignore"])
def test_map(self, data_missing, na_action):
if data_missing.dtype.kind in "mM":
result = data_missing.map(lambda x: x, na_action=na_action)
@@ -423,6 +424,7 @@ def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool:
return False
return True
+ @pytest.mark.parametrize("skipna", [True, False])
def test_accumulate_series(self, data, all_numeric_accumulations, skipna, request):
pa_type = data.dtype.pyarrow_dtype
op_name = all_numeric_accumulations
@@ -524,6 +526,7 @@ def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
expected = getattr(alt, op_name)(skipna=skipna)
tm.assert_almost_equal(result, expected)
+ @pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request):
dtype = data.dtype
pa_dtype = dtype.pyarrow_dtype
@@ -549,6 +552,7 @@ def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, reque
request.applymarker(xfail_mark)
super().test_reduce_series_numeric(data, all_numeric_reductions, skipna)
+ @pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_boolean(
self, data, all_boolean_reductions, skipna, na_value, request
):
@@ -585,6 +589,7 @@ def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool):
}[arr.dtype.kind]
return cmp_dtype
+ @pytest.mark.parametrize("skipna", [True, False])
def test_reduce_frame(self, data, all_numeric_reductions, skipna, request):
op_name = all_numeric_reductions
if op_name == "skew":
@@ -2325,6 +2330,7 @@ def test_str_extract_expand():
tm.assert_series_equal(result, expected)
+@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
def test_duration_from_strings_with_nat(unit):
# GH51175
strings = ["1000", "NaT"]
@@ -2827,6 +2833,7 @@ def test_dt_components():
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize("skipna", [True, False])
def test_boolean_reduce_series_all_null(all_boolean_reductions, skipna):
# GH51624
ser = pd.Series([None], dtype="float64[pyarrow]")
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index edf560dda36e7..bd4ab5077c6e8 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -134,6 +134,7 @@ def test_combine_add(self, data_repeated):
expected = pd.Series([a + val for a in list(orig_data1)])
tm.assert_series_equal(result, expected)
+ @pytest.mark.parametrize("na_action", [None, "ignore"])
def test_map(self, data, na_action):
result = data.map(lambda x: x, na_action=na_action)
tm.assert_extension_array_equal(result, data)
@@ -174,6 +175,7 @@ def test_array_repr(self, data, size):
super().test_array_repr(data, size)
@pytest.mark.xfail(reason="TBD")
+ @pytest.mark.parametrize("as_index", [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
super().test_groupby_extension_agg(as_index, data_for_grouping)
diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py
index 4b25b2768849e..6352bf76f96bb 100644
--- a/pandas/tests/extension/test_datetime.py
+++ b/pandas/tests/extension/test_datetime.py
@@ -100,6 +100,7 @@ def _supports_accumulation(self, ser, op_name: str) -> bool:
def _supports_reduction(self, obj, op_name: str) -> bool:
return op_name in ["min", "max", "median", "mean", "std", "any", "all"]
+ @pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
meth = all_boolean_reductions
msg = f"'{meth}' with datetime64 dtypes is deprecated and will raise in"
@@ -113,6 +114,7 @@ def test_series_constructor(self, data):
data = data._with_freq(None)
super().test_series_constructor(data)
+ @pytest.mark.parametrize("na_action", [None, "ignore"])
def test_map(self, data, na_action):
result = data.map(lambda x: x, na_action=na_action)
tm.assert_extension_array_equal(result, data)
diff --git a/pandas/tests/extension/test_masked.py b/pandas/tests/extension/test_masked.py
index 0e19c4078b471..3efc561d6a125 100644
--- a/pandas/tests/extension/test_masked.py
+++ b/pandas/tests/extension/test_masked.py
@@ -169,6 +169,7 @@ def data_for_grouping(dtype):
class TestMaskedArrays(base.ExtensionTests):
+ @pytest.mark.parametrize("na_action", [None, "ignore"])
def test_map(self, data_missing, na_action):
result = data_missing.map(lambda x: x, na_action=na_action)
if data_missing.dtype == Float32Dtype():
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 0893c6231197e..3f54f6cbbba69 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -313,6 +313,7 @@ def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
tm.assert_almost_equal(result, expected)
@pytest.mark.skip("TODO: tests not written yet")
+ @pytest.mark.parametrize("skipna", [True, False])
def test_reduce_frame(self, data, all_numeric_reductions, skipna):
pass
diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py
index 4fe9c160d66af..2d1d213322bac 100644
--- a/pandas/tests/extension/test_period.py
+++ b/pandas/tests/extension/test_period.py
@@ -109,6 +109,7 @@ def test_diff(self, data, periods):
else:
super().test_diff(data, periods)
+ @pytest.mark.parametrize("na_action", [None, "ignore"])
def test_map(self, data, na_action):
result = data.map(lambda x: x, na_action=na_action)
tm.assert_extension_array_equal(result, data)
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index 2efcc192aa15b..d8f14383ef114 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -102,6 +102,7 @@ class TestSparseArray(base.ExtensionTests):
def _supports_reduction(self, obj, op_name: str) -> bool:
return True
+ @pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request):
if all_numeric_reductions in [
"prod",
@@ -126,6 +127,7 @@ def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, reque
super().test_reduce_series_numeric(data, all_numeric_reductions, skipna)
+ @pytest.mark.parametrize("skipna", [True, False])
def test_reduce_frame(self, data, all_numeric_reductions, skipna, request):
if all_numeric_reductions in [
"prod",
@@ -366,6 +368,7 @@ def test_map(self, func, na_action, expected):
result = data.map(func, na_action=na_action)
tm.assert_extension_array_equal(result, expected)
+ @pytest.mark.parametrize("na_action", [None, "ignore"])
def test_map_raises(self, data, na_action):
# GH52096
msg = "fill value in the sparse values not supported"
@@ -486,6 +489,7 @@ def test_array_repr(self, data, size):
super().test_array_repr(data, size)
@pytest.mark.xfail(reason="result does not match expected")
+ @pytest.mark.parametrize("as_index", [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
super().test_groupby_extension_agg(as_index, data_for_grouping)
| This reverts a small part of https://github.com/pandas-dev/pandas/pull/56583, for the changes made in the `pandas/tests/extension` directory
This is still a bit brittle, as there is currently no check inplace we don't change this back in the future -> https://github.com/pandas-dev/pandas/issues/56735. I was thinking we could run the tests for one of our own extension arrays outside of the pandas tests, will comment more on the issue.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56889 | 2024-01-15T15:45:03Z | 2024-01-15T17:52:25Z | 2024-01-15T17:52:25Z | 2024-01-15T19:10:56Z |
DOC: Additions/updates to documentation : grammar changes to documentation | diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index 82af8122a6bbd..78d22c768b865 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -19,7 +19,7 @@ Bug reports and enhancement requests
====================================
Bug reports and enhancement requests are an important part of making pandas more stable and
-are curated though Github issues. When reporting and issue or request, please select the `appropriate
+are curated though Github issues. When reporting an issue or request, please select the `appropriate
category and fill out the issue form fully <https://github.com/pandas-dev/pandas/issues/new/choose>`_
to ensure others and the core development team can fully understand the scope of the issue.
| - [x] closes #56885
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56886 | 2024-01-15T10:59:13Z | 2024-01-15T16:29:18Z | 2024-01-15T16:29:18Z | 2024-01-15T16:29:27Z |
DOC: fix EX03 in pandas.ExcelWriter | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 8658715b8bf3e..4bf6a3e97f0c6 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -86,7 +86,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.Timestamp.ceil \
pandas.Timestamp.floor \
pandas.Timestamp.round \
- pandas.ExcelWriter \
pandas.read_json \
pandas.io.json.build_table_schema \
pandas.io.formats.style.Styler.to_latex \
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 786f719337b84..2189f54263dec 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -935,7 +935,7 @@ class ExcelWriter(Generic[_WorkbookT]):
is installed otherwise `openpyxl <https://pypi.org/project/openpyxl/>`__
* `odswriter <https://pypi.org/project/odswriter/>`__ for ods files
- See ``DataFrame.to_excel`` for typical usage.
+ See :meth:`DataFrame.to_excel` for typical usage.
The writer should be used as a context manager. Otherwise, call `close()` to save
and close any opened file handles.
@@ -1031,7 +1031,7 @@ class ExcelWriter(Generic[_WorkbookT]):
Here, the `if_sheet_exists` parameter can be set to replace a sheet if it
already exists:
- >>> with ExcelWriter(
+ >>> with pd.ExcelWriter(
... "path_to_file.xlsx",
... mode="a",
... engine="openpyxl",
@@ -1042,7 +1042,8 @@ class ExcelWriter(Generic[_WorkbookT]):
You can also write multiple DataFrames to a single sheet. Note that the
``if_sheet_exists`` parameter needs to be set to ``overlay``:
- >>> with ExcelWriter("path_to_file.xlsx",
+ >>> with pd.ExcelWriter(
+ ... "path_to_file.xlsx",
... mode="a",
... engine="openpyxl",
... if_sheet_exists="overlay",
| - [ ] ref #56804 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Also add a cross-reference in the docstring.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56884 | 2024-01-15T05:27:43Z | 2024-01-15T06:51:57Z | 2024-01-15T06:51:57Z | 2024-01-15T07:24:05Z |
DOC: Installation instructions update | diff --git a/README.md b/README.md
index e5329d66c2d89..1c04e09a619e8 100644
--- a/README.md
+++ b/README.md
@@ -134,12 +134,7 @@ cloning the git repo), execute:
pip install .
```
-or for installing in [development mode](https://pip.pypa.io/en/latest/cli/pip_install/#install-editable):
-
-
-```sh
-python -m pip install -ve . --no-build-isolation --config-settings=editable-verbose=true
-```
+or for installing in [development mode](https://pip.pypa.io/en/latest/cli/pip_install/#install-editable).
See the full instructions for [installing from source](https://pandas.pydata.org/docs/dev/development/contributing_environment.html).
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 46d1ee49c22a0..d901bc6c108c2 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1120,10 +1120,12 @@ def interval_range(
# error: Argument 1 to "maybe_downcast_numeric" has incompatible type
# "Union[ndarray[Any, Any], TimedeltaIndex, DatetimeIndex]";
# expected "ndarray[Any, Any]" [
+ dtype = start.dtype if start.dtype == end.dtype else np.dtype("int64")
breaks = maybe_downcast_numeric(
- breaks, # type: ignore[arg-type]
- np.dtype("int64"),
+ breaks, # type: ignore[arg-type].
+ dtype,
)
+ return IntervalIndex.from_breaks(breaks, name=name, closed=closed, dtype=IntervalDtype(subtype=dtype, closed=closed))
else:
# delegate to the appropriate range function
if isinstance(endpoint, Timestamp):
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56883 | 2024-01-15T04:45:41Z | 2024-02-13T09:20:54Z | null | 2024-02-13T09:20:54Z |
DOC: fix EX03 errors in docstrings - pandas.io.formats.style.Styler: format_index, relabel_index, hide, set_td_classes | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 8658715b8bf3e..e2dcf3ff91b65 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -109,10 +109,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.io.formats.style.Styler.apply_index \
pandas.io.formats.style.Styler.map_index \
pandas.io.formats.style.Styler.format \
- pandas.io.formats.style.Styler.format_index \
- pandas.io.formats.style.Styler.relabel_index \
- pandas.io.formats.style.Styler.hide \
- pandas.io.formats.style.Styler.set_td_classes \
pandas.io.formats.style.Styler.set_tooltips \
pandas.io.formats.style.Styler.set_uuid \
pandas.io.formats.style.Styler.pipe \
@@ -122,9 +118,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.io.formats.style.Styler.text_gradient \
pandas.DataFrame.values \
pandas.DataFrame.groupby \
- pandas.DataFrame.idxmax \
- pandas.DataFrame.idxmin \
- pandas.DataFrame.pivot \
pandas.DataFrame.sort_values \
pandas.DataFrame.plot.hexbin \
pandas.DataFrame.plot.line \
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 26349dc129361..0fbfae22f4663 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -1496,10 +1496,10 @@ def set_td_classes(self, classes: DataFrame) -> Styler:
Using `MultiIndex` columns and a `classes` `DataFrame` as a subset of the
underlying,
- >>> df = pd.DataFrame([[1,2],[3,4]], index=["a", "b"],
- ... columns=[["level0", "level0"], ["level1a", "level1b"]])
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], index=["a", "b"],
+ ... columns=[["level0", "level0"], ["level1a", "level1b"]])
>>> classes = pd.DataFrame(["min-val"], index=["a"],
- ... columns=[["level0"],["level1a"]])
+ ... columns=[["level0"], ["level1a"]])
>>> df.style.set_td_classes(classes) # doctest: +SKIP
Form of the output with new additional css classes,
@@ -2717,7 +2717,7 @@ def hide(
--------
Simple application hiding specific rows:
- >>> df = pd.DataFrame([[1,2], [3,4], [5,6]], index=["a", "b", "c"])
+ >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], index=["a", "b", "c"])
>>> df.style.hide(["a", "b"]) # doctest: +SKIP
0 1
c 5 6
@@ -2725,7 +2725,7 @@ def hide(
Hide the index and retain the data values:
>>> midx = pd.MultiIndex.from_product([["x", "y"], ["a", "b", "c"]])
- >>> df = pd.DataFrame(np.random.randn(6,6), index=midx, columns=midx)
+ >>> df = pd.DataFrame(np.random.randn(6, 6), index=midx, columns=midx)
>>> df.style.format("{:.1f}").hide() # doctest: +SKIP
x y
a b c a b c
@@ -2739,7 +2739,7 @@ def hide(
Hide specific rows in a MultiIndex but retain the index:
>>> df.style.format("{:.1f}").hide(subset=(slice(None), ["a", "c"]))
- ... # doctest: +SKIP
+ ... # doctest: +SKIP
x y
a b c a b c
x b 0.7 1.0 1.3 1.5 -0.0 -0.2
@@ -2748,7 +2748,7 @@ def hide(
Hide specific rows and the index through chaining:
>>> df.style.format("{:.1f}").hide(subset=(slice(None), ["a", "c"])).hide()
- ... # doctest: +SKIP
+ ... # doctest: +SKIP
x y
a b c a b c
0.7 1.0 1.3 1.5 -0.0 -0.2
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index 55541e5262719..622e047b9f99b 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -1318,9 +1318,10 @@ def format_index(
Using the default ``formatter`` for unspecified levels
>>> df = pd.DataFrame([[1, 2, 3]],
- ... columns=pd.MultiIndex.from_arrays([["a", "a", "b"],[2, np.nan, 4]]))
+ ... columns=pd.MultiIndex.from_arrays(
+ ... [["a", "a", "b"], [2, np.nan, 4]]))
>>> df.style.format_index({0: lambda v: v.upper()}, axis=1, precision=1)
- ... # doctest: +SKIP
+ ... # doctest: +SKIP
A B
2.0 nan 4.0
0 1 2 3
@@ -1329,7 +1330,7 @@ def format_index(
>>> func = lambda s: 'STRING' if isinstance(s, str) else 'FLOAT'
>>> df.style.format_index(func, axis=1, na_rep='MISS')
- ... # doctest: +SKIP
+ ... # doctest: +SKIP
STRING STRING
FLOAT MISS FLOAT
0 1 2 3
@@ -1338,7 +1339,7 @@ def format_index(
>>> df = pd.DataFrame([[1, 2, 3]], columns=['"A"', 'A&B', None])
>>> s = df.style.format_index('$ {0}', axis=1, escape="html", na_rep="NA")
- ... # doctest: +SKIP
+ ... # doctest: +SKIP
<th .. >$ "A"</th>
<th .. >$ A&B</th>
<th .. >NA</td>
@@ -1348,7 +1349,7 @@ def format_index(
>>> df = pd.DataFrame([[1, 2, 3]], columns=["123", "~", "$%#"])
>>> df.style.format_index("\\textbf{{{}}}", escape="latex", axis=1).to_latex()
- ... # doctest: +SKIP
+ ... # doctest: +SKIP
\begin{tabular}{lrrr}
{} & {\textbf{123}} & {\textbf{\textasciitilde }} & {\textbf{\$\%\#}} \\
0 & 1 & 2 & 3 \\
@@ -1475,7 +1476,7 @@ def relabel_index(
Chaining with pre-hidden elements
- >>> df.style.hide([0,1]).relabel_index(["C"]) # doctest: +SKIP
+ >>> df.style.hide([0, 1]).relabel_index(["C"]) # doctest: +SKIP
col
C c
@@ -1493,9 +1494,10 @@ def relabel_index(
1 5
1 0 6
1 7
- >>> styler.hide((midx.get_level_values(0)==0)|(midx.get_level_values(1)==0))
- ... # doctest: +SKIP
- >>> styler.hide(level=[0,1]) # doctest: +SKIP
+ >>> styler.hide((midx.get_level_values(0) == 0) |
+ ... (midx.get_level_values(1) == 0))
+ ... # doctest: +SKIP
+ >>> styler.hide(level=[0, 1]) # doctest: +SKIP
>>> styler.relabel_index(["binary6", "binary7"]) # doctest: +SKIP
col
binary6 6
@@ -1503,9 +1505,9 @@ def relabel_index(
We can also achieve the above by indexing first and then re-labeling
- >>> styler = df.loc[[(1,1,0), (1,1,1)]].style
- >>> styler.hide(level=[0,1]).relabel_index(["binary6", "binary7"])
- ... # doctest: +SKIP
+ >>> styler = df.loc[[(1, 1, 0), (1, 1, 1)]].style
+ >>> styler.hide(level=[0, 1]).relabel_index(["binary6", "binary7"])
+ ... # doctest: +SKIP
col
binary6 6
binary7 7
@@ -1516,9 +1518,9 @@ def relabel_index(
brackets if the string if pre-formatted),
>>> df = pd.DataFrame({"samples": np.random.rand(10)})
- >>> styler = df.loc[np.random.randint(0,10,3)].style
+ >>> styler = df.loc[np.random.randint(0, 10, 3)].style
>>> styler.relabel_index([f"sample{i+1} ({{}})" for i in range(3)])
- ... # doctest: +SKIP
+ ... # doctest: +SKIP
samples
sample1 (5) 0.315811
sample2 (0) 0.495941
| All EX03 Errors resolved in the following cases:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.io.formats.style.Styler.format_index
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.io.formats.style.Styler.relabel_index
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.io.formats.style.Styler.hide
4. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.io.formats.style.Styler.set_td_classes
5. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.idxmax
6. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.idxmin
7. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.pivot
OUTPUT:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.io.formats.style.Styler.format_index
```
################################################################################
################################## Validation ##################################
################################################################################
1 Errors found for `pandas.io.formats.style.Styler.format_index`:
Return value has no description
```
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.io.formats.style.Styler.relabel_index
```
################################################################################
################################## Validation ##################################
################################################################################
1 Errors found for `pandas.io.formats.style.Styler.relabel_index`:
Return value has no description
```
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.io.formats.style.Styler.hide
```
################################################################################
################################## Validation ##################################
################################################################################
2 Errors found for `pandas.io.formats.style.Styler.hide`:
Return value has no description
See Also section not found
```
4. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.io.formats.style.Styler.set_td_classes
```
################################################################################
################################## Validation ##################################
################################################################################
2 Errors found for `pandas.io.formats.style.Styler.set_td_classes`:
No extended summary found
Return value has no description
```
5. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.idxmax
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.idxmax" correct. :)
```
6. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.idxmin
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.idxmin" correct. :)
```
7. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.pivot
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.pivot" correct. :)
```
- [ ] xref https://github.com/pandas-dev/pandas/issues/56804
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56881 | 2024-01-15T01:39:56Z | 2024-01-15T06:50:44Z | 2024-01-15T06:50:43Z | 2024-01-15T23:17:17Z |
DOC: fix EX03 errors in docstrings - pandas.io.json.build_table_schema, pandas.read_stata, pandas.plotting.scatter_matrix, pandas.Index.droplevel , pandas.Grouper | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index f27dbeaf35915..190af8115fdc2 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -83,16 +83,9 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.Timestamp.floor \
pandas.Timestamp.round \
pandas.read_json \
- pandas.io.json.build_table_schema \
pandas.io.formats.style.Styler.to_latex \
pandas.read_parquet \
pandas.DataFrame.to_sql \
- pandas.read_stata \
- pandas.plotting.scatter_matrix \
- pandas.Index.droplevel \
- pandas.MultiIndex.names \
- pandas.MultiIndex.droplevel \
- pandas.Grouper \
pandas.io.formats.style.Styler.map \
pandas.io.formats.style.Styler.apply_index \
pandas.io.formats.style.Styler.map_index \
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index e68c393f8f707..a93cf33590c3e 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -151,8 +151,8 @@ class Grouper:
Specify a resample operation on the column 'Publish date'
>>> df = pd.DataFrame(
- ... {
- ... "Publish date": [
+ ... {
+ ... "Publish date": [
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-09"),
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index bdd6392387ae8..a2666cd6cb229 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2124,7 +2124,7 @@ def droplevel(self, level: IndexLabel = 0):
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
- ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])
+ ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py
index 4d9fba72cf173..c279eeea78c6b 100644
--- a/pandas/io/json/_table_schema.py
+++ b/pandas/io/json/_table_schema.py
@@ -277,7 +277,7 @@ def build_table_schema(
... {'A': [1, 2, 3],
... 'B': ['a', 'b', 'c'],
... 'C': pd.date_range('2016-01-01', freq='d', periods=3),
- ... }, index=pd.Index(range(3), name='idx'))
+ ... }, index=pd.Index(range(3), name='idx'))
>>> build_table_schema(df)
{'fields': \
[{'name': 'idx', 'type': 'integer'}, \
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 4abf9af185a01..576e27f202524 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -176,7 +176,7 @@
Creating a dummy stata for this example
>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', 'parrot'],
-... 'speed': [350, 18, 361, 15]}}) # doctest: +SKIP
+... 'speed': [350, 18, 361, 15]}}) # doctest: +SKIP
>>> df.to_stata('animals.dta') # doctest: +SKIP
Read a Stata dta file:
@@ -189,7 +189,7 @@
>>> df = pd.DataFrame(values, columns=["i"]) # doctest: +SKIP
>>> df.to_stata('filename.dta') # doctest: +SKIP
->>> with pd.read_stata('filename.dta', chunksize=10000) as itr: # doctest: +SKIP
+>>> with pd.read_stata('filename.dta', chunksize=10000) as itr: # doctest: +SKIP
>>> for chunk in itr:
... # Operate on a single chunk, e.g., chunk.mean()
... pass # doctest: +SKIP
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index 18db460d388a4..c8c8f68f5289e 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -204,7 +204,7 @@ def scatter_matrix(
.. plot::
:context: close-figs
- >>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
+ >>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A', 'B', 'C', 'D'])
>>> pd.plotting.scatter_matrix(df, alpha=0.2)
array([[<Axes: xlabel='A', ylabel='A'>, <Axes: xlabel='B', ylabel='A'>,
<Axes: xlabel='C', ylabel='A'>, <Axes: xlabel='D', ylabel='A'>],
| All EX03 Errors resolved in the following cases:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.io.json.build_table_schema
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.read_stata
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.plotting.scatter_matrix
4. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Index.droplevel
5. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Grouper
OUTPUT:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.io.json.build_table_schema
```
################################################################################
################################## Validation ##################################
################################################################################
4 Errors found for `pandas.io.json.build_table_schema`:
No extended summary found
Parameter "data" has no description
Return value has no description
See Also section not found
```
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.read_stata
```
################################################################################
################################## Validation ##################################
################################################################################
2 Errors found for `pandas.read_stata`:
No extended summary found
Return value has no description
```
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.plotting.scatter_matrix
```
################################################################################
################################## Validation ##################################
################################################################################
4 Errors found for `pandas.plotting.scatter_matrix`:
No extended summary found
Parameter "frame" has no description
Parameter "ax" has no description
See Also section not found
```
4. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Index.droplevel
```
################################################################################
################################## Validation ##################################
################################################################################
2 Errors found for `pandas.Index.droplevel`:
Return value has no description
See Also section not found
```
5. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Grouper
```
################################################################################
################################## Validation ##################################
################################################################################
3 Errors found for `pandas.Grouper`:
Parameters {'*args', '**kwargs'} not documented
Unknown parameters {'label', 'level', 'origin', 'sort', 'freq', 'closed', 'key', 'convention', 'offset', 'dropna', 'axis'}
See Also section not found
```
- [ ] xref https://github.com/pandas-dev/pandas/issues/56804
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56880 | 2024-01-15T00:19:20Z | 2024-01-15T17:57:47Z | 2024-01-15T17:57:47Z | 2024-01-15T23:17:27Z |
DOC: fix EX03 errors in docstrings - pandas.Timestamp - ceil, floor, round | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index f27dbeaf35915..78bf63fa3a3ba 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -79,9 +79,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.errors.SettingWithCopyWarning \
pandas.errors.SpecificationError \
pandas.errors.UndefinedVariableError \
- pandas.Timestamp.ceil \
- pandas.Timestamp.floor \
- pandas.Timestamp.round \
pandas.read_json \
pandas.io.json.build_table_schema \
pandas.io.formats.style.Styler.to_latex \
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 65db0e05f859c..cd5e6e521b79f 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -973,16 +973,16 @@ timedelta}, default 'raise'
A timestamp can be rounded using multiple frequency units:
- >>> ts.round(freq='h') # hour
+ >>> ts.round(freq='h') # hour
Timestamp('2020-03-14 16:00:00')
- >>> ts.round(freq='min') # minute
+ >>> ts.round(freq='min') # minute
Timestamp('2020-03-14 15:33:00')
- >>> ts.round(freq='s') # seconds
+ >>> ts.round(freq='s') # seconds
Timestamp('2020-03-14 15:32:52')
- >>> ts.round(freq='ms') # milliseconds
+ >>> ts.round(freq='ms') # milliseconds
Timestamp('2020-03-14 15:32:52.193000')
``freq`` can also be a multiple of a single unit, like '5min' (i.e. 5 minutes):
@@ -1062,16 +1062,16 @@ timedelta}, default 'raise'
A timestamp can be floored using multiple frequency units:
- >>> ts.floor(freq='h') # hour
+ >>> ts.floor(freq='h') # hour
Timestamp('2020-03-14 15:00:00')
- >>> ts.floor(freq='min') # minute
+ >>> ts.floor(freq='min') # minute
Timestamp('2020-03-14 15:32:00')
- >>> ts.floor(freq='s') # seconds
+ >>> ts.floor(freq='s') # seconds
Timestamp('2020-03-14 15:32:52')
- >>> ts.floor(freq='ns') # nanoseconds
+ >>> ts.floor(freq='ns') # nanoseconds
Timestamp('2020-03-14 15:32:52.192548651')
``freq`` can also be a multiple of a single unit, like '5min' (i.e. 5 minutes):
@@ -1151,16 +1151,16 @@ timedelta}, default 'raise'
A timestamp can be ceiled using multiple frequency units:
- >>> ts.ceil(freq='h') # hour
+ >>> ts.ceil(freq='h') # hour
Timestamp('2020-03-14 16:00:00')
- >>> ts.ceil(freq='min') # minute
+ >>> ts.ceil(freq='min') # minute
Timestamp('2020-03-14 15:33:00')
- >>> ts.ceil(freq='s') # seconds
+ >>> ts.ceil(freq='s') # seconds
Timestamp('2020-03-14 15:32:53')
- >>> ts.ceil(freq='us') # microseconds
+ >>> ts.ceil(freq='us') # microseconds
Timestamp('2020-03-14 15:32:52.192549')
``freq`` can also be a multiple of a single unit, like '5min' (i.e. 5 minutes):
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 1dae2403706e8..d4cd90613ca5b 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -1973,16 +1973,16 @@ timedelta}, default 'raise'
A timestamp can be rounded using multiple frequency units:
- >>> ts.round(freq='h') # hour
+ >>> ts.round(freq='h') # hour
Timestamp('2020-03-14 16:00:00')
- >>> ts.round(freq='min') # minute
+ >>> ts.round(freq='min') # minute
Timestamp('2020-03-14 15:33:00')
- >>> ts.round(freq='s') # seconds
+ >>> ts.round(freq='s') # seconds
Timestamp('2020-03-14 15:32:52')
- >>> ts.round(freq='ms') # milliseconds
+ >>> ts.round(freq='ms') # milliseconds
Timestamp('2020-03-14 15:32:52.193000')
``freq`` can also be a multiple of a single unit, like '5min' (i.e. 5 minutes):
@@ -2064,16 +2064,16 @@ timedelta}, default 'raise'
A timestamp can be floored using multiple frequency units:
- >>> ts.floor(freq='h') # hour
+ >>> ts.floor(freq='h') # hour
Timestamp('2020-03-14 15:00:00')
- >>> ts.floor(freq='min') # minute
+ >>> ts.floor(freq='min') # minute
Timestamp('2020-03-14 15:32:00')
- >>> ts.floor(freq='s') # seconds
+ >>> ts.floor(freq='s') # seconds
Timestamp('2020-03-14 15:32:52')
- >>> ts.floor(freq='ns') # nanoseconds
+ >>> ts.floor(freq='ns') # nanoseconds
Timestamp('2020-03-14 15:32:52.192548651')
``freq`` can also be a multiple of a single unit, like '5min' (i.e. 5 minutes):
@@ -2153,16 +2153,16 @@ timedelta}, default 'raise'
A timestamp can be ceiled using multiple frequency units:
- >>> ts.ceil(freq='h') # hour
+ >>> ts.ceil(freq='h') # hour
Timestamp('2020-03-14 16:00:00')
- >>> ts.ceil(freq='min') # minute
+ >>> ts.ceil(freq='min') # minute
Timestamp('2020-03-14 15:33:00')
- >>> ts.ceil(freq='s') # seconds
+ >>> ts.ceil(freq='s') # seconds
Timestamp('2020-03-14 15:32:53')
- >>> ts.ceil(freq='us') # microseconds
+ >>> ts.ceil(freq='us') # microseconds
Timestamp('2020-03-14 15:32:52.192549')
``freq`` can also be a multiple of a single unit, like '5min' (i.e. 5 minutes):
| All EX03 Errors resolved in the following cases:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Timestamp.ceil
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Timestamp.floor
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Timestamp.round
OUTPUT:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Timestamp.ceil
```
################################################################################
################################## Validation ##################################
################################################################################
2 Errors found for `pandas.Timestamp.ceil`:
No extended summary found
See Also section not found
```
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Timestamp.floor
```
################################################################################
################################## Validation ##################################
################################################################################
2 Errors found for `pandas.Timestamp.floor`:
No extended summary found
See Also section not found
```
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Timestamp.round
```
################################################################################
################################## Validation ##################################
################################################################################
2 Errors found for `pandas.Timestamp.round`:
No extended summary found
See Also section not found
```
- [ ] xref https://github.com/pandas-dev/pandas/issues/56804
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56879 | 2024-01-14T23:33:15Z | 2024-01-15T17:56:28Z | 2024-01-15T17:56:27Z | 2024-01-15T23:17:37Z |
DOC: fixed Ex03 errors in docstrings: | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index f27dbeaf35915..bdd38bda2dfc0 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -73,9 +73,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
$BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX03 --ignore_functions \
pandas.Series.plot.line \
pandas.Series.to_sql \
- pandas.errors.DatabaseError \
- pandas.errors.IndexingError \
- pandas.errors.InvalidColumnName \
pandas.errors.SettingWithCopyWarning \
pandas.errors.SpecificationError \
pandas.errors.UndefinedVariableError \
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 9d39b8d92fec9..3cda1273d4ae7 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -599,19 +599,30 @@ class IndexingError(Exception):
"""
Exception is raised when trying to index and there is a mismatch in dimensions.
+ Raised by properties like :attr:`.pandas.DataFrame.iloc` when
+ an indexer is out of bounds or :attr:`.pandas.DataFrame.loc` when its index is
+ unalignable to the frame index.
+
+ See Also
+ --------
+ DataFrame.iloc : Purely integer-location based indexing for \
+ selection by position.
+ DataFrame.loc : Access a group of rows and columns by label(s) \
+ or a boolean array.
+
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 1]})
- >>> df.loc[..., ..., 'A'] # doctest: +SKIP
+ >>> df.loc[..., ..., 'A'] # doctest: +SKIP
... # IndexingError: indexer may only contain one '...' entry
>>> df = pd.DataFrame({'A': [1, 1, 1]})
- >>> df.loc[1, ..., ...] # doctest: +SKIP
+ >>> df.loc[1, ..., ...] # doctest: +SKIP
... # IndexingError: Too many indexers
- >>> df[pd.Series([True], dtype=bool)] # doctest: +SKIP
+ >>> df[pd.Series([True], dtype=bool)] # doctest: +SKIP
... # IndexingError: Unalignable boolean Series provided as indexer...
>>> s = pd.Series(range(2),
- ... index = pd.MultiIndex.from_product([["a", "b"], ["c"]]))
- >>> s.loc["a", "c", "d"] # doctest: +SKIP
+ ... index=pd.MultiIndex.from_product([["a", "b"], ["c"]]))
+ >>> s.loc["a", "c", "d"] # doctest: +SKIP
... # IndexingError: Too many indexers
"""
@@ -713,13 +724,19 @@ class AttributeConflictWarning(Warning):
class DatabaseError(OSError):
"""
- Error is raised when executing sql with bad syntax or sql that throws an error.
+ Error is raised when executing SQL with bad syntax or SQL that throws an error.
+
+ Raised by :func:`.pandas.read_sql` when a bad SQL statement is passed in.
+
+ See Also
+ --------
+ read_sql : Read SQL query or database table into a DataFrame.
Examples
--------
>>> from sqlite3 import connect
>>> conn = connect(':memory:')
- >>> pd.read_sql('select * test', conn) # doctest: +SKIP
+ >>> pd.read_sql('select * test', conn) # doctest: +SKIP
... # DatabaseError: Execution failed on sql 'test': near "test": syntax error
"""
@@ -758,10 +775,14 @@ class InvalidColumnName(Warning):
Because the column name is an invalid Stata variable, the name needs to be
converted.
+ See Also
+ --------
+ DataFrame.to_stata : Export DataFrame object to Stata dta format.
+
Examples
--------
>>> df = pd.DataFrame({"0categories": pd.Series([2, 2])})
- >>> df.to_stata('test') # doctest: +SKIP
+ >>> df.to_stata('test') # doctest: +SKIP
... # InvalidColumnName: Not all pandas column names were valid Stata variable...
"""
| **PR SUMMARY**
Checked if validation docstrings passes for:
- [x] pandas.errors.DatabaseError
- [x] pandas.errors.IndexingError
- [x] pandas.errors.InvalidColumnName
OUTPUT:
1. python scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.errors.DatabaseError
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.errors.DatabaseError" correct. :)
```
2. python scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.errors.IndexingError
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.errors.IndexingError" correct. :)
```
3. python scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.errors.InvalidColumnName
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.errors.InvalidColumnName" correct. :)
```
**PR CHECKLIST**
- [ ] xref #56804
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56878 | 2024-01-14T23:26:54Z | 2024-01-15T17:59:10Z | 2024-01-15T17:59:10Z | 2024-01-15T17:59:18Z |
Backport PR #56873 on branch 2.2.x (CI: unxfail adbc-driver-postgresql test) | diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 6645aefd4f0a7..791b6da3deeca 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -2229,12 +2229,14 @@ def test_api_chunksize_read(conn, request):
@pytest.mark.parametrize("conn", all_connectable)
def test_api_categorical(conn, request):
if conn == "postgresql_adbc_conn":
- request.node.add_marker(
- pytest.mark.xfail(
- reason="categorical dtype not implemented for ADBC postgres driver",
- strict=True,
+ adbc = import_optional_dependency("adbc_driver_postgresql", errors="ignore")
+ if adbc is not None and Version(adbc.__version__) < Version("0.9.0"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="categorical dtype not implemented for ADBC postgres driver",
+ strict=True,
+ )
)
- )
# GH8624
# test that categorical gets written correctly as dense column
conn = request.getfixturevalue(conn)
| Backport PR #56873: CI: unxfail adbc-driver-postgresql test | https://api.github.com/repos/pandas-dev/pandas/pulls/56875 | 2024-01-14T17:42:44Z | 2024-01-15T00:23:48Z | 2024-01-15T00:23:48Z | 2024-01-15T00:23:48Z |
CI: unxfail adbc-driver-postgresql test | diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 2ddbbaa1bf17c..d86b80691190d 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -2229,12 +2229,14 @@ def test_api_chunksize_read(conn, request):
@pytest.mark.parametrize("conn", all_connectable)
def test_api_categorical(conn, request):
if conn == "postgresql_adbc_conn":
- request.node.add_marker(
- pytest.mark.xfail(
- reason="categorical dtype not implemented for ADBC postgres driver",
- strict=True,
+ adbc = import_optional_dependency("adbc_driver_postgresql", errors="ignore")
+ if adbc is not None and Version(adbc.__version__) < Version("0.9.0"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="categorical dtype not implemented for ADBC postgres driver",
+ strict=True,
+ )
)
- )
# GH8624
# test that categorical gets written correctly as dense column
conn = request.getfixturevalue(conn)
| there's been a new release https://pypi.org/project/adbc-driver-postgresql/ | https://api.github.com/repos/pandas-dev/pandas/pulls/56873 | 2024-01-14T11:16:32Z | 2024-01-14T17:42:36Z | 2024-01-14T17:42:36Z | 2024-01-14T17:42:47Z |
wip: depr partial row-wise multi-column-wise enlargement | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 5de5bd58bd35f..f92a9b71a2781 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -657,6 +657,24 @@ Set the following option to opt into the future behavior:
In [9]: pd.set_option("future.no_silent_downcasting", True)
+Deprecated list-like-column-wise enlargement with partial row indexer
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Enlarging a dataframe with a partial row indexer and a list-like column indexer is deprecated.
+
+Instead of (suppose ``'A'`` and ``'B'`` are not already columns in ``df``)
+
+.. code-block:: python
+
+ df.loc[idx, ['A', 'B']] = (val_1, val_2)
+
+please do
+
+.. code-block:: python
+
+ df.loc[idx, 'A'] = val_1
+ df.loc[idx, 'B'] = val_2
+
Other Deprecations
^^^^^^^^^^^^^^^^^^
- Changed :meth:`Timedelta.resolution_string` to return ``h``, ``min``, ``s``, ``ms``, ``us``, and ``ns`` instead of ``H``, ``T``, ``S``, ``L``, ``U``, and ``N``, for compatibility with respective deprecations in frequency aliases (:issue:`52536`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 0f892d4924933..8a66f94ebbe10 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -877,7 +877,20 @@ def _ensure_listlike_indexer(self, key, axis=None, value=None) -> None:
)
self.obj._mgr = new_mgr
return
-
+ if len(diff):
+ warnings.warn(
+ (
+ "list-like-column-wise enlargement with partial-row-wise "
+ "indexer is deprecated.\n"
+ "Instead of\n"
+ " df.loc[idx, ['A', 'B']] = (val_1, val_2)\n"
+ "please do\n"
+ " df.loc[idx, 'A'] = val_1\n"
+ " df.loc[idx, 'B'] = val_2\n"
+ ),
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
self.obj._mgr = self.obj._mgr.reindex_axis(keys, axis=0, only_slice=True)
@final
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index de7d644698f2c..8e4d7991d6f7d 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -377,7 +377,15 @@ def test_multiindex_setitem_columns_enlarging(self, indexer, exp_value):
# GH#39147
mi = MultiIndex.from_tuples([(1, 2), (3, 4)])
df = DataFrame([[1, 2], [3, 4]], index=mi, columns=["a", "b"])
- df.loc[indexer, ["c", "d"]] = 1.0
+ warn = FutureWarning if indexer != slice(None) else None
+ with tm.assert_produces_warning(
+ warn,
+ match=(
+ "list-like-column-wise enlargement with partial-row-wise indexer "
+ "is deprecated"
+ ),
+ ):
+ df.loc[indexer, ["c", "d"]] = 1.0
expected = DataFrame(
[[1, 2, 1.0, 1.0], [3, 4, exp_value, exp_value]],
index=mi,
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index c446f2c44b745..e40066a51db97 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -940,8 +940,12 @@ def test_loc_setitem_with_scalar_index(self, indexer, value):
def test_loc_setitem_missing_columns(self, index, box, expected):
# GH 29334
df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"])
-
- df.loc[index] = box
+ warn = FutureWarning if index[0] != slice(None, None, None) else None
+ with tm.assert_produces_warning(
+ warn,
+ match=("list-like-column-wise enlargement with partial-row-wise indexer"),
+ ):
+ df.loc[index] = box
tm.assert_frame_equal(df, expected)
def test_loc_coercion(self):
@@ -3327,7 +3331,14 @@ def test_loc_setitem_dict_timedelta_multiple_set(self):
def test_loc_set_multiple_items_in_multiple_new_columns(self):
# GH 25594
df = DataFrame(index=[1, 2], columns=["a"])
- df.loc[1, ["b", "c"]] = [6, 7]
+ with tm.assert_produces_warning(
+ FutureWarning,
+ match=(
+ "list-like-column-wise enlargement with partial-row-wise indexer "
+ "is deprecated"
+ ),
+ ):
+ df.loc[1, ["b", "c"]] = [6, 7]
expected = DataFrame(
{
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
just curious to see what fails, and I'm questioning whether it's worth adding complexity to support this
Is anyone particularly attached to this? If not, I'd suggest deprecating, because:
- the workaround on the users' side is very simple
- `__setitem__` is already incredibly complex and spaghettified
- there's only 3 tests which use this pattern
- there's little time left until 3.0, and I don't have time to work on this further before 2.2
NOTE: I'm not actually sure that disallowing this is even the way to go, will sleep on it | https://api.github.com/repos/pandas-dev/pandas/pulls/56872 | 2024-01-14T09:46:31Z | 2024-02-16T18:22:01Z | null | 2024-02-16T18:22:01Z |
DOC: Remove manual doctesting from validate_docstrings | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index a08a0cbd87383..95362dbbfa5bb 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -65,8 +65,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (EX01, EX02, EX04, GL01, GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, PR03, PR04, PR05, PR06, PR08, PR09, PR10, RT01, RT02, RT04, RT05, SA02, SA03, SA04, SS01, SS02, SS03, SS04, SS05, SS06)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX01,EX02,EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT02,RT04,RT05,SA02,SA03,SA04,SS01,SS02,SS03,SS04,SS05,SS06
+ MSG='Validate docstrings (EX01, EX04, GL01, GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, PR03, PR04, PR05, PR06, PR08, PR09, PR10, RT01, RT02, RT04, RT05, SA02, SA03, SA04, SS01, SS02, SS03, SS04, SS05, SS06)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX01,EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT02,RT04,RT05,SA02,SA03,SA04,SS01,SS02,SS03,SS04,SS05,SS06
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Partially validate docstrings (EX03)' ; echo $MSG
diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index 02c6808658a33..baa27d14acc8c 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -199,12 +199,6 @@ def test_bad_docstrings(self, capsys, klass, func, msgs) -> None:
for msg in msgs:
assert msg in " ".join([err[1] for err in result["errors"]])
- def test_leftover_files_raises(self) -> None:
- with pytest.raises(Exception, match="The following files"):
- validate_docstrings.pandas_validate(
- self._import_path(klass="BadDocstrings", func="leftover_files")
- )
-
def test_validate_all_ignore_functions(self, monkeypatch) -> None:
monkeypatch.setattr(
validate_docstrings,
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index 76d64d27b221c..53c67b7df928b 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -18,7 +18,6 @@
import argparse
import doctest
import importlib
-import io
import json
import os
import pathlib
@@ -28,15 +27,12 @@
import matplotlib
import matplotlib.pyplot as plt
-import numpy
from numpydoc.docscrape import get_doc_object
from numpydoc.validate import (
Validator,
validate,
)
-import pandas
-
# With template backend, matplotlib plots nothing
matplotlib.use("template")
@@ -63,7 +59,6 @@
"GL05": "Use 'array-like' rather than 'array_like' in docstrings.",
"SA05": "{reference_name} in `See Also` section does not need `pandas` "
"prefix, use {right_reference} instead.",
- "EX02": "Examples do not pass tests:\n{doctest_log}",
"EX03": "flake8 error: line {line_number}, col {col_number}: {error_code} "
"{error_message}",
"EX04": "Do not import {imported_library}, as it is imported "
@@ -167,32 +162,6 @@ def name(self):
def mentioned_private_classes(self):
return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc]
- @property
- def examples_errors(self):
- flags = doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL
- finder = doctest.DocTestFinder()
- runner = doctest.DocTestRunner(optionflags=flags)
- context = {"np": numpy, "pd": pandas}
- error_msgs = ""
- current_dir = set(os.listdir())
- for test in finder.find(self.raw_doc, self.name, globs=context):
- f = io.StringIO()
- runner.run(test, out=f.write)
- error_msgs += f.getvalue()
- leftovers = set(os.listdir()).difference(current_dir)
- if leftovers:
- for leftover in leftovers:
- path = pathlib.Path(leftover).resolve()
- if path.is_dir():
- path.rmdir()
- elif path.is_file():
- path.unlink(missing_ok=True)
- raise Exception(
- f"The following files were leftover from the doctest: "
- f"{leftovers}. Please use # doctest: +SKIP"
- )
- return error_msgs
-
@property
def examples_source_code(self):
lines = doctest.DocTestParser().get_examples(self.raw_doc)
@@ -290,12 +259,6 @@ def pandas_validate(func_name: str):
result["examples_errs"] = ""
if doc.examples:
- result["examples_errs"] = doc.examples_errors
- if result["examples_errs"]:
- result["errors"].append(
- pandas_error("EX02", doctest_log=result["examples_errs"])
- )
-
for error_code, error_message, line_number, col_number in doc.validate_pep8():
result["errors"].append(
pandas_error(
@@ -429,9 +392,6 @@ def header(title, width=80, char="#") -> str:
if result["errors"]:
sys.stderr.write(f'{len(result["errors"])} Errors found for `{func_name}`:\n')
for err_code, err_desc in result["errors"]:
- if err_code == "EX02": # Failing examples are printed at the end
- sys.stderr.write("\tExamples do not pass tests\n")
- continue
sys.stderr.write(f"\t{err_desc}\n")
else:
sys.stderr.write(f'Docstring for "{func_name}" correct. :)\n')
| Since we run doctests using pytest via `pd.test(run_doctests=True)`, we don't need to run them in this script anymore | https://api.github.com/repos/pandas-dev/pandas/pulls/56871 | 2024-01-14T07:06:45Z | 2024-01-14T17:41:37Z | 2024-01-14T17:41:37Z | 2024-01-14T17:41:42Z |
DOC: fix EX03 errors in docstrings - pandas.Index.rename, pandas.Index.isin, pandas.IndexSlice | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 50310a3af9661..0014a70e5e018 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -90,12 +90,9 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.DataFrame.to_sql \
pandas.read_stata \
pandas.plotting.scatter_matrix \
- pandas.Index.rename \
pandas.Index.droplevel \
- pandas.Index.isin \
pandas.MultiIndex.names \
pandas.MultiIndex.droplevel \
- pandas.IndexSlice \
pandas.Grouper \
pandas.io.formats.style.Styler.map \
pandas.io.formats.style.Styler.apply_index \
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index c3775961cedb8..bdd6392387ae8 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1953,7 +1953,7 @@ def rename(self, name, inplace: bool = False) -> Self | None:
>>> idx = pd.MultiIndex.from_product([['python', 'cobra'],
... [2018, 2019]],
- ... names=['kind', 'year'])
+ ... names=['kind', 'year'])
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
@@ -6575,7 +6575,7 @@ def isin(self, values, level=None) -> npt.NDArray[np.bool_]:
Examples
--------
- >>> idx = pd.Index([1,2,3])
+ >>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
@@ -6584,7 +6584,7 @@ def isin(self, values, level=None) -> npt.NDArray[np.bool_]:
>>> idx.isin([1, 4])
array([ True, False, False])
- >>> midx = pd.MultiIndex.from_arrays([[1,2,3],
+ >>> midx = pd.MultiIndex.from_arrays([[1, 2, 3],
... ['red', 'blue', 'green']],
... names=('number', 'color'))
>>> midx
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 0f892d4924933..2e7a237406ca5 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -121,7 +121,7 @@ class _IndexSlice:
Examples
--------
- >>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']])
+ >>> midx = pd.MultiIndex.from_product([['A0', 'A1'], ['B0', 'B1', 'B2', 'B3']])
>>> columns = ['foo', 'bar']
>>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))),
... index=midx, columns=columns)
| All EX03 Errors resolved in the following cases:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Index.rename
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Index.isin
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.IndexSlice
OUTPUT:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Index.rename
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.Index.rename" correct. :)
```
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Index.isin
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.Index.isin" correct. :)
```
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.IndexSlice
```
################################################################################
################################## Validation ##################################
################################################################################
1 Errors found for `pandas.IndexSlice`:
No extended summary found
```
- [ ] xref https://github.com/pandas-dev/pandas/issues/56804
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56870 | 2024-01-14T06:36:14Z | 2024-01-15T07:51:10Z | 2024-01-15T07:51:10Z | 2024-01-15T23:17:47Z |
BUG fix for datetime null (closes #56853) | diff --git a/doc/source/whatsnew/v2.3.0.rst b/doc/source/whatsnew/v2.3.0.rst
index e217e8c8557bb..b6410667ae319 100644
--- a/doc/source/whatsnew/v2.3.0.rst
+++ b/doc/source/whatsnew/v2.3.0.rst
@@ -111,6 +111,7 @@ Performance improvements
Bug fixes
~~~~~~~~~
+- Fixed bug causing null values when using datetimes in multi-index. (:issue:`56853`)
- Fixed bug in :meth:`Series.diff` allowing non-integer values for the ``periods`` argument. (:issue:`56607`)
@@ -122,7 +123,6 @@ Categorical
Datetimelike
^^^^^^^^^^^^
- Bug in :func:`date_range` where the last valid timestamp would sometimes not be produced (:issue:`56134`)
--
Timedelta
^^^^^^^^^
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 25bcc1f307082..65b8f6f2cf1ad 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3964,6 +3964,16 @@ def insert(self, loc: int, item) -> MultiIndex:
else:
lev_loc = level.get_loc(k)
+ if any(isna(item) for item in level):
+ # check to make sure no null values are in
+ # the level, if they are replace with
+ # empty strings. if this is removed,
+ # it's possible a null value will end up in
+ # indexes with datetime values.
+ level = Index(
+ ["" if isna(item) else item for item in level], dtype=object
+ )
+
new_levels.append(level)
new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))
diff --git a/pandas/tests/indexes/datetimelike_/test_indexing.py b/pandas/tests/indexes/datetimelike_/test_indexing.py
index ee7128601256a..b2e09e6e50252 100644
--- a/pandas/tests/indexes/datetimelike_/test_indexing.py
+++ b/pandas/tests/indexes/datetimelike_/test_indexing.py
@@ -43,3 +43,11 @@ def construct(dtype):
missing = np.arange(6, dtype=np.intp)
tm.assert_numpy_array_equal(result[0], no_matches)
tm.assert_numpy_array_equal(result[1], missing)
+
+
+def test_multiindex_datetime_creation_null_value():
+ df = pd.DataFrame({("A", pd.Timestamp("2024-01-01")): [0]})
+ df.insert(1, "B", [1])
+ df["B"]
+ df["B", ""]
+ del df["B"]
| Closes #56853
This is my first time contributing to this repo and I'm not super experienced with open source, sorry if the style isn't right or if there's some better way of doing it.
From what I can tell, the issue was caused by recurring calls to `get_loc` functions caused by the fact that when you pass in a datetime, it automatically converts subsequent insertions in the same level to datetime. If you don't supply one, it converts it to `NaT`, which causes an indexing error due to having a null value as an index, which gets you caught in endless recursion from `try`/`except` blocks. If you switch the datetime out for a float (or any other common type), the problem is avoided because the unfilled indexes are filled in will empty strings rather that null values. As far as I can tell, this problem only exists with datetime types.
It's kind of a hacky fix, but I figured it would just be easiest to check for null values in the indexes at the end of an insertion and replace them with empty strings. I tried fiddling around with the datetime stuff itself, but it just ended up messing other things up. Let me know if there's some other solution. I also added this as a test case, just to see whether it breaks.
- [x] xref #56853
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions (no new arguments/methods/functions).
- [x] Added an entry in the latest doc/source/whatsnew/vX.X.X.rst file if fixing a bug or adding a new feature. | https://api.github.com/repos/pandas-dev/pandas/pulls/56869 | 2024-01-14T06:10:02Z | 2024-02-28T17:50:35Z | null | 2024-02-28T17:50:36Z |
DOC: fix EX03 errors in docstrings - pandas.core.resample.Resampler.interpolate, pandas.pivot, pandas.merge_asof, pandas.wide_to_long | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 8658715b8bf3e..0901efba139c3 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -93,11 +93,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.read_parquet \
pandas.DataFrame.to_sql \
pandas.read_stata \
- pandas.core.resample.Resampler.interpolate \
pandas.plotting.scatter_matrix \
- pandas.pivot \
- pandas.merge_asof \
- pandas.wide_to_long \
pandas.Index.rename \
pandas.Index.droplevel \
pandas.Index.isin \
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index bb1cd0d738dac..1ae7000f56bc9 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -295,7 +295,7 @@ def wide_to_long(
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
- ... })
+ ... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
@@ -332,8 +332,8 @@ def wide_to_long(
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
- >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')
- >>> l
+ >>> long_format = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')
+ >>> long_format
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
@@ -358,9 +358,9 @@ def wide_to_long(
Going from long back to wide just takes some creative use of `unstack`
- >>> w = l.unstack()
- >>> w.columns = w.columns.map('{0[0]}{0[1]}'.format)
- >>> w.reset_index()
+ >>> wide_format = long_format.unstack()
+ >>> wide_format.columns = wide_format.columns.map('{0[0]}{0[1]}'.format)
+ >>> wide_format.reset_index()
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
@@ -381,7 +381,7 @@ def wide_to_long(
... 'B(weekly)-2011': np.random.rand(3),
... 'X' : np.random.randint(3, size=3)})
>>> df['id'] = df.index
- >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
+ >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
A(weekly)-2010 A(weekly)-2011 B(weekly)-2010 B(weekly)-2011 X id
0 0.548814 0.544883 0.437587 0.383442 0 0
1 0.715189 0.423655 0.891773 0.791725 1 1
@@ -430,9 +430,9 @@ def wide_to_long(
7 3 2 2.3 3.4
8 3 3 2.1 2.9
- >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age',
- ... sep='_', suffix=r'\w+')
- >>> l
+ >>> long_format = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age',
+ ... sep='_', suffix=r'\w+')
+ >>> long_format
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 804144931bcfd..6ca403bdb439a 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -601,17 +601,17 @@ def merge_asof(
... pd.Timestamp("2016-05-25 13:30:00.075")
... ],
... "ticker": [
- ... "GOOG",
- ... "MSFT",
- ... "MSFT",
- ... "MSFT",
- ... "GOOG",
- ... "AAPL",
- ... "GOOG",
- ... "MSFT"
- ... ],
- ... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
- ... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]
+ ... "GOOG",
+ ... "MSFT",
+ ... "MSFT",
+ ... "MSFT",
+ ... "GOOG",
+ ... "AAPL",
+ ... "GOOG",
+ ... "MSFT"
+ ... ],
+ ... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
+ ... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]
... }
... )
>>> quotes
@@ -626,19 +626,19 @@ def merge_asof(
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades = pd.DataFrame(
- ... {
- ... "time": [
- ... pd.Timestamp("2016-05-25 13:30:00.023"),
- ... pd.Timestamp("2016-05-25 13:30:00.038"),
- ... pd.Timestamp("2016-05-25 13:30:00.048"),
- ... pd.Timestamp("2016-05-25 13:30:00.048"),
- ... pd.Timestamp("2016-05-25 13:30:00.048")
- ... ],
- ... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
- ... "price": [51.95, 51.95, 720.77, 720.92, 98.0],
- ... "quantity": [75, 155, 100, 100, 100]
- ... }
- ... )
+ ... {
+ ... "time": [
+ ... pd.Timestamp("2016-05-25 13:30:00.023"),
+ ... pd.Timestamp("2016-05-25 13:30:00.038"),
+ ... pd.Timestamp("2016-05-25 13:30:00.048"),
+ ... pd.Timestamp("2016-05-25 13:30:00.048"),
+ ... pd.Timestamp("2016-05-25 13:30:00.048")
+ ... ],
+ ... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
+ ... "price": [51.95, 51.95, 720.77, 720.92, 98.0],
+ ... "quantity": [75, 155, 100, 100, 100]
+ ... }
+ ... )
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
| All EX03 Errors resolved in the following cases:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.resample.Resampler.interpolate
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.pivot
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.merge_asof
4. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.wide_to_long
Output:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.resample.Resampler.interpolate
```
################################################################################
################################## Validation ##################################
################################################################################
2 Errors found for `pandas.core.resample.Resampler.interpolate`:
Parameters {'**kwargs'} not documented
Unknown parameters {'``**kwargs``'}
```
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.pivot
```
################################################################################
################################## Validation ##################################
################################################################################
1 Errors found for `pandas.pivot`:
Parameter "data" has no description
```
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.merge_asof
```
################################################################################
################################## Validation ##################################
################################################################################
3 Errors found for `pandas.merge_asof`:
Parameter "left" has no description
Parameter "right" has no description
Return value has no description
```
4. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.wide_to_long
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.wide_to_long" correct. :)
```
- [ ] xref https://github.com/pandas-dev/pandas/issues/56804
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56868 | 2024-01-14T06:06:11Z | 2024-01-15T06:37:48Z | 2024-01-15T06:37:48Z | 2024-01-15T23:17:59Z |
DOC: fix EX03 in `pandas.errors` | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 8658715b8bf3e..afb76c29133f9 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -76,13 +76,9 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.errors.DatabaseError \
pandas.errors.IndexingError \
pandas.errors.InvalidColumnName \
- pandas.errors.PossibleDataLossError \
- pandas.errors.PossiblePrecisionLoss \
- pandas.errors.SettingWithCopyError \
pandas.errors.SettingWithCopyWarning \
pandas.errors.SpecificationError \
pandas.errors.UndefinedVariableError \
- pandas.errors.ValueLabelTypeMismatch \
pandas.Timestamp.ceil \
pandas.Timestamp.floor \
pandas.Timestamp.round \
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 9faa17f6e5f15..9d39b8d92fec9 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -425,7 +425,7 @@ class SettingWithCopyError(ValueError):
--------
>>> pd.options.mode.chained_assignment = 'raise'
>>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A'])
- >>> df.loc[0:3]['A'] = 'a' # doctest: +SKIP
+ >>> df.loc[0:3]['A'] = 'a' # doctest: +SKIP
... # SettingWithCopyError: A value is trying to be set on a copy of a...
"""
@@ -665,8 +665,8 @@ class PossibleDataLossError(Exception):
Examples
--------
- >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP
- >>> store.open("w") # doctest: +SKIP
+ >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP
+ >>> store.open("w") # doctest: +SKIP
... # PossibleDataLossError: Re-opening the file [my-store] with mode [a]...
"""
@@ -734,7 +734,7 @@ class PossiblePrecisionLoss(Warning):
Examples
--------
>>> df = pd.DataFrame({"s": pd.Series([1, 2**53], dtype=np.int64)})
- >>> df.to_stata('test') # doctest: +SKIP
+ >>> df.to_stata('test') # doctest: +SKIP
... # PossiblePrecisionLoss: Column converted from int64 to float64...
"""
@@ -746,7 +746,7 @@ class ValueLabelTypeMismatch(Warning):
Examples
--------
>>> df = pd.DataFrame({"categories": pd.Series(["a", 2], dtype="category")})
- >>> df.to_stata('test') # doctest: +SKIP
+ >>> df.to_stata('test') # doctest: +SKIP
... # ValueLabelTypeMismatch: Stata value labels (pandas categories) must be str...
"""
| Fix flake8 errors of some exceptions in `pandas.errors`.
- [ ] xref #56804 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56867 | 2024-01-14T05:01:22Z | 2024-01-15T06:34:27Z | 2024-01-15T06:34:27Z | 2024-01-15T07:24:38Z |
DOC: fix EX03 Errors for pandas.DataFrame.to_sql | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 5e6c02eab574d..e309d1d03f725 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -78,8 +78,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.errors.UndefinedVariableError \
pandas.read_json \
pandas.io.formats.style.Styler.to_latex \
- pandas.read_parquet \
- pandas.DataFrame.to_sql \
+ pandas.read_parquet
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 86c4a99ac052d..14c023f3912a6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2997,7 +2997,7 @@ def to_sql(
3
>>> from sqlalchemy import text
>>> with engine.connect() as conn:
- ... conn.execute(text("SELECT * FROM users")).fetchall()
+ ... conn.execute(text("SELECT * FROM users")).fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
An `sqlalchemy.engine.Connection` can also be passed to `con`:
@@ -3014,7 +3014,7 @@ def to_sql(
>>> df2.to_sql(name='users', con=engine, if_exists='append')
2
>>> with engine.connect() as conn:
- ... conn.execute(text("SELECT * FROM users")).fetchall()
+ ... conn.execute(text("SELECT * FROM users")).fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
(0, 'User 4'), (1, 'User 5'), (0, 'User 6'),
(1, 'User 7')]
@@ -3025,7 +3025,7 @@ def to_sql(
... index_label='id')
2
>>> with engine.connect() as conn:
- ... conn.execute(text("SELECT * FROM users")).fetchall()
+ ... conn.execute(text("SELECT * FROM users")).fetchall()
[(0, 'User 6'), (1, 'User 7')]
Use ``method`` to define a callable insertion method to do nothing
@@ -3044,7 +3044,7 @@ def to_sql(
For MySQL, a callable to update columns ``b`` and ``c`` if there's a conflict
on a primary key.
- >>> from sqlalchemy.dialects.mysql import insert
+ >>> from sqlalchemy.dialects.mysql import insert # doctest: +SKIP
>>> def insert_on_conflict_update(table, conn, keys, data_iter):
... # update columns "b" and "c" on primary key conflict
... data = [dict(zip(keys, row)) for row in data_iter]
@@ -3076,7 +3076,7 @@ def to_sql(
3
>>> with engine.connect() as conn:
- ... conn.execute(text("SELECT * FROM integers")).fetchall()
+ ... conn.execute(text("SELECT * FROM integers")).fetchall()
[(1,), (None,), (2,)]
""" # noqa: E501
from pandas.io import sql
| - [X] Ref: https://github.com/pandas-dev/pandas/issues/56804
Fix these EX03 Errors for `pandas.DataFrame.to_sql`:
```
9 Errors found for `pandas.DataFrame.to_sql`:
flake8 error: line 8, col 4: E111 indentation is not a multiple of 4
flake8 error: line 15, col 4: E111 indentation is not a multiple of 4
flake8 error: line 19, col 4: E111 indentation is not a multiple of 4
flake8 error: line 27, col 1: F821 undefined name 'df_conflict'
flake8 error: line 27, col 89: E501 line too long (124 > 88 characters)
flake8 error: line 28, col 1: F811 redefinition of unused 'insert' from line 22
flake8 error: line 39, col 1: F821 undefined name 'df_conflict'
flake8 error: line 39, col 89: E501 line too long (123 > 88 characters)
flake8 error: line 46, col 3: E111 indentation is not a multiple of 4
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/56866 | 2024-01-14T04:03:36Z | 2024-01-23T15:51:44Z | null | 2024-01-23T15:51:44Z |
DOC: fix EX03 Errors for pandas.read_json | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index c0dfbcc03b473..4809806657dff 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -76,7 +76,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.errors.SettingWithCopyWarning \
pandas.errors.SpecificationError \
pandas.errors.UndefinedVariableError \
- pandas.read_json \
pandas.io.formats.style.Styler.to_latex \
pandas.read_parquet \
pandas.DataFrame.to_sql \
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 4c490c6b2cda2..40b714984f93d 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -717,7 +717,7 @@ def read_json(
"data":[["a","b"],["c","d"]]\
}}\
'
- >>> pd.read_json(StringIO(_), orient='split')
+ >>> pd.read_json(StringIO(_), orient='split') # doctest: +SKIP
col 1 col 2
row 1 a b
row 2 c d
@@ -727,7 +727,7 @@ def read_json(
>>> df.to_json(orient='index')
'{{"row 1":{{"col 1":"a","col 2":"b"}},"row 2":{{"col 1":"c","col 2":"d"}}}}'
- >>> pd.read_json(StringIO(_), orient='index')
+ >>> pd.read_json(StringIO(_), orient='index') # doctest: +SKIP
col 1 col 2
row 1 a b
row 2 c d
@@ -737,7 +737,7 @@ def read_json(
>>> df.to_json(orient='records')
'[{{"col 1":"a","col 2":"b"}},{{"col 1":"c","col 2":"d"}}]'
- >>> pd.read_json(StringIO(_), orient='records')
+ >>> pd.read_json(StringIO(_), orient='records') # doctest: +SKIP
col 1 col 2
0 a b
1 c d
| - [X] Ref: https://github.com/pandas-dev/pandas/issues/56804
Fix these EX03 Errors for `pandas.read_json`
```
flake8 error: line 6, col 23: F821 undefined name '_'
flake8 error: line 8, col 23: F821 undefined name '_'
flake8 error: line 10, col 23: F821 undefined name '_'
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/56865 | 2024-01-14T03:36:26Z | 2024-01-23T15:51:57Z | null | 2024-01-23T15:51:58Z |
DOC: fix RT03 error for pandas.DataFrame.hist | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 96609fdc1671b..f6246393a6cbc 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -228,6 +228,8 @@ def hist_frame(
Returns
-------
matplotlib.AxesSubplot or numpy.ndarray of them
+ An ndarray is returned with one :class:`matplotlib.AxesSubplot`
+ per column.
See Also
--------
| - [ ] ~~closes https://github.com/pandas-dev/pandas/issues/56804~~ (Not relevant, it is a RT03 error )
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
~~Detected EX03 error for `pandas.DataFrame.hist`:~~
```
python scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.hist
```
Output:
```
################################################################################
################################## Validation ##################################
################################################################################
1 Errors found for `pandas.DataFrame.hist`:
Return value has no description
```
The output after this changes:
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.hist" correct. :)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/56864 | 2024-01-14T02:20:36Z | 2024-02-16T11:37:42Z | null | 2024-02-16T11:37:43Z |
STY: Use ruff to format docstrings | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b7e43404b86bd..0c11e0d469155 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -19,7 +19,7 @@ ci:
skip: [pylint, pyright, mypy]
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.6
+ rev: v0.1.13
hooks:
- id: ruff
args: [--exit-non-zero-on-fix]
@@ -31,8 +31,7 @@ repos:
exclude: ^pandas/tests
args: [--select, "ANN001,ANN2", --fix-only, --exit-non-zero-on-fix]
- id: ruff-format
- # TODO: "." not needed in ruff 0.1.8
- args: ["."]
+ exclude: ^scripts
- repo: https://github.com/jendrikseipp/vulture
rev: 'v2.10'
hooks:
diff --git a/doc/make.py b/doc/make.py
index 2583242786fc8..19df4bae2ea55 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -113,7 +113,7 @@ def _run_os(*args) -> None:
Examples
--------
- >>> DocBuilder()._run_os('python', '--version')
+ >>> DocBuilder()._run_os("python", "--version")
"""
subprocess.check_call(args, stdout=sys.stdout, stderr=sys.stderr)
@@ -129,7 +129,7 @@ def _sphinx_build(self, kind: str):
Examples
--------
- >>> DocBuilder(num_jobs=4)._sphinx_build('html')
+ >>> DocBuilder(num_jobs=4)._sphinx_build("html")
"""
if kind not in ("html", "latex", "linkcheck"):
raise ValueError(f"kind must be html, latex or linkcheck, not {kind}")
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index 7612739531695..8ad1da732a449 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -476,7 +476,7 @@ class option_context(ContextDecorator):
Examples
--------
>>> from pandas import option_context
- >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
+ >>> with option_context("display.max_rows", 10, "display.max_columns", 5):
... pass
"""
diff --git a/pandas/_testing/_warnings.py b/pandas/_testing/_warnings.py
index cff28f6a20472..d9516077788c8 100644
--- a/pandas/_testing/_warnings.py
+++ b/pandas/_testing/_warnings.py
@@ -76,10 +76,8 @@ class for all warnings. To raise multiple types of exceptions,
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
- ...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
- ...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index 5ad5d02360f0b..4aea85d50c352 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -1178,8 +1178,8 @@ def assert_frame_equal(
but with columns of differing dtypes.
>>> from pandas.testing import assert_frame_equal
- >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
- >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
+ >>> df1 = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
+ >>> df2 = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
df1 equals itself.
diff --git a/pandas/_testing/contexts.py b/pandas/_testing/contexts.py
index eb6e4a917889a..3570ebaeffed5 100644
--- a/pandas/_testing/contexts.py
+++ b/pandas/_testing/contexts.py
@@ -70,9 +70,8 @@ def set_timezone(tz: str) -> Generator[None, None, None]:
>>> tzlocal().tzname(datetime(2021, 1, 1)) # doctest: +SKIP
'IST'
- >>> with set_timezone('US/Eastern'):
+ >>> with set_timezone("US/Eastern"):
... tzlocal().tzname(datetime(2021, 1, 1))
- ...
'EST'
"""
import time
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index 683af644cbdb3..39a5ffd947009 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -265,7 +265,7 @@ def __init__(self, pandas_object): # noqa: E999
For consistency with pandas methods, you should raise an ``AttributeError``
if the data passed to your accessor has an incorrect dtype.
- >>> pd.Series(['a', 'b']).dt
+ >>> pd.Series(["a", "b"]).dt
Traceback (most recent call last):
...
AttributeError: Can only use .dt accessor with datetimelike values
@@ -274,8 +274,6 @@ def __init__(self, pandas_object): # noqa: E999
--------
In your library code::
- import pandas as pd
-
@pd.api.extensions.register_dataframe_accessor("geo")
class GeoAccessor:
def __init__(self, pandas_obj):
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 128477dac562e..b346cb9b2c175 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1215,8 +1215,9 @@ def take(
>>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
array([10., 10., nan])
- >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
- ... fill_value=-10)
+ >>> pd.api.extensions.take(
+ ... np.array([10, 20, 30]), [0, 0, -1], allow_fill=True, fill_value=-10
+ ... )
array([ 10, 10, -10])
"""
if not isinstance(arr, (np.ndarray, ABCExtensionArray, ABCIndex, ABCSeries)):
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 7ae65ba11a752..c15d7b7928867 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -1794,14 +1794,14 @@ def normalize_keyword_aggregation(
def _make_unique_kwarg_list(
- seq: Sequence[tuple[Any, Any]]
+ seq: Sequence[tuple[Any, Any]],
) -> Sequence[tuple[Any, Any]]:
"""
Uniquify aggfunc name of the pairs in the order list
Examples:
--------
- >>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
+ >>> kwarg_list = [("a", "<lambda>"), ("a", "<lambda>"), ("b", "<lambda>")]
>>> _make_unique_kwarg_list(kwarg_list)
[('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
"""
@@ -1833,7 +1833,7 @@ def relabel_result(
>>> from pandas.core.apply import relabel_result
>>> result = pd.DataFrame(
... {"A": [np.nan, 2, np.nan], "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]},
- ... index=["max", "mean", "min"]
+ ... index=["max", "mean", "min"],
... )
>>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
>>> columns = ("foo", "aab", "bar", "dat")
@@ -1972,7 +1972,7 @@ def maybe_mangle_lambdas(agg_spec: Any) -> Any:
Examples
--------
- >>> maybe_mangle_lambdas('sum')
+ >>> maybe_mangle_lambdas("sum")
'sum'
>>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
[<function __main__.<lambda_0>,
@@ -2017,7 +2017,7 @@ def validate_func_kwargs(
Examples
--------
- >>> validate_func_kwargs({'one': 'min', 'two': 'max'})
+ >>> validate_func_kwargs({"one": "min", "two": "max"})
(['one', 'two'], ['min', 'max'])
"""
tuple_given_message = "func is expected but received {} in **kwargs."
diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py
index 62f6737d86d51..dde1b8a35e2f0 100644
--- a/pandas/core/arraylike.py
+++ b/pandas/core/arraylike.py
@@ -119,8 +119,9 @@ def __add__(self, other):
Examples
--------
- >>> df = pd.DataFrame({'height': [1.5, 2.6], 'weight': [500, 800]},
- ... index=['elk', 'moose'])
+ >>> df = pd.DataFrame(
+ ... {"height": [1.5, 2.6], "weight": [500, 800]}, index=["elk", "moose"]
+ ... )
>>> df
height weight
elk 1.5 500
@@ -128,14 +129,14 @@ def __add__(self, other):
Adding a scalar affects all rows and columns.
- >>> df[['height', 'weight']] + 1.5
+ >>> df[["height", "weight"]] + 1.5
height weight
elk 3.0 501.5
moose 4.1 801.5
Each element of a list is added to a column of the DataFrame, in order.
- >>> df[['height', 'weight']] + [0.5, 1.5]
+ >>> df[["height", "weight"]] + [0.5, 1.5]
height weight
elk 2.0 501.5
moose 3.1 801.5
@@ -143,7 +144,7 @@ def __add__(self, other):
Keys of a dictionary are aligned to the DataFrame, based on column names;
each value in the dictionary is added to the corresponding column.
- >>> df[['height', 'weight']] + {'height': 0.5, 'weight': 1.5}
+ >>> df[["height", "weight"]] + {"height": 0.5, "weight": 1.5}
height weight
elk 2.0 501.5
moose 3.1 801.5
@@ -151,8 +152,8 @@ def __add__(self, other):
When `other` is a :class:`Series`, the index of `other` is aligned with the
columns of the DataFrame.
- >>> s1 = pd.Series([0.5, 1.5], index=['weight', 'height'])
- >>> df[['height', 'weight']] + s1
+ >>> s1 = pd.Series([0.5, 1.5], index=["weight", "height"])
+ >>> df[["height", "weight"]] + s1
height weight
elk 3.0 500.5
moose 4.1 800.5
@@ -161,13 +162,13 @@ def __add__(self, other):
the :class:`Series` will not be reoriented. If index-wise alignment is desired,
:meth:`DataFrame.add` should be used with `axis='index'`.
- >>> s2 = pd.Series([0.5, 1.5], index=['elk', 'moose'])
- >>> df[['height', 'weight']] + s2
+ >>> s2 = pd.Series([0.5, 1.5], index=["elk", "moose"])
+ >>> df[["height", "weight"]] + s2
elk height moose weight
elk NaN NaN NaN NaN
moose NaN NaN NaN NaN
- >>> df[['height', 'weight']].add(s2, axis='index')
+ >>> df[["height", "weight"]].add(s2, axis="index")
height weight
elk 2.0 500.5
moose 4.1 801.5
@@ -175,9 +176,10 @@ def __add__(self, other):
When `other` is a :class:`DataFrame`, both columns names and the
index are aligned.
- >>> other = pd.DataFrame({'height': [0.2, 0.4, 0.6]},
- ... index=['elk', 'moose', 'deer'])
- >>> df[['height', 'weight']] + other
+ >>> other = pd.DataFrame(
+ ... {"height": [0.2, 0.4, 0.6]}, index=["elk", "moose", "deer"]
+ ... )
+ >>> df[["height", "weight"]] + other
height weight
deer NaN NaN
elk 1.7 NaN
diff --git a/pandas/core/arrays/arrow/accessors.py b/pandas/core/arrays/arrow/accessors.py
index 7c5ccb2db0194..19ec253e81ef2 100644
--- a/pandas/core/arrays/arrow/accessors.py
+++ b/pandas/core/arrays/arrow/accessors.py
@@ -100,9 +100,7 @@ def len(self) -> Series:
... [1, 2, 3],
... [3],
... ],
- ... dtype=pd.ArrowDtype(pa.list_(
- ... pa.int64()
- ... ))
+ ... dtype=pd.ArrowDtype(pa.list_(pa.int64())),
... )
>>> s.list.len()
0 3
@@ -136,9 +134,7 @@ def __getitem__(self, key: int | slice) -> Series:
... [1, 2, 3],
... [3],
... ],
- ... dtype=pd.ArrowDtype(pa.list_(
- ... pa.int64()
- ... ))
+ ... dtype=pd.ArrowDtype(pa.list_(pa.int64())),
... )
>>> s.list[0]
0 1
@@ -195,9 +191,7 @@ def flatten(self) -> Series:
... [1, 2, 3],
... [3],
... ],
- ... dtype=pd.ArrowDtype(pa.list_(
- ... pa.int64()
- ... ))
+ ... dtype=pd.ArrowDtype(pa.list_(pa.int64())),
... )
>>> s.list.flatten()
0 1
@@ -253,9 +247,9 @@ def dtypes(self) -> Series:
... {"version": 2, "project": "pandas"},
... {"version": 1, "project": "numpy"},
... ],
- ... dtype=pd.ArrowDtype(pa.struct(
- ... [("version", pa.int64()), ("project", pa.string())]
- ... ))
+ ... dtype=pd.ArrowDtype(
+ ... pa.struct([("version", pa.int64()), ("project", pa.string())])
+ ... ),
... )
>>> s.struct.dtypes
version int64[pyarrow]
@@ -324,9 +318,9 @@ def field(
... {"version": 2, "project": "pandas"},
... {"version": 1, "project": "numpy"},
... ],
- ... dtype=pd.ArrowDtype(pa.struct(
- ... [("version", pa.int64()), ("project", pa.string())]
- ... ))
+ ... dtype=pd.ArrowDtype(
+ ... pa.struct([("version", pa.int64()), ("project", pa.string())])
+ ... ),
... )
Extract by field name.
@@ -357,19 +351,21 @@ def field(
For nested struct types, you can pass a list of values to index
multiple levels:
- >>> version_type = pa.struct([
- ... ("major", pa.int64()),
- ... ("minor", pa.int64()),
- ... ])
+ >>> version_type = pa.struct(
+ ... [
+ ... ("major", pa.int64()),
+ ... ("minor", pa.int64()),
+ ... ]
+ ... )
>>> s = pd.Series(
... [
... {"version": {"major": 1, "minor": 5}, "project": "pandas"},
... {"version": {"major": 2, "minor": 1}, "project": "pandas"},
... {"version": {"major": 1, "minor": 26}, "project": "numpy"},
... ],
- ... dtype=pd.ArrowDtype(pa.struct(
- ... [("version", version_type), ("project", pa.string())]
- ... ))
+ ... dtype=pd.ArrowDtype(
+ ... pa.struct([("version", version_type), ("project", pa.string())])
+ ... ),
... )
>>> s.struct.field(["version", "minor"])
0 5
@@ -454,9 +450,9 @@ def explode(self) -> DataFrame:
... {"version": 2, "project": "pandas"},
... {"version": 1, "project": "numpy"},
... ],
- ... dtype=pd.ArrowDtype(pa.struct(
- ... [("version", pa.int64()), ("project", pa.string())]
- ... ))
+ ... dtype=pd.ArrowDtype(
+ ... pa.struct([("version", pa.int64()), ("project", pa.string())])
+ ... ),
... )
>>> s.struct.explode()
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index e41a96cfcef7e..147b94e441f30 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -380,8 +380,9 @@ def _from_factorized(cls, values, original):
Examples
--------
- >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1),
- ... pd.Interval(1, 5), pd.Interval(1, 5)])
+ >>> interv_arr = pd.arrays.IntervalArray(
+ ... [pd.Interval(0, 1), pd.Interval(1, 5), pd.Interval(1, 5)]
+ ... )
>>> codes, uniques = pd.factorize(interv_arr)
>>> pd.arrays.IntervalArray._from_factorized(uniques, interv_arr)
<IntervalArray>
@@ -685,7 +686,7 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
Casting to another ``ExtensionDtype`` returns an ``ExtensionArray``:
- >>> arr1 = arr.astype('Float64')
+ >>> arr1 = arr.astype("Float64")
>>> arr1
<FloatingArray>
[1.0, 2.0, 3.0]
@@ -695,7 +696,7 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
Otherwise, we will get a Numpy ndarray:
- >>> arr2 = arr.astype('float64')
+ >>> arr2 = arr.astype("float64")
>>> arr2
array([1., 2., 3.])
>>> arr2.dtype
@@ -939,15 +940,16 @@ def interpolate(
Examples
--------
>>> arr = pd.arrays.NumpyExtensionArray(np.array([0, 1, np.nan, 3]))
- >>> arr.interpolate(method="linear",
- ... limit=3,
- ... limit_direction="forward",
- ... index=pd.Index([1, 2, 3, 4]),
- ... fill_value=1,
- ... copy=False,
- ... axis=0,
- ... limit_area="inside"
- ... )
+ >>> arr.interpolate(
+ ... method="linear",
+ ... limit=3,
+ ... limit_direction="forward",
+ ... index=pd.Index([1, 2, 3, 4]),
+ ... fill_value=1,
+ ... copy=False,
+ ... axis=0,
+ ... limit_area="inside",
+ ... )
<NumpyExtensionArray>
[0.0, 1.0, 2.0, 3.0]
Length: 4, dtype: float64
@@ -1467,8 +1469,10 @@ def factorize(
Examples
--------
- >>> idx1 = pd.PeriodIndex(["2014-01", "2014-01", "2014-02", "2014-02",
- ... "2014-03", "2014-03"], freq="M")
+ >>> idx1 = pd.PeriodIndex(
+ ... ["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"],
+ ... freq="M",
+ ... )
>>> arr, idx = idx1.factorize()
>>> arr
array([0, 0, 1, 1, 2, 2])
@@ -1627,10 +1631,9 @@ def take(self, indices, allow_fill=False, fill_value=None):
# type for the array, to the physical storage type for
# the data, before passing to take.
- result = take(data, indices, fill_value=fill_value,
- allow_fill=allow_fill)
+ result = take(data, indices, fill_value=fill_value, allow_fill=allow_fill)
return self._from_sequence(result, dtype=self.dtype)
- """
+ """ # noqa: E501
# Implementer note: The `fill_value` parameter should be a user-facing
# value, an instance of self.dtype.type. When passed `fill_value=None`,
# the default of `self.dtype.na_value` should be used.
@@ -1767,7 +1770,7 @@ def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]:
--------
>>> class MyExtensionArray(pd.arrays.NumpyExtensionArray):
... def _formatter(self, boxed=False):
- ... return lambda x: '*' + str(x) + '*' if boxed else repr(x) + '*'
+ ... return lambda x: "*" + str(x) + "*" if boxed else repr(x) + "*"
>>> MyExtensionArray(np.array([1, 2, 3, 4]))
<MyExtensionArray>
[1*, 2*, 3*, 4*]
@@ -1902,7 +1905,7 @@ def _accumulate(
Examples
--------
>>> arr = pd.array([1, 2, 3])
- >>> arr._accumulate(name='cumsum')
+ >>> arr._accumulate(name="cumsum")
<IntegerArray>
[1, 3, 6]
Length: 3, dtype: Int64
@@ -2007,10 +2010,9 @@ def _hash_pandas_object(
Examples
--------
- >>> pd.array([1, 2])._hash_pandas_object(encoding='utf-8',
- ... hash_key="1000000000000000",
- ... categorize=False
- ... )
+ >>> pd.array([1, 2])._hash_pandas_object(
+ ... encoding="utf-8", hash_key="1000000000000000", categorize=False
+ ... )
array([ 6238072747940578789, 15839785061582574730], dtype=uint64)
"""
from pandas.core.util.hashing import hash_array
@@ -2044,8 +2046,9 @@ def _explode(self) -> tuple[Self, npt.NDArray[np.uint64]]:
Examples
--------
>>> import pyarrow as pa
- >>> a = pd.array([[1, 2, 3], [4], [5, 6]],
- ... dtype=pd.ArrowDtype(pa.list_(pa.int64())))
+ >>> a = pd.array(
+ ... [[1, 2, 3], [4], [5, 6]], dtype=pd.ArrowDtype(pa.list_(pa.int64()))
+ ... )
>>> a._explode()
(<ArrowExtensionArray>
[1, 2, 3, 4, 5, 6]
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index dea90dbd2f0d1..d1dba024e85c5 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -329,7 +329,7 @@ class Categorical(NDArrayBackedExtensionArray, PandasObject, ObjectStringArrayMi
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
- >>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
+ >>> pd.Categorical(["a", "b", "c", "a", "b", "c"])
['a', 'b', 'c', 'a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
@@ -349,8 +349,9 @@ class Categorical(NDArrayBackedExtensionArray, PandasObject, ObjectStringArrayMi
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
- >>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
- ... categories=['c', 'b', 'a'])
+ >>> c = pd.Categorical(
+ ... ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"]
+ ... )
>>> c
['a', 'b', 'c', 'a', 'b', 'c']
Categories (3, object): ['c' < 'b' < 'a']
@@ -509,7 +510,7 @@ def dtype(self) -> CategoricalDtype:
Examples
--------
- >>> cat = pd.Categorical(['a', 'b'], ordered=True)
+ >>> cat = pd.Categorical(["a", "b"], ordered=True)
>>> cat
['a', 'b']
Categories (2, object): ['a' < 'b']
@@ -749,7 +750,7 @@ def from_codes(
Examples
--------
- >>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
+ >>> dtype = pd.CategoricalDtype(["a", "b"], ordered=True)
>>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
['a', 'b', 'a', 'b']
Categories (2, object): ['a' < 'b']
@@ -804,28 +805,28 @@ def categories(self) -> Index:
--------
For :class:`pandas.Series`:
- >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
+ >>> ser = pd.Series(["a", "b", "c", "a"], dtype="category")
>>> ser.cat.categories
Index(['a', 'b', 'c'], dtype='object')
- >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'a'], categories=['b', 'c', 'd'])
+ >>> raw_cat = pd.Categorical(["a", "b", "c", "a"], categories=["b", "c", "d"])
>>> ser = pd.Series(raw_cat)
>>> ser.cat.categories
Index(['b', 'c', 'd'], dtype='object')
For :class:`pandas.Categorical`:
- >>> cat = pd.Categorical(['a', 'b'], ordered=True)
+ >>> cat = pd.Categorical(["a", "b"], ordered=True)
>>> cat.categories
Index(['a', 'b'], dtype='object')
For :class:`pandas.CategoricalIndex`:
- >>> ci = pd.CategoricalIndex(['a', 'c', 'b', 'a', 'c', 'b'])
+ >>> ci = pd.CategoricalIndex(["a", "c", "b", "a", "c", "b"])
>>> ci.categories
Index(['a', 'b', 'c'], dtype='object')
- >>> ci = pd.CategoricalIndex(['a', 'c'], categories=['c', 'b', 'a'])
+ >>> ci = pd.CategoricalIndex(["a", "c"], categories=["c", "b", "a"])
>>> ci.categories
Index(['c', 'b', 'a'], dtype='object')
"""
@@ -840,32 +841,32 @@ def ordered(self) -> Ordered:
--------
For :class:`pandas.Series`:
- >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
+ >>> ser = pd.Series(["a", "b", "c", "a"], dtype="category")
>>> ser.cat.ordered
False
- >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'a'], ordered=True)
+ >>> raw_cat = pd.Categorical(["a", "b", "c", "a"], ordered=True)
>>> ser = pd.Series(raw_cat)
>>> ser.cat.ordered
True
For :class:`pandas.Categorical`:
- >>> cat = pd.Categorical(['a', 'b'], ordered=True)
+ >>> cat = pd.Categorical(["a", "b"], ordered=True)
>>> cat.ordered
True
- >>> cat = pd.Categorical(['a', 'b'], ordered=False)
+ >>> cat = pd.Categorical(["a", "b"], ordered=False)
>>> cat.ordered
False
For :class:`pandas.CategoricalIndex`:
- >>> ci = pd.CategoricalIndex(['a', 'b'], ordered=True)
+ >>> ci = pd.CategoricalIndex(["a", "b"], ordered=True)
>>> ci.ordered
True
- >>> ci = pd.CategoricalIndex(['a', 'b'], ordered=False)
+ >>> ci = pd.CategoricalIndex(["a", "b"], ordered=False)
>>> ci.ordered
False
"""
@@ -891,17 +892,17 @@ def codes(self) -> np.ndarray:
--------
For :class:`pandas.Categorical`:
- >>> cat = pd.Categorical(['a', 'b'], ordered=True)
+ >>> cat = pd.Categorical(["a", "b"], ordered=True)
>>> cat.codes
array([0, 1], dtype=int8)
For :class:`pandas.CategoricalIndex`:
- >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'])
+ >>> ci = pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"])
>>> ci.codes
array([0, 1, 2, 0, 1, 2], dtype=int8)
- >>> ci = pd.CategoricalIndex(['a', 'c'], categories=['c', 'b', 'a'])
+ >>> ci = pd.CategoricalIndex(["a", "c"], categories=["c", "b", "a"])
>>> ci.codes
array([2, 0], dtype=int8)
"""
@@ -920,12 +921,12 @@ def _set_categories(self, categories, fastpath: bool = False) -> None:
Examples
--------
- >>> c = pd.Categorical(['a', 'b'])
+ >>> c = pd.Categorical(["a", "b"])
>>> c
['a', 'b']
Categories (2, object): ['a', 'b']
- >>> c._set_categories(pd.Index(['a', 'c']))
+ >>> c._set_categories(pd.Index(["a", "c"]))
>>> c
['a', 'c']
Categories (2, object): ['a', 'c']
@@ -989,7 +990,7 @@ def as_ordered(self) -> Self:
--------
For :class:`pandas.Series`:
- >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
+ >>> ser = pd.Series(["a", "b", "c", "a"], dtype="category")
>>> ser.cat.ordered
False
>>> ser = ser.cat.as_ordered()
@@ -998,7 +999,7 @@ def as_ordered(self) -> Self:
For :class:`pandas.CategoricalIndex`:
- >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a'])
+ >>> ci = pd.CategoricalIndex(["a", "b", "c", "a"])
>>> ci.ordered
False
>>> ci = ci.as_ordered()
@@ -1020,7 +1021,7 @@ def as_unordered(self) -> Self:
--------
For :class:`pandas.Series`:
- >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'a'], ordered=True)
+ >>> raw_cat = pd.Categorical(["a", "b", "c", "a"], ordered=True)
>>> ser = pd.Series(raw_cat)
>>> ser.cat.ordered
True
@@ -1030,7 +1031,7 @@ def as_unordered(self) -> Self:
For :class:`pandas.CategoricalIndex`:
- >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a'], ordered=True)
+ >>> ci = pd.CategoricalIndex(["a", "b", "c", "a"], ordered=True)
>>> ci.ordered
True
>>> ci = ci.as_unordered()
@@ -1093,8 +1094,9 @@ def set_categories(
--------
For :class:`pandas.Series`:
- >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'A'],
- ... categories=['a', 'b', 'c'], ordered=True)
+ >>> raw_cat = pd.Categorical(
+ ... ["a", "b", "c", "A"], categories=["a", "b", "c"], ordered=True
+ ... )
>>> ser = pd.Series(raw_cat)
>>> ser
0 a
@@ -1104,7 +1106,7 @@ def set_categories(
dtype: category
Categories (3, object): ['a' < 'b' < 'c']
- >>> ser.cat.set_categories(['A', 'B', 'C'], rename=True)
+ >>> ser.cat.set_categories(["A", "B", "C"], rename=True)
0 A
1 B
2 C
@@ -1114,16 +1116,17 @@ def set_categories(
For :class:`pandas.CategoricalIndex`:
- >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'A'],
- ... categories=['a', 'b', 'c'], ordered=True)
+ >>> ci = pd.CategoricalIndex(
+ ... ["a", "b", "c", "A"], categories=["a", "b", "c"], ordered=True
+ ... )
>>> ci
CategoricalIndex(['a', 'b', 'c', nan], categories=['a', 'b', 'c'],
ordered=True, dtype='category')
- >>> ci.set_categories(['A', 'b', 'c'])
+ >>> ci.set_categories(["A", "b", "c"])
CategoricalIndex([nan, 'b', 'c', nan], categories=['A', 'b', 'c'],
ordered=True, dtype='category')
- >>> ci.set_categories(['A', 'b', 'c'], rename=True)
+ >>> ci.set_categories(["A", "b", "c"], rename=True)
CategoricalIndex(['A', 'b', 'c', nan], categories=['A', 'b', 'c'],
ordered=True, dtype='category')
"""
@@ -1189,7 +1192,7 @@ def rename_categories(self, new_categories) -> Self:
Examples
--------
- >>> c = pd.Categorical(['a', 'a', 'b'])
+ >>> c = pd.Categorical(["a", "a", "b"])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
@@ -1197,7 +1200,7 @@ def rename_categories(self, new_categories) -> Self:
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
- >>> c.rename_categories({'a': 'A', 'c': 'C'})
+ >>> c.rename_categories({"a": "A", "c": "C"})
['A', 'A', 'b']
Categories (2, object): ['A', 'b']
@@ -1257,8 +1260,8 @@ def reorder_categories(self, new_categories, ordered=None) -> Self:
--------
For :class:`pandas.Series`:
- >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
- >>> ser = ser.cat.reorder_categories(['c', 'b', 'a'], ordered=True)
+ >>> ser = pd.Series(["a", "b", "c", "a"], dtype="category")
+ >>> ser = ser.cat.reorder_categories(["c", "b", "a"], ordered=True)
>>> ser
0 a
1 b
@@ -1277,11 +1280,11 @@ def reorder_categories(self, new_categories, ordered=None) -> Self:
For :class:`pandas.CategoricalIndex`:
- >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a'])
+ >>> ci = pd.CategoricalIndex(["a", "b", "c", "a"])
>>> ci
CategoricalIndex(['a', 'b', 'c', 'a'], categories=['a', 'b', 'c'],
ordered=False, dtype='category')
- >>> ci.reorder_categories(['c', 'b', 'a'], ordered=True)
+ >>> ci.reorder_categories(["c", "b", "a"], ordered=True)
CategoricalIndex(['a', 'b', 'c', 'a'], categories=['c', 'b', 'a'],
ordered=True, dtype='category')
"""
@@ -1327,12 +1330,12 @@ def add_categories(self, new_categories) -> Self:
Examples
--------
- >>> c = pd.Categorical(['c', 'b', 'c'])
+ >>> c = pd.Categorical(["c", "b", "c"])
>>> c
['c', 'b', 'c']
Categories (2, object): ['b', 'c']
- >>> c.add_categories(['d', 'a'])
+ >>> c.add_categories(["d", "a"])
['c', 'b', 'c']
Categories (4, object): ['b', 'c', 'd', 'a']
"""
@@ -1395,12 +1398,12 @@ def remove_categories(self, removals) -> Self:
Examples
--------
- >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd'])
+ >>> c = pd.Categorical(["a", "c", "b", "c", "d"])
>>> c
['a', 'c', 'b', 'c', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
- >>> c.remove_categories(['d', 'a'])
+ >>> c.remove_categories(["d", "a"])
[NaN, 'c', 'b', 'c', NaN]
Categories (2, object): ['b', 'c']
"""
@@ -1442,13 +1445,13 @@ def remove_unused_categories(self) -> Self:
Examples
--------
- >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd'])
+ >>> c = pd.Categorical(["a", "c", "b", "c", "d"])
>>> c
['a', 'c', 'b', 'c', 'd']
Categories (4, object): ['a', 'b', 'c', 'd']
- >>> c[2] = 'a'
- >>> c[4] = 'c'
+ >>> c[2] = "a"
+ >>> c[4] = "c"
>>> c
['a', 'c', 'a', 'c', 'c']
Categories (4, object): ['a', 'b', 'c', 'd']
@@ -1522,37 +1525,37 @@ def map(
Examples
--------
- >>> cat = pd.Categorical(['a', 'b', 'c'])
+ >>> cat = pd.Categorical(["a", "b", "c"])
>>> cat
['a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
>>> cat.map(lambda x: x.upper(), na_action=None)
['A', 'B', 'C']
Categories (3, object): ['A', 'B', 'C']
- >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'}, na_action=None)
+ >>> cat.map({"a": "first", "b": "second", "c": "third"}, na_action=None)
['first', 'second', 'third']
Categories (3, object): ['first', 'second', 'third']
If the mapping is one-to-one the ordering of the categories is
preserved:
- >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
+ >>> cat = pd.Categorical(["a", "b", "c"], ordered=True)
>>> cat
['a', 'b', 'c']
Categories (3, object): ['a' < 'b' < 'c']
- >>> cat.map({'a': 3, 'b': 2, 'c': 1}, na_action=None)
+ >>> cat.map({"a": 3, "b": 2, "c": 1}, na_action=None)
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
- >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'}, na_action=None)
+ >>> cat.map({"a": "first", "b": "second", "c": "first"}, na_action=None)
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
- >>> cat.map({'a': 'first', 'b': 'second'}, na_action=None)
+ >>> cat.map({"a": "first", "b": "second"}, na_action=None)
Index(['first', 'second', nan], dtype='object')
"""
if na_action is lib.no_default:
@@ -1664,7 +1667,7 @@ def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
Examples
--------
- >>> cat = pd.Categorical(['a', 'b'], ordered=True)
+ >>> cat = pd.Categorical(["a", "b"], ordered=True)
The following calls ``cat.__array__``
@@ -1932,12 +1935,12 @@ def argsort(
Examples
--------
- >>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
+ >>> pd.Categorical(["b", "b", "a", "c"]).argsort()
array([2, 0, 1, 3])
- >>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
- ... categories=['c', 'b', 'a'],
- ... ordered=True)
+ >>> cat = pd.Categorical(
+ ... ["b", "b", "a", "c"], categories=["c", "b", "a"], ordered=True
+ ... )
>>> cat.argsort()
array([3, 0, 1, 2])
@@ -2031,10 +2034,10 @@ def sort_values(
>>> c.sort_values(ascending=False)
[5, 2, 2, NaN, NaN]
Categories (2, int64): [2, 5]
- >>> c.sort_values(na_position='first')
+ >>> c.sort_values(na_position="first")
[NaN, NaN, 2, 2, 5]
Categories (2, int64): [2, 5]
- >>> c.sort_values(ascending=False, na_position='first')
+ >>> c.sort_values(ascending=False, na_position="first")
[NaN, NaN, 5, 2, 2]
Categories (2, int64): [2, 5]
"""
@@ -2348,7 +2351,7 @@ def _reverse_indexer(self) -> dict[Hashable, npt.NDArray[np.intp]]:
Examples
--------
- >>> c = pd.Categorical(list('aabca'))
+ >>> c = pd.Categorical(list("aabca"))
>>> c
['a', 'a', 'b', 'c', 'a']
Categories (3, object): ['a', 'b', 'c']
@@ -2632,15 +2635,14 @@ def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
Examples
--------
- >>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
- ... 'hippo'])
- >>> s.isin(['cow', 'lama'])
+ >>> s = pd.Categorical(["llama", "cow", "llama", "beetle", "llama", "hippo"])
+ >>> s.isin(["cow", "llama"])
array([ True, True, True, False, True, False])
- Passing a single string as ``s.isin('lama')`` will raise an error. Use
+ Passing a single string as ``s.isin('llama')`` will raise an error. Use
a list of one element instead:
- >>> s.isin(['lama'])
+ >>> s.isin(["llama"])
array([ True, False, True, False, True, False])
"""
null_mask = np.asarray(isna(values))
@@ -3007,8 +3009,8 @@ def recode_for_categories(
Examples
--------
- >>> old_cat = pd.Index(['b', 'a', 'c'])
- >>> new_cat = pd.Index(['a', 'b'])
+ >>> old_cat = pd.Index(["b", "a", "c"])
+ >>> new_cat = pd.Index(["a", "b"])
>>> codes = np.array([0, 1, 1, 2])
>>> recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1], dtype=int8)
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 1e52cb1ee46e1..4194ffcee2e44 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -270,7 +270,7 @@ def _unbox_scalar(
Examples
--------
- >>> arr = pd.array(np.array(['1970-01-01'], 'datetime64[ns]'))
+ >>> arr = pd.array(np.array(["1970-01-01"], "datetime64[ns]"))
>>> arr._unbox_scalar(arr[0])
numpy.datetime64('1970-01-01T00:00:00.000000000')
"""
@@ -889,8 +889,9 @@ def freqstr(self) -> str | None:
The frequency can be inferred if there are more than 2 points:
- >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"],
- ... freq="infer")
+ >>> idx = pd.DatetimeIndex(
+ ... ["2018-01-01", "2018-01-03", "2018-01-05"], freq="infer"
+ ... )
>>> idx.freqstr
'2D'
@@ -1596,7 +1597,7 @@ def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0):
--------
For :class:`pandas.DatetimeIndex`:
- >>> idx = pd.date_range('2001-01-01 00:00', periods=3)
+ >>> idx = pd.date_range("2001-01-01 00:00", periods=3)
>>> idx
DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
dtype='datetime64[ns]', freq='D')
@@ -1605,7 +1606,7 @@ def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0):
For :class:`pandas.TimedeltaIndex`:
- >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='D')
+ >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit="D")
>>> tdelta_idx
TimedeltaIndex(['1 days', '2 days', '3 days'],
dtype='timedelta64[ns]', freq=None)
@@ -1775,9 +1776,8 @@ def strftime(self, date_format: str) -> npt.NDArray[np.object_]:
Examples
--------
- >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
- ... periods=3, freq='s')
- >>> rng.strftime('%%B %%d, %%Y, %%r')
+ >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"), periods=3, freq="s")
+ >>> rng.strftime("%%B %%d, %%Y, %%r")
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='object')
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 4b804598681fa..bc8d170b73fd0 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -210,9 +210,7 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps): # type: ignore[misc]
Examples
--------
>>> pd.arrays.DatetimeArray._from_sequence(
- ... pd.DatetimeIndex(
- ... ["2023-01-01", "2023-01-02"], freq="D"
- ... )
+ ... pd.DatetimeIndex(["2023-01-01", "2023-01-02"], freq="D")
... )
<DatetimeArray>
['2023-01-01 00:00:00', '2023-01-02 00:00:00']
@@ -611,8 +609,9 @@ def tz(self) -> tzinfo | None:
For DatetimeIndex:
- >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
- ... "2/1/2020 11:00:00+00:00"])
+ >>> idx = pd.DatetimeIndex(
+ ... ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]
+ ... )
>>> idx.tz
datetime.timezone.utc
"""
@@ -888,8 +887,9 @@ def tz_convert(self, tz) -> Self:
With the `tz` parameter, we can change the DatetimeIndex
to other time zones:
- >>> dti = pd.date_range(start='2014-08-01 09:00',
- ... freq='h', periods=3, tz='Europe/Berlin')
+ >>> dti = pd.date_range(
+ ... start="2014-08-01 09:00", freq="h", periods=3, tz="Europe/Berlin"
+ ... )
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
@@ -897,7 +897,7 @@ def tz_convert(self, tz) -> Self:
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='h')
- >>> dti.tz_convert('US/Central')
+ >>> dti.tz_convert("US/Central")
DatetimeIndex(['2014-08-01 02:00:00-05:00',
'2014-08-01 03:00:00-05:00',
'2014-08-01 04:00:00-05:00'],
@@ -906,8 +906,9 @@ def tz_convert(self, tz) -> Self:
With the ``tz=None``, we can remove the timezone (after converting
to UTC if necessary):
- >>> dti = pd.date_range(start='2014-08-01 09:00', freq='h',
- ... periods=3, tz='Europe/Berlin')
+ >>> dti = pd.date_range(
+ ... start="2014-08-01 09:00", freq="h", periods=3, tz="Europe/Berlin"
+ ... )
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
@@ -1131,7 +1132,7 @@ def to_pydatetime(self) -> npt.NDArray[np.object_]:
Examples
--------
- >>> idx = pd.date_range('2018-02-27', periods=3)
+ >>> idx = pd.date_range("2018-02-27", periods=3)
>>> idx.to_pydatetime()
array([datetime.datetime(2018, 2, 27, 0, 0),
datetime.datetime(2018, 2, 28, 0, 0),
@@ -1164,8 +1165,9 @@ def normalize(self) -> Self:
Examples
--------
- >>> idx = pd.date_range(start='2014-08-01 10:00', freq='h',
- ... periods=3, tz='Asia/Calcutta')
+ >>> idx = pd.date_range(
+ ... start="2014-08-01 10:00", freq="h", periods=3, tz="Asia/Calcutta"
+ ... )
>>> idx
DatetimeIndex(['2014-08-01 10:00:00+05:30',
'2014-08-01 11:00:00+05:30',
@@ -1215,10 +1217,16 @@ def to_period(self, freq=None) -> PeriodArray:
Examples
--------
- >>> df = pd.DataFrame({"y": [1, 2, 3]},
- ... index=pd.to_datetime(["2000-03-31 00:00:00",
- ... "2000-05-31 00:00:00",
- ... "2000-08-31 00:00:00"]))
+ >>> df = pd.DataFrame(
+ ... {"y": [1, 2, 3]},
+ ... index=pd.to_datetime(
+ ... [
+ ... "2000-03-31 00:00:00",
+ ... "2000-05-31 00:00:00",
+ ... "2000-08-31 00:00:00",
+ ... ]
+ ... ),
+ ... )
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]')
@@ -1283,7 +1291,7 @@ def month_name(self, locale=None) -> npt.NDArray[np.object_]:
Examples
--------
- >>> s = pd.Series(pd.date_range(start='2018-01', freq='ME', periods=3))
+ >>> s = pd.Series(pd.date_range(start="2018-01", freq="ME", periods=3))
>>> s
0 2018-01-31
1 2018-02-28
@@ -1295,7 +1303,7 @@ def month_name(self, locale=None) -> npt.NDArray[np.object_]:
2 March
dtype: object
- >>> idx = pd.date_range(start='2018-01', freq='ME', periods=3)
+ >>> idx = pd.date_range(start="2018-01", freq="ME", periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='ME')
@@ -1306,11 +1314,11 @@ def month_name(self, locale=None) -> npt.NDArray[np.object_]:
for example: ``idx.month_name(locale='pt_BR.utf8')`` will return month
names in Brazilian Portuguese language.
- >>> idx = pd.date_range(start='2018-01', freq='ME', periods=3)
+ >>> idx = pd.date_range(start="2018-01", freq="ME", periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='ME')
- >>> idx.month_name(locale='pt_BR.utf8') # doctest: +SKIP
+ >>> idx.month_name(locale="pt_BR.utf8") # doctest: +SKIP
Index(['Janeiro', 'Fevereiro', 'Março'], dtype='object')
"""
values = self._local_timestamps()
@@ -1340,7 +1348,7 @@ def day_name(self, locale=None) -> npt.NDArray[np.object_]:
Examples
--------
- >>> s = pd.Series(pd.date_range(start='2018-01-01', freq='D', periods=3))
+ >>> s = pd.Series(pd.date_range(start="2018-01-01", freq="D", periods=3))
>>> s
0 2018-01-01
1 2018-01-02
@@ -1352,7 +1360,7 @@ def day_name(self, locale=None) -> npt.NDArray[np.object_]:
2 Wednesday
dtype: object
- >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
+ >>> idx = pd.date_range(start="2018-01-01", freq="D", periods=3)
>>> idx
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', freq='D')
@@ -1363,11 +1371,11 @@ def day_name(self, locale=None) -> npt.NDArray[np.object_]:
for example: ``idx.day_name(locale='pt_BR.utf8')`` will return day
names in Brazilian Portuguese language.
- >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
+ >>> idx = pd.date_range(start="2018-01-01", freq="D", periods=3)
>>> idx
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', freq='D')
- >>> idx.day_name(locale='pt_BR.utf8') # doctest: +SKIP
+ >>> idx.day_name(locale="pt_BR.utf8") # doctest: +SKIP
Index(['Segunda', 'Terça', 'Quarta'], dtype='object')
"""
values = self._local_timestamps()
@@ -1402,8 +1410,9 @@ def time(self) -> npt.NDArray[np.object_]:
For DatetimeIndex:
- >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
- ... "2/1/2020 11:00:00+00:00"])
+ >>> idx = pd.DatetimeIndex(
+ ... ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]
+ ... )
>>> idx.time
array([datetime.time(10, 0), datetime.time(11, 0)], dtype=object)
"""
@@ -1438,8 +1447,9 @@ def timetz(self) -> npt.NDArray[np.object_]:
For DatetimeIndex:
- >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
- ... "2/1/2020 11:00:00+00:00"])
+ >>> idx = pd.DatetimeIndex(
+ ... ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]
+ ... )
>>> idx.timetz
array([datetime.time(10, 0, tzinfo=datetime.timezone.utc),
datetime.time(11, 0, tzinfo=datetime.timezone.utc)], dtype=object)
@@ -1471,8 +1481,9 @@ def date(self) -> npt.NDArray[np.object_]:
For DatetimeIndex:
- >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",
- ... "2/1/2020 11:00:00+00:00"])
+ >>> idx = pd.DatetimeIndex(
+ ... ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]
+ ... )
>>> idx.date
array([datetime.date(2020, 1, 1), datetime.date(2020, 2, 1)], dtype=object)
"""
@@ -1501,7 +1512,7 @@ def isocalendar(self) -> DataFrame:
Examples
--------
- >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
+ >>> idx = pd.date_range(start="2019-12-29", freq="D", periods=4)
>>> idx.isocalendar()
year week day
2019-12-29 2019 52 7
@@ -2169,7 +2180,7 @@ def std(
--------
For :class:`pandas.DatetimeIndex`:
- >>> idx = pd.date_range('2001-01-01 00:00', periods=3)
+ >>> idx = pd.date_range("2001-01-01 00:00", periods=3)
>>> idx
DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
dtype='datetime64[ns]', freq='D')
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index f9384e25ba9d9..dc453f3e37c50 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -115,12 +115,12 @@ class IntegerArray(NumericArray):
String aliases for the dtypes are also available. They are capitalized.
- >>> pd.array([1, None, 3], dtype='Int32')
+ >>> pd.array([1, None, 3], dtype="Int32")
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: Int32
- >>> pd.array([1, None, 3], dtype='UInt16')
+ >>> pd.array([1, None, 3], dtype="UInt16")
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: UInt16
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 8bbc4976675c8..ab79622ddd8be 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -172,8 +172,7 @@ class PeriodArray(dtl.DatelikeOps, libperiod.PeriodMixin): # type: ignore[misc]
Examples
--------
- >>> pd.arrays.PeriodArray(pd.PeriodIndex(['2023-01-01',
- ... '2023-01-02'], freq='D'))
+ >>> pd.arrays.PeriodArray(pd.PeriodIndex(["2023-01-01", "2023-01-02"], freq="D"))
<PeriodArray>
['2023-01-01', '2023-01-02']
Length: 2, dtype: period[D]
@@ -719,16 +718,16 @@ def asfreq(self, freq=None, how: str = "E") -> Self:
Examples
--------
- >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='Y')
+ >>> pidx = pd.period_range("2010-01-01", "2015-01-01", freq="Y")
>>> pidx
PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
dtype='period[Y-DEC]')
- >>> pidx.asfreq('M')
+ >>> pidx.asfreq("M")
PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
'2015-12'], dtype='period[M]')
- >>> pidx.asfreq('M', how='S')
+ >>> pidx.asfreq("M", how="S")
PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
'2015-01'], dtype='period[M]')
"""
@@ -1035,29 +1034,26 @@ def period_array(
Examples
--------
- >>> period_array([pd.Period('2017', freq='Y'),
- ... pd.Period('2018', freq='Y')])
+ >>> period_array([pd.Period("2017", freq="Y"), pd.Period("2018", freq="Y")])
<PeriodArray>
['2017', '2018']
Length: 2, dtype: period[Y-DEC]
- >>> period_array([pd.Period('2017', freq='Y'),
- ... pd.Period('2018', freq='Y'),
- ... pd.NaT])
+ >>> period_array([pd.Period("2017", freq="Y"), pd.Period("2018", freq="Y"), pd.NaT])
<PeriodArray>
['2017', '2018', 'NaT']
Length: 3, dtype: period[Y-DEC]
Integers that look like years are handled
- >>> period_array([2000, 2001, 2002], freq='D')
+ >>> period_array([2000, 2001, 2002], freq="D")
<PeriodArray>
['2000-01-01', '2001-01-01', '2002-01-01']
Length: 3, dtype: period[D]
Datetime-like strings may also be passed
- >>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
+ >>> period_array(["2000-Q1", "2000-Q2", "2000-Q3", "2000-Q4"], freq="Q")
<PeriodArray>
['2000Q1', '2000Q2', '2000Q3', '2000Q4']
Length: 4, dtype: period[Q-DEC]
diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py
index a1d81aeeecb0b..6608fcce2cd62 100644
--- a/pandas/core/arrays/sparse/accessor.py
+++ b/pandas/core/arrays/sparse/accessor.py
@@ -156,7 +156,7 @@ def to_coo(
... (1, 1, "b", 0),
... (1, 1, "b", 1),
... (2, 1, "b", 0),
- ... (2, 1, "b", 1)
+ ... (2, 1, "b", 1),
... ],
... names=["A", "B", "C", "D"],
... )
@@ -244,8 +244,7 @@ class SparseFrameAccessor(BaseAccessor, PandasDelegate):
Examples
--------
- >>> df = pd.DataFrame({"a": [1, 2, 0, 0],
- ... "b": [3, 0, 0, 4]}, dtype="Sparse[int]")
+ >>> df = pd.DataFrame({"a": [1, 2, 0, 0], "b": [3, 0, 0, 4]}, dtype="Sparse[int]")
>>> df.sparse.density
0.5
"""
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index fafeedc01b02b..5369839126e48 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -1251,7 +1251,7 @@ def astype(self, dtype: AstypeArg | None = None, copy: bool = True):
IntIndex
Indices: array([2, 3], dtype=int32)
- >>> arr.astype(SparseDtype(np.dtype('int32')))
+ >>> arr.astype(SparseDtype(np.dtype("int32")))
[0, 0, 1, 2]
Fill: 0
IntIndex
@@ -1260,7 +1260,7 @@ def astype(self, dtype: AstypeArg | None = None, copy: bool = True):
Using a NumPy dtype with a different kind (e.g. float) will coerce
just ``self.sp_values``.
- >>> arr.astype(SparseDtype(np.dtype('float64')))
+ >>> arr.astype(SparseDtype(np.dtype("float64")))
... # doctest: +NORMALIZE_WHITESPACE
[nan, nan, 1.0, 2.0]
Fill: nan
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index ecad5b481f952..5a803c9064db9 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -327,7 +327,7 @@ class StringArray(BaseStringArray, NumpyExtensionArray): # type: ignore[misc]
Examples
--------
- >>> pd.array(['This is', 'some text', None, 'data.'], dtype="string")
+ >>> pd.array(["This is", "some text", None, "data."], dtype="string")
<StringArray>
['This is', 'some text', <NA>, 'data.']
Length: 4, dtype: string
@@ -335,11 +335,11 @@ class StringArray(BaseStringArray, NumpyExtensionArray): # type: ignore[misc]
Unlike arrays instantiated with ``dtype="object"``, ``StringArray``
will convert the values to strings.
- >>> pd.array(['1', 1], dtype="object")
+ >>> pd.array(["1", 1], dtype="object")
<NumpyExtensionArray>
['1', 1]
Length: 2, dtype: object
- >>> pd.array(['1', 1], dtype="string")
+ >>> pd.array(["1", 1], dtype="string")
<StringArray>
['1', '1']
Length: 2, dtype: string
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index a76eef8095695..ba02c63c00ce4 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -117,7 +117,7 @@ class ArrowStringArray(ObjectStringArrayMixin, ArrowExtensionArray, BaseStringAr
Examples
--------
- >>> pd.array(['This is', 'some text', None, 'data.'], dtype="string[pyarrow]")
+ >>> pd.array(["This is", "some text", None, "data."], dtype="string[pyarrow]")
<ArrowStringArray>
['This is', 'some text', <NA>, 'data.']
Length: 4, dtype: string
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 58455f8cb8398..51075939276f7 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -132,7 +132,7 @@ class TimedeltaArray(dtl.TimelikeOps):
Examples
--------
- >>> pd.arrays.TimedeltaArray._from_sequence(pd.TimedeltaIndex(['1h', '2h']))
+ >>> pd.arrays.TimedeltaArray._from_sequence(pd.TimedeltaIndex(["1h", "2h"]))
<TimedeltaArray>
['0 days 01:00:00', '0 days 02:00:00']
Length: 2, dtype: timedelta64[ns]
@@ -747,7 +747,7 @@ def total_seconds(self) -> npt.NDArray[np.float64]:
--------
**Series**
- >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))
+ >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="d"))
>>> s
0 0 days
1 1 days
@@ -766,7 +766,7 @@ def total_seconds(self) -> npt.NDArray[np.float64]:
**TimedeltaIndex**
- >>> idx = pd.to_timedelta(np.arange(5), unit='d')
+ >>> idx = pd.to_timedelta(np.arange(5), unit="d")
>>> idx
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
@@ -787,7 +787,7 @@ def to_pytimedelta(self) -> npt.NDArray[np.object_]:
Examples
--------
- >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='D')
+ >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit="D")
>>> tdelta_idx
TimedeltaIndex(['1 days', '2 days', '3 days'],
dtype='timedelta64[ns]', freq=None)
@@ -945,7 +945,7 @@ def components(self) -> DataFrame:
Examples
--------
- >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns'])
+ >>> tdelta_idx = pd.to_timedelta(["1 day 3 min 2 us 42 ns"])
>>> tdelta_idx
TimedeltaIndex(['1 days 00:03:00.000002042'],
dtype='timedelta64[ns]', freq=None)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 490daa656f603..a1484d9ad032b 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -367,7 +367,7 @@ def ndim(self) -> Literal[1]:
Examples
--------
- >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
+ >>> s = pd.Series(["Ant", "Bear", "Cow"])
>>> s
0 Ant
1 Bear
@@ -409,7 +409,7 @@ def item(self):
For an index:
- >>> s = pd.Series([1], index=['a'])
+ >>> s = pd.Series([1], index=["a"])
>>> s.index.item()
'a'
"""
@@ -426,7 +426,7 @@ def nbytes(self) -> int:
--------
For Series:
- >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
+ >>> s = pd.Series(["Ant", "Bear", "Cow"])
>>> s
0 Ant
1 Bear
@@ -454,7 +454,7 @@ def size(self) -> int:
--------
For Series:
- >>> s = pd.Series(['Ant', 'Bear', 'Cow'])
+ >>> s = pd.Series(["Ant", "Bear", "Cow"])
>>> s
0 Ant
1 Bear
@@ -531,7 +531,7 @@ def array(self) -> ExtensionArray:
For extension types, like Categorical, the actual ExtensionArray
is returned
- >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
+ >>> ser = pd.Series(pd.Categorical(["a", "b", "a"]))
>>> ser.array
['a', 'b', 'a']
Categories (2, object): ['a', 'b']
@@ -610,7 +610,7 @@ def to_numpy(
Examples
--------
- >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
+ >>> ser = pd.Series(pd.Categorical(["a", "b", "a"]))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
@@ -618,7 +618,7 @@ def to_numpy(
Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
objects, each with the correct ``tz``.
- >>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
+ >>> ser = pd.Series(pd.date_range("2000", periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
@@ -713,8 +713,15 @@ def argmax(
--------
Consider dataset containing cereal calories
- >>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,
- ... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})
+ >>> s = pd.Series(
+ ... [100.0, 110.0, 120.0, 110.0],
+ ... index=[
+ ... "Corn Flakes",
+ ... "Almond Delight",
+ ... "Cinnamon Toast Crunch",
+ ... "Cocoa Puff",
+ ... ],
+ ... )
>>> s
Corn Flakes 100.0
Almond Delight 110.0
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index d41a9c80a10ec..7b35d451c1120 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -169,7 +169,7 @@ def array(
would no longer return a :class:`arrays.NumpyExtensionArray` backed by a
NumPy array.
- >>> pd.array(['a', 'b'], dtype=str)
+ >>> pd.array(["a", "b"], dtype=str)
<NumpyExtensionArray>
['a', 'b']
Length: 2, dtype: str32
@@ -178,7 +178,7 @@ def array(
data. If you really need the new array to be backed by a NumPy array,
specify that in the dtype.
- >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
+ >>> pd.array(["a", "b"], dtype=np.dtype("<U1"))
<NumpyExtensionArray>
['a', 'b']
Length: 2, dtype: str32
@@ -193,12 +193,12 @@ def array(
rather than a ``NumpyExtensionArray``. This is for symmetry with the case of
timezone-aware data, which NumPy does not natively support.
- >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
+ >>> pd.array(["2015", "2016"], dtype="datetime64[ns]")
<DatetimeArray>
['2015-01-01 00:00:00', '2016-01-01 00:00:00']
Length: 2, dtype: datetime64[ns]
- >>> pd.array(["1h", "2h"], dtype='timedelta64[ns]')
+ >>> pd.array(["1h", "2h"], dtype="timedelta64[ns]")
<TimedeltaArray>
['0 days 01:00:00', '0 days 02:00:00']
Length: 2, dtype: timedelta64[ns]
@@ -230,27 +230,27 @@ def array(
>>> with pd.option_context("string_storage", "pyarrow"):
... arr = pd.array(["a", None, "c"])
- ...
>>> arr
<ArrowStringArray>
['a', <NA>, 'c']
Length: 3, dtype: string
- >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
+ >>> pd.array([pd.Period("2000", freq="D"), pd.Period("2000", freq="D")])
<PeriodArray>
['2000-01-01', '2000-01-01']
Length: 2, dtype: period[D]
You can use the string alias for `dtype`
- >>> pd.array(['a', 'b', 'a'], dtype='category')
+ >>> pd.array(["a", "b", "a"], dtype="category")
['a', 'b', 'a']
Categories (2, object): ['a', 'b']
Or specify the actual dtype
- >>> pd.array(['a', 'b', 'a'],
- ... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
+ >>> pd.array(
+ ... ["a", "b", "a"], dtype=pd.CategoricalDtype(["a", "b", "c"], ordered=True)
+ ... )
['a', 'b', 'a']
Categories (3, object): ['a' < 'b' < 'c']
@@ -439,7 +439,7 @@ def extract_array(
Examples
--------
- >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))
+ >>> extract_array(pd.Series(["a", "b", "c"], dtype="category"))
['a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 6b00a5284ec5b..41407704dfc8a 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -96,8 +96,7 @@ class property**.
>>> from pandas.api.extensions import ExtensionArray
>>> class ExtensionDtype:
... def __from_arrow__(
- ... self,
- ... array: pyarrow.Array | pyarrow.ChunkedArray
+ ... self, array: pyarrow.Array | pyarrow.ChunkedArray
... ) -> ExtensionArray:
... ...
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index dfe12872c3916..b8b73e7dc6ddb 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -893,10 +893,10 @@ def infer_dtype_from_array(arr) -> tuple[DtypeObj, ArrayLike]:
Examples
--------
- >>> np.asarray([1, '1'])
+ >>> np.asarray([1, "1"])
array(['1', '1'], dtype='<U21')
- >>> infer_dtype_from_array([1, '1'])
+ >>> infer_dtype_from_array([1, "1"])
(dtype('O'), [1, '1'])
"""
if isinstance(arr, np.ndarray):
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index a53bbe9935684..99114d996cc4c 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -367,7 +367,7 @@ def is_timedelta64_dtype(arr_or_dtype) -> bool:
False
>>> is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]"))
True
- >>> is_timedelta64_dtype('0 days')
+ >>> is_timedelta64_dtype("0 days")
False
"""
if isinstance(arr_or_dtype, np.dtype):
@@ -544,7 +544,7 @@ def is_string_dtype(arr_or_dtype) -> bool:
True
>>> is_string_dtype(int)
False
- >>> is_string_dtype(np.array(['a', 'b']))
+ >>> is_string_dtype(np.array(["a", "b"]))
True
>>> is_string_dtype(pd.Series([1, 2]))
False
@@ -646,9 +646,9 @@ def is_integer_dtype(arr_or_dtype) -> bool:
False
>>> is_integer_dtype(np.uint64)
True
- >>> is_integer_dtype('int8')
+ >>> is_integer_dtype("int8")
True
- >>> is_integer_dtype('Int8')
+ >>> is_integer_dtype("Int8")
True
>>> is_integer_dtype(pd.Int8Dtype)
True
@@ -656,13 +656,13 @@ def is_integer_dtype(arr_or_dtype) -> bool:
False
>>> is_integer_dtype(np.timedelta64)
False
- >>> is_integer_dtype(np.array(['a', 'b']))
+ >>> is_integer_dtype(np.array(["a", "b"]))
False
>>> is_integer_dtype(pd.Series([1, 2]))
True
>>> is_integer_dtype(np.array([], dtype=np.timedelta64))
False
- >>> is_integer_dtype(pd.Index([1, 2.])) # float
+ >>> is_integer_dtype(pd.Index([1, 2.0])) # float
False
"""
return _is_dtype_type(
@@ -703,9 +703,9 @@ def is_signed_integer_dtype(arr_or_dtype) -> bool:
False
>>> is_signed_integer_dtype(np.uint64) # unsigned
False
- >>> is_signed_integer_dtype('int8')
+ >>> is_signed_integer_dtype("int8")
True
- >>> is_signed_integer_dtype('Int8')
+ >>> is_signed_integer_dtype("Int8")
True
>>> is_signed_integer_dtype(pd.Int8Dtype)
True
@@ -713,13 +713,13 @@ def is_signed_integer_dtype(arr_or_dtype) -> bool:
False
>>> is_signed_integer_dtype(np.timedelta64)
False
- >>> is_signed_integer_dtype(np.array(['a', 'b']))
+ >>> is_signed_integer_dtype(np.array(["a", "b"]))
False
>>> is_signed_integer_dtype(pd.Series([1, 2]))
True
>>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64))
False
- >>> is_signed_integer_dtype(pd.Index([1, 2.])) # float
+ >>> is_signed_integer_dtype(pd.Index([1, 2.0])) # float
False
>>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned
False
@@ -759,17 +759,17 @@ def is_unsigned_integer_dtype(arr_or_dtype) -> bool:
False
>>> is_unsigned_integer_dtype(np.uint64)
True
- >>> is_unsigned_integer_dtype('uint8')
+ >>> is_unsigned_integer_dtype("uint8")
True
- >>> is_unsigned_integer_dtype('UInt8')
+ >>> is_unsigned_integer_dtype("UInt8")
True
>>> is_unsigned_integer_dtype(pd.UInt8Dtype)
True
- >>> is_unsigned_integer_dtype(np.array(['a', 'b']))
+ >>> is_unsigned_integer_dtype(np.array(["a", "b"]))
False
>>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed
False
- >>> is_unsigned_integer_dtype(pd.Index([1, 2.])) # float
+ >>> is_unsigned_integer_dtype(pd.Index([1, 2.0])) # float
False
>>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))
True
@@ -815,9 +815,9 @@ def is_int64_dtype(arr_or_dtype) -> bool:
False
>>> is_int64_dtype(np.int64) # doctest: +SKIP
True
- >>> is_int64_dtype('int8') # doctest: +SKIP
+ >>> is_int64_dtype("int8") # doctest: +SKIP
False
- >>> is_int64_dtype('Int8') # doctest: +SKIP
+ >>> is_int64_dtype("Int8") # doctest: +SKIP
False
>>> is_int64_dtype(pd.Int64Dtype) # doctest: +SKIP
True
@@ -825,11 +825,11 @@ def is_int64_dtype(arr_or_dtype) -> bool:
False
>>> is_int64_dtype(np.uint64) # unsigned # doctest: +SKIP
False
- >>> is_int64_dtype(np.array(['a', 'b'])) # doctest: +SKIP
+ >>> is_int64_dtype(np.array(["a", "b"])) # doctest: +SKIP
False
>>> is_int64_dtype(np.array([1, 2], dtype=np.int64)) # doctest: +SKIP
True
- >>> is_int64_dtype(pd.Index([1, 2.])) # float # doctest: +SKIP
+ >>> is_int64_dtype(pd.Index([1, 2.0])) # float # doctest: +SKIP
False
>>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned # doctest: +SKIP
False
@@ -870,7 +870,7 @@ def is_datetime64_any_dtype(arr_or_dtype) -> bool:
True
>>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))
True
- >>> is_datetime64_any_dtype(np.array(['a', 'b']))
+ >>> is_datetime64_any_dtype(np.array(["a", "b"]))
False
>>> is_datetime64_any_dtype(np.array([1, 2]))
False
@@ -923,7 +923,7 @@ def is_datetime64_ns_dtype(arr_or_dtype) -> bool:
False
>>> is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern"))
True
- >>> is_datetime64_ns_dtype(np.array(['a', 'b']))
+ >>> is_datetime64_ns_dtype(np.array(["a", "b"]))
False
>>> is_datetime64_ns_dtype(np.array([1, 2]))
False
@@ -965,11 +965,11 @@ def is_timedelta64_ns_dtype(arr_or_dtype) -> bool:
Examples
--------
>>> from pandas.core.dtypes.common import is_timedelta64_ns_dtype
- >>> is_timedelta64_ns_dtype(np.dtype('m8[ns]'))
+ >>> is_timedelta64_ns_dtype(np.dtype("m8[ns]"))
True
- >>> is_timedelta64_ns_dtype(np.dtype('m8[ps]')) # Wrong frequency
+ >>> is_timedelta64_ns_dtype(np.dtype("m8[ps]")) # Wrong frequency
False
- >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))
+ >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype="m8[ns]"))
True
>>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))
False
@@ -1051,7 +1051,7 @@ def needs_i8_conversion(dtype: DtypeObj | None) -> bool:
False
>>> needs_i8_conversion(np.dtype(np.datetime64))
True
- >>> needs_i8_conversion(np.array(['a', 'b']))
+ >>> needs_i8_conversion(np.array(["a", "b"]))
False
>>> needs_i8_conversion(pd.Series([1, 2]))
False
@@ -1096,11 +1096,11 @@ def is_numeric_dtype(arr_or_dtype) -> bool:
False
>>> is_numeric_dtype(np.timedelta64)
False
- >>> is_numeric_dtype(np.array(['a', 'b']))
+ >>> is_numeric_dtype(np.array(["a", "b"]))
False
>>> is_numeric_dtype(pd.Series([1, 2]))
True
- >>> is_numeric_dtype(pd.Index([1, 2.]))
+ >>> is_numeric_dtype(pd.Index([1, 2.0]))
True
>>> is_numeric_dtype(np.array([], dtype=np.timedelta64))
False
@@ -1172,11 +1172,11 @@ def is_float_dtype(arr_or_dtype) -> bool:
False
>>> is_float_dtype(float)
True
- >>> is_float_dtype(np.array(['a', 'b']))
+ >>> is_float_dtype(np.array(["a", "b"]))
False
>>> is_float_dtype(pd.Series([1, 2]))
False
- >>> is_float_dtype(pd.Index([1, 2.]))
+ >>> is_float_dtype(pd.Index([1, 2.0]))
True
"""
return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype(
@@ -1214,7 +1214,7 @@ def is_bool_dtype(arr_or_dtype) -> bool:
True
>>> is_bool_dtype(np.bool_)
True
- >>> is_bool_dtype(np.array(['a', 'b']))
+ >>> is_bool_dtype(np.array(["a", "b"]))
False
>>> is_bool_dtype(pd.Series([1, 2]))
False
@@ -1298,13 +1298,13 @@ def is_extension_array_dtype(arr_or_dtype) -> bool:
Examples
--------
>>> from pandas.api.types import is_extension_array_dtype
- >>> arr = pd.Categorical(['a', 'b'])
+ >>> arr = pd.Categorical(["a", "b"])
>>> is_extension_array_dtype(arr)
True
>>> is_extension_array_dtype(arr.dtype)
True
- >>> arr = np.array(['a', 'b'])
+ >>> arr = np.array(["a", "b"])
>>> is_extension_array_dtype(arr.dtype)
False
"""
@@ -1351,7 +1351,7 @@ def is_complex_dtype(arr_or_dtype) -> bool:
False
>>> is_complex_dtype(np.complex128)
True
- >>> is_complex_dtype(np.array(['a', 'b']))
+ >>> is_complex_dtype(np.array(["a", "b"]))
False
>>> is_complex_dtype(pd.Series([1, 2]))
False
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 9ec662a6cd352..7d5e88b502a00 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -278,8 +278,8 @@ def union_categoricals(
containing categorical data, but note that the resulting array will
always be a plain `Categorical`
- >>> a = pd.Series(["b", "c"], dtype='category')
- >>> b = pd.Series(["a", "b"], dtype='category')
+ >>> a = pd.Series(["b", "c"], dtype="category")
+ >>> b = pd.Series(["a", "b"], dtype="category")
>>> pd.api.types.union_categoricals([a, b])
['b', 'c', 'a', 'b']
Categories (3, object): ['b', 'c', 'a']
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 5afb77b89c8d5..68c7ab6cbdbd1 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -188,8 +188,8 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
Examples
--------
- >>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True)
- >>> pd.Series(['a', 'b', 'a', 'c'], dtype=t)
+ >>> t = pd.CategoricalDtype(categories=["b", "a"], ordered=True)
+ >>> pd.Series(["a", "b", "a", "c"], dtype=t)
0 a
1 b
2 a
@@ -286,14 +286,14 @@ def _from_values_or_dtype(
>>> pd.CategoricalDtype._from_values_or_dtype()
CategoricalDtype(categories=None, ordered=None, categories_dtype=None)
>>> pd.CategoricalDtype._from_values_or_dtype(
- ... categories=['a', 'b'], ordered=True
+ ... categories=["a", "b"], ordered=True
... )
CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object)
- >>> dtype1 = pd.CategoricalDtype(['a', 'b'], ordered=True)
- >>> dtype2 = pd.CategoricalDtype(['x', 'y'], ordered=False)
+ >>> dtype1 = pd.CategoricalDtype(["a", "b"], ordered=True)
+ >>> dtype2 = pd.CategoricalDtype(["x", "y"], ordered=False)
>>> c = pd.Categorical([0, 1], dtype=dtype1)
>>> pd.CategoricalDtype._from_values_or_dtype(
- ... c, ['x', 'y'], ordered=True, dtype=dtype2
+ ... c, ["x", "y"], ordered=True, dtype=dtype2
... )
Traceback (most recent call last):
...
@@ -621,7 +621,7 @@ def categories(self) -> Index:
Examples
--------
- >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
+ >>> cat_type = pd.CategoricalDtype(categories=["a", "b"], ordered=True)
>>> cat_type.categories
Index(['a', 'b'], dtype='object')
"""
@@ -634,11 +634,11 @@ def ordered(self) -> Ordered:
Examples
--------
- >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
+ >>> cat_type = pd.CategoricalDtype(categories=["a", "b"], ordered=True)
>>> cat_type.ordered
True
- >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=False)
+ >>> cat_type = pd.CategoricalDtype(categories=["a", "b"], ordered=False)
>>> cat_type.ordered
False
"""
@@ -717,10 +717,10 @@ class DatetimeTZDtype(PandasExtensionDtype):
Examples
--------
>>> from zoneinfo import ZoneInfo
- >>> pd.DatetimeTZDtype(tz=ZoneInfo('UTC'))
+ >>> pd.DatetimeTZDtype(tz=ZoneInfo("UTC"))
datetime64[ns, UTC]
- >>> pd.DatetimeTZDtype(tz=ZoneInfo('Europe/Paris'))
+ >>> pd.DatetimeTZDtype(tz=ZoneInfo("Europe/Paris"))
datetime64[ns, Europe/Paris]
"""
@@ -793,7 +793,7 @@ def unit(self) -> str_type:
Examples
--------
>>> from zoneinfo import ZoneInfo
- >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
+ >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo("America/Los_Angeles"))
>>> dtype.unit
'ns'
"""
@@ -807,7 +807,7 @@ def tz(self) -> tzinfo:
Examples
--------
>>> from zoneinfo import ZoneInfo
- >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
+ >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo("America/Los_Angeles"))
>>> dtype.tz
zoneinfo.ZoneInfo(key='America/Los_Angeles')
"""
@@ -840,7 +840,7 @@ def construct_from_string(cls, string: str_type) -> DatetimeTZDtype:
Examples
--------
- >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]')
+ >>> DatetimeTZDtype.construct_from_string("datetime64[ns, UTC]")
datetime64[ns, UTC]
"""
if not isinstance(string, str):
@@ -962,7 +962,7 @@ class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype):
Examples
--------
- >>> pd.PeriodDtype(freq='D')
+ >>> pd.PeriodDtype(freq="D")
period[D]
>>> pd.PeriodDtype(freq=pd.offsets.MonthEnd())
@@ -1026,7 +1026,7 @@ def freq(self) -> BaseOffset:
Examples
--------
- >>> dtype = pd.PeriodDtype(freq='D')
+ >>> dtype = pd.PeriodDtype(freq="D")
>>> dtype.freq
<Day>
"""
@@ -1181,7 +1181,7 @@ class IntervalDtype(PandasExtensionDtype):
Examples
--------
- >>> pd.IntervalDtype(subtype='int64', closed='both')
+ >>> pd.IntervalDtype(subtype="int64", closed="both")
interval[int64, both]
"""
@@ -1281,7 +1281,7 @@ def subtype(self):
Examples
--------
- >>> dtype = pd.IntervalDtype(subtype='int64', closed='both')
+ >>> dtype = pd.IntervalDtype(subtype="int64", closed="both")
>>> dtype.subtype
dtype('int64')
"""
@@ -1999,7 +1999,7 @@ def _subtype_with_str(self):
>>> SparseDtype(object, 1)._subtype_with_str
dtype('O')
- >>> dtype = SparseDtype(str, '')
+ >>> dtype = SparseDtype(str, "")
>>> dtype.subtype
dtype('O')
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index e87b7f02b9b05..c0d9b418b9e79 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -428,7 +428,7 @@ def is_dataclass(item: object) -> bool:
>>> is_dataclass(Point)
False
- >>> is_dataclass(Point(0,2))
+ >>> is_dataclass(Point(0, 2))
True
"""
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 52ec4a0b012e3..17c1ad5e4d8d9 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -129,7 +129,7 @@ def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
--------
Scalar arguments (including strings) result in a scalar boolean.
- >>> pd.isna('dog')
+ >>> pd.isna("dog")
False
>>> pd.isna(pd.NA)
@@ -150,8 +150,7 @@ def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
For indexes, an ndarray of booleans is returned.
- >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
- ... "2017-07-08"])
+ >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
@@ -160,7 +159,7 @@ def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
For Series and DataFrame, the same type is returned, containing booleans.
- >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
+ >>> df = pd.DataFrame([["ant", "bee", "cat"], ["dog", None, "fly"]])
>>> df
0 1 2
0 ant bee cat
@@ -411,7 +410,7 @@ def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
--------
Scalar arguments (including strings) result in a scalar boolean.
- >>> pd.notna('dog')
+ >>> pd.notna("dog")
True
>>> pd.notna(pd.NA)
@@ -432,8 +431,7 @@ def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
For indexes, an ndarray of booleans is returned.
- >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
- ... "2017-07-08"])
+ >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
@@ -442,7 +440,7 @@ def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
For Series and DataFrame, the same type is returned, containing booleans.
- >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
+ >>> df = pd.DataFrame([["ant", "bee", "cat"], ["dog", None, "fly"]])
>>> df
0 1 2
0 ant bee cat
@@ -498,13 +496,9 @@ def array_equivalent(
Examples
--------
- >>> array_equivalent(
- ... np.array([1, 2, np.nan]),
- ... np.array([1, 2, np.nan]))
+ >>> array_equivalent(np.array([1, 2, np.nan]), np.array([1, 2, np.nan]))
True
- >>> array_equivalent(
- ... np.array([1, np.nan, 2]),
- ... np.array([1, 2, np.nan]))
+ >>> array_equivalent(np.array([1, np.nan, 2]), np.array([1, 2, np.nan]))
False
"""
left, right = np.asarray(left), np.asarray(right)
@@ -676,15 +670,15 @@ def na_value_for_dtype(dtype: DtypeObj, compat: bool = True):
Examples
--------
- >>> na_value_for_dtype(np.dtype('int64'))
+ >>> na_value_for_dtype(np.dtype("int64"))
0
- >>> na_value_for_dtype(np.dtype('int64'), compat=False)
+ >>> na_value_for_dtype(np.dtype("int64"), compat=False)
nan
- >>> na_value_for_dtype(np.dtype('float64'))
+ >>> na_value_for_dtype(np.dtype("float64"))
nan
- >>> na_value_for_dtype(np.dtype('bool'))
+ >>> na_value_for_dtype(np.dtype("bool"))
False
- >>> na_value_for_dtype(np.dtype('datetime64[ns]'))
+ >>> na_value_for_dtype(np.dtype("datetime64[ns]"))
numpy.datetime64('NaT')
"""
diff --git a/pandas/core/flags.py b/pandas/core/flags.py
index 394695e69a3d3..8dcf49745bf2d 100644
--- a/pandas/core/flags.py
+++ b/pandas/core/flags.py
@@ -41,7 +41,7 @@ class Flags:
>>> df.flags
<Flags(allows_duplicate_labels=False)>
- >>> df.flags['allows_duplicate_labels'] = True
+ >>> df.flags["allows_duplicate_labels"] = True
>>> df.flags
<Flags(allows_duplicate_labels=True)>
"""
@@ -71,7 +71,7 @@ def allows_duplicate_labels(self) -> bool:
Examples
--------
- >>> df = pd.DataFrame({"A": [1, 2]}, index=['a', 'a'])
+ >>> df = pd.DataFrame({"A": [1, 2]}, index=["a", "a"])
>>> df.flags.allows_duplicate_labels
True
>>> df.flags.allows_duplicate_labels = False
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 910d7b2ab2178..e48e5d9023f33 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -554,7 +554,7 @@ class DataFrame(NDFrame, OpsMixin):
--------
Constructing DataFrame from a dictionary.
- >>> d = {'col1': [1, 2], 'col2': [3, 4]}
+ >>> d = {"col1": [1, 2], "col2": [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
@@ -578,7 +578,7 @@ class DataFrame(NDFrame, OpsMixin):
Constructing DataFrame from a dictionary including Series:
- >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])}
+ >>> d = {"col1": [0, 1, 2, 3], "col2": pd.Series([2, 3], index=[2, 3])}
>>> pd.DataFrame(data=d, index=[0, 1, 2, 3])
col1 col2
0 0 NaN
@@ -588,8 +588,9 @@ class DataFrame(NDFrame, OpsMixin):
Constructing DataFrame from numpy ndarray:
- >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
- ... columns=['a', 'b', 'c'])
+ >>> df2 = pd.DataFrame(
+ ... np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=["a", "b", "c"]
+ ... )
>>> df2
a b c
0 1 2 3
@@ -598,10 +599,11 @@ class DataFrame(NDFrame, OpsMixin):
Constructing DataFrame from a numpy ndarray that has labeled columns:
- >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)],
- ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")])
- >>> df3 = pd.DataFrame(data, columns=['c', 'a'])
- ...
+ >>> data = np.array(
+ ... [(1, 2, 3), (4, 5, 6), (7, 8, 9)],
+ ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")],
+ ... )
+ >>> df3 = pd.DataFrame(data, columns=["c", "a"])
>>> df3
c a
0 3 1
@@ -926,12 +928,13 @@ def __dataframe__(
Examples
--------
- >>> df_not_necessarily_pandas = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
+ >>> df_not_necessarily_pandas = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> interchange_object = df_not_necessarily_pandas.__dataframe__()
>>> interchange_object.column_names()
Index(['A', 'B'], dtype='object')
- >>> df_pandas = (pd.api.interchange.from_dataframe
- ... (interchange_object.select_columns_by_name(['A'])))
+ >>> df_pandas = pd.api.interchange.from_dataframe(
+ ... interchange_object.select_columns_by_name(["A"])
+ ... )
>>> df_pandas
A
0 1
@@ -999,7 +1002,7 @@ def axes(self) -> list[Index]:
Examples
--------
- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
+ >>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
@@ -1017,12 +1020,11 @@ def shape(self) -> tuple[int, int]:
Examples
--------
- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
+ >>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
>>> df.shape
(2, 2)
- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
- ... 'col3': [5, 6]})
+ >>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4], "col3": [5, 6]})
>>> df.shape
(2, 3)
"""
@@ -1047,9 +1049,12 @@ def _is_homogeneous_type(self) -> bool:
Items with the same type but different sizes are considered
different types.
- >>> DataFrame({
- ... "A": np.array([1, 2], dtype=np.int32),
- ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
+ >>> DataFrame(
+ ... {
+ ... "A": np.array([1, 2], dtype=np.int32),
+ ... "B": np.array([1, 2], dtype=np.int64),
+ ... }
+ ... )._is_homogeneous_type
False
"""
# The "<" part of "<=" here is for empty DataFrame cases
@@ -1315,7 +1320,7 @@ def to_string(
Examples
--------
- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
+ >>> d = {"col1": [1, 2, 3], "col2": [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
@@ -1385,7 +1390,7 @@ def style(self) -> Styler:
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 2, 3]})
+ >>> df = pd.DataFrame({"A": [1, 2, 3]})
>>> df.style # doctest: +SKIP
Please see
@@ -1482,15 +1487,15 @@ def iterrows(self) -> Iterable[tuple[Hashable, Series]]:
Examples
--------
- >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
+ >>> df = pd.DataFrame([[1, 1.5]], columns=["int", "float"])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
- >>> print(row['int'].dtype)
+ >>> print(row["int"].dtype)
float64
- >>> print(df['int'].dtype)
+ >>> print(df["int"].dtype)
int64
"""
columns = self.columns
@@ -1536,15 +1541,15 @@ def itertuples(
Examples
--------
- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
- ... index=['dog', 'hawk'])
+ >>> df = pd.DataFrame(
+ ... {"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"]
+ ... )
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
- ...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
@@ -1553,16 +1558,14 @@ def itertuples(
>>> for row in df.itertuples(index=False):
... print(row)
- ...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
- >>> for row in df.itertuples(name='Animal'):
+ >>> for row in df.itertuples(name="Animal"):
... print(row)
- ...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
@@ -1797,7 +1800,7 @@ def from_dict(
--------
By default the keys of the dict become the DataFrame columns:
- >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
+ >>> data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
@@ -1808,8 +1811,8 @@ def from_dict(
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
- >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
- >>> pd.DataFrame.from_dict(data, orient='index')
+ >>> data = {"row_1": [3, 2, 1, 0], "row_2": ["a", "b", "c", "d"]}
+ >>> pd.DataFrame.from_dict(data, orient="index")
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
@@ -1817,8 +1820,7 @@ def from_dict(
When using the 'index' orientation, the column names can be
specified manually:
- >>> pd.DataFrame.from_dict(data, orient='index',
- ... columns=['A', 'B', 'C', 'D'])
+ >>> pd.DataFrame.from_dict(data, orient="index", columns=["A", "B", "C", "D"])
A B C D
row_1 3 2 1 0
row_2 a b c d
@@ -1826,12 +1828,14 @@ def from_dict(
Specify ``orient='tight'`` to create the DataFrame using a 'tight'
format:
- >>> data = {'index': [('a', 'b'), ('a', 'c')],
- ... 'columns': [('x', 1), ('y', 2)],
- ... 'data': [[1, 3], [2, 4]],
- ... 'index_names': ['n1', 'n2'],
- ... 'column_names': ['z1', 'z2']}
- >>> pd.DataFrame.from_dict(data, orient='tight')
+ >>> data = {
+ ... "index": [("a", "b"), ("a", "c")],
+ ... "columns": [("x", 1), ("y", 2)],
+ ... "data": [[1, 3], [2, 4]],
+ ... "index_names": ["n1", "n2"],
+ ... "column_names": ["z1", "z2"],
+ ... }
+ >>> pd.DataFrame.from_dict(data, orient="tight")
z1 x y
z2 1 2
n1 n2
@@ -1929,7 +1933,7 @@ def to_numpy(
For a mix of numeric and non-numeric types, the output array will
have object dtype.
- >>> df['C'] = pd.date_range('2000', periods=2)
+ >>> df["C"] = pd.date_range("2000", periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
@@ -2048,9 +2052,9 @@ def to_dict(
Examples
--------
- >>> df = pd.DataFrame({'col1': [1, 2],
- ... 'col2': [0.5, 0.75]},
- ... index=['row1', 'row2'])
+ >>> df = pd.DataFrame(
+ ... {"col1": [1, 2], "col2": [0.5, 0.75]}, index=["row1", "row2"]
+ ... )
>>> df
col1 col2
row1 1 0.50
@@ -2060,7 +2064,7 @@ def to_dict(
You can specify the return orientation.
- >>> df.to_dict('series')
+ >>> df.to_dict("series")
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
@@ -2068,17 +2072,17 @@ def to_dict(
row2 0.75
Name: col2, dtype: float64}
- >>> df.to_dict('split')
+ >>> df.to_dict("split")
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
- >>> df.to_dict('records')
+ >>> df.to_dict("records")
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
- >>> df.to_dict('index')
+ >>> df.to_dict("index")
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
- >>> df.to_dict('tight')
+ >>> df.to_dict("tight")
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]}
@@ -2092,7 +2096,7 @@ def to_dict(
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
- >>> df.to_dict('records', into=dd)
+ >>> df.to_dict("records", into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
@@ -2153,8 +2157,10 @@ def from_records(
--------
Data can be provided as a structured ndarray:
- >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],
- ... dtype=[('col_1', 'i4'), ('col_2', 'U1')])
+ >>> data = np.array(
+ ... [(3, "a"), (2, "b"), (1, "c"), (0, "d")],
+ ... dtype=[("col_1", "i4"), ("col_2", "U1")],
+ ... )
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
@@ -2164,10 +2170,12 @@ def from_records(
Data can be provided as a list of dicts:
- >>> data = [{'col_1': 3, 'col_2': 'a'},
- ... {'col_1': 2, 'col_2': 'b'},
- ... {'col_1': 1, 'col_2': 'c'},
- ... {'col_1': 0, 'col_2': 'd'}]
+ >>> data = [
+ ... {"col_1": 3, "col_2": "a"},
+ ... {"col_1": 2, "col_2": "b"},
+ ... {"col_1": 1, "col_2": "c"},
+ ... {"col_1": 0, "col_2": "d"},
+ ... ]
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
@@ -2177,8 +2185,8 @@ def from_records(
Data can be provided as a list of tuples with corresponding columns:
- >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]
- >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])
+ >>> data = [(3, "a"), (2, "b"), (1, "c"), (0, "d")]
+ >>> pd.DataFrame.from_records(data, columns=["col_1", "col_2"])
col_1 col_2
0 3 a
1 2 b
@@ -2367,8 +2375,7 @@ def to_records(
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
- ... index=['a', 'b'])
+ >>> df = pd.DataFrame({"A": [1, 2], "B": [0.5, 0.75]}, index=["a", "b"])
>>> df
A B
a 1 0.50
@@ -2639,10 +2646,10 @@ def to_stata(
Examples
--------
- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
- ... 'parrot'],
- ... 'speed': [350, 18, 361, 15]}})
- >>> df.to_stata('animals.dta') # doctest: +SKIP
+ >>> df = pd.DataFrame(
+ ... [["falcon", 350], ["parrot", 18]], columns=["animal", "speed"]
+ ... )
+ >>> df.to_stata("animals.dta") # doctest: +SKIP
"""
if version not in (114, 117, 118, 119, None):
raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
@@ -2869,10 +2876,9 @@ def to_parquet(
Examples
--------
- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})
- >>> df.to_parquet('df.parquet.gzip',
- ... compression='gzip') # doctest: +SKIP
- >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
+ >>> df = pd.DataFrame(data={{"col1": [1, 2], "col2": [3, 4]}})
+ >>> df.to_parquet("df.parquet.gzip", compression="gzip") # doctest: +SKIP
+ >>> pd.read_parquet("df.parquet.gzip") # doctest: +SKIP
col1 col2
0 1 3
1 2 4
@@ -2967,9 +2973,9 @@ def to_orc(
Examples
--------
- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]})
- >>> df.to_orc('df.orc') # doctest: +SKIP
- >>> pd.read_orc('df.orc') # doctest: +SKIP
+ >>> df = pd.DataFrame(data={"col1": [1, 2], "col2": [4, 3]})
+ >>> df.to_orc("df.orc") # doctest: +SKIP
+ >>> pd.read_orc("df.orc") # doctest: +SKIP
col1 col2
0 1 4
1 2 3
@@ -3110,7 +3116,7 @@ def to_html(
Examples
--------
- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]})
+ >>> df = pd.DataFrame(data={"col1": [1, 2], "col2": [4, 3]})
>>> html_string = '''<table border="1" class="dataframe">
... <thead>
... <tr style="text-align: right;">
@@ -3315,9 +3321,10 @@ def to_xml(
Examples
--------
- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'],
- ... 'degrees': [360, 360, 180],
- ... 'sides': [4, np.nan, 3]}})
+ >>> df = pd.DataFrame(
+ ... [["square", 360, 4], ["circle", 360, np.nan], ["triangle", 180, 3]],
+ ... columns=["shape", "degrees", "sides"],
+ ... )
>>> df.to_xml() # doctest: +SKIP
<?xml version='1.0' encoding='utf-8'?>
@@ -3342,9 +3349,9 @@ def to_xml(
</row>
</data>
- >>> df.to_xml(attr_cols=[
- ... 'index', 'shape', 'degrees', 'sides'
- ... ]) # doctest: +SKIP
+ >>> df.to_xml(
+ ... attr_cols=["index", "shape", "degrees", "sides"]
+ ... ) # doctest: +SKIP
<?xml version='1.0' encoding='utf-8'?>
<data>
<row index="0" shape="square" degrees="360" sides="4.0"/>
@@ -3352,8 +3359,9 @@ def to_xml(
<row index="2" shape="triangle" degrees="180" sides="3.0"/>
</data>
- >>> df.to_xml(namespaces={{"doc": "https://example.com"}},
- ... prefix="doc") # doctest: +SKIP
+ >>> df.to_xml(
+ ... namespaces={{"doc": "https://example.com"}}, prefix="doc"
+ ... ) # doctest: +SKIP
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns:doc="https://example.com">
<doc:row>
@@ -3485,9 +3493,8 @@ def memory_usage(self, index: bool = True, deep: bool = False) -> Series:
Examples
--------
- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
- >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))
- ... for t in dtypes])
+ >>> dtypes = ["int64", "float64", "complex128", "object", "bool"]
+ >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
@@ -3528,7 +3535,7 @@ def memory_usage(self, index: bool = True, deep: bool = False) -> Series:
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
- >>> df['object'].astype('category').memory_usage(deep=True)
+ >>> df["object"].astype("category").memory_usage(deep=True)
5136
"""
result = self._constructor_sliced(
@@ -3593,7 +3600,7 @@ def transpose(self, *args, copy: bool = False) -> DataFrame:
--------
**Square DataFrame with homogeneous dtype**
- >>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
+ >>> d1 = {"col1": [1, 2], "col2": [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
@@ -3620,10 +3627,12 @@ def transpose(self, *args, copy: bool = False) -> DataFrame:
**Non-square DataFrame with mixed dtypes**
- >>> d2 = {'name': ['Alice', 'Bob'],
- ... 'score': [9.5, 8],
- ... 'employed': [False, True],
- ... 'kids': [0, 0]}
+ >>> d2 = {
+ ... "name": ["Alice", "Bob"],
+ ... "score": [9.5, 8],
+ ... "employed": [False, True],
+ ... "kids": [0, 0],
+ ... }
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
@@ -3743,7 +3752,7 @@ def T(self) -> DataFrame:
Examples
--------
- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
+ >>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
>>> df
col1 col2
0 1 3
@@ -4477,9 +4486,9 @@ def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | No
Examples
--------
- >>> df = pd.DataFrame({'A': range(1, 6),
- ... 'B': range(10, 0, -2),
- ... 'C C': range(10, 5, -1)})
+ >>> df = pd.DataFrame(
+ ... {"A": range(1, 6), "B": range(10, 0, -2), "C C": range(10, 5, -1)}
+ ... )
>>> df
A B C C
0 1 10 10
@@ -4487,7 +4496,7 @@ def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | No
2 3 6 8
3 4 4 7
4 5 2 6
- >>> df.query('A > B')
+ >>> df.query("A > B")
A B C C
4 5 2 6
@@ -4499,13 +4508,13 @@ def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | No
For columns with spaces in their name, you can use backtick quoting.
- >>> df.query('B == `C C`')
+ >>> df.query("B == `C C`")
A B C C
0 1 10 10
The previous expression is equivalent to
- >>> df[df.B == df['C C']]
+ >>> df[df.B == df["C C"]]
A B C C
0 1 10 10
"""
@@ -4581,7 +4590,7 @@ def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None:
Examples
--------
- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
+ >>> df = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2)})
>>> df
A B
0 1 10
@@ -4589,7 +4598,7 @@ def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None:
2 3 6
3 4 4
4 5 2
- >>> df.eval('A + B')
+ >>> df.eval("A + B")
0 11
1 10
2 9
@@ -4600,7 +4609,7 @@ def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None:
Assignment is allowed though by default the original DataFrame is not
modified.
- >>> df.eval('C = A + B')
+ >>> df.eval("C = A + B")
A B C
0 1 10 11
1 2 8 10
@@ -4687,9 +4696,9 @@ def select_dtypes(self, include=None, exclude=None) -> Self:
Examples
--------
- >>> df = pd.DataFrame({'a': [1, 2] * 3,
- ... 'b': [True, False] * 3,
- ... 'c': [1.0, 2.0] * 3})
+ >>> df = pd.DataFrame(
+ ... {"a": [1, 2] * 3, "b": [True, False] * 3, "c": [1.0, 2.0] * 3}
+ ... )
>>> df
a b c
0 1 True 1.0
@@ -4699,7 +4708,7 @@ def select_dtypes(self, include=None, exclude=None) -> Self:
4 1 True 1.0
5 2 False 2.0
- >>> df.select_dtypes(include='bool')
+ >>> df.select_dtypes(include="bool")
b
0 True
1 False
@@ -4708,7 +4717,7 @@ def select_dtypes(self, include=None, exclude=None) -> Self:
4 True
5 False
- >>> df.select_dtypes(include=['float64'])
+ >>> df.select_dtypes(include=["float64"])
c
0 1.0
1 2.0
@@ -4717,7 +4726,7 @@ def select_dtypes(self, include=None, exclude=None) -> Self:
4 1.0
5 2.0
- >>> df.select_dtypes(exclude=['int64'])
+ >>> df.select_dtypes(exclude=["int64"])
b c
0 True 1.0
1 False 2.0
@@ -4816,7 +4825,7 @@ def insert(
Examples
--------
- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
+ >>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
>>> df
col1 col2
0 1 3
@@ -4896,8 +4905,7 @@ def assign(self, **kwargs) -> DataFrame:
Examples
--------
- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
- ... index=['Portland', 'Berkeley'])
+ >>> df = pd.DataFrame({"temp_c": [17.0, 25.0]}, index=["Portland", "Berkeley"])
>>> df
temp_c
Portland 17.0
@@ -4913,7 +4921,7 @@ def assign(self, **kwargs) -> DataFrame:
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
- >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
+ >>> df.assign(temp_f=df["temp_c"] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
@@ -4921,8 +4929,10 @@ def assign(self, **kwargs) -> DataFrame:
You can create multiple columns within the same assign where one
of the columns depends on another one defined within the same assign:
- >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
- ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
+ >>> df.assign(
+ ... temp_f=lambda x: x["temp_c"] * 9 / 5 + 32,
+ ... temp_k=lambda x: (x["temp_f"] + 459.67) * 5 / 9,
+ ... )
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
@@ -5189,8 +5199,7 @@ def drop(
Examples
--------
- >>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
- ... columns=['A', 'B', 'C', 'D'])
+ >>> df = pd.DataFrame(np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"])
>>> df
A B C D
0 0 1 2 3
@@ -5199,13 +5208,13 @@ def drop(
Drop columns
- >>> df.drop(['B', 'C'], axis=1)
+ >>> df.drop(["B", "C"], axis=1)
A D
0 0 3
1 4 7
2 8 11
- >>> df.drop(columns=['B', 'C'])
+ >>> df.drop(columns=["B", "C"])
A D
0 0 3
1 4 7
@@ -5219,14 +5228,25 @@ def drop(
Drop columns and/or rows of MultiIndex DataFrame
- >>> midx = pd.MultiIndex(levels=[['llama', 'cow', 'falcon'],
- ... ['speed', 'weight', 'length']],
- ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
- ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
- >>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
- ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
- ... [250, 150], [1.5, 0.8], [320, 250],
- ... [1, 0.8], [0.3, 0.2]])
+ >>> midx = pd.MultiIndex(
+ ... levels=[["llama", "cow", "falcon"], ["speed", "weight", "length"]],
+ ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
+ ... )
+ >>> df = pd.DataFrame(
+ ... index=midx,
+ ... columns=["big", "small"],
+ ... data=[
+ ... [45, 30],
+ ... [200, 100],
+ ... [1.5, 1],
+ ... [30, 20],
+ ... [250, 150],
+ ... [1.5, 0.8],
+ ... [320, 250],
+ ... [1, 0.8],
+ ... [0.3, 0.2],
+ ... ],
+ ... )
>>> df
big small
llama speed 45.0 30.0
@@ -5243,7 +5263,7 @@ def drop(
DataFrame, i.e., drop the combination ``'falcon'`` and
``'weight'``, which deletes only the corresponding row
- >>> df.drop(index=('falcon', 'weight'))
+ >>> df.drop(index=("falcon", "weight"))
big small
llama speed 45.0 30.0
weight 200.0 100.0
@@ -5254,7 +5274,7 @@ def drop(
falcon speed 320.0 250.0
length 0.3 0.2
- >>> df.drop(index='cow', columns='small')
+ >>> df.drop(index="cow", columns="small")
big
llama speed 45.0
weight 200.0
@@ -5263,7 +5283,7 @@ def drop(
weight 1.0
length 0.3
- >>> df.drop(index='length', level=1)
+ >>> df.drop(index="length", level=1)
big small
llama speed 45.0 30.0
weight 200.0 100.0
@@ -5446,13 +5466,13 @@ def rename(
Using axis-style parameters:
- >>> df.rename(str.lower, axis='columns')
+ >>> df.rename(str.lower, axis="columns")
a b
0 1 4
1 2 5
2 3 6
- >>> df.rename({1: 2, 2: 4}, axis='index')
+ >>> df.rename({1: 2, 2: 4}, axis="index")
A B
0 1 4
2 2 5
@@ -5484,11 +5504,15 @@ def pop(self, item: Hashable) -> Series:
Examples
--------
- >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
- ... ('parrot', 'bird', 24.0),
- ... ('lion', 'mammal', 80.5),
- ... ('monkey', 'mammal', np.nan)],
- ... columns=('name', 'class', 'max_speed'))
+ >>> df = pd.DataFrame(
+ ... [
+ ... ("falcon", "bird", 389.0),
+ ... ("parrot", "bird", 24.0),
+ ... ("lion", "mammal", 80.5),
+ ... ("monkey", "mammal", np.nan),
+ ... ],
+ ... columns=("name", "class", "max_speed"),
+ ... )
>>> df
name class max_speed
0 falcon bird 389.0
@@ -5496,7 +5520,7 @@ def pop(self, item: Hashable) -> Series:
2 lion mammal 80.5
3 monkey mammal NaN
- >>> df.pop('class')
+ >>> df.pop("class")
0 bird
1 bird
2 mammal
@@ -5729,9 +5753,13 @@ def set_index(
Examples
--------
- >>> df = pd.DataFrame({'month': [1, 4, 7, 10],
- ... 'year': [2012, 2014, 2013, 2014],
- ... 'sale': [55, 40, 84, 31]})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "month": [1, 4, 7, 10],
+ ... "year": [2012, 2014, 2013, 2014],
+ ... "sale": [55, 40, 84, 31],
+ ... }
+ ... )
>>> df
month year sale
0 1 2012 55
@@ -5741,7 +5769,7 @@ def set_index(
Set the index to become the 'month' column:
- >>> df.set_index('month')
+ >>> df.set_index("month")
year sale
month
1 2012 55
@@ -5751,7 +5779,7 @@ def set_index(
Create a MultiIndex using columns 'year' and 'month':
- >>> df.set_index(['year', 'month'])
+ >>> df.set_index(["year", "month"])
sale
year month
2012 1 55
@@ -5761,7 +5789,7 @@ def set_index(
Create a MultiIndex using an Index and a column:
- >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
+ >>> df.set_index([pd.Index([1, 2, 3, 4]), "year"])
month sale
year
1 2012 1 55
@@ -5987,12 +6015,11 @@ def reset_index(
Examples
--------
- >>> df = pd.DataFrame([('bird', 389.0),
- ... ('bird', 24.0),
- ... ('mammal', 80.5),
- ... ('mammal', np.nan)],
- ... index=['falcon', 'parrot', 'lion', 'monkey'],
- ... columns=('class', 'max_speed'))
+ >>> df = pd.DataFrame(
+ ... [("bird", 389.0), ("bird", 24.0), ("mammal", 80.5), ("mammal", np.nan)],
+ ... index=["falcon", "parrot", "lion", "monkey"],
+ ... columns=("class", "max_speed"),
+ ... )
>>> df
class max_speed
falcon bird 389.0
@@ -6022,19 +6049,21 @@ class max_speed
You can also use `reset_index` with `MultiIndex`.
- >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
- ... ('bird', 'parrot'),
- ... ('mammal', 'lion'),
- ... ('mammal', 'monkey')],
- ... names=['class', 'name'])
- >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
- ... ('species', 'type')])
- >>> df = pd.DataFrame([(389.0, 'fly'),
- ... (24.0, 'fly'),
- ... (80.5, 'run'),
- ... (np.nan, 'jump')],
- ... index=index,
- ... columns=columns)
+ >>> index = pd.MultiIndex.from_tuples(
+ ... [
+ ... ("bird", "falcon"),
+ ... ("bird", "parrot"),
+ ... ("mammal", "lion"),
+ ... ("mammal", "monkey"),
+ ... ],
+ ... names=["class", "name"],
+ ... )
+ >>> columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
+ >>> df = pd.DataFrame(
+ ... [(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
+ ... index=index,
+ ... columns=columns,
+ ... )
>>> df
speed species
max type
@@ -6046,7 +6075,7 @@ class name
Using the `names` parameter, choose a name for the index column:
- >>> df.reset_index(names=['classes', 'names'])
+ >>> df.reset_index(names=["classes", "names"])
classes names speed species
max type
0 bird falcon 389.0 fly
@@ -6056,7 +6085,7 @@ class name
If the index has multiple levels, we can reset a subset of them:
- >>> df.reset_index(level='class')
+ >>> df.reset_index(level="class")
class speed species
max type
name
@@ -6068,7 +6097,7 @@ class speed species
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
- >>> df.reset_index(level='class', col_level=1)
+ >>> df.reset_index(level="class", col_level=1)
speed species
class max type
name
@@ -6080,7 +6109,7 @@ class max type
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
- >>> df.reset_index(level='class', col_level=1, col_fill='species')
+ >>> df.reset_index(level="class", col_level=1, col_fill="species")
species speed species
class max type
name
@@ -6091,7 +6120,7 @@ class max type
If we specify a nonexistent level for `col_fill`, it is created:
- >>> df.reset_index(level='class', col_level=1, col_fill='genus')
+ >>> df.reset_index(level="class", col_level=1, col_fill="genus")
genus speed species
class max type
name
@@ -6287,10 +6316,13 @@ def dropna(
Examples
--------
- >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
- ... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
- ... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
- ... pd.NaT]})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "name": ["Alfred", "Batman", "Catwoman"],
+ ... "toy": [np.nan, "Batmobile", "Bullwhip"],
+ ... "born": [pd.NaT, pd.Timestamp("1940-04-25"), pd.NaT],
+ ... }
+ ... )
>>> df
name toy born
0 Alfred NaN NaT
@@ -6305,7 +6337,7 @@ def dropna(
Drop the columns where at least one element is missing.
- >>> df.dropna(axis='columns')
+ >>> df.dropna(axis="columns")
name
0 Alfred
1 Batman
@@ -6313,7 +6345,7 @@ def dropna(
Drop the rows where all elements are missing.
- >>> df.dropna(how='all')
+ >>> df.dropna(how="all")
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
@@ -6328,7 +6360,7 @@ def dropna(
Define in which columns to look for missing values.
- >>> df.dropna(subset=['name', 'toy'])
+ >>> df.dropna(subset=["name", "toy"])
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
@@ -6463,11 +6495,13 @@ def drop_duplicates(
--------
Consider dataset containing ramen rating.
- >>> df = pd.DataFrame({
- ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
- ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
- ... 'rating': [4, 4, 3.5, 15, 5]
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "brand": ["Yum Yum", "Yum Yum", "Indomie", "Indomie", "Indomie"],
+ ... "style": ["cup", "cup", "cup", "pack", "pack"],
+ ... "rating": [4, 4, 3.5, 15, 5],
+ ... }
+ ... )
>>> df
brand style rating
0 Yum Yum cup 4.0
@@ -6487,14 +6521,14 @@ def drop_duplicates(
To remove duplicates on specific column(s), use ``subset``.
- >>> df.drop_duplicates(subset=['brand'])
+ >>> df.drop_duplicates(subset=["brand"])
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
To remove duplicates and keep last occurrences, use ``keep``.
- >>> df.drop_duplicates(subset=['brand', 'style'], keep='last')
+ >>> df.drop_duplicates(subset=["brand", "style"], keep="last")
brand style rating
1 Yum Yum cup 4.0
2 Indomie cup 3.5
@@ -6554,11 +6588,13 @@ def duplicated(
--------
Consider dataset containing ramen rating.
- >>> df = pd.DataFrame({
- ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
- ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
- ... 'rating': [4, 4, 3.5, 15, 5]
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "brand": ["Yum Yum", "Yum Yum", "Indomie", "Indomie", "Indomie"],
+ ... "style": ["cup", "cup", "cup", "pack", "pack"],
+ ... "rating": [4, 4, 3.5, 15, 5],
+ ... }
+ ... )
>>> df
brand style rating
0 Yum Yum cup 4.0
@@ -6581,7 +6617,7 @@ def duplicated(
By using 'last', the last occurrence of each set of duplicated values
is set on False and all others on True.
- >>> df.duplicated(keep='last')
+ >>> df.duplicated(keep="last")
0 True
1 False
2 False
@@ -6601,7 +6637,7 @@ def duplicated(
To find duplicates on specific column(s), use ``subset``.
- >>> df.duplicated(subset=['brand'])
+ >>> df.duplicated(subset=["brand"])
0 False
1 True
2 False
@@ -6747,12 +6783,14 @@ def sort_values(
Examples
--------
- >>> df = pd.DataFrame({
- ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
- ... 'col2': [2, 1, 9, 8, 7, 4],
- ... 'col3': [0, 1, 9, 4, 2, 3],
- ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F']
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "col1": ["A", "A", "B", np.nan, "D", "C"],
+ ... "col2": [2, 1, 9, 8, 7, 4],
+ ... "col3": [0, 1, 9, 4, 2, 3],
+ ... "col4": ["a", "B", "c", "D", "e", "F"],
+ ... }
+ ... )
>>> df
col1 col2 col3 col4
0 A 2 0 a
@@ -6764,7 +6802,7 @@ def sort_values(
Sort by col1
- >>> df.sort_values(by=['col1'])
+ >>> df.sort_values(by=["col1"])
col1 col2 col3 col4
0 A 2 0 a
1 A 1 1 B
@@ -6775,7 +6813,7 @@ def sort_values(
Sort by multiple columns
- >>> df.sort_values(by=['col1', 'col2'])
+ >>> df.sort_values(by=["col1", "col2"])
col1 col2 col3 col4
1 A 1 1 B
0 A 2 0 a
@@ -6786,7 +6824,7 @@ def sort_values(
Sort Descending
- >>> df.sort_values(by='col1', ascending=False)
+ >>> df.sort_values(by="col1", ascending=False)
col1 col2 col3 col4
4 D 7 2 e
5 C 4 3 F
@@ -6797,7 +6835,7 @@ def sort_values(
Putting NAs first
- >>> df.sort_values(by='col1', ascending=False, na_position='first')
+ >>> df.sort_values(by="col1", ascending=False, na_position="first")
col1 col2 col3 col4
3 NaN 8 4 D
4 D 7 2 e
@@ -6808,7 +6846,7 @@ def sort_values(
Sorting with a key function
- >>> df.sort_values(by='col4', key=lambda col: col.str.lower())
+ >>> df.sort_values(by="col4", key=lambda col: col.str.lower())
col1 col2 col3 col4
0 A 2 0 a
1 A 1 1 B
@@ -6820,10 +6858,12 @@ def sort_values(
Natural sort with the key argument,
using the `natsort <https://github.com/SethMMorton/natsort>` package.
- >>> df = pd.DataFrame({
- ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'],
- ... "value": [10, 20, 30, 40, 50]
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "time": ["0hr", "128hr", "72hr", "48hr", "96hr"],
+ ... "value": [10, 20, 30, 40, 50],
+ ... }
+ ... )
>>> df
time value
0 0hr 10
@@ -6833,8 +6873,7 @@ def sort_values(
4 96hr 50
>>> from natsort import index_natsorted
>>> df.sort_values(
- ... by="time",
- ... key=lambda x: np.argsort(index_natsorted(df["time"]))
+ ... by="time", key=lambda x: np.argsort(index_natsorted(df["time"]))
... )
time value
0 0hr 10
@@ -7035,8 +7074,9 @@ def sort_index(
Examples
--------
- >>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],
- ... columns=['A'])
+ >>> df = pd.DataFrame(
+ ... [1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150], columns=["A"]
+ ... )
>>> df.sort_index()
A
1 4
@@ -7059,7 +7099,7 @@ def sort_index(
A key function can be specified which is applied to the index before
sorting. For a ``MultiIndex`` this is applied to each level separately.
- >>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])
+ >>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=["A", "b", "C", "d"])
>>> df.sort_index(key=lambda x: x.str.lower())
a
A 1
@@ -7123,9 +7163,10 @@ def value_counts(
Examples
--------
- >>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],
- ... 'num_wings': [2, 0, 0, 0]},
- ... index=['falcon', 'dog', 'cat', 'ant'])
+ >>> df = pd.DataFrame(
+ ... {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
+ ... index=["falcon", "dog", "cat", "ant"],
+ ... )
>>> df
num_legs num_wings
falcon 2 2
@@ -7163,8 +7204,12 @@ def value_counts(
With `dropna` set to `False` we can also count rows with NA values.
- >>> df = pd.DataFrame({'first_name': ['John', 'Anne', 'John', 'Beth'],
- ... 'middle_name': ['Smith', pd.NA, pd.NA, 'Louise']})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "first_name": ["John", "Anne", "John", "Beth"],
+ ... "middle_name": ["Smith", pd.NA, pd.NA, "Louise"],
+ ... }
+ ... )
>>> df
first_name middle_name
0 John Smith
@@ -7262,16 +7307,34 @@ def nlargest(
Examples
--------
- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
- ... 434000, 434000, 337000, 11300,
- ... 11300, 11300],
- ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
- ... 17036, 182, 38, 311],
- ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
- ... "IS", "NR", "TV", "AI"]},
- ... index=["Italy", "France", "Malta",
- ... "Maldives", "Brunei", "Iceland",
- ... "Nauru", "Tuvalu", "Anguilla"])
+ >>> df = pd.DataFrame(
+ ... {
+ ... "population": [
+ ... 59000000,
+ ... 65000000,
+ ... 434000,
+ ... 434000,
+ ... 434000,
+ ... 337000,
+ ... 11300,
+ ... 11300,
+ ... 11300,
+ ... ],
+ ... "GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
+ ... "alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
+ ... },
+ ... index=[
+ ... "Italy",
+ ... "France",
+ ... "Malta",
+ ... "Maldives",
+ ... "Brunei",
+ ... "Iceland",
+ ... "Nauru",
+ ... "Tuvalu",
+ ... "Anguilla",
+ ... ],
+ ... )
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
@@ -7287,7 +7350,7 @@ def nlargest(
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
- >>> df.nlargest(3, 'population')
+ >>> df.nlargest(3, "population")
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
@@ -7295,7 +7358,7 @@ def nlargest(
When using ``keep='last'``, ties are resolved in reverse order:
- >>> df.nlargest(3, 'population', keep='last')
+ >>> df.nlargest(3, "population", keep="last")
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
@@ -7305,7 +7368,7 @@ def nlargest(
if there are duplicate values for the smallest element, all the
ties are kept:
- >>> df.nlargest(3, 'population', keep='all')
+ >>> df.nlargest(3, "population", keep="all")
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
@@ -7315,7 +7378,7 @@ def nlargest(
However, ``nlargest`` does not keep ``n`` distinct largest elements:
- >>> df.nlargest(5, 'population', keep='all')
+ >>> df.nlargest(5, "population", keep="all")
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
@@ -7326,7 +7389,7 @@ def nlargest(
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
- >>> df.nlargest(3, ['population', 'GDP'])
+ >>> df.nlargest(3, ["population", "GDP"])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
@@ -7375,16 +7438,34 @@ def nsmallest(
Examples
--------
- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
- ... 434000, 434000, 337000, 337000,
- ... 11300, 11300],
- ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
- ... 17036, 182, 38, 311],
- ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
- ... "IS", "NR", "TV", "AI"]},
- ... index=["Italy", "France", "Malta",
- ... "Maldives", "Brunei", "Iceland",
- ... "Nauru", "Tuvalu", "Anguilla"])
+ >>> df = pd.DataFrame(
+ ... {
+ ... "population": [
+ ... 59000000,
+ ... 65000000,
+ ... 434000,
+ ... 434000,
+ ... 434000,
+ ... 337000,
+ ... 337000,
+ ... 11300,
+ ... 11300,
+ ... ],
+ ... "GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
+ ... "alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
+ ... },
+ ... index=[
+ ... "Italy",
+ ... "France",
+ ... "Malta",
+ ... "Maldives",
+ ... "Brunei",
+ ... "Iceland",
+ ... "Nauru",
+ ... "Tuvalu",
+ ... "Anguilla",
+ ... ],
+ ... )
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
@@ -7400,7 +7481,7 @@ def nsmallest(
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
- >>> df.nsmallest(3, 'population')
+ >>> df.nsmallest(3, "population")
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
@@ -7408,7 +7489,7 @@ def nsmallest(
When using ``keep='last'``, ties are resolved in reverse order:
- >>> df.nsmallest(3, 'population', keep='last')
+ >>> df.nsmallest(3, "population", keep="last")
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
@@ -7418,7 +7499,7 @@ def nsmallest(
if there are duplicate values for the largest element, all the
ties are kept.
- >>> df.nsmallest(3, 'population', keep='all')
+ >>> df.nsmallest(3, "population", keep="all")
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
@@ -7428,7 +7509,7 @@ def nsmallest(
However, ``nsmallest`` does not keep ``n`` distinct
smallest elements:
- >>> df.nsmallest(4, 'population', keep='all')
+ >>> df.nsmallest(4, "population", keep="all")
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
@@ -7438,7 +7519,7 @@ def nsmallest(
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
- >>> df.nsmallest(3, ['population', 'GDP'])
+ >>> df.nsmallest(3, ["population", "GDP"])
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
@@ -8323,8 +8404,8 @@ def combine(
--------
Combine using a simple function that chooses the smaller column.
- >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
- >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
+ >>> df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
+ >>> df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
@@ -8333,8 +8414,8 @@ def combine(
Example using a true element-wise combine function.
- >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
- >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
+ >>> df1 = pd.DataFrame({"A": [5, 0], "B": [2, 4]})
+ >>> df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
@@ -8343,8 +8424,8 @@ def combine(
Using `fill_value` fills Nones prior to passing the column to the
merge function.
- >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
- >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
+ >>> df1 = pd.DataFrame({"A": [0, 0], "B": [None, 4]})
+ >>> df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
@@ -8353,8 +8434,8 @@ def combine(
However, if the same element in both dataframes is None, that None
is preserved
- >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
- >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
+ >>> df1 = pd.DataFrame({"A": [0, 0], "B": [None, 4]})
+ >>> df2 = pd.DataFrame({"A": [1, 1], "B": [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
@@ -8363,8 +8444,14 @@ def combine(
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
- >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
- >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
+ >>> df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
+ >>> df2 = pd.DataFrame(
+ ... {
+ ... "B": [3, 3],
+ ... "C": [-10, 1],
+ ... },
+ ... index=[1, 2],
+ ... )
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
@@ -8379,7 +8466,13 @@ def combine(
Demonstrating the preference of the passed in dataframe.
- >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
+ >>> df2 = pd.DataFrame(
+ ... {
+ ... "B": [3, 3],
+ ... "C": [1, 1],
+ ... },
+ ... index=[1, 2],
+ ... )
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
@@ -8489,8 +8582,8 @@ def combine_first(self, other: DataFrame) -> DataFrame:
Examples
--------
- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
- >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
+ >>> df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
+ >>> df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
@@ -8499,8 +8592,8 @@ def combine_first(self, other: DataFrame) -> DataFrame:
Null values still persist if the location of that null value
does not exist in `other`
- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
- >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
+ >>> df1 = pd.DataFrame({"A": [None, 0], "B": [4, None]})
+ >>> df2 = pd.DataFrame({"B": [3, 3], "C": [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
@@ -8599,10 +8692,8 @@ def update(
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 2, 3],
- ... 'B': [400, 500, 600]})
- >>> new_df = pd.DataFrame({'B': [4, 5, 6],
- ... 'C': [7, 8, 9]})
+ >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [400, 500, 600]})
+ >>> new_df = pd.DataFrame({"B": [4, 5, 6], "C": [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
@@ -8613,9 +8704,8 @@ def update(
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
- >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
- ... 'B': ['x', 'y', 'z']})
- >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
+ >>> df = pd.DataFrame({"A": ["a", "b", "c"], "B": ["x", "y", "z"]})
+ >>> new_df = pd.DataFrame({"B": ["d", "e", "f", "g", "h", "i"]})
>>> df.update(new_df)
>>> df
A B
@@ -8623,9 +8713,8 @@ def update(
1 b e
2 c f
- >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
- ... 'B': ['x', 'y', 'z']})
- >>> new_df = pd.DataFrame({'B': ['d', 'f']}, index=[0, 2])
+ >>> df = pd.DataFrame({"A": ["a", "b", "c"], "B": ["x", "y", "z"]})
+ >>> new_df = pd.DataFrame({"B": ["d", "f"]}, index=[0, 2])
>>> df.update(new_df)
>>> df
A B
@@ -8635,9 +8724,8 @@ def update(
For Series, its name attribute must be set.
- >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
- ... 'B': ['x', 'y', 'z']})
- >>> new_column = pd.Series(['d', 'e', 'f'], name='B')
+ >>> df = pd.DataFrame({"A": ["a", "b", "c"], "B": ["x", "y", "z"]})
+ >>> new_column = pd.Series(["d", "e", "f"], name="B")
>>> df.update(new_column)
>>> df
A B
@@ -8648,9 +8736,8 @@ def update(
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
- >>> df = pd.DataFrame({'A': [1, 2, 3],
- ... 'B': [400., 500., 600.]})
- >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
+ >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [400.0, 500.0, 600.0]})
+ >>> new_df = pd.DataFrame({"B": [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
@@ -9235,9 +9322,9 @@ def stack(
--------
**Single level columns**
- >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
- ... index=['cat', 'dog'],
- ... columns=['weight', 'height'])
+ >>> df_single_level_cols = pd.DataFrame(
+ ... [[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
+ ... )
Stacking a dataframe with a single level column axis returns a Series:
@@ -9254,11 +9341,12 @@ def stack(
**Multi level columns: simple case**
- >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
- ... ('weight', 'pounds')])
- >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
- ... index=['cat', 'dog'],
- ... columns=multicol1)
+ >>> multicol1 = pd.MultiIndex.from_tuples(
+ ... [("weight", "kg"), ("weight", "pounds")]
+ ... )
+ >>> df_multi_level_cols1 = pd.DataFrame(
+ ... [[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1
+ ... )
Stacking a dataframe with a multi-level column axis:
@@ -9276,11 +9364,10 @@ def stack(
**Missing values**
- >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
- ... ('height', 'm')])
- >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
- ... index=['cat', 'dog'],
- ... columns=multicol2)
+ >>> multicol2 = pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
+ >>> df_multi_level_cols2 = pd.DataFrame(
+ ... [[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=multicol2
+ ... )
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
@@ -9434,9 +9521,13 @@ def explode(
Examples
--------
- >>> df = pd.DataFrame({'A': [[0, 1, 2], 'foo', [], [3, 4]],
- ... 'B': 1,
- ... 'C': [['a', 'b', 'c'], np.nan, [], ['d', 'e']]})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "A": [[0, 1, 2], "foo", [], [3, 4]],
+ ... "B": 1,
+ ... "C": [["a", "b", "c"], np.nan, [], ["d", "e"]],
+ ... }
+ ... )
>>> df
A B C
0 [0, 1, 2] 1 [a, b, c]
@@ -9446,7 +9537,7 @@ def explode(
Single-column explode.
- >>> df.explode('A')
+ >>> df.explode("A")
A B C
0 0 1 [a, b, c]
0 1 1 [a, b, c]
@@ -9458,7 +9549,7 @@ def explode(
Multi-column explode.
- >>> df.explode(list('AC'))
+ >>> df.explode(list("AC"))
A B C
0 0 1 a
0 1 1 b
@@ -9544,8 +9635,9 @@ def unstack(
Examples
--------
- >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
- ... ('two', 'a'), ('two', 'b')])
+ >>> index = pd.MultiIndex.from_tuples(
+ ... [("one", "a"), ("one", "b"), ("two", "a"), ("two", "b")]
+ ... )
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
@@ -9939,7 +10031,7 @@ def apply(
Examples
--------
- >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
+ >>> df = pd.DataFrame([[4, 9]] * 3, columns=["A", "B"])
>>> df
A B
0 4 9
@@ -9979,7 +10071,7 @@ def apply(
Passing ``result_type='expand'`` will expand list-like results
to columns of a Dataframe
- >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
+ >>> df.apply(lambda x: [1, 2], axis=1, result_type="expand")
0 1
0 1 2
1 1 2
@@ -9989,7 +10081,7 @@ def apply(
``result_type='expand'``. The resulting column names
will be the Series index.
- >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
+ >>> df.apply(lambda x: pd.Series([1, 2], index=["foo", "bar"]), axis=1)
foo bar
0 1 2
1 1 2
@@ -10000,7 +10092,7 @@ def apply(
and broadcast it along the axis. The resulting column names will
be the originals.
- >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
+ >>> df.apply(lambda x: [1, 2], axis=1, result_type="broadcast")
A B
0 1 2
1 1 2
@@ -10073,7 +10165,7 @@ def map(
>>> df_copy = df.copy()
>>> df_copy.iloc[0, 0] = pd.NA
- >>> df_copy.map(lambda x: len(str(x)), na_action='ignore')
+ >>> df_copy.map(lambda x: len(str(x)), na_action="ignore")
0 1
0 NaN 4
1 5.0 5
@@ -10096,7 +10188,7 @@ def map(
But it's better to avoid map in that case.
- >>> df ** 2
+ >>> df**2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
@@ -10299,8 +10391,12 @@ def join(
Examples
--------
- >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
- ... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "key": ["K0", "K1", "K2", "K3", "K4", "K5"],
+ ... "A": ["A0", "A1", "A2", "A3", "A4", "A5"],
+ ... }
+ ... )
>>> df
key A
@@ -10311,8 +10407,7 @@ def join(
4 K4 A4
5 K5 A5
- >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
- ... 'B': ['B0', 'B1', 'B2']})
+ >>> other = pd.DataFrame({"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]})
>>> other
key B
@@ -10322,7 +10417,7 @@ def join(
Join DataFrames using their indexes.
- >>> df.join(other, lsuffix='_caller', rsuffix='_other')
+ >>> df.join(other, lsuffix="_caller", rsuffix="_other")
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
@@ -10335,7 +10430,7 @@ def join(
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
- >>> df.set_index('key').join(other.set_index('key'))
+ >>> df.set_index("key").join(other.set_index("key"))
A B
key
K0 A0 B0
@@ -10350,7 +10445,7 @@ def join(
any column in `df`. This method preserves the original DataFrame's
index in the result.
- >>> df.join(other.set_index('key'), on='key')
+ >>> df.join(other.set_index("key"), on="key")
key A B
0 K0 A0 B0
1 K1 A1 B1
@@ -10361,8 +10456,12 @@ def join(
Using non-unique key values shows how they are matched.
- >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K1', 'K3', 'K0', 'K1'],
- ... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "key": ["K0", "K1", "K1", "K3", "K0", "K1"],
+ ... "A": ["A0", "A1", "A2", "A3", "A4", "A5"],
+ ... }
+ ... )
>>> df
key A
@@ -10373,7 +10472,7 @@ def join(
4 K0 A4
5 K1 A5
- >>> df.join(other.set_index('key'), on='key', validate='m:1')
+ >>> df.join(other.set_index("key"), on="key", validate="m:1")
key A B
0 K0 A0 B0
1 K1 A1 B1
@@ -10529,8 +10628,10 @@ def round(
Examples
--------
- >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
- ... columns=['dogs', 'cats'])
+ >>> df = pd.DataFrame(
+ ... [(0.21, 0.32), (0.01, 0.67), (0.66, 0.03), (0.21, 0.18)],
+ ... columns=["dogs", "cats"],
+ ... )
>>> df
dogs cats
0 0.21 0.32
@@ -10552,7 +10653,7 @@ def round(
specified with the column names as key and the number of decimal
places as value
- >>> df.round({'dogs': 1, 'cats': 0})
+ >>> df.round({"dogs": 1, "cats": 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
@@ -10563,7 +10664,7 @@ def round(
specified with the column names as index and the number of
decimal places as value
- >>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
+ >>> decimals = pd.Series([0, 1], index=["cats", "dogs"])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
@@ -10675,15 +10776,18 @@ def corr(
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
- >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
- ... columns=['dogs', 'cats'])
+ >>> df = pd.DataFrame(
+ ... [(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)],
+ ... columns=["dogs", "cats"],
+ ... )
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
- >>> df = pd.DataFrame([(1, 1), (2, np.nan), (np.nan, 3), (4, 4)],
- ... columns=['dogs', 'cats'])
+ >>> df = pd.DataFrame(
+ ... [(1, 1), (2, np.nan), (np.nan, 3), (4, 4)], columns=["dogs", "cats"]
+ ... )
>>> df.corr(min_periods=3)
dogs cats
dogs 1.0 NaN
@@ -10809,16 +10913,18 @@ def cov(
Examples
--------
- >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
- ... columns=['dogs', 'cats'])
+ >>> df = pd.DataFrame(
+ ... [(1, 2), (0, 3), (2, 0), (1, 1)], columns=["dogs", "cats"]
+ ... )
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
- >>> df = pd.DataFrame(np.random.randn(1000, 5),
- ... columns=['a', 'b', 'c', 'd', 'e'])
+ >>> df = pd.DataFrame(
+ ... np.random.randn(1000, 5), columns=["a", "b", "c", "d", "e"]
+ ... )
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
@@ -10834,10 +10940,9 @@ def cov(
each column pair in order to have a valid result:
>>> np.random.seed(42)
- >>> df = pd.DataFrame(np.random.randn(20, 3),
- ... columns=['a', 'b', 'c'])
- >>> df.loc[df.index[:5], 'a'] = np.nan
- >>> df.loc[df.index[5:10], 'b'] = np.nan
+ >>> df = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])
+ >>> df.loc[df.index[:5], "a"] = np.nan
+ >>> df.loc[df.index[5:10], "b"] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
@@ -10917,10 +11022,12 @@ def corrwith(
--------
>>> index = ["a", "b", "c", "d", "e"]
>>> columns = ["one", "two", "three", "four"]
- >>> df1 = pd.DataFrame(np.arange(20).reshape(5, 4),
- ... index=index, columns=columns)
- >>> df2 = pd.DataFrame(np.arange(16).reshape(4, 4),
- ... index=index[:4], columns=columns)
+ >>> df1 = pd.DataFrame(
+ ... np.arange(20).reshape(5, 4), index=index, columns=columns
+ ... )
+ >>> df2 = pd.DataFrame(
+ ... np.arange(16).reshape(4, 4), index=index[:4], columns=columns
+ ... )
>>> df1.corrwith(df2)
one 1.0
two 1.0
@@ -11035,10 +11142,13 @@ def count(self, axis: Axis = 0, numeric_only: bool = False):
--------
Constructing DataFrame from a dictionary:
- >>> df = pd.DataFrame({"Person":
- ... ["John", "Myla", "Lewis", "John", "Myla"],
- ... "Age": [24., np.nan, 21., 33, 26],
- ... "Single": [False, True, True, True, False]})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "Person": ["John", "Myla", "Lewis", "John", "Myla"],
+ ... "Age": [24.0, np.nan, 21.0, 33, 26],
+ ... "Single": [False, True, True, True, False],
+ ... }
+ ... )
>>> df
Person Age Single
0 John 24.0 False
@@ -11057,7 +11167,7 @@ def count(self, axis: Axis = 0, numeric_only: bool = False):
Counts for each **row**:
- >>> df.count(axis='columns')
+ >>> df.count(axis="columns")
0 3
1 2
2 3
@@ -11467,7 +11577,7 @@ def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series:
Examples
--------
- >>> df = pd.DataFrame({'A': [4, 5, 6], 'B': [4, 1, 1]})
+ >>> df = pd.DataFrame({"A": [4, 5, 6], "B": [4, 1, 1]})
>>> df.nunique()
A 3
B 2
@@ -11600,12 +11710,16 @@ def mode(
Examples
--------
- >>> df = pd.DataFrame([('bird', 2, 2),
- ... ('mammal', 4, np.nan),
- ... ('arthropod', 8, 0),
- ... ('bird', 2, np.nan)],
- ... index=('falcon', 'horse', 'spider', 'ostrich'),
- ... columns=('species', 'legs', 'wings'))
+ >>> df = pd.DataFrame(
+ ... [
+ ... ("bird", 2, 2),
+ ... ("mammal", 4, np.nan),
+ ... ("arthropod", 8, 0),
+ ... ("bird", 2, np.nan),
+ ... ],
+ ... index=("falcon", "horse", "spider", "ostrich"),
+ ... columns=("species", "legs", "wings"),
+ ... )
>>> df
species legs wings
falcon bird 2 2.0
@@ -11639,7 +11753,7 @@ def mode(
To compute the mode over columns and not rows, use the axis parameter:
- >>> df.mode(axis='columns', numeric_only=True)
+ >>> df.mode(axis="columns", numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
@@ -11746,24 +11860,25 @@ def quantile(
Examples
--------
- >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
- ... columns=['a', 'b'])
- >>> df.quantile(.1)
+ >>> df = pd.DataFrame(
+ ... np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), columns=["a", "b"]
+ ... )
+ >>> df.quantile(0.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
- >>> df.quantile([.1, .5])
+ >>> df.quantile([0.1, 0.5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `method='table'` will compute the quantile over all columns.
- >>> df.quantile(.1, method="table", interpolation="nearest")
+ >>> df.quantile(0.1, method="table", interpolation="nearest")
a 1
b 1
Name: 0.1, dtype: int64
- >>> df.quantile([.1, .5], method="table", interpolation="nearest")
+ >>> df.quantile([0.1, 0.5], method="table", interpolation="nearest")
a b
0.1 1 1
0.5 3 100
@@ -11771,11 +11886,13 @@ def quantile(
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
- >>> df = pd.DataFrame({'A': [1, 2],
- ... 'B': [pd.Timestamp('2010'),
- ... pd.Timestamp('2011')],
- ... 'C': [pd.Timedelta('1 days'),
- ... pd.Timedelta('2 days')]})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "A": [1, 2],
+ ... "B": [pd.Timestamp("2010"), pd.Timestamp("2011")],
+ ... "C": [pd.Timedelta("1 days"), pd.Timedelta("2 days")],
+ ... }
+ ... )
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
@@ -11907,8 +12024,8 @@ def to_timestamp(
Examples
--------
- >>> idx = pd.PeriodIndex(['2023', '2024'], freq='Y')
- >>> d = {'col1': [1, 2], 'col2': [3, 4]}
+ >>> idx = pd.PeriodIndex(["2023", "2024"], freq="Y")
+ >>> d = {"col1": [1, 2], "col2": [3, 4]}
>>> df1 = pd.DataFrame(data=d, index=idx)
>>> df1
col1 col2
@@ -11928,7 +12045,7 @@ def to_timestamp(
Using `freq` which is the offset that the Timestamps will have
>>> df2 = pd.DataFrame(data=d, index=idx)
- >>> df2 = df2.to_timestamp(freq='M')
+ >>> df2 = df2.to_timestamp(freq="M")
>>> df2
col1 col2
2023-01-31 1 3
@@ -12045,8 +12162,9 @@ def isin(self, values: Series | DataFrame | Sequence | Mapping) -> DataFrame:
Examples
--------
- >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
- ... index=['falcon', 'dog'])
+ >>> df = pd.DataFrame(
+ ... {"num_legs": [2, 4], "num_wings": [2, 0]}, index=["falcon", "dog"]
+ ... )
>>> df
num_legs num_wings
falcon 2 2
@@ -12070,7 +12188,7 @@ def isin(self, values: Series | DataFrame | Sequence | Mapping) -> DataFrame:
When ``values`` is a dict, we can pass values to check for each
column separately:
- >>> df.isin({'num_wings': [0, 3]})
+ >>> df.isin({"num_wings": [0, 3]})
num_legs num_wings
falcon False False
dog False True
@@ -12079,8 +12197,9 @@ def isin(self, values: Series | DataFrame | Sequence | Mapping) -> DataFrame:
match. Note that 'falcon' does not match based on the number of legs
in other.
- >>> other = pd.DataFrame({'num_legs': [8, 3], 'num_wings': [0, 2]},
- ... index=['spider', 'falcon'])
+ >>> other = pd.DataFrame(
+ ... {"num_legs": [8, 3], "num_wings": [0, 2]}, index=["spider", "falcon"]
+ ... )
>>> df.isin(other)
num_legs num_wings
falcon False True
@@ -12271,9 +12390,9 @@ def values(self) -> np.ndarray:
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
- >>> df = pd.DataFrame({'age': [3, 29],
- ... 'height': [94, 170],
- ... 'weight': [31, 115]})
+ >>> df = pd.DataFrame(
+ ... {"age": [3, 29], "height": [94, 170], "weight": [31, 115]}
+ ... )
>>> df
age height weight
0 3 94 31
@@ -12291,10 +12410,14 @@ def values(self) -> np.ndarray:
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
- >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
- ... ('lion', 80.5, 1),
- ... ('monkey', np.nan, None)],
- ... columns=('name', 'max_speed', 'rank'))
+ >>> df2 = pd.DataFrame(
+ ... [
+ ... ("parrot", 24.0, "second"),
+ ... ("lion", 80.5, 1),
+ ... ("monkey", np.nan, None),
+ ... ],
+ ... columns=("name", "max_speed", "rank"),
+ ... )
>>> df2.dtypes
name object
max_speed float64
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 93c2afab51d2c..3c71784ad81c4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -352,7 +352,7 @@ def attrs(self) -> dict[Hashable, Any]:
For DataFrame:
- >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
+ >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> df.attrs = {"A": [10, 20, 30]}
>>> df.attrs
{'A': [10, 20, 30]}
@@ -670,11 +670,11 @@ def ndim(self) -> int:
Examples
--------
- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
+ >>> s = pd.Series({"a": 1, "b": 2, "c": 3})
>>> s.ndim
1
- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
+ >>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
>>> df.ndim
2
"""
@@ -695,11 +695,11 @@ def size(self) -> int:
Examples
--------
- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
+ >>> s = pd.Series({"a": 1, "b": 2, "c": 3})
>>> s.size
3
- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
+ >>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
>>> df.size
4
"""
@@ -867,15 +867,15 @@ def droplevel(self, level: IndexLabel, axis: Axis = 0) -> Self:
Examples
--------
- >>> df = pd.DataFrame([
- ... [1, 2, 3, 4],
- ... [5, 6, 7, 8],
- ... [9, 10, 11, 12]
- ... ]).set_index([0, 1]).rename_axis(['a', 'b'])
+ >>> df = (
+ ... pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
+ ... .set_index([0, 1])
+ ... .rename_axis(["a", "b"])
+ ... )
- >>> df.columns = pd.MultiIndex.from_tuples([
- ... ('c', 'e'), ('d', 'f')
- ... ], names=['level_1', 'level_2'])
+ >>> df.columns = pd.MultiIndex.from_tuples(
+ ... [("c", "e"), ("d", "f")], names=["level_1", "level_2"]
+ ... )
>>> df
level_1 c d
@@ -885,7 +885,7 @@ def droplevel(self, level: IndexLabel, axis: Axis = 0) -> Self:
5 6 7 8
9 10 11 12
- >>> df.droplevel('a')
+ >>> df.droplevel("a")
level_1 c d
level_2 e f
b
@@ -893,7 +893,7 @@ def droplevel(self, level: IndexLabel, axis: Axis = 0) -> Self:
6 7 8
10 11 12
- >>> df.droplevel('level_2', axis=1)
+ >>> df.droplevel("level_2", axis=1)
level_1 c d
a b
1 2 3 4
@@ -973,7 +973,7 @@ def squeeze(self, axis: Axis | None = None):
Squeezing is even more effective when used with DataFrames.
- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
>>> df
a b
0 1 2
@@ -982,7 +982,7 @@ def squeeze(self, axis: Axis | None = None):
Slicing a single column will produce a DataFrame with the columns
having only one value:
- >>> df_a = df[['a']]
+ >>> df_a = df[["a"]]
>>> df_a
a
0 1
@@ -990,7 +990,7 @@ def squeeze(self, axis: Axis | None = None):
So the columns can be squeezed down, resulting in a Series:
- >>> df_a.squeeze('columns')
+ >>> df_a.squeeze("columns")
0 1
1 3
Name: a, dtype: int64
@@ -998,14 +998,14 @@ def squeeze(self, axis: Axis | None = None):
Slicing a single row from a single column will produce a single
scalar DataFrame:
- >>> df_0a = df.loc[df.index < 1, ['a']]
+ >>> df_0a = df.loc[df.index < 1, ["a"]]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
- >>> df_0a.squeeze('rows')
+ >>> df_0a.squeeze("rows")
a 1
Name: 0, dtype: int64
@@ -1219,9 +1219,9 @@ def rename_axis(
--------
**DataFrame**
- >>> df = pd.DataFrame({"num_legs": [4, 4, 2],
- ... "num_arms": [0, 0, 2]},
- ... ["dog", "cat", "monkey"])
+ >>> df = pd.DataFrame(
+ ... {"num_legs": [4, 4, 2], "num_arms": [0, 0, 2]}, ["dog", "cat", "monkey"]
+ ... )
>>> df
num_legs num_arms
dog 4 0
@@ -1244,9 +1244,9 @@ def rename_axis(
**MultiIndex**
- >>> df.index = pd.MultiIndex.from_product([['mammal'],
- ... ['dog', 'cat', 'monkey']],
- ... names=['type', 'name'])
+ >>> df.index = pd.MultiIndex.from_product(
+ ... [["mammal"], ["dog", "cat", "monkey"]], names=["type", "name"]
+ ... )
>>> df
limbs num_legs num_arms
type name
@@ -1254,7 +1254,7 @@ def rename_axis(
cat 4 0
monkey 2 2
- >>> df.rename_axis(index={'type': 'class'})
+ >>> df.rename_axis(index={"type": "class"})
limbs num_legs num_arms
class name
mammal dog 4 0
@@ -1343,8 +1343,7 @@ def _set_axis_name(
Examples
--------
- >>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
- ... ["dog", "cat", "monkey"])
+ >>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
@@ -1357,7 +1356,8 @@ def _set_axis_name(
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
- ... [["mammal"], ['dog', 'cat', 'monkey']])
+ ... [["mammal"], ["dog", "cat", "monkey"]]
+ ... )
>>> df._set_axis_name(["type", "name"])
num_legs
type name
@@ -1560,9 +1560,9 @@ def bool(self) -> bool_t:
>>> pd.Series([False]).bool() # doctest: +SKIP
False
- >>> pd.DataFrame({'col': [True]}).bool() # doctest: +SKIP
+ >>> pd.DataFrame({"col": [True]}).bool() # doctest: +SKIP
True
- >>> pd.DataFrame({'col': [False]}).bool() # doctest: +SKIP
+ >>> pd.DataFrame({"col": [False]}).bool() # doctest: +SKIP
False
This is an alternative method and will only work
@@ -1635,7 +1635,7 @@ def abs(self) -> Self:
Absolute numeric values in a Series with a Timedelta element.
- >>> s = pd.Series([pd.Timedelta('1 days')])
+ >>> s = pd.Series([pd.Timedelta("1 days")])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
@@ -1643,11 +1643,9 @@ def abs(self) -> Self:
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
- >>> df = pd.DataFrame({
- ... 'a': [4, 5, 6, 7],
- ... 'b': [10, 20, 30, 40],
- ... 'c': [100, 50, -30, -50]
- ... })
+ >>> df = pd.DataFrame(
+ ... {"a": [4, 5, 6, 7], "b": [10, 20, 30, 40], "c": [100, 50, -30, -50]}
+ ... )
>>> df
a b c
0 4 10 100
@@ -1968,7 +1966,7 @@ def __iter__(self) -> Iterator:
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
+ >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> for x in df:
... print(x)
A
@@ -1990,8 +1988,9 @@ def keys(self) -> Index:
Examples
--------
- >>> d = pd.DataFrame(data={'A': [1, 2, 3], 'B': [0, 4, 8]},
- ... index=['a', 'b', 'c'])
+ >>> d = pd.DataFrame(
+ ... data={"A": [1, 2, 3], "B": [0, 4, 8]}, index=["a", "b", "c"]
+ ... )
>>> d
A B
a 1 0
@@ -2052,7 +2051,7 @@ def empty(self) -> bool_t:
--------
An example of an actual empty DataFrame. Notice the index is empty:
- >>> df_empty = pd.DataFrame({'A' : []})
+ >>> df_empty = pd.DataFrame({"A": []})
>>> df_empty
Empty DataFrame
Columns: [A]
@@ -2063,7 +2062,7 @@ def empty(self) -> bool_t:
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
- >>> df = pd.DataFrame({'A' : [np.nan]})
+ >>> df = pd.DataFrame({"A": [np.nan]})
>>> df
A
0 NaN
@@ -2072,7 +2071,7 @@ def empty(self) -> bool_t:
>>> df.dropna().empty
True
- >>> ser_empty = pd.Series({'A' : []})
+ >>> ser_empty = pd.Series({"A": []})
>>> ser_empty
A []
dtype: object
@@ -2313,35 +2312,35 @@ def to_excel(
Create, write to and save a workbook:
- >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
- ... index=['row 1', 'row 2'],
- ... columns=['col 1', 'col 2'])
+ >>> df1 = pd.DataFrame(
+ ... [["a", "b"], ["c", "d"]],
+ ... index=["row 1", "row 2"],
+ ... columns=["col 1", "col 2"],
+ ... )
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
- >>> df1.to_excel("output.xlsx",
- ... sheet_name='Sheet_name_1') # doctest: +SKIP
+ >>> df1.to_excel("output.xlsx", sheet_name="Sheet_name_1") # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
- >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
- ... df1.to_excel(writer, sheet_name='Sheet_name_1')
- ... df2.to_excel(writer, sheet_name='Sheet_name_2')
+ >>> with pd.ExcelWriter("output.xlsx") as writer: # doctest: +SKIP
+ ... df1.to_excel(writer, sheet_name="Sheet_name_1")
+ ... df2.to_excel(writer, sheet_name="Sheet_name_2")
ExcelWriter can also be used to append to an existing Excel file:
- >>> with pd.ExcelWriter('output.xlsx',
- ... mode='a') as writer: # doctest: +SKIP
- ... df1.to_excel(writer, sheet_name='Sheet_name_3')
+ >>> with pd.ExcelWriter("output.xlsx", mode="a") as writer: # doctest: +SKIP
+ ... df1.to_excel(writer, sheet_name="Sheet_name_3")
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
- >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
+ >>> df1.to_excel("output1.xlsx", engine="xlsxwriter") # doctest: +SKIP
"""
if engine_kwargs is None:
engine_kwargs = {}
@@ -2768,23 +2767,24 @@ def to_hdf(
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
- ... index=['a', 'b', 'c']) # doctest: +SKIP
- >>> df.to_hdf('data.h5', key='df', mode='w') # doctest: +SKIP
+ >>> df = pd.DataFrame(
+ ... {"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "c"]
+ ... ) # doctest: +SKIP
+ >>> df.to_hdf("data.h5", key="df", mode="w") # doctest: +SKIP
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4]) # doctest: +SKIP
- >>> s.to_hdf('data.h5', key='s') # doctest: +SKIP
+ >>> s.to_hdf("data.h5", key="s") # doctest: +SKIP
Reading from HDF file:
- >>> pd.read_hdf('data.h5', 'df') # doctest: +SKIP
+ >>> pd.read_hdf("data.h5", "df") # doctest: +SKIP
A B
a 1 4
b 2 5
c 3 6
- >>> pd.read_hdf('data.h5', 's') # doctest: +SKIP
+ >>> pd.read_hdf("data.h5", "s") # doctest: +SKIP
0 1
1 2
2 3
@@ -3079,7 +3079,9 @@ def to_pickle(
Examples
--------
- >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP
+ >>> original_df = pd.DataFrame(
+ ... {{"foo": range(5), "bar": range(5, 10)}}
+ ... ) # doctest: +SKIP
>>> original_df # doctest: +SKIP
foo bar
0 0 5
@@ -3097,7 +3099,7 @@ def to_pickle(
2 2 7
3 3 8
4 4 9
- """ # noqa: E501
+ """
from pandas.io.pickle import to_pickle
to_pickle(
@@ -3152,9 +3154,9 @@ def to_clipboard(
--------
Copy the contents of a DataFrame to the clipboard.
- >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
+ >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
- >>> df.to_clipboard(sep=',') # doctest: +SKIP
+ >>> df.to_clipboard(sep=",") # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
@@ -3163,7 +3165,7 @@ def to_clipboard(
We can omit the index by passing the keyword `index` and setting
it to false.
- >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
+ >>> df.to_clipboard(sep=",", index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
@@ -3174,6 +3176,7 @@ def to_clipboard(
.. code-block:: python
import pyperclip
+
html = df.style.to_html()
pyperclip.copy(html)
"""
@@ -3203,12 +3206,15 @@ def to_xarray(self):
Examples
--------
- >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
- ... ('parrot', 'bird', 24.0, 2),
- ... ('lion', 'mammal', 80.5, 4),
- ... ('monkey', 'mammal', np.nan, 4)],
- ... columns=['name', 'class', 'max_speed',
- ... 'num_legs'])
+ >>> df = pd.DataFrame(
+ ... [
+ ... ("falcon", "bird", 389.0, 2),
+ ... ("parrot", "bird", 24.0, 2),
+ ... ("lion", "mammal", 80.5, 4),
+ ... ("monkey", "mammal", np.nan, 4),
+ ... ],
+ ... columns=["name", "class", "max_speed", "num_legs"],
+ ... )
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
@@ -3227,19 +3233,23 @@ class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
- >>> df['max_speed'].to_xarray()
+ >>> df["max_speed"].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
- >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
- ... '2018-01-02', '2018-01-02'])
- >>> df_multiindex = pd.DataFrame({'date': dates,
- ... 'animal': ['falcon', 'parrot',
- ... 'falcon', 'parrot'],
- ... 'speed': [350, 18, 361, 15]})
- >>> df_multiindex = df_multiindex.set_index(['date', 'animal'])
+ >>> dates = pd.to_datetime(
+ ... ["2018-01-01", "2018-01-01", "2018-01-02", "2018-01-02"]
+ ... )
+ >>> df_multiindex = pd.DataFrame(
+ ... {
+ ... "date": dates,
+ ... "animal": ["falcon", "parrot", "falcon", "parrot"],
+ ... "speed": [350, 18, 361, 15],
+ ... }
+ ... )
+ >>> df_multiindex = df_multiindex.set_index(["date", "animal"])
>>> df_multiindex
speed
@@ -3862,31 +3872,34 @@ def to_csv(
--------
Create 'out.csv' containing 'df' without indices
- >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'],
- ... 'mask': ['red', 'purple'],
- ... 'weapon': ['sai', 'bo staff']}})
- >>> df.to_csv('out.csv', index=False) # doctest: +SKIP
+ >>> df = pd.DataFrame(
+ ... [["Raphael", "red", "sai"], ["Donatello", "purple", "bo staff"]],
+ ... columns=["name", "mask", "weapon"],
+ ... )
+ >>> df.to_csv("out.csv", index=False) # doctest: +SKIP
Create 'out.zip' containing 'out.csv'
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
- >>> compression_opts = dict(method='zip',
- ... archive_name='out.csv') # doctest: +SKIP
- >>> df.to_csv('out.zip', index=False,
- ... compression=compression_opts) # doctest: +SKIP
+ >>> compression_opts = dict(
+ ... method="zip", archive_name="out.csv"
+ ... ) # doctest: +SKIP
+ >>> df.to_csv(
+ ... "out.zip", index=False, compression=compression_opts
+ ... ) # doctest: +SKIP
To write a csv file to a new folder or nested folder you will first
need to create it using either Pathlib or os:
>>> from pathlib import Path # doctest: +SKIP
- >>> filepath = Path('folder/subfolder/out.csv') # doctest: +SKIP
+ >>> filepath = Path("folder/subfolder/out.csv") # doctest: +SKIP
>>> filepath.parent.mkdir(parents=True, exist_ok=True) # doctest: +SKIP
>>> df.to_csv(filepath) # doctest: +SKIP
>>> import os # doctest: +SKIP
- >>> os.makedirs('folder/subfolder', exist_ok=True) # doctest: +SKIP
- >>> df.to_csv('folder/subfolder/out.csv') # doctest: +SKIP
+ >>> os.makedirs("folder/subfolder", exist_ok=True) # doctest: +SKIP
+ >>> df.to_csv("folder/subfolder/out.csv") # doctest: +SKIP
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
@@ -3955,12 +3968,16 @@ def take(self, indices, axis: Axis = 0, **kwargs) -> Self:
Examples
--------
- >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
- ... ('parrot', 'bird', 24.0),
- ... ('lion', 'mammal', 80.5),
- ... ('monkey', 'mammal', np.nan)],
- ... columns=['name', 'class', 'max_speed'],
- ... index=[0, 2, 3, 1])
+ >>> df = pd.DataFrame(
+ ... [
+ ... ("falcon", "bird", 389.0),
+ ... ("parrot", "bird", 24.0),
+ ... ("lion", "mammal", 80.5),
+ ... ("monkey", "mammal", np.nan),
+ ... ],
+ ... columns=["name", "class", "max_speed"],
+ ... index=[0, 2, 3, 1],
+ ... )
>>> df
name class max_speed
0 falcon bird 389.0
@@ -4086,13 +4103,15 @@ def xs(
Examples
--------
- >>> d = {'num_legs': [4, 4, 2, 2],
- ... 'num_wings': [0, 0, 2, 2],
- ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
- ... 'animal': ['cat', 'dog', 'bat', 'penguin'],
- ... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
+ >>> d = {
+ ... "num_legs": [4, 4, 2, 2],
+ ... "num_wings": [0, 0, 2, 2],
+ ... "class": ["mammal", "mammal", "mammal", "bird"],
+ ... "animal": ["cat", "dog", "bat", "penguin"],
+ ... "locomotion": ["walks", "walks", "flies", "walks"],
+ ... }
>>> df = pd.DataFrame(data=d)
- >>> df = df.set_index(['class', 'animal', 'locomotion'])
+ >>> df = df.set_index(["class", "animal", "locomotion"])
>>> df
num_legs num_wings
class animal locomotion
@@ -4103,7 +4122,7 @@ class animal locomotion
Get values at specified index
- >>> df.xs('mammal')
+ >>> df.xs("mammal")
num_legs num_wings
animal locomotion
cat walks 4 0
@@ -4112,29 +4131,28 @@ class animal locomotion
Get values at several indexes
- >>> df.xs(('mammal', 'dog', 'walks'))
+ >>> df.xs(("mammal", "dog", "walks"))
num_legs 4
num_wings 0
Name: (mammal, dog, walks), dtype: int64
Get values at specified index and level
- >>> df.xs('cat', level=1)
+ >>> df.xs("cat", level=1)
num_legs num_wings
class locomotion
mammal walks 4 0
Get values at several indexes and levels
- >>> df.xs(('bird', 'walks'),
- ... level=[0, 'locomotion'])
+ >>> df.xs(("bird", "walks"), level=[0, "locomotion"])
num_legs num_wings
animal
penguin 2 2
Get values at specified column and axis
- >>> df.xs('num_wings', axis=1)
+ >>> df.xs("num_wings", axis=1)
class animal locomotion
mammal cat walks 0
dog walks 0
@@ -4333,8 +4351,8 @@ def get(self, key, default=None):
2014-02-14 22.0 medium
2014-02-15 35.0 medium
- >>> ser = df['windspeed']
- >>> ser.get('2014-02-13')
+ >>> ser = df["windspeed"]
+ >>> ser.get("2014-02-13")
'high'
If the key isn't found, the default value will be used.
@@ -4342,7 +4360,7 @@ def get(self, key, default=None):
>>> df.get(["temp_celsius", "temp_kelvin"], default="default_value")
'default_value'
- >>> ser.get('2014-02-10', '[unknown]')
+ >>> ser.get("2014-02-10", "[unknown]")
'[unknown]'
"""
try:
@@ -4434,14 +4452,16 @@ def reindex_like(
Examples
--------
- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
- ... [31, 87.8, 'high'],
- ... [22, 71.6, 'medium'],
- ... [35, 95, 'medium']],
- ... columns=['temp_celsius', 'temp_fahrenheit',
- ... 'windspeed'],
- ... index=pd.date_range(start='2014-02-12',
- ... end='2014-02-15', freq='D'))
+ >>> df1 = pd.DataFrame(
+ ... [
+ ... [24.3, 75.7, "high"],
+ ... [31, 87.8, "high"],
+ ... [22, 71.6, "medium"],
+ ... [35, 95, "medium"],
+ ... ],
+ ... columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
+ ... index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
+ ... )
>>> df1
temp_celsius temp_fahrenheit windspeed
@@ -4450,12 +4470,11 @@ def reindex_like(
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
- >>> df2 = pd.DataFrame([[28, 'low'],
- ... [30, 'low'],
- ... [35.1, 'medium']],
- ... columns=['temp_celsius', 'windspeed'],
- ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
- ... '2014-02-15']))
+ >>> df2 = pd.DataFrame(
+ ... [[28, "low"], [30, "low"], [35.1, "medium"]],
+ ... columns=["temp_celsius", "windspeed"],
+ ... index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
+ ... )
>>> df2
temp_celsius windspeed
@@ -4698,14 +4717,14 @@ def add_prefix(self, prefix: str, axis: Axis | None = None) -> Self:
3 4
dtype: int64
- >>> s.add_prefix('item_')
+ >>> s.add_prefix("item_")
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
- >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
+ >>> df = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]})
>>> df
A B
0 1 3
@@ -4713,7 +4732,7 @@ def add_prefix(self, prefix: str, axis: Axis | None = None) -> Self:
2 3 5
3 4 6
- >>> df.add_prefix('col_')
+ >>> df.add_prefix("col_")
col_A col_B
0 1 3
1 2 4
@@ -4772,14 +4791,14 @@ def add_suffix(self, suffix: str, axis: Axis | None = None) -> Self:
3 4
dtype: int64
- >>> s.add_suffix('_item')
+ >>> s.add_suffix("_item")
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
- >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
+ >>> df = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]})
>>> df
A B
0 1 3
@@ -4787,7 +4806,7 @@ def add_suffix(self, suffix: str, axis: Axis | None = None) -> Self:
2 3 5
3 4 6
- >>> df.add_suffix('_col')
+ >>> df.add_suffix("_col")
A_col B_col
0 1 3
1 2 4
@@ -4904,12 +4923,14 @@ def sort_values(
Examples
--------
- >>> df = pd.DataFrame({
- ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
- ... 'col2': [2, 1, 9, 8, 7, 4],
- ... 'col3': [0, 1, 9, 4, 2, 3],
- ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F']
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "col1": ["A", "A", "B", np.nan, "D", "C"],
+ ... "col2": [2, 1, 9, 8, 7, 4],
+ ... "col3": [0, 1, 9, 4, 2, 3],
+ ... "col4": ["a", "B", "c", "D", "e", "F"],
+ ... }
+ ... )
>>> df
col1 col2 col3 col4
0 A 2 0 a
@@ -4921,7 +4942,7 @@ def sort_values(
Sort by col1
- >>> df.sort_values(by=['col1'])
+ >>> df.sort_values(by=["col1"])
col1 col2 col3 col4
0 A 2 0 a
1 A 1 1 B
@@ -4932,7 +4953,7 @@ def sort_values(
Sort by multiple columns
- >>> df.sort_values(by=['col1', 'col2'])
+ >>> df.sort_values(by=["col1", "col2"])
col1 col2 col3 col4
1 A 1 1 B
0 A 2 0 a
@@ -4943,7 +4964,7 @@ def sort_values(
Sort Descending
- >>> df.sort_values(by='col1', ascending=False)
+ >>> df.sort_values(by="col1", ascending=False)
col1 col2 col3 col4
4 D 7 2 e
5 C 4 3 F
@@ -4954,7 +4975,7 @@ def sort_values(
Putting NAs first
- >>> df.sort_values(by='col1', ascending=False, na_position='first')
+ >>> df.sort_values(by="col1", ascending=False, na_position="first")
col1 col2 col3 col4
3 NaN 8 4 D
4 D 7 2 e
@@ -4965,7 +4986,7 @@ def sort_values(
Sorting with a key function
- >>> df.sort_values(by='col4', key=lambda col: col.str.lower())
+ >>> df.sort_values(by="col4", key=lambda col: col.str.lower())
col1 col2 col3 col4
0 A 2 0 a
1 A 1 1 B
@@ -4977,10 +4998,12 @@ def sort_values(
Natural sort with the key argument,
using the `natsort <https://github.com/SethMMorton/natsort>` package.
- >>> df = pd.DataFrame({
- ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'],
- ... "value": [10, 20, 30, 40, 50]
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "time": ["0hr", "128hr", "72hr", "48hr", "96hr"],
+ ... "value": [10, 20, 30, 40, 50],
+ ... }
+ ... )
>>> df
time value
0 0hr 10
@@ -4990,8 +5013,7 @@ def sort_values(
4 96hr 50
>>> from natsort import index_natsorted
>>> df.sort_values(
- ... by="time",
- ... key=lambda x: np.argsort(index_natsorted(df["time"]))
+ ... by="time", key=lambda x: np.argsort(index_natsorted(df["time"]))
... )
time value
0 0hr 10
@@ -5197,10 +5219,13 @@ def reindex(
Create a dataframe with some fictional data.
- >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
- >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301],
- ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}},
- ... index=index)
+ >>> index = ["Firefox", "Chrome", "Safari", "IE10", "Konqueror"]
+ >>> columns = ["http_status", "response_time"]
+ >>> df = pd.DataFrame(
+ ... [[200, 0.04], [200, 0.02], [404, 0.07], [404, 0.08], [301, 1.0]],
+ ... columns=columns,
+ ... index=index,
+ ... )
>>> df
http_status response_time
Firefox 200 0.04
@@ -5213,8 +5238,7 @@ def reindex(
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
- >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
- ... 'Chrome']
+ >>> new_index = ["Safari", "Iceweasel", "Comodo Dragon", "IE10", "Chrome"]
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
@@ -5236,7 +5260,7 @@ def reindex(
IE10 404 0.08
Chrome 200 0.02
- >>> df.reindex(new_index, fill_value='missing')
+ >>> df.reindex(new_index, fill_value="missing")
http_status response_time
Safari 404 0.07
Iceweasel missing missing
@@ -5246,7 +5270,7 @@ def reindex(
We can also reindex the columns.
- >>> df.reindex(columns=['http_status', 'user_agent'])
+ >>> df.reindex(columns=["http_status", "user_agent"])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
@@ -5256,7 +5280,7 @@ def reindex(
Or we can use "axis-style" keyword arguments
- >>> df.reindex(['http_status', 'user_agent'], axis="columns")
+ >>> df.reindex(["http_status", "user_agent"], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
@@ -5269,9 +5293,10 @@ def reindex(
monotonically increasing index (for example, a sequence
of dates).
- >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
- >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}},
- ... index=date_index)
+ >>> date_index = pd.date_range("1/1/2010", periods=6, freq="D")
+ >>> df2 = pd.DataFrame(
+ ... {{"prices": [100, 101, np.nan, 100, 89, 88]}}, index=date_index
+ ... )
>>> df2
prices
2010-01-01 100.0
@@ -5284,7 +5309,7 @@ def reindex(
Suppose we decide to expand the dataframe to cover a wider
date range.
- >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
+ >>> date_index2 = pd.date_range("12/29/2009", periods=10, freq="D")
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
@@ -5306,7 +5331,7 @@ def reindex(
For example, to back-propagate the last valid value to fill the ``NaN``
values, pass ``bfill`` as an argument to the ``method`` keyword.
- >>> df2.reindex(date_index2, method='bfill')
+ >>> df2.reindex(date_index2, method="bfill")
prices
2009-12-29 100.0
2009-12-30 100.0
@@ -5515,28 +5540,30 @@ def filter(
Examples
--------
- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
- ... index=['mouse', 'rabbit'],
- ... columns=['one', 'two', 'three'])
+ >>> df = pd.DataFrame(
+ ... np.array(([1, 2, 3], [4, 5, 6])),
+ ... index=["mouse", "rabbit"],
+ ... columns=["one", "two", "three"],
+ ... )
>>> df
one two three
mouse 1 2 3
rabbit 4 5 6
>>> # select columns by name
- >>> df.filter(items=['one', 'three'])
+ >>> df.filter(items=["one", "three"])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
- >>> df.filter(regex='e$', axis=1)
+ >>> df.filter(regex="e$", axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
- >>> df.filter(like='bbi', axis=0)
+ >>> df.filter(like="bbi", axis=0)
one two three
rabbit 4 5 6
"""
@@ -5608,8 +5635,21 @@ def head(self, n: int = 5) -> Self:
Examples
--------
- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
- ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "animal": [
+ ... "alligator",
+ ... "bee",
+ ... "falcon",
+ ... "lion",
+ ... "monkey",
+ ... "parrot",
+ ... "shark",
+ ... "whale",
+ ... "zebra",
+ ... ]
+ ... }
+ ... )
>>> df
animal
0 alligator
@@ -5685,8 +5725,21 @@ def tail(self, n: int = 5) -> Self:
Examples
--------
- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
- ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "animal": [
+ ... "alligator",
+ ... "bee",
+ ... "falcon",
+ ... "lion",
+ ... "monkey",
+ ... "parrot",
+ ... "shark",
+ ... "whale",
+ ... "zebra",
+ ... ]
+ ... }
+ ... )
>>> df
animal
0 alligator
@@ -5811,10 +5864,14 @@ def sample(
Examples
--------
- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
- ... 'num_wings': [2, 0, 0, 0],
- ... 'num_specimen_seen': [10, 2, 1, 8]},
- ... index=['falcon', 'dog', 'spider', 'fish'])
+ >>> df = pd.DataFrame(
+ ... {
+ ... "num_legs": [2, 4, 8, 0],
+ ... "num_wings": [2, 0, 0, 0],
+ ... "num_specimen_seen": [10, 2, 1, 8],
+ ... },
+ ... index=["falcon", "dog", "spider", "fish"],
+ ... )
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
@@ -5826,7 +5883,7 @@ def sample(
Note that we use `random_state` to ensure the reproducibility of
the examples.
- >>> df['num_legs'].sample(n=3, random_state=1)
+ >>> df["num_legs"].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
@@ -5856,7 +5913,7 @@ def sample(
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
- >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
+ >>> df.sample(n=2, weights="num_specimen_seen", random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
@@ -5949,7 +6006,7 @@ def pipe(
Constructing a income DataFrame from a dictionary.
>>> data = [[8000, 1000], [9500, np.nan], [5000, 2000]]
- >>> df = pd.DataFrame(data, columns=['Salary', 'Others'])
+ >>> df = pd.DataFrame(data, columns=["Salary", "Others"])
>>> df
Salary Others
0 8000 1000.0
@@ -5971,7 +6028,8 @@ def pipe(
>>> subtract_national_insurance(
... subtract_state_tax(subtract_federal_tax(df), rate=0.12),
... rate=0.05,
- ... rate_increase=0.02) # doctest: +SKIP
+ ... rate_increase=0.02,
+ ... ) # doctest: +SKIP
You can write
@@ -5997,9 +6055,7 @@ def pipe(
... df.pipe(subtract_federal_tax)
... .pipe(subtract_state_tax, rate=0.12)
... .pipe(
- ... (subtract_national_insurance, 'df'),
- ... rate=0.05,
- ... rate_increase=0.02
+ ... (subtract_national_insurance, "df"), rate=0.05, rate_increase=0.02
... )
... )
Salary Others
@@ -6209,10 +6265,14 @@ def dtypes(self):
Examples
--------
- >>> df = pd.DataFrame({'float': [1.0],
- ... 'int': [1],
- ... 'datetime': [pd.Timestamp('20180310')],
- ... 'string': ['foo']})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "float": [1.0],
+ ... "int": [1],
+ ... "datetime": [pd.Timestamp("20180310")],
+ ... "string": ["foo"],
+ ... }
+ ... )
>>> df.dtypes
float float64
int int64
@@ -6283,7 +6343,7 @@ def astype(
--------
Create a DataFrame:
- >>> d = {'col1': [1, 2], 'col2': [3, 4]}
+ >>> d = {"col1": [1, 2], "col2": [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df.dtypes
col1 int64
@@ -6292,33 +6352,33 @@ def astype(
Cast all columns to int32:
- >>> df.astype('int32').dtypes
+ >>> df.astype("int32").dtypes
col1 int32
col2 int32
dtype: object
Cast col1 to int32 using a dictionary:
- >>> df.astype({'col1': 'int32'}).dtypes
+ >>> df.astype({"col1": "int32"}).dtypes
col1 int32
col2 int64
dtype: object
Create a series:
- >>> ser = pd.Series([1, 2], dtype='int32')
+ >>> ser = pd.Series([1, 2], dtype="int32")
>>> ser
0 1
1 2
dtype: int32
- >>> ser.astype('int64')
+ >>> ser.astype("int64")
0 1
1 2
dtype: int64
Convert to categorical type:
- >>> ser.astype('category')
+ >>> ser.astype("category")
0 1
1 2
dtype: category
@@ -6327,8 +6387,7 @@ def astype(
Convert to ordered categorical type with custom ordering:
>>> from pandas.api.types import CategoricalDtype
- >>> cat_dtype = CategoricalDtype(
- ... categories=[2, 1], ordered=True)
+ >>> cat_dtype = CategoricalDtype(categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
@@ -6337,7 +6396,7 @@ def astype(
Create a series of dates:
- >>> ser_date = pd.Series(pd.date_range('20200101', periods=3))
+ >>> ser_date = pd.Series(pd.date_range("20200101", periods=3))
>>> ser_date
0 2020-01-01
1 2020-01-02
@@ -6952,11 +7011,15 @@ def fillna(
Examples
--------
- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
- ... [3, 4, np.nan, 1],
- ... [np.nan, np.nan, np.nan, np.nan],
- ... [np.nan, 3, np.nan, 4]],
- ... columns=list("ABCD"))
+ >>> df = pd.DataFrame(
+ ... [
+ ... [np.nan, 2, np.nan, 0],
+ ... [3, 4, np.nan, 1],
+ ... [np.nan, np.nan, np.nan, np.nan],
+ ... [np.nan, 3, np.nan, 4],
+ ... ],
+ ... columns=list("ABCD"),
+ ... )
>>> df
A B C D
0 NaN 2.0 NaN 0.0
@@ -7265,11 +7328,15 @@ def ffill(
Examples
--------
- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
- ... [3, 4, np.nan, 1],
- ... [np.nan, np.nan, np.nan, np.nan],
- ... [np.nan, 3, np.nan, 4]],
- ... columns=list("ABCD"))
+ >>> df = pd.DataFrame(
+ ... [
+ ... [np.nan, 2, np.nan, 0],
+ ... [3, 4, np.nan, 1],
+ ... [np.nan, np.nan, np.nan, np.nan],
+ ... [np.nan, 3, np.nan, 4],
+ ... ],
+ ... columns=list("ABCD"),
+ ... )
>>> df
A B C D
0 NaN 2.0 NaN 0.0
@@ -7460,7 +7527,7 @@ def bfill(
With DataFrame:
- >>> df = pd.DataFrame({{'A': [1, None, None, 4], 'B': [None, 5, None, 7]}})
+ >>> df = pd.DataFrame({{"A": [1, None, None, 4], "B": [None, 5, None, 7]}})
>>> df
A B
0 1.0 NaN
@@ -8009,7 +8076,7 @@ def interpolate(
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
- >>> s.interpolate(method='polynomial', order=2)
+ >>> s.interpolate(method="polynomial", order=2)
0 0.000000
1 2.000000
2 4.666667
@@ -8024,18 +8091,22 @@ def interpolate(
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
- >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
- ... (np.nan, 2.0, np.nan, np.nan),
- ... (2.0, 3.0, np.nan, 9.0),
- ... (np.nan, 4.0, -4.0, 16.0)],
- ... columns=list('abcd'))
+ >>> df = pd.DataFrame(
+ ... [
+ ... (0.0, np.nan, -1.0, 1.0),
+ ... (np.nan, 2.0, np.nan, np.nan),
+ ... (2.0, 3.0, np.nan, 9.0),
+ ... (np.nan, 4.0, -4.0, 16.0),
+ ... ],
+ ... columns=list("abcd"),
+ ... )
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
- >>> df.interpolate(method='linear', limit_direction='forward', axis=0)
+ >>> df.interpolate(method="linear", limit_direction="forward", axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
@@ -8044,7 +8115,7 @@ def interpolate(
Using polynomial interpolation.
- >>> df['d'].interpolate(method='polynomial', order=2)
+ >>> df["d"].interpolate(method="polynomial", order=2)
0 1.0
1 4.0
2 9.0
@@ -8247,24 +8318,32 @@ def asof(self, where, subset=None):
Take all columns into consideration
- >>> df = pd.DataFrame({'a': [10., 20., 30., 40., 50.],
- ... 'b': [None, None, None, None, 500]},
- ... index=pd.DatetimeIndex(['2018-02-27 09:01:00',
- ... '2018-02-27 09:02:00',
- ... '2018-02-27 09:03:00',
- ... '2018-02-27 09:04:00',
- ... '2018-02-27 09:05:00']))
- >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
- ... '2018-02-27 09:04:30']))
+ >>> df = pd.DataFrame(
+ ... {
+ ... "a": [10.0, 20.0, 30.0, 40.0, 50.0],
+ ... "b": [None, None, None, None, 500],
+ ... },
+ ... index=pd.DatetimeIndex(
+ ... [
+ ... "2018-02-27 09:01:00",
+ ... "2018-02-27 09:02:00",
+ ... "2018-02-27 09:03:00",
+ ... "2018-02-27 09:04:00",
+ ... "2018-02-27 09:05:00",
+ ... ]
+ ... ),
+ ... )
+ >>> df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
a b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN
Take a single column into consideration
- >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
- ... '2018-02-27 09:04:30']),
- ... subset=['a'])
+ >>> df.asof(
+ ... pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]),
+ ... subset=["a"],
+ ... )
a b
2018-02-27 09:03:30 30.0 NaN
2018-02-27 09:04:30 40.0 NaN
@@ -8375,11 +8454,18 @@ def isna(self) -> Self:
--------
Show which entries in a DataFrame are NA.
- >>> df = pd.DataFrame(dict(age=[5, 6, np.nan],
- ... born=[pd.NaT, pd.Timestamp('1939-05-27'),
- ... pd.Timestamp('1940-04-25')],
- ... name=['Alfred', 'Batman', ''],
- ... toy=[None, 'Batmobile', 'Joker']))
+ >>> df = pd.DataFrame(
+ ... dict(
+ ... age=[5, 6, np.nan],
+ ... born=[
+ ... pd.NaT,
+ ... pd.Timestamp("1939-05-27"),
+ ... pd.Timestamp("1940-04-25"),
+ ... ],
+ ... name=["Alfred", "Batman", ""],
+ ... toy=[None, "Batmobile", "Joker"],
+ ... )
+ ... )
>>> df
age born name toy
0 5.0 NaT Alfred None
@@ -8442,11 +8528,18 @@ def notna(self) -> Self:
--------
Show which entries in a DataFrame are not NA.
- >>> df = pd.DataFrame(dict(age=[5, 6, np.nan],
- ... born=[pd.NaT, pd.Timestamp('1939-05-27'),
- ... pd.Timestamp('1940-04-25')],
- ... name=['Alfred', 'Batman', ''],
- ... toy=[None, 'Batmobile', 'Joker']))
+ >>> df = pd.DataFrame(
+ ... dict(
+ ... age=[5, 6, np.nan],
+ ... born=[
+ ... pd.NaT,
+ ... pd.Timestamp("1939-05-27"),
+ ... pd.Timestamp("1940-04-25"),
+ ... ],
+ ... name=["Alfred", "Batman", ""],
+ ... toy=[None, "Batmobile", "Joker"],
+ ... )
+ ... )
>>> df
age born name toy
0 5.0 NaT Alfred None
@@ -8619,7 +8712,7 @@ def clip(
Examples
--------
- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
+ >>> data = {"col_0": [9, -3, 0, -1, 5], "col_1": [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
@@ -8832,9 +8925,9 @@ def asfreq(
--------
Start by creating a series with 4 one minute timestamps.
- >>> index = pd.date_range('1/1/2000', periods=4, freq='min')
+ >>> index = pd.date_range("1/1/2000", periods=4, freq="min")
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
- >>> df = pd.DataFrame({{'s': series}})
+ >>> df = pd.DataFrame({{"s": series}})
>>> df
s
2000-01-01 00:00:00 0.0
@@ -8844,7 +8937,7 @@ def asfreq(
Upsample the series into 30 second bins.
- >>> df.asfreq(freq='30s')
+ >>> df.asfreq(freq="30s")
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
@@ -8856,7 +8949,7 @@ def asfreq(
Upsample again, providing a ``fill value``.
- >>> df.asfreq(freq='30s', fill_value=9.0)
+ >>> df.asfreq(freq="30s", fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
@@ -8868,7 +8961,7 @@ def asfreq(
Upsample again, providing a ``method``.
- >>> df.asfreq(freq='30s', method='bfill')
+ >>> df.asfreq(freq="30s", method="bfill")
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
@@ -8920,8 +9013,8 @@ def at_time(self, time, asof: bool_t = False, axis: Axis | None = None) -> Self:
Examples
--------
- >>> i = pd.date_range('2018-04-09', periods=4, freq='12h')
- >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
+ >>> i = pd.date_range("2018-04-09", periods=4, freq="12h")
+ >>> ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
@@ -8929,7 +9022,7 @@ def at_time(self, time, asof: bool_t = False, axis: Axis | None = None) -> Self:
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
- >>> ts.at_time('12:00')
+ >>> ts.at_time("12:00")
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
@@ -8992,8 +9085,8 @@ def between_time(
Examples
--------
- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
- >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
+ >>> i = pd.date_range("2018-04-09", periods=4, freq="1D20min")
+ >>> ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
@@ -9001,7 +9094,7 @@ def between_time(
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
- >>> ts.between_time('0:15', '0:45')
+ >>> ts.between_time("0:15", "0:45")
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
@@ -9009,7 +9102,7 @@ def between_time(
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
- >>> ts.between_time('0:45', '0:15')
+ >>> ts.between_time("0:45", "0:15")
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
@@ -9146,7 +9239,7 @@ def resample(
--------
Start by creating a series with 9 one minute timestamps.
- >>> index = pd.date_range('1/1/2000', periods=9, freq='min')
+ >>> index = pd.date_range("1/1/2000", periods=9, freq="min")
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
@@ -9163,7 +9256,7 @@ def resample(
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
- >>> series.resample('3min').sum()
+ >>> series.resample("3min").sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
@@ -9177,7 +9270,7 @@ def resample(
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
- >>> series.resample('3min', label='right').sum()
+ >>> series.resample("3min", label="right").sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
@@ -9186,7 +9279,7 @@ def resample(
To include this value close the right side of the bin interval,
as shown below.
- >>> series.resample('3min', label='right', closed='right').sum()
+ >>> series.resample("3min", label="right", closed="right").sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
@@ -9195,7 +9288,7 @@ def resample(
Upsample the series into 30 second bins.
- >>> series.resample('30s').asfreq()[0:5] # Select first 5 rows
+ >>> series.resample("30s").asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
@@ -9206,7 +9299,7 @@ def resample(
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``ffill`` method.
- >>> series.resample('30s').ffill()[0:5]
+ >>> series.resample("30s").ffill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
@@ -9217,7 +9310,7 @@ def resample(
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
- >>> series.resample('30s').bfill()[0:5]
+ >>> series.resample("30s").bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
@@ -9229,8 +9322,7 @@ def resample(
>>> def custom_resampler(arraylike):
... return np.sum(arraylike) + 5
- ...
- >>> series.resample('3min').apply(custom_resampler)
+ >>> series.resample("3min").apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
@@ -9239,12 +9331,9 @@ def resample(
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
- >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19],
- ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}}
- >>> df = pd.DataFrame(d)
- >>> df['week_starting'] = pd.date_range('01/01/2018',
- ... periods=8,
- ... freq='W')
+ >>> df = pd.DataFrame([10, 11, 9, 13, 14, 18, 17, 19], columns=["price"])
+ >>> df["volume"] = [50, 60, 40, 100, 50, 100, 40, 50]
+ >>> df["week_starting"] = pd.date_range("01/01/2018", periods=8, freq="W")
>>> df
price volume week_starting
0 10 50 2018-01-07
@@ -9255,7 +9344,7 @@ def resample(
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
- >>> df.resample('ME', on='week_starting').mean()
+ >>> df.resample("ME", on="week_starting").mean()
price volume
week_starting
2018-01-31 10.75 62.5
@@ -9264,14 +9353,20 @@ def resample(
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
- >>> days = pd.date_range('1/1/2000', periods=4, freq='D')
- >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19],
- ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}}
+ >>> days = pd.date_range("1/1/2000", periods=4, freq="D")
>>> df2 = pd.DataFrame(
- ... d2,
- ... index=pd.MultiIndex.from_product(
- ... [days, ['morning', 'afternoon']]
- ... )
+ ... [
+ ... [10, 50],
+ ... [11, 60],
+ ... [9, 40],
+ ... [13, 100],
+ ... [14, 50],
+ ... [18, 100],
+ ... [17, 40],
+ ... [19, 50],
+ ... ],
+ ... columns=["price", "volume"],
+ ... index=pd.MultiIndex.from_product([days, ["morning", "afternoon"]]),
... )
>>> df2
price volume
@@ -9283,7 +9378,7 @@ def resample(
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
- >>> df2.resample('D', level=0).sum()
+ >>> df2.resample("D", level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
@@ -9292,8 +9387,8 @@ def resample(
If you want to adjust the start of the bins based on a fixed timestamp:
- >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
- >>> rng = pd.date_range(start, end, freq='7min')
+ >>> start, end = "2000-10-01 23:30:00", "2000-10-02 00:30:00"
+ >>> rng = pd.date_range(start, end, freq="7min")
>>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
>>> ts
2000-10-01 23:30:00 0
@@ -9307,7 +9402,7 @@ def resample(
2000-10-02 00:26:00 24
Freq: 7min, dtype: int64
- >>> ts.resample('17min').sum()
+ >>> ts.resample("17min").sum()
2000-10-01 23:14:00 0
2000-10-01 23:31:00 9
2000-10-01 23:48:00 21
@@ -9315,7 +9410,7 @@ def resample(
2000-10-02 00:22:00 24
Freq: 17min, dtype: int64
- >>> ts.resample('17min', origin='epoch').sum()
+ >>> ts.resample("17min", origin="epoch").sum()
2000-10-01 23:18:00 0
2000-10-01 23:35:00 18
2000-10-01 23:52:00 27
@@ -9323,7 +9418,7 @@ def resample(
2000-10-02 00:26:00 24
Freq: 17min, dtype: int64
- >>> ts.resample('17min', origin='2000-01-01').sum()
+ >>> ts.resample("17min", origin="2000-01-01").sum()
2000-10-01 23:24:00 3
2000-10-01 23:41:00 15
2000-10-01 23:58:00 45
@@ -9333,14 +9428,14 @@ def resample(
If you want to adjust the start of the bins with an `offset` Timedelta, the two
following lines are equivalent:
- >>> ts.resample('17min', origin='start').sum()
+ >>> ts.resample("17min", origin="start").sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17min, dtype: int64
- >>> ts.resample('17min', offset='23h30min').sum()
+ >>> ts.resample("17min", offset="23h30min").sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
@@ -9349,7 +9444,7 @@ def resample(
If you want to take the largest Timestamp as the end of the bins:
- >>> ts.resample('17min', origin='end').sum()
+ >>> ts.resample("17min", origin="end").sum()
2000-10-01 23:35:00 0
2000-10-01 23:52:00 18
2000-10-02 00:09:00 27
@@ -9360,7 +9455,7 @@ def resample(
midnight of the largest Timestamp as the end of the bins and drop the bins
not containing data:
- >>> ts.resample('17min', origin='end_day').sum()
+ >>> ts.resample("17min", origin="end_day").sum()
2000-10-01 23:38:00 3
2000-10-01 23:55:00 15
2000-10-02 00:12:00 45
@@ -9468,9 +9563,12 @@ def rank(
Examples
--------
- >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
- ... 'spider', 'snake'],
- ... 'Number_legs': [4, 2, 4, 8, np.nan]})
+ >>> df = pd.DataFrame(
+ ... data={
+ ... "Animal": ["cat", "penguin", "dog", "spider", "snake"],
+ ... "Number_legs": [4, 2, 4, 8, np.nan],
+ ... }
+ ... )
>>> df
Animal Number_legs
0 cat 4.0
@@ -9504,10 +9602,10 @@ def rank(
* pct_rank: when setting ``pct = True``, the ranking is expressed as
percentile rank.
- >>> df['default_rank'] = df['Number_legs'].rank()
- >>> df['max_rank'] = df['Number_legs'].rank(method='max')
- >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
- >>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
+ >>> df["default_rank"] = df["Number_legs"].rank()
+ >>> df["max_rank"] = df["Number_legs"].rank(method="max")
+ >>> df["NA_bottom"] = df["Number_legs"].rank(na_option="bottom")
+ >>> df["pct_rank"] = df["Number_legs"].rank(pct=True)
>>> df
Animal Number_legs default_rank max_rank NA_bottom pct_rank
0 cat 4.0 2.5 3.0 2.5 0.625
@@ -10386,7 +10484,7 @@ def where(
4 10
dtype: int64
- >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
+ >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
>>> df
A B
0 0 1
@@ -10602,10 +10700,11 @@ def shift(
Examples
--------
- >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45],
- ... "Col2": [13, 23, 18, 33, 48],
- ... "Col3": [17, 27, 22, 37, 52]}},
- ... index=pd.date_range("2020-01-01", "2020-01-05"))
+ >>> df = pd.DataFrame(
+ ... [[10, 13, 17], [20, 23, 27], [15, 18, 22], [30, 33, 37], [45, 48, 52]],
+ ... columns=["Col1", "Col2", "Col3"],
+ ... index=pd.date_range("2020-01-01", "2020-01-05"),
+ ... )
>>> df
Col1 Col2 Col3
2020-01-01 10 13 17
@@ -10654,7 +10753,7 @@ def shift(
2020-01-07 30 33 37
2020-01-08 45 48 52
- >>> df['Col1'].shift(periods=[0, 1, 2])
+ >>> df["Col1"].shift(periods=[0, 1, 2])
Col1_0 Col1_1 Col1_2
2020-01-01 10 NaN NaN
2020-01-02 20 10.0 NaN
@@ -10787,10 +10886,14 @@ def truncate(
Examples
--------
- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
- ... 'B': ['f', 'g', 'h', 'i', 'j'],
- ... 'C': ['k', 'l', 'm', 'n', 'o']},
- ... index=[1, 2, 3, 4, 5])
+ >>> df = pd.DataFrame(
+ ... {
+ ... "A": ["a", "b", "c", "d", "e"],
+ ... "B": ["f", "g", "h", "i", "j"],
+ ... "C": ["k", "l", "m", "n", "o"],
+ ... },
+ ... index=[1, 2, 3, 4, 5],
+ ... )
>>> df
A B C
1 a f k
@@ -10817,7 +10920,7 @@ def truncate(
For Series, only rows can be truncated.
- >>> df['A'].truncate(before=2, after=4)
+ >>> df["A"].truncate(before=2, after=4)
2 b
3 c
4 d
@@ -10826,8 +10929,8 @@ def truncate(
The index values in ``truncate`` can be datetimes or string
dates.
- >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
- >>> df = pd.DataFrame(index=dates, data={'A': 1})
+ >>> dates = pd.date_range("2016-01-01", "2016-02-01", freq="s")
+ >>> df = pd.DataFrame(index=dates, data={"A": 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
@@ -10836,8 +10939,9 @@ def truncate(
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
- >>> df.truncate(before=pd.Timestamp('2016-01-05'),
- ... after=pd.Timestamp('2016-01-10')).tail()
+ >>> df.truncate(
+ ... before=pd.Timestamp("2016-01-05"), after=pd.Timestamp("2016-01-10")
+ ... ).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
@@ -10849,7 +10953,7 @@ def truncate(
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
- >>> df.truncate('2016-01-05', '2016-01-10').tail()
+ >>> df.truncate("2016-01-05", "2016-01-10").tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
@@ -10861,7 +10965,7 @@ def truncate(
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
- >>> df.loc['2016-01-05':'2016-01-10', :].tail()
+ >>> df.loc["2016-01-05":"2016-01-10", :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
@@ -10953,16 +11057,15 @@ def tz_convert(
>>> s = pd.Series(
... [1],
- ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']),
+ ... index=pd.DatetimeIndex(["2018-09-15 01:30:00+02:00"]),
... )
- >>> s.tz_convert('Asia/Shanghai')
+ >>> s.tz_convert("Asia/Shanghai")
2018-09-15 07:30:00+08:00 1
dtype: int64
Pass None to convert to UTC and get a tz-naive index:
- >>> s = pd.Series([1],
- ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']))
+ >>> s = pd.Series([1], index=pd.DatetimeIndex(["2018-09-15 01:30:00+02:00"]))
>>> s.tz_convert(None)
2018-09-14 23:30:00 1
dtype: int64
@@ -11083,16 +11186,15 @@ def tz_localize(
>>> s = pd.Series(
... [1],
- ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']),
+ ... index=pd.DatetimeIndex(["2018-09-15 01:30:00"]),
... )
- >>> s.tz_localize('CET')
+ >>> s.tz_localize("CET")
2018-09-15 01:30:00+02:00 1
dtype: int64
Pass None to convert to tz-naive index and preserve local time:
- >>> s = pd.Series([1],
- ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']))
+ >>> s = pd.Series([1], index=pd.DatetimeIndex(["2018-09-15 01:30:00+02:00"]))
>>> s.tz_localize(None)
2018-09-15 01:30:00 1
dtype: int64
@@ -11100,15 +11202,21 @@ def tz_localize(
Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:
- >>> s = pd.Series(range(7),
- ... index=pd.DatetimeIndex(['2018-10-28 01:30:00',
- ... '2018-10-28 02:00:00',
- ... '2018-10-28 02:30:00',
- ... '2018-10-28 02:00:00',
- ... '2018-10-28 02:30:00',
- ... '2018-10-28 03:00:00',
- ... '2018-10-28 03:30:00']))
- >>> s.tz_localize('CET', ambiguous='infer')
+ >>> s = pd.Series(
+ ... range(7),
+ ... index=pd.DatetimeIndex(
+ ... [
+ ... "2018-10-28 01:30:00",
+ ... "2018-10-28 02:00:00",
+ ... "2018-10-28 02:30:00",
+ ... "2018-10-28 02:00:00",
+ ... "2018-10-28 02:30:00",
+ ... "2018-10-28 03:00:00",
+ ... "2018-10-28 03:30:00",
+ ... ]
+ ... ),
+ ... )
+ >>> s.tz_localize("CET", ambiguous="infer")
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
@@ -11121,11 +11229,17 @@ def tz_localize(
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
- >>> s = pd.Series(range(3),
- ... index=pd.DatetimeIndex(['2018-10-28 01:20:00',
- ... '2018-10-28 02:36:00',
- ... '2018-10-28 03:46:00']))
- >>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
+ >>> s = pd.Series(
+ ... range(3),
+ ... index=pd.DatetimeIndex(
+ ... [
+ ... "2018-10-28 01:20:00",
+ ... "2018-10-28 02:36:00",
+ ... "2018-10-28 03:46:00",
+ ... ]
+ ... ),
+ ... )
+ >>> s.tz_localize("CET", ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00 0
2018-10-28 02:36:00+02:00 1
2018-10-28 03:46:00+01:00 2
@@ -11135,18 +11249,19 @@ def tz_localize(
dates forward or backward with a timedelta object or `'shift_forward'`
or `'shift_backward'`.
- >>> s = pd.Series(range(2),
- ... index=pd.DatetimeIndex(['2015-03-29 02:30:00',
- ... '2015-03-29 03:30:00']))
- >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
+ >>> s = pd.Series(
+ ... range(2),
+ ... index=pd.DatetimeIndex(["2015-03-29 02:30:00", "2015-03-29 03:30:00"]),
+ ... )
+ >>> s.tz_localize("Europe/Warsaw", nonexistent="shift_forward")
2015-03-29 03:00:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
- >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
+ >>> s.tz_localize("Europe/Warsaw", nonexistent="shift_backward")
2015-03-29 01:59:59.999999999+01:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
- >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1h'))
+ >>> s.tz_localize("Europe/Warsaw", nonexistent=pd.Timedelta("1h"))
2015-03-29 03:30:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
@@ -11307,7 +11422,7 @@ def describe(
Describing a categorical ``Series``.
- >>> s = pd.Series(['a', 'a', 'b', 'c'])
+ >>> s = pd.Series(["a", "a", "b", "c"])
>>> s.describe()
count 4
unique 3
@@ -11317,11 +11432,13 @@ def describe(
Describing a timestamp ``Series``.
- >>> s = pd.Series([
- ... np.datetime64("2000-01-01"),
- ... np.datetime64("2010-01-01"),
- ... np.datetime64("2010-01-01")
- ... ])
+ >>> s = pd.Series(
+ ... [
+ ... np.datetime64("2000-01-01"),
+ ... np.datetime64("2010-01-01"),
+ ... np.datetime64("2010-01-01"),
+ ... ]
+ ... )
>>> s.describe()
count 3
mean 2006-09-01 08:00:00
@@ -11335,10 +11452,13 @@ def describe(
Describing a ``DataFrame``. By default only numeric fields
are returned.
- >>> df = pd.DataFrame({'categorical': pd.Categorical(['d', 'e', 'f']),
- ... 'numeric': [1, 2, 3],
- ... 'object': ['a', 'b', 'c']
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "categorical": pd.Categorical(["d", "e", "f"]),
+ ... "numeric": [1, 2, 3],
+ ... "object": ["a", "b", "c"],
+ ... }
+ ... )
>>> df.describe()
numeric
count 3.0
@@ -11352,7 +11472,7 @@ def describe(
Describing all columns of a ``DataFrame`` regardless of data type.
- >>> df.describe(include='all') # doctest: +SKIP
+ >>> df.describe(include="all") # doctest: +SKIP
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
@@ -11404,7 +11524,7 @@ def describe(
Including only categorical columns from a ``DataFrame`` description.
- >>> df.describe(include=['category'])
+ >>> df.describe(include=["category"])
categorical
count 3
unique 3
@@ -11545,11 +11665,14 @@ def pct_change(
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
- >>> df = pd.DataFrame({
- ... 'FR': [4.0405, 4.0963, 4.3149],
- ... 'GR': [1.7246, 1.7482, 1.8519],
- ... 'IT': [804.74, 810.01, 860.13]},
- ... index=['1980-01-01', '1980-02-01', '1980-03-01'])
+ >>> df = pd.DataFrame(
+ ... {
+ ... "FR": [4.0405, 4.0963, 4.3149],
+ ... "GR": [1.7246, 1.7482, 1.8519],
+ ... "IT": [804.74, 810.01, 860.13],
+ ... },
+ ... index=["1980-01-01", "1980-02-01", "1980-03-01"],
+ ... )
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
@@ -11565,17 +11688,20 @@ def pct_change(
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
- >>> df = pd.DataFrame({
- ... '2016': [1769950, 30586265],
- ... '2015': [1500923, 40912316],
- ... '2014': [1371819, 41403351]},
- ... index=['GOOG', 'APPL'])
+ >>> df = pd.DataFrame(
+ ... {
+ ... "2016": [1769950, 30586265],
+ ... "2015": [1500923, 40912316],
+ ... "2014": [1371819, 41403351],
+ ... },
+ ... index=["GOOG", "APPL"],
+ ... )
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
- >>> df.pct_change(axis='columns', periods=-1)
+ >>> df.pct_change(axis="columns", periods=-1)
2016 2015 2014
GOOG 0.179241 0.094112 NaN
APPL -0.252395 -0.011860 NaN
@@ -12200,7 +12326,7 @@ def first_valid_index(self) -> Hashable | None:
For DataFrame:
- >>> df = pd.DataFrame({{'A': [None, None, 2], 'B': [None, 3, 4]}})
+ >>> df = pd.DataFrame({{"A": [None, None, 2], "B": [None, 3, 4]}})
>>> df
A B
0 NaN NaN
@@ -12211,7 +12337,7 @@ def first_valid_index(self) -> Hashable | None:
>>> df.last_valid_index()
2
- >>> df = pd.DataFrame({{'A': [None, None, None], 'B': [None, None, None]}})
+ >>> df = pd.DataFrame({{"A": [None, None, None], "B": [None, None, None]}})
>>> df
A B
0 None None
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index f68a5f605e331..c4037dad1f828 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -583,12 +583,15 @@ def filter(self, func, dropna: bool = True, *args, **kwargs):
Examples
--------
- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
- ... 'foo', 'bar'],
- ... 'B' : [1, 2, 3, 4, 5, 6],
- ... 'C' : [2.0, 5., 8., 1., 2., 9.]})
- >>> grouped = df.groupby('A')
- >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
+ >>> df = pd.DataFrame(
+ ... {
+ ... "A": ["foo", "bar", "foo", "bar", "foo", "bar"],
+ ... "B": [1, 2, 3, 4, 5, 6],
+ ... "C": [2.0, 5.0, 8.0, 1.0, 2.0, 9.0],
+ ... }
+ ... )
+ >>> grouped = df.groupby("A")
+ >>> df.groupby("A").B.filter(lambda x: x.mean() > 3.0)
1 2
3 4
5 6
@@ -629,7 +632,7 @@ def nunique(self, dropna: bool = True) -> Series | DataFrame:
--------
For SeriesGroupby:
- >>> lst = ['a', 'a', 'b', 'b']
+ >>> lst = ["a", "a", "b", "b"]
>>> ser = pd.Series([1, 2, 3, 3], index=lst)
>>> ser
a 1
@@ -644,15 +647,19 @@ def nunique(self, dropna: bool = True) -> Series | DataFrame:
For Resampler:
- >>> ser = pd.Series([1, 2, 3, 3], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 3],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 3
dtype: int64
- >>> ser.resample('MS').nunique()
+ >>> ser.resample("MS").nunique()
2023-01-01 2
2023-02-01 1
Freq: MS, dtype: int64
@@ -911,13 +918,17 @@ def take(
Examples
--------
- >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
- ... ('parrot', 'bird', 24.0),
- ... ('lion', 'mammal', 80.5),
- ... ('monkey', 'mammal', np.nan),
- ... ('rabbit', 'mammal', 15.0)],
- ... columns=['name', 'class', 'max_speed'],
- ... index=[4, 3, 2, 1, 0])
+ >>> df = pd.DataFrame(
+ ... [
+ ... ("falcon", "bird", 389.0),
+ ... ("parrot", "bird", 24.0),
+ ... ("lion", "mammal", 80.5),
+ ... ("monkey", "mammal", np.nan),
+ ... ("rabbit", "mammal", 15.0),
+ ... ],
+ ... columns=["name", "class", "max_speed"],
+ ... index=[4, 3, 2, 1, 0],
+ ... )
>>> df
name class max_speed
4 falcon bird 389.0
@@ -981,10 +992,19 @@ def skew(
Examples
--------
- >>> ser = pd.Series([390., 350., 357., np.nan, 22., 20., 30.],
- ... index=['Falcon', 'Falcon', 'Falcon', 'Falcon',
- ... 'Parrot', 'Parrot', 'Parrot'],
- ... name="Max Speed")
+ >>> ser = pd.Series(
+ ... [390.0, 350.0, 357.0, np.nan, 22.0, 20.0, 30.0],
+ ... index=[
+ ... "Falcon",
+ ... "Falcon",
+ ... "Falcon",
+ ... "Falcon",
+ ... "Parrot",
+ ... "Parrot",
+ ... "Parrot",
+ ... ],
+ ... name="Max Speed",
+ ... )
>>> ser
Falcon 390.0
Falcon 350.0
@@ -1075,8 +1095,12 @@ def idxmin(self, skipna: bool = True) -> Series:
Examples
--------
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
@@ -1084,7 +1108,7 @@ def idxmin(self, skipna: bool = True) -> Series:
2023-02-15 4
dtype: int64
- >>> ser.groupby(['a', 'a', 'b', 'b']).idxmin()
+ >>> ser.groupby(["a", "a", "b", "b"]).idxmin()
a 2023-01-01
b 2023-02-01
dtype: datetime64[ns]
@@ -1125,8 +1149,12 @@ def idxmax(self, skipna: bool = True) -> Series:
Examples
--------
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
@@ -1134,7 +1162,7 @@ def idxmax(self, skipna: bool = True) -> Series:
2023-02-15 4
dtype: int64
- >>> ser.groupby(['a', 'a', 'b', 'b']).idxmax()
+ >>> ser.groupby(["a", "a", "b", "b"]).idxmax()
a 2023-01-15
b 2023-02-15
dtype: datetime64[ns]
@@ -1173,7 +1201,7 @@ def is_monotonic_increasing(self) -> Series:
Examples
--------
- >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot'])
+ >>> s = pd.Series([2, 1, 3, 4], index=["Falcon", "Falcon", "Parrot", "Parrot"])
>>> s.groupby(level=0).is_monotonic_increasing
Falcon False
Parrot True
@@ -1192,7 +1220,7 @@ def is_monotonic_decreasing(self) -> Series:
Examples
--------
- >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot'])
+ >>> s = pd.Series([2, 1, 3, 4], index=["Falcon", "Falcon", "Parrot", "Parrot"])
>>> s.groupby(level=0).is_monotonic_decreasing
Falcon True
Parrot False
@@ -1256,13 +1284,17 @@ def unique(self) -> Series:
Examples
--------
- >>> df = pd.DataFrame([('Chihuahua', 'dog', 6.1),
- ... ('Beagle', 'dog', 15.2),
- ... ('Chihuahua', 'dog', 6.9),
- ... ('Persian', 'cat', 9.2),
- ... ('Chihuahua', 'dog', 7),
- ... ('Persian', 'cat', 8.8)],
- ... columns=['breed', 'animal', 'height_in'])
+ >>> df = pd.DataFrame(
+ ... [
+ ... ("Chihuahua", "dog", 6.1),
+ ... ("Beagle", "dog", 15.2),
+ ... ("Chihuahua", "dog", 6.9),
+ ... ("Persian", "cat", 9.2),
+ ... ("Chihuahua", "dog", 7),
+ ... ("Persian", "cat", 8.8),
+ ... ],
+ ... columns=["breed", "animal", "height_in"],
+ ... )
>>> df
breed animal height_in
0 Chihuahua dog 6.1
@@ -1271,7 +1303,7 @@ def unique(self) -> Series:
3 Persian cat 9.2
4 Chihuahua dog 7.0
5 Persian cat 8.8
- >>> ser = df.groupby('animal')['breed'].unique()
+ >>> ser = df.groupby("animal")["breed"].unique()
>>> ser
animal
cat [Persian]
@@ -1826,12 +1858,15 @@ def filter(self, func, dropna: bool = True, *args, **kwargs) -> DataFrame:
Examples
--------
- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
- ... 'foo', 'bar'],
- ... 'B' : [1, 2, 3, 4, 5, 6],
- ... 'C' : [2.0, 5., 8., 1., 2., 9.]})
- >>> grouped = df.groupby('A')
- >>> grouped.filter(lambda x: x['B'].mean() > 3.)
+ >>> df = pd.DataFrame(
+ ... {
+ ... "A": ["foo", "bar", "foo", "bar", "foo", "bar"],
+ ... "B": [1, 2, 3, 4, 5, 6],
+ ... "C": [2.0, 5.0, 8.0, 1.0, 2.0, 9.0],
+ ... }
+ ... )
+ >>> grouped = df.groupby("A")
+ >>> grouped.filter(lambda x: x["B"].mean() > 3.0)
A B C
1 bar 2 5.0
3 bar 4 1.0
@@ -1981,10 +2016,13 @@ def nunique(self, dropna: bool = True) -> DataFrame:
Examples
--------
- >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
- ... 'ham', 'ham'],
- ... 'value1': [1, 5, 5, 2, 5, 5],
- ... 'value2': list('abbaxy')})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "id": ["spam", "egg", "egg", "spam", "ham", "ham"],
+ ... "value1": [1, 5, 5, 2, 5, 5],
+ ... "value2": list("abbaxy"),
+ ... }
+ ... )
>>> df
id value1 value2
0 spam 1 a
@@ -1994,7 +2032,7 @@ def nunique(self, dropna: bool = True) -> DataFrame:
4 ham 5 x
5 ham 5 y
- >>> df.groupby('id').nunique()
+ >>> df.groupby("id").nunique()
value1 value2
id
egg 1 1
@@ -2003,7 +2041,7 @@ def nunique(self, dropna: bool = True) -> DataFrame:
Check for rows with the same id but conflicting values:
- >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
+ >>> df.groupby("id").filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
@@ -2054,9 +2092,13 @@ def idxmax(
--------
Consider a dataset containing food consumption in Argentina.
- >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
- ... 'co2_emissions': [37.2, 19.66, 1712]},
- ... index=['Pork', 'Wheat Products', 'Beef'])
+ >>> df = pd.DataFrame(
+ ... {
+ ... "consumption": [10.51, 103.11, 55.48],
+ ... "co2_emissions": [37.2, 19.66, 1712],
+ ... },
+ ... index=["Pork", "Wheat Products", "Beef"],
+ ... )
>>> df
consumption co2_emissions
@@ -2115,9 +2157,13 @@ def idxmin(
--------
Consider a dataset containing food consumption in Argentina.
- >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
- ... 'co2_emissions': [37.2, 19.66, 1712]},
- ... index=['Pork', 'Wheat Products', 'Beef'])
+ >>> df = pd.DataFrame(
+ ... {
+ ... "consumption": [10.51, 103.11, 55.48],
+ ... "co2_emissions": [37.2, 19.66, 1712],
+ ... },
+ ... index=["Pork", "Wheat Products", "Beef"],
+ ... )
>>> df
consumption co2_emissions
@@ -2189,11 +2235,13 @@ def value_counts(
Examples
--------
- >>> df = pd.DataFrame({
- ... 'gender': ['male', 'male', 'female', 'male', 'female', 'male'],
- ... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'],
- ... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR']
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "gender": ["male", "male", "female", "male", "female", "male"],
+ ... "education": ["low", "medium", "high", "low", "high", "low"],
+ ... "country": ["US", "FR", "US", "FR", "FR", "FR"],
+ ... }
+ ... )
>>> df
gender education country
@@ -2204,7 +2252,7 @@ def value_counts(
4 female high FR
5 male low FR
- >>> df.groupby('gender').value_counts()
+ >>> df.groupby("gender").value_counts()
gender education country
female high FR 1
US 1
@@ -2213,7 +2261,7 @@ def value_counts(
medium FR 1
Name: count, dtype: int64
- >>> df.groupby('gender').value_counts(ascending=True)
+ >>> df.groupby("gender").value_counts(ascending=True)
gender education country
female high FR 1
US 1
@@ -2222,7 +2270,7 @@ def value_counts(
low FR 2
Name: count, dtype: int64
- >>> df.groupby('gender').value_counts(normalize=True)
+ >>> df.groupby("gender").value_counts(normalize=True)
gender education country
female high FR 0.50
US 0.50
@@ -2231,7 +2279,7 @@ def value_counts(
medium FR 0.25
Name: proportion, dtype: float64
- >>> df.groupby('gender', as_index=False).value_counts()
+ >>> df.groupby("gender", as_index=False).value_counts()
gender education country count
0 female high FR 1
1 female high US 1
@@ -2239,7 +2287,7 @@ def value_counts(
3 male low US 1
4 male medium FR 1
- >>> df.groupby('gender', as_index=False).value_counts(normalize=True)
+ >>> df.groupby("gender", as_index=False).value_counts(normalize=True)
gender education country proportion
0 female high FR 0.50
1 female high US 0.50
@@ -2288,13 +2336,17 @@ def take(
Examples
--------
- >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
- ... ('parrot', 'bird', 24.0),
- ... ('lion', 'mammal', 80.5),
- ... ('monkey', 'mammal', np.nan),
- ... ('rabbit', 'mammal', 15.0)],
- ... columns=['name', 'class', 'max_speed'],
- ... index=[4, 3, 2, 1, 0])
+ >>> df = pd.DataFrame(
+ ... [
+ ... ("falcon", "bird", 389.0),
+ ... ("parrot", "bird", 24.0),
+ ... ("lion", "mammal", 80.5),
+ ... ("monkey", "mammal", np.nan),
+ ... ("rabbit", "mammal", 15.0),
+ ... ],
+ ... columns=["name", "class", "max_speed"],
+ ... index=[4, 3, 2, 1, 0],
+ ... )
>>> df
name class max_speed
4 falcon bird 389.0
@@ -2372,14 +2424,15 @@ def skew(
Examples
--------
- >>> arrays = [['falcon', 'parrot', 'cockatoo', 'kiwi',
- ... 'lion', 'monkey', 'rabbit'],
- ... ['bird', 'bird', 'bird', 'bird',
- ... 'mammal', 'mammal', 'mammal']]
- >>> index = pd.MultiIndex.from_arrays(arrays, names=('name', 'class'))
- >>> df = pd.DataFrame({'max_speed': [389.0, 24.0, 70.0, np.nan,
- ... 80.5, 21.5, 15.0]},
- ... index=index)
+ >>> arrays = [
+ ... ["falcon", "parrot", "cockatoo", "kiwi", "lion", "monkey", "rabbit"],
+ ... ["bird", "bird", "bird", "bird", "mammal", "mammal", "mammal"],
+ ... ]
+ >>> index = pd.MultiIndex.from_arrays(arrays, names=("name", "class"))
+ >>> df = pd.DataFrame(
+ ... {"max_speed": [389.0, 24.0, 70.0, np.nan, 80.5, 21.5, 15.0]},
+ ... index=index,
+ ... )
>>> df
max_speed
name class
@@ -2548,10 +2601,18 @@ def corrwith(
Examples
--------
- >>> df1 = pd.DataFrame({"Day": [1, 1, 1, 2, 2, 2, 3, 3, 3],
- ... "Data": [6, 6, 8, 5, 4, 2, 7, 3, 9]})
- >>> df2 = pd.DataFrame({"Day": [1, 1, 1, 2, 2, 2, 3, 3, 3],
- ... "Data": [5, 3, 8, 3, 1, 1, 2, 3, 6]})
+ >>> df1 = pd.DataFrame(
+ ... {
+ ... "Day": [1, 1, 1, 2, 2, 2, 3, 3, 3],
+ ... "Data": [6, 6, 8, 5, 4, 2, 7, 3, 9],
+ ... }
+ ... )
+ >>> df2 = pd.DataFrame(
+ ... {
+ ... "Day": [1, 1, 1, 2, 2, 2, 3, 3, 3],
+ ... "Data": [5, 3, 8, 3, 1, 1, 2, 3, 6],
+ ... }
+ ... )
>>> df1.groupby("Day").corrwith(df2)
Data Day
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 1440bd0adfd26..4106e5c46e00c 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -800,7 +800,7 @@ def groups(self) -> dict[Hashable, Index]:
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'b']
+ >>> lst = ["a", "a", "b"]
>>> ser = pd.Series([1, 2, 3], index=lst)
>>> ser
a 1
@@ -824,15 +824,19 @@ def groups(self) -> dict[Hashable, Index]:
For Resampler:
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
- >>> ser.resample('MS').groups
+ >>> ser.resample("MS").groups
{Timestamp('2023-01-01 00:00:00'): 2, Timestamp('2023-02-01 00:00:00'): 4}
"""
return self._grouper.groups
@@ -853,7 +857,7 @@ def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'b']
+ >>> lst = ["a", "a", "b"]
>>> ser = pd.Series([1, 2, 3], index=lst)
>>> ser
a 1
@@ -866,8 +870,9 @@ def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
For DataFrameGroupBy:
>>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]]
- >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
- ... index=["owl", "toucan", "eagle"])
+ >>> df = pd.DataFrame(
+ ... data, columns=["a", "b", "c"], index=["owl", "toucan", "eagle"]
+ ... )
>>> df
a b c
owl 1 2 3
@@ -878,15 +883,19 @@ def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
For Resampler:
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
- >>> ser.resample('MS').indices
+ >>> ser.resample("MS").indices
defaultdict(<class 'list'>, {Timestamp('2023-01-01 00:00:00'): [0, 1],
Timestamp('2023-02-01 00:00:00'): [2, 3]})
"""
@@ -1043,7 +1052,7 @@ def get_group(self, name) -> DataFrame | Series:
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'b']
+ >>> lst = ["a", "a", "b"]
>>> ser = pd.Series([1, 2, 3], index=lst)
>>> ser
a 1
@@ -1058,8 +1067,9 @@ def get_group(self, name) -> DataFrame | Series:
For DataFrameGroupBy:
>>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]]
- >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
- ... index=["owl", "toucan", "eagle"])
+ >>> df = pd.DataFrame(
+ ... data, columns=["a", "b", "c"], index=["owl", "toucan", "eagle"]
+ ... )
>>> df
a b c
owl 1 2 3
@@ -1072,15 +1082,19 @@ def get_group(self, name) -> DataFrame | Series:
For Resampler:
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
- >>> ser.resample('MS').get_group('2023-01-01')
+ >>> ser.resample("MS").get_group("2023-01-01")
2023-01-01 1
2023-01-15 2
dtype: int64
@@ -1125,7 +1139,7 @@ def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]:
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'b']
+ >>> lst = ["a", "a", "b"]
>>> ser = pd.Series([1, 2, 3], index=lst)
>>> ser
a 1
@@ -1133,7 +1147,7 @@ def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]:
b 3
dtype: int64
>>> for x, y in ser.groupby(level=0):
- ... print(f'{x}\\n{y}\\n')
+ ... print(f"{x}\\n{y}\\n")
a
a 1
a 2
@@ -1152,7 +1166,7 @@ def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]:
1 1 5 6
2 7 8 9
>>> for x, y in df.groupby(by=["a"]):
- ... print(f'{x}\\n{y}\\n')
+ ... print(f"{x}\\n{y}\\n")
(1,)
a b c
0 1 2 3
@@ -1163,16 +1177,20 @@ def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]:
For Resampler:
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
- >>> for x, y in ser.resample('MS'):
- ... print(f'{x}\\n{y}\\n')
+ >>> for x, y in ser.resample("MS"):
+ ... print(f"{x}\\n{y}\\n")
2023-01-01 00:00:00
2023-01-01 1
2023-01-15 2
@@ -2079,7 +2097,7 @@ def any(self, skipna: bool = True) -> NDFrameT:
--------
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'b']
+ >>> lst = ["a", "a", "b"]
>>> ser = pd.Series([1, 2, 0], index=lst)
>>> ser
a 1
@@ -2094,8 +2112,9 @@ def any(self, skipna: bool = True) -> NDFrameT:
For DataFrameGroupBy:
>>> data = [[1, 0, 3], [1, 0, 6], [7, 1, 9]]
- >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
- ... index=["ostrich", "penguin", "parrot"])
+ >>> df = pd.DataFrame(
+ ... data, columns=["a", "b", "c"], index=["ostrich", "penguin", "parrot"]
+ ... )
>>> df
a b c
ostrich 1 0 3
@@ -2136,7 +2155,7 @@ def all(self, skipna: bool = True) -> NDFrameT:
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'b']
+ >>> lst = ["a", "a", "b"]
>>> ser = pd.Series([1, 2, 0], index=lst)
>>> ser
a 1
@@ -2151,8 +2170,9 @@ def all(self, skipna: bool = True) -> NDFrameT:
For DataFrameGroupBy:
>>> data = [[1, 0, 3], [1, 5, 6], [7, 8, 9]]
- >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
- ... index=["ostrich", "penguin", "parrot"])
+ >>> df = pd.DataFrame(
+ ... data, columns=["a", "b", "c"], index=["ostrich", "penguin", "parrot"]
+ ... )
>>> df
a b c
ostrich 1 0 3
@@ -2186,7 +2206,7 @@ def count(self) -> NDFrameT:
--------
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'b']
+ >>> lst = ["a", "a", "b"]
>>> ser = pd.Series([1, 2, np.nan], index=lst)
>>> ser
a 1.0
@@ -2201,8 +2221,9 @@ def count(self) -> NDFrameT:
For DataFrameGroupBy:
>>> data = [[1, np.nan, 3], [1, np.nan, 6], [7, 8, 9]]
- >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
- ... index=["cow", "horse", "bull"])
+ >>> df = pd.DataFrame(
+ ... data, columns=["a", "b", "c"], index=["cow", "horse", "bull"]
+ ... )
>>> df
a b c
cow 1 NaN 3
@@ -2216,15 +2237,19 @@ def count(self) -> NDFrameT:
For Resampler:
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
- >>> ser.resample('MS').count()
+ >>> ser.resample("MS").count()
2023-01-01 2
2023-02-01 2
Freq: MS, dtype: int64
@@ -2309,14 +2334,15 @@ def mean(
%(see_also)s
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
- ... 'B': [np.nan, 2, 3, 4, 5],
- ... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
+ >>> df = pd.DataFrame(
+ ... {"A": [1, 1, 2, 1, 2], "B": [np.nan, 2, 3, 4, 5], "C": [1, 2, 1, 1, 2]},
+ ... columns=["A", "B", "C"],
+ ... )
Groupby one column and return the mean of the remaining columns in
each group.
- >>> df.groupby('A').mean()
+ >>> df.groupby("A").mean()
B C
A
1 3.0 1.333333
@@ -2324,7 +2350,7 @@ def mean(
Groupby two columns and return the mean of the remaining column.
- >>> df.groupby(['A', 'B']).mean()
+ >>> df.groupby(["A", "B"]).mean()
C
A B
1 2.0 2.0
@@ -2335,7 +2361,7 @@ def mean(
Groupby one column and return the mean of only particular column in
the group.
- >>> df.groupby('A')['B'].mean()
+ >>> df.groupby("A")["B"].mean()
A
1 3.0
2 4.0
@@ -2384,7 +2410,7 @@ def median(self, numeric_only: bool = False) -> NDFrameT:
--------
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
+ >>> lst = ["a", "a", "a", "b", "b", "b"]
>>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst)
>>> ser
a 7
@@ -2401,9 +2427,10 @@ def median(self, numeric_only: bool = False) -> NDFrameT:
For DataFrameGroupBy:
- >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]}
- >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog',
- ... 'mouse', 'mouse', 'mouse', 'mouse'])
+ >>> data = {"a": [1, 3, 5, 7, 7, 8, 3], "b": [1, 4, 8, 4, 4, 2, 1]}
+ >>> df = pd.DataFrame(
+ ... data, index=["dog", "dog", "dog", "mouse", "mouse", "mouse", "mouse"]
+ ... )
>>> df
a b
dog 1 1
@@ -2420,14 +2447,20 @@ def median(self, numeric_only: bool = False) -> NDFrameT:
For Resampler:
- >>> ser = pd.Series([1, 2, 3, 3, 4, 5],
- ... index=pd.DatetimeIndex(['2023-01-01',
- ... '2023-01-10',
- ... '2023-01-15',
- ... '2023-02-01',
- ... '2023-02-10',
- ... '2023-02-15']))
- >>> ser.resample('MS').median()
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 3, 4, 5],
+ ... index=pd.DatetimeIndex(
+ ... [
+ ... "2023-01-01",
+ ... "2023-01-10",
+ ... "2023-01-15",
+ ... "2023-02-01",
+ ... "2023-02-10",
+ ... "2023-02-15",
+ ... ]
+ ... ),
+ ... )
+ >>> ser.resample("MS").median()
2023-01-01 2.0
2023-02-01 4.0
Freq: MS, dtype: float64
@@ -2494,7 +2527,7 @@ def std(
--------
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
+ >>> lst = ["a", "a", "a", "b", "b", "b"]
>>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst)
>>> ser
a 7
@@ -2511,9 +2544,10 @@ def std(
For DataFrameGroupBy:
- >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]}
- >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog',
- ... 'mouse', 'mouse', 'mouse', 'mouse'])
+ >>> data = {"a": [1, 3, 5, 7, 7, 8, 3], "b": [1, 4, 8, 4, 4, 2, 1]}
+ >>> df = pd.DataFrame(
+ ... data, index=["dog", "dog", "dog", "mouse", "mouse", "mouse", "mouse"]
+ ... )
>>> df
a b
dog 1 1
@@ -2603,7 +2637,7 @@ def var(
--------
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
+ >>> lst = ["a", "a", "a", "b", "b", "b"]
>>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst)
>>> ser
a 7
@@ -2620,9 +2654,10 @@ def var(
For DataFrameGroupBy:
- >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]}
- >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog',
- ... 'mouse', 'mouse', 'mouse', 'mouse'])
+ >>> data = {"a": [1, 3, 5, 7, 7, 8, 3], "b": [1, 4, 8, 4, 4, 2, 1]}
+ >>> df = pd.DataFrame(
+ ... data, index=["dog", "dog", "dog", "mouse", "mouse", "mouse", "mouse"]
+ ... )
>>> df
a b
dog 1 1
@@ -2811,7 +2846,7 @@ def sem(self, ddof: int = 1, numeric_only: bool = False) -> NDFrameT:
--------
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'b', 'b']
+ >>> lst = ["a", "a", "b", "b"]
>>> ser = pd.Series([5, 10, 8, 14], index=lst)
>>> ser
a 5
@@ -2827,8 +2862,11 @@ def sem(self, ddof: int = 1, numeric_only: bool = False) -> NDFrameT:
For DataFrameGroupBy:
>>> data = [[1, 12, 11], [1, 15, 2], [2, 5, 8], [2, 6, 12]]
- >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
- ... index=["tuna", "salmon", "catfish", "goldfish"])
+ >>> df = pd.DataFrame(
+ ... data,
+ ... columns=["a", "b", "c"],
+ ... index=["tuna", "salmon", "catfish", "goldfish"],
+ ... )
>>> df
a b c
tuna 1 12 11
@@ -2843,14 +2881,20 @@ def sem(self, ddof: int = 1, numeric_only: bool = False) -> NDFrameT:
For Resampler:
- >>> ser = pd.Series([1, 3, 2, 4, 3, 8],
- ... index=pd.DatetimeIndex(['2023-01-01',
- ... '2023-01-10',
- ... '2023-01-15',
- ... '2023-02-01',
- ... '2023-02-10',
- ... '2023-02-15']))
- >>> ser.resample('MS').sem()
+ >>> ser = pd.Series(
+ ... [1, 3, 2, 4, 3, 8],
+ ... index=pd.DatetimeIndex(
+ ... [
+ ... "2023-01-01",
+ ... "2023-01-10",
+ ... "2023-01-15",
+ ... "2023-02-01",
+ ... "2023-02-10",
+ ... "2023-02-15",
+ ... ]
+ ... ),
+ ... )
+ >>> ser.resample("MS").sem()
2023-01-01 0.577350
2023-02-01 1.527525
Freq: MS, dtype: float64
@@ -2885,7 +2929,7 @@ def size(self) -> DataFrame | Series:
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'b']
+ >>> lst = ["a", "a", "b"]
>>> ser = pd.Series([1, 2, 3], index=lst)
>>> ser
a 1
@@ -2898,8 +2942,9 @@ def size(self) -> DataFrame | Series:
dtype: int64
>>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]]
- >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
- ... index=["owl", "toucan", "eagle"])
+ >>> df = pd.DataFrame(
+ ... data, columns=["a", "b", "c"], index=["owl", "toucan", "eagle"]
+ ... )
>>> df
a b c
owl 1 2 3
@@ -2913,14 +2958,16 @@ def size(self) -> DataFrame | Series:
For Resampler:
- >>> ser = pd.Series([1, 2, 3], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3],
+ ... index=pd.DatetimeIndex(["2023-01-01", "2023-01-15", "2023-02-01"]),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
dtype: int64
- >>> ser.resample('MS').size()
+ >>> ser.resample("MS").size()
2023-01-01 2
2023-02-01 1
Freq: MS, dtype: int64
@@ -3252,9 +3299,15 @@ def first(
Examples
--------
- >>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[None, 5, 6], C=[1, 2, 3],
- ... D=['3/11/2000', '3/12/2000', '3/13/2000']))
- >>> df['D'] = pd.to_datetime(df['D'])
+ >>> df = pd.DataFrame(
+ ... dict(
+ ... A=[1, 1, 3],
+ ... B=[None, 5, 6],
+ ... C=[1, 2, 3],
+ ... D=["3/11/2000", "3/12/2000", "3/13/2000"],
+ ... )
+ ... )
+ >>> df["D"] = pd.to_datetime(df["D"])
>>> df.groupby("A").first()
B C D
A
@@ -3381,7 +3434,16 @@ def ohlc(self) -> DataFrame:
For SeriesGroupBy:
- >>> lst = ['SPX', 'CAC', 'SPX', 'CAC', 'SPX', 'CAC', 'SPX', 'CAC',]
+ >>> lst = [
+ ... "SPX",
+ ... "CAC",
+ ... "SPX",
+ ... "CAC",
+ ... "SPX",
+ ... "CAC",
+ ... "SPX",
+ ... "CAC",
+ ... ]
>>> ser = pd.Series([3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 0.1, 0.5], index=lst)
>>> ser
SPX 3.4
@@ -3400,10 +3462,13 @@ def ohlc(self) -> DataFrame:
For DataFrameGroupBy:
- >>> data = {2022: [1.2, 2.3, 8.9, 4.5, 4.4, 3, 2 , 1],
- ... 2023: [3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 8.2, 1.0]}
- >>> df = pd.DataFrame(data, index=['SPX', 'CAC', 'SPX', 'CAC',
- ... 'SPX', 'CAC', 'SPX', 'CAC'])
+ >>> data = {
+ ... 2022: [1.2, 2.3, 8.9, 4.5, 4.4, 3, 2, 1],
+ ... 2023: [3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 8.2, 1.0],
+ ... }
+ >>> df = pd.DataFrame(
+ ... data, index=["SPX", "CAC", "SPX", "CAC", "SPX", "CAC", "SPX", "CAC"]
+ ... )
>>> df
2022 2023
SPX 1.2 3.4
@@ -3422,14 +3487,20 @@ def ohlc(self) -> DataFrame:
For Resampler:
- >>> ser = pd.Series([1, 3, 2, 4, 3, 5],
- ... index=pd.DatetimeIndex(['2023-01-01',
- ... '2023-01-10',
- ... '2023-01-15',
- ... '2023-02-01',
- ... '2023-02-10',
- ... '2023-02-15']))
- >>> ser.resample('MS').ohlc()
+ >>> ser = pd.Series(
+ ... [1, 3, 2, 4, 3, 5],
+ ... index=pd.DatetimeIndex(
+ ... [
+ ... "2023-01-01",
+ ... "2023-01-10",
+ ... "2023-01-15",
+ ... "2023-02-01",
+ ... "2023-02-10",
+ ... "2023-02-15",
+ ... ]
+ ... ),
+ ... )
+ >>> ser.resample("MS").ohlc()
open high low close
2023-01-01 1 3 1 2
2023-02-01 4 5 3 5
@@ -3542,10 +3613,8 @@ def resample(self, rule, *args, include_groups: bool = True, **kwargs) -> Resamp
Examples
--------
- >>> idx = pd.date_range('1/1/2000', periods=4, freq='min')
- >>> df = pd.DataFrame(data=4 * [range(2)],
- ... index=idx,
- ... columns=['a', 'b'])
+ >>> idx = pd.date_range("1/1/2000", periods=4, freq="min")
+ >>> df = pd.DataFrame(data=4 * [range(2)], index=idx, columns=["a", "b"])
>>> df.iloc[2, 0] = 5
>>> df
a b
@@ -3557,7 +3626,7 @@ def resample(self, rule, *args, include_groups: bool = True, **kwargs) -> Resamp
Downsample the DataFrame into 3 minute bins and sum the values of
the timestamps falling into a bin.
- >>> df.groupby('a').resample('3min', include_groups=False).sum()
+ >>> df.groupby("a").resample("3min", include_groups=False).sum()
b
a
0 2000-01-01 00:00:00 2
@@ -3566,7 +3635,7 @@ def resample(self, rule, *args, include_groups: bool = True, **kwargs) -> Resamp
Upsample the series into 30 second bins.
- >>> df.groupby('a').resample('30s', include_groups=False).sum()
+ >>> df.groupby("a").resample("30s", include_groups=False).sum()
b
a
0 2000-01-01 00:00:00 1
@@ -3580,7 +3649,7 @@ def resample(self, rule, *args, include_groups: bool = True, **kwargs) -> Resamp
Resample by month. Values are assigned to the month of the period.
- >>> df.groupby('a').resample('ME', include_groups=False).sum()
+ >>> df.groupby("a").resample("ME", include_groups=False).sum()
b
a
0 2000-01-31 3
@@ -3590,8 +3659,8 @@ def resample(self, rule, *args, include_groups: bool = True, **kwargs) -> Resamp
side of the bin interval.
>>> (
- ... df.groupby('a')
- ... .resample('3min', closed='right', include_groups=False)
+ ... df.groupby("a")
+ ... .resample("3min", closed="right", include_groups=False)
... .sum()
... )
b
@@ -3605,8 +3674,8 @@ def resample(self, rule, *args, include_groups: bool = True, **kwargs) -> Resamp
the left.
>>> (
- ... df.groupby('a')
- ... .resample('3min', closed='right', label='right', include_groups=False)
+ ... df.groupby("a")
+ ... .resample("3min", closed="right", label="right", include_groups=False)
... .sum()
... )
b
@@ -3712,9 +3781,13 @@ def rolling(self, *args, **kwargs) -> RollingGroupby:
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 1, 2, 2],
- ... 'B': [1, 2, 3, 4],
- ... 'C': [0.362, 0.227, 1.267, -0.562]})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "A": [1, 1, 2, 2],
+ ... "B": [1, 2, 3, 4],
+ ... "C": [0.362, 0.227, 1.267, -0.562],
+ ... }
+ ... )
>>> df
A B C
0 1 1 0.362
@@ -3722,7 +3795,7 @@ def rolling(self, *args, **kwargs) -> RollingGroupby:
2 2 3 1.267
3 2 4 -0.562
- >>> df.groupby('A').rolling(2).sum()
+ >>> df.groupby("A").rolling(2).sum()
B C
A
1 0 NaN NaN
@@ -3730,7 +3803,7 @@ def rolling(self, *args, **kwargs) -> RollingGroupby:
2 2 NaN NaN
3 7.0 0.705
- >>> df.groupby('A').rolling(2, min_periods=1).sum()
+ >>> df.groupby("A").rolling(2, min_periods=1).sum()
B C
A
1 0 1.0 0.362
@@ -3738,7 +3811,7 @@ def rolling(self, *args, **kwargs) -> RollingGroupby:
2 2 3.0 1.267
3 7.0 0.705
- >>> df.groupby('A').rolling(2, on='B').sum()
+ >>> df.groupby("A").rolling(2, on="B").sum()
B C
A
1 0 1 NaN
@@ -3993,7 +4066,7 @@ def bfill(self, limit: int | None = None):
With Series:
- >>> index = ['Falcon', 'Falcon', 'Parrot', 'Parrot', 'Parrot']
+ >>> index = ["Falcon", "Falcon", "Parrot", "Parrot", "Parrot"]
>>> s = pd.Series([None, 1, None, None, 3], index=index)
>>> s
Falcon NaN
@@ -4019,8 +4092,10 @@ def bfill(self, limit: int | None = None):
With DataFrame:
- >>> df = pd.DataFrame({'A': [1, None, None, None, 4],
- ... 'B': [None, None, 5, None, 7]}, index=index)
+ >>> df = pd.DataFrame(
+ ... {"A": [1, None, None, None, 4], "B": [None, None, 5, None, 7]},
+ ... index=index,
+ ... )
>>> df
A B
Falcon 1.0 NaN
@@ -4081,9 +4156,10 @@ def nth(self) -> GroupByNthSelector:
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
- ... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
- >>> g = df.groupby('A')
+ >>> df = pd.DataFrame(
+ ... {"A": [1, 1, 2, 1, 2], "B": [np.nan, 2, 3, 4, 5]}, columns=["A", "B"]
+ ... )
+ >>> g = df.groupby("A")
>>> g.nth(0)
A B
0 1 NaN
@@ -4124,7 +4200,7 @@ def nth(self) -> GroupByNthSelector:
Specifying `dropna` allows ignoring ``NaN`` values
- >>> g.nth(0, dropna='any')
+ >>> g.nth(0, dropna="any")
A B
1 1 2.0
2 2 3.0
@@ -4132,7 +4208,7 @@ def nth(self) -> GroupByNthSelector:
When the specified ``n`` is larger than any of the groups, an
empty DataFrame is returned
- >>> g.nth(3, dropna='any')
+ >>> g.nth(3, dropna="any")
Empty DataFrame
Columns: [A, B]
Index: []
@@ -4232,11 +4308,11 @@ def quantile(
Examples
--------
- >>> df = pd.DataFrame([
- ... ['a', 1], ['a', 2], ['a', 3],
- ... ['b', 1], ['b', 3], ['b', 5]
- ... ], columns=['key', 'val'])
- >>> df.groupby('key').quantile()
+ >>> df = pd.DataFrame(
+ ... [["a", 1], ["a", 2], ["a", 3], ["b", 1], ["b", 3], ["b", 5]],
+ ... columns=["key", "val"],
+ ... )
+ >>> df.groupby("key").quantile()
val
key
a 2.0
@@ -4533,8 +4609,7 @@ def cumcount(self, ascending: bool = True):
Examples
--------
- >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
- ... columns=['A'])
+ >>> df = pd.DataFrame([["a"], ["a"], ["a"], ["b"], ["b"], ["a"]], columns=["A"])
>>> df
A
0 a
@@ -4543,7 +4618,7 @@ def cumcount(self, ascending: bool = True):
3 b
4 b
5 a
- >>> df.groupby('A').cumcount()
+ >>> df.groupby("A").cumcount()
0 0
1 1
2 2
@@ -4551,7 +4626,7 @@ def cumcount(self, ascending: bool = True):
4 1
5 3
dtype: int64
- >>> df.groupby('A').cumcount(ascending=False)
+ >>> df.groupby("A").cumcount(ascending=False)
0 3
1 2
2 1
@@ -4618,8 +4693,8 @@ def rank(
7 b 4
8 b 1
9 b 5
- >>> for method in ['average', 'min', 'max', 'dense', 'first']:
- ... df[f'{method}_rank'] = df.groupby('group')['value'].rank(method)
+ >>> for method in ["average", "min", "max", "dense", "first"]:
+ ... df[f"{method}_rank"] = df.groupby("group")["value"].rank(method)
>>> df
group value average_rank min_rank max_rank dense_rank first_rank
0 a 2 1.5 1.0 2.0 1.0 1.0
@@ -4665,7 +4740,7 @@ def cumprod(self, *args, **kwargs) -> NDFrameT:
--------
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'b']
+ >>> lst = ["a", "a", "b"]
>>> ser = pd.Series([6, 2, 0], index=lst)
>>> ser
a 6
@@ -4681,8 +4756,9 @@ def cumprod(self, *args, **kwargs) -> NDFrameT:
For DataFrameGroupBy:
>>> data = [[1, 8, 2], [1, 2, 5], [2, 6, 9]]
- >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
- ... index=["cow", "horse", "bull"])
+ >>> df = pd.DataFrame(
+ ... data, columns=["a", "b", "c"], index=["cow", "horse", "bull"]
+ ... )
>>> df
a b c
cow 1 8 2
@@ -4714,7 +4790,7 @@ def cumsum(self, *args, **kwargs) -> NDFrameT:
--------
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'b']
+ >>> lst = ["a", "a", "b"]
>>> ser = pd.Series([6, 2, 0], index=lst)
>>> ser
a 6
@@ -4730,8 +4806,9 @@ def cumsum(self, *args, **kwargs) -> NDFrameT:
For DataFrameGroupBy:
>>> data = [[1, 8, 2], [1, 2, 5], [2, 6, 9]]
- >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
- ... index=["fox", "gorilla", "lion"])
+ >>> df = pd.DataFrame(
+ ... data, columns=["a", "b", "c"], index=["fox", "gorilla", "lion"]
+ ... )
>>> df
a b c
fox 1 8 2
@@ -4767,7 +4844,7 @@ def cummin(
--------
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
+ >>> lst = ["a", "a", "a", "b", "b", "b"]
>>> ser = pd.Series([1, 6, 2, 3, 0, 4], index=lst)
>>> ser
a 1
@@ -4789,8 +4866,9 @@ def cummin(
For DataFrameGroupBy:
>>> data = [[1, 0, 2], [1, 1, 5], [6, 6, 9]]
- >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
- ... index=["snake", "rabbit", "turtle"])
+ >>> df = pd.DataFrame(
+ ... data, columns=["a", "b", "c"], index=["snake", "rabbit", "turtle"]
+ ... )
>>> df
a b c
snake 1 0 2
@@ -4828,7 +4906,7 @@ def cummax(
--------
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
+ >>> lst = ["a", "a", "a", "b", "b", "b"]
>>> ser = pd.Series([1, 6, 2, 3, 1, 4], index=lst)
>>> ser
a 1
@@ -4850,8 +4928,9 @@ def cummax(
For DataFrameGroupBy:
>>> data = [[1, 8, 2], [1, 1, 0], [2, 6, 9]]
- >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
- ... index=["cow", "horse", "bull"])
+ >>> df = pd.DataFrame(
+ ... data, columns=["a", "b", "c"], index=["cow", "horse", "bull"]
+ ... )
>>> df
a b c
cow 1 8 2
@@ -4915,7 +4994,7 @@ def shift(
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'b', 'b']
+ >>> lst = ["a", "a", "b", "b"]
>>> ser = pd.Series([1, 2, 3, 4], index=lst)
>>> ser
a 1
@@ -4933,8 +5012,11 @@ def shift(
For DataFrameGroupBy:
>>> data = [[1, 2, 3], [1, 5, 6], [2, 5, 8], [2, 6, 9]]
- >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
- ... index=["tuna", "salmon", "catfish", "goldfish"])
+ >>> df = pd.DataFrame(
+ ... data,
+ ... columns=["a", "b", "c"],
+ ... index=["tuna", "salmon", "catfish", "goldfish"],
+ ... )
>>> df
a b c
tuna 1 2 3
@@ -5039,7 +5121,7 @@ def diff(
--------
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
+ >>> lst = ["a", "a", "a", "b", "b", "b"]
>>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst)
>>> ser
a 7
@@ -5060,9 +5142,10 @@ def diff(
For DataFrameGroupBy:
- >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]}
- >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog',
- ... 'mouse', 'mouse', 'mouse', 'mouse'])
+ >>> data = {"a": [1, 3, 5, 7, 7, 8, 3], "b": [1, 4, 8, 4, 4, 2, 1]}
+ >>> df = pd.DataFrame(
+ ... data, index=["dog", "dog", "dog", "mouse", "mouse", "mouse", "mouse"]
+ ... )
>>> df
a b
dog 1 1
@@ -5121,7 +5204,7 @@ def pct_change(
For SeriesGroupBy:
- >>> lst = ['a', 'a', 'b', 'b']
+ >>> lst = ["a", "a", "b", "b"]
>>> ser = pd.Series([1, 2, 3, 4], index=lst)
>>> ser
a 1
@@ -5139,8 +5222,11 @@ def pct_change(
For DataFrameGroupBy:
>>> data = [[1, 2, 3], [1, 5, 6], [2, 5, 8], [2, 6, 9]]
- >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
- ... index=["tuna", "salmon", "catfish", "goldfish"])
+ >>> df = pd.DataFrame(
+ ... data,
+ ... columns=["a", "b", "c"],
+ ... index=["tuna", "salmon", "catfish", "goldfish"],
+ ... )
>>> df
a b c
tuna 1 2 3
@@ -5227,13 +5313,12 @@ def head(self, n: int = 5) -> NDFrameT:
Examples
--------
- >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
- ... columns=['A', 'B'])
- >>> df.groupby('A').head(1)
+ >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
+ >>> df.groupby("A").head(1)
A B
0 1 2
2 5 6
- >>> df.groupby('A').head(-1)
+ >>> df.groupby("A").head(-1)
A B
0 1 2
"""
@@ -5265,13 +5350,14 @@ def tail(self, n: int = 5) -> NDFrameT:
Examples
--------
- >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
- ... columns=['A', 'B'])
- >>> df.groupby('A').tail(1)
+ >>> df = pd.DataFrame(
+ ... [["a", 1], ["a", 2], ["b", 1], ["b", 2]], columns=["A", "B"]
+ ... )
+ >>> df.groupby("A").tail(1)
A B
1 a 2
3 b 2
- >>> df.groupby('A').tail(-1)
+ >>> df.groupby("A").tail(-1)
A B
1 a 2
3 b 2
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 827c44736c6c0..7a316b28d902a 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -148,10 +148,10 @@ class Grouper:
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-09"),
- ... pd.Timestamp("2000-01-16")
+ ... pd.Timestamp("2000-01-16"),
... ],
... "ID": [0, 1, 2, 3],
- ... "Price": [10, 20, 30, 40]
+ ... "Price": [10, 20, 30, 40],
... }
... )
>>> df
@@ -169,8 +169,8 @@ class Grouper:
If you want to adjust the start of the bins based on a fixed timestamp:
- >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
- >>> rng = pd.date_range(start, end, freq='7min')
+ >>> start, end = "2000-10-01 23:30:00", "2000-10-02 00:30:00"
+ >>> rng = pd.date_range(start, end, freq="7min")
>>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
>>> ts
2000-10-01 23:30:00 0
@@ -184,7 +184,7 @@ class Grouper:
2000-10-02 00:26:00 24
Freq: 7min, dtype: int64
- >>> ts.groupby(pd.Grouper(freq='17min')).sum()
+ >>> ts.groupby(pd.Grouper(freq="17min")).sum()
2000-10-01 23:14:00 0
2000-10-01 23:31:00 9
2000-10-01 23:48:00 21
@@ -192,7 +192,7 @@ class Grouper:
2000-10-02 00:22:00 24
Freq: 17min, dtype: int64
- >>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum()
+ >>> ts.groupby(pd.Grouper(freq="17min", origin="epoch")).sum()
2000-10-01 23:18:00 0
2000-10-01 23:35:00 18
2000-10-01 23:52:00 27
@@ -200,7 +200,7 @@ class Grouper:
2000-10-02 00:26:00 24
Freq: 17min, dtype: int64
- >>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum()
+ >>> ts.groupby(pd.Grouper(freq="17min", origin="2000-01-01")).sum()
2000-10-01 23:24:00 3
2000-10-01 23:41:00 15
2000-10-01 23:58:00 45
@@ -210,14 +210,14 @@ class Grouper:
If you want to adjust the start of the bins with an `offset` Timedelta, the two
following lines are equivalent:
- >>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum()
+ >>> ts.groupby(pd.Grouper(freq="17min", origin="start")).sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17min, dtype: int64
- >>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum()
+ >>> ts.groupby(pd.Grouper(freq="17min", offset="23h30min")).sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
@@ -227,7 +227,7 @@ class Grouper:
To replace the use of the deprecated `base` argument, you can now use `offset`,
in this example it is equivalent to have `base=2`:
- >>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum()
+ >>> ts.groupby(pd.Grouper(freq="17min", offset="2min")).sum()
2000-10-01 23:16:00 0
2000-10-01 23:33:00 9
2000-10-01 23:50:00 36
diff --git a/pandas/core/groupby/indexing.py b/pandas/core/groupby/indexing.py
index a3c5ab8edc94e..75c0a062b57d0 100644
--- a/pandas/core/groupby/indexing.py
+++ b/pandas/core/groupby/indexing.py
@@ -99,8 +99,9 @@ def _positional_selector(self) -> GroupByPositionalSelector:
Examples
--------
- >>> df = pd.DataFrame([["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]],
- ... columns=["A", "B"])
+ >>> df = pd.DataFrame(
+ ... [["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]], columns=["A", "B"]
+ ... )
>>> df.groupby("A")._positional_selector[1:2]
A B
1 a 2
diff --git a/pandas/core/indexers/objects.py b/pandas/core/indexers/objects.py
index 5119089bac977..3dd256e9ce45d 100644
--- a/pandas/core/indexers/objects.py
+++ b/pandas/core/indexers/objects.py
@@ -300,7 +300,7 @@ class FixedForwardWindowIndexer(BaseIndexer):
Examples
--------
- >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
+ >>> df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py
index 55bb58f3108c3..78dbe3a1ca632 100644
--- a/pandas/core/indexers/utils.py
+++ b/pandas/core/indexers/utils.py
@@ -202,7 +202,7 @@ def validate_indices(indices: np.ndarray, n: int) -> None:
Examples
--------
- >>> validate_indices(np.array([1, 2]), 3) # OK
+ >>> validate_indices(np.array([1, 2]), 3) # OK
>>> validate_indices(np.array([1, -2]), 3)
Traceback (most recent call last):
@@ -214,7 +214,7 @@ def validate_indices(indices: np.ndarray, n: int) -> None:
...
IndexError: indices are out-of-bounds
- >>> validate_indices(np.array([-1, -1]), 0) # OK
+ >>> validate_indices(np.array([-1, -1]), 0) # OK
>>> validate_indices(np.array([0, 1]), 0)
Traceback (most recent call last):
@@ -502,7 +502,7 @@ def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
For non-integer/boolean dtypes, an appropriate error is raised:
- >>> indexer = np.array([0., 2.], dtype="float64")
+ >>> indexer = np.array([0.0, 2.0], dtype="float64")
>>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index a91fb0a8d718d..8a742a0a9d57d 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -346,7 +346,7 @@ def to_pydatetime(self) -> np.ndarray:
Examples
--------
- >>> s = pd.Series(pd.date_range('20180310', periods=2))
+ >>> s = pd.Series(pd.date_range("20180310", periods=2))
>>> s
0 2018-03-10
1 2018-03-11
@@ -358,7 +358,7 @@ def to_pydatetime(self) -> np.ndarray:
pandas' nanosecond precision is truncated to microseconds.
- >>> s = pd.Series(pd.date_range('20180310', periods=2, freq='ns'))
+ >>> s = pd.Series(pd.date_range("20180310", periods=2, freq="ns"))
>>> s
0 2018-03-10 00:00:00.000000000
1 2018-03-10 00:00:00.000000001
@@ -494,7 +494,7 @@ def components(self) -> DataFrame:
Examples
--------
- >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='s'))
+ >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="s"))
>>> s
0 0 days 00:00:00
1 0 days 00:00:01
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index e87ecb1b6011c..124d56d737251 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -362,7 +362,7 @@ class Index(IndexOpsMixin, PandasObject):
>>> pd.Index([1, 2, 3])
Index([1, 2, 3], dtype='int64')
- >>> pd.Index(list('abc'))
+ >>> pd.Index(list("abc"))
Index(['a', 'b', 'c'], dtype='object')
>>> pd.Index([1, 2, 3], dtype="uint8")
@@ -725,7 +725,7 @@ def _format_duplicate_message(self) -> DataFrame:
Examples
--------
- >>> idx = pd.Index(['a', 'b', 'a'])
+ >>> idx = pd.Index(["a", "b", "a"])
>>> idx._format_duplicate_message()
positions
label
@@ -812,7 +812,7 @@ def is_(self, other) -> bool:
Examples
--------
- >>> idx1 = pd.Index(['1', '2', '3'])
+ >>> idx1 = pd.Index(["1", "2", "3"])
>>> idx1.is_(idx1.view())
True
@@ -1006,7 +1006,7 @@ def ravel(self, order: str_t = "C") -> Self:
Examples
--------
- >>> s = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
+ >>> s = pd.Series([1, 2, 3], index=["a", "b", "c"])
>>> s.index.ravel()
Index(['a', 'b', 'c'], dtype='object')
"""
@@ -1076,7 +1076,7 @@ def astype(self, dtype, copy: bool = True):
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
- >>> idx.astype('float')
+ >>> idx.astype("float")
Index([1.0, 2.0, 3.0], dtype='float64')
"""
if dtype is not None:
@@ -1279,7 +1279,7 @@ def copy(
Examples
--------
- >>> idx = pd.Index(['a', 'b', 'c'])
+ >>> idx = pd.Index(["a", "b", "c"])
>>> new_idx = idx.copy()
>>> idx is new_idx
False
@@ -1571,7 +1571,7 @@ def to_series(self, index=None, name: Hashable | None = None) -> Series:
Examples
--------
- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
+ >>> idx = pd.Index(["Ant", "Bear", "Cow"], name="animal")
By default, the original index and original name is reused.
@@ -1592,7 +1592,7 @@ def to_series(self, index=None, name: Hashable | None = None) -> Series:
To override the name of the resulting column, specify ``name``:
- >>> idx.to_series(name='zoo')
+ >>> idx.to_series(name="zoo")
animal
Ant Ant
Bear Bear
@@ -1635,7 +1635,7 @@ def to_frame(
Examples
--------
- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
+ >>> idx = pd.Index(["Ant", "Bear", "Cow"], name="animal")
>>> idx.to_frame()
animal
animal
@@ -1653,7 +1653,7 @@ def to_frame(
To override the name of the resulting column, specify `name`:
- >>> idx.to_frame(index=False, name='zoo')
+ >>> idx.to_frame(index=False, name="zoo")
zoo
0 Ant
1 Bear
@@ -1679,7 +1679,7 @@ def name(self) -> Hashable:
Examples
--------
- >>> idx = pd.Index([1, 2, 3], name='x')
+ >>> idx = pd.Index([1, 2, 3], name="x")
>>> idx
Index([1, 2, 3], dtype='int64', name='x')
>>> idx.name
@@ -1848,19 +1848,18 @@ def set_names(self, names, *, level=None, inplace: bool = False) -> Self | None:
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Index([1, 2, 3, 4], dtype='int64')
- >>> idx.set_names('quarter')
+ >>> idx.set_names("quarter")
Index([1, 2, 3, 4], dtype='int64', name='quarter')
- >>> idx = pd.MultiIndex.from_product([['python', 'cobra'],
- ... [2018, 2019]])
+ >>> idx = pd.MultiIndex.from_product([["python", "cobra"], [2018, 2019]])
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
)
- >>> idx = idx.set_names(['kind', 'year'])
- >>> idx.set_names('species', level=0)
+ >>> idx = idx.set_names(["kind", "year"])
+ >>> idx.set_names("species", level=0)
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
@@ -1869,7 +1868,7 @@ def set_names(self, names, *, level=None, inplace: bool = False) -> Self | None:
When renaming levels with a dict, levels can not be passed.
- >>> idx.set_names({'kind': 'snake'})
+ >>> idx.set_names({"kind": "snake"})
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
@@ -1952,26 +1951,26 @@ def rename(self, name, inplace: bool = False) -> Self | None:
Examples
--------
- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score')
- >>> idx.rename('grade')
+ >>> idx = pd.Index(["A", "C", "A", "B"], name="score")
+ >>> idx.rename("grade")
Index(['A', 'C', 'A', 'B'], dtype='object', name='grade')
- >>> idx = pd.MultiIndex.from_product([['python', 'cobra'],
- ... [2018, 2019]],
- ... names=('kind', 'year'))
+ >>> idx = pd.MultiIndex.from_product(
+ ... [["python", "cobra"], [2018, 2019]], names=["kind", "year"]
+ ... )
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=('kind', 'year'))
- >>> idx.rename(['species', 'year'])
+ >>> idx.rename(["species", "year"])
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=('species', 'year'))
- >>> idx.rename('species')
+ >>> idx.rename("species")
Traceback (most recent call last):
TypeError: Must pass list-like as `names`.
"""
@@ -2094,7 +2093,7 @@ def _get_level_values(self, level) -> Index:
Examples
--------
- >>> idx = pd.Index(list('abc'))
+ >>> idx = pd.Index(list("abc"))
>>> idx
Index(['a', 'b', 'c'], dtype='object')
@@ -2129,7 +2128,7 @@ def droplevel(self, level: IndexLabel = 0):
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
- ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']
+ ... [[1, 2], [3, 4], [5, 6]], names=["x", "y", "z"]
... )
>>> mi
MultiIndex([(1, 3, 5),
@@ -2146,12 +2145,12 @@ def droplevel(self, level: IndexLabel = 0):
(2, 4)],
names=('x', 'y'))
- >>> mi.droplevel('z')
+ >>> mi.droplevel("z")
MultiIndex([(1, 3),
(2, 4)],
names=('x', 'y'))
- >>> mi.droplevel(['x', 'y'])
+ >>> mi.droplevel(["x", "y"])
Index([5, 6], dtype='int64', name='z')
"""
if not isinstance(level, (tuple, list)):
@@ -2338,13 +2337,13 @@ def is_unique(self) -> bool:
>>> idx.is_unique
True
- >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
- ... "Watermelon"]).astype("category")
+ >>> idx = pd.Index(["Watermelon", "Orange", "Apple", "Watermelon"]).astype(
+ ... "category"
+ ... )
>>> idx.is_unique
False
- >>> idx = pd.Index(["Orange", "Apple",
- ... "Watermelon"]).astype("category")
+ >>> idx = pd.Index(["Orange", "Apple", "Watermelon"]).astype("category")
>>> idx.is_unique
True
"""
@@ -2375,13 +2374,13 @@ def has_duplicates(self) -> bool:
>>> idx.has_duplicates
False
- >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
- ... "Watermelon"]).astype("category")
+ >>> idx = pd.Index(["Watermelon", "Orange", "Apple", "Watermelon"]).astype(
+ ... "category"
+ ... )
>>> idx.has_duplicates
True
- >>> idx = pd.Index(["Orange", "Apple",
- ... "Watermelon"]).astype("category")
+ >>> idx = pd.Index(["Orange", "Apple", "Watermelon"]).astype("category")
>>> idx.has_duplicates
False
"""
@@ -2611,8 +2610,9 @@ def is_object(self) -> bool:
>>> idx.is_object() # doctest: +SKIP
True
- >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
- ... "Watermelon"]).astype("category")
+ >>> idx = pd.Index(["Watermelon", "Orange", "Apple", "Watermelon"]).astype(
+ ... "category"
+ ... )
>>> idx.is_object() # doctest: +SKIP
False
@@ -2653,8 +2653,9 @@ def is_categorical(self) -> bool:
Examples
--------
- >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
- ... "Watermelon"]).astype("category")
+ >>> idx = pd.Index(["Watermelon", "Orange", "Apple", "Watermelon"]).astype(
+ ... "category"
+ ... )
>>> idx.is_categorical() # doctest: +SKIP
True
@@ -2706,8 +2707,9 @@ def is_interval(self) -> bool:
Examples
--------
- >>> idx = pd.Index([pd.Interval(left=0, right=5),
- ... pd.Interval(left=5, right=10)])
+ >>> idx = pd.Index(
+ ... [pd.Interval(left=0, right=5), pd.Interval(left=5, right=10)]
+ ... )
>>> idx.is_interval() # doctest: +SKIP
True
@@ -2832,7 +2834,7 @@ def hasnans(self) -> bool:
Examples
--------
- >>> s = pd.Series([1, 2, 3], index=['a', 'b', None])
+ >>> s = pd.Series([1, 2, 3], index=["a", "b", None])
>>> s
a 1
b 2
@@ -2883,7 +2885,7 @@ def isna(self) -> npt.NDArray[np.bool_]:
Empty strings are not considered NA values. None is considered an NA
value.
- >>> idx = pd.Index(['black', '', 'red', None])
+ >>> idx = pd.Index(["black", "", "red", None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.isna()
@@ -2891,8 +2893,9 @@ def isna(self) -> npt.NDArray[np.bool_]:
For datetimes, `NaT` (Not a Time) is considered as an NA value.
- >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),
- ... pd.Timestamp(''), None, pd.NaT])
+ >>> idx = pd.DatetimeIndex(
+ ... [pd.Timestamp("1940-04-25"), pd.Timestamp(""), None, pd.NaT]
+ ... )
>>> idx
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]', freq=None)
@@ -2939,7 +2942,7 @@ def notna(self) -> npt.NDArray[np.bool_]:
Empty strings are not considered NA values. None is considered a NA
value.
- >>> idx = pd.Index(['black', '', 'red', None])
+ >>> idx = pd.Index(["black", "", "red", None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.notna()
@@ -3099,20 +3102,20 @@ def drop_duplicates(self, *, keep: DropKeep = "first") -> Self:
--------
Generate an pandas.Index with duplicate values.
- >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
+ >>> idx = pd.Index(["llama", "cow", "llama", "beetle", "llama", "hippo"])
The `keep` parameter controls which duplicate values are removed.
The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
- >>> idx.drop_duplicates(keep='first')
- Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')
+ >>> idx.drop_duplicates(keep="first")
+ Index(['llama', 'cow', 'beetle', 'hippo'], dtype='object')
The value 'last' keeps the last occurrence for each set of duplicated
entries.
- >>> idx.drop_duplicates(keep='last')
- Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')
+ >>> idx.drop_duplicates(keep="last")
+ Index(['cow', 'beetle', 'llama', 'hippo'], dtype='object')
The value ``False`` discards all sets of duplicated entries.
@@ -3158,19 +3161,19 @@ def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]:
By default, for each set of duplicated values, the first occurrence is
set to False and all others to True:
- >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])
+ >>> idx = pd.Index(["llama", "cow", "llama", "beetle", "llama"])
>>> idx.duplicated()
array([False, False, True, False, True])
which is equivalent to
- >>> idx.duplicated(keep='first')
+ >>> idx.duplicated(keep="first")
array([False, False, True, False, True])
By using 'last', the last occurrence of each set of duplicated values
is set on False and all others on True:
- >>> idx.duplicated(keep='last')
+ >>> idx.duplicated(keep="last")
array([ True, False, True, False, False])
By setting keep on ``False``, all duplicates are True:
@@ -3279,7 +3282,7 @@ def union(self, other, sort=None):
Union mismatched dtypes
- >>> idx1 = pd.Index(['a', 'b', 'c', 'd'])
+ >>> idx1 = pd.Index(["a", "b", "c", "d"])
>>> idx2 = pd.Index([1, 2, 3, 4])
>>> idx1.union(idx2)
Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object')
@@ -3783,16 +3786,16 @@ def get_loc(self, key):
Examples
--------
- >>> unique_index = pd.Index(list('abc'))
- >>> unique_index.get_loc('b')
+ >>> unique_index = pd.Index(list("abc"))
+ >>> unique_index.get_loc("b")
1
- >>> monotonic_index = pd.Index(list('abbc'))
- >>> monotonic_index.get_loc('b')
+ >>> monotonic_index = pd.Index(list("abbc"))
+ >>> monotonic_index.get_loc("b")
slice(1, 3, None)
- >>> non_monotonic_index = pd.Index(list('abcb'))
- >>> non_monotonic_index.get_loc('b')
+ >>> non_monotonic_index = pd.Index(list("abcb"))
+ >>> non_monotonic_index.get_loc("b")
array([False, True, False, True])
"""
casted_key = self._maybe_cast_indexer(key)
@@ -3863,8 +3866,8 @@ def get_indexer(
Examples
--------
- >>> index = pd.Index(['c', 'a', 'b'])
- >>> index.get_indexer(['a', 'b', 'x'])
+ >>> index = pd.Index(["c", "a", "b"])
+ >>> index.get_indexer(["a", "b", "x"])
array([ 1, 2, -1])
Notice that the return value is an array of locations in ``index``
@@ -4374,10 +4377,10 @@ def reindex(
Examples
--------
- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor'])
+ >>> idx = pd.Index(["car", "bike", "train", "tractor"])
>>> idx
Index(['car', 'bike', 'train', 'tractor'], dtype='object')
- >>> idx.reindex(['car', 'bike'])
+ >>> idx.reindex(["car", "bike"])
(Index(['car', 'bike'], dtype='object'), array([0, 1]))
"""
# GH6552: preserve names when reindexing to non-named target
@@ -4581,7 +4584,7 @@ def join(
--------
>>> idx1 = pd.Index([1, 2, 3])
>>> idx2 = pd.Index([4, 5, 6])
- >>> idx1.join(idx2, how='outer')
+ >>> idx1.join(idx2, how="outer")
Index([1, 2, 3, 4, 5, 6], dtype='int64')
"""
other = ensure_index(other)
@@ -4865,7 +4868,7 @@ def _join_level(
from pandas.core.indexes.multi import MultiIndex
def _get_leaf_sorter(
- labels: tuple[np.ndarray, ...] | list[np.ndarray]
+ labels: tuple[np.ndarray, ...] | list[np.ndarray],
) -> npt.NDArray[np.intp]:
"""
Returns sorter for the inner most level while preserving the
@@ -5303,10 +5306,10 @@ def where(self, cond, other=None) -> Index:
Examples
--------
- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor'])
+ >>> idx = pd.Index(["car", "bike", "train", "tractor"])
>>> idx
Index(['car', 'bike', 'train', 'tractor'], dtype='object')
- >>> idx.where(idx.isin(['car', 'train']), 'other')
+ >>> idx.where(idx.isin(["car", "train"]), "other")
Index(['car', 'other', 'train', 'other'], dtype='object')
"""
if isinstance(self, ABCMultiIndex):
@@ -5635,10 +5638,10 @@ def equals(self, other: Any) -> bool:
The dtype is *not* compared
- >>> int64_idx = pd.Index([1, 2, 3], dtype='int64')
+ >>> int64_idx = pd.Index([1, 2, 3], dtype="int64")
>>> int64_idx
Index([1, 2, 3], dtype='int64')
- >>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64')
+ >>> uint64_idx = pd.Index([1, 2, 3], dtype="uint64")
>>> uint64_idx
Index([1, 2, 3], dtype='uint64')
>>> int64_idx.equals(uint64_idx)
@@ -5697,13 +5700,13 @@ def identical(self, other) -> bool:
Examples
--------
- >>> idx1 = pd.Index(['1', '2', '3'])
- >>> idx2 = pd.Index(['1', '2', '3'])
+ >>> idx1 = pd.Index(["1", "2", "3"])
+ >>> idx2 = pd.Index(["1", "2", "3"])
>>> idx2.identical(idx1)
True
- >>> idx1 = pd.Index(['1', '2', '3'], name="A")
- >>> idx2 = pd.Index(['1', '2', '3'], name="B")
+ >>> idx1 = pd.Index(["1", "2", "3"], name="A")
+ >>> idx2 = pd.Index(["1", "2", "3"], name="B")
>>> idx2.identical(idx1)
False
"""
@@ -5751,26 +5754,25 @@ def asof(self, label):
--------
`Index.asof` returns the latest index label up to the passed label.
- >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03'])
- >>> idx.asof('2014-01-01')
+ >>> idx = pd.Index(["2013-12-31", "2014-01-02", "2014-01-03"])
+ >>> idx.asof("2014-01-01")
'2013-12-31'
If the label is in the index, the method returns the passed label.
- >>> idx.asof('2014-01-02')
+ >>> idx.asof("2014-01-02")
'2014-01-02'
If all of the labels in the index are later than the passed label,
NaN is returned.
- >>> idx.asof('1999-01-02')
+ >>> idx.asof("1999-01-02")
nan
If the index is not sorted, an error is raised.
- >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02',
- ... '2014-01-03'])
- >>> idx_not_sorted.asof('2013-12-31')
+ >>> idx_not_sorted = pd.Index(["2013-12-31", "2015-01-02", "2014-01-03"])
+ >>> idx_not_sorted.asof("2013-12-31")
Traceback (most recent call last):
ValueError: index must be monotonic increasing or decreasing
"""
@@ -5830,9 +5832,10 @@ def asof_locs(
Examples
--------
- >>> idx = pd.date_range('2023-06-01', periods=3, freq='D')
- >>> where = pd.DatetimeIndex(['2023-05-30 00:12:00', '2023-06-01 00:00:00',
- ... '2023-06-02 23:59:59'])
+ >>> idx = pd.date_range("2023-06-01", periods=3, freq="D")
+ >>> where = pd.DatetimeIndex(
+ ... ["2023-05-30 00:12:00", "2023-06-01 00:00:00", "2023-06-02 23:59:59"]
+ ... )
>>> mask = np.ones(3, dtype=bool)
>>> idx.asof_locs(where, mask)
array([-1, 0, 1])
@@ -6024,7 +6027,7 @@ def shift(self, periods: int = 1, freq=None) -> Self:
--------
Put the first 5 month starts of 2011 into an index.
- >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')
+ >>> month_starts = pd.date_range("1/1/2011", periods=5, freq="MS")
>>> month_starts
DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',
'2011-05-01'],
@@ -6032,7 +6035,7 @@ def shift(self, periods: int = 1, freq=None) -> Self:
Shift the index by 10 days.
- >>> month_starts.shift(10, freq='D')
+ >>> month_starts.shift(10, freq="D")
DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',
'2011-05-11'],
dtype='datetime64[ns]', freq=None)
@@ -6074,7 +6077,7 @@ def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
Examples
--------
- >>> idx = pd.Index(['b', 'a', 'd', 'c'])
+ >>> idx = pd.Index(["b", "a", "d", "c"])
>>> idx
Index(['b', 'a', 'd', 'c'], dtype='object')
@@ -6209,7 +6212,7 @@ def get_indexer_for(self, target) -> npt.NDArray[np.intp]:
Examples
--------
- >>> idx = pd.Index([np.nan, 'var1', np.nan])
+ >>> idx = pd.Index([np.nan, "var1", np.nan])
>>> idx.get_indexer_for([np.nan])
array([0, 2])
"""
@@ -6508,16 +6511,16 @@ def map(self, mapper, na_action: Literal["ignore"] | None = None):
Examples
--------
>>> idx = pd.Index([1, 2, 3])
- >>> idx.map({1: 'a', 2: 'b', 3: 'c'})
+ >>> idx.map({1: "a", 2: "b", 3: "c"})
Index(['a', 'b', 'c'], dtype='object')
Using `map` with a function:
>>> idx = pd.Index([1, 2, 3])
- >>> idx.map('I am a {}'.format)
+ >>> idx.map("I am a {}".format)
Index(['I am a 1', 'I am a 2', 'I am a 3'], dtype='object')
- >>> idx = pd.Index(['a', 'b', 'c'])
+ >>> idx = pd.Index(["a", "b", "c"])
>>> idx.map(lambda x: x.upper())
Index(['A', 'B', 'C'], dtype='object')
"""
@@ -6621,9 +6624,9 @@ def isin(self, values, level=None) -> npt.NDArray[np.bool_]:
>>> idx.isin([1, 4])
array([ True, False, False])
- >>> midx = pd.MultiIndex.from_arrays([[1, 2, 3],
- ... ['red', 'blue', 'green']],
- ... names=('number', 'color'))
+ >>> midx = pd.MultiIndex.from_arrays(
+ ... [[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
+ ... )
>>> midx
MultiIndex([(1, 'red'),
(2, 'blue'),
@@ -6633,12 +6636,12 @@ def isin(self, values, level=None) -> npt.NDArray[np.bool_]:
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
- >>> midx.isin(['red', 'orange', 'yellow'], level='color')
+ >>> midx.isin(["red", "orange", "yellow"], level="color")
array([ True, False, False])
To check across the levels of a MultiIndex, pass a list of tuples:
- >>> midx.isin([(1, 'red'), (3, 'red')])
+ >>> midx.isin([(1, "red"), (3, "red")])
array([ True, False, False])
"""
if level is not None:
@@ -6686,12 +6689,12 @@ def slice_indexer(
--------
This is a method on all index types. For example you can do:
- >>> idx = pd.Index(list('abcd'))
- >>> idx.slice_indexer(start='b', end='c')
+ >>> idx = pd.Index(list("abcd"))
+ >>> idx.slice_indexer(start="b", end="c")
slice(1, 3, None)
- >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
- >>> idx.slice_indexer(start='b', end=('c', 'g'))
+ >>> idx = pd.MultiIndex.from_arrays([list("abcd"), list("efgh")])
+ >>> idx.slice_indexer(start="b", end=("c", "g"))
slice(1, 3, None)
"""
start_slice, end_slice = self.slice_locs(start, end, step=step)
@@ -6802,16 +6805,16 @@ def get_slice_bound(self, label, side: Literal["left", "right"]) -> int:
Examples
--------
>>> idx = pd.RangeIndex(5)
- >>> idx.get_slice_bound(3, 'left')
+ >>> idx.get_slice_bound(3, "left")
3
- >>> idx.get_slice_bound(3, 'right')
+ >>> idx.get_slice_bound(3, "right")
4
If ``label`` is non-unique in the index, an error will be raised.
- >>> idx_duplicate = pd.Index(['a', 'b', 'a', 'c', 'd'])
- >>> idx_duplicate.get_slice_bound('a', 'left')
+ >>> idx_duplicate = pd.Index(["a", "b", "a", "c", "d"])
+ >>> idx_duplicate.get_slice_bound("a", "left")
Traceback (most recent call last):
KeyError: Cannot get left slice bound for non-unique label: 'a'
"""
@@ -6887,8 +6890,8 @@ def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]:
Examples
--------
- >>> idx = pd.Index(list('abcd'))
- >>> idx.slice_locs(start='b', end='c')
+ >>> idx = pd.Index(list("abcd"))
+ >>> idx.slice_locs(start="b", end="c")
(1, 3)
"""
inc = step is None or step >= 0
@@ -6969,11 +6972,11 @@ def delete(self, loc) -> Self:
Examples
--------
- >>> idx = pd.Index(['a', 'b', 'c'])
+ >>> idx = pd.Index(["a", "b", "c"])
>>> idx.delete(1)
Index(['a', 'c'], dtype='object')
- >>> idx = pd.Index(['a', 'b', 'c'])
+ >>> idx = pd.Index(["a", "b", "c"])
>>> idx.delete([0, 2])
Index(['b'], dtype='object')
"""
@@ -7005,8 +7008,8 @@ def insert(self, loc: int, item) -> Index:
Examples
--------
- >>> idx = pd.Index(['a', 'b', 'c'])
- >>> idx.insert(1, 'x')
+ >>> idx = pd.Index(["a", "b", "c"])
+ >>> idx.insert(1, "x")
Index(['a', 'x', 'b', 'c'], dtype='object')
"""
item = lib.item_from_zerodim(item)
@@ -7089,8 +7092,8 @@ def drop(
Examples
--------
- >>> idx = pd.Index(['a', 'b', 'c'])
- >>> idx.drop(['a'])
+ >>> idx = pd.Index(["a", "b", "c"])
+ >>> idx.drop(["a"])
Index(['b', 'c'], dtype='object')
"""
if not isinstance(labels, Index):
@@ -7468,13 +7471,13 @@ def min(self, axis=None, skipna: bool = True, *args, **kwargs):
>>> idx.min()
1
- >>> idx = pd.Index(['c', 'b', 'a'])
+ >>> idx = pd.Index(["c", "b", "a"])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
- >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
+ >>> idx = pd.MultiIndex.from_product([("a", "b"), (2, 1)])
>>> idx.min()
('a', 1)
"""
@@ -7531,13 +7534,13 @@ def max(self, axis=None, skipna: bool = True, *args, **kwargs):
>>> idx.max()
3
- >>> idx = pd.Index(['c', 'b', 'a'])
+ >>> idx = pd.Index(["c", "b", "a"])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
- >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
+ >>> idx = pd.MultiIndex.from_product([("a", "b"), (2, 1)])
>>> idx.max()
('b', 2)
"""
@@ -7645,13 +7648,13 @@ def ensure_index(index_like: Axes, copy: bool = False) -> Index:
Examples
--------
- >>> ensure_index(['a', 'b'])
+ >>> ensure_index(["a", "b"])
Index(['a', 'b'], dtype='object')
- >>> ensure_index([('a', 'a'), ('b', 'c')])
+ >>> ensure_index([("a", "a"), ("b", "c")])
Index([('a', 'a'), ('b', 'c')], dtype='object')
- >>> ensure_index([['a', 'a'], ['b', 'c']])
+ >>> ensure_index([["a", "a"], ["b", "c"]])
MultiIndex([('a', 'b'),
('a', 'c')],
)
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index b307be004ad6e..5e9d15812526f 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -284,14 +284,14 @@ def equals(self, other: object) -> bool:
Examples
--------
- >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'])
- >>> ci2 = pd.CategoricalIndex(pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c']))
+ >>> ci = pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"])
+ >>> ci2 = pd.CategoricalIndex(pd.Categorical(["a", "b", "c", "a", "b", "c"]))
>>> ci.equals(ci2)
True
The order of elements matters.
- >>> ci3 = pd.CategoricalIndex(['c', 'b', 'a', 'a', 'b', 'c'])
+ >>> ci3 = pd.CategoricalIndex(["c", "b", "a", "a", "b", "c"])
>>> ci.equals(ci3)
False
@@ -304,16 +304,17 @@ def equals(self, other: object) -> bool:
The categories matter, but the order of the categories matters only when
``ordered=True``.
- >>> ci5 = ci.set_categories(['a', 'b', 'c', 'd'])
+ >>> ci5 = ci.set_categories(["a", "b", "c", "d"])
>>> ci.equals(ci5)
False
- >>> ci6 = ci.set_categories(['b', 'c', 'a'])
+ >>> ci6 = ci.set_categories(["b", "c", "a"])
>>> ci.equals(ci6)
True
- >>> ci_ordered = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
- ... ordered=True)
- >>> ci2_ordered = ci_ordered.set_categories(['b', 'c', 'a'])
+ >>> ci_ordered = pd.CategoricalIndex(
+ ... ["a", "b", "c", "a", "b", "c"], ordered=True
+ ... )
+ >>> ci2_ordered = ci_ordered.set_categories(["b", "c", "a"])
>>> ci_ordered.equals(ci2_ordered)
False
"""
@@ -462,37 +463,37 @@ def map(self, mapper, na_action: Literal["ignore"] | None = None):
Examples
--------
- >>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
+ >>> idx = pd.CategoricalIndex(["a", "b", "c"])
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=False, dtype='category')
>>> idx.map(lambda x: x.upper())
CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
ordered=False, dtype='category')
- >>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
+ >>> idx.map({"a": "first", "b": "second", "c": "third"})
CategoricalIndex(['first', 'second', 'third'], categories=['first',
'second', 'third'], ordered=False, dtype='category')
If the mapping is one-to-one the ordering of the categories is
preserved:
- >>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
+ >>> idx = pd.CategoricalIndex(["a", "b", "c"], ordered=True)
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=True, dtype='category')
- >>> idx.map({'a': 3, 'b': 2, 'c': 1})
+ >>> idx.map({"a": 3, "b": 2, "c": 1})
CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
dtype='category')
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
- >>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
+ >>> idx.map({"a": "first", "b": "second", "c": "first"})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
- >>> idx.map({'a': 'first', 'b': 'second'})
+ >>> idx.map({"a": "first", "b": "second"})
Index(['first', 'second', nan], dtype='object')
"""
mapped = self._values.map(mapper, na_action=na_action)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index a5670536c74f7..45decaf97a188 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -463,20 +463,20 @@ def as_unit(self, unit: str) -> Self:
--------
For :class:`pandas.DatetimeIndex`:
- >>> idx = pd.DatetimeIndex(['2020-01-02 01:02:03.004005006'])
+ >>> idx = pd.DatetimeIndex(["2020-01-02 01:02:03.004005006"])
>>> idx
DatetimeIndex(['2020-01-02 01:02:03.004005006'],
dtype='datetime64[ns]', freq=None)
- >>> idx.as_unit('s')
+ >>> idx.as_unit("s")
DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None)
For :class:`pandas.TimedeltaIndex`:
- >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns'])
+ >>> tdelta_idx = pd.to_timedelta(["1 day 3 min 2 us 42 ns"])
>>> tdelta_idx
TimedeltaIndex(['1 days 00:03:00.000002042'],
dtype='timedelta64[ns]', freq=None)
- >>> tdelta_idx.as_unit('s')
+ >>> tdelta_idx.as_unit("s")
TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None)
"""
arr = self._data.as_unit(unit)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 3cf3352e64f27..282a11122211b 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -488,12 +488,13 @@ def snap(self, freq: Frequency = "S") -> DatetimeIndex:
Examples
--------
- >>> idx = pd.DatetimeIndex(['2023-01-01', '2023-01-02',
- ... '2023-02-01', '2023-02-02'])
+ >>> idx = pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-02", "2023-02-01", "2023-02-02"]
+ ... )
>>> idx
DatetimeIndex(['2023-01-01', '2023-01-02', '2023-02-01', '2023-02-02'],
dtype='datetime64[ns]', freq=None)
- >>> idx.snap('MS')
+ >>> idx.snap("MS")
DatetimeIndex(['2023-01-01', '2023-01-01', '2023-02-01', '2023-02-01'],
dtype='datetime64[ns]', freq=None)
"""
@@ -737,8 +738,9 @@ def indexer_at_time(self, time, asof: bool = False) -> npt.NDArray[np.intp]:
Examples
--------
- >>> idx = pd.DatetimeIndex(["1/1/2020 10:00", "2/1/2020 11:00",
- ... "3/1/2020 10:00"])
+ >>> idx = pd.DatetimeIndex(
+ ... ["1/1/2020 10:00", "2/1/2020 11:00", "3/1/2020 10:00"]
+ ... )
>>> idx.indexer_at_time("10:00")
array([0, 2])
"""
@@ -906,7 +908,7 @@ def date_range(
Specify `start` and `end`, with the default daily frequency.
- >>> pd.date_range(start='1/1/2018', end='1/08/2018')
+ >>> pd.date_range(start="1/1/2018", end="1/08/2018")
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq='D')
@@ -925,14 +927,14 @@ def date_range(
Specify `start` and `periods`, the number of periods (days).
- >>> pd.date_range(start='1/1/2018', periods=8)
+ >>> pd.date_range(start="1/1/2018", periods=8)
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq='D')
Specify `end` and `periods`, the number of periods (days).
- >>> pd.date_range(end='1/1/2018', periods=8)
+ >>> pd.date_range(end="1/1/2018", periods=8)
DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
'2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
@@ -940,7 +942,7 @@ def date_range(
Specify `start`, `end`, and `periods`; the frequency is generated
automatically (linearly spaced).
- >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)
+ >>> pd.date_range(start="2018-04-24", end="2018-04-27", periods=3)
DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
'2018-04-27 00:00:00'],
dtype='datetime64[ns]', freq=None)
@@ -949,28 +951,28 @@ def date_range(
Changed the `freq` (frequency) to ``'ME'`` (month end frequency).
- >>> pd.date_range(start='1/1/2018', periods=5, freq='ME')
+ >>> pd.date_range(start="1/1/2018", periods=5, freq="ME")
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
'2018-05-31'],
dtype='datetime64[ns]', freq='ME')
Multiples are allowed
- >>> pd.date_range(start='1/1/2018', periods=5, freq='3ME')
+ >>> pd.date_range(start="1/1/2018", periods=5, freq="3ME")
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq='3ME')
`freq` can also be specified as an Offset object.
- >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))
+ >>> pd.date_range(start="1/1/2018", periods=5, freq=pd.offsets.MonthEnd(3))
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq='3ME')
Specify `tz` to set the timezone.
- >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')
+ >>> pd.date_range(start="1/1/2018", periods=5, tz="Asia/Tokyo")
DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',
'2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',
'2018-01-05 00:00:00+09:00'],
@@ -979,20 +981,20 @@ def date_range(
`inclusive` controls whether to include `start` and `end` that are on the
boundary. The default, "both", includes boundary points on either end.
- >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive="both")
+ >>> pd.date_range(start="2017-01-01", end="2017-01-04", inclusive="both")
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq='D')
Use ``inclusive='left'`` to exclude `end` if it falls on the boundary.
- >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='left')
+ >>> pd.date_range(start="2017-01-01", end="2017-01-04", inclusive="left")
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],
dtype='datetime64[ns]', freq='D')
Use ``inclusive='right'`` to exclude `start` if it falls on the boundary, and
similarly ``inclusive='neither'`` will exclude both `start` and `end`.
- >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='right')
+ >>> pd.date_range(start="2017-01-01", end="2017-01-04", inclusive="right")
DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq='D')
@@ -1088,7 +1090,7 @@ def bdate_range(
--------
Note how the two weekend days are skipped in the result.
- >>> pd.bdate_range(start='1/1/2018', end='1/08/2018')
+ >>> pd.bdate_range(start="1/1/2018", end="1/08/2018")
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-08'],
dtype='datetime64[ns]', freq='B')
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index f3f3e286e43e5..46d1ee49c22a0 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -479,7 +479,7 @@ def is_overlapping(self) -> bool:
Intervals that share closed endpoints overlap:
- >>> index = pd.interval_range(0, 3, closed='both')
+ >>> index = pd.interval_range(0, 3, closed="both")
>>> index
IntervalIndex([[0, 1], [1, 2], [2, 3]],
dtype='interval[int64, both]')
@@ -488,7 +488,7 @@ def is_overlapping(self) -> bool:
Intervals that only have an open endpoint in common do not overlap:
- >>> index = pd.interval_range(0, 3, closed='left')
+ >>> index = pd.interval_range(0, 3, closed="left")
>>> index
IntervalIndex([[0, 1), [1, 2), [2, 3)],
dtype='interval[int64, left]')
@@ -1017,8 +1017,9 @@ def interval_range(
Additionally, datetime-like input is also supported.
- >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
- ... end=pd.Timestamp('2017-01-04'))
+ >>> pd.interval_range(
+ ... start=pd.Timestamp("2017-01-01"), end=pd.Timestamp("2017-01-04")
+ ... )
IntervalIndex([(2017-01-01 00:00:00, 2017-01-02 00:00:00],
(2017-01-02 00:00:00, 2017-01-03 00:00:00],
(2017-01-03 00:00:00, 2017-01-04 00:00:00]],
@@ -1035,8 +1036,7 @@ def interval_range(
Similarly, for datetime-like ``start`` and ``end``, the frequency must be
convertible to a DateOffset.
- >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
- ... periods=3, freq='MS')
+ >>> pd.interval_range(start=pd.Timestamp("2017-01-01"), periods=3, freq="MS")
IntervalIndex([(2017-01-01 00:00:00, 2017-02-01 00:00:00],
(2017-02-01 00:00:00, 2017-03-01 00:00:00],
(2017-03-01 00:00:00, 2017-04-01 00:00:00]],
@@ -1052,7 +1052,7 @@ def interval_range(
The ``closed`` parameter specifies which endpoints of the individual
intervals within the ``IntervalIndex`` are closed.
- >>> pd.interval_range(end=5, periods=4, closed='both')
+ >>> pd.interval_range(end=5, periods=4, closed="both")
IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
dtype='interval[int64, both]')
"""
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index a11dad9dcb518..c81d76d471a5f 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -296,8 +296,8 @@ class MultiIndex(Index):
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
- >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
- >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
+ >>> arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
+ >>> pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
@@ -502,8 +502,8 @@ def from_arrays(
Examples
--------
- >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
- >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
+ >>> arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
+ >>> pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
@@ -573,9 +573,8 @@ def from_tuples(
Examples
--------
- >>> tuples = [(1, 'red'), (1, 'blue'),
- ... (2, 'red'), (2, 'blue')]
- >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))
+ >>> tuples = [(1, "red"), (1, "blue"), (2, "red"), (2, "blue")]
+ >>> pd.MultiIndex.from_tuples(tuples, names=("number", "color"))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
@@ -655,9 +654,8 @@ def from_product(
Examples
--------
>>> numbers = [0, 1, 2]
- >>> colors = ['green', 'purple']
- >>> pd.MultiIndex.from_product([numbers, colors],
- ... names=('number', 'color'))
+ >>> colors = ["green", "purple"]
+ >>> pd.MultiIndex.from_product([numbers, colors], names=["number", "color"])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
@@ -717,9 +715,10 @@ def from_frame(
Examples
--------
- >>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
- ... ['NJ', 'Temp'], ['NJ', 'Precip']],
- ... columns=['a', 'b'])
+ >>> df = pd.DataFrame(
+ ... [["HI", "Temp"], ["HI", "Precip"], ["NJ", "Temp"], ["NJ", "Precip"]],
+ ... columns=["a", "b"],
+ ... )
>>> df
a b
0 HI Temp
@@ -736,7 +735,7 @@ def from_frame(
Using explicit names, instead of the column names
- >>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])
+ >>> pd.MultiIndex.from_frame(df, names=["state", "observation"])
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
@@ -806,8 +805,9 @@ def dtypes(self) -> Series:
Examples
--------
- >>> idx = pd.MultiIndex.from_product([(0, 1, 2), ('green', 'purple')],
- ... names=('number', 'color'))
+ >>> idx = pd.MultiIndex.from_product(
+ ... [(0, 1, 2), ("green", "purple")], names=["number", "color"]
+ ... )
>>> idx
MultiIndex([(0, 'green'),
(0, 'purple'),
@@ -860,10 +860,11 @@ def levels(self) -> tuple[Index, ...]:
Examples
--------
- >>> index = pd.MultiIndex.from_product([['mammal'],
- ... ('goat', 'human', 'cat', 'dog')],
- ... names=['Category', 'Animals'])
- >>> leg_num = pd.DataFrame(data=(4, 2, 4, 4), index=index, columns=['Legs'])
+ >>> index = pd.MultiIndex.from_product(
+ ... [["mammal"], ("goat", "human", "cat", "dog")],
+ ... names=["Category", "Animals"],
+ ... )
+ >>> leg_num = pd.DataFrame(data=(4, 2, 4, 4), index=index, columns=["Legs"])
>>> leg_num
Legs
Category Animals
@@ -972,9 +973,9 @@ def set_levels(
... (2, "one"),
... (2, "two"),
... (3, "one"),
- ... (3, "two")
+ ... (3, "two"),
... ],
- ... names=["foo", "bar"]
+ ... names=["foo", "bar"],
... )
>>> idx
MultiIndex([(1, 'one'),
@@ -985,7 +986,7 @@ def set_levels(
(3, 'two')],
names=('foo', 'bar'))
- >>> idx.set_levels([['a', 'b', 'c'], [1, 2]])
+ >>> idx.set_levels([["a", "b", "c"], [1, 2]])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
@@ -993,7 +994,7 @@ def set_levels(
('c', 1),
('c', 2)],
names=('foo', 'bar'))
- >>> idx.set_levels(['a', 'b', 'c'], level=0)
+ >>> idx.set_levels(["a", "b", "c"], level=0)
MultiIndex([('a', 'one'),
('a', 'two'),
('b', 'one'),
@@ -1001,7 +1002,7 @@ def set_levels(
('c', 'one'),
('c', 'two')],
names=('foo', 'bar'))
- >>> idx.set_levels(['a', 'b'], level='bar')
+ >>> idx.set_levels(["a", "b"], level="bar")
MultiIndex([(1, 'a'),
(1, 'b'),
(2, 'a'),
@@ -1015,7 +1016,7 @@ def set_levels(
be stored in the MultiIndex levels, though the values will
be truncated in the MultiIndex output.
- >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1])
+ >>> idx.set_levels([["a", "b", "c"], [1, 2, 3, 4]], level=[0, 1])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
@@ -1023,7 +1024,7 @@ def set_levels(
('c', 1),
('c', 2)],
names=('foo', 'bar'))
- >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels
+ >>> idx.set_levels([["a", "b", "c"], [1, 2, 3, 4]], level=[0, 1]).levels
(Index(['a', 'b', 'c'], dtype='object', name='foo'), Index([1, 2, 3, 4], dtype='int64', name='bar'))
""" # noqa: E501
@@ -1049,7 +1050,7 @@ def nlevels(self) -> int:
Examples
--------
- >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
+ >>> mi = pd.MultiIndex.from_arrays([["a"], ["b"], ["c"]])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
@@ -1065,7 +1066,7 @@ def levshape(self) -> Shape:
Examples
--------
- >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
+ >>> mi = pd.MultiIndex.from_arrays([["a"], ["b"], ["c"]])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
@@ -1166,7 +1167,7 @@ def set_codes(
(2, 'one'),
(1, 'two')],
names=('foo', 'bar'))
- >>> idx.set_codes([0, 0, 1, 1], level='bar')
+ >>> idx.set_codes([0, 0, 1, 1], level="bar")
MultiIndex([(1, 'one'),
(1, 'one'),
(2, 'two'),
@@ -1274,7 +1275,7 @@ def copy( # type: ignore[override]
Examples
--------
- >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
+ >>> mi = pd.MultiIndex.from_arrays([["a"], ["b"], ["c"]])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
@@ -1817,14 +1818,14 @@ def get_level_values(self, level) -> Index: # type: ignore[override]
--------
Create a MultiIndex:
- >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
- >>> mi.names = ['level_1', 'level_2']
+ >>> mi = pd.MultiIndex.from_arrays((list("abc"), list("def")))
+ >>> mi.names = ["level_1", "level_2"]
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
- >>> mi.get_level_values('level_2')
+ >>> mi.get_level_values("level_2")
Index(['d', 'e', 'f'], dtype='object', name='level_2')
If a level contains missing values, the return type of the level
@@ -1885,7 +1886,7 @@ def to_frame(
Examples
--------
- >>> mi = pd.MultiIndex.from_arrays([['a', 'b'], ['c', 'd']])
+ >>> mi = pd.MultiIndex.from_arrays([["a", "b"], ["c", "d"]])
>>> mi
MultiIndex([('a', 'c'),
('b', 'd')],
@@ -1903,7 +1904,7 @@ def to_frame(
0 a c
1 b d
- >>> df = mi.to_frame(name=['x', 'y'])
+ >>> df = mi.to_frame(name=["x", "y"])
>>> df
x y
a c a c
@@ -1962,8 +1963,8 @@ def to_flat_index(self) -> Index: # type: ignore[override]
Examples
--------
>>> index = pd.MultiIndex.from_product(
- ... [['foo', 'bar'], ['baz', 'qux']],
- ... names=('a', 'b'))
+ ... [["foo", "bar"], ["baz", "qux"]], names=["a", "b"]
+ ... )
>>> index.to_flat_index()
Index([('foo', 'baz'), ('foo', 'qux'),
('bar', 'baz'), ('bar', 'qux')],
@@ -1984,25 +1985,29 @@ def _is_lexsorted(self) -> bool:
In the below examples, the first level of the MultiIndex is sorted because
a<b<c, so there is no need to look at the next level.
- >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'],
- ... ['d', 'e', 'f']])._is_lexsorted()
+ >>> pd.MultiIndex.from_arrays(
+ ... [["a", "b", "c"], ["d", "e", "f"]]
+ ... )._is_lexsorted()
True
- >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'],
- ... ['d', 'f', 'e']])._is_lexsorted()
+ >>> pd.MultiIndex.from_arrays(
+ ... [["a", "b", "c"], ["d", "f", "e"]]
+ ... )._is_lexsorted()
True
In case there is a tie, the lexicographical sorting looks
at the next level of the MultiIndex.
- >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'b', 'c']])._is_lexsorted()
+ >>> pd.MultiIndex.from_arrays([[0, 1, 1], ["a", "b", "c"]])._is_lexsorted()
True
- >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'c', 'b']])._is_lexsorted()
+ >>> pd.MultiIndex.from_arrays([[0, 1, 1], ["a", "c", "b"]])._is_lexsorted()
False
- >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
- ... ['aa', 'bb', 'aa', 'bb']])._is_lexsorted()
+ >>> pd.MultiIndex.from_arrays(
+ ... [["a", "a", "b", "b"], ["aa", "bb", "aa", "bb"]]
+ ... )._is_lexsorted()
True
- >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
- ... ['bb', 'aa', 'aa', 'bb']])._is_lexsorted()
+ >>> pd.MultiIndex.from_arrays(
+ ... [["a", "a", "b", "b"], ["bb", "aa", "aa", "bb"]]
+ ... )._is_lexsorted()
False
"""
return self._lexsort_depth == self.nlevels
@@ -2039,8 +2044,9 @@ def _sort_levels_monotonic(self, raise_if_incomparable: bool = False) -> MultiIn
Examples
--------
- >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
- ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
+ >>> mi = pd.MultiIndex(
+ ... levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
+ ... )
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
@@ -2103,7 +2109,7 @@ def remove_unused_levels(self) -> MultiIndex:
Examples
--------
- >>> mi = pd.MultiIndex.from_product([range(2), list('ab')])
+ >>> mi = pd.MultiIndex.from_product([range(2), list("ab")])
>>> mi
MultiIndex([(0, 'a'),
(0, 'b'),
@@ -2290,7 +2296,7 @@ def append(self, other):
Examples
--------
- >>> mi = pd.MultiIndex.from_arrays([['a'], ['b']])
+ >>> mi = pd.MultiIndex.from_arrays([["a"], ["b"]])
>>> mi
MultiIndex([('a', 'b')],
)
@@ -2385,8 +2391,9 @@ def drop( # type: ignore[override]
Examples
--------
- >>> idx = pd.MultiIndex.from_product([(0, 1, 2), ('green', 'purple')],
- ... names=["number", "color"])
+ >>> idx = pd.MultiIndex.from_product(
+ ... [(0, 1, 2), ("green", "purple")], names=["number", "color"]
+ ... )
>>> idx
MultiIndex([(0, 'green'),
(0, 'purple'),
@@ -2395,7 +2402,7 @@ def drop( # type: ignore[override]
(2, 'green'),
(2, 'purple')],
names=('number', 'color'))
- >>> idx.drop([(1, 'green'), (2, 'purple')])
+ >>> idx.drop([(1, "green"), (2, "purple")])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'purple'),
@@ -2404,7 +2411,7 @@ def drop( # type: ignore[override]
We can also drop from a specific level.
- >>> idx.drop('green', level='color')
+ >>> idx.drop("green", level="color")
MultiIndex([(0, 'purple'),
(1, 'purple'),
(2, 'purple')],
@@ -2503,8 +2510,9 @@ def swaplevel(self, i=-2, j=-1) -> MultiIndex:
Examples
--------
- >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
- ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
+ >>> mi = pd.MultiIndex(
+ ... levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
+ ... )
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
@@ -2549,7 +2557,7 @@ def reorder_levels(self, order) -> MultiIndex:
Examples
--------
- >>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=['x', 'y'])
+ >>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=["x", "y"])
>>> mi
MultiIndex([(1, 3),
(2, 4)],
@@ -2560,7 +2568,7 @@ def reorder_levels(self, order) -> MultiIndex:
(4, 2)],
names=('y', 'x'))
- >>> mi.reorder_levels(order=['y', 'x'])
+ >>> mi.reorder_levels(order=["y", "x"])
MultiIndex([(3, 1),
(4, 2)],
names=('y', 'x'))
@@ -2835,18 +2843,18 @@ def get_slice_bound(
Examples
--------
- >>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')])
+ >>> mi = pd.MultiIndex.from_arrays([list("abbc"), list("gefd")])
Get the locations from the leftmost 'b' in the first level
until the end of the multiindex:
- >>> mi.get_slice_bound('b', side="left")
+ >>> mi.get_slice_bound("b", side="left")
1
Like above, but if you get the locations from the rightmost
'b' in the first level and 'f' in the second level:
- >>> mi.get_slice_bound(('b','f'), side="right")
+ >>> mi.get_slice_bound(("b", "f"), side="right")
3
See Also
@@ -2890,19 +2898,20 @@ def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]:
Examples
--------
- >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
- ... names=['A', 'B'])
+ >>> mi = pd.MultiIndex.from_arrays(
+ ... [list("abbd"), list("deff")], names=["A", "B"]
+ ... )
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
- >>> mi.slice_locs(start='b')
+ >>> mi.slice_locs(start="b")
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
- >>> mi.slice_locs(start='b', end=('b', 'f'))
+ >>> mi.slice_locs(start="b", end=("b", "f"))
(1, 3)
See Also
@@ -3026,12 +3035,12 @@ def get_loc(self, key):
Examples
--------
- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
+ >>> mi = pd.MultiIndex.from_arrays([list("abb"), list("def")])
- >>> mi.get_loc('b')
+ >>> mi.get_loc("b")
slice(1, 3, None)
- >>> mi.get_loc(('b', 'e'))
+ >>> mi.get_loc(("b", "e"))
1
"""
self._check_indexing_error(key)
@@ -3144,16 +3153,15 @@ def get_loc_level(self, key, level: IndexLabel = 0, drop_level: bool = True):
Examples
--------
- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
- ... names=['A', 'B'])
+ >>> mi = pd.MultiIndex.from_arrays([list("abb"), list("def")], names=["A", "B"])
- >>> mi.get_loc_level('b')
+ >>> mi.get_loc_level("b")
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
- >>> mi.get_loc_level('e', level='B')
+ >>> mi.get_loc_level("e", level="B")
(array([False, True, False]), Index(['b'], dtype='object', name='A'))
- >>> mi.get_loc_level(['b', 'e'])
+ >>> mi.get_loc_level(["b", "e"])
(1, None)
"""
if not isinstance(level, (list, tuple)):
@@ -3455,15 +3463,15 @@ def get_locs(self, seq) -> npt.NDArray[np.intp]:
Examples
--------
- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
+ >>> mi = pd.MultiIndex.from_arrays([list("abb"), list("def")])
- >>> mi.get_locs('b') # doctest: +SKIP
+ >>> mi.get_locs("b") # doctest: +SKIP
array([1, 2], dtype=int64)
- >>> mi.get_locs([slice(None), ['e', 'f']]) # doctest: +SKIP
+ >>> mi.get_locs([slice(None), ["e", "f"]]) # doctest: +SKIP
array([1, 2], dtype=int64)
- >>> mi.get_locs([[True, False, True], slice('e', 'f')]) # doctest: +SKIP
+ >>> mi.get_locs([[True, False, True], slice("e", "f")]) # doctest: +SKIP
array([2], dtype=int64)
"""
@@ -3675,11 +3683,11 @@ def truncate(self, before=None, after=None) -> MultiIndex:
Examples
--------
- >>> mi = pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['x', 'y', 'z']])
+ >>> mi = pd.MultiIndex.from_arrays([["a", "b", "c"], ["x", "y", "z"]])
>>> mi
MultiIndex([('a', 'x'), ('b', 'y'), ('c', 'z')],
)
- >>> mi.truncate(before='a', after='b')
+ >>> mi.truncate(before="a", after="b")
MultiIndex([('a', 'x'), ('b', 'y')],
)
"""
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index ab499665b13ed..a7315d40f0236 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -156,7 +156,7 @@ class PeriodIndex(DatetimeIndexOpsMixin):
Examples
--------
- >>> idx = pd.PeriodIndex(data=['2000Q1', '2002Q3'], freq='Q')
+ >>> idx = pd.PeriodIndex(data=["2000Q1", "2002Q3"], freq="Q")
>>> idx
PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')
"""
@@ -374,7 +374,7 @@ def from_ordinals(cls, ordinals, *, freq, name=None) -> Self:
Examples
--------
- >>> idx = pd.PeriodIndex.from_ordinals([-1, 0, 1], freq='Q')
+ >>> idx = pd.PeriodIndex.from_ordinals([-1, 0, 1], freq="Q")
>>> idx
PeriodIndex(['1969Q4', '1970Q1', '1970Q2'], dtype='period[Q-DEC]')
"""
@@ -617,7 +617,7 @@ def period_range(
Examples
--------
- >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
+ >>> pd.period_range(start="2017-01-01", end="2018-01-01", freq="M")
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',
'2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',
'2018-01'],
@@ -627,8 +627,11 @@ def period_range(
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
- >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
- ... end=pd.Period('2017Q2', freq='Q'), freq='M')
+ >>> pd.period_range(
+ ... start=pd.Period("2017Q1", freq="Q"),
+ ... end=pd.Period("2017Q2", freq="Q"),
+ ... freq="M",
+ ... )
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]')
"""
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index db813b047b2bb..485c7a1ce08cd 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -114,13 +114,13 @@ class TimedeltaIndex(DatetimeTimedeltaMixin):
Examples
--------
- >>> pd.TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'])
+ >>> pd.TimedeltaIndex(["0 days", "1 days", "2 days", "3 days", "4 days"])
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
We can also let pandas infer the frequency when possible.
- >>> pd.TimedeltaIndex(np.arange(5) * 24 * 3600 * 1e9, freq='infer')
+ >>> pd.TimedeltaIndex(np.arange(5) * 24 * 3600 * 1e9, freq="infer")
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
"""
@@ -316,14 +316,14 @@ def timedelta_range(
Examples
--------
- >>> pd.timedelta_range(start='1 day', periods=4)
+ >>> pd.timedelta_range(start="1 day", periods=4)
TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``closed`` parameter specifies which endpoint is included. The default
behavior is to include both endpoints.
- >>> pd.timedelta_range(start='1 day', periods=4, closed='right')
+ >>> pd.timedelta_range(start="1 day", periods=4, closed="right")
TimedeltaIndex(['2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
@@ -331,7 +331,7 @@ def timedelta_range(
Only fixed frequencies can be passed, non-fixed frequencies such as
'M' (month end) will raise.
- >>> pd.timedelta_range(start='1 day', end='2 days', freq='6h')
+ >>> pd.timedelta_range(start="1 day", end="2 days", freq="6h")
TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
'1 days 18:00:00', '2 days 00:00:00'],
dtype='timedelta64[ns]', freq='6h')
@@ -339,7 +339,7 @@ def timedelta_range(
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
- >>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
+ >>> pd.timedelta_range(start="1 day", end="5 days", periods=4)
TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
'5 days 00:00:00'],
dtype='timedelta64[ns]', freq=None)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 91e9d6fd602a6..4ccac6449d835 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -116,14 +116,17 @@ class _IndexSlice:
Examples
--------
- >>> midx = pd.MultiIndex.from_product([['A0', 'A1'], ['B0', 'B1', 'B2', 'B3']])
- >>> columns = ['foo', 'bar']
- >>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))),
- ... index=midx, columns=columns)
+ >>> midx = pd.MultiIndex.from_product([["A0", "A1"], ["B0", "B1", "B2", "B3"]])
+ >>> columns = ["foo", "bar"]
+ >>> dfmi = pd.DataFrame(
+ ... np.arange(16).reshape((len(midx), len(columns))),
+ ... index=midx,
+ ... columns=columns,
+ ... )
Using the default slice command:
- >>> dfmi.loc[(slice(None), slice('B0', 'B1')), :]
+ >>> dfmi.loc[(slice(None), slice("B0", "B1")), :]
foo bar
A0 B0 0 1
B1 2 3
@@ -133,7 +136,7 @@ class _IndexSlice:
Using the IndexSlice class for a more intuitive command:
>>> idx = pd.IndexSlice
- >>> dfmi.loc[idx[:, 'B0':'B1'], :]
+ >>> dfmi.loc[idx[:, "B0":"B1"], :]
foo bar
A0 B0 0 1
B1 2 3
@@ -195,9 +198,11 @@ def iloc(self) -> _iLocIndexer:
Examples
--------
- >>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4},
- ... {'a': 100, 'b': 200, 'c': 300, 'd': 400},
- ... {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000}]
+ >>> mydict = [
+ ... {"a": 1, "b": 2, "c": 3, "d": 4},
+ ... {"a": 100, "b": 200, "c": 300, "d": 400},
+ ... {"a": 1000, "b": 2000, "c": 3000, "d": 4000},
+ ... ]
>>> df = pd.DataFrame(mydict)
>>> df
a b c d
@@ -345,9 +350,11 @@ def loc(self) -> _LocIndexer:
--------
**Getting values**
- >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]],
- ... index=['cobra', 'viper', 'sidewinder'],
- ... columns=['max_speed', 'shield'])
+ >>> df = pd.DataFrame(
+ ... [[1, 2], [4, 5], [7, 8]],
+ ... index=["cobra", "viper", "sidewinder"],
+ ... columns=["max_speed", "shield"],
+ ... )
>>> df
max_speed shield
cobra 1 2
@@ -356,27 +363,27 @@ def loc(self) -> _LocIndexer:
Single label. Note this returns the row as a Series.
- >>> df.loc['viper']
+ >>> df.loc["viper"]
max_speed 4
shield 5
Name: viper, dtype: int64
List of labels. Note using ``[[]]`` returns a DataFrame.
- >>> df.loc[['viper', 'sidewinder']]
+ >>> df.loc[["viper", "sidewinder"]]
max_speed shield
viper 4 5
sidewinder 7 8
Single label for row and column
- >>> df.loc['cobra', 'shield']
+ >>> df.loc["cobra", "shield"]
2
Slice with labels for row and single label for column. As mentioned
above, note that both the start and stop of the slice are included.
- >>> df.loc['cobra':'viper', 'max_speed']
+ >>> df.loc["cobra":"viper", "max_speed"]
cobra 1
viper 4
Name: max_speed, dtype: int64
@@ -389,8 +396,9 @@ def loc(self) -> _LocIndexer:
Alignable boolean Series:
- >>> df.loc[pd.Series([False, True, False],
- ... index=['viper', 'sidewinder', 'cobra'])]
+ >>> df.loc[
+ ... pd.Series([False, True, False], index=["viper", "sidewinder", "cobra"])
+ ... ]
max_speed shield
sidewinder 7 8
@@ -404,25 +412,25 @@ def loc(self) -> _LocIndexer:
Conditional that returns a boolean Series
- >>> df.loc[df['shield'] > 6]
+ >>> df.loc[df["shield"] > 6]
max_speed shield
sidewinder 7 8
Conditional that returns a boolean Series with column labels specified
- >>> df.loc[df['shield'] > 6, ['max_speed']]
+ >>> df.loc[df["shield"] > 6, ["max_speed"]]
max_speed
sidewinder 7
Multiple conditional using ``&`` that returns a boolean Series
- >>> df.loc[(df['max_speed'] > 1) & (df['shield'] < 8)]
+ >>> df.loc[(df["max_speed"] > 1) & (df["shield"] < 8)]
max_speed shield
viper 4 5
Multiple conditional using ``|`` that returns a boolean Series
- >>> df.loc[(df['max_speed'] > 4) | (df['shield'] < 5)]
+ >>> df.loc[(df["max_speed"] > 4) | (df["shield"] < 5)]
max_speed shield
cobra 1 2
sidewinder 7 8
@@ -439,7 +447,7 @@ def loc(self) -> _LocIndexer:
Callable that returns a boolean Series
- >>> df.loc[lambda df: df['shield'] == 8]
+ >>> df.loc[lambda df: df["shield"] == 8]
max_speed shield
sidewinder 7 8
@@ -447,7 +455,7 @@ def loc(self) -> _LocIndexer:
Set value for all items matching the list of labels
- >>> df.loc[['viper', 'sidewinder'], ['shield']] = 50
+ >>> df.loc[["viper", "sidewinder"], ["shield"]] = 50
>>> df
max_speed shield
cobra 1 2
@@ -456,7 +464,7 @@ def loc(self) -> _LocIndexer:
Set value for an entire row
- >>> df.loc['cobra'] = 10
+ >>> df.loc["cobra"] = 10
>>> df
max_speed shield
cobra 10 10
@@ -465,7 +473,7 @@ def loc(self) -> _LocIndexer:
Set value for an entire column
- >>> df.loc[:, 'max_speed'] = 30
+ >>> df.loc[:, "max_speed"] = 30
>>> df
max_speed shield
cobra 30 10
@@ -474,7 +482,7 @@ def loc(self) -> _LocIndexer:
Set value for rows matching callable condition
- >>> df.loc[df['shield'] > 35] = 0
+ >>> df.loc[df["shield"] > 35] = 0
>>> df
max_speed shield
cobra 30 10
@@ -505,8 +513,11 @@ def loc(self) -> _LocIndexer:
Another example using integers for the index
- >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]],
- ... index=[7, 8, 9], columns=['max_speed', 'shield'])
+ >>> df = pd.DataFrame(
+ ... [[1, 2], [4, 5], [7, 8]],
+ ... index=[7, 8, 9],
+ ... columns=["max_speed", "shield"],
+ ... )
>>> df
max_speed shield
7 1 2
@@ -527,14 +538,16 @@ def loc(self) -> _LocIndexer:
A number of examples using a DataFrame with a MultiIndex
>>> tuples = [
- ... ('cobra', 'mark i'), ('cobra', 'mark ii'),
- ... ('sidewinder', 'mark i'), ('sidewinder', 'mark ii'),
- ... ('viper', 'mark ii'), ('viper', 'mark iii')
+ ... ("cobra", "mark i"),
+ ... ("cobra", "mark ii"),
+ ... ("sidewinder", "mark i"),
+ ... ("sidewinder", "mark ii"),
+ ... ("viper", "mark ii"),
+ ... ("viper", "mark iii"),
... ]
>>> index = pd.MultiIndex.from_tuples(tuples)
- >>> values = [[12, 2], [0, 4], [10, 20],
- ... [1, 4], [7, 1], [16, 36]]
- >>> df = pd.DataFrame(values, columns=['max_speed', 'shield'], index=index)
+ >>> values = [[12, 2], [0, 4], [10, 20], [1, 4], [7, 1], [16, 36]]
+ >>> df = pd.DataFrame(values, columns=["max_speed", "shield"], index=index)
>>> df
max_speed shield
cobra mark i 12 2
@@ -546,14 +559,14 @@ def loc(self) -> _LocIndexer:
Single label. Note this returns a DataFrame with a single index.
- >>> df.loc['cobra']
+ >>> df.loc["cobra"]
max_speed shield
mark i 12 2
mark ii 0 4
Single index tuple. Note this returns a Series.
- >>> df.loc[('cobra', 'mark ii')]
+ >>> df.loc[("cobra", "mark ii")]
max_speed 0
shield 4
Name: (cobra, mark ii), dtype: int64
@@ -561,25 +574,25 @@ def loc(self) -> _LocIndexer:
Single label for row and column. Similar to passing in a tuple, this
returns a Series.
- >>> df.loc['cobra', 'mark i']
+ >>> df.loc["cobra", "mark i"]
max_speed 12
shield 2
Name: (cobra, mark i), dtype: int64
Single tuple. Note using ``[[]]`` returns a DataFrame.
- >>> df.loc[[('cobra', 'mark ii')]]
+ >>> df.loc[[("cobra", "mark ii")]]
max_speed shield
cobra mark ii 0 4
Single tuple for the index with a single label for the column
- >>> df.loc[('cobra', 'mark i'), 'shield']
+ >>> df.loc[("cobra", "mark i"), "shield"]
2
Slice from index tuple to single label
- >>> df.loc[('cobra', 'mark i'):'viper']
+ >>> df.loc[("cobra", "mark i") : "viper"]
max_speed shield
cobra mark i 12 2
mark ii 0 4
@@ -590,7 +603,7 @@ def loc(self) -> _LocIndexer:
Slice from index tuple to index tuple
- >>> df.loc[('cobra', 'mark i'):('viper', 'mark ii')]
+ >>> df.loc[("cobra", "mark i") : ("viper", "mark ii")]
max_speed shield
cobra mark i 12 2
mark ii 0 4
@@ -642,8 +655,11 @@ def at(self) -> _AtIndexer:
Examples
--------
- >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],
- ... index=[4, 5, 6], columns=['A', 'B', 'C'])
+ >>> df = pd.DataFrame(
+ ... [[0, 2, 3], [0, 4, 1], [10, 20, 30]],
+ ... index=[4, 5, 6],
+ ... columns=["A", "B", "C"],
+ ... )
>>> df
A B C
4 0 2 3
@@ -652,18 +668,18 @@ def at(self) -> _AtIndexer:
Get value at specified row/column pair
- >>> df.at[4, 'B']
+ >>> df.at[4, "B"]
2
Set value at specified row/column pair
- >>> df.at[4, 'B'] = 10
- >>> df.at[4, 'B']
+ >>> df.at[4, "B"] = 10
+ >>> df.at[4, "B"]
10
Get value within a Series
- >>> df.loc[5].at['B']
+ >>> df.loc[5].at["B"]
4
"""
return _AtIndexer("at", self)
@@ -690,8 +706,9 @@ def iat(self) -> _iAtIndexer:
Examples
--------
- >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],
- ... columns=['A', 'B', 'C'])
+ >>> df = pd.DataFrame(
+ ... [[0, 2, 3], [0, 4, 1], [10, 20, 30]], columns=["A", "B", "C"]
+ ... )
>>> df
A B C
0 0 2 3
diff --git a/pandas/core/interchange/from_dataframe.py b/pandas/core/interchange/from_dataframe.py
index ba2d275e88b32..b296e6016a1ac 100644
--- a/pandas/core/interchange/from_dataframe.py
+++ b/pandas/core/interchange/from_dataframe.py
@@ -50,12 +50,13 @@ def from_dataframe(df, allow_copy: bool = True) -> pd.DataFrame:
Examples
--------
- >>> df_not_necessarily_pandas = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
+ >>> df_not_necessarily_pandas = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> interchange_object = df_not_necessarily_pandas.__dataframe__()
>>> interchange_object.column_names()
Index(['A', 'B'], dtype='object')
- >>> df_pandas = (pd.api.interchange.from_dataframe
- ... (interchange_object.select_columns_by_name(['A'])))
+ >>> df_pandas = pd.api.interchange.from_dataframe(
+ ... interchange_object.select_columns_by_name(["A"])
+ ... )
>>> df_pandas
A
0 1
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 2ca42d1621b97..cdc2ff6c51b06 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -1100,7 +1100,7 @@ def _interp_limit(
def _interp_limit(invalid, fw_limit, bw_limit):
for x in np.where(invalid)[0]:
- if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
+ if invalid[max(0, x - fw_limit) : x + bw_limit + 1].all():
yield x
"""
# handle forward first; the backward direction is the same except
diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py
index fb5980184355c..0404da189dfa5 100644
--- a/pandas/core/ops/missing.py
+++ b/pandas/core/ops/missing.py
@@ -89,9 +89,9 @@ def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray:
>>> x = np.array([1, 0, -1], dtype=np.int64)
>>> x
array([ 1, 0, -1])
- >>> y = 0 # int 0; numpy behavior is different with float
+ >>> y = 0 # int 0; numpy behavior is different with float
>>> result = x // y
- >>> result # raw numpy result does not fill division by zero
+ >>> result # raw numpy result does not fill division by zero
array([0, 0, 0])
>>> mask_zero_div_zero(x, y, result)
array([ inf, nan, -inf])
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 2a36c0f1ef549..34c61c6f26106 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -395,16 +395,13 @@ def transform(self, arg, *args, **kwargs):
Examples
--------
- >>> s = pd.Series([1, 2],
- ... index=pd.date_range('20180101',
- ... periods=2,
- ... freq='1h'))
+ >>> s = pd.Series([1, 2], index=pd.date_range("20180101", periods=2, freq="1h"))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
Freq: h, dtype: int64
- >>> resampled = s.resample('15min')
+ >>> resampled = s.resample("15min")
>>> resampled.transform(lambda x: (x - x.mean()) / x.std())
2018-01-01 00:00:00 NaN
2018-01-01 01:00:00 NaN
@@ -557,8 +554,12 @@ def ffill(self, limit: int | None = None):
--------
Here we only create a ``Series``.
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
@@ -568,7 +569,7 @@ def ffill(self, limit: int | None = None):
Example for ``ffill`` with downsampling (we have fewer dates after resampling):
- >>> ser.resample('MS').ffill()
+ >>> ser.resample("MS").ffill()
2023-01-01 1
2023-02-01 3
Freq: MS, dtype: int64
@@ -576,7 +577,7 @@ def ffill(self, limit: int | None = None):
Example for ``ffill`` with upsampling (fill the new dates with
the previous value):
- >>> ser.resample('W').ffill()
+ >>> ser.resample("W").ffill()
2023-01-01 1
2023-01-08 1
2023-01-15 2
@@ -590,7 +591,7 @@ def ffill(self, limit: int | None = None):
With upsampling and limiting (only fill the first new date with the
previous value):
- >>> ser.resample('W').ffill(limit=1)
+ >>> ser.resample("W").ffill(limit=1)
2023-01-01 1.0
2023-01-08 1.0
2023-01-15 2.0
@@ -635,16 +636,13 @@ def nearest(self, limit: int | None = None):
Examples
--------
- >>> s = pd.Series([1, 2],
- ... index=pd.date_range('20180101',
- ... periods=2,
- ... freq='1h'))
+ >>> s = pd.Series([1, 2], index=pd.date_range("20180101", periods=2, freq="1h"))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
Freq: h, dtype: int64
- >>> s.resample('15min').nearest()
+ >>> s.resample("15min").nearest()
2018-01-01 00:00:00 1
2018-01-01 00:15:00 1
2018-01-01 00:30:00 2
@@ -654,7 +652,7 @@ def nearest(self, limit: int | None = None):
Limit the number of upsampled values imputed by the nearest:
- >>> s.resample('15min').nearest(limit=1)
+ >>> s.resample("15min").nearest(limit=1)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 1.0
2018-01-01 00:30:00 NaN
@@ -706,15 +704,16 @@ def bfill(self, limit: int | None = None):
--------
Resampling a Series:
- >>> s = pd.Series([1, 2, 3],
- ... index=pd.date_range('20180101', periods=3, freq='h'))
+ >>> s = pd.Series(
+ ... [1, 2, 3], index=pd.date_range("20180101", periods=3, freq="h")
+ ... )
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
2018-01-01 02:00:00 3
Freq: h, dtype: int64
- >>> s.resample('30min').bfill()
+ >>> s.resample("30min").bfill()
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
@@ -722,7 +721,7 @@ def bfill(self, limit: int | None = None):
2018-01-01 02:00:00 3
Freq: 30min, dtype: int64
- >>> s.resample('15min').bfill(limit=2)
+ >>> s.resample("15min").bfill(limit=2)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 NaN
2018-01-01 00:30:00 2.0
@@ -736,16 +735,17 @@ def bfill(self, limit: int | None = None):
Resampling a DataFrame that has missing values:
- >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
- ... index=pd.date_range('20180101', periods=3,
- ... freq='h'))
+ >>> df = pd.DataFrame(
+ ... {"a": [2, np.nan, 6], "b": [1, 3, 5]},
+ ... index=pd.date_range("20180101", periods=3, freq="h"),
+ ... )
>>> df
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 01:00:00 NaN 3
2018-01-01 02:00:00 6.0 5
- >>> df.resample('30min').bfill()
+ >>> df.resample("30min").bfill()
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 00:30:00 NaN 3
@@ -753,7 +753,7 @@ def bfill(self, limit: int | None = None):
2018-01-01 01:30:00 6.0 5
2018-01-01 02:00:00 6.0 5
- >>> df.resample('15min').bfill(limit=2)
+ >>> df.resample("15min").bfill(limit=2)
a b
2018-01-01 00:00:00 2.0 1.0
2018-01-01 00:15:00 NaN NaN
@@ -818,8 +818,9 @@ def fillna(self, method, limit: int | None = None):
--------
Resampling a Series:
- >>> s = pd.Series([1, 2, 3],
- ... index=pd.date_range('20180101', periods=3, freq='h'))
+ >>> s = pd.Series(
+ ... [1, 2, 3], index=pd.date_range("20180101", periods=3, freq="h")
+ ... )
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
@@ -836,7 +837,7 @@ def fillna(self, method, limit: int | None = None):
2018-01-01 02:00:00 3.0
Freq: 30min, dtype: float64
- >>> s.resample('30min').fillna("backfill")
+ >>> s.resample("30min").fillna("backfill")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
@@ -844,7 +845,7 @@ def fillna(self, method, limit: int | None = None):
2018-01-01 02:00:00 3
Freq: 30min, dtype: int64
- >>> s.resample('15min').fillna("backfill", limit=2)
+ >>> s.resample("15min").fillna("backfill", limit=2)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 NaN
2018-01-01 00:30:00 2.0
@@ -856,7 +857,7 @@ def fillna(self, method, limit: int | None = None):
2018-01-01 02:00:00 3.0
Freq: 15min, dtype: float64
- >>> s.resample('30min').fillna("pad")
+ >>> s.resample("30min").fillna("pad")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 1
2018-01-01 01:00:00 2
@@ -864,7 +865,7 @@ def fillna(self, method, limit: int | None = None):
2018-01-01 02:00:00 3
Freq: 30min, dtype: int64
- >>> s.resample('30min').fillna("nearest")
+ >>> s.resample("30min").fillna("nearest")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
@@ -874,15 +875,16 @@ def fillna(self, method, limit: int | None = None):
Missing values present before the upsampling are not affected.
- >>> sm = pd.Series([1, None, 3],
- ... index=pd.date_range('20180101', periods=3, freq='h'))
+ >>> sm = pd.Series(
+ ... [1, None, 3], index=pd.date_range("20180101", periods=3, freq="h")
+ ... )
>>> sm
2018-01-01 00:00:00 1.0
2018-01-01 01:00:00 NaN
2018-01-01 02:00:00 3.0
Freq: h, dtype: float64
- >>> sm.resample('30min').fillna('backfill')
+ >>> sm.resample("30min").fillna("backfill")
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 NaN
@@ -890,7 +892,7 @@ def fillna(self, method, limit: int | None = None):
2018-01-01 02:00:00 3.0
Freq: 30min, dtype: float64
- >>> sm.resample('30min').fillna('pad')
+ >>> sm.resample("30min").fillna("pad")
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 1.0
2018-01-01 01:00:00 NaN
@@ -898,7 +900,7 @@ def fillna(self, method, limit: int | None = None):
2018-01-01 02:00:00 3.0
Freq: 30min, dtype: float64
- >>> sm.resample('30min').fillna('nearest')
+ >>> sm.resample("30min").fillna("nearest")
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 NaN
@@ -909,16 +911,17 @@ def fillna(self, method, limit: int | None = None):
DataFrame resampling is done column-wise. All the same options are
available.
- >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
- ... index=pd.date_range('20180101', periods=3,
- ... freq='h'))
+ >>> df = pd.DataFrame(
+ ... {"a": [2, np.nan, 6], "b": [1, 3, 5]},
+ ... index=pd.date_range("20180101", periods=3, freq="h"),
+ ... )
>>> df
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 01:00:00 NaN 3
2018-01-01 02:00:00 6.0 5
- >>> df.resample('30min').fillna("bfill")
+ >>> df.resample("30min").fillna("bfill")
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 00:30:00 NaN 3
@@ -1136,15 +1139,19 @@ def asfreq(self, fill_value=None):
Examples
--------
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-31', '2023-02-01', '2023-02-28']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-31", "2023-02-01", "2023-02-28"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-31 2
2023-02-01 3
2023-02-28 4
dtype: int64
- >>> ser.resample('MS').asfreq()
+ >>> ser.resample("MS").asfreq()
2023-01-01 1
2023-02-01 3
Freq: MS, dtype: int64
@@ -1180,15 +1187,19 @@ def sum(
Examples
--------
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
- >>> ser.resample('MS').sum()
+ >>> ser.resample("MS").sum()
2023-01-01 3
2023-02-01 7
Freq: MS, dtype: int64
@@ -1224,15 +1235,19 @@ def prod(
Examples
--------
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
- >>> ser.resample('MS').prod()
+ >>> ser.resample("MS").prod()
2023-01-01 2
2023-02-01 12
Freq: MS, dtype: int64
@@ -1254,15 +1269,19 @@ def min(
Examples
--------
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
- >>> ser.resample('MS').min()
+ >>> ser.resample("MS").min()
2023-01-01 1
2023-02-01 3
Freq: MS, dtype: int64
@@ -1284,15 +1303,19 @@ def max(
Examples
--------
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
- >>> ser.resample('MS').max()
+ >>> ser.resample("MS").max()
2023-01-01 2
2023-02-01 4
Freq: MS, dtype: int64
@@ -1353,15 +1376,19 @@ def mean(
Examples
--------
- >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
- ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
+ >>> ser = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
+ ... ),
+ ... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
- >>> ser.resample('MS').mean()
+ >>> ser.resample("MS").mean()
2023-01-01 1.5
2023-02-01 3.5
Freq: MS, dtype: float64
@@ -1398,14 +1425,20 @@ def std(
Examples
--------
- >>> ser = pd.Series([1, 3, 2, 4, 3, 8],
- ... index=pd.DatetimeIndex(['2023-01-01',
- ... '2023-01-10',
- ... '2023-01-15',
- ... '2023-02-01',
- ... '2023-02-10',
- ... '2023-02-15']))
- >>> ser.resample('MS').std()
+ >>> ser = pd.Series(
+ ... [1, 3, 2, 4, 3, 8],
+ ... index=pd.DatetimeIndex(
+ ... [
+ ... "2023-01-01",
+ ... "2023-01-10",
+ ... "2023-01-15",
+ ... "2023-02-01",
+ ... "2023-02-10",
+ ... "2023-02-15",
+ ... ]
+ ... ),
+ ... )
+ >>> ser.resample("MS").std()
2023-01-01 1.000000
2023-02-01 2.645751
Freq: MS, dtype: float64
@@ -1443,19 +1476,25 @@ def var(
Examples
--------
- >>> ser = pd.Series([1, 3, 2, 4, 3, 8],
- ... index=pd.DatetimeIndex(['2023-01-01',
- ... '2023-01-10',
- ... '2023-01-15',
- ... '2023-02-01',
- ... '2023-02-10',
- ... '2023-02-15']))
- >>> ser.resample('MS').var()
+ >>> ser = pd.Series(
+ ... [1, 3, 2, 4, 3, 8],
+ ... index=pd.DatetimeIndex(
+ ... [
+ ... "2023-01-01",
+ ... "2023-01-10",
+ ... "2023-01-15",
+ ... "2023-02-01",
+ ... "2023-02-10",
+ ... "2023-02-15",
+ ... ]
+ ... ),
+ ... )
+ >>> ser.resample("MS").var()
2023-01-01 1.0
2023-02-01 7.0
Freq: MS, dtype: float64
- >>> ser.resample('MS').var(ddof=0)
+ >>> ser.resample("MS").var(ddof=0)
2023-01-01 0.666667
2023-02-01 4.666667
Freq: MS, dtype: float64
@@ -1563,19 +1602,25 @@ def quantile(self, q: float | list[float] | AnyArrayLike = 0.5, **kwargs):
Examples
--------
- >>> ser = pd.Series([1, 3, 2, 4, 3, 8],
- ... index=pd.DatetimeIndex(['2023-01-01',
- ... '2023-01-10',
- ... '2023-01-15',
- ... '2023-02-01',
- ... '2023-02-10',
- ... '2023-02-15']))
- >>> ser.resample('MS').quantile()
+ >>> ser = pd.Series(
+ ... [1, 3, 2, 4, 3, 8],
+ ... index=pd.DatetimeIndex(
+ ... [
+ ... "2023-01-01",
+ ... "2023-01-10",
+ ... "2023-01-15",
+ ... "2023-02-01",
+ ... "2023-02-10",
+ ... "2023-02-15",
+ ... ]
+ ... ),
+ ... )
+ >>> ser.resample("MS").quantile()
2023-01-01 2.0
2023-02-01 4.0
Freq: MS, dtype: float64
- >>> ser.resample('MS').quantile(.25)
+ >>> ser.resample("MS").quantile(0.25)
2023-01-01 1.5
2023-02-01 3.5
Freq: MS, dtype: float64
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 2558532bfb029..7e0bdbcb0ddba 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -238,8 +238,8 @@ def concat(
--------
Combine two ``Series``.
- >>> s1 = pd.Series(['a', 'b'])
- >>> s2 = pd.Series(['c', 'd'])
+ >>> s1 = pd.Series(["a", "b"])
+ >>> s2 = pd.Series(["c", "d"])
>>> pd.concat([s1, s2])
0 a
1 b
@@ -260,7 +260,7 @@ def concat(
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
- >>> pd.concat([s1, s2], keys=['s1', 's2'])
+ >>> pd.concat([s1, s2], keys=["s1", "s2"])
s1 0 a
1 b
s2 0 c
@@ -269,8 +269,7 @@ def concat(
Label the index keys you create with the ``names`` option.
- >>> pd.concat([s1, s2], keys=['s1', 's2'],
- ... names=['Series name', 'Row ID'])
+ >>> pd.concat([s1, s2], keys=["s1", "s2"], names=["Series name", "Row ID"])
Series name Row ID
s1 0 a
1 b
@@ -280,14 +279,12 @@ def concat(
Combine two ``DataFrame`` objects with identical columns.
- >>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
- ... columns=['letter', 'number'])
+ >>> df1 = pd.DataFrame([["a", 1], ["b", 2]], columns=["letter", "number"])
>>> df1
letter number
0 a 1
1 b 2
- >>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
- ... columns=['letter', 'number'])
+ >>> df2 = pd.DataFrame([["c", 3], ["d", 4]], columns=["letter", "number"])
>>> df2
letter number
0 c 3
@@ -303,8 +300,9 @@ def concat(
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.
- >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
- ... columns=['letter', 'number', 'animal'])
+ >>> df3 = pd.DataFrame(
+ ... [["c", 3, "cat"], ["d", 4, "dog"]], columns=["letter", "number", "animal"]
+ ... )
>>> df3
letter number animal
0 c 3 cat
@@ -330,8 +328,9 @@ def concat(
Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.
- >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
- ... columns=['animal', 'name'])
+ >>> df4 = pd.DataFrame(
+ ... [["bird", "polly"], ["monkey", "george"]], columns=["animal", "name"]
+ ... )
>>> pd.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
@@ -340,11 +339,11 @@ def concat(
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
- >>> df5 = pd.DataFrame([1], index=['a'])
+ >>> df5 = pd.DataFrame([1], index=["a"])
>>> df5
0
a 1
- >>> df6 = pd.DataFrame([2], index=['a'])
+ >>> df6 = pd.DataFrame([2], index=["a"])
>>> df6
0
a 2
@@ -355,11 +354,11 @@ def concat(
Append a single row to the end of a ``DataFrame`` object.
- >>> df7 = pd.DataFrame({'a': 1, 'b': 2}, index=[0])
+ >>> df7 = pd.DataFrame({"a": 1, "b": 2}, index=[0])
>>> df7
a b
0 1 2
- >>> new_row = pd.Series({'a': 3, 'b': 4})
+ >>> new_row = pd.Series({"a": 3, "b": 4})
>>> new_row
a 3
b 4
diff --git a/pandas/core/reshape/encoding.py b/pandas/core/reshape/encoding.py
index 2c74538175a58..fae5c082c72a0 100644
--- a/pandas/core/reshape/encoding.py
+++ b/pandas/core/reshape/encoding.py
@@ -101,7 +101,7 @@ def get_dummies(
Examples
--------
- >>> s = pd.Series(list('abca'))
+ >>> s = pd.Series(list("abca"))
>>> pd.get_dummies(s)
a b c
@@ -110,7 +110,7 @@ def get_dummies(
2 False False True
3 True False False
- >>> s1 = ['a', 'b', np.nan]
+ >>> s1 = ["a", "b", np.nan]
>>> pd.get_dummies(s1)
a b
@@ -124,16 +124,15 @@ def get_dummies(
1 False True False
2 False False True
- >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
- ... 'C': [1, 2, 3]})
+ >>> df = pd.DataFrame({"A": ["a", "b", "a"], "B": ["b", "a", "c"], "C": [1, 2, 3]})
- >>> pd.get_dummies(df, prefix=['col1', 'col2'])
+ >>> pd.get_dummies(df, prefix=["col1", "col2"])
C col1_a col1_b col2_a col2_b col2_c
0 1 True False False True False
1 2 False True True False False
2 3 True False False False True
- >>> pd.get_dummies(pd.Series(list('abcaa')))
+ >>> pd.get_dummies(pd.Series(list("abcaa")))
a b c
0 True False False
1 False True False
@@ -141,7 +140,7 @@ def get_dummies(
3 True False False
4 True False False
- >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
+ >>> pd.get_dummies(pd.Series(list("abcaa")), drop_first=True)
b c
0 False False
1 True False
@@ -149,7 +148,7 @@ def get_dummies(
3 False False
4 False False
- >>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
+ >>> pd.get_dummies(pd.Series(list("abc")), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
@@ -426,8 +425,7 @@ def from_dummies(
Examples
--------
- >>> df = pd.DataFrame({"a": [1, 0, 0, 1], "b": [0, 1, 0, 0],
- ... "c": [0, 0, 1, 0]})
+ >>> df = pd.DataFrame({"a": [1, 0, 0, 1], "b": [0, 1, 0, 0], "c": [0, 0, 1, 0]})
>>> df
a b c
@@ -442,9 +440,15 @@ def from_dummies(
2 c
3 a
- >>> df = pd.DataFrame({"col1_a": [1, 0, 1], "col1_b": [0, 1, 0],
- ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0],
- ... "col2_c": [0, 0, 1]})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "col1_a": [1, 0, 1],
+ ... "col1_b": [0, 1, 0],
+ ... "col2_a": [0, 1, 0],
+ ... "col2_b": [1, 0, 0],
+ ... "col2_c": [0, 0, 1],
+ ... }
+ ... )
>>> df
col1_a col1_b col2_a col2_b col2_c
@@ -458,9 +462,15 @@ def from_dummies(
1 b a
2 a c
- >>> df = pd.DataFrame({"col1_a": [1, 0, 0], "col1_b": [0, 1, 0],
- ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0],
- ... "col2_c": [0, 0, 0]})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "col1_a": [1, 0, 0],
+ ... "col1_b": [0, 1, 0],
+ ... "col2_a": [0, 1, 0],
+ ... "col2_b": [1, 0, 0],
+ ... "col2_c": [0, 0, 0],
+ ... }
+ ... )
>>> df
col1_a col1_b col2_a col2_b col2_c
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index 3ee896275a67a..7b8ef8da3ab46 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -176,15 +176,21 @@ def lreshape(data: DataFrame, groups: dict, dropna: bool = True) -> DataFrame:
Examples
--------
- >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],
- ... 'team': ['Red Sox', 'Yankees'],
- ... 'year1': [2007, 2007], 'year2': [2008, 2008]})
+ >>> data = pd.DataFrame(
+ ... {
+ ... "hr1": [514, 573],
+ ... "hr2": [545, 526],
+ ... "team": ["Red Sox", "Yankees"],
+ ... "year1": [2007, 2007],
+ ... "year2": [2008, 2008],
+ ... }
+ ... )
>>> data
hr1 hr2 team year1 year2
0 514 545 Red Sox 2007 2008
1 573 526 Yankees 2007 2008
- >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})
+ >>> pd.lreshape(data, {"year": ["year1", "year2"], "hr": ["hr1", "hr2"]})
team year hr
0 Red Sox 2007 514
1 Yankees 2007 573
@@ -290,12 +296,15 @@ def wide_to_long(
Examples
--------
>>> np.random.seed(123)
- >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
- ... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
- ... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
- ... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
- ... "X" : dict(zip(range(3), np.random.randn(3)))
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "A1970": {0: "a", 1: "b", 2: "c"},
+ ... "A1980": {0: "d", 1: "e", 2: "f"},
+ ... "B1970": {0: 2.5, 1: 1.2, 2: 0.7},
+ ... "B1980": {0: 3.2, 1: 1.3, 2: 0.1},
+ ... "X": dict(zip(range(3), np.random.randn(3))),
+ ... }
+ ... )
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
@@ -315,12 +324,14 @@ def wide_to_long(
With multiple id columns
- >>> df = pd.DataFrame({
- ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
- ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
- ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
- ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "famid": [1, 1, 1, 2, 2, 2, 3, 3, 3],
+ ... "birth": [1, 2, 3, 1, 2, 3, 1, 2, 3],
+ ... "ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
+ ... "ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9],
+ ... }
+ ... )
>>> df
famid birth ht1 ht2
0 1 1 2.8 3.4
@@ -332,7 +343,7 @@ def wide_to_long(
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
- >>> long_format = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')
+ >>> long_format = pd.wide_to_long(df, stubnames="ht", i=["famid", "birth"], j="age")
>>> long_format
... # doctest: +NORMALIZE_WHITESPACE
ht
@@ -359,7 +370,7 @@ def wide_to_long(
Going from long back to wide just takes some creative use of `unstack`
>>> wide_format = long_format.unstack()
- >>> wide_format.columns = wide_format.columns.map('{0[0]}{0[1]}'.format)
+ >>> wide_format.columns = wide_format.columns.map("{0[0]}{0[1]}".format)
>>> wide_format.reset_index()
famid birth ht1 ht2
0 1 1 2.8 3.4
@@ -375,20 +386,23 @@ def wide_to_long(
Less wieldy column names are also handled
>>> np.random.seed(0)
- >>> df = pd.DataFrame({'A(weekly)-2010': np.random.rand(3),
- ... 'A(weekly)-2011': np.random.rand(3),
- ... 'B(weekly)-2010': np.random.rand(3),
- ... 'B(weekly)-2011': np.random.rand(3),
- ... 'X' : np.random.randint(3, size=3)})
- >>> df['id'] = df.index
+ >>> df = pd.DataFrame(
+ ... {
+ ... "A(weekly)-2010": np.random.rand(3),
+ ... "A(weekly)-2011": np.random.rand(3),
+ ... "B(weekly)-2010": np.random.rand(3),
+ ... "B(weekly)-2011": np.random.rand(3),
+ ... "X": np.random.randint(3, size=3),
+ ... }
+ ... )
+ >>> df["id"] = df.index
>>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
A(weekly)-2010 A(weekly)-2011 B(weekly)-2010 B(weekly)-2011 X id
0 0.548814 0.544883 0.437587 0.383442 0 0
1 0.715189 0.423655 0.891773 0.791725 1 1
2 0.602763 0.645894 0.963663 0.528895 1 2
- >>> pd.wide_to_long(df, ['A(weekly)', 'B(weekly)'], i='id',
- ... j='year', sep='-')
+ >>> pd.wide_to_long(df, ["A(weekly)", "B(weekly)"], i="id", j="year", sep="-")
... # doctest: +NORMALIZE_WHITESPACE
X A(weekly) B(weekly)
id year
@@ -403,8 +417,13 @@ def wide_to_long(
stubnames and pass that list on to wide_to_long
>>> stubnames = sorted(
- ... set([match[0] for match in df.columns.str.findall(
- ... r'[A-B]\(.*\)').values if match != []])
+ ... set(
+ ... [
+ ... match[0]
+ ... for match in df.columns.str.findall(r"[A-B]\(.*\)").values
+ ... if match != []
+ ... ]
+ ... )
... )
>>> list(stubnames)
['A(weekly)', 'B(weekly)']
@@ -412,12 +431,14 @@ def wide_to_long(
All of the above examples have integers as suffixes. It is possible to
have non-integers as suffixes.
- >>> df = pd.DataFrame({
- ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
- ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
- ... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
- ... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "famid": [1, 1, 1, 2, 2, 2, 3, 3, 3],
+ ... "birth": [1, 2, 3, 1, 2, 3, 1, 2, 3],
+ ... "ht_one": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
+ ... "ht_two": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9],
+ ... }
+ ... )
>>> df
famid birth ht_one ht_two
0 1 1 2.8 3.4
@@ -430,8 +451,9 @@ def wide_to_long(
7 3 2 2.3 3.4
8 3 3 2.1 2.9
- >>> long_format = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age',
- ... sep='_', suffix=r'\w+')
+ >>> long_format = pd.wide_to_long(
+ ... df, stubnames="ht", i=["famid", "birth"], j="age", sep="_", suffix=r"\w+"
+ ... )
>>> long_format
... # doctest: +NORMALIZE_WHITESPACE
ht
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 95261394994ae..4f10fd729723e 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -366,7 +366,7 @@ def merge_ordered(
... {
... "key": ["a", "c", "e", "a", "c", "e"],
... "lvalue": [1, 2, 3, 1, 2, 3],
- ... "group": ["a", "a", "a", "b", "b", "b"]
+ ... "group": ["a", "a", "a", "b", "b", "b"],
... }
... )
>>> df1
@@ -597,7 +597,7 @@ def merge_asof(
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.049"),
... pd.Timestamp("2016-05-25 13:30:00.072"),
- ... pd.Timestamp("2016-05-25 13:30:00.075")
+ ... pd.Timestamp("2016-05-25 13:30:00.075"),
... ],
... "ticker": [
... "GOOG",
@@ -607,10 +607,10 @@ def merge_asof(
... "GOOG",
... "AAPL",
... "GOOG",
- ... "MSFT"
+ ... "MSFT",
... ],
... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
- ... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]
+ ... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
... }
... )
>>> quotes
@@ -631,11 +631,11 @@ def merge_asof(
... pd.Timestamp("2016-05-25 13:30:00.038"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
- ... pd.Timestamp("2016-05-25 13:30:00.048")
+ ... pd.Timestamp("2016-05-25 13:30:00.048"),
... ],
... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
... "price": [51.95, 51.95, 720.77, 720.92, 98.0],
- ... "quantity": [75, 155, 100, 100, 100]
+ ... "quantity": [75, 155, 100, 100, 100],
... }
... )
>>> trades
@@ -678,7 +678,7 @@ def merge_asof(
... on="time",
... by="ticker",
... tolerance=pd.Timedelta("10ms"),
- ... allow_exact_matches=False
+ ... allow_exact_matches=False,
... )
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 51d91e4113c4e..7d563ed7b62f6 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -651,14 +651,55 @@ def crosstab(
Examples
--------
- >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
- ... "bar", "bar", "foo", "foo", "foo"], dtype=object)
- >>> b = np.array(["one", "one", "one", "two", "one", "one",
- ... "one", "two", "two", "two", "one"], dtype=object)
- >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
- ... "shiny", "dull", "shiny", "shiny", "shiny"],
- ... dtype=object)
- >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
+ >>> a = np.array(
+ ... [
+ ... "foo",
+ ... "foo",
+ ... "foo",
+ ... "foo",
+ ... "bar",
+ ... "bar",
+ ... "bar",
+ ... "bar",
+ ... "foo",
+ ... "foo",
+ ... "foo",
+ ... ],
+ ... dtype=object,
+ ... )
+ >>> b = np.array(
+ ... [
+ ... "one",
+ ... "one",
+ ... "one",
+ ... "two",
+ ... "one",
+ ... "one",
+ ... "one",
+ ... "two",
+ ... "two",
+ ... "two",
+ ... "one",
+ ... ],
+ ... dtype=object,
+ ... )
+ >>> c = np.array(
+ ... [
+ ... "dull",
+ ... "dull",
+ ... "shiny",
+ ... "dull",
+ ... "dull",
+ ... "shiny",
+ ... "shiny",
+ ... "dull",
+ ... "shiny",
+ ... "shiny",
+ ... "shiny",
+ ... ],
+ ... dtype=object,
+ ... )
+ >>> pd.crosstab(a, [b, c], rownames=["a"], colnames=["b", "c"])
b one two
c dull shiny dull shiny
a
@@ -669,8 +710,8 @@ def crosstab(
shown in the output because dropna is True by default. Set
dropna=False to preserve categories with no data.
- >>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
- >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
+ >>> foo = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
+ >>> bar = pd.Categorical(["d", "e"], categories=["d", "e", "f"])
>>> pd.crosstab(foo, bar)
col_0 d e
row_0
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index ad313b112a2e7..bb544b588dd35 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -82,8 +82,9 @@ class _Unstacker:
Examples
--------
- >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
- ... ('two', 'a'), ('two', 'b')])
+ >>> index = pd.MultiIndex.from_tuples(
+ ... [("one", "a"), ("one", "b"), ("two", "a"), ("two", "b")]
+ ... )
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
@@ -889,7 +890,7 @@ def _reorder_for_extension_array_stack(
Examples
--------
- >>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f'])
+ >>> arr = np.array(["a", "b", "c", "d", "e", "f"])
>>> _reorder_for_extension_array_stack(arr, 2, 3)
array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 4aecc9794384a..82c697306edb2 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -166,16 +166,14 @@ def cut(
Discovers the same bins, but assign them specific labels. Notice that
the returned Categorical's categories are `labels` and is ordered.
- >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),
- ... 3, labels=["bad", "medium", "good"])
+ >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, labels=["bad", "medium", "good"])
['bad', 'good', 'medium', 'medium', 'good', 'bad']
Categories (3, object): ['bad' < 'medium' < 'good']
``ordered=False`` will result in unordered categories when labels are passed.
This parameter can be used to allow non-unique labels:
- >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3,
- ... labels=["B", "A", "B"], ordered=False)
+ >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, labels=["B", "A", "B"], ordered=False)
['B', 'B', 'A', 'A', 'B', 'B']
Categories (2, object): ['A', 'B']
@@ -186,8 +184,7 @@ def cut(
Passing a Series as an input returns a Series with categorical dtype:
- >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
- ... index=['a', 'b', 'c', 'd', 'e'])
+ >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), index=["a", "b", "c", "d", "e"])
>>> pd.cut(s, 3)
... # doctest: +ELLIPSIS
a (1.992, 4.667]
@@ -201,8 +198,7 @@ def cut(
Passing a Series as an input returns a Series with mapping value.
It is used to map numerically to intervals based on bins.
- >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
- ... index=['a', 'b', 'c', 'd', 'e'])
+ >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), index=["a", "b", "c", "d", "e"])
>>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)
... # doctest: +ELLIPSIS
(a 1.0
@@ -215,8 +211,14 @@ def cut(
Use `drop` optional when bins is not unique
- >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
- ... right=False, duplicates='drop')
+ >>> pd.cut(
+ ... s,
+ ... [0, 2, 4, 6, 10, 10],
+ ... labels=False,
+ ... retbins=True,
+ ... right=False,
+ ... duplicates="drop",
+ ... )
... # doctest: +ELLIPSIS
(a 1.0
b 2.0
diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py
index 476e3922b6989..0f1fbc662e1a6 100644
--- a/pandas/core/reshape/util.py
+++ b/pandas/core/reshape/util.py
@@ -25,7 +25,7 @@ def cartesian_product(X) -> list[np.ndarray]:
Examples
--------
- >>> cartesian_product([list('ABC'), [1, 2]])
+ >>> cartesian_product([list("ABC"), [1, 2]])
[array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='<U1'), array([1, 2, 1, 2, 1, 2])]
See Also
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 78a3bdd2281ce..641a44efbf286 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -294,8 +294,8 @@ class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc]
--------
Constructing Series from a dictionary with an Index specified
- >>> d = {'a': 1, 'b': 2, 'c': 3}
- >>> ser = pd.Series(data=d, index=['a', 'b', 'c'])
+ >>> d = {"a": 1, "b": 2, "c": 3}
+ >>> ser = pd.Series(data=d, index=["a", "b", "c"])
>>> ser
a 1
b 2
@@ -305,8 +305,8 @@ class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc]
The keys of the dictionary match with the Index values, hence the Index
values have no effect.
- >>> d = {'a': 1, 'b': 2, 'c': 3}
- >>> ser = pd.Series(data=d, index=['x', 'y', 'z'])
+ >>> d = {"a": 1, "b": 2, "c": 3}
+ >>> ser = pd.Series(data=d, index=["x", "y", "z"])
>>> ser
x NaN
y NaN
@@ -733,7 +733,7 @@ def name(self) -> Hashable:
--------
The Series name can be set initially when calling the constructor.
- >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers')
+ >>> s = pd.Series([1, 2, 3], dtype=np.int64, name="Numbers")
>>> s
0 1
1 2
@@ -748,8 +748,9 @@ def name(self) -> Hashable:
The name of a Series within a DataFrame is its column name.
- >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]],
- ... columns=["Odd Numbers", "Even Numbers"])
+ >>> df = pd.DataFrame(
+ ... [[1, 2], [3, 4], [5, 6]], columns=["Odd Numbers", "Even Numbers"]
+ ... )
>>> df
Odd Numbers Even Numbers
0 1 2
@@ -790,17 +791,16 @@ def values(self):
>>> pd.Series([1, 2, 3]).values
array([1, 2, 3])
- >>> pd.Series(list('aabc')).values
+ >>> pd.Series(list("aabc")).values
array(['a', 'a', 'b', 'c'], dtype=object)
- >>> pd.Series(list('aabc')).astype('category').values
+ >>> pd.Series(list("aabc")).astype("category").values
['a', 'a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
Timezone aware datetime data is converted to UTC:
- >>> pd.Series(pd.date_range('20130101', periods=3,
- ... tz='US/Eastern')).values
+ >>> pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern")).values
array(['2013-01-01T05:00:00.000000000',
'2013-01-02T05:00:00.000000000',
'2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')
@@ -985,7 +985,7 @@ def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:
For timezone-aware data, the timezones may be retained with
``dtype='object'``
- >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
+ >>> tzser = pd.Series(pd.date_range("2000", periods=2, tz="CET"))
>>> np.asarray(tzser, dtype="object")
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
@@ -1425,7 +1425,7 @@ def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series:
Examples
--------
- >>> s = pd.Series(['a', 'b', 'c'])
+ >>> s = pd.Series(["a", "b", "c"])
>>> s
0 a
1 b
@@ -1541,8 +1541,11 @@ def reset_index(
Examples
--------
- >>> s = pd.Series([1, 2, 3, 4], name='foo',
- ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
+ >>> s = pd.Series(
+ ... [1, 2, 3, 4],
+ ... name="foo",
+ ... index=pd.Index(["a", "b", "c", "d"], name="idx"),
+ ... )
Generate a DataFrame with default index.
@@ -1555,7 +1558,7 @@ def reset_index(
To specify the name of the new column use `name`.
- >>> s.reset_index(name='values')
+ >>> s.reset_index(name="values")
idx values
0 a 1
1 b 2
@@ -1574,16 +1577,19 @@ def reset_index(
The `level` parameter is interesting for Series with a multi-level
index.
- >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),
- ... np.array(['one', 'two', 'one', 'two'])]
+ >>> arrays = [
+ ... np.array(["bar", "bar", "baz", "baz"]),
+ ... np.array(["one", "two", "one", "two"]),
+ ... ]
>>> s2 = pd.Series(
- ... range(4), name='foo',
- ... index=pd.MultiIndex.from_arrays(arrays,
- ... names=['a', 'b']))
+ ... range(4),
+ ... name="foo",
+ ... index=pd.MultiIndex.from_arrays(arrays, names=["a", "b"]),
+ ... )
To remove a specific level from the Index, use `level`.
- >>> s2.reset_index(level='a')
+ >>> s2.reset_index(level="a")
a foo
b
one bar 0
@@ -1863,7 +1869,7 @@ def items(self) -> Iterable[tuple[Hashable, Any]]:
Examples
--------
- >>> s = pd.Series(['A', 'B', 'C'])
+ >>> s = pd.Series(["A", "B", "C"])
>>> for index, value in s.items():
... print(f"Index : {index}, Value : {value}")
Index : 0, Value : A
@@ -1966,8 +1972,7 @@ def to_frame(self, name: Hashable = lib.no_default) -> DataFrame:
Examples
--------
- >>> s = pd.Series(["a", "b", "c"],
- ... name="vals")
+ >>> s = pd.Series(["a", "b", "c"], name="vals")
>>> s.to_frame()
vals
0 a
@@ -2245,16 +2250,17 @@ def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation
Examples
--------
- >>> pd.Series([2, 1, 3, 3], name='A').unique()
+ >>> pd.Series([2, 1, 3, 3], name="A").unique()
array([2, 1, 3])
- >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
+ >>> pd.Series([pd.Timestamp("2016-01-01") for _ in range(3)]).unique()
<DatetimeArray>
['2016-01-01 00:00:00']
Length: 1, dtype: datetime64[ns]
- >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern')
- ... for _ in range(3)]).unique()
+ >>> pd.Series(
+ ... [pd.Timestamp("2016-01-01", tz="US/Eastern") for _ in range(3)]
+ ... ).unique()
<DatetimeArray>
['2016-01-01 00:00:00-05:00']
Length: 1, dtype: datetime64[ns, US/Eastern]
@@ -2262,11 +2268,12 @@ def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation
An Categorical will return categories in the order of
appearance and with the same dtype.
- >>> pd.Series(pd.Categorical(list('baabc'))).unique()
+ >>> pd.Series(pd.Categorical(list("baabc"))).unique()
['b', 'a', 'c']
Categories (3, object): ['a', 'b', 'c']
- >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'),
- ... ordered=True)).unique()
+ >>> pd.Series(
+ ... pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
+ ... ).unique()
['b', 'a', 'c']
Categories (3, object): ['a' < 'b' < 'c']
"""
@@ -2338,8 +2345,9 @@ def drop_duplicates(
--------
Generate a Series with duplicated entries.
- >>> s = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama', 'hippo'],
- ... name='animal')
+ >>> s = pd.Series(
+ ... ["llama", "cow", "llama", "beetle", "llama", "hippo"], name="animal"
+ ... )
>>> s
0 llama
1 cow
@@ -2363,7 +2371,7 @@ def drop_duplicates(
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
- >>> s.drop_duplicates(keep='last')
+ >>> s.drop_duplicates(keep="last")
1 cow
3 beetle
4 llama
@@ -2427,7 +2435,7 @@ def duplicated(self, keep: DropKeep = "first") -> Series:
By default, for each set of duplicated values, the first occurrence is
set on False and all others on True:
- >>> animals = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama'])
+ >>> animals = pd.Series(["llama", "cow", "llama", "beetle", "llama"])
>>> animals.duplicated()
0 False
1 False
@@ -2438,7 +2446,7 @@ def duplicated(self, keep: DropKeep = "first") -> Series:
which is equivalent to
- >>> animals.duplicated(keep='first')
+ >>> animals.duplicated(keep="first")
0 False
1 False
2 True
@@ -2449,7 +2457,7 @@ def duplicated(self, keep: DropKeep = "first") -> Series:
By using 'last', the last occurrence of each set of duplicated values
is set on False and all others on True:
- >>> animals.duplicated(keep='last')
+ >>> animals.duplicated(keep="last")
0 True
1 False
2 True
@@ -2516,8 +2524,7 @@ def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashab
Examples
--------
- >>> s = pd.Series(data=[1, None, 4, 1],
- ... index=['A', 'B', 'C', 'D'])
+ >>> s = pd.Series(data=[1, None, 4, 1], index=["A", "B", "C", "D"])
>>> s
A 1.0
B NaN
@@ -2599,8 +2606,7 @@ def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashab
Examples
--------
- >>> s = pd.Series(data=[1, None, 4, 3, 4],
- ... index=['A', 'B', 'C', 'D', 'E'])
+ >>> s = pd.Series(data=[1, None, 4, 3, 4], index=["A", "B", "C", "D", "E"])
>>> s
A 1.0
B NaN
@@ -2736,9 +2742,9 @@ def quantile(
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
- >>> s.quantile(.5)
+ >>> s.quantile(0.5)
2.5
- >>> s.quantile([.25, .5, .75])
+ >>> s.quantile([0.25, 0.5, 0.75])
0.25 1.75
0.50 2.50
0.75 3.25
@@ -2820,8 +2826,8 @@ def corr(
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
- >>> s1 = pd.Series([.2, .0, .6, .2])
- >>> s2 = pd.Series([.3, .6, .0, .1])
+ >>> s1 = pd.Series([0.2, 0.0, 0.6, 0.2])
+ >>> s2 = pd.Series([0.3, 0.6, 0.0, 0.1])
>>> s1.corr(s2, method=histogram_intersection)
0.3
@@ -3278,12 +3284,12 @@ def combine(
Consider 2 Datasets ``s1`` and ``s2`` containing
highest clocked speeds of different birds.
- >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
+ >>> s1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
>>> s1
falcon 330.0
eagle 160.0
dtype: float64
- >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
+ >>> s2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
>>> s2
falcon 345.0
eagle 200.0
@@ -3379,8 +3385,8 @@ def combine_first(self, other) -> Series:
Null values still persist if the location of that null value
does not exist in `other`
- >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0})
- >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0})
+ >>> s1 = pd.Series({"falcon": np.nan, "eagle": 160.0})
+ >>> s2 = pd.Series({"eagle": 200.0, "duck": 30.0})
>>> s1.combine_first(s2)
duck 30.0
eagle 160.0
@@ -3433,8 +3439,8 @@ def update(self, other: Series | Sequence | Mapping) -> None:
2 6
dtype: int64
- >>> s = pd.Series(['a', 'b', 'c'])
- >>> s.update(pd.Series(['d', 'e'], index=[0, 2]))
+ >>> s = pd.Series(["a", "b", "c"])
+ >>> s.update(pd.Series(["d", "e"], index=[0, 2]))
>>> s
0 d
1 b
@@ -3624,7 +3630,7 @@ def sort_values(
Sort values putting NAs first
- >>> s.sort_values(na_position='first')
+ >>> s.sort_values(na_position="first")
0 NaN
1 1.0
2 3.0
@@ -3634,7 +3640,7 @@ def sort_values(
Sort a series of strings
- >>> s = pd.Series(['z', 'b', 'd', 'a', 'c'])
+ >>> s = pd.Series(["z", "b", "d", "a", "c"])
>>> s
0 z
1 b
@@ -3654,7 +3660,7 @@ def sort_values(
Sort using a key function. Your `key` function will be
given the ``Series`` of values and should return an array-like.
- >>> s = pd.Series(['a', 'B', 'c', 'D', 'e'])
+ >>> s = pd.Series(["a", "B", "c", "D", "e"])
>>> s.sort_values()
1 B
3 D
@@ -3845,7 +3851,7 @@ def sort_index(
Examples
--------
- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4])
+ >>> s = pd.Series(["a", "b", "c", "d"], index=[3, 2, 1, 4])
>>> s.sort_index()
1 c
2 b
@@ -3865,8 +3871,8 @@ def sort_index(
By default NaNs are put at the end, but use `na_position` to place
them at the beginning
- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan])
- >>> s.sort_index(na_position='first')
+ >>> s = pd.Series(["a", "b", "c", "d"], index=[3, 2, 1, np.nan])
+ >>> s.sort_index(na_position="first")
NaN d
1.0 c
2.0 b
@@ -3875,10 +3881,10 @@ def sort_index(
Specify index level to sort
- >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo',
- ... 'baz', 'baz', 'bar', 'bar']),
- ... np.array(['two', 'one', 'two', 'one',
- ... 'two', 'one', 'two', 'one'])]
+ >>> arrays = [
+ ... np.array(["qux", "qux", "foo", "foo", "baz", "baz", "bar", "bar"]),
+ ... np.array(["two", "one", "two", "one", "two", "one", "two", "one"]),
+ ... ]
>>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays)
>>> s.sort_index(level=1)
bar one 8
@@ -3906,8 +3912,8 @@ def sort_index(
Apply a key function before sorting
- >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd'])
- >>> s.sort_index(key=lambda x : x.str.lower())
+ >>> s = pd.Series([1, 2, 3, 4], index=["A", "b", "C", "d"])
+ >>> s.sort_index(key=lambda x: x.str.lower())
A 1
b 2
C 3
@@ -4039,11 +4045,18 @@ def nlargest(
Examples
--------
- >>> countries_population = {"Italy": 59000000, "France": 65000000,
- ... "Malta": 434000, "Maldives": 434000,
- ... "Brunei": 434000, "Iceland": 337000,
- ... "Nauru": 11300, "Tuvalu": 11300,
- ... "Anguilla": 11300, "Montserrat": 5200}
+ >>> countries_population = {
+ ... "Italy": 59000000,
+ ... "France": 65000000,
+ ... "Malta": 434000,
+ ... "Maldives": 434000,
+ ... "Brunei": 434000,
+ ... "Iceland": 337000,
+ ... "Nauru": 11300,
+ ... "Tuvalu": 11300,
+ ... "Anguilla": 11300,
+ ... "Montserrat": 5200,
+ ... }
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
@@ -4081,7 +4094,7 @@ def nlargest(
Brunei will be kept since it is the last with value 434000 based on
the index order.
- >>> s.nlargest(3, keep='last')
+ >>> s.nlargest(3, keep="last")
France 65000000
Italy 59000000
Brunei 434000
@@ -4090,7 +4103,7 @@ def nlargest(
The `n` largest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has five elements due to the three duplicates.
- >>> s.nlargest(3, keep='all')
+ >>> s.nlargest(3, keep="all")
France 65000000
Italy 59000000
Malta 434000
@@ -4139,11 +4152,18 @@ def nsmallest(
Examples
--------
- >>> countries_population = {"Italy": 59000000, "France": 65000000,
- ... "Brunei": 434000, "Malta": 434000,
- ... "Maldives": 434000, "Iceland": 337000,
- ... "Nauru": 11300, "Tuvalu": 11300,
- ... "Anguilla": 11300, "Montserrat": 5200}
+ >>> countries_population = {
+ ... "Italy": 59000000,
+ ... "France": 65000000,
+ ... "Brunei": 434000,
+ ... "Malta": 434000,
+ ... "Maldives": 434000,
+ ... "Iceland": 337000,
+ ... "Nauru": 11300,
+ ... "Tuvalu": 11300,
+ ... "Anguilla": 11300,
+ ... "Montserrat": 5200,
+ ... }
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
@@ -4181,7 +4201,7 @@ def nsmallest(
duplicates. Anguilla and Tuvalu will be kept since they are the last
with value 11300 based on the index order.
- >>> s.nsmallest(3, keep='last')
+ >>> s.nsmallest(3, keep="last")
Montserrat 5200
Anguilla 11300
Tuvalu 11300
@@ -4190,7 +4210,7 @@ def nsmallest(
The `n` smallest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has four elements due to the three duplicates.
- >>> s.nsmallest(3, keep='all')
+ >>> s.nsmallest(3, keep="all")
Montserrat 5200
Nauru 11300
Tuvalu 11300
@@ -4314,8 +4334,10 @@ def reorder_levels(self, order: Sequence[Level]) -> Series:
Examples
--------
- >>> arrays = [np.array(["dog", "dog", "cat", "cat", "bird", "bird"]),
- ... np.array(["white", "black", "white", "black", "white", "black"])]
+ >>> arrays = [
+ ... np.array(["dog", "dog", "cat", "cat", "bird", "bird"]),
+ ... np.array(["white", "black", "white", "black", "white", "black"]),
+ ... ]
>>> s = pd.Series([1, 2, 3, 3, 5, 2], index=arrays)
>>> s
dog white 1
@@ -4377,7 +4399,7 @@ def explode(self, ignore_index: bool = False) -> Series:
Examples
--------
- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]])
+ >>> s = pd.Series([[1, 2, 3], "foo", [], [3, 4]])
>>> s
0 [1, 2, 3]
1 foo
@@ -4439,9 +4461,10 @@ def unstack(
Examples
--------
- >>> s = pd.Series([1, 2, 3, 4],
- ... index=pd.MultiIndex.from_product([['one', 'two'],
- ... ['a', 'b']]))
+ >>> s = pd.Series(
+ ... [1, 2, 3, 4],
+ ... index=pd.MultiIndex.from_product([["one", "two"], ["a", "b"]]),
+ ... )
>>> s
one a 1
b 2
@@ -4508,7 +4531,7 @@ def map(
Examples
--------
- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
+ >>> s = pd.Series(["cat", "dog", np.nan, "rabbit"])
>>> s
0 cat
1 dog
@@ -4520,7 +4543,7 @@ def map(
in the ``dict`` are converted to ``NaN``, unless the dict has a default
value (e.g. ``defaultdict``):
- >>> s.map({'cat': 'kitten', 'dog': 'puppy'})
+ >>> s.map({"cat": "kitten", "dog": "puppy"})
0 kitten
1 puppy
2 NaN
@@ -4529,7 +4552,7 @@ def map(
It also accepts a function:
- >>> s.map('I am a {}'.format)
+ >>> s.map("I am a {}".format)
0 I am a cat
1 I am a dog
2 I am a nan
@@ -4539,7 +4562,7 @@ def map(
To avoid applying the function to missing values (and keep them as
``NaN``) ``na_action='ignore'`` can be used:
- >>> s.map('I am a {}'.format, na_action='ignore')
+ >>> s.map("I am a {}".format, na_action="ignore")
0 I am a cat
1 I am a dog
2 NaN
@@ -4696,8 +4719,7 @@ def apply(
--------
Create a series with typical summer temperatures for each city.
- >>> s = pd.Series([20, 21, 12],
- ... index=['London', 'New York', 'Helsinki'])
+ >>> s = pd.Series([20, 21, 12], index=["London", "New York", "Helsinki"])
>>> s
London 20
New York 21
@@ -4708,7 +4730,7 @@ def apply(
argument to ``apply()``.
>>> def square(x):
- ... return x ** 2
+ ... return x**2
>>> s.apply(square)
London 400
New York 441
@@ -4718,7 +4740,7 @@ def apply(
Square the values by passing an anonymous function as an
argument to ``apply()``.
- >>> s.apply(lambda x: x ** 2)
+ >>> s.apply(lambda x: x**2)
London 400
New York 441
Helsinki 144
@@ -4912,7 +4934,7 @@ def rename(
1 2
2 3
Name: my_name, dtype: int64
- >>> s.rename(lambda x: x ** 2) # function, changes labels
+ >>> s.rename(lambda x: x**2) # function, changes labels
0 1
1 2
4 3
@@ -5216,7 +5238,7 @@ def drop(
Examples
--------
- >>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C'])
+ >>> s = pd.Series(data=np.arange(3), index=["A", "B", "C"])
>>> s
A 0
B 1
@@ -5225,18 +5247,17 @@ def drop(
Drop labels B en C
- >>> s.drop(labels=['B', 'C'])
+ >>> s.drop(labels=["B", "C"])
A 0
dtype: int64
Drop 2nd level label in MultiIndex Series
- >>> midx = pd.MultiIndex(levels=[['llama', 'cow', 'falcon'],
- ... ['speed', 'weight', 'length']],
- ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
- ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
- >>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
- ... index=midx)
+ >>> midx = pd.MultiIndex(
+ ... levels=[["llama", "cow", "falcon"], ["speed", "weight", "length"]],
+ ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
+ ... )
+ >>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
llama speed 45.0
weight 200.0
@@ -5249,7 +5270,7 @@ def drop(
length 0.3
dtype: float64
- >>> s.drop(labels='weight', level=1)
+ >>> s.drop(labels="weight", level=1)
llama speed 45.0
length 1.2
cow speed 30.0
@@ -5418,9 +5439,10 @@ def isin(self, values) -> Series:
Examples
--------
- >>> s = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama',
- ... 'hippo'], name='animal')
- >>> s.isin(['cow', 'llama'])
+ >>> s = pd.Series(
+ ... ["llama", "cow", "llama", "beetle", "llama", "hippo"], name="animal"
+ ... )
+ >>> s.isin(["cow", "llama"])
0 True
1 True
2 True
@@ -5431,7 +5453,7 @@ def isin(self, values) -> Series:
To invert the boolean values, use the ``~`` operator:
- >>> ~s.isin(['cow', 'llama'])
+ >>> ~s.isin(["cow", "llama"])
0 False
1 False
2 False
@@ -5443,7 +5465,7 @@ def isin(self, values) -> Series:
Passing a single string as ``s.isin('llama')`` will raise an error. Use
a list of one element instead:
- >>> s.isin(['llama'])
+ >>> s.isin(["llama"])
0 True
1 False
2 True
@@ -5454,10 +5476,10 @@ def isin(self, values) -> Series:
Strings and integers are distinct and are therefore not comparable:
- >>> pd.Series([1]).isin(['1'])
+ >>> pd.Series([1]).isin(["1"])
0 False
dtype: bool
- >>> pd.Series([1.1]).isin(['1.1'])
+ >>> pd.Series([1.1]).isin(["1.1"])
0 False
dtype: bool
"""
@@ -5531,8 +5553,8 @@ def between(
`left` and `right` can be any scalar value:
- >>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])
- >>> s.between('Anna', 'Daniel')
+ >>> s = pd.Series(["Alice", "Bob", "Carol", "Eve"])
+ >>> s.between("Anna", "Daniel")
0 False
1 True
2 True
@@ -5600,12 +5622,16 @@ def case_when(
Examples
--------
- >>> c = pd.Series([6, 7, 8, 9], name='c')
+ >>> c = pd.Series([6, 7, 8, 9], name="c")
>>> a = pd.Series([0, 0, 1, 2])
>>> b = pd.Series([0, 3, 4, 5])
- >>> c.case_when(caselist=[(a.gt(0), a), # condition, replacement
- ... (b.gt(0), b)])
+ >>> c.case_when(
+ ... caselist=[
+ ... (a.gt(0), a), # condition, replacement
+ ... (b.gt(0), b),
+ ... ]
+ ... )
0 6
1 3
2 1
@@ -5764,7 +5790,7 @@ def dropna(
Examples
--------
- >>> ser = pd.Series([1., 2., np.nan])
+ >>> ser = pd.Series([1.0, 2.0, np.nan])
>>> ser
0 1.0
1 2.0
@@ -5781,7 +5807,7 @@ def dropna(
Empty strings are not considered NA values. ``None`` is considered an
NA value.
- >>> ser = pd.Series([np.nan, 2, pd.NaT, '', None, 'I stay'])
+ >>> ser = pd.Series([np.nan, 2, pd.NaT, "", None, "I stay"])
>>> ser
0 NaN
1 2
@@ -5857,7 +5883,7 @@ def to_timestamp(
Examples
--------
- >>> idx = pd.PeriodIndex(['2023', '2024', '2025'], freq='Y')
+ >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")
>>> s1 = pd.Series([1, 2, 3], index=idx)
>>> s1
2023 1
@@ -5877,7 +5903,7 @@ def to_timestamp(
Using `freq` which is the offset that the Timestamps will have
>>> s2 = pd.Series([1, 2, 3], index=idx)
- >>> s2 = s2.to_timestamp(freq='M')
+ >>> s2 = s2.to_timestamp(freq="M")
>>> s2
2023-01-31 1
2024-01-31 2
@@ -5922,7 +5948,7 @@ def to_period(self, freq: str | None = None, copy: bool | None = None) -> Series
Examples
--------
- >>> idx = pd.DatetimeIndex(['2023', '2024', '2025'])
+ >>> idx = pd.DatetimeIndex(["2023", "2024", "2025"])
>>> s = pd.Series([1, 2, 3], index=idx)
>>> s = s.to_period()
>>> s
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index fa85897872981..bd523969fba13 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -538,20 +538,20 @@ def cat(
When not passing `others`, all values are concatenated into a single
string:
- >>> s = pd.Series(['a', 'b', np.nan, 'd'])
- >>> s.str.cat(sep=' ')
+ >>> s = pd.Series(["a", "b", np.nan, "d"])
+ >>> s.str.cat(sep=" ")
'a b d'
By default, NA values in the Series are ignored. Using `na_rep`, they
can be given a representation:
- >>> s.str.cat(sep=' ', na_rep='?')
+ >>> s.str.cat(sep=" ", na_rep="?")
'a b ? d'
If `others` is specified, corresponding values are concatenated with
the separator. Result will be a Series of strings.
- >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',')
+ >>> s.str.cat(["A", "B", "C", "D"], sep=",")
0 a,A
1 b,B
2 NaN
@@ -561,7 +561,7 @@ def cat(
Missing values will remain missing in the result, but can again be
represented using `na_rep`
- >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')
+ >>> s.str.cat(["A", "B", "C", "D"], sep=",", na_rep="-")
0 a,A
1 b,B
2 -,C
@@ -571,7 +571,7 @@ def cat(
If `sep` is not specified, the values are concatenated without
separation.
- >>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')
+ >>> s.str.cat(["A", "B", "C", "D"], na_rep="-")
0 aA
1 bB
2 -C
@@ -581,15 +581,15 @@ def cat(
Series with different indexes can be aligned before concatenation. The
`join`-keyword works as in other methods.
- >>> t = pd.Series(['d', 'a', 'e', 'c'], index=[3, 0, 4, 2])
- >>> s.str.cat(t, join='left', na_rep='-')
+ >>> t = pd.Series(["d", "a", "e", "c"], index=[3, 0, 4, 2])
+ >>> s.str.cat(t, join="left", na_rep="-")
0 aa
1 b-
2 -c
3 dd
dtype: object
>>>
- >>> s.str.cat(t, join='outer', na_rep='-')
+ >>> s.str.cat(t, join="outer", na_rep="-")
0 aa
1 b-
2 -c
@@ -597,13 +597,13 @@ def cat(
4 -e
dtype: object
>>>
- >>> s.str.cat(t, join='inner', na_rep='-')
+ >>> s.str.cat(t, join="inner", na_rep="-")
0 aa
2 -c
3 dd
dtype: object
>>>
- >>> s.str.cat(t, join='right', na_rep='-')
+ >>> s.str.cat(t, join="right", na_rep="-")
3 dd
0 aa
4 -e
@@ -1082,12 +1082,16 @@ def get(self, i):
Examples
--------
- >>> s = pd.Series(["String",
- ... (1, 2, 3),
- ... ["a", "b", "c"],
- ... 123,
- ... -456,
- ... {1: "Hello", "2": "World"}])
+ >>> s = pd.Series(
+ ... [
+ ... "String",
+ ... (1, 2, 3),
+ ... ["a", "b", "c"],
+ ... 123,
+ ... -456,
+ ... {1: "Hello", "2": "World"},
+ ... ]
+ ... )
>>> s
0 String
1 (1, 2, 3)
@@ -1117,9 +1121,13 @@ def get(self, i):
Return element with given key
- >>> s = pd.Series([{"name": "Hello", "value": "World"},
- ... {"name": "Goodbye", "value": "Planet"}])
- >>> s.str.get('name')
+ >>> s = pd.Series(
+ ... [
+ ... {"name": "Hello", "value": "World"},
+ ... {"name": "Goodbye", "value": "Planet"},
+ ... ]
+ ... )
+ >>> s.str.get("name")
0 Hello
1 Goodbye
dtype: object
@@ -1166,11 +1174,15 @@ def join(self, sep: str):
--------
Example with a list that contains non-string elements.
- >>> s = pd.Series([['lion', 'elephant', 'zebra'],
- ... [1.1, 2.2, 3.3],
- ... ['cat', np.nan, 'dog'],
- ... ['cow', 4.5, 'goat'],
- ... ['duck', ['swan', 'fish'], 'guppy']])
+ >>> s = pd.Series(
+ ... [
+ ... ["lion", "elephant", "zebra"],
+ ... [1.1, 2.2, 3.3],
+ ... ["cat", np.nan, "dog"],
+ ... ["cow", 4.5, "goat"],
+ ... ["duck", ["swan", "fish"], "guppy"],
+ ... ]
+ ... )
>>> s
0 [lion, elephant, zebra]
1 [1.1, 2.2, 3.3]
@@ -1182,7 +1194,7 @@ def join(self, sep: str):
Join all lists using a '-'. The lists containing object(s) of types other
than str will produce a NaN.
- >>> s.str.join('-')
+ >>> s.str.join("-")
0 lion-elephant-zebra
1 NaN
2 NaN
@@ -1238,8 +1250,8 @@ def contains(
--------
Returning a Series of booleans using only a literal pattern.
- >>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.nan])
- >>> s1.str.contains('og', regex=False)
+ >>> s1 = pd.Series(["Mouse", "dog", "house and parrot", "23", np.nan])
+ >>> s1.str.contains("og", regex=False)
0 False
1 True
2 False
@@ -1249,13 +1261,13 @@ def contains(
Returning an Index of booleans using only a literal pattern.
- >>> ind = pd.Index(['Mouse', 'dog', 'house and parrot', '23.0', np.nan])
- >>> ind.str.contains('23', regex=False)
+ >>> ind = pd.Index(["Mouse", "dog", "house and parrot", "23.0", np.nan])
+ >>> ind.str.contains("23", regex=False)
Index([False, False, False, True, nan], dtype='object')
Specifying case sensitivity using `case`.
- >>> s1.str.contains('oG', case=True, regex=True)
+ >>> s1.str.contains("oG", case=True, regex=True)
0 False
1 False
2 False
@@ -1267,7 +1279,7 @@ def contains(
with `False`. If Series or Index does not contain NaN values
the resultant dtype will be `bool`, otherwise, an `object` dtype.
- >>> s1.str.contains('og', na=False, regex=True)
+ >>> s1.str.contains("og", na=False, regex=True)
0 False
1 True
2 False
@@ -1277,7 +1289,7 @@ def contains(
Returning 'house' or 'dog' when either expression occurs in a string.
- >>> s1.str.contains('house|dog', regex=True)
+ >>> s1.str.contains("house|dog", regex=True)
0 False
1 True
2 True
@@ -1288,7 +1300,7 @@ def contains(
Ignoring case sensitivity using `flags` with regex.
>>> import re
- >>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True)
+ >>> s1.str.contains("PARROT", flags=re.IGNORECASE, regex=True)
0 False
1 False
2 True
@@ -1298,7 +1310,7 @@ def contains(
Returning any digit using regular expression.
- >>> s1.str.contains('\\d', regex=True)
+ >>> s1.str.contains("\\d", regex=True)
0 False
1 False
2 False
@@ -1311,8 +1323,8 @@ def contains(
return `True`. However, '.0' as a regex matches any character
followed by a 0.
- >>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35'])
- >>> s2.str.contains('.0', regex=True)
+ >>> s2 = pd.Series(["40", "40.0", "41", "41.0", "35"])
+ >>> s2.str.contains(".0", regex=True)
0 True
1 True
2 False
@@ -1403,7 +1415,7 @@ def fullmatch(self, pat, case: bool = True, flags: int = 0, na=None):
Examples
--------
>>> ser = pd.Series(["cat", "duck", "dove"])
- >>> ser.str.fullmatch(r'd.+')
+ >>> ser.str.fullmatch(r"d.+")
0 False
1 True
2 True
@@ -1482,7 +1494,7 @@ def replace(
regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are
left as is:
- >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True)
+ >>> pd.Series(["foo", "fuz", np.nan]).str.replace("f.", "ba", regex=True)
0 bao
1 baz
2 NaN
@@ -1491,7 +1503,7 @@ def replace(
When `pat` is a string and `regex` is False, every `pat` is replaced with
`repl` as with :meth:`str.replace`:
- >>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False)
+ >>> pd.Series(["f.o", "fuz", np.nan]).str.replace("f.", "ba", regex=False)
0 bao
1 fuz
2 NaN
@@ -1503,7 +1515,7 @@ def replace(
To get the idea:
- >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr, regex=True)
+ >>> pd.Series(["foo", "fuz", np.nan]).str.replace("f", repr, regex=True)
0 <re.Match object; span=(0, 1), match='f'>oo
1 <re.Match object; span=(0, 1), match='f'>uz
2 NaN
@@ -1512,8 +1524,8 @@ def replace(
Reverse every lowercase alphabetic word:
>>> repl = lambda m: m.group(0)[::-1]
- >>> ser = pd.Series(['foo 123', 'bar baz', np.nan])
- >>> ser.str.replace(r'[a-z]+', repl, regex=True)
+ >>> ser = pd.Series(["foo 123", "bar baz", np.nan])
+ >>> ser.str.replace(r"[a-z]+", repl, regex=True)
0 oof 123
1 rab zab
2 NaN
@@ -1522,8 +1534,8 @@ def replace(
Using regex groups (extract second group and swap case):
>>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
- >>> repl = lambda m: m.group('two').swapcase()
- >>> ser = pd.Series(['One Two Three', 'Foo Bar Baz'])
+ >>> repl = lambda m: m.group("two").swapcase()
+ >>> ser = pd.Series(["One Two Three", "Foo Bar Baz"])
>>> ser.str.replace(pat, repl, regex=True)
0 tWO
1 bAR
@@ -1532,8 +1544,8 @@ def replace(
Using a compiled regex with flags
>>> import re
- >>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)
- >>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar', regex=True)
+ >>> regex_pat = re.compile(r"FUZ", flags=re.IGNORECASE)
+ >>> pd.Series(["foo", "fuz", np.nan]).str.replace(regex_pat, "bar", regex=True)
0 foo
1 bar
2 NaN
@@ -1583,7 +1595,7 @@ def repeat(self, repeats):
Examples
--------
- >>> s = pd.Series(['a', 'b', 'c'])
+ >>> s = pd.Series(["a", "b", "c"])
>>> s
0 a
1 b
@@ -1658,12 +1670,12 @@ def pad(
1 tiger
dtype: object
- >>> s.str.pad(width=10, side='right', fillchar='-')
+ >>> s.str.pad(width=10, side="right", fillchar="-")
0 caribou---
1 tiger-----
dtype: object
- >>> s.str.pad(width=10, side='both', fillchar='-')
+ >>> s.str.pad(width=10, side="both", fillchar="-")
0 -caribou--
1 --tiger---
dtype: object
@@ -1782,7 +1794,7 @@ def zfill(self, width: int):
Examples
--------
- >>> s = pd.Series(['-1', '1', '1000', 10, np.nan])
+ >>> s = pd.Series(["-1", "1", "1000", 10, np.nan])
>>> s
0 -1
1 1
@@ -1917,7 +1929,7 @@ def slice_replace(self, start=None, stop=None, repl=None):
Examples
--------
- >>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
+ >>> s = pd.Series(["a", "ab", "abc", "abdc", "abcde"])
>>> s
0 a
1 ab
@@ -1929,7 +1941,7 @@ def slice_replace(self, start=None, stop=None, repl=None):
Specify just `start`, meaning replace `start` until the end of the
string with `repl`.
- >>> s.str.slice_replace(1, repl='X')
+ >>> s.str.slice_replace(1, repl="X")
0 aX
1 aX
2 aX
@@ -1940,7 +1952,7 @@ def slice_replace(self, start=None, stop=None, repl=None):
Specify just `stop`, meaning the start of the string to `stop` is replaced
with `repl`, and the rest of the string is included.
- >>> s.str.slice_replace(stop=2, repl='X')
+ >>> s.str.slice_replace(stop=2, repl="X")
0 X
1 X
2 Xc
@@ -1952,7 +1964,7 @@ def slice_replace(self, start=None, stop=None, repl=None):
replaced with `repl`. Everything before or after `start` and `stop` is
included as is.
- >>> s.str.slice_replace(start=1, stop=3, repl='X')
+ >>> s.str.slice_replace(start=1, stop=3, repl="X")
0 aX
1 aX
2 aX
@@ -1983,8 +1995,8 @@ def decode(self, encoding, errors: str = "strict"):
--------
For Series:
- >>> ser = pd.Series([b'cow', b'123', b'()'])
- >>> ser.str.decode('ascii')
+ >>> ser = pd.Series([b"cow", b"123", b"()"])
+ >>> ser.str.decode("ascii")
0 cow
1 123
2 ()
@@ -2020,8 +2032,8 @@ def encode(self, encoding, errors: str = "strict"):
Examples
--------
- >>> ser = pd.Series(['cow', '123', '()'])
- >>> ser.str.encode(encoding='ascii')
+ >>> ser = pd.Series(["cow", "123", "()"])
+ >>> ser.str.encode(encoding="ascii")
0 b'cow'
1 b'123'
2 b'()'
@@ -2247,7 +2259,7 @@ def wrap(self, width: int, **kwargs):
Examples
--------
- >>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
+ >>> s = pd.Series(["line to be wrapped", "another line to be wrapped"])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
@@ -2281,13 +2293,13 @@ def get_dummies(self, sep: str = "|"):
Examples
--------
- >>> pd.Series(['a|b', 'a', 'a|c']).str.get_dummies()
+ >>> pd.Series(["a|b", "a", "a|c"]).str.get_dummies()
a b c
0 1 1 0
1 1 0 0
2 1 0 1
- >>> pd.Series(['a|b', np.nan, 'a|c']).str.get_dummies()
+ >>> pd.Series(["a|b", np.nan, "a|c"]).str.get_dummies()
a b c
0 1 1 0
1 0 0 0
@@ -2325,7 +2337,7 @@ def translate(self, table):
Examples
--------
>>> ser = pd.Series(["El niño", "Françoise"])
- >>> mytable = str.maketrans({'ñ': 'n', 'ç': 'c'})
+ >>> mytable = str.maketrans({"ñ": "n", "ç": "c"})
>>> ser.str.translate(mytable)
0 El nino
1 Francoise
@@ -2370,8 +2382,8 @@ def count(self, pat, flags: int = 0):
Examples
--------
- >>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat'])
- >>> s.str.count('a')
+ >>> s = pd.Series(["A", "B", "Aaba", "Baca", np.nan, "CABA", "cat"])
+ >>> s.str.count("a")
0 0.0
1 0.0
2 2.0
@@ -2383,8 +2395,8 @@ def count(self, pat, flags: int = 0):
Escape ``'$'`` to find the literal dollar sign.
- >>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])
- >>> s.str.count('\\$')
+ >>> s = pd.Series(["$", "B", "Aab$", "$$ca", "C$B$", "cat"])
+ >>> s.str.count("\\$")
0 1
1 0
2 1
@@ -2395,7 +2407,7 @@ def count(self, pat, flags: int = 0):
This is also available on Index
- >>> pd.Index(['A', 'A', 'Aaba', 'cat']).str.count('a')
+ >>> pd.Index(["A", "A", "Aaba", "cat"]).str.count("a")
Index([0, 0, 2, 1], dtype='int64')
"""
result = self._data.array._str_count(pat, flags)
@@ -2434,7 +2446,7 @@ def startswith(
Examples
--------
- >>> s = pd.Series(['bat', 'Bear', 'cat', np.nan])
+ >>> s = pd.Series(["bat", "Bear", "cat", np.nan])
>>> s
0 bat
1 Bear
@@ -2442,14 +2454,14 @@ def startswith(
3 NaN
dtype: object
- >>> s.str.startswith('b')
+ >>> s.str.startswith("b")
0 True
1 False
2 False
3 NaN
dtype: object
- >>> s.str.startswith(('b', 'B'))
+ >>> s.str.startswith(("b", "B"))
0 True
1 True
2 False
@@ -2458,7 +2470,7 @@ def startswith(
Specifying `na` to be `False` instead of `NaN`.
- >>> s.str.startswith('b', na=False)
+ >>> s.str.startswith("b", na=False)
0 True
1 False
2 False
@@ -2504,7 +2516,7 @@ def endswith(
Examples
--------
- >>> s = pd.Series(['bat', 'bear', 'caT', np.nan])
+ >>> s = pd.Series(["bat", "bear", "caT", np.nan])
>>> s
0 bat
1 bear
@@ -2512,14 +2524,14 @@ def endswith(
3 NaN
dtype: object
- >>> s.str.endswith('t')
+ >>> s.str.endswith("t")
0 True
1 False
2 False
3 NaN
dtype: object
- >>> s.str.endswith(('t', 'T'))
+ >>> s.str.endswith(("t", "T"))
0 True
1 False
2 True
@@ -2528,7 +2540,7 @@ def endswith(
Specifying `na` to be `False` instead of `NaN`.
- >>> s.str.endswith('t', na=False)
+ >>> s.str.endswith("t", na=False)
0 True
1 False
2 False
@@ -2575,11 +2587,11 @@ def findall(self, pat, flags: int = 0):
Examples
--------
- >>> s = pd.Series(['Lion', 'Monkey', 'Rabbit'])
+ >>> s = pd.Series(["Lion", "Monkey", "Rabbit"])
The search for the pattern 'Monkey' returns one match:
- >>> s.str.findall('Monkey')
+ >>> s.str.findall("Monkey")
0 []
1 [Monkey]
2 []
@@ -2588,7 +2600,7 @@ def findall(self, pat, flags: int = 0):
On the other hand, the search for the pattern 'MONKEY' doesn't return any
match:
- >>> s.str.findall('MONKEY')
+ >>> s.str.findall("MONKEY")
0 []
1 []
2 []
@@ -2598,7 +2610,7 @@ def findall(self, pat, flags: int = 0):
to find the pattern 'MONKEY' ignoring the case:
>>> import re
- >>> s.str.findall('MONKEY', flags=re.IGNORECASE)
+ >>> s.str.findall("MONKEY", flags=re.IGNORECASE)
0 []
1 [Monkey]
2 []
@@ -2607,7 +2619,7 @@ def findall(self, pat, flags: int = 0):
When the pattern matches more than one string in the Series, all matches
are returned:
- >>> s.str.findall('on')
+ >>> s.str.findall("on")
0 [on]
1 [on]
2 []
@@ -2616,7 +2628,7 @@ def findall(self, pat, flags: int = 0):
Regular expressions are supported too. For instance, the search for all the
strings ending with the word 'on' is shown next:
- >>> s.str.findall('on$')
+ >>> s.str.findall("on$")
0 [on]
1 []
2 []
@@ -2625,7 +2637,7 @@ def findall(self, pat, flags: int = 0):
If the pattern is found more than once in the same string, then a list of
multiple strings is returned:
- >>> s.str.findall('b')
+ >>> s.str.findall("b")
0 []
1 []
2 [b, b]
@@ -2678,8 +2690,8 @@ def extract(
A pattern with two groups will return a DataFrame with two columns.
Non-matches will be NaN.
- >>> s = pd.Series(['a1', 'b2', 'c3'])
- >>> s.str.extract(r'([ab])(\d)')
+ >>> s = pd.Series(["a1", "b2", "c3"])
+ >>> s.str.extract(r"([ab])(\d)")
0 1
0 a 1
1 b 2
@@ -2687,7 +2699,7 @@ def extract(
A pattern may contain optional groups.
- >>> s.str.extract(r'([ab])?(\d)')
+ >>> s.str.extract(r"([ab])?(\d)")
0 1
0 a 1
1 b 2
@@ -2695,7 +2707,7 @@ def extract(
Named groups will become column names in the result.
- >>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)')
+ >>> s.str.extract(r"(?P<letter>[ab])(?P<digit>\d)")
letter digit
0 a 1
1 b 2
@@ -2704,7 +2716,7 @@ def extract(
A pattern with one group will return a DataFrame with one column
if expand=True.
- >>> s.str.extract(r'[ab](\d)', expand=True)
+ >>> s.str.extract(r"[ab](\d)", expand=True)
0
0 1
1 2
@@ -2712,7 +2724,7 @@ def extract(
A pattern with one group will return a Series if expand=False.
- >>> s.str.extract(r'[ab](\d)', expand=False)
+ >>> s.str.extract(r"[ab](\d)", expand=False)
0 1
1 2
2 NaN
@@ -2938,8 +2950,8 @@ def normalize(self, form):
Examples
--------
- >>> ser = pd.Series(['ñ'])
- >>> ser.str.normalize('NFC') == ser.str.normalize('NFD')
+ >>> ser = pd.Series(["ñ"])
+ >>> ser.str.normalize("NFC") == ser.str.normalize("NFD")
0 False
dtype: bool
"""
@@ -3052,12 +3064,9 @@ def len(self):
Returns the length (number of characters) in a string. Returns the
number of entries for dictionaries, lists or tuples.
- >>> s = pd.Series(['dog',
- ... '',
- ... 5,
- ... {'foo' : 'bar'},
- ... [2, 3, 5, 7],
- ... ('one', 'two', 'three')])
+ >>> s = pd.Series(
+ ... ["dog", "", 5, {"foo": "bar"}, [2, 3, 5, 7], ("one", "two", "three")]
+ ... )
>>> s
0 dog
1
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 8e0a96e508516..6c8c2c7e5009e 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -897,9 +897,7 @@ def to_datetime(
can be common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns']) or plurals of the same
- >>> df = pd.DataFrame({'year': [2015, 2016],
- ... 'month': [2, 3],
- ... 'day': [4, 5]})
+ >>> df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
>>> pd.to_datetime(df)
0 2015-02-04
1 2016-03-05
@@ -907,9 +905,9 @@ def to_datetime(
Using a unix epoch time
- >>> pd.to_datetime(1490195805, unit='s')
+ >>> pd.to_datetime(1490195805, unit="s")
Timestamp('2017-03-22 15:16:45')
- >>> pd.to_datetime(1490195805433502912, unit='ns')
+ >>> pd.to_datetime(1490195805433502912, unit="ns")
Timestamp('2017-03-22 15:16:45.433502912')
.. warning:: For float arg, precision rounding might happen. To prevent
@@ -917,8 +915,7 @@ def to_datetime(
Using a non-unix epoch origin
- >>> pd.to_datetime([1, 2, 3], unit='D',
- ... origin=pd.Timestamp('1960-01-01'))
+ >>> pd.to_datetime([1, 2, 3], unit="D", origin=pd.Timestamp("1960-01-01"))
DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'],
dtype='datetime64[ns]', freq=None)
@@ -926,8 +923,7 @@ def to_datetime(
:const:`"%f"` will parse all the way up to nanoseconds.
- >>> pd.to_datetime('2018-10-26 12:00:00.0000000011',
- ... format='%Y-%m-%d %H:%M:%S.%f')
+ >>> pd.to_datetime("2018-10-26 12:00:00.0000000011", format="%Y-%m-%d %H:%M:%S.%f")
Timestamp('2018-10-26 12:00:00.000000001')
**Non-convertible date/times**
@@ -935,7 +931,7 @@ def to_datetime(
Passing ``errors='coerce'`` will force an out-of-bounds date to :const:`NaT`,
in addition to forcing non-dates (or non-parseable dates) to :const:`NaT`.
- >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
+ >>> pd.to_datetime("13000101", format="%Y%m%d", errors="coerce")
NaT
.. _to_datetime_tz_examples:
@@ -946,14 +942,14 @@ def to_datetime(
- Timezone-naive inputs are converted to timezone-naive :class:`DatetimeIndex`:
- >>> pd.to_datetime(['2018-10-26 12:00:00', '2018-10-26 13:00:15'])
+ >>> pd.to_datetime(["2018-10-26 12:00:00", "2018-10-26 13:00:15"])
DatetimeIndex(['2018-10-26 12:00:00', '2018-10-26 13:00:15'],
dtype='datetime64[ns]', freq=None)
- Timezone-aware inputs *with constant time offset* are converted to
timezone-aware :class:`DatetimeIndex`:
- >>> pd.to_datetime(['2018-10-26 12:00 -0500', '2018-10-26 13:00 -0500'])
+ >>> pd.to_datetime(["2018-10-26 12:00 -0500", "2018-10-26 13:00 -0500"])
DatetimeIndex(['2018-10-26 12:00:00-05:00', '2018-10-26 13:00:00-05:00'],
dtype='datetime64[ns, UTC-05:00]', freq=None)
@@ -965,8 +961,9 @@ def to_datetime(
and a simple :class:`Index` containing :class:`datetime.datetime`
objects will be returned:
- >>> pd.to_datetime(['2020-10-25 02:00 +0200',
- ... '2020-10-25 04:00 +0100']) # doctest: +SKIP
+ >>> pd.to_datetime(
+ ... ["2020-10-25 02:00 +0200", "2020-10-25 04:00 +0100"]
+ ... ) # doctest: +SKIP
FutureWarning: In a future version of pandas, parsing datetimes with mixed
time zones will raise an error unless `utc=True`. Please specify `utc=True`
to opt in to the new behaviour and silence this warning. To create a `Series`
@@ -979,8 +976,9 @@ def to_datetime(
a simple :class:`Index` containing :class:`datetime.datetime` objects:
>>> from datetime import datetime
- >>> pd.to_datetime(["2020-01-01 01:00:00-01:00",
- ... datetime(2020, 1, 1, 3, 0)]) # doctest: +SKIP
+ >>> pd.to_datetime(
+ ... ["2020-01-01 01:00:00-01:00", datetime(2020, 1, 1, 3, 0)]
+ ... ) # doctest: +SKIP
FutureWarning: In a future version of pandas, parsing datetimes with mixed
time zones will raise an error unless `utc=True`. Please specify `utc=True`
to opt in to the new behaviour and silence this warning. To create a `Series`
@@ -994,22 +992,21 @@ def to_datetime(
- Timezone-naive inputs are *localized* as UTC
- >>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00'], utc=True)
+ >>> pd.to_datetime(["2018-10-26 12:00", "2018-10-26 13:00"], utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 13:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Timezone-aware inputs are *converted* to UTC (the output represents the
exact same datetime, but viewed from the UTC time offset `+00:00`).
- >>> pd.to_datetime(['2018-10-26 12:00 -0530', '2018-10-26 12:00 -0500'],
- ... utc=True)
+ >>> pd.to_datetime(["2018-10-26 12:00 -0530", "2018-10-26 12:00 -0500"], utc=True)
DatetimeIndex(['2018-10-26 17:30:00+00:00', '2018-10-26 17:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Inputs can contain both string or datetime, the above
rules still apply
- >>> pd.to_datetime(['2018-10-26 12:00', datetime(2020, 1, 1, 18)], utc=True)
+ >>> pd.to_datetime(["2018-10-26 12:00", datetime(2020, 1, 1, 18)], utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2020-01-01 18:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
"""
diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index 09652a7d8bc92..2ae57d3c8508e 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -124,24 +124,24 @@ def to_numeric(
--------
Take separate series and convert to numeric, coercing when told to
- >>> s = pd.Series(['1.0', '2', -3])
+ >>> s = pd.Series(["1.0", "2", -3])
>>> pd.to_numeric(s)
0 1.0
1 2.0
2 -3.0
dtype: float64
- >>> pd.to_numeric(s, downcast='float')
+ >>> pd.to_numeric(s, downcast="float")
0 1.0
1 2.0
2 -3.0
dtype: float32
- >>> pd.to_numeric(s, downcast='signed')
+ >>> pd.to_numeric(s, downcast="signed")
0 1
1 2
2 -3
dtype: int8
- >>> s = pd.Series(['apple', '1.0', '2', -3])
- >>> pd.to_numeric(s, errors='coerce')
+ >>> s = pd.Series(["apple", "1.0", "2", -3])
+ >>> pd.to_numeric(s, errors="coerce")
0 NaN
1 1.0
2 2.0
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index fcf4f7606a594..47dfae3c6cadd 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -160,24 +160,24 @@ def to_timedelta(
--------
Parsing a single string to a Timedelta:
- >>> pd.to_timedelta('1 days 06:05:01.00003')
+ >>> pd.to_timedelta("1 days 06:05:01.00003")
Timedelta('1 days 06:05:01.000030')
- >>> pd.to_timedelta('15.5us')
+ >>> pd.to_timedelta("15.5us")
Timedelta('0 days 00:00:00.000015500')
Parsing a list or array of strings:
- >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])
+ >>> pd.to_timedelta(["1 days 06:05:01.00003", "15.5us", "nan"])
TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT],
dtype='timedelta64[ns]', freq=None)
Converting numbers by specifying the `unit` keyword argument:
- >>> pd.to_timedelta(np.arange(5), unit='s')
+ >>> pd.to_timedelta(np.arange(5), unit="s")
TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02',
'0 days 00:00:03', '0 days 00:00:04'],
dtype='timedelta64[ns]', freq=None)
- >>> pd.to_timedelta(np.arange(5), unit='d')
+ >>> pd.to_timedelta(np.arange(5), unit="d")
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
"""
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index b1a1da387ab83..b0048d5024064 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -959,7 +959,7 @@ class Window(BaseWindow):
Examples
--------
- >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
+ >>> df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
@@ -982,12 +982,16 @@ class Window(BaseWindow):
Rolling sum with a window span of 2 seconds.
- >>> df_time = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
- ... index=[pd.Timestamp('20130101 09:00:00'),
- ... pd.Timestamp('20130101 09:00:02'),
- ... pd.Timestamp('20130101 09:00:03'),
- ... pd.Timestamp('20130101 09:00:05'),
- ... pd.Timestamp('20130101 09:00:06')])
+ >>> df_time = pd.DataFrame(
+ ... {"B": [0, 1, 2, np.nan, 4]},
+ ... index=[
+ ... pd.Timestamp("20130101 09:00:00"),
+ ... pd.Timestamp("20130101 09:00:02"),
+ ... pd.Timestamp("20130101 09:00:03"),
+ ... pd.Timestamp("20130101 09:00:05"),
+ ... pd.Timestamp("20130101 09:00:06"),
+ ... ],
+ ... )
>>> df_time
B
@@ -997,7 +1001,7 @@ class Window(BaseWindow):
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
- >>> df_time.rolling('2s').sum()
+ >>> df_time.rolling("2s").sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
@@ -1065,7 +1069,7 @@ class Window(BaseWindow):
Rolling sum with a window length of 2, using the Scipy ``'gaussian'``
window type. ``std`` is required in the aggregation function.
- >>> df.rolling(2, win_type='gaussian').sum(std=3)
+ >>> df.rolling(2, win_type="gaussian").sum(std=3)
B
0 NaN
1 0.986207
@@ -1077,12 +1081,17 @@ class Window(BaseWindow):
Rolling sum with a window length of 2 days.
- >>> df = pd.DataFrame({
- ... 'A': [pd.to_datetime('2020-01-01'),
- ... pd.to_datetime('2020-01-01'),
- ... pd.to_datetime('2020-01-02'),],
- ... 'B': [1, 2, 3], },
- ... index=pd.date_range('2020', periods=3))
+ >>> df = pd.DataFrame(
+ ... {
+ ... "A": [
+ ... pd.to_datetime("2020-01-01"),
+ ... pd.to_datetime("2020-01-01"),
+ ... pd.to_datetime("2020-01-02"),
+ ... ],
+ ... "B": [1, 2, 3],
+ ... },
+ ... index=pd.date_range("2020", periods=3),
+ ... )
>>> df
A B
@@ -1090,7 +1099,7 @@ class Window(BaseWindow):
2020-01-02 2020-01-01 2
2020-01-03 2020-01-02 3
- >>> df.rolling('2D', on='A').sum()
+ >>> df.rolling("2D", on="A").sum()
A B
2020-01-01 2020-01-01 1.0
2020-01-02 2020-01-01 3.0
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 97db508bda1b4..c51122fe9e140 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -49,9 +49,9 @@ class PerformanceWarning(Warning):
Examples
--------
- >>> df = pd.DataFrame({"jim": [0, 0, 1, 1],
- ... "joe": ["x", "x", "z", "y"],
- ... "jolie": [1, 2, 3, 4]})
+ >>> df = pd.DataFrame(
+ ... {"jim": [0, 0, 1, 1], "joe": ["x", "x", "z", "y"], "jolie": [1, 2, 3, 4]}
+ ... )
>>> df = df.set_index(["jim", "joe"])
>>> df
jolie
@@ -60,7 +60,7 @@ class PerformanceWarning(Warning):
x 2
1 z 3
y 4
- >>> df.loc[(1, 'z')] # doctest: +SKIP
+ >>> df.loc[(1, "z")] # doctest: +SKIP
# PerformanceWarning: indexing past lexsort depth may impact performance.
df.loc[(1, 'z')]
jolie
@@ -77,10 +77,9 @@ class UnsupportedFunctionCall(ValueError):
Examples
--------
- >>> df = pd.DataFrame({"A": [0, 0, 1, 1],
- ... "B": ["x", "x", "z", "y"],
- ... "C": [1, 2, 3, 4]}
- ... )
+ >>> df = pd.DataFrame(
+ ... {"A": [0, 0, 1, 1], "B": ["x", "x", "z", "y"], "C": [1, 2, 3, 4]}
+ ... )
>>> np.cumsum(df.groupby(["A"]))
Traceback (most recent call last):
UnsupportedFunctionCall: numpy operations are not valid with groupby.
@@ -96,10 +95,13 @@ class UnsortedIndexError(KeyError):
Examples
--------
- >>> df = pd.DataFrame({"cat": [0, 0, 1, 1],
- ... "color": ["white", "white", "brown", "black"],
- ... "lives": [4, 4, 3, 7]},
- ... )
+ >>> df = pd.DataFrame(
+ ... {
+ ... "cat": [0, 0, 1, 1],
+ ... "color": ["white", "white", "brown", "black"],
+ ... "lives": [4, 4, 3, 7],
+ ... },
+ ... )
>>> df = df.set_index(["cat", "color"])
>>> df
lives
@@ -108,7 +110,7 @@ class UnsortedIndexError(KeyError):
white 4
1 brown 3
black 7
- >>> df.loc[(0, "black"):(1, "white")]
+ >>> df.loc[(0, "black") : (1, "white")]
Traceback (most recent call last):
UnsortedIndexError: 'Key length (2) was greater
than MultiIndex lexsort depth (1)'
@@ -133,7 +135,7 @@ class ParserError(ValueError):
... cat,foo,bar
... dog,foo,"baz'''
>>> from io import StringIO
- >>> pd.read_csv(StringIO(data), skipfooter=1, engine='python')
+ >>> pd.read_csv(StringIO(data), skipfooter=1, engine="python")
Traceback (most recent call last):
ParserError: ',' expected after '"'. Error could possibly be due
to parsing errors in the skipped footer rows
@@ -167,11 +169,14 @@ class DtypeWarning(Warning):
This example creates and reads a large CSV file with a column that contains
`int` and `str`.
- >>> df = pd.DataFrame({'a': (['1'] * 100000 + ['X'] * 100000 +
- ... ['1'] * 100000),
- ... 'b': ['b'] * 300000}) # doctest: +SKIP
- >>> df.to_csv('test.csv', index=False) # doctest: +SKIP
- >>> df2 = pd.read_csv('test.csv') # doctest: +SKIP
+ >>> df = pd.DataFrame(
+ ... {
+ ... "a": (["1"] * 100000 + ["X"] * 100000 + ["1"] * 100000),
+ ... "b": ["b"] * 300000,
+ ... }
+ ... ) # doctest: +SKIP
+ >>> df.to_csv("test.csv", index=False) # doctest: +SKIP
+ >>> df2 = pd.read_csv("test.csv") # doctest: +SKIP
... # DtypeWarning: Columns (0) have mixed types
Important to notice that ``df2`` will contain both `str` and `int` for the
@@ -189,7 +194,7 @@ class DtypeWarning(Warning):
One way to solve this issue is using the `dtype` parameter in the
`read_csv` and `read_table` functions to explicit the conversion:
- >>> df2 = pd.read_csv('test.csv', sep=',', dtype={'a': str}) # doctest: +SKIP
+ >>> df2 = pd.read_csv("test.csv", sep=",", dtype={"a": str}) # doctest: +SKIP
No warning was issued.
"""
@@ -241,12 +246,12 @@ class ParserWarning(Warning):
>>> csv = '''a;b;c
... 1;1,8
... 1;2,1'''
- >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]') # doctest: +SKIP
+ >>> df = pd.read_csv(io.StringIO(csv), sep="[;,]") # doctest: +SKIP
... # ParserWarning: Falling back to the 'python' engine...
Adding `engine='python'` to `pd.read_csv` removes the Warning:
- >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]', engine='python')
+ >>> df = pd.read_csv(io.StringIO(csv), sep="[;,]", engine="python")
"""
@@ -258,13 +263,19 @@ class MergeError(ValueError):
Examples
--------
- >>> left = pd.DataFrame({"a": ["a", "b", "b", "d"],
- ... "b": ["cat", "dog", "weasel", "horse"]},
- ... index=range(4))
- >>> right = pd.DataFrame({"a": ["a", "b", "c", "d"],
- ... "c": ["meow", "bark", "chirp", "nay"]},
- ... index=range(4)).set_index("a")
- >>> left.join(right, on="a", validate="one_to_one",)
+ >>> left = pd.DataFrame(
+ ... {"a": ["a", "b", "b", "d"], "b": ["cat", "dog", "weasel", "horse"]},
+ ... index=range(4),
+ ... )
+ >>> right = pd.DataFrame(
+ ... {"a": ["a", "b", "c", "d"], "c": ["meow", "bark", "chirp", "nay"]},
+ ... index=range(4),
+ ... ).set_index("a")
+ >>> left.join(
+ ... right,
+ ... on="a",
+ ... validate="one_to_one",
+ ... )
Traceback (most recent call last):
MergeError: Merge keys are not unique in left dataset; not a one-to-one merge
"""
@@ -280,6 +291,7 @@ class AbstractMethodError(NotImplementedError):
... @classmethod
... def classmethod(cls):
... raise pd.errors.AbstractMethodError(cls, methodtype="classmethod")
+ ...
... def method(self):
... raise pd.errors.AbstractMethodError(self)
>>> test = Foo.classmethod()
@@ -314,8 +326,9 @@ class NumbaUtilError(Exception):
Examples
--------
- >>> df = pd.DataFrame({"key": ["a", "a", "b", "b"], "data": [1, 2, 3, 4]},
- ... columns=["key", "data"])
+ >>> df = pd.DataFrame(
+ ... {"key": ["a", "a", "b", "b"], "data": [1, 2, 3, 4]}, columns=["key", "data"]
+ ... )
>>> def incorrect_function(x):
... return sum(x) * 2.7
>>> df.groupby("key").agg(incorrect_function, engine="numba")
@@ -331,10 +344,10 @@ class DuplicateLabelError(ValueError):
Examples
--------
- >>> s = pd.Series([0, 1, 2], index=['a', 'b', 'c']).set_flags(
+ >>> s = pd.Series([0, 1, 2], index=["a", "b", "c"]).set_flags(
... allows_duplicate_labels=False
... )
- >>> s.reindex(['a', 'a', 'b'])
+ >>> s.reindex(["a", "a", "b"])
Traceback (most recent call last):
...
DuplicateLabelError: Index has duplicates.
@@ -351,8 +364,7 @@ class InvalidIndexError(Exception):
Examples
--------
>>> idx = pd.MultiIndex.from_product([["x", "y"], [0, 1]])
- >>> df = pd.DataFrame([[1, 1, 2, 2],
- ... [3, 3, 4, 4]], columns=idx)
+ >>> df = pd.DataFrame([[1, 1, 2, 2], [3, 3, 4, 4]], columns=idx)
>>> df
x y
0 1 0 1
@@ -373,7 +385,7 @@ class DataError(Exception):
Examples
--------
- >>> ser = pd.Series(['a', 'b', 'c'])
+ >>> ser = pd.Series(["a", "b", "c"])
>>> ser.rolling(2).sum()
Traceback (most recent call last):
DataError: No numeric types to aggregate
@@ -394,16 +406,14 @@ class SpecificationError(Exception):
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
- ... 'B': range(5),
- ... 'C': range(5)})
- >>> df.groupby('A').B.agg({'foo': 'count'}) # doctest: +SKIP
+ >>> df = pd.DataFrame({"A": [1, 1, 1, 2, 2], "B": range(5), "C": range(5)})
+ >>> df.groupby("A").B.agg({"foo": "count"}) # doctest: +SKIP
... # SpecificationError: nested renamer is not supported
- >>> df.groupby('A').agg({'B': {'foo': ['sum', 'max']}}) # doctest: +SKIP
+ >>> df.groupby("A").agg({"B": {"foo": ["sum", "max"]}}) # doctest: +SKIP
... # SpecificationError: nested renamer is not supported
- >>> df.groupby('A').agg(['min', 'min']) # doctest: +SKIP
+ >>> df.groupby("A").agg(["min", "min"]) # doctest: +SKIP
... # SpecificationError: nested renamer is not supported
"""
@@ -424,7 +434,7 @@ class ChainedAssignmentError(Warning):
Examples
--------
>>> pd.options.mode.copy_on_write = True
- >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A'])
+ >>> df = pd.DataFrame({"A": [1, 1, 1, 2, 2]}, columns=["A"])
>>> df["A"][0:3] = 10 # doctest: +SKIP
... # ChainedAssignmentError: ...
>>> pd.options.mode.copy_on_write = False
@@ -441,11 +451,11 @@ class NumExprClobberingError(NameError):
Examples
--------
- >>> df = pd.DataFrame({'abs': [1, 1, 1]})
+ >>> df = pd.DataFrame({"abs": [1, 1, 1]})
>>> df.query("abs > 2") # doctest: +SKIP
... # NumExprClobberingError: Variables in expression "(abs) > (2)" overlap...
>>> sin, a = 1, 2
- >>> pd.eval("sin + a", engine='numexpr') # doctest: +SKIP
+ >>> pd.eval("sin + a", engine="numexpr") # doctest: +SKIP
... # NumExprClobberingError: Variables in expression "(sin) + (a)" overlap...
"""
@@ -458,12 +468,12 @@ class UndefinedVariableError(NameError):
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 1, 1]})
+ >>> df = pd.DataFrame({"A": [1, 1, 1]})
>>> df.query("A > x") # doctest: +SKIP
... # UndefinedVariableError: name 'x' is not defined
>>> df.query("A > @y") # doctest: +SKIP
... # UndefinedVariableError: local variable 'y' is not defined
- >>> pd.eval('x + 1') # doctest: +SKIP
+ >>> pd.eval("x + 1") # doctest: +SKIP
... # UndefinedVariableError: name 'x' is not defined
"""
@@ -493,17 +503,16 @@ class IndexingError(Exception):
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 1, 1]})
- >>> df.loc[..., ..., 'A'] # doctest: +SKIP
+ >>> df = pd.DataFrame({"A": [1, 1, 1]})
+ >>> df.loc[..., ..., "A"] # doctest: +SKIP
... # IndexingError: indexer may only contain one '...' entry
- >>> df = pd.DataFrame({'A': [1, 1, 1]})
- >>> df.loc[1, ..., ...] # doctest: +SKIP
+ >>> df = pd.DataFrame({"A": [1, 1, 1]})
+ >>> df.loc[1, ..., ...] # doctest: +SKIP
... # IndexingError: Too many indexers
- >>> df[pd.Series([True], dtype=bool)] # doctest: +SKIP
+ >>> df[pd.Series([True], dtype=bool)] # doctest: +SKIP
... # IndexingError: Unalignable boolean Series provided as indexer...
- >>> s = pd.Series(range(2),
- ... index=pd.MultiIndex.from_product([["a", "b"], ["c"]]))
- >>> s.loc["a", "c", "d"] # doctest: +SKIP
+ >>> s = pd.Series(range(2), index=pd.MultiIndex.from_product([["a", "b"], ["c"]]))
+ >>> s.loc["a", "c", "d"] # doctest: +SKIP
... # IndexingError: Too many indexers
"""
@@ -539,14 +548,14 @@ class CSSWarning(UserWarning):
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 1, 1]})
- >>> df.style.applymap(
- ... lambda x: 'background-color: blueGreenRed;'
- ... ).to_excel('styled.xlsx') # doctest: +SKIP
+ >>> df = pd.DataFrame({"A": [1, 1, 1]})
+ >>> df.style.applymap(lambda x: "background-color: blueGreenRed;").to_excel(
+ ... "styled.xlsx"
+ ... ) # doctest: +SKIP
CSSWarning: Unhandled color format: 'blueGreenRed'
- >>> df.style.applymap(
- ... lambda x: 'border: 1px solid red red;'
- ... ).to_excel('styled.xlsx') # doctest: +SKIP
+ >>> df.style.applymap(lambda x: "border: 1px solid red red;").to_excel(
+ ... "styled.xlsx"
+ ... ) # doctest: +SKIP
CSSWarning: Unhandled color format: 'blueGreenRed'
"""
@@ -557,9 +566,8 @@ class PossibleDataLossError(Exception):
Examples
--------
- >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP
+ >>> store = pd.HDFStore("my-store", "a") # doctest: +SKIP
>>> store.open("w") # doctest: +SKIP
- ... # PossibleDataLossError: Re-opening the file [my-store] with mode [a]...
"""
@@ -569,7 +577,7 @@ class ClosedFileError(Exception):
Examples
--------
- >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP
+ >>> store = pd.HDFStore("my-store", "a") # doctest: +SKIP
>>> store.close() # doctest: +SKIP
>>> store.keys() # doctest: +SKIP
... # ClosedFileError: my-store file is not open!
@@ -592,12 +600,12 @@ class AttributeConflictWarning(Warning):
Examples
--------
- >>> idx1 = pd.Index(['a', 'b'], name='name1')
+ >>> idx1 = pd.Index(["a", "b"], name="name1")
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], index=idx1)
- >>> df1.to_hdf('file', 'data', 'w', append=True) # doctest: +SKIP
- >>> idx2 = pd.Index(['c', 'd'], name='name2')
+ >>> df1.to_hdf("file", "data", "w", append=True) # doctest: +SKIP
+ >>> idx2 = pd.Index(["c", "d"], name="name2")
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], index=idx2)
- >>> df2.to_hdf('file', 'data', 'a', append=True) # doctest: +SKIP
+ >>> df2.to_hdf("file", "data", "a", append=True) # doctest: +SKIP
AttributeConflictWarning: the [index_name] attribute of the existing index is
[name1] which conflicts with the new [name2]...
"""
@@ -616,9 +624,8 @@ class DatabaseError(OSError):
Examples
--------
>>> from sqlite3 import connect
- >>> conn = connect(':memory:')
- >>> pd.read_sql('select * test', conn) # doctest: +SKIP
- ... # DatabaseError: Execution failed on sql 'test': near "test": syntax error
+ >>> conn = connect(":memory:")
+ >>> pd.read_sql("select * test", conn) # doctest: +SKIP
"""
@@ -632,8 +639,7 @@ class PossiblePrecisionLoss(Warning):
Examples
--------
>>> df = pd.DataFrame({"s": pd.Series([1, 2**53], dtype=np.int64)})
- >>> df.to_stata('test') # doctest: +SKIP
- ... # PossiblePrecisionLoss: Column converted from int64 to float64...
+ >>> df.to_stata("test") # doctest: +SKIP
"""
@@ -644,8 +650,7 @@ class ValueLabelTypeMismatch(Warning):
Examples
--------
>>> df = pd.DataFrame({"categories": pd.Series(["a", 2], dtype="category")})
- >>> df.to_stata('test') # doctest: +SKIP
- ... # ValueLabelTypeMismatch: Stata value labels (pandas categories) must be str...
+ >>> df.to_stata("test") # doctest: +SKIP
"""
@@ -663,8 +668,7 @@ class InvalidColumnName(Warning):
Examples
--------
>>> df = pd.DataFrame({"0categories": pd.Series([2, 2])})
- >>> df.to_stata('test') # doctest: +SKIP
- ... # InvalidColumnName: Not all pandas column names were valid Stata variable...
+ >>> df.to_stata("test") # doctest: +SKIP
"""
@@ -675,7 +679,7 @@ class CategoricalConversionWarning(Warning):
Examples
--------
>>> from pandas.io.stata import StataReader
- >>> with StataReader('dta_file', chunksize=2) as reader: # doctest: +SKIP
+ >>> with StataReader("dta_file", chunksize=2) as reader: # doctest: +SKIP
... for i, block in enumerate(reader):
... print(i, block)
... # CategoricalConversionWarning: One or more series with value labels...
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py
index a15e37328e9fa..8e8b22967ea01 100644
--- a/pandas/io/clipboards.py
+++ b/pandas/io/clipboards.py
@@ -64,7 +64,7 @@ def read_clipboard(
Examples
--------
- >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
+ >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
>>> df.to_clipboard() # doctest: +SKIP
>>> pd.read_clipboard() # doctest: +SKIP
A B C
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 4109b6d0965bb..1f272d0e09db8 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -1019,7 +1019,7 @@ class ExcelWriter(Generic[_WorkbookT]):
>>> with pd.ExcelWriter(
... "path_to_file.xlsx",
... date_format="YYYY-MM-DD",
- ... datetime_format="YYYY-MM-DD HH:MM:SS"
+ ... datetime_format="YYYY-MM-DD HH:MM:SS",
... ) as writer:
... df.to_excel(writer) # doctest: +SKIP
@@ -1073,7 +1073,7 @@ class ExcelWriter(Generic[_WorkbookT]):
>>> with pd.ExcelWriter(
... "path_to_file.xlsx",
... engine="xlsxwriter",
- ... engine_kwargs={{"options": {{"nan_inf_to_errors": True}}}}
+ ... engine_kwargs={{"options": {{"nan_inf_to_errors": True}}}},
... ) as writer:
... df.to_excel(writer) # doctest: +SKIP
@@ -1084,7 +1084,7 @@ class ExcelWriter(Generic[_WorkbookT]):
... "path_to_file.xlsx",
... engine="openpyxl",
... mode="a",
- ... engine_kwargs={{"keep_vba": True}}
+ ... engine_kwargs={{"keep_vba": True}},
... ) as writer:
... df.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
"""
@@ -1494,7 +1494,7 @@ class ExcelFile:
Examples
--------
- >>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP
+ >>> file = pd.ExcelFile("myfile.xlsx") # doctest: +SKIP
>>> with pd.ExcelFile("myfile.xls") as xls: # doctest: +SKIP
... df1 = pd.read_excel(xls, "Sheet1") # doctest: +SKIP
"""
@@ -1617,9 +1617,9 @@ def parse(
Examples
--------
- >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
- >>> df.to_excel('myfile.xlsx') # doctest: +SKIP
- >>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP
+ >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
+ >>> df.to_excel("myfile.xlsx") # doctest: +SKIP
+ >>> file = pd.ExcelFile("myfile.xlsx") # doctest: +SKIP
>>> file.parse() # doctest: +SKIP
"""
return self._reader.parse(
diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py
index f7a1fcb8052e3..95d43f60a22c5 100644
--- a/pandas/io/excel/_util.py
+++ b/pandas/io/excel/_util.py
@@ -143,9 +143,9 @@ def _range2cols(areas: str) -> list[int]:
Examples
--------
- >>> _range2cols('A:E')
+ >>> _range2cols("A:E")
[0, 1, 2, 3, 4]
- >>> _range2cols('A,C,Z:AB')
+ >>> _range2cols("A,C,Z:AB")
[0, 2, 25, 26, 27]
"""
cols: list[int] = []
diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py
index 89f7cb9c4dec6..0c6885d789f15 100644
--- a/pandas/io/formats/css.py
+++ b/pandas/io/formats/css.py
@@ -244,14 +244,17 @@ def __call__(
Examples
--------
>>> resolve = CSSResolver()
- >>> inherited = {'font-family': 'serif', 'font-weight': 'bold'}
- >>> out = resolve('''
+ >>> inherited = {"font-family": "serif", "font-weight": "bold"}
+ >>> out = resolve(
+ ... '''
... border-color: BLUE RED;
... font-size: 1em;
... font-size: 2em;
... font-weight: normal;
... font-weight: inherit;
- ... ''', inherited)
+ ... ''',
+ ... inherited,
+ ... )
>>> sorted(out.items()) # doctest: +NORMALIZE_WHITESPACE
[('border-bottom-color', 'blue'),
('border-left-color', 'red'),
diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py
index 2d28b032ca49d..a837eddd6cf5b 100644
--- a/pandas/io/formats/info.py
+++ b/pandas/io/formats/info.py
@@ -334,10 +334,10 @@ def _sizeof_fmt(num: float, size_qualifier: str) -> str:
Examples
--------
- >>> _sizeof_fmt(23028, '')
+ >>> _sizeof_fmt(23028, "")
'22.5 KB'
- >>> _sizeof_fmt(23028, '+')
+ >>> _sizeof_fmt(23028, "+")
'22.5+ KB'
"""
for x in ["bytes", "KB", "MB", "GB", "TB"]:
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index 2cc9368f8846a..45465eb51c975 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -474,7 +474,7 @@ def _justify(
Examples
--------
- >>> _justify([['a', 'b']], [['abc', 'abcd']])
+ >>> _justify([["a", "b"]], [["abc", "abcd"]])
([(' a', ' b')], [('abc', 'abcd')])
"""
combined = head + tail
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 3a6a44a8be253..7be23b69dfa09 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -245,10 +245,12 @@ class Styler(StylerRenderer):
Examples
--------
- >>> df = pd.DataFrame([[1.0, 2.0, 3.0], [4, 5, 6]], index=['a', 'b'],
- ... columns=['A', 'B', 'C'])
- >>> pd.io.formats.style.Styler(df, precision=2,
- ... caption="My table") # doctest: +SKIP
+ >>> df = pd.DataFrame(
+ ... [[1.0, 2.0, 3.0], [4, 5, 6]], index=["a", "b"], columns=["A", "B", "C"]
+ ... )
+ >>> pd.io.formats.style.Styler(
+ ... df, precision=2, caption="My table"
+ ... ) # doctest: +SKIP
Please see:
`Table Visualization <../../user_guide/style.ipynb>`_ for more examples.
@@ -355,9 +357,11 @@ def concat(self, other: Styler) -> Styler:
A common use case is adding totals rows, or otherwise, via methods calculated
in ``DataFrame.agg``.
- >>> df = pd.DataFrame([[4, 6], [1, 9], [3, 4], [5, 5], [9, 6]],
- ... columns=["Mike", "Jim"],
- ... index=["Mon", "Tue", "Wed", "Thurs", "Fri"])
+ >>> df = pd.DataFrame(
+ ... [[4, 6], [1, 9], [3, 4], [5, 5], [9, 6]],
+ ... columns=["Mike", "Jim"],
+ ... index=["Mon", "Tue", "Wed", "Thurs", "Fri"],
+ ... )
>>> styler = df.style.concat(df.agg(["sum"]).style) # doctest: +SKIP
.. figure:: ../../_static/style/footer_simple.png
@@ -367,14 +371,16 @@ def concat(self, other: Styler) -> Styler:
>>> descriptors = df.agg(["sum", "mean", lambda s: s.dtype])
>>> descriptors.index = ["Total", "Average", "dtype"]
- >>> other = (descriptors.style
- ... .highlight_max(axis=1, subset=(["Total", "Average"], slice(None)))
- ... .format(subset=("Average", slice(None)), precision=2, decimal=",")
- ... .map(lambda v: "font-weight: bold;"))
- >>> styler = (df.style
- ... .highlight_max(color="salmon")
- ... .set_table_styles([{"selector": ".foot_row0",
- ... "props": "border-top: 1px solid black;"}]))
+ >>> other = (
+ ... descriptors.style.highlight_max(
+ ... axis=1, subset=(["Total", "Average"], slice(None))
+ ... )
+ ... .format(subset=("Average", slice(None)), precision=2, decimal=",")
+ ... .map(lambda v: "font-weight: bold;")
+ ... )
+ >>> styler = df.style.highlight_max(color="salmon").set_table_styles(
+ ... [{"selector": ".foot_row0", "props": "border-top: 1px solid black;"}]
+ ... )
>>> styler.concat(other) # doctest: +SKIP
.. figure:: ../../_static/style/footer_extended.png
@@ -382,8 +388,9 @@ def concat(self, other: Styler) -> Styler:
When ``other`` has fewer index levels than the original Styler it is possible
to extend the index in ``other``, with placeholder levels.
- >>> df = pd.DataFrame([[1], [2]],
- ... index=pd.MultiIndex.from_product([[0], [1, 2]]))
+ >>> df = pd.DataFrame(
+ ... [[1], [2]], index=pd.MultiIndex.from_product([[0], [1, 2]])
+ ... )
>>> descriptors = df.agg(["sum"])
>>> descriptors.index = pd.MultiIndex.from_product([[""], descriptors.index])
>>> df.style.concat(descriptors.style) # doctest: +SKIP
@@ -482,13 +489,20 @@ def set_tooltips(
Optionally controlling the tooltip visual display
- >>> df.style.set_tooltips(ttips, css_class='tt-add', props=[
- ... ('visibility', 'hidden'),
- ... ('position', 'absolute'),
- ... ('z-index', 1)]) # doctest: +SKIP
>>> df.style.set_tooltips(
- ... ttips, css_class='tt-add',
- ... props='visibility:hidden; position:absolute; z-index:1;')
+ ... ttips,
+ ... css_class="tt-add",
+ ... props=[
+ ... ("visibility", "hidden"),
+ ... ("position", "absolute"),
+ ... ("z-index", 1),
+ ... ],
+ ... ) # doctest: +SKIP
+ >>> df.style.set_tooltips(
+ ... ttips,
+ ... css_class="tt-add",
+ ... props="visibility:hidden; position:absolute; z-index:1;",
+ ... )
... # doctest: +SKIP
"""
if not self.cell_ids:
@@ -1316,7 +1330,7 @@ def to_html(
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
+ >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> print(df.style.to_html()) # doctest: +SKIP
<style type="text/css">
</style>
@@ -1443,7 +1457,7 @@ def to_string(
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
+ >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> df.style.to_string()
' A B\\n0 1 3\\n1 2 4\\n'
"""
@@ -1496,19 +1510,24 @@ def set_td_classes(self, classes: DataFrame) -> Styler:
Examples
--------
>>> df = pd.DataFrame(data=[[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
- >>> classes = pd.DataFrame([
- ... ["min-val red", "", "blue"],
- ... ["red", None, "blue max-val"]
- ... ], index=df.index, columns=df.columns)
+ >>> classes = pd.DataFrame(
+ ... [["min-val red", "", "blue"], ["red", None, "blue max-val"]],
+ ... index=df.index,
+ ... columns=df.columns,
+ ... )
>>> df.style.set_td_classes(classes) # doctest: +SKIP
Using `MultiIndex` columns and a `classes` `DataFrame` as a subset of the
underlying,
- >>> df = pd.DataFrame([[1, 2], [3, 4]], index=["a", "b"],
- ... columns=[["level0", "level0"], ["level1a", "level1b"]])
- >>> classes = pd.DataFrame(["min-val"], index=["a"],
- ... columns=[["level0"], ["level1a"]])
+ >>> df = pd.DataFrame(
+ ... [[1, 2], [3, 4]],
+ ... index=["a", "b"],
+ ... columns=[["level0", "level0"], ["level1a", "level1b"]],
+ ... )
+ >>> classes = pd.DataFrame(
+ ... ["min-val"], index=["a"], columns=[["level0"], ["level1a"]]
+ ... )
>>> df.style.set_td_classes(classes) # doctest: +SKIP
Form of the output with new additional css classes,
@@ -1680,11 +1699,11 @@ def clear(self) -> None:
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, np.nan]})
+ >>> df = pd.DataFrame({"A": [1, 2], "B": [3, np.nan]})
After any added style:
- >>> df.style.highlight_null(color='yellow') # doctest: +SKIP
+ >>> df.style.highlight_null(color="yellow") # doctest: +SKIP
Remove it with:
@@ -1821,22 +1840,22 @@ def apply(
>>> def highlight_max(x, color):
... return np.where(x == np.nanmax(x.to_numpy()), f"color: {color};", None)
>>> df = pd.DataFrame(np.random.randn(5, 2), columns=["A", "B"])
- >>> df.style.apply(highlight_max, color='red') # doctest: +SKIP
- >>> df.style.apply(highlight_max, color='blue', axis=1) # doctest: +SKIP
- >>> df.style.apply(highlight_max, color='green', axis=None) # doctest: +SKIP
+ >>> df.style.apply(highlight_max, color="red") # doctest: +SKIP
+ >>> df.style.apply(highlight_max, color="blue", axis=1) # doctest: +SKIP
+ >>> df.style.apply(highlight_max, color="green", axis=None) # doctest: +SKIP
Using ``subset`` to restrict application to a single column or multiple columns
- >>> df.style.apply(highlight_max, color='red', subset="A")
+ >>> df.style.apply(highlight_max, color="red", subset="A")
... # doctest: +SKIP
- >>> df.style.apply(highlight_max, color='red', subset=["A", "B"])
+ >>> df.style.apply(highlight_max, color="red", subset=["A", "B"])
... # doctest: +SKIP
Using a 2d input to ``subset`` to select rows in addition to columns
- >>> df.style.apply(highlight_max, color='red', subset=([0, 1, 2], slice(None)))
+ >>> df.style.apply(highlight_max, color="red", subset=([0, 1, 2], slice(None)))
... # doctest: +SKIP
- >>> df.style.apply(highlight_max, color='red', subset=(slice(0, 5, 2), "A"))
+ >>> df.style.apply(highlight_max, color="red", subset=(slice(0, 5, 2), "A"))
... # doctest: +SKIP
Using a function which returns a Series / DataFrame of unequal length but
@@ -1945,7 +1964,7 @@ def apply_index(
Selectively applying to specific levels of MultiIndex columns.
- >>> midx = pd.MultiIndex.from_product([['ix', 'jy'], [0, 1], ['x3', 'z4']])
+ >>> midx = pd.MultiIndex.from_product([["ix", "jy"], [0, 1], ["x3", "z4"]])
>>> df = pd.DataFrame([np.arange(8)], columns=midx)
>>> def highlight_x({var}):
... return {ret2}
@@ -2073,20 +2092,22 @@ def map(self, func: Callable, subset: Subset | None = None, **kwargs) -> Styler:
>>> def color_negative(v, color):
... return f"color: {color};" if v < 0 else None
>>> df = pd.DataFrame(np.random.randn(5, 2), columns=["A", "B"])
- >>> df.style.map(color_negative, color='red') # doctest: +SKIP
+ >>> df.style.map(color_negative, color="red") # doctest: +SKIP
Using ``subset`` to restrict application to a single column or multiple columns
- >>> df.style.map(color_negative, color='red', subset="A") # doctest: +SKIP
- >>> df.style.map(color_negative,
- ... color='red', subset=["A", "B"]) # doctest: +SKIP
+ >>> df.style.map(color_negative, color="red", subset="A")
+ ... # doctest: +SKIP
+ >>> df.style.map(color_negative, color="red", subset=["A", "B"])
+ ... # doctest: +SKIP
Using a 2d input to ``subset`` to select rows in addition to columns
- >>> df.style.map(color_negative, color='red',
- ... subset=([0, 1, 2], slice(None))) # doctest: +SKIP
- >>> df.style.map(color_negative,
- ... color='red', subset=(slice(0, 5, 2), "A")) # doctest: +SKIP
+ >>> df.style.map(
+ ... color_negative, color="red", subset=([0, 1, 2], slice(None))
+ ... ) # doctest: +SKIP
+ >>> df.style.map(color_negative, color="red", subset=(slice(0, 5, 2), "A"))
+ ... # doctest: +SKIP
See `Table Visualization <../../user_guide/style.ipynb>`_ user guide for
more details.
@@ -2301,7 +2322,7 @@ def set_uuid(self, uuid: str) -> Styler:
Examples
--------
- >>> df = pd.DataFrame([[1, 2], [3, 4]], index=['A', 'B'], columns=['c1', 'c2'])
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], index=["A", "B"], columns=["c1", "c2"])
You can get the `id` attributes with the following:
@@ -2335,7 +2356,7 @@ def set_caption(self, caption: str | tuple | list) -> Styler:
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
+ >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> df.style.set_caption("test") # doctest: +SKIP
Please see:
@@ -2391,7 +2412,7 @@ def set_sticky(
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
+ >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> df.style.set_sticky(axis="index") # doctest: +SKIP
Please see:
@@ -2552,49 +2573,55 @@ def set_table_styles(
.. code-block:: python
- css_class_names = {"row_heading": "row_heading",
- "col_heading": "col_heading",
- "index_name": "index_name",
- "col": "col",
- "row": "row",
- "col_trim": "col_trim",
- "row_trim": "row_trim",
- "level": "level",
- "data": "data",
- "blank": "blank",
- "foot": "foot"}
+ css_class_names = {
+ "row_heading": "row_heading",
+ "col_heading": "col_heading",
+ "index_name": "index_name",
+ "col": "col",
+ "row": "row",
+ "col_trim": "col_trim",
+ "row_trim": "row_trim",
+ "level": "level",
+ "data": "data",
+ "blank": "blank",
+ "foot": "foot",
+ }
Examples
--------
- >>> df = pd.DataFrame(np.random.randn(10, 4),
- ... columns=['A', 'B', 'C', 'D'])
+ >>> df = pd.DataFrame(np.random.randn(10, 4), columns=["A", "B", "C", "D"])
>>> df.style.set_table_styles(
- ... [{'selector': 'tr:hover',
- ... 'props': [('background-color', 'yellow')]}]
+ ... [{"selector": "tr:hover", "props": [("background-color", "yellow")]}]
... ) # doctest: +SKIP
Or with CSS strings
>>> df.style.set_table_styles(
- ... [{'selector': 'tr:hover',
- ... 'props': 'background-color: yellow; font-size: 1em;'}]
+ ... [
+ ... {
+ ... "selector": "tr:hover",
+ ... "props": "background-color: yellow; font-size: 1em;",
+ ... }
+ ... ]
... ) # doctest: +SKIP
Adding column styling by name
- >>> df.style.set_table_styles({
- ... 'A': [{'selector': '',
- ... 'props': [('color', 'red')]}],
- ... 'B': [{'selector': 'td',
- ... 'props': 'color: blue;'}]
- ... }, overwrite=False) # doctest: +SKIP
+ >>> df.style.set_table_styles(
+ ... {
+ ... "A": [{"selector": "", "props": [("color", "red")]}],
+ ... "B": [{"selector": "td", "props": "color: blue;"}],
+ ... },
+ ... overwrite=False,
+ ... ) # doctest: +SKIP
Adding row styling
- >>> df.style.set_table_styles({
- ... 0: [{'selector': 'td:hover',
- ... 'props': [('font-size', '25px')]}]
- ... }, axis=1, overwrite=False) # doctest: +SKIP
+ >>> df.style.set_table_styles(
+ ... {0: [{"selector": "td:hover", "props": [("font-size", "25px")]}]},
+ ... axis=1,
+ ... overwrite=False,
+ ... ) # doctest: +SKIP
See `Table Visualization <../../user_guide/style.ipynb>`_ user guide for
more details.
@@ -2923,10 +2950,14 @@ def background_gradient(
Examples
--------
- >>> df = pd.DataFrame(columns=["City", "Temp (c)", "Rain (mm)", "Wind (m/s)"],
- ... data=[["Stockholm", 21.6, 5.0, 3.2],
- ... ["Oslo", 22.4, 13.3, 3.1],
- ... ["Copenhagen", 24.5, 0.0, 6.7]])
+ >>> df = pd.DataFrame(
+ ... columns=["City", "Temp (c)", "Rain (mm)", "Wind (m/s)"],
+ ... data=[
+ ... ["Stockholm", 21.6, 5.0, 3.2],
+ ... ["Oslo", 22.4, 13.3, 3.1],
+ ... ["Copenhagen", 24.5, 0.0, 6.7],
+ ... ],
+ ... )
Shading the values column-wise, with ``axis=0``, preselecting numeric columns
@@ -2963,9 +2994,9 @@ def background_gradient(
explicitly state ``subset`` to match the ``gmap`` shape
>>> gmap = np.array([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
- >>> df.style.{name}_gradient(
- ... axis=None, gmap=gmap, cmap='YlOrRd',
- ... subset=['Temp (c)', 'Rain (mm)', 'Wind (m/s)']) # doctest: +SKIP
+ >>> df.style.{name}_gradient(axis=None, gmap=gmap,
+ ... cmap='YlOrRd', subset=['Temp (c)', 'Rain (mm)', 'Wind (m/s)']
+ ... ) # doctest: +SKIP
.. figure:: ../../_static/style/{image_prefix}_axNone_gmap.png
"""
@@ -3044,7 +3075,7 @@ def set_properties(self, subset: Subset | None = None, **kwargs) -> Styler:
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_properties(color="white", align="right") # doctest: +SKIP
- >>> df.style.set_properties(**{'background-color': 'yellow'}) # doctest: +SKIP
+ >>> df.style.set_properties(**{"background-color": "yellow"}) # doctest: +SKIP
See `Table Visualization <../../user_guide/style.ipynb>`_ user guide for
more details.
@@ -3140,8 +3171,8 @@ def bar( # pylint: disable=disallowed-name
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
- >>> df.style.bar(subset=['A'], color='gray') # doctest: +SKIP
+ >>> df = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]})
+ >>> df.style.bar(subset=["A"], color="gray") # doctest: +SKIP
"""
if color is None and cmap is None:
color = "#d65f5f"
@@ -3219,8 +3250,8 @@ def highlight_null(
Examples
--------
- >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, np.nan]})
- >>> df.style.highlight_null(color='yellow') # doctest: +SKIP
+ >>> df = pd.DataFrame({"A": [1, 2], "B": [3, np.nan]})
+ >>> df.style.highlight_null(color="yellow") # doctest: +SKIP
Please see:
`Table Visualization <../../user_guide/style.ipynb>`_ for more examples.
@@ -3273,8 +3304,8 @@ def highlight_max(
Examples
--------
- >>> df = pd.DataFrame({'A': [2, 1], 'B': [3, 4]})
- >>> df.style.highlight_max(color='yellow') # doctest: +SKIP
+ >>> df = pd.DataFrame({"A": [2, 1], "B": [3, 4]})
+ >>> df.style.highlight_max(color="yellow") # doctest: +SKIP
Please see:
`Table Visualization <../../user_guide/style.ipynb>`_ for more examples.
@@ -3329,8 +3360,8 @@ def highlight_min(
Examples
--------
- >>> df = pd.DataFrame({'A': [2, 1], 'B': [3, 4]})
- >>> df.style.highlight_min(color='yellow') # doctest: +SKIP
+ >>> df = pd.DataFrame({"A": [2, 1], "B": [3, 4]})
+ >>> df.style.highlight_min(color="yellow") # doctest: +SKIP
Please see:
`Table Visualization <../../user_guide/style.ipynb>`_ for more examples.
@@ -3409,11 +3440,13 @@ def highlight_between(
--------
Basic usage
- >>> df = pd.DataFrame({
- ... 'One': [1.2, 1.6, 1.5],
- ... 'Two': [2.9, 2.1, 2.5],
- ... 'Three': [3.1, 3.2, 3.8],
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "One": [1.2, 1.6, 1.5],
+ ... "Two": [2.9, 2.1, 2.5],
+ ... "Three": [3.1, 3.2, 3.8],
+ ... }
+ ... )
>>> df.style.highlight_between(left=2.1, right=2.9) # doctest: +SKIP
.. figure:: ../../_static/style/hbetw_basic.png
@@ -3421,8 +3454,9 @@ def highlight_between(
Using a range input sequence along an ``axis``, in this case setting a ``left``
and ``right`` for each column individually
- >>> df.style.highlight_between(left=[1.4, 2.4, 3.4], right=[1.6, 2.6, 3.6],
- ... axis=1, color="#fffd75") # doctest: +SKIP
+ >>> df.style.highlight_between(
+ ... left=[1.4, 2.4, 3.4], right=[1.6, 2.6, 3.6], axis=1, color="#fffd75"
+ ... ) # doctest: +SKIP
.. figure:: ../../_static/style/hbetw_seq.png
@@ -3430,16 +3464,19 @@ def highlight_between(
matches the input DataFrame, with a constant ``right``
>>> df.style.highlight_between(
- ... left=[[2, 2, 3], [2, 2, 3], [3, 3, 3]], right=3.5,
- ... axis=None, color="#fffd75") # doctest: +SKIP
+ ... left=[[2, 2, 3], [2, 2, 3], [3, 3, 3]],
+ ... right=3.5,
+ ... axis=None,
+ ... color="#fffd75",
+ ... ) # doctest: +SKIP
.. figure:: ../../_static/style/hbetw_axNone.png
Using ``props`` instead of default background coloring
>>> df.style.highlight_between(
- ... left=1.5, right=3.5,
- ... props='font-weight:bold;color:#e83e8c') # doctest: +SKIP
+ ... left=1.5, right=3.5, props="font-weight:bold;color:#e83e8c"
+ ... ) # doctest: +SKIP
.. figure:: ../../_static/style/hbetw_props.png
"""
@@ -3529,8 +3566,11 @@ def highlight_quantile(
Use ``props`` instead of default background coloring
>>> df.style.highlight_quantile(
- ... axis=None, q_left=0.2, q_right=0.8,
- ... props='font-weight:bold;color:#e83e8c') # doctest: +SKIP
+ ... axis=None,
+ ... q_left=0.2,
+ ... q_right=0.8,
+ ... props="font-weight:bold;color:#e83e8c",
+ ... ) # doctest: +SKIP
.. figure:: ../../_static/style/hq_props.png
"""
@@ -3602,9 +3642,10 @@ def from_custom_template(
Examples
--------
>>> from pandas.io.formats.style import Styler
- >>> EasyStyler = Styler.from_custom_template("path/to/template",
- ... "template.tpl",
- ... ) # doctest: +SKIP
+ >>> EasyStyler = Styler.from_custom_template(
+ ... "path/to/template",
+ ... "template.tpl",
+ ... ) # doctest: +SKIP
>>> df = pd.DataFrame({"A": [1, 2]})
>>> EasyStyler(df) # doctest: +SKIP
@@ -3688,9 +3729,7 @@ def pipe(
.. code-block:: python
- (df.style.format(precision=3)
- .pipe(g, arg1=a)
- .pipe(f, arg2=b, arg3=c))
+ (df.style.format(precision=3).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c))
In particular, this allows users to define functions that take a
styler object, along with other parameters, and return the styler after
@@ -3718,9 +3757,11 @@ def pipe(
Since the method returns a ``Styler`` object it can be chained with other
methods as if applying the underlying highlighters directly.
- >>> (df.style.format("{:.1f}")
+ >>> (
+ ... df.style.format("{:.1f}")
... .pipe(some_highlights, min_color="green")
- ... .highlight_between(left=2, right=5)) # doctest: +SKIP
+ ... .highlight_between(left=2, right=5)
+ ... ) # doctest: +SKIP
.. figure:: ../../_static/style/df_pipe_hl2.png
@@ -3739,8 +3780,9 @@ def pipe(
>>> def highlight_last_level(styler):
... return styler.apply_index(
- ... lambda v: "background-color: pink; color: yellow", axis="columns",
- ... level=styler.columns.nlevels - 1
+ ... lambda v: "background-color: pink; color: yellow",
+ ... axis="columns",
+ ... level=styler.columns.nlevels - 1,
... ) # doctest: +SKIP
>>> df.columns = pd.MultiIndex.from_product([["A", "B"], ["X", "Y"]])
>>> df.style.pipe(highlight_last_level) # doctest: +SKIP
@@ -3757,6 +3799,7 @@ def pipe(
... return np.where(
... styler.data.isna().any(), "background-color: red;", ""
... )
+ ...
... return styler.apply_index(dynamic_highlight, axis=1, level=level)
>>> df.style.pipe(highlight_header_missing, level=1) # doctest: +SKIP
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index 4ba094ec614d0..1cf54dc2cc756 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -1449,10 +1449,10 @@ def relabel_index(
# relabel first, then hide
df = pd.DataFrame({"col": ["a", "b", "c"]})
- df.style.relabel_index(["A", "B", "C"]).hide([0,1])
+ df.style.relabel_index(["A", "B", "C"]).hide([0, 1])
# hide first, then relabel
df = pd.DataFrame({"col": ["a", "b", "c"]})
- df.style.hide([0,1]).relabel_index(["C"])
+ df.style.hide([0, 1]).relabel_index(["C"])
This method should be used, rather than :meth:`Styler.format_index`, in one of
the following cases (see examples):
@@ -1493,8 +1493,9 @@ def relabel_index(
1 5
1 0 6
1 7
- >>> styler.hide((midx.get_level_values(0) == 0) |
- ... (midx.get_level_values(1) == 0))
+ >>> styler.hide(
+ ... (midx.get_level_values(0) == 0) | (midx.get_level_values(1) == 0)
+ ... )
... # doctest: +SKIP
>>> styler.hide(level=[0, 1]) # doctest: +SKIP
>>> styler.relabel_index(["binary6", "binary7"]) # doctest: +SKIP
@@ -2154,10 +2155,12 @@ def _parse_latex_table_styles(table_styles: CSSStyles, selector: str) -> str | N
Examples
--------
- >>> table_styles = [{'selector': 'foo', 'props': [('attr','value')]},
- ... {'selector': 'bar', 'props': [('attr', 'overwritten')]},
- ... {'selector': 'bar', 'props': [('a1', 'baz'), ('a2', 'ignore')]}]
- >>> _parse_latex_table_styles(table_styles, selector='bar')
+ >>> table_styles = [
+ ... {"selector": "foo", "props": [("attr", "value")]},
+ ... {"selector": "bar", "props": [("attr", "overwritten")]},
+ ... {"selector": "bar", "props": [("a1", "baz"), ("a2", "ignore")]},
+ ... ]
+ >>> _parse_latex_table_styles(table_styles, selector="bar")
'baz'
Notes
@@ -2241,8 +2244,8 @@ def _parse_latex_header_span(
Examples
--------
- >>> cell = {'cellstyle': '', 'display_value':'text', 'attributes': 'colspan="3"'}
- >>> _parse_latex_header_span(cell, 't', 'c')
+ >>> cell = {"cellstyle": "", "display_value": "text", "attributes": 'colspan="3"'}
+ >>> _parse_latex_header_span(cell, "t", "c")
'\\multicolumn{3}{c}{text}'
"""
display_val = _parse_latex_cell_styles(
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 302f901aa0d16..adcb78d3fb7d1 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -1100,13 +1100,13 @@ def read_html(
passed to lxml or Beautiful Soup. However, these attributes must be
valid HTML table attributes to work correctly. For example, ::
- attrs = {{'id': 'table'}}
+ attrs = {{"id": "table"}}
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
<https://html.spec.whatwg.org/multipage/dom.html#global-attributes>`__. ::
- attrs = {{'asdf': 'table'}}
+ attrs = {{"asdf": "table"}}
is *not* a valid attribute dictionary because 'asdf' is not a valid
HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index a6d58d6cffb10..e9f2e319c0136 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -594,9 +594,7 @@ def read_parquet(
Examples
--------
- >>> original_df = pd.DataFrame(
- ... {{"foo": range(5), "bar": range(5, 10)}}
- ... )
+ >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}})
>>> original_df
foo bar
0 0 5
@@ -624,7 +622,7 @@ def read_parquet(
2 7
3 8
4 9
- >>> restored_bar.equals(original_df[['bar']])
+ >>> restored_bar.equals(original_df[["bar"]])
True
The function uses `kwargs` that are passed directly to the engine.
diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py
index f24d7a628998e..67f3e5a9f4880 100644
--- a/pandas/io/parsers/c_parser_wrapper.py
+++ b/pandas/io/parsers/c_parser_wrapper.py
@@ -396,7 +396,7 @@ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
def ensure_dtype_objs(
- dtype: DtypeArg | dict[Hashable, DtypeArg] | None
+ dtype: DtypeArg | dict[Hashable, DtypeArg] | None,
) -> DtypeObj | dict[Hashable, DtypeObj] | None:
"""
Ensure we have either None, a dtype object, or a dictionary mapping to
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index 71e1a31759a0c..07920eb1750f2 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -1515,7 +1515,7 @@ def read_fwf(
Examples
--------
- >>> pd.read_fwf('data.csv') # doctest: +SKIP
+ >>> pd.read_fwf("data.csv") # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 89867ab4f19d0..d3e93ebeb8fbb 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -78,7 +78,9 @@ def to_pickle(
Examples
--------
- >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP
+ >>> original_df = pd.DataFrame(
+ ... {{"foo": range(5), "bar": range(5, 10)}}
+ ... ) # doctest: +SKIP
>>> original_df # doctest: +SKIP
foo bar
0 0 5
@@ -96,7 +98,7 @@ def to_pickle(
2 2 7
3 3 8
4 4 9
- """ # noqa: E501
+ """
if protocol < 0:
protocol = pickle.HIGHEST_PROTOCOL
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 0baf642495584..1e11a9783f0e1 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -384,9 +384,9 @@ def read_hdf(
Examples
--------
- >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z']) # doctest: +SKIP
- >>> df.to_hdf('./store.h5', 'data') # doctest: +SKIP
- >>> reread = pd.read_hdf('./store.h5') # doctest: +SKIP
+ >>> df = pd.DataFrame([[1, 1.0, "a"]], columns=["x", "y", "z"]) # doctest: +SKIP
+ >>> df.to_hdf("./store.h5", "data") # doctest: +SKIP
+ >>> reread = pd.read_hdf("./store.h5") # doctest: +SKIP
"""
if mode not in ["r", "r+", "a"]:
raise ValueError(
@@ -527,9 +527,9 @@ class HDFStore:
Examples
--------
>>> bar = pd.DataFrame(np.random.randn(10, 4))
- >>> store = pd.HDFStore('test.h5')
- >>> store['foo'] = bar # write to HDF5
- >>> bar = store['foo'] # retrieve
+ >>> store = pd.HDFStore("test.h5")
+ >>> store["foo"] = bar # write to HDF5
+ >>> bar = store["foo"] # retrieve
>>> store.close()
**Create or load HDF5 file in-memory**
@@ -539,9 +539,9 @@ class HDFStore:
written when closed:
>>> bar = pd.DataFrame(np.random.randn(10, 4))
- >>> store = pd.HDFStore('test.h5', driver='H5FD_CORE')
- >>> store['foo'] = bar
- >>> store.close() # only now, data is written to disk
+ >>> store = pd.HDFStore("test.h5", driver="H5FD_CORE")
+ >>> store["foo"] = bar
+ >>> store.close() # only now, data is written to disk
"""
_handle: File | None
@@ -665,10 +665,10 @@ def keys(self, include: str = "pandas") -> list[str]:
Examples
--------
- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
- >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
- >>> store.put('data', df) # doctest: +SKIP
- >>> store.get('data') # doctest: +SKIP
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
+ >>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
+ >>> store.put("data", df) # doctest: +SKIP
+ >>> store.get("data") # doctest: +SKIP
>>> print(store.keys()) # doctest: +SKIP
['/data1', '/data2']
>>> store.close() # doctest: +SKIP
@@ -794,10 +794,10 @@ def get(self, key: str):
Examples
--------
- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
- >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
- >>> store.put('data', df) # doctest: +SKIP
- >>> store.get('data') # doctest: +SKIP
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
+ >>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
+ >>> store.put("data", df) # doctest: +SKIP
+ >>> store.get("data") # doctest: +SKIP
>>> store.close() # doctest: +SKIP
"""
with patch_pickle():
@@ -856,17 +856,17 @@ def select(
Examples
--------
- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
- >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
- >>> store.put('data', df) # doctest: +SKIP
- >>> store.get('data') # doctest: +SKIP
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
+ >>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
+ >>> store.put("data", df) # doctest: +SKIP
+ >>> store.get("data") # doctest: +SKIP
>>> print(store.keys()) # doctest: +SKIP
['/data1', '/data2']
- >>> store.select('/data1') # doctest: +SKIP
+ >>> store.select("/data1") # doctest: +SKIP
A B
0 1 2
1 3 4
- >>> store.select('/data1', where='columns == A') # doctest: +SKIP
+ >>> store.select("/data1", where="columns == A") # doctest: +SKIP
A
0 1
1 3
@@ -1146,9 +1146,9 @@ def put(
Examples
--------
- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
- >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
- >>> store.put('data', df) # doctest: +SKIP
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
+ >>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
+ >>> store.put("data", df) # doctest: +SKIP
"""
if format is None:
format = get_option("io.hdf.default_format") or "fixed"
@@ -1288,11 +1288,11 @@ def append(
Examples
--------
- >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
- >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
- >>> store.put('data', df1, format='table') # doctest: +SKIP
- >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B'])
- >>> store.append('data', df2) # doctest: +SKIP
+ >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
+ >>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
+ >>> store.put("data", df1, format="table") # doctest: +SKIP
+ >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=["A", "B"])
+ >>> store.append("data", df2) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
A B
0 1 2
@@ -1479,9 +1479,9 @@ def groups(self) -> list:
Examples
--------
- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
- >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
- >>> store.put('data', df) # doctest: +SKIP
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
+ >>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
+ >>> store.put("data", df) # doctest: +SKIP
>>> print(store.groups()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
[/data (Group) ''
@@ -1534,11 +1534,11 @@ def walk(self, where: str = "/") -> Iterator[tuple[str, list[str], list[str]]]:
Examples
--------
- >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
- >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
- >>> store.put('data', df1, format='table') # doctest: +SKIP
- >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B'])
- >>> store.append('data', df2) # doctest: +SKIP
+ >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
+ >>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
+ >>> store.put("data", df1, format="table") # doctest: +SKIP
+ >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=["A", "B"])
+ >>> store.append("data", df2) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
>>> for group in store.walk(): # doctest: +SKIP
... print(group) # doctest: +SKIP
@@ -1660,9 +1660,9 @@ def info(self) -> str:
Examples
--------
- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
- >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
- >>> store.put('data', df) # doctest: +SKIP
+ >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
+ >>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
+ >>> store.put("data", df) # doctest: +SKIP
>>> print(store.info()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
<class 'pandas.io.pytables.HDFStore'>
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index b4330c717d368..08f99a4d3093a 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -343,7 +343,7 @@ def read_sql_table(
Examples
--------
- >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
+ >>> pd.read_sql_table("table_name", "postgres:///db_name") # doctest:+SKIP
"""
check_dtype_backend(dtype_backend)
@@ -637,24 +637,28 @@ def read_sql(
providing only the SQL tablename will result in an error.
>>> from sqlite3 import connect
- >>> conn = connect(':memory:')
- >>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],
- ... columns=['int_column', 'date_column'])
- >>> df.to_sql(name='test_data', con=conn)
+ >>> conn = connect(":memory:")
+ >>> df = pd.DataFrame(
+ ... data=[[0, "10/11/12"], [1, "12/11/10"]],
+ ... columns=["int_column", "date_column"],
+ ... )
+ >>> df.to_sql(name="test_data", con=conn)
2
- >>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)
+ >>> pd.read_sql("SELECT int_column, date_column FROM test_data", conn)
int_column date_column
0 0 10/11/12
1 1 12/11/10
- >>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP
+ >>> pd.read_sql("test_data", "postgres:///db_name") # doctest:+SKIP
For parameterized query, using ``params`` is recommended over string interpolation.
>>> from sqlalchemy import text
- >>> sql = text('SELECT int_column, date_column FROM test_data WHERE int_column=:int_val')
- >>> pd.read_sql(sql, conn, params={'int_val': 1}) # doctest:+SKIP
+ >>> sql = text(
+ ... "SELECT int_column, date_column FROM test_data WHERE int_column=:int_val"
+ ... )
+ >>> pd.read_sql(sql, conn, params={"int_val": 1}) # doctest:+SKIP
int_column date_column
0 1 12/11/10
@@ -663,9 +667,11 @@ def read_sql(
Custom argument values for applying ``pd.to_datetime`` on a column are specified
via a dictionary format:
- >>> pd.read_sql('SELECT int_column, date_column FROM test_data',
- ... conn,
- ... parse_dates={"date_column": {"format": "%d/%m/%y"}})
+ >>> pd.read_sql(
+ ... "SELECT int_column, date_column FROM test_data",
+ ... conn,
+ ... parse_dates={"date_column": {"format": "%d/%m/%y"}},
+ ... )
int_column date_column
0 0 2012-11-10
1 1 2010-11-12
@@ -675,12 +681,12 @@ def read_sql(
pandas now supports reading via ADBC drivers
>>> from adbc_driver_postgresql import dbapi # doctest:+SKIP
- >>> with dbapi.connect('postgres:///db_name') as conn: # doctest:+SKIP
- ... pd.read_sql('SELECT int_column FROM test_data', conn)
+ >>> with dbapi.connect("postgres:///db_name") as conn: # doctest:+SKIP
+ ... pd.read_sql("SELECT int_column FROM test_data", conn)
int_column
0 0
1 1
- """ # noqa: E501
+ """
check_dtype_backend(dtype_backend)
if dtype_backend is lib.no_default:
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 447c97d078e02..c2a3db2d44b16 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -254,7 +254,7 @@ def _stata_elapsed_date_to_datetime_vec(dates: Series, fmt: str) -> Series:
Examples
--------
>>> dates = pd.Series([52])
- >>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
+ >>> _stata_elapsed_date_to_datetime_vec(dates, "%tw")
0 1961-01-01
dtype: datetime64[s]
@@ -1955,9 +1955,12 @@ def data_label(self) -> str:
>>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21)
>>> data_label = "This is a data file."
>>> path = "/My_path/filename.dta"
- >>> df.to_stata(path, time_stamp=time_stamp, # doctest: +SKIP
- ... data_label=data_label, # doctest: +SKIP
- ... version=None) # doctest: +SKIP
+ >>> df.to_stata(
+ ... path,
+ ... time_stamp=time_stamp, # doctest: +SKIP
+ ... data_label=data_label, # doctest: +SKIP
+ ... version=None,
+ ... ) # doctest: +SKIP
>>> with pd.io.stata.StataReader(path) as reader: # doctest: +SKIP
... print(reader.data_label) # doctest: +SKIP
This is a data file.
@@ -1987,8 +1990,12 @@ def variable_labels(self) -> dict[str, str]:
>>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21)
>>> path = "/My_path/filename.dta"
>>> variable_labels = {"col_1": "This is an example"}
- >>> df.to_stata(path, time_stamp=time_stamp, # doctest: +SKIP
- ... variable_labels=variable_labels, version=None) # doctest: +SKIP
+ >>> df.to_stata(
+ ... path,
+ ... time_stamp=time_stamp, # doctest: +SKIP
+ ... variable_labels=variable_labels,
+ ... version=None,
+ ... ) # doctest: +SKIP
>>> with pd.io.stata.StataReader(path) as reader: # doctest: +SKIP
... print(reader.variable_labels()) # doctest: +SKIP
{'index': '', 'col_1': 'This is an example', 'col_2': ''}
@@ -2014,8 +2021,12 @@ def value_labels(self) -> dict[str, dict[float, str]]:
>>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21)
>>> path = "/My_path/filename.dta"
>>> value_labels = {"col_1": {3: "x"}}
- >>> df.to_stata(path, time_stamp=time_stamp, # doctest: +SKIP
- ... value_labels=value_labels, version=None) # doctest: +SKIP
+ >>> df.to_stata(
+ ... path,
+ ... time_stamp=time_stamp, # doctest: +SKIP
+ ... value_labels=value_labels,
+ ... version=None,
+ ... ) # doctest: +SKIP
>>> with pd.io.stata.StataReader(path) as reader: # doctest: +SKIP
... print(reader.value_labels()) # doctest: +SKIP
{'col_1': {3: 'x'}}
@@ -2272,19 +2283,19 @@ class StataWriter(StataParser):
Examples
--------
- >>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
- >>> writer = StataWriter('./data_file.dta', data)
+ >>> data = pd.DataFrame([[1.0, 1]], columns=["a", "b"])
+ >>> writer = StataWriter("./data_file.dta", data)
>>> writer.write_file()
Directly write a zip file
>>> compression = {{"method": "zip", "archive_name": "data_file.dta"}}
- >>> writer = StataWriter('./data_file.zip', data, compression=compression)
+ >>> writer = StataWriter("./data_file.zip", data, compression=compression)
>>> writer.write_file()
Save a DataFrame with dates
>>> from datetime import datetime
- >>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
- >>> writer = StataWriter('./date_data_file.dta', data, {{'date' : 'tw'}})
+ >>> data = pd.DataFrame([[datetime(2000, 1, 1)]], columns=["date"])
+ >>> writer = StataWriter("./date_data_file.dta", data, {{"date": "tw"}})
>>> writer.write_file()
"""
@@ -2655,18 +2666,22 @@ def write_file(self) -> None:
Examples
--------
- >>> df = pd.DataFrame({"fully_labelled": [1, 2, 3, 3, 1],
- ... "partially_labelled": [1.0, 2.0, np.nan, 9.0, np.nan],
- ... "Y": [7, 7, 9, 8, 10],
- ... "Z": pd.Categorical(["j", "k", "l", "k", "j"]),
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "fully_labelled": [1, 2, 3, 3, 1],
+ ... "partially_labelled": [1.0, 2.0, np.nan, 9.0, np.nan],
+ ... "Y": [7, 7, 9, 8, 10],
+ ... "Z": pd.Categorical(["j", "k", "l", "k", "j"]),
+ ... }
+ ... )
>>> path = "/My_path/filename.dta"
- >>> labels = {"fully_labelled": {1: "one", 2: "two", 3: "three"},
- ... "partially_labelled": {1.0: "one", 2.0: "two"},
- ... }
- >>> writer = pd.io.stata.StataWriter(path,
- ... df,
- ... value_labels=labels) # doctest: +SKIP
+ >>> labels = {
+ ... "fully_labelled": {1: "one", 2: "two", 3: "three"},
+ ... "partially_labelled": {1.0: "one", 2.0: "two"},
+ ... }
+ >>> writer = pd.io.stata.StataWriter(
+ ... path, df, value_labels=labels
+ ... ) # doctest: +SKIP
>>> writer.write_file() # doctest: +SKIP
>>> df = pd.read_stata(path) # doctest: +SKIP
>>> df # doctest: +SKIP
@@ -3226,22 +3241,24 @@ class StataWriter117(StataWriter):
Examples
--------
- >>> data = pd.DataFrame([[1.0, 1, 'a']], columns=['a', 'b', 'c'])
- >>> writer = pd.io.stata.StataWriter117('./data_file.dta', data)
+ >>> data = pd.DataFrame([[1.0, 1, "a"]], columns=["a", "b", "c"])
+ >>> writer = pd.io.stata.StataWriter117("./data_file.dta", data)
>>> writer.write_file()
Directly write a zip file
>>> compression = {"method": "zip", "archive_name": "data_file.dta"}
>>> writer = pd.io.stata.StataWriter117(
- ... './data_file.zip', data, compression=compression
- ... )
+ ... "./data_file.zip", data, compression=compression
+ ... )
>>> writer.write_file()
Or with long strings stored in strl format
- >>> data = pd.DataFrame([['A relatively long string'], [''], ['']],
- ... columns=['strls'])
+ >>> data = pd.DataFrame(
+ ... [["A relatively long string"], [""], [""]], columns=["strls"]
+ ... )
>>> writer = pd.io.stata.StataWriter117(
- ... './data_file_with_long_strings.dta', data, convert_strl=['strls'])
+ ... "./data_file_with_long_strings.dta", data, convert_strl=["strls"]
+ ... )
>>> writer.write_file()
"""
@@ -3619,21 +3636,23 @@ class StataWriterUTF8(StataWriter117):
Using Unicode data and column names
>>> from pandas.io.stata import StataWriterUTF8
- >>> data = pd.DataFrame([[1.0, 1, 'ᴬ']], columns=['a', 'β', 'ĉ'])
- >>> writer = StataWriterUTF8('./data_file.dta', data)
+ >>> data = pd.DataFrame([[1.0, 1, "ᴬ"]], columns=["a", "β", "ĉ"])
+ >>> writer = StataWriterUTF8("./data_file.dta", data)
>>> writer.write_file()
Directly write a zip file
>>> compression = {"method": "zip", "archive_name": "data_file.dta"}
- >>> writer = StataWriterUTF8('./data_file.zip', data, compression=compression)
+ >>> writer = StataWriterUTF8("./data_file.zip", data, compression=compression)
>>> writer.write_file()
Or with long strings stored in strl format
- >>> data = pd.DataFrame([['ᴀ relatively long ŝtring'], [''], ['']],
- ... columns=['strls'])
- >>> writer = StataWriterUTF8('./data_file_with_long_strings.dta', data,
- ... convert_strl=['strls'])
+ >>> data = pd.DataFrame(
+ ... [["ᴀ relatively long ŝtring"], [""], [""]], columns=["strls"]
+ ... )
+ >>> writer = StataWriterUTF8(
+ ... "./data_file_with_long_strings.dta", data, convert_strl=["strls"]
+ ... )
>>> writer.write_file()
"""
diff --git a/pandas/io/xml.py b/pandas/io/xml.py
index 3faffbd21842f..97bf520a77611 100644
--- a/pandas/io/xml.py
+++ b/pandas/io/xml.py
@@ -916,9 +916,7 @@ def read_xml(
Note: if XML document uses default namespace denoted as
`xmlns='<URI>'` without a prefix, you must assign any temporary
namespace prefix such as 'doc' to the URI in order to parse
- underlying nodes and/or attributes. For example, ::
-
- namespaces = {{"doc": "https://example.com"}}
+ underlying nodes and/or attributes.
elems_only : bool, optional, default False
Parse only the child elements at the specified ``xpath``. By default,
@@ -987,9 +985,7 @@ def read_xml(
and unlike ``xpath``, descendants do not need to relate to each other but can
exist any where in document under the repeating element. This memory-
efficient method should be used for very large XML files (500MB, 1GB, or 5GB+).
- For example, ::
-
- iterparse = {{"row_element": ["child_elem", "attr", "grandchild_elem"]}}
+ For example, ``{{"row_element": ["child_elem", "attr", "grandchild_elem"]}}``.
.. versionadded:: 1.5.0
@@ -1118,9 +1114,11 @@ def read_xml(
... </doc:row>
... </doc:data>'''
- >>> df = pd.read_xml(StringIO(xml),
- ... xpath="//doc:row",
- ... namespaces={{"doc": "https://example.com"}})
+ >>> df = pd.read_xml(
+ ... StringIO(xml),
+ ... xpath="//doc:row",
+ ... namespaces={{"doc": "https://example.com"}},
+ ... )
>>> df
shape degrees sides
0 square 360 4.0
@@ -1147,9 +1145,9 @@ def read_xml(
... </data>
... '''
- >>> df = pd.read_xml(StringIO(xml_data),
- ... dtype_backend="numpy_nullable",
- ... parse_dates=["e"])
+ >>> df = pd.read_xml(
+ ... StringIO(xml_data), dtype_backend="numpy_nullable", parse_dates=["e"]
+ ... )
>>> df
index a b c d e
0 0 1 2.5 True a 2019-12-31
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 7c02ffdbafcfa..51201eafb9475 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -112,7 +112,7 @@ def hist_series(
.. plot::
:context: close-figs
- >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
+ >>> lst = ["a", "a", "a", "b", "b", "b"]
>>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst)
>>> hist = ser.hist()
@@ -121,7 +121,7 @@ def hist_series(
.. plot::
:context: close-figs
- >>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
+ >>> lst = ["a", "a", "a", "b", "b", "b"]
>>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst)
>>> hist = ser.groupby(level=0).hist()
"""
@@ -241,12 +241,11 @@ def hist_frame(
.. plot::
:context: close-figs
- >>> data = {'length': [1.5, 0.5, 1.2, 0.9, 3],
- ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]}
- >>> index = ['pig', 'rabbit', 'duck', 'chicken', 'horse']
+ >>> data = {"length": [1.5, 0.5, 1.2, 0.9, 3], "width": [0.7, 0.2, 0.15, 0.2, 1.1]}
+ >>> index = ["pig", "rabbit", "duck", "chicken", "horse"]
>>> df = pd.DataFrame(data, index=index)
>>> hist = df.hist(bins=3)
- """
+ """ # noqa: E501
plot_backend = _get_plot_backend(backend)
return plot_backend.hist_frame(
data,
@@ -606,10 +605,10 @@ def boxplot_frame_groupby(
>>> import itertools
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
- >>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
+ >>> index = pd.MultiIndex.from_tuples(tuples, names=["lvl0", "lvl1"])
>>> data = np.random.randn(len(index), 4)
- >>> df = pd.DataFrame(data, columns=list('ABCD'), index=index)
- >>> grouped = df.groupby(level='lvl1')
+ >>> df = pd.DataFrame(data, columns=list("ABCD"), index=index)
+ >>> grouped = df.groupby(level="lvl1")
>>> grouped.boxplot(rot=45, fontsize=12, figsize=(8, 10)) # doctest: +SKIP
The ``subplots=False`` option shows the boxplots in a single figure.
@@ -802,16 +801,17 @@ class PlotAccessor(PandasObject):
:context: close-figs
>>> ser = pd.Series([1, 2, 3, 3])
- >>> plot = ser.plot(kind='hist', title="My plot")
+ >>> plot = ser.plot(kind="hist", title="My plot")
For DataFrame:
.. plot::
:context: close-figs
- >>> df = pd.DataFrame({'length': [1.5, 0.5, 1.2, 0.9, 3],
- ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]},
- ... index=['pig', 'rabbit', 'duck', 'chicken', 'horse'])
+ >>> df = pd.DataFrame(
+ ... {"length": [1.5, 0.5, 1.2, 0.9, 3], "width": [0.7, 0.2, 0.15, 0.2, 1.1]},
+ ... index=["pig", "rabbit", "duck", "chicken", "horse"],
+ ... )
>>> plot = df.plot(title="DataFrame Plot")
For SeriesGroupBy:
@@ -828,10 +828,9 @@ class PlotAccessor(PandasObject):
.. plot::
:context: close-figs
- >>> df = pd.DataFrame({"col1" : [1, 2, 3, 4],
- ... "col2" : ["A", "B", "A", "B"]})
+ >>> df = pd.DataFrame({"col1": [1, 2, 3, 4], "col2": ["A", "B", "A", "B"]})
>>> plot = df.groupby("col2").plot(kind="bar", title="DataFrameGroupBy Plot")
- """
+ """ # noqa: E501
_common_kinds = ("line", "bar", "barh", "kde", "density", "area", "hist", "box")
_series_kinds = ("pie",)
@@ -1347,7 +1346,7 @@ def box(self, by: IndexLabel | None = None, **kwargs) -> PlotAccessor:
:context: close-figs
>>> data = np.random.randn(25, 4)
- >>> df = pd.DataFrame(data, columns=list('ABCD'))
+ >>> df = pd.DataFrame(data, columns=list("ABCD"))
>>> ax = df.plot.box()
You can also generate groupings if you specify the `by` parameter (which
@@ -1410,8 +1409,8 @@ def hist(
.. plot::
:context: close-figs
- >>> df = pd.DataFrame(np.random.randint(1, 7, 6000), columns=['one'])
- >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
+ >>> df = pd.DataFrame(np.random.randint(1, 7, 6000), columns=["one"])
+ >>> df["two"] = df["one"] + np.random.randint(1, 7, 6000)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
A grouped histogram can be generated by providing the parameter `by` (which
@@ -1509,10 +1508,12 @@ def kde(
.. plot::
:context: close-figs
- >>> df = pd.DataFrame({
- ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
- ... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
- ... })
+ >>> df = pd.DataFrame(
+ ... {
+ ... "x": [1, 2, 2.5, 3, 3.5, 4, 5],
+ ... "y": [4, 4, 4.5, 5, 5.5, 6, 6],
+ ... }
+ ... )
>>> ax = df.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
@@ -1583,12 +1584,14 @@ def area(
.. plot::
:context: close-figs
- >>> df = pd.DataFrame({
- ... 'sales': [3, 2, 3, 9, 10, 6],
- ... 'signups': [5, 5, 6, 12, 14, 13],
- ... 'visits': [20, 42, 28, 62, 81, 50],
- ... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
- ... freq='ME'))
+ >>> df = pd.DataFrame(
+ ... {
+ ... "sales": [3, 2, 3, 9, 10, 6],
+ ... "signups": [5, 5, 6, 12, 14, 13],
+ ... "visits": [20, 42, 28, 62, 81, 50],
+ ... },
+ ... index=pd.date_range(start="2018/01/01", end="2018/07/01", freq="ME"),
+ ... )
>>> ax = df.plot.area()
Area plots are stacked by default. To produce an unstacked plot,
@@ -1604,20 +1607,22 @@ def area(
.. plot::
:context: close-figs
- >>> ax = df.plot.area(y='sales')
+ >>> ax = df.plot.area(y="sales")
Draw with a different `x`:
.. plot::
:context: close-figs
- >>> df = pd.DataFrame({
- ... 'sales': [3, 2, 3],
- ... 'visits': [20, 42, 28],
- ... 'day': [1, 2, 3],
- ... })
- >>> ax = df.plot.area(x='day')
- """
+ >>> df = pd.DataFrame(
+ ... {
+ ... "sales": [3, 2, 3],
+ ... "visits": [20, 42, 28],
+ ... "day": [1, 2, 3],
+ ... }
+ ... )
+ >>> ax = df.plot.area(x="day")
+ """ # noqa: E501
return self(kind="area", x=x, y=y, stacked=stacked, **kwargs)
def pie(self, y: IndexLabel | None = None, **kwargs) -> PlotAccessor:
@@ -1657,10 +1662,11 @@ def pie(self, y: IndexLabel | None = None, **kwargs) -> PlotAccessor:
.. plot::
:context: close-figs
- >>> df = pd.DataFrame({'mass': [0.330, 4.87 , 5.97],
- ... 'radius': [2439.7, 6051.8, 6378.1]},
- ... index=['Mercury', 'Venus', 'Earth'])
- >>> plot = df.plot.pie(y='mass', figsize=(5, 5))
+ >>> df = pd.DataFrame(
+ ... {"mass": [0.330, 4.87, 5.97], "radius": [2439.7, 6051.8, 6378.1]},
+ ... index=["Mercury", "Venus", "Earth"],
+ ... )
+ >>> plot = df.plot.pie(y="mass", figsize=(5, 5))
.. plot::
:context: close-figs
@@ -1748,22 +1754,26 @@ def scatter(
.. plot::
:context: close-figs
- >>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
- ... [6.4, 3.2, 1], [5.9, 3.0, 2]],
- ... columns=['length', 'width', 'species'])
- >>> ax1 = df.plot.scatter(x='length',
- ... y='width',
- ... c='DarkBlue')
+ >>> df = pd.DataFrame(
+ ... [
+ ... [5.1, 3.5, 0],
+ ... [4.9, 3.0, 0],
+ ... [7.0, 3.2, 1],
+ ... [6.4, 3.2, 1],
+ ... [5.9, 3.0, 2],
+ ... ],
+ ... columns=["length", "width", "species"],
+ ... )
+ >>> ax1 = df.plot.scatter(x="length", y="width", c="DarkBlue")
And now with the color determined by a column as well.
.. plot::
:context: close-figs
- >>> ax2 = df.plot.scatter(x='length',
- ... y='width',
- ... c='species',
- ... colormap='viridis')
+ >>> ax2 = df.plot.scatter(
+ ... x="length", y="width", c="species", colormap="viridis"
+ ... )
"""
return self(kind="scatter", x=x, y=y, s=s, c=c, **kwargs)
@@ -1832,9 +1842,8 @@ def hexbin(
:context: close-figs
>>> n = 10000
- >>> df = pd.DataFrame({'x': np.random.randn(n),
- ... 'y': np.random.randn(n)})
- >>> ax = df.plot.hexbin(x='x', y='y', gridsize=20)
+ >>> df = pd.DataFrame({"x": np.random.randn(n), "y": np.random.randn(n)})
+ >>> ax = df.plot.hexbin(x="x", y="y", gridsize=20)
The next example uses `C` and `np.sum` as `reduce_C_function`.
Note that `'observations'` values ranges from 1 to 5 but the result
@@ -1845,17 +1854,21 @@ def hexbin(
:context: close-figs
>>> n = 500
- >>> df = pd.DataFrame({
- ... 'coord_x': np.random.uniform(-3, 3, size=n),
- ... 'coord_y': np.random.uniform(30, 50, size=n),
- ... 'observations': np.random.randint(1, 5, size=n)
- ... })
- >>> ax = df.plot.hexbin(x='coord_x',
- ... y='coord_y',
- ... C='observations',
- ... reduce_C_function=np.sum,
- ... gridsize=10,
- ... cmap="viridis")
+ >>> df = pd.DataFrame(
+ ... {
+ ... "coord_x": np.random.uniform(-3, 3, size=n),
+ ... "coord_y": np.random.uniform(30, 50, size=n),
+ ... "observations": np.random.randint(1, 5, size=n),
+ ... }
+ ... )
+ >>> ax = df.plot.hexbin(
+ ... x="coord_x",
+ ... y="coord_y",
+ ... C="observations",
+ ... reduce_C_function=np.sum,
+ ... gridsize=10,
+ ... cmap="viridis",
+ ... )
"""
if reduce_C_function is not None:
kwargs["reduce_C_function"] = reduce_C_function
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 6fa75ba5fb12d..1c8cd9a4970c8 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -465,7 +465,7 @@ def _validate_color_args(self, color, colormap):
@final
@staticmethod
def _iter_data(
- data: DataFrame | dict[Hashable, Series | DataFrame]
+ data: DataFrame | dict[Hashable, Series | DataFrame],
) -> Iterator[tuple[Hashable, np.ndarray]]:
for col, values in data.items():
# This was originally written to use values.values before EAs
diff --git a/pandas/plotting/_matplotlib/groupby.py b/pandas/plotting/_matplotlib/groupby.py
index cbb66065a8039..783f79710097c 100644
--- a/pandas/plotting/_matplotlib/groupby.py
+++ b/pandas/plotting/_matplotlib/groupby.py
@@ -50,10 +50,9 @@ def create_iter_data_given_by(
If `by` is assigned:
>>> import numpy as np
- >>> tuples = [('h1', 'a'), ('h1', 'b'), ('h2', 'a'), ('h2', 'b')]
+ >>> tuples = [("h1", "a"), ("h1", "b"), ("h2", "a"), ("h2", "b")]
>>> mi = pd.MultiIndex.from_tuples(tuples)
- >>> value = [[1, 3, np.nan, np.nan],
- ... [3, 4, np.nan, np.nan], [np.nan, np.nan, 5, 6]]
+ >>> value = [[1, 3, np.nan, np.nan], [3, 4, np.nan, np.nan], [np.nan, np.nan, 5, 6]]
>>> data = pd.DataFrame(value, columns=mi)
>>> create_iter_data_given_by(data)
{'h1': h1
@@ -106,9 +105,9 @@ def reconstruct_data_with_by(
Examples
--------
- >>> d = {'h': ['h1', 'h1', 'h2'], 'a': [1, 3, 5], 'b': [3, 4, 6]}
+ >>> d = {"h": ["h1", "h1", "h2"], "a": [1, 3, 5], "b": [3, 4, 6]}
>>> df = pd.DataFrame(d)
- >>> reconstruct_data_with_by(df, by='h', cols=['a', 'b'])
+ >>> reconstruct_data_with_by(df, by="h", cols=["a", "b"])
h1 h2
a b a b
0 1.0 3.0 NaN NaN
diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index 89a8a7cf79719..50cfdbd967ea7 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -98,13 +98,14 @@ def _get_layout(
nrows, ncols = layout
if nrows == -1 and ncols > 0:
- layout = nrows, ncols = (ceil(nplots / ncols), ncols)
+ layout = (ceil(nplots / ncols), ncols)
elif ncols == -1 and nrows > 0:
- layout = nrows, ncols = (nrows, ceil(nplots / nrows))
+ layout = (nrows, ceil(nplots / nrows))
elif ncols <= 0 and nrows <= 0:
msg = "At least one dimension of layout must be positive"
raise ValueError(msg)
+ nrows, ncols = layout
if nrows * ncols < nplots:
raise ValueError(
f"Layout of {nrows}x{ncols} must be larger than required size {nplots}"
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index c8c8f68f5289e..eb2d12e588b8f 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -51,12 +51,13 @@ def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table:
:context: close-figs
>>> import matplotlib.pyplot as plt
- >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
+ >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> fix, ax = plt.subplots()
- >>> ax.axis('off')
+ >>> ax.axis("off")
(0.0, 1.0, 0.0, 1.0)
- >>> table = pd.plotting.table(ax, df, loc='center',
- ... cellLoc='center', colWidths=list([.2, .2]))
+ >>> table = pd.plotting.table(
+ ... ax, df, loc="center", cellLoc="center", colWidths=list([0.2, 0.2])
+ ... )
"""
plot_backend = _get_plot_backend("matplotlib")
return plot_backend.table(
@@ -92,16 +93,17 @@ def register() -> None:
>>> pd.plotting.register_matplotlib_converters()
- >>> df = pd.DataFrame({'ts': pd.period_range('2020', periods=2, freq='M'),
- ... 'y': [1, 2]
- ... })
- >>> plot = df.plot.line(x='ts', y='y')
+ >>> df = pd.DataFrame(
+ ... {"ts": pd.period_range("2020", periods=2, freq="M"), "y": [1, 2]}
+ ... )
+ >>> plot = df.plot.line(x="ts", y="y")
Unsetting the register manually an error will be raised:
- >>> pd.set_option("plotting.matplotlib.register_converters",
- ... False) # doctest: +SKIP
- >>> df.plot.line(x='ts', y='y') # doctest: +SKIP
+ >>> pd.set_option(
+ ... "plotting.matplotlib.register_converters", False
+ ... ) # doctest: +SKIP
+ >>> df.plot.line(x="ts", y="y") # doctest: +SKIP
Traceback (most recent call last):
TypeError: float() argument must be a string or a real number, not 'Period'
"""
@@ -135,16 +137,17 @@ def deregister() -> None:
>>> pd.plotting.register_matplotlib_converters()
- >>> df = pd.DataFrame({'ts': pd.period_range('2020', periods=2, freq='M'),
- ... 'y': [1, 2]
- ... })
- >>> plot = df.plot.line(x='ts', y='y')
+ >>> df = pd.DataFrame(
+ ... {"ts": pd.period_range("2020", periods=2, freq="M"), "y": [1, 2]}
+ ... )
+ >>> plot = df.plot.line(x="ts", y="y")
Unsetting the register manually an error will be raised:
- >>> pd.set_option("plotting.matplotlib.register_converters",
- ... False) # doctest: +SKIP
- >>> df.plot.line(x='ts', y='y') # doctest: +SKIP
+ >>> pd.set_option(
+ ... "plotting.matplotlib.register_converters", False
+ ... ) # doctest: +SKIP
+ >>> df.plot.line(x="ts", y="y") # doctest: +SKIP
Traceback (most recent call last):
TypeError: float() argument must be a string or a real number, not 'Period'
"""
@@ -204,7 +207,7 @@ def scatter_matrix(
.. plot::
:context: close-figs
- >>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A', 'B', 'C', 'D'])
+ >>> df = pd.DataFrame(np.random.randn(1000, 4), columns=["A", "B", "C", "D"])
>>> pd.plotting.scatter_matrix(df, alpha=0.2)
array([[<Axes: xlabel='A', ylabel='A'>, <Axes: xlabel='B', ylabel='A'>,
<Axes: xlabel='C', ylabel='A'>, <Axes: xlabel='D', ylabel='A'>],
@@ -288,25 +291,25 @@ def radviz(
>>> df = pd.DataFrame(
... {
- ... 'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6, 6.7, 4.6],
- ... 'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2, 3.3, 3.6],
- ... 'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4, 5.7, 1.0],
- ... 'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2, 2.1, 0.2],
- ... 'Category': [
- ... 'virginica',
- ... 'virginica',
- ... 'setosa',
- ... 'virginica',
- ... 'virginica',
- ... 'versicolor',
- ... 'versicolor',
- ... 'setosa',
- ... 'virginica',
- ... 'setosa'
- ... ]
+ ... "SepalLength": [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6, 6.7, 4.6],
+ ... "SepalWidth": [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2, 3.3, 3.6],
+ ... "PetalLength": [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4, 5.7, 1.0],
+ ... "PetalWidth": [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2, 2.1, 0.2],
+ ... "Category": [
+ ... "virginica",
+ ... "virginica",
+ ... "setosa",
+ ... "virginica",
+ ... "virginica",
+ ... "versicolor",
+ ... "versicolor",
+ ... "setosa",
+ ... "virginica",
+ ... "setosa",
+ ... ],
... }
... )
- >>> pd.plotting.radviz(df, 'Category') # doctest: +SKIP
+ >>> pd.plotting.radviz(df, "Category") # doctest: +SKIP
"""
plot_backend = _get_plot_backend("matplotlib")
return plot_backend.radviz(
@@ -371,10 +374,10 @@ def andrews_curves(
:context: close-figs
>>> df = pd.read_csv(
- ... 'https://raw.githubusercontent.com/pandas-dev/'
- ... 'pandas/main/pandas/tests/io/data/csv/iris.csv'
+ ... "https://raw.githubusercontent.com/pandas-dev/"
+ ... "pandas/main/pandas/tests/io/data/csv/iris.csv"
... )
- >>> pd.plotting.andrews_curves(df, 'Name') # doctest: +SKIP
+ >>> pd.plotting.andrews_curves(df, "Name") # doctest: +SKIP
"""
plot_backend = _get_plot_backend("matplotlib")
return plot_backend.andrews_curves(
@@ -502,11 +505,11 @@ def parallel_coordinates(
:context: close-figs
>>> df = pd.read_csv(
- ... 'https://raw.githubusercontent.com/pandas-dev/'
- ... 'pandas/main/pandas/tests/io/data/csv/iris.csv'
+ ... "https://raw.githubusercontent.com/pandas-dev/"
+ ... "pandas/main/pandas/tests/io/data/csv/iris.csv"
... )
>>> pd.plotting.parallel_coordinates(
- ... df, 'Name', color=('#556270', '#4ECDC4', '#C7F464')
+ ... df, "Name", color=("#556270", "#4ECDC4", "#C7F464")
... ) # doctest: +SKIP
"""
plot_backend = _get_plot_backend("matplotlib")
@@ -620,10 +623,10 @@ class _Options(dict):
:context: close-figs
>>> np.random.seed(42)
- >>> df = pd.DataFrame({'A': np.random.randn(10),
- ... 'B': np.random.randn(10)},
- ... index=pd.date_range("1/1/2000",
- ... freq='4MS', periods=10))
+ >>> df = pd.DataFrame(
+ ... {"A": np.random.randn(10), "B": np.random.randn(10)},
+ ... index=pd.date_range("1/1/2000", freq="4MS", periods=10),
+ ... )
>>> with pd.plotting.plot_params.use("x_compat", True):
... _ = df["A"].plot(color="r")
... _ = df["B"].plot(color="g")
diff --git a/pandas/tests/indexes/multi/test_join.py b/pandas/tests/indexes/multi/test_join.py
index 85f15795cdfb5..2be6bba475af7 100644
--- a/pandas/tests/indexes/multi/test_join.py
+++ b/pandas/tests/indexes/multi/test_join.py
@@ -260,7 +260,7 @@ def test_join_dtypes_all_nan(any_numeric_ea_dtype):
def test_join_index_levels():
# GH#53093
- midx = midx = MultiIndex.from_tuples([("a", "2019-02-01"), ("a", "2019-02-01")])
+ midx = MultiIndex.from_tuples([("a", "2019-02-01"), ("a", "2019-02-01")])
midx2 = MultiIndex.from_tuples([("a", "2019-01-31")])
result = midx.join(midx2, how="outer")
expected = MultiIndex.from_tuples(
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 96a0ccc33808a..e2d4a0bac9559 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -196,7 +196,7 @@ def create_mgr(descr, item_shape=None):
* components with same DTYPE_ID are combined into single block
* to force multiple blocks with same dtype, use '-SUFFIX'::
- 'a:f8-1; b:f8-2; c:f8-foobar'
+ "a:f8-1; b:f8-2; c:f8-foobar"
"""
if item_shape is None:
diff --git a/pandas/tests/io/xml/conftest.py b/pandas/tests/io/xml/conftest.py
index 273b1a3beef3b..40a94f27e98a9 100644
--- a/pandas/tests/io/xml/conftest.py
+++ b/pandas/tests/io/xml/conftest.py
@@ -11,7 +11,7 @@ def xml_data_path():
Examples
--------
>>> def test_read_xml(xml_data_path):
- ... read_xml(xml_data_path / 'file.xsl')
+ ... read_xml(xml_data_path / "file.xsl")
"""
return Path(__file__).parent.parent / "data" / "xml"
diff --git a/pandas/tests/strings/conftest.py b/pandas/tests/strings/conftest.py
index 036e4de20ba53..92b7b16da3c1f 100644
--- a/pandas/tests/strings/conftest.py
+++ b/pandas/tests/strings/conftest.py
@@ -122,7 +122,7 @@ def any_string_method(request):
Examples
--------
>>> def test_something(any_string_method):
- ... s = Series(['a', 'b', np.nan, 'd'])
+ ... s = Series(["a", "b", np.nan, "d"])
...
... method_name, args, kwargs = any_string_method
... method = getattr(s.str, method_name)
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 4a1a668426b36..92b4bcc17946f 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -111,7 +111,7 @@ def infer_freq(
Examples
--------
- >>> idx = pd.date_range(start='2020/12/01', end='2020/12/30', periods=30)
+ >>> idx = pd.date_range(start="2020/12/01", end="2020/12/30", periods=30)
>>> pd.infer_freq(idx)
'D'
"""
diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index 650e77b264d14..50d0d33f0339f 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -200,8 +200,10 @@ class from pandas.tseries.offsets
Holiday: July 3rd (month=7, day=3, )
>>> NewYears = pd.tseries.holiday.Holiday(
- ... "New Years Day", month=1, day=1,
- ... observance=pd.tseries.holiday.nearest_workday
+ ... "New Years Day",
+ ... month=1,
+ ... day=1,
+ ... observance=pd.tseries.holiday.nearest_workday,
... )
>>> NewYears # doctest: +SKIP
Holiday: New Years Day (
@@ -209,8 +211,7 @@ class from pandas.tseries.offsets
)
>>> July3rd = pd.tseries.holiday.Holiday(
- ... "July 3rd", month=7, day=3,
- ... days_of_week=(0, 1, 2, 3)
+ ... "July 3rd", month=7, day=3, days_of_week=(0, 1, 2, 3)
... )
>>> July3rd
Holiday: July 3rd (month=7, day=3, )
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 83c9a66cbd2ca..a15e2054205f7 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -122,44 +122,41 @@ def deprecate_kwarg(
--------
The following deprecates 'cols', using 'columns' instead
- >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
- ... def f(columns=''):
+ >>> @deprecate_kwarg(old_arg_name="cols", new_arg_name="columns")
+ ... def f(columns=""):
... print(columns)
- ...
- >>> f(columns='should work ok')
+ >>> f(columns="should work ok")
should work ok
- >>> f(cols='should raise warning') # doctest: +SKIP
+ >>> f(cols="should raise warning") # doctest: +SKIP
FutureWarning: cols is deprecated, use columns instead
warnings.warn(msg, FutureWarning)
should raise warning
- >>> f(cols='should error', columns="can\'t pass do both") # doctest: +SKIP
+ >>> f(cols="should error", columns="can't pass do both") # doctest: +SKIP
TypeError: Can only specify 'cols' or 'columns', not both
- >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
+ >>> @deprecate_kwarg("old", "new", {"yes": True, "no": False})
... def f(new=False):
- ... print('yes!' if new else 'no!')
- ...
- >>> f(old='yes') # doctest: +SKIP
+ ... print("yes!" if new else "no!")
+ >>> f(old="yes") # doctest: +SKIP
FutureWarning: old='yes' is deprecated, use new=True instead
warnings.warn(msg, FutureWarning)
yes!
To raise a warning that a keyword will be removed entirely in the future
- >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None)
- ... def f(cols='', another_param=''):
+ >>> @deprecate_kwarg(old_arg_name="cols", new_arg_name=None)
+ ... def f(cols="", another_param=""):
... print(cols)
- ...
- >>> f(cols='should raise warning') # doctest: +SKIP
+ >>> f(cols="should raise warning") # doctest: +SKIP
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning
- >>> f(another_param='should not raise warning') # doctest: +SKIP
+ >>> f(another_param="should not raise warning") # doctest: +SKIP
should not raise warning
- >>> f(cols='should raise warning', another_param='') # doctest: +SKIP
+ >>> f(cols="should raise warning", another_param="") # doctest: +SKIP
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning
diff --git a/pyproject.toml b/pyproject.toml
index bd7172ec85132..c0d8c859d0c12 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -346,6 +346,9 @@ exclude = [
fixture-parentheses = false
mark-parentheses = false
+[tool.ruff.format]
+docstring-code-format = true
+
[tool.pylint.messages_control]
max-line-length = 88
disable = [
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index d54592252206e..a4d53d360a12b 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -193,7 +193,7 @@ def validate_pep8(self):
"flake8",
"--format=%(row)d\t%(col)d\t%(code)s\t%(text)s",
"--max-line-length=88",
- "--ignore=E203,E3,W503,W504,E402,E731",
+ "--ignore=E203,E3,W503,W504,E402,E731,E128,E124",
file.name,
]
response = subprocess.run(cmd, capture_output=True, check=False, text=True)
| closes https://github.com/pandas-dev/pandas/issues/56804 | https://api.github.com/repos/pandas-dev/pandas/pulls/56863 | 2024-01-13T22:53:45Z | 2024-02-08T16:42:08Z | 2024-02-08T16:42:08Z | 2024-02-08T19:36:30Z |
Backport PR #56849 on branch 2.2.x (REGR: freq "m" (as alias of deprecated "M") raises an error) | diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 3a339171d0da2..205ab6f01f8c6 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -4845,15 +4845,15 @@ cpdef to_offset(freq, bint is_period=False):
tups = zip(split[0::4], split[1::4], split[2::4])
for n, (sep, stride, name) in enumerate(tups):
- if is_period is False and name in c_OFFSET_DEPR_FREQSTR:
+ if is_period is False and name.upper() in c_OFFSET_DEPR_FREQSTR:
warnings.warn(
f"\'{name}\' is deprecated and will be removed "
f"in a future version, please use "
- f"\'{c_OFFSET_DEPR_FREQSTR.get(name)}\' instead.",
+ f"\'{c_OFFSET_DEPR_FREQSTR.get(name.upper())}\' instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
- name = c_OFFSET_DEPR_FREQSTR[name]
+ name = c_OFFSET_DEPR_FREQSTR[name.upper()]
if is_period is True and name in c_REVERSE_OFFSET_DEPR_FREQSTR:
if name.startswith("Y"):
raise ValueError(
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index 44dd64e162413..d26bee80003e9 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -822,6 +822,17 @@ def test_frequencies_A_deprecated_Y_renamed(self, freq, freq_depr):
result = date_range("1/1/2000", periods=2, freq=freq_depr)
tm.assert_index_equal(result, expected)
+ def test_to_offset_with_lowercase_deprecated_freq(self) -> None:
+ # https://github.com/pandas-dev/pandas/issues/56847
+ msg = (
+ "'m' is deprecated and will be removed in a future version, please use "
+ "'ME' instead."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = date_range("2010-01-01", periods=2, freq="m")
+ expected = DatetimeIndex(["2010-01-31", "2010-02-28"], freq="ME")
+ tm.assert_index_equal(result, expected)
+
def test_date_range_bday(self):
sdate = datetime(1999, 12, 25)
idx = date_range(start=sdate, freq="1B", periods=20)
diff --git a/pandas/tests/tslibs/test_to_offset.py b/pandas/tests/tslibs/test_to_offset.py
index ef68408305232..6e654e65a36d6 100644
--- a/pandas/tests/tslibs/test_to_offset.py
+++ b/pandas/tests/tslibs/test_to_offset.py
@@ -45,6 +45,7 @@ def test_to_offset_negative(freqstr, expected):
assert result.n == expected
+@pytest.mark.filterwarnings("ignore:.*'m' is deprecated.*:FutureWarning")
@pytest.mark.parametrize(
"freqstr",
[
| Backport PR #56849: REGR: freq "m" (as alias of deprecated "M") raises an error | https://api.github.com/repos/pandas-dev/pandas/pulls/56862 | 2024-01-13T20:18:33Z | 2024-01-13T21:02:29Z | 2024-01-13T21:02:29Z | 2024-01-13T21:02:29Z |
DOC: ex03 - no fix required | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 75b6925d14f21..e19842b172f82 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -109,21 +109,10 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.Index.rename \
pandas.Index.droplevel \
pandas.Index.isin \
- pandas.CategoricalIndex.set_categories \
pandas.MultiIndex.names \
pandas.MultiIndex.droplevel \
pandas.IndexSlice \
- pandas.DatetimeIndex.month_name \
- pandas.DatetimeIndex.day_name \
- pandas.core.window.rolling.Rolling.corr \
pandas.Grouper \
- pandas.core.groupby.SeriesGroupBy.apply \
- pandas.core.groupby.DataFrameGroupBy.apply \
- pandas.core.groupby.SeriesGroupBy.transform \
- pandas.core.groupby.SeriesGroupBy.pipe \
- pandas.core.groupby.DataFrameGroupBy.pipe \
- pandas.core.groupby.DataFrameGroupBy.boxplot \
- pandas.core.groupby.DataFrameGroupBy.hist \
pandas.io.formats.style.Styler.map \
pandas.io.formats.style.Styler.apply_index \
pandas.io.formats.style.Styler.map_index \
@@ -141,18 +130,12 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.io.formats.style.Styler.text_gradient \
pandas.DataFrame.values \
pandas.DataFrame.groupby \
- pandas.DataFrame.skew \
- pandas.DataFrame.var \
pandas.DataFrame.idxmax \
pandas.DataFrame.idxmin \
pandas.DataFrame.pivot \
pandas.DataFrame.sort_values \
- pandas.DataFrame.tz_convert \
- pandas.DataFrame.tz_localize \
- pandas.DataFrame.plot.bar \
pandas.DataFrame.plot.hexbin \
pandas.DataFrame.plot.line \
- pandas.DataFrame.hist \
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
| - xref #56804
Used this script to validate:
```py
import subprocess as sp
def validate_func(func_name):
proc = sp.Popen(['./scripts/validate_docstrings.py', '--errors=EX03', func_name], stdout=sp.PIPE, stderr=sp.PIPE)
proc.wait()
out = proc.stderr.read().decode().strip()
return out.endswith(f'Docstring for "{func_name}" correct. :)')
funcs = ['pandas.DataFrame.var',
'pandas.DatetimeIndex.day_name',
'pandas.core.groupby.DataFrameGroupBy.apply',
'pandas.DatetimeIndex.month_name',
'pandas.core.groupby.DataFrameGroupBy.hist',
'pandas.core.groupby.SeriesGroupBy.apply',
'pandas.core.groupby.SeriesGroupBy.transform',
'pandas.DataFrame.hist',
'pandas.DataFrame.tz_localize',
'pandas.CategoricalIndex.set_categories',
'pandas.core.groupby.DataFrameGroupBy.boxplot',
'pandas.core.groupby.SeriesGroupBy.pipe',
'pandas.DataFrame.plot.bar',
'pandas.DataFrame.tz_convert',
'pandas.core.groupby.DataFrameGroupBy.pipe',
'pandas.DataFrame.skew',
'pandas.core.window.rolling.Rolling.corr']
for func in funcs:
print(func, validate_func(func))
```
Let me know if it's too many at once. | https://api.github.com/repos/pandas-dev/pandas/pulls/56861 | 2024-01-13T15:39:08Z | 2024-01-13T20:09:33Z | 2024-01-13T20:09:33Z | 2024-02-07T14:52:52Z |
TST: Move tests out of test_groupby | diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 8198cc532d998..86f03b04fddb3 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -160,6 +160,33 @@ def test_agg_apply_corner(ts, tsframe):
tm.assert_frame_equal(res, exp_df)
+def test_with_na_groups(any_real_numpy_dtype):
+ index = Index(np.arange(10))
+ values = Series(np.ones(10), index, dtype=any_real_numpy_dtype)
+ labels = Series(
+ [np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
+ index=index,
+ )
+
+ # this SHOULD be an int
+ grouped = values.groupby(labels)
+ agged = grouped.agg(len)
+ expected = Series([4, 2], index=["bar", "foo"])
+
+ tm.assert_series_equal(agged, expected, check_dtype=False)
+
+ # assert issubclass(agged.dtype.type, np.integer)
+
+ # explicitly return a float from my function
+ def f(x):
+ return float(len(x))
+
+ agged = grouped.agg(f)
+ expected = Series([4.0, 2.0], index=["bar", "foo"])
+
+ tm.assert_series_equal(agged, expected)
+
+
def test_agg_grouping_is_list_tuple(ts):
df = DataFrame(
np.random.default_rng(2).standard_normal((30, 4)),
@@ -1049,6 +1076,73 @@ def test_grouby_agg_loses_results_with_as_index_false_relabel_multiindex():
tm.assert_frame_equal(result, expected)
+def test_groupby_as_index_agg(df):
+ grouped = df.groupby("A", as_index=False)
+
+ # single-key
+
+ result = grouped[["C", "D"]].agg("mean")
+ expected = grouped.mean(numeric_only=True)
+ tm.assert_frame_equal(result, expected)
+
+ result2 = grouped.agg({"C": "mean", "D": "sum"})
+ expected2 = grouped.mean(numeric_only=True)
+ expected2["D"] = grouped.sum()["D"]
+ tm.assert_frame_equal(result2, expected2)
+
+ grouped = df.groupby("A", as_index=True)
+
+ msg = r"nested renamer is not supported"
+ with pytest.raises(SpecificationError, match=msg):
+ grouped["C"].agg({"Q": "sum"})
+
+ # multi-key
+
+ grouped = df.groupby(["A", "B"], as_index=False)
+
+ result = grouped.agg("mean")
+ expected = grouped.mean()
+ tm.assert_frame_equal(result, expected)
+
+ result2 = grouped.agg({"C": "mean", "D": "sum"})
+ expected2 = grouped.mean()
+ expected2["D"] = grouped.sum()["D"]
+ tm.assert_frame_equal(result2, expected2)
+
+ expected3 = grouped["C"].sum()
+ expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
+ msg = "Passing a dictionary to SeriesGroupBy.agg is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result3 = grouped["C"].agg({"Q": "sum"})
+ tm.assert_frame_equal(result3, expected3)
+
+ # GH7115 & GH8112 & GH8582
+ df = DataFrame(
+ np.random.default_rng(2).integers(0, 100, (50, 3)),
+ columns=["jim", "joe", "jolie"],
+ )
+ ts = Series(np.random.default_rng(2).integers(5, 10, 50), name="jim")
+
+ gr = df.groupby(ts)
+ gr.nth(0) # invokes set_selection_from_grouper internally
+
+ msg = "The behavior of DataFrame.sum with axis=None is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):
+ res = gr.apply(sum)
+ with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):
+ alt = df.groupby(ts).apply(sum)
+ tm.assert_frame_equal(res, alt)
+
+ for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
+ gr = df.groupby(ts, as_index=False)
+ left = getattr(gr, attr)()
+
+ gr = df.groupby(ts.values, as_index=True)
+ right = getattr(gr, attr)().reset_index(drop=True)
+
+ tm.assert_frame_equal(left, right)
+
+
@pytest.mark.parametrize(
"func", [lambda s: s.mean(), lambda s: np.mean(s), lambda s: np.nanmean(s)]
)
@@ -1252,6 +1346,28 @@ def test_agg_multiple_lambda(self):
tm.assert_frame_equal(result2, expected)
+def test_pass_args_kwargs_duplicate_columns(tsframe, as_index):
+ # go through _aggregate_frame with self.axis == 0 and duplicate columns
+ tsframe.columns = ["A", "B", "A", "C"]
+ gb = tsframe.groupby(lambda x: x.month, as_index=as_index)
+
+ warn = None if as_index else FutureWarning
+ msg = "A grouping .* was excluded from the result"
+ with tm.assert_produces_warning(warn, match=msg):
+ res = gb.agg(np.percentile, 80, axis=0)
+
+ ex_data = {
+ 1: tsframe[tsframe.index.month == 1].quantile(0.8),
+ 2: tsframe[tsframe.index.month == 2].quantile(0.8),
+ }
+ expected = DataFrame(ex_data).T
+ if not as_index:
+ # TODO: try to get this more consistent?
+ expected.index = Index(range(2))
+
+ tm.assert_frame_equal(res, expected)
+
+
def test_groupby_get_by_index():
# GH 33439
df = DataFrame({"A": ["S", "W", "W"], "B": [1.0, 1.0, 2.0]})
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index f4b228eb5b326..5de98156b44e1 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -1602,3 +1602,75 @@ def test_builtins_apply(keys, f):
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(axis=0), getattr(df, fname)(axis=0))
+
+
+def test_inconsistent_return_type():
+ # GH5592
+ # inconsistent return type
+ df = DataFrame(
+ {
+ "A": ["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
+ "B": Series(np.arange(7), dtype="int64"),
+ "C": pd.date_range("20130101", periods=7),
+ }
+ )
+
+ def f_0(grp):
+ return grp.iloc[0]
+
+ expected = df.groupby("A").first()[["B"]]
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby("A").apply(f_0)[["B"]]
+ tm.assert_frame_equal(result, expected)
+
+ def f_1(grp):
+ if grp.name == "Tiger":
+ return None
+ return grp.iloc[0]
+
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby("A").apply(f_1)[["B"]]
+ e = expected.copy()
+ e.loc["Tiger"] = np.nan
+ tm.assert_frame_equal(result, e)
+
+ def f_2(grp):
+ if grp.name == "Pony":
+ return None
+ return grp.iloc[0]
+
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby("A").apply(f_2)[["B"]]
+ e = expected.copy()
+ e.loc["Pony"] = np.nan
+ tm.assert_frame_equal(result, e)
+
+ # 5592 revisited, with datetimes
+ def f_3(grp):
+ if grp.name == "Pony":
+ return None
+ return grp.iloc[0]
+
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby("A").apply(f_3)[["C"]]
+ e = df.groupby("A").first()[["C"]]
+ e.loc["Pony"] = pd.NaT
+ tm.assert_frame_equal(result, e)
+
+ # scalar outputs
+ def f_4(grp):
+ if grp.name == "Pony":
+ return None
+ return grp.iloc[0].loc["C"]
+
+ msg = "DataFrameGroupBy.apply operated on the grouping columns"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby("A").apply(f_4)
+ e = df.groupby("A").first()["C"].copy()
+ e.loc["Pony"] = np.nan
+ e.name = None
+ tm.assert_series_equal(result, e)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 14c5c21d41772..8750dd18b3db4 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -43,99 +43,6 @@ def test_repr():
assert result == expected
-def test_groupby_std_datetimelike(warn_copy_on_write):
- # GH#48481
- tdi = pd.timedelta_range("1 Day", periods=10000)
- ser = Series(tdi)
- ser[::5] *= 2 # get different std for different groups
-
- df = ser.to_frame("A").copy()
-
- df["B"] = ser + Timestamp(0)
- df["C"] = ser + Timestamp(0, tz="UTC")
- df.iloc[-1] = pd.NaT # last group includes NaTs
-
- gb = df.groupby(list(range(5)) * 2000)
-
- result = gb.std()
-
- # Note: this does not _exactly_ match what we would get if we did
- # [gb.get_group(i).std() for i in gb.groups]
- # but it _does_ match the floating point error we get doing the
- # same operation on int64 data xref GH#51332
- td1 = Timedelta("2887 days 11:21:02.326710176")
- td4 = Timedelta("2886 days 00:42:34.664668096")
- exp_ser = Series([td1 * 2, td1, td1, td1, td4], index=np.arange(5))
- expected = DataFrame({"A": exp_ser, "B": exp_ser, "C": exp_ser})
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
-def test_basic_aggregations(dtype):
- data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
-
- index = np.arange(9)
- np.random.default_rng(2).shuffle(index)
- data = data.reindex(index)
-
- grouped = data.groupby(lambda x: x // 3, group_keys=False)
-
- for k, v in grouped:
- assert len(v) == 3
-
- msg = "using SeriesGroupBy.mean"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- agged = grouped.aggregate(np.mean)
- assert agged[1] == 1
-
- msg = "using SeriesGroupBy.mean"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- expected = grouped.agg(np.mean)
- tm.assert_series_equal(agged, expected) # shorthand
- tm.assert_series_equal(agged, grouped.mean())
- result = grouped.sum()
- msg = "using SeriesGroupBy.sum"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- expected = grouped.agg(np.sum)
- tm.assert_series_equal(result, expected)
-
- expected = grouped.apply(lambda x: x * x.sum())
- transformed = grouped.transform(lambda x: x * x.sum())
- assert transformed[7] == 12
- tm.assert_series_equal(transformed, expected)
-
- value_grouped = data.groupby(data)
- msg = "using SeriesGroupBy.mean"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = value_grouped.aggregate(np.mean)
- tm.assert_series_equal(result, agged, check_index_type=False)
-
- # complex agg
- msg = "using SeriesGroupBy.[mean|std]"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- agged = grouped.aggregate([np.mean, np.std])
-
- msg = r"nested renamer is not supported"
- with pytest.raises(SpecificationError, match=msg):
- grouped.aggregate({"one": np.mean, "two": np.std})
-
- group_constants = {0: 10, 1: 20, 2: 30}
- msg = (
- "Pinning the groupby key to each group in SeriesGroupBy.agg is deprecated, "
- "and cases that relied on it will raise in a future version"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- # GH#41090
- agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
- assert agged[1] == 21
-
- # corner cases
- msg = "Must produce aggregated value"
- # exception raised is type Exception
- with pytest.raises(Exception, match=msg):
- grouped.aggregate(lambda x: x * 2)
-
-
def test_groupby_nonobject_dtype(multiindex_dataframe_random_data):
key = multiindex_dataframe_random_data.index.codes[0]
grouped = multiindex_dataframe_random_data.groupby(key)
@@ -170,78 +77,6 @@ def max_value(group):
tm.assert_series_equal(result, expected)
-def test_inconsistent_return_type():
- # GH5592
- # inconsistent return type
- df = DataFrame(
- {
- "A": ["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
- "B": Series(np.arange(7), dtype="int64"),
- "C": date_range("20130101", periods=7),
- }
- )
-
- def f_0(grp):
- return grp.iloc[0]
-
- expected = df.groupby("A").first()[["B"]]
- msg = "DataFrameGroupBy.apply operated on the grouping columns"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.groupby("A").apply(f_0)[["B"]]
- tm.assert_frame_equal(result, expected)
-
- def f_1(grp):
- if grp.name == "Tiger":
- return None
- return grp.iloc[0]
-
- msg = "DataFrameGroupBy.apply operated on the grouping columns"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.groupby("A").apply(f_1)[["B"]]
- e = expected.copy()
- e.loc["Tiger"] = np.nan
- tm.assert_frame_equal(result, e)
-
- def f_2(grp):
- if grp.name == "Pony":
- return None
- return grp.iloc[0]
-
- msg = "DataFrameGroupBy.apply operated on the grouping columns"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.groupby("A").apply(f_2)[["B"]]
- e = expected.copy()
- e.loc["Pony"] = np.nan
- tm.assert_frame_equal(result, e)
-
- # 5592 revisited, with datetimes
- def f_3(grp):
- if grp.name == "Pony":
- return None
- return grp.iloc[0]
-
- msg = "DataFrameGroupBy.apply operated on the grouping columns"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.groupby("A").apply(f_3)[["C"]]
- e = df.groupby("A").first()[["C"]]
- e.loc["Pony"] = pd.NaT
- tm.assert_frame_equal(result, e)
-
- # scalar outputs
- def f_4(grp):
- if grp.name == "Pony":
- return None
- return grp.iloc[0].loc["C"]
-
- msg = "DataFrameGroupBy.apply operated on the grouping columns"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.groupby("A").apply(f_4)
- e = df.groupby("A").first()["C"].copy()
- e.loc["Pony"] = np.nan
- e.name = None
- tm.assert_series_equal(result, e)
-
-
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
@@ -295,28 +130,6 @@ def f(x, q=None, axis=0):
tm.assert_frame_equal(apply_result, expected, check_names=False)
-def test_pass_args_kwargs_duplicate_columns(tsframe, as_index):
- # go through _aggregate_frame with self.axis == 0 and duplicate columns
- tsframe.columns = ["A", "B", "A", "C"]
- gb = tsframe.groupby(lambda x: x.month, as_index=as_index)
-
- warn = None if as_index else FutureWarning
- msg = "A grouping .* was excluded from the result"
- with tm.assert_produces_warning(warn, match=msg):
- res = gb.agg(np.percentile, 80, axis=0)
-
- ex_data = {
- 1: tsframe[tsframe.index.month == 1].quantile(0.8),
- 2: tsframe[tsframe.index.month == 2].quantile(0.8),
- }
- expected = DataFrame(ex_data).T
- if not as_index:
- # TODO: try to get this more consistent?
- expected.index = Index(range(2))
-
- tm.assert_frame_equal(res, expected)
-
-
def test_len():
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
@@ -350,33 +163,6 @@ def test_basic_regression():
grouped.mean()
-def test_with_na_groups(any_real_numpy_dtype):
- index = Index(np.arange(10))
- values = Series(np.ones(10), index, dtype=any_real_numpy_dtype)
- labels = Series(
- [np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
- index=index,
- )
-
- # this SHOULD be an int
- grouped = values.groupby(labels)
- agged = grouped.agg(len)
- expected = Series([4, 2], index=["bar", "foo"])
-
- tm.assert_series_equal(agged, expected, check_dtype=False)
-
- # assert issubclass(agged.dtype.type, np.integer)
-
- # explicitly return a float from my function
- def f(x):
- return float(len(x))
-
- agged = grouped.agg(f)
- expected = Series([4.0, 2.0], index=["bar", "foo"])
-
- tm.assert_series_equal(agged, expected)
-
-
def test_indices_concatenation_order():
# GH 2808
@@ -761,73 +547,6 @@ def test_groupby_as_index_select_column_sum_empty_df():
tm.assert_frame_equal(left, expected)
-def test_groupby_as_index_agg(df):
- grouped = df.groupby("A", as_index=False)
-
- # single-key
-
- result = grouped[["C", "D"]].agg("mean")
- expected = grouped.mean(numeric_only=True)
- tm.assert_frame_equal(result, expected)
-
- result2 = grouped.agg({"C": "mean", "D": "sum"})
- expected2 = grouped.mean(numeric_only=True)
- expected2["D"] = grouped.sum()["D"]
- tm.assert_frame_equal(result2, expected2)
-
- grouped = df.groupby("A", as_index=True)
-
- msg = r"nested renamer is not supported"
- with pytest.raises(SpecificationError, match=msg):
- grouped["C"].agg({"Q": "sum"})
-
- # multi-key
-
- grouped = df.groupby(["A", "B"], as_index=False)
-
- result = grouped.agg("mean")
- expected = grouped.mean()
- tm.assert_frame_equal(result, expected)
-
- result2 = grouped.agg({"C": "mean", "D": "sum"})
- expected2 = grouped.mean()
- expected2["D"] = grouped.sum()["D"]
- tm.assert_frame_equal(result2, expected2)
-
- expected3 = grouped["C"].sum()
- expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
- msg = "Passing a dictionary to SeriesGroupBy.agg is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result3 = grouped["C"].agg({"Q": "sum"})
- tm.assert_frame_equal(result3, expected3)
-
- # GH7115 & GH8112 & GH8582
- df = DataFrame(
- np.random.default_rng(2).integers(0, 100, (50, 3)),
- columns=["jim", "joe", "jolie"],
- )
- ts = Series(np.random.default_rng(2).integers(5, 10, 50), name="jim")
-
- gr = df.groupby(ts)
- gr.nth(0) # invokes set_selection_from_grouper internally
-
- msg = "The behavior of DataFrame.sum with axis=None is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):
- res = gr.apply(sum)
- with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):
- alt = df.groupby(ts).apply(sum)
- tm.assert_frame_equal(res, alt)
-
- for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
- gr = df.groupby(ts, as_index=False)
- left = getattr(gr, attr)()
-
- gr = df.groupby(ts.values, as_index=True)
- right = getattr(gr, attr)().reset_index(drop=True)
-
- tm.assert_frame_equal(left, right)
-
-
def test_ops_not_as_index(reduction_func):
# GH 10355, 21090
# Using as_index=False should not modify grouped column
diff --git a/pandas/tests/groupby/test_reductions.py b/pandas/tests/groupby/test_reductions.py
index 273734e84d9aa..7530c9ca78cbc 100644
--- a/pandas/tests/groupby/test_reductions.py
+++ b/pandas/tests/groupby/test_reductions.py
@@ -20,6 +20,72 @@
from pandas.util import _test_decorators as td
+@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
+def test_basic_aggregations(dtype):
+ data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
+
+ index = np.arange(9)
+ np.random.default_rng(2).shuffle(index)
+ data = data.reindex(index)
+
+ grouped = data.groupby(lambda x: x // 3, group_keys=False)
+
+ for k, v in grouped:
+ assert len(v) == 3
+
+ msg = "using SeriesGroupBy.mean"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ agged = grouped.aggregate(np.mean)
+ assert agged[1] == 1
+
+ msg = "using SeriesGroupBy.mean"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ expected = grouped.agg(np.mean)
+ tm.assert_series_equal(agged, expected) # shorthand
+ tm.assert_series_equal(agged, grouped.mean())
+ result = grouped.sum()
+ msg = "using SeriesGroupBy.sum"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ expected = grouped.agg(np.sum)
+ tm.assert_series_equal(result, expected)
+
+ expected = grouped.apply(lambda x: x * x.sum())
+ transformed = grouped.transform(lambda x: x * x.sum())
+ assert transformed[7] == 12
+ tm.assert_series_equal(transformed, expected)
+
+ value_grouped = data.groupby(data)
+ msg = "using SeriesGroupBy.mean"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = value_grouped.aggregate(np.mean)
+ tm.assert_series_equal(result, agged, check_index_type=False)
+
+ # complex agg
+ msg = "using SeriesGroupBy.[mean|std]"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ agged = grouped.aggregate([np.mean, np.std])
+
+ msg = r"nested renamer is not supported"
+ with pytest.raises(pd.errors.SpecificationError, match=msg):
+ grouped.aggregate({"one": np.mean, "two": np.std})
+
+ group_constants = {0: 10, 1: 20, 2: 30}
+ msg = (
+ "Pinning the groupby key to each group in SeriesGroupBy.agg is deprecated, "
+ "and cases that relied on it will raise in a future version"
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ # GH#41090
+ agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
+ assert agged[1] == 21
+
+ # corner cases
+ msg = "Must produce aggregated value"
+ # exception raised is type Exception
+ with pytest.raises(Exception, match=msg):
+ grouped.aggregate(lambda x: x * 2)
+
+
@pytest.mark.parametrize(
"vals",
[
@@ -1071,3 +1137,30 @@ def test_groupby_prod_with_int64_dtype():
result = df.groupby(["A"]).prod().reset_index()
expected = DataFrame({"A": [1], "B": [180970905912331920]}, dtype="int64")
tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_std_datetimelike(warn_copy_on_write):
+ # GH#48481
+ tdi = pd.timedelta_range("1 Day", periods=10000)
+ ser = Series(tdi)
+ ser[::5] *= 2 # get different std for different groups
+
+ df = ser.to_frame("A").copy()
+
+ df["B"] = ser + Timestamp(0)
+ df["C"] = ser + Timestamp(0, tz="UTC")
+ df.iloc[-1] = pd.NaT # last group includes NaTs
+
+ gb = df.groupby(list(range(5)) * 2000)
+
+ result = gb.std()
+
+ # Note: this does not _exactly_ match what we would get if we did
+ # [gb.get_group(i).std() for i in gb.groups]
+ # but it _does_ match the floating point error we get doing the
+ # same operation on int64 data xref GH#51332
+ td1 = pd.Timedelta("2887 days 11:21:02.326710176")
+ td4 = pd.Timedelta("2886 days 00:42:34.664668096")
+ exp_ser = Series([td1 * 2, td1, td1, td1, td4], index=np.arange(5))
+ expected = DataFrame({"A": exp_ser, "B": exp_ser, "C": exp_ser})
+ tm.assert_frame_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Moving tests for specific methods out of `test_groupby.py` | https://api.github.com/repos/pandas-dev/pandas/pulls/56859 | 2024-01-13T12:20:35Z | 2024-01-13T20:13:56Z | 2024-01-13T20:13:55Z | 2024-01-13T22:26:14Z |
change | diff --git a/Dockerfile b/Dockerfile
index c697f0c1c66c7..0bde5ce107bcb 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,3 +12,4 @@ RUN python -m pip install --upgrade pip
RUN python -m pip install \
-r https://raw.githubusercontent.com/pandas-dev/pandas/main/requirements-dev.txt
CMD ["/bin/bash"]
+CMD
\ No newline at end of file
diff --git a/codecov.yml b/codecov.yml
index d893bdbdc9298..fc24b9d382a4d 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -16,3 +16,4 @@ coverage:
github_checks:
annotations: false
+ hello there
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56858 | 2024-01-13T11:28:51Z | 2024-01-13T11:29:36Z | null | 2024-01-13T11:29:36Z |
add ignore_index parameter to query | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 711b9cd5dbf36..d30f392c55f87 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4627,18 +4627,18 @@ def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None:
# Unsorted
@overload
- def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame:
+ def query(self, expr: str, *, inplace: Literal[False] = ..., ignore_index: bool = ..., **kwargs) -> DataFrame:
...
@overload
- def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None:
+ def query(self, expr: str, *, inplace: Literal[True], ignore_index: bool = ..., **kwargs) -> None:
...
@overload
- def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None:
+ def query(self, expr: str, *, inplace: bool = ..., ignore_index: bool = ..., **kwargs) -> DataFrame | None:
...
- def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None:
+ def query(self, expr: str, *, inplace: bool = False, ignore_index: bool = False, **kwargs) -> DataFrame | None:
"""
Query the columns of a DataFrame with a boolean expression.
@@ -4663,6 +4663,8 @@ def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | No
inplace : bool
Whether to modify the DataFrame rather than creating a new one.
+ ignore_index : bool, default False
+ If True, the resulting axis will be labeled 0, 1, …, n - 1.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
@@ -4790,6 +4792,9 @@ def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | No
# valid query
result = self[res]
+ if ignore_index:
+ result.index = default_index(len(result))
+
if inplace:
self._update_inplace(result)
return None
| This PR adds `ignore_index` parameter to `DataFrame.query` method. The `ignore_index()` parameter allows other DataFrame methods that filter out rows (e.g. `dropna`, `drop_duplicates` etc.) to reset to the default RangeIndex in the same method call. By adding it to `query`, we improve consistency. So with this parameter, instead of
```python
df.query(condition).reset_index(drop=True)
```
which requires two method calls, we could do
```python
df.query(condition, ignore_index=True)
```
This also allows for in-place index reset as well.
```python
df.query(condition, inplace=True)
df.reset_index(drop=True, inplace=True) # <--- two lines and a bit cumbersome
df.query(condition, inplace=True, ignore_index=True) # <--- cleaner
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/56857 | 2024-01-13T10:43:47Z | 2024-01-13T20:17:02Z | null | 2024-01-13T20:17:03Z |
DOC: fix EX03 errors in docstrings | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 145be3e52f2c0..c90645374b2f3 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -95,7 +95,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.DataFrame.groupby \
pandas.DataFrame.sort_values \
pandas.DataFrame.plot.hexbin \
- pandas.DataFrame.plot.line \
+ pandas.DataFrame.plot.line
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index cd6689e9f1ce2..e251bd28245e7 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -213,7 +213,10 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps): # type: ignore[misc]
Examples
--------
>>> pd.arrays.DatetimeArray._from_sequence(
- ... pd.DatetimeIndex(['2023-01-01', '2023-01-02'], freq='D'))
+ ... pd.DatetimeIndex(
+ ... ["2023-01-01", "2023-01-02"], freq="D"
+ ... )
+ ... )
<DatetimeArray>
['2023-01-01 00:00:00', '2023-01-02 00:00:00']
Length: 2, dtype: datetime64[ns]
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index a2666cd6cb229..1663a5a78225f 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2124,7 +2124,8 @@ def droplevel(self, level: IndexLabel = 0):
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
- ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])
+ ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']
+ ... )
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 25bcc1f307082..6bcfaa6d26fc2 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1638,7 +1638,8 @@ def _set_names(self, names, *, level=None, validate: bool = True) -> None:
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
- ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])
+ ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']
+ ... )
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
| - [ ] xref #56804
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56855 | 2024-01-13T05:53:51Z | 2024-01-16T08:21:35Z | 2024-01-16T08:21:35Z | 2024-01-16T08:50:21Z |
DOC: Fix EX03 | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 75b6925d14f21..3ff5d07796178 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -74,13 +74,9 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.Series.plot.line \
pandas.Series.to_sql \
pandas.Series.to_latex \
- pandas.errors.CategoricalConversionWarning \
- pandas.errors.ChainedAssignmentError \
- pandas.errors.ClosedFileError \
pandas.errors.DatabaseError \
pandas.errors.IndexingError \
pandas.errors.InvalidColumnName \
- pandas.errors.NumExprClobberingError \
pandas.errors.PossibleDataLossError \
pandas.errors.PossiblePrecisionLoss \
pandas.errors.SettingWithCopyError \
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index d3ca9c8521203..9faa17f6e5f15 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -469,7 +469,7 @@ class ChainedAssignmentError(Warning):
--------
>>> pd.options.mode.copy_on_write = True
>>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A'])
- >>> df["A"][0:3] = 10 # doctest: +SKIP
+ >>> df["A"][0:3] = 10 # doctest: +SKIP
... # ChainedAssignmentError: ...
>>> pd.options.mode.copy_on_write = False
"""
@@ -561,10 +561,10 @@ class NumExprClobberingError(NameError):
Examples
--------
>>> df = pd.DataFrame({'abs': [1, 1, 1]})
- >>> df.query("abs > 2") # doctest: +SKIP
+ >>> df.query("abs > 2") # doctest: +SKIP
... # NumExprClobberingError: Variables in expression "(abs) > (2)" overlap...
>>> sin, a = 1, 2
- >>> pd.eval("sin + a", engine='numexpr') # doctest: +SKIP
+ >>> pd.eval("sin + a", engine='numexpr') # doctest: +SKIP
... # NumExprClobberingError: Variables in expression "(sin) + (a)" overlap...
"""
@@ -677,9 +677,9 @@ class ClosedFileError(Exception):
Examples
--------
- >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP
- >>> store.close() # doctest: +SKIP
- >>> store.keys() # doctest: +SKIP
+ >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP
+ >>> store.close() # doctest: +SKIP
+ >>> store.keys() # doctest: +SKIP
... # ClosedFileError: my-store file is not open!
"""
@@ -773,9 +773,9 @@ class CategoricalConversionWarning(Warning):
Examples
--------
>>> from pandas.io.stata import StataReader
- >>> with StataReader('dta_file', chunksize=2) as reader: # doctest: +SKIP
- ... for i, block in enumerate(reader):
- ... print(i, block)
+ >>> with StataReader('dta_file', chunksize=2) as reader: # doctest: +SKIP
+ ... for i, block in enumerate(reader):
+ ... print(i, block)
... # CategoricalConversionWarning: One or more series with value labels...
"""
| Error EX03 has cleaned.
pandas.errors.CategoricalConversionWarning
<img width="470" alt="image" src="https://github.com/pandas-dev/pandas/assets/77875500/d007f149-24f0-478c-b2fb-c32be2039bcc">
pandas.errors.ChainedAssignmentError
<img width="395" alt="image" src="https://github.com/pandas-dev/pandas/assets/77875500/3d7b7c4c-4900-42a6-846f-977bcd621c31">
pandas.errors.ClosedFileError
<img width="356" alt="image" src="https://github.com/pandas-dev/pandas/assets/77875500/c0e4dd3d-79ad-4d27-89cf-25042decfc0f">
pandas.errors.NumExprClobberingError
<img width="391" alt="image" src="https://github.com/pandas-dev/pandas/assets/77875500/acbc09fa-35cf-4790-8d30-f02b47a089a5">
- [ ] xref #56804 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. | https://api.github.com/repos/pandas-dev/pandas/pulls/56854 | 2024-01-13T04:23:46Z | 2024-01-13T20:11:25Z | 2024-01-13T20:11:25Z | 2024-01-14T06:23:41Z |
REGR: freq "m" (as alias of deprecated "M") raises an error | diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 84544322b57a8..446088821b10d 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -4860,15 +4860,15 @@ cpdef to_offset(freq, bint is_period=False):
tups = zip(split[0::4], split[1::4], split[2::4])
for n, (sep, stride, name) in enumerate(tups):
- if is_period is False and name in c_OFFSET_DEPR_FREQSTR:
+ if is_period is False and name.upper() in c_OFFSET_DEPR_FREQSTR:
warnings.warn(
f"\'{name}\' is deprecated and will be removed "
f"in a future version, please use "
- f"\'{c_OFFSET_DEPR_FREQSTR.get(name)}\' instead.",
+ f"\'{c_OFFSET_DEPR_FREQSTR.get(name.upper())}\' instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
- name = c_OFFSET_DEPR_FREQSTR[name]
+ name = c_OFFSET_DEPR_FREQSTR[name.upper()]
if is_period is True and name in c_REVERSE_OFFSET_DEPR_FREQSTR:
if name.startswith("Y"):
raise ValueError(
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index ec158f7b194a0..e26f35f4e8258 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -818,6 +818,17 @@ def test_frequencies_A_deprecated_Y_renamed(self, freq, freq_depr):
result = date_range("1/1/2000", periods=2, freq=freq_depr)
tm.assert_index_equal(result, expected)
+ def test_to_offset_with_lowercase_deprecated_freq(self) -> None:
+ # https://github.com/pandas-dev/pandas/issues/56847
+ msg = (
+ "'m' is deprecated and will be removed in a future version, please use "
+ "'ME' instead."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = date_range("2010-01-01", periods=2, freq="m")
+ expected = DatetimeIndex(["2010-01-31", "2010-02-28"], freq="ME")
+ tm.assert_index_equal(result, expected)
+
def test_date_range_bday(self):
sdate = datetime(1999, 12, 25)
idx = date_range(start=sdate, freq="1B", periods=20)
diff --git a/pandas/tests/tslibs/test_to_offset.py b/pandas/tests/tslibs/test_to_offset.py
index 204775347e47a..c5a2f08933392 100644
--- a/pandas/tests/tslibs/test_to_offset.py
+++ b/pandas/tests/tslibs/test_to_offset.py
@@ -44,6 +44,7 @@ def test_to_offset_negative(freqstr, expected):
assert result.n == expected
+@pytest.mark.filterwarnings("ignore:.*'m' is deprecated.*:FutureWarning")
@pytest.mark.parametrize(
"freqstr",
[
| - [ ] closes #56847 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
No whatsnew note as this hasn't hit users yet (thanks @jorisvandenbossche for reporting it in time!) | https://api.github.com/repos/pandas-dev/pandas/pulls/56849 | 2024-01-12T19:15:10Z | 2024-01-13T20:17:37Z | 2024-01-13T20:17:37Z | 2024-01-15T00:48:18Z |
DOC: fix EX03 errors in docstrings - pandas.Series.to_latex, pandas.read_pickle, pandas.DataFrame.to_latex, pandas.core.resample.Resampler.pipe | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index a08a0cbd87383..dd5b6217c8318 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -73,7 +73,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
$BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX03 --ignore_functions \
pandas.Series.plot.line \
pandas.Series.to_sql \
- pandas.Series.to_latex \
pandas.errors.DatabaseError \
pandas.errors.IndexingError \
pandas.errors.InvalidColumnName \
@@ -87,16 +86,13 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.Timestamp.ceil \
pandas.Timestamp.floor \
pandas.Timestamp.round \
- pandas.read_pickle \
pandas.ExcelWriter \
pandas.read_json \
pandas.io.json.build_table_schema \
- pandas.DataFrame.to_latex \
pandas.io.formats.style.Styler.to_latex \
pandas.read_parquet \
pandas.DataFrame.to_sql \
pandas.read_stata \
- pandas.core.resample.Resampler.pipe \
pandas.core.resample.Resampler.interpolate \
pandas.plotting.scatter_matrix \
pandas.pivot \
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b37f22339fcfd..a61148a09be18 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3544,7 +3544,7 @@ def to_latex(
>>> print(df.to_latex(index=False,
... formatters={"name": str.upper},
... float_format="{:.1f}".format,
- ... )) # doctest: +SKIP
+ ... )) # doctest: +SKIP
\begin{tabular}{lrr}
\toprule
name & age & height \\
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index c9beaee55d608..f36297a59498d 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -444,14 +444,15 @@ class providing the base-class of operations.
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
-args : iterable, optional
+*args : iterable, optional
Positional arguments passed into `func`.
-kwargs : dict, optional
+**kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
-the return type of `func`.
+%(klass)s
+ The original object with the function `func` applied.
See Also
--------
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index b62f7581ac220..26349dc129361 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -770,7 +770,7 @@ def to_latex(
For example the following code will highlight and bold a cell in HTML-CSS:
- >>> df = pd.DataFrame([[1,2], [3,4]])
+ >>> df = pd.DataFrame([[1, 2], [3, 4]])
>>> s = df.style.highlight_max(axis=None,
... props='background-color:red; font-weight:bold;')
>>> s.to_html() # doctest: +SKIP
@@ -893,9 +893,9 @@ def to_latex(
>>> s.table_styles = []
>>> s.caption = None
>>> s.format({
- ... ("Numeric", "Integers"): '\${}',
- ... ("Numeric", "Floats"): '{:.3f}',
- ... ("Non-Numeric", "Strings"): str.upper
+ ... ("Numeric", "Integers"): '\\${}',
+ ... ("Numeric", "Floats"): '{:.3f}',
+ ... ("Non-Numeric", "Strings"): str.upper
... }) # doctest: +SKIP
Numeric Non-Numeric
Integers Floats Strings
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 0dae0e7106b69..89867ab4f19d0 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -121,7 +121,7 @@ def read_pickle(
storage_options: StorageOptions | None = None,
) -> DataFrame | Series:
"""
- Load pickled pandas object (or any object) from file.
+ Load pickled pandas object (or any object) from file and return unpickled object.
.. warning::
@@ -143,7 +143,8 @@ def read_pickle(
Returns
-------
- same type as object stored in file
+ object
+ The unpickled pandas object (or any object) that was stored in file.
See Also
--------
@@ -162,7 +163,7 @@ def read_pickle(
--------
>>> original_df = pd.DataFrame(
... {{"foo": range(5), "bar": range(5, 10)}}
- ... ) # doctest: +SKIP
+ ... ) # doctest: +SKIP
>>> original_df # doctest: +SKIP
foo bar
0 0 5
| Checked if validation docstrings passes for :
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Series.to_latex
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.read_pickle
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.to_latex
4. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.resample.Resampler.pipe
OUTPUT:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.Series.to_latex
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.Series.to_latex" correct. :)
```
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.read_pickle
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.read_pickle" correct. :)
```
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.to_latex
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.to_latex" correct. :)
```
4. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.resample.Resampler.pipe
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.core.resample.Resampler.pipe" correct. :)
```
- [ ] xref https://github.com/pandas-dev/pandas/issues/56804
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56843 | 2024-01-12T04:07:41Z | 2024-01-14T09:39:34Z | 2024-01-14T09:39:34Z | 2024-01-15T23:18:15Z |
PERF: use the libjoin fast paths in a few more cases | diff --git a/doc/source/whatsnew/v2.3.0.rst b/doc/source/whatsnew/v2.3.0.rst
index 4f8bb30a6cd37..e217e8c8557bb 100644
--- a/doc/source/whatsnew/v2.3.0.rst
+++ b/doc/source/whatsnew/v2.3.0.rst
@@ -101,6 +101,7 @@ Deprecations
Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
+- Performance improvement in :meth:`DataFrame.join` for sorted but non-unique indexes (:issue:`56941`)
- Performance improvement in :meth:`DataFrame.join` when left and/or right are non-unique and ``how`` is ``"left"``, ``"right"``, or ``"inner"`` (:issue:`56817`)
- Performance improvement in :meth:`Index.take` when ``indices`` is a full range indexer from zero to length of index (:issue:`56806`)
-
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index c14a635dc7b87..c3775961cedb8 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4996,35 +4996,29 @@ def _join_monotonic(
ridx: npt.NDArray[np.intp] | None
lidx: npt.NDArray[np.intp] | None
- if self.is_unique and other.is_unique:
- # We can perform much better than the general case
- if how == "left":
+ if how == "left":
+ if other.is_unique:
+ # We can perform much better than the general case
join_index = self
lidx = None
ridx = self._left_indexer_unique(other)
- elif how == "right":
+ else:
+ join_array, lidx, ridx = self._left_indexer(other)
+ join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
+ elif how == "right":
+ if self.is_unique:
+ # We can perform much better than the general case
join_index = other
lidx = other._left_indexer_unique(self)
ridx = None
- elif how == "inner":
- join_array, lidx, ridx = self._inner_indexer(other)
- join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
- elif how == "outer":
- join_array, lidx, ridx = self._outer_indexer(other)
- join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
- else:
- if how == "left":
- join_array, lidx, ridx = self._left_indexer(other)
- elif how == "right":
+ else:
join_array, ridx, lidx = other._left_indexer(self)
- elif how == "inner":
- join_array, lidx, ridx = self._inner_indexer(other)
- elif how == "outer":
- join_array, lidx, ridx = self._outer_indexer(other)
-
- assert lidx is not None
- assert ridx is not None
-
+ join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
+ elif how == "inner":
+ join_array, lidx, ridx = self._inner_indexer(other)
+ join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
+ elif how == "outer":
+ join_array, lidx, ridx = self._outer_indexer(other)
join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
lidx = None if lidx is None else ensure_platform_int(lidx)
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v2.3.0.rst` file if fixing a bug or adding a new feature.
Additional cases where the fast path can be used:
* left join with many:1 (right unique)
* right join with 1:many (left unique)
These are covered by existing tests.
```
import pandas as pd
import numpy as np
left = pd.Index(np.arange(100_000)).repeat(10)
right = pd.Index(np.arange(100_000))
%timeit left.join(right, how="left")
# 22.7 ms ± 7.84 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
# 3.8 ms ± 60.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
```
```
import pandas as pd
import numpy as np
data = [f"i_{i:06}" for i in range(100_000)]
left = pd.Index(data, "string[pyarrow_numpy]")
right = pd.Index(data, "string[pyarrow_numpy]").repeat(10)
%timeit left.join(right, how="right")
# 393 ms ± 4.64 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
# 150 ms ± 3.98 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/56841 | 2024-01-12T01:43:09Z | 2024-01-12T17:32:54Z | 2024-01-12T17:32:54Z | 2024-01-12T17:33:02Z |
CLN: Drop Numpy 1.22 per NEP29 | diff --git a/ci/deps/actions-39-minimum_versions.yaml b/ci/deps/actions-39-minimum_versions.yaml
index 7067048c4434d..115ccf01ccaad 100644
--- a/ci/deps/actions-39-minimum_versions.yaml
+++ b/ci/deps/actions-39-minimum_versions.yaml
@@ -22,7 +22,7 @@ dependencies:
# required dependencies
- python-dateutil=2.8.2
- - numpy=1.22.4
+ - numpy=1.23.5
- pytz=2020.1
# optional dependencies
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index 1d7eca5223544..f041ed134f693 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -203,7 +203,7 @@ pandas requires the following dependencies.
================================================================ ==========================
Package Minimum supported version
================================================================ ==========================
-`NumPy <https://numpy.org>`__ 1.22.4
+`NumPy <https://numpy.org>`__ 1.23.5
`python-dateutil <https://dateutil.readthedocs.io/en/stable/>`__ 2.8.2
`pytz <https://pypi.org/project/pytz/>`__ 2020.1
`tzdata <https://pypi.org/project/tzdata/>`__ 2022.7
diff --git a/doc/source/whatsnew/v2.3.0.rst b/doc/source/whatsnew/v2.3.0.rst
index 7b53ddb3923f0..762468dfea52f 100644
--- a/doc/source/whatsnew/v2.3.0.rst
+++ b/doc/source/whatsnew/v2.3.0.rst
@@ -65,7 +65,7 @@ If installed, we now require:
+-----------------+-----------------+----------+---------+
| Package | Minimum Version | Required | Changed |
+=================+=================+==========+=========+
-| | | X | X |
+| numpy | 1.23.5 | X | X |
+-----------------+-----------------+----------+---------+
For `optional libraries <https://pandas.pydata.org/docs/getting_started/install.html>`_ the general recommendation is to use the latest version.
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index 3014bd652d8c4..7fc4b8d1d9b10 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -8,13 +8,12 @@
# numpy versioning
_np_version = np.__version__
_nlv = Version(_np_version)
-np_version_lt1p23 = _nlv < Version("1.23")
np_version_gte1p24 = _nlv >= Version("1.24")
np_version_gte1p24p3 = _nlv >= Version("1.24.3")
np_version_gte1p25 = _nlv >= Version("1.25")
np_version_gt2 = _nlv >= Version("2.0.0.dev0")
is_numpy_dev = _nlv.dev is not None
-_min_numpy_ver = "1.22.4"
+_min_numpy_ver = "1.23.5"
if _nlv < Version(_min_numpy_ver):
diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py
index 2bc488fbb1dd1..96a239b89d6ce 100644
--- a/pandas/tests/interchange/test_impl.py
+++ b/pandas/tests/interchange/test_impl.py
@@ -8,7 +8,6 @@
is_ci_environment,
is_platform_windows,
)
-from pandas.compat.numpy import np_version_lt1p23
import pandas as pd
import pandas._testing as tm
@@ -260,7 +259,6 @@ def test_datetime():
tm.assert_frame_equal(df, from_dataframe(df.__dataframe__()))
-@pytest.mark.skipif(np_version_lt1p23, reason="Numpy > 1.23 required")
def test_categorical_to_numpy_dlpack():
# https://github.com/pandas-dev/pandas/issues/48393
df = pd.DataFrame({"A": pd.Categorical(["a", "b", "a"])})
diff --git a/pyproject.toml b/pyproject.toml
index ebdf9deb034b5..b901c0b6a202f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,8 +30,7 @@ authors = [
license = {file = 'LICENSE'}
requires-python = '>=3.9'
dependencies = [
- "numpy>=1.22.4; python_version<'3.11'",
- "numpy>=1.23.2; python_version=='3.11'",
+ "numpy>=1.23.5; python_version<'3.12'",
"numpy>=1.26.0; python_version>='3.12'",
"python-dateutil>=2.8.2",
"pytz>=2020.1",
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56838 | 2024-01-11T17:51:00Z | 2024-01-18T17:48:11Z | 2024-01-18T17:48:11Z | 2024-01-18T17:48:56Z |
DOC: Corrected indentation of column names | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b37f22339fcfd..a4aae969052c1 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2986,7 +2986,7 @@ def to_sql(
>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
- name
+ name
0 User 1
1 User 2
2 User 3
@@ -3063,7 +3063,7 @@ def to_sql(
>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
- A
+ A
0 1.0
1 NaN
2 2.0
@@ -8345,13 +8345,13 @@ def interpolate(
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
- a b c d
+ a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
- a b c d
+ a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
| The Column names ("A","B",etc .) in the file `generic.py` were not indented in the multiples of 4. So changed as many as I could spot | https://api.github.com/repos/pandas-dev/pandas/pulls/56837 | 2024-01-11T14:42:14Z | 2024-01-11T16:34:55Z | null | 2024-01-11T16:34:55Z |
DOC: fixed EX03 errors in docstrings for `pandas.Series.dt.day_name` and `pandas.Series.dt.day_name` | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 38dccff6e2bdc..d92d08b547d7c 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -71,8 +71,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
MSG='Partially validate docstrings (EX03)' ; echo $MSG
$BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX03 --ignore_functions \
- pandas.Series.dt.day_name \
- pandas.Series.str.len \
pandas.Series.cat.set_categories \
pandas.Series.plot.bar \
pandas.Series.plot.hist \
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index a4d01dd6667f6..4ca9af725abc4 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -1365,7 +1365,7 @@ def day_name(self, locale=None) -> npt.NDArray[np.object_]:
>>> idx
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', freq='D')
- >>> idx.day_name(locale='pt_BR.utf8') # doctest: +SKIP
+ >>> idx.day_name(locale='pt_BR.utf8') # doctest: +SKIP
Index(['Segunda', 'Terça', 'Quarta'], dtype='object')
"""
values = self._local_timestamps()
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index 7c6dca3bad7d9..6c271ef1021f4 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -3055,11 +3055,11 @@ def len(self):
number of entries for dictionaries, lists or tuples.
>>> s = pd.Series(['dog',
- ... '',
- ... 5,
- ... {'foo' : 'bar'},
- ... [2, 3, 5, 7],
- ... ('one', 'two', 'three')])
+ ... '',
+ ... 5,
+ ... {'foo' : 'bar'},
+ ... [2, 3, 5, 7],
+ ... ('one', 'two', 'three')])
>>> s
0 dog
1
| Modified the docstrings to resolve EX03/flake8 errors for,
- `pandas.Series.dt.day_name`
```
################################################################################
################################## Validation ##################################
################################################################################
4 Errors found for `pandas.Series.dt.day_name`:
No extended summary found
Parameters {'*args', '**kwargs'} not documented
Unknown parameters {'locale'}
See Also section not found
```
- `pandas.Series.str.len`
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.Series.str.len" correct. :)
```
- [x] #56804
- [ ] [Tests passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56835 | 2024-01-11T12:53:03Z | 2024-01-11T16:15:26Z | 2024-01-11T16:15:26Z | 2024-01-11T16:15:36Z |
DOC: Fix EX03 flake8 errors in docstrings | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 092681049f7f2..75b6925d14f21 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -71,9 +71,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
MSG='Partially validate docstrings (EX03)' ; echo $MSG
$BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX03 --ignore_functions \
- pandas.Series.cat.set_categories \
- pandas.Series.plot.bar \
- pandas.Series.plot.hist \
pandas.Series.plot.line \
pandas.Series.to_sql \
pandas.Series.to_latex \
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 342aaad3f9adc..5a88aed1419e3 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1082,7 +1082,7 @@ def set_categories(
For :class:`pandas.Series`:
>>> raw_cat = pd.Categorical(['a', 'b', 'c', 'A'],
- ... categories=['a', 'b', 'c'], ordered=True)
+ ... categories=['a', 'b', 'c'], ordered=True)
>>> ser = pd.Series(raw_cat)
>>> ser
0 a
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index cb5598a98d5af..96609fdc1671b 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -1114,7 +1114,7 @@ def line(
.. plot::
:context: close-figs
- >>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
+ >>> df = pd.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
@@ -1195,7 +1195,7 @@ def bar( # pylint: disable=disallowed-name
"""
See Also
--------
- DataFrame.plot.bar: Vertical bar plot.
+ DataFrame.plot.bar : Vertical bar plot.
DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
For `pandas.Series.cat.set_categories`
<img width="459" alt="image" src="https://github.com/pandas-dev/pandas/assets/77875500/bbde9ea6-5d8f-4597-a8ab-3d873af66863">
For `pandas.Series.plot.bar`
<img width="308" alt="image" src="https://github.com/pandas-dev/pandas/assets/77875500/d59ffaad-ae12-4ca1-b1ee-cb2da4126a26">
For `pandas.Series.plot.hist`
<img width="379" alt="image" src="https://github.com/pandas-dev/pandas/assets/77875500/a36e946c-1774-4b7b-a175-e2beb7934385">
Some errors remain, but unrelated to EX03.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56834 | 2024-01-11T11:21:02Z | 2024-01-12T17:59:25Z | 2024-01-12T17:59:25Z | 2024-01-13T03:31:34Z |
Fix typo in docstring example | diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 48a5f85e1c388..31309777c154d 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1039,7 +1039,7 @@ def interpolate(
2023-03-01 07:00:04 3
Freq: s, dtype: int64
- Upsample the dataframe to 0.5Hz by providing the period time of 2s.
+ Downsample the dataframe to 0.5Hz by providing the period time of 2s.
>>> series.resample("2s").interpolate("linear")
2023-03-01 07:00:00 1
@@ -1047,7 +1047,7 @@ def interpolate(
2023-03-01 07:00:04 3
Freq: 2s, dtype: int64
- Downsample the dataframe to 2Hz by providing the period time of 500ms.
+ Upsample the dataframe to 2Hz by providing the period time of 500ms.
>>> series.resample("500ms").interpolate("linear")
2023-03-01 07:00:00.000 1.0
| The words "upsample" and "downsample" were switched in for `pandas.core.resample.Resampler.interpolate`'s docstring examples | https://api.github.com/repos/pandas-dev/pandas/pulls/56833 | 2024-01-11T11:19:56Z | 2024-01-11T16:36:21Z | 2024-01-11T16:36:21Z | 2024-01-11T16:36:28Z |
BUG: Negative freq in date_range produces values out of start and endpoints | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index 8e9c72faf3231..10d5a518f686d 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -308,6 +308,7 @@ Categorical
Datetimelike
^^^^^^^^^^^^
- Bug in :func:`date_range` where the last valid timestamp would sometimes not be produced (:issue:`56134`)
+- Bug in :func:`date_range` where using a negative frequency value would not include all points between the start and end values (:issue:`56382`)
-
Timedelta
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index e4862ac1030b6..ad4611aac9e35 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -2777,7 +2777,12 @@ def _generate_range(
if start and not offset.is_on_offset(start):
# Incompatible types in assignment (expression has type "datetime",
# variable has type "Optional[Timestamp]")
- start = offset.rollforward(start) # type: ignore[assignment]
+
+ # GH #56147 account for negative direction and range bounds
+ if offset.n >= 0:
+ start = offset.rollforward(start) # type: ignore[assignment]
+ else:
+ start = offset.rollback(start) # type: ignore[assignment]
# Unsupported operand types for < ("Timestamp" and "None")
if periods is None and end < start and offset.n >= 0: # type: ignore[operator]
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 4f9c810cc7e1d..2d773c04b8ea9 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -841,13 +841,15 @@ def date_range(
Return a fixed frequency DatetimeIndex.
Returns the range of equally spaced time points (where the difference between any
- two adjacent points is specified by the given frequency) such that they all
- satisfy `start <[=] x <[=] end`, where the first one and the last one are, resp.,
- the first and last time points in that range that fall on the boundary of ``freq``
- (if given as a frequency string) or that are valid for ``freq`` (if given as a
- :class:`pandas.tseries.offsets.DateOffset`). (If exactly one of ``start``,
- ``end``, or ``freq`` is *not* specified, this missing parameter can be computed
- given ``periods``, the number of timesteps in the range. See the note below.)
+ two adjacent points is specified by the given frequency) such that they fall in the
+ range `[start, end]` , where the first one and the last one are, resp., the first
+ and last time points in that range that fall on the boundary of ``freq`` (if given
+ as a frequency string) or that are valid for ``freq`` (if given as a
+ :class:`pandas.tseries.offsets.DateOffset`). If ``freq`` is positive, the points
+ satisfy `start <[=] x <[=] end`, and if ``freq`` is negative, the points satisfy
+ `end <[=] x <[=] start`. (If exactly one of ``start``, ``end``, or ``freq`` is *not*
+ specified, this missing parameter can be computed given ``periods``, the number of
+ timesteps in the range. See the note below.)
Parameters
----------
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index fecd7f4e7f2b0..ddbeecf150a5e 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -1735,3 +1735,18 @@ def test_date_range_partial_day_year_end(self, unit):
freq="YE",
)
tm.assert_index_equal(rng, exp)
+
+ def test_date_range_negative_freq_year_end_inbounds(self, unit):
+ # GH#56147
+ rng = date_range(
+ start="2023-10-31 00:00:00",
+ end="2021-10-31 00:00:00",
+ freq="-1YE",
+ unit=unit,
+ )
+ exp = DatetimeIndex(
+ ["2022-12-31 00:00:00", "2021-12-31 00:00:00"],
+ dtype=f"M8[{unit}]",
+ freq="-1YE",
+ )
+ tm.assert_index_equal(rng, exp)
| - [X] closes https://github.com/pandas-dev/pandas/issues/56147
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [X] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56832 | 2024-01-11T11:17:27Z | 2024-03-20T18:59:30Z | 2024-03-20T18:59:30Z | 2024-03-20T18:59:37Z |
BUG fix for date_range 56134 | diff --git a/doc/source/whatsnew/v2.3.0.rst b/doc/source/whatsnew/v2.3.0.rst
index 7b53ddb3923f0..75f68e1bd0a48 100644
--- a/doc/source/whatsnew/v2.3.0.rst
+++ b/doc/source/whatsnew/v2.3.0.rst
@@ -119,7 +119,7 @@ Categorical
Datetimelike
^^^^^^^^^^^^
--
+- Bug in :func:`date_range` where the last valid timestamp would sometimes not be produced (:issue:`56134`)
-
Timedelta
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index a4d01dd6667f6..8a9b19dc7934f 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -2775,11 +2775,6 @@ def _generate_range(
# variable has type "Optional[Timestamp]")
start = offset.rollforward(start) # type: ignore[assignment]
- elif end and not offset.is_on_offset(end):
- # Incompatible types in assignment (expression has type "datetime",
- # variable has type "Optional[Timestamp]")
- end = offset.rollback(end) # type: ignore[assignment]
-
# Unsupported operand types for < ("Timestamp" and "None")
if periods is None and end < start and offset.n >= 0: # type: ignore[operator]
end = None
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index 024f37ee5b710..ec158f7b194a0 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -1703,3 +1703,18 @@ def test_date_range_freqstr_matches_offset(self, freqstr, offset):
idx2 = date_range(start=sdate, end=edate, freq=offset)
assert len(idx1) == len(idx2)
assert idx1.freq == idx2.freq
+
+ def test_date_range_partial_day_year_end(self, unit):
+ # GH#56134
+ rng = date_range(
+ start="2021-12-31 00:00:01",
+ end="2023-10-31 00:00:00",
+ freq="YE",
+ unit=unit,
+ )
+ exp = DatetimeIndex(
+ ["2021-12-31 00:00:01", "2022-12-31 00:00:01"],
+ dtype=f"M8[{unit}]",
+ freq="YE",
+ )
+ tm.assert_index_equal(rng, exp)
| - [X] closes https://github.com/pandas-dev/pandas/issues/56134
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [X] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Edit, the first commit is incorrectly named - it fixes 56134 instead
| https://api.github.com/repos/pandas-dev/pandas/pulls/56831 | 2024-01-11T11:07:10Z | 2024-01-11T14:35:07Z | 2024-01-11T14:35:07Z | 2024-01-30T07:13:53Z |
DOC: fix EX03 errors in docstrings -pandas.DataFrame for idxmax, idxmin, pivot | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 021c7b74adb7f..711b9cd5dbf36 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9223,11 +9223,11 @@ def groupby(
You could also assign a list of column names or a list of index names.
>>> df = pd.DataFrame({
- ... "lev1": [1, 1, 1, 2, 2, 2],
- ... "lev2": [1, 1, 2, 1, 1, 2],
- ... "lev3": [1, 2, 1, 2, 1, 2],
- ... "lev4": [1, 2, 3, 4, 5, 6],
- ... "values": [0, 1, 2, 3, 4, 5]})
+ ... "lev1": [1, 1, 1, 2, 2, 2],
+ ... "lev2": [1, 1, 2, 1, 1, 2],
+ ... "lev3": [1, 2, 1, 2, 1, 2],
+ ... "lev4": [1, 2, 3, 4, 5, 6],
+ ... "values": [0, 1, 2, 3, 4, 5]})
>>> df
lev1 lev2 lev3 lev4 values
0 1 1 1 1 0
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index 3369df5da4cba..49b380e0af01e 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -839,7 +839,7 @@
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({{'consumption': [10.51, 103.11, 55.48],
- ... 'co2_emissions': [37.2, 19.66, 1712]}},
+ ... 'co2_emissions': [37.2, 19.66, 1712]}},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
@@ -904,7 +904,7 @@
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({{'consumption': [10.51, 103.11, 55.48],
- ... 'co2_emissions': [37.2, 19.66, 1712]}},
+ ... 'co2_emissions': [37.2, 19.66, 1712]}},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
| Checked if validation docstrings passes for :
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.idxmax
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.idxmin
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.pivot
OUTPUT:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.idxmax
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.idxmax" correct. :)
```
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.idxmin
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.idxmin" correct. :)
```
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.pivot
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.pivot" correct. :)
```
- [ ] xref https://github.com/pandas-dev/pandas/issues/56804
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56830 | 2024-01-11T09:53:44Z | 2024-01-11T16:14:25Z | 2024-01-11T16:14:25Z | 2024-01-12T04:57:57Z |
Remove pandas.core.resample.Resampler.fillna, pandas.core.groupby.SeriesGroupBy.describe, pandas.DataFrame.last, pandas.DataFrame.plot.hist from ci/code_checks.sh | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 38dccff6e2bdc..663288f40234e 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -106,7 +106,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.DataFrame.to_sql \
pandas.read_stata \
pandas.core.resample.Resampler.pipe \
- pandas.core.resample.Resampler.fillna \
pandas.core.resample.Resampler.interpolate \
pandas.plotting.scatter_matrix \
pandas.pivot \
@@ -132,7 +131,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.core.groupby.DataFrameGroupBy.idxmax \
pandas.core.groupby.DataFrameGroupBy.idxmin \
pandas.core.groupby.DataFrameGroupBy.value_counts \
- pandas.core.groupby.SeriesGroupBy.describe \
pandas.core.groupby.DataFrameGroupBy.boxplot \
pandas.core.groupby.DataFrameGroupBy.hist \
pandas.io.formats.style.Styler.map \
@@ -156,14 +154,12 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.DataFrame.var \
pandas.DataFrame.idxmax \
pandas.DataFrame.idxmin \
- pandas.DataFrame.last \
pandas.DataFrame.pivot \
pandas.DataFrame.sort_values \
pandas.DataFrame.tz_convert \
pandas.DataFrame.tz_localize \
pandas.DataFrame.plot.bar \
pandas.DataFrame.plot.hexbin \
- pandas.DataFrame.plot.hist \
pandas.DataFrame.plot.line \
pandas.DataFrame.hist \
RET=$(($RET + $?)) ; echo $MSG "DONE"
| Checked if validation docstrings passes for :
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.resample.Resampler.fillna
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.groupby.SeriesGroupBy.describe
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.last
4. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.plot.hist
OUTPUT:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.resample.Resampler.fillna
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.core.resample.Resampler.fillna" correct. :)
```
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.groupby.SeriesGroupBy.describe
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.core.groupby.SeriesGroupBy.describe" correct. :)
```
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.last
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.last" correct. :)
```
4. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.plot.hist
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.plot.hist" correct. :)
```
- [ ] xref https://github.com/pandas-dev/pandas/issues/56804
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56829 | 2024-01-11T07:52:34Z | 2024-01-11T16:10:04Z | 2024-01-11T16:10:04Z | 2024-01-12T04:56:39Z |
Remove pandas.core.groupby.DataFrameGroupBy for describe, idxmax, idxmin, value_counts from ci/code_checks.sh | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 19ba75393c4d9..092681049f7f2 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -125,10 +125,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.core.groupby.SeriesGroupBy.transform \
pandas.core.groupby.SeriesGroupBy.pipe \
pandas.core.groupby.DataFrameGroupBy.pipe \
- pandas.core.groupby.DataFrameGroupBy.describe \
- pandas.core.groupby.DataFrameGroupBy.idxmax \
- pandas.core.groupby.DataFrameGroupBy.idxmin \
- pandas.core.groupby.DataFrameGroupBy.value_counts \
pandas.core.groupby.DataFrameGroupBy.boxplot \
pandas.core.groupby.DataFrameGroupBy.hist \
pandas.io.formats.style.Styler.map \
| Checked if validation docstrings passes for :
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.groupby.DataFrameGroupBy.describe
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.groupby.DataFrameGroupBy.idxmax
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.groupby.DataFrameGroupBy.idxmin
4. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.groupby.DataFrameGroupBy.value_counts
OUTPUT:
1. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.groupby.DataFrameGroupBy.describe
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.core.groupby.DataFrameGroupBy.describe" correct. :)
```
2. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.groupby.DataFrameGroupBy.idxmax
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.core.groupby.DataFrameGroupBy.idxmax" correct. :)
```
3. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.groupby.DataFrameGroupBy.idxmin
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.core.groupby.DataFrameGroupBy.idxmin" correct. :)
```
4. scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.core.groupby.DataFrameGroupBy.value_counts
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.core.groupby.DataFrameGroupBy.value_counts" correct. :)
```
- [ ] xref https://github.com/pandas-dev/pandas/issues/56804
- [x] [Tests passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56828 | 2024-01-11T07:26:33Z | 2024-01-11T22:40:32Z | 2024-01-11T22:40:32Z | 2024-01-12T04:57:16Z |
CI: Improve error message format in validate_docstrings.py | diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index 53c67b7df928b..682d64244bc1f 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -392,7 +392,7 @@ def header(title, width=80, char="#") -> str:
if result["errors"]:
sys.stderr.write(f'{len(result["errors"])} Errors found for `{func_name}`:\n')
for err_code, err_desc in result["errors"]:
- sys.stderr.write(f"\t{err_desc}\n")
+ sys.stderr.write(f"\t{err_code}\t{err_desc}\n")
else:
sys.stderr.write(f'Docstring for "{func_name}" correct. :)\n')
| Enhance the error message generated by executing `scripts/validate_docstrings.py --format=actions --errors=EX03 method-name`.
Additionally, there appears to be a discrepancy, as including the ``--error=EX03`` flag still results in the display of another error. The cause of this inconsistency is unclear, and it might be related to a specific configuration.
(This observation and suggestion were brought to our attention by @asishm https://github.com/pandas-dev/pandas/issues/56804#issuecomment-1885402362)
After
<img width="631" alt="image" src="https://github.com/pandas-dev/pandas/assets/77875500/ee8e3125-617b-4b5d-8203-57e892741d23">
Before
<img width="615" alt="image" src="https://github.com/pandas-dev/pandas/assets/77875500/7ba9b8f8-ea93-48b4-8c7d-6e9fa5128355">
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. | https://api.github.com/repos/pandas-dev/pandas/pulls/56827 | 2024-01-11T06:13:15Z | 2024-01-15T06:11:39Z | 2024-01-15T06:11:39Z | 2024-01-15T09:21:10Z |
ENH: Add missing parameters to read_gbq for BigQuery | diff --git a/doc/source/whatsnew/v2.3.0.rst b/doc/source/whatsnew/v2.3.0.rst
index 7b53ddb3923f0..4552b5ea3ea8b 100644
--- a/doc/source/whatsnew/v2.3.0.rst
+++ b/doc/source/whatsnew/v2.3.0.rst
@@ -110,7 +110,7 @@ Performance improvements
Bug fixes
~~~~~~~~~
- Fixed bug in :meth:`Series.diff` allowing non-integer values for the ``periods`` argument. (:issue:`56607`)
-
+-
Categorical
^^^^^^^^^^^
@@ -169,7 +169,7 @@ MultiIndex
I/O
^^^
--
+- Bug in :func:`read_gbq` that was missing parameters ``progress_bar_type``, ``dtypes``, ``auth_redirect_uri``, ``client_id``, ``columns``, which ``col_order`` is now an alias of (:issue:`#56826`)
-
Period
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index fe8702c2e16ae..691118ff581fc 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -30,10 +30,10 @@ def _try_import() -> ModuleType:
def read_gbq(
- query: str,
+ query_or_table: str,
project_id: str | None = None,
index_col: str | None = None,
- col_order: list[str] | None = None,
+ columns: list[str] | None = None,
reauth: bool = False,
auth_local_webserver: bool = True,
dialect: str | None = None,
@@ -43,6 +43,11 @@ def read_gbq(
use_bqstorage_api: bool | None = None,
max_results: int | None = None,
progress_bar_type: str | None = None,
+ dtypes: dict[str, Any] | None = None,
+ auth_redirect_uri: str | None = None,
+ client_id: str | None = None,
+ client_secret: str | None = None,
+ col_order: list[str] | None = None,
) -> DataFrame:
"""
Load data from Google BigQuery.
@@ -60,14 +65,14 @@ def read_gbq(
Parameters
----------
- query : str
+ query_or_table : str
SQL-Like Query to return data values.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
index_col : str, optional
Name of result column to use for index in results DataFrame.
- col_order : list(str), optional
+ columns : list(str), optional
List of BigQuery column names in the desired order for results
DataFrame.
reauth : bool, default False
@@ -163,6 +168,26 @@ def read_gbq(
Use the :func:`tqdm.tqdm_gui` function to display a
progress bar as a graphical dialog box.
+ dtypes : dict, optional
+ A dictionary of column names to pandas ``dtype``. The provided
+ ``dtype`` is used when constructing the series for the column
+ specified. Otherwise, a default ``dtype`` is used.
+
+ auth_redirect_uri : str
+ Path to the authentication page for organization-specific authentication
+ workflows. Used when ``auth_local_webserver=False``.
+
+ client_id : str
+ The Client ID for the Google Cloud Project the user is attempting to
+ connect to.
+
+ client_secret : str
+ The Client Secret associated with the Client ID for the Google Cloud Project
+ the user is attempting to connect to.
+
+ col_order : list(str), optional
+ Alias for columns, retained for backwards compatibility.
+
Returns
-------
df: DataFrame
@@ -195,22 +220,33 @@ def read_gbq(
)
pandas_gbq = _try_import()
- kwargs: dict[str, str | bool | int | None] = {}
+ kwargs: dict[str, str | bool | int | dict[str, Any] | list[str] | None] = {}
# START: new kwargs. Don't populate unless explicitly set.
if use_bqstorage_api is not None:
kwargs["use_bqstorage_api"] = use_bqstorage_api
if max_results is not None:
kwargs["max_results"] = max_results
-
- kwargs["progress_bar_type"] = progress_bar_type
+ if progress_bar_type is not None:
+ kwargs["progress_bar_type"] = progress_bar_type
+ if dtypes is not None:
+ kwargs["dtypes"] = dtypes
+ if auth_redirect_uri is not None:
+ kwargs["auth_redirect_uri"] = auth_redirect_uri
+ if client_id is not None:
+ kwargs["client_id"] = client_id
+ if client_secret is not None:
+ kwargs["client_secret"] = client_secret
+ if columns is not None:
+ kwargs["columns"] = columns
+ if col_order is not None:
+ kwargs["col_order"] = col_order
# END: new kwargs
return pandas_gbq.read_gbq(
- query,
+ query_or_table=query_or_table,
project_id=project_id,
index_col=index_col,
- col_order=col_order,
reauth=reauth,
auth_local_webserver=auth_local_webserver,
dialect=dialect,
| There are some missing params from pandas-gbq needed to be added: progress_bar_type, dtypes, auth_redirect_uri, client_id, columns, which col_order is now an alias of
- [ ] ~closes #xxxx~
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [X] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56826 | 2024-01-11T05:42:28Z | 2024-01-11T16:40:11Z | null | 2024-01-11T18:21:13Z |
DOC: 2.2.0 whatsnew cleanups | diff --git a/doc/source/whatsnew/v2.1.4.rst b/doc/source/whatsnew/v2.1.4.rst
index 57b83a294963b..73b1103c1bd37 100644
--- a/doc/source/whatsnew/v2.1.4.rst
+++ b/doc/source/whatsnew/v2.1.4.rst
@@ -42,4 +42,4 @@ Bug fixes
Contributors
~~~~~~~~~~~~
-.. contributors:: v2.1.3..v2.1.4|HEAD
+.. contributors:: v2.1.3..v2.1.4
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 5de5bd58bd35f..ceb67b4ef956c 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -1,7 +1,7 @@
.. _whatsnew_220:
-What's new in 2.2.0 (Month XX, 2024)
-------------------------------------
+What's new in 2.2.0 (January XX, 2024)
+--------------------------------------
These are the changes in pandas 2.2.0. See :ref:`release` for a full changelog
including other versions of pandas.
@@ -436,12 +436,6 @@ index levels when joining on two indexes with different levels (:issue:`34133`).
result
-.. ---------------------------------------------------------------------------
-.. _whatsnew_220.api_breaking:
-
-Backwards incompatible API changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
.. _whatsnew_220.api_breaking.deps:
Increased minimum versions for dependencies
@@ -820,7 +814,7 @@ Conversion
- Bug in :meth:`DataFrame.astype` when called with ``str`` on unpickled array - the array might change in-place (:issue:`54654`)
- Bug in :meth:`DataFrame.astype` where ``errors="ignore"`` had no effect for extension types (:issue:`54654`)
- Bug in :meth:`Series.convert_dtypes` not converting all NA column to ``null[pyarrow]`` (:issue:`55346`)
-- Bug in ``DataFrame.loc`` was not throwing "incompatible dtype warning" (see `PDEP6 <https://pandas.pydata.org/pdeps/0006-ban-upcasting.html>`_) when assigning a ``Series`` with a different dtype using a full column setter (e.g. ``df.loc[:, 'a'] = incompatible_value``) (:issue:`39584`)
+- Bug in :meth:``DataFrame.loc`` was not throwing "incompatible dtype warning" (see `PDEP6 <https://pandas.pydata.org/pdeps/0006-ban-upcasting.html>`_) when assigning a ``Series`` with a different dtype using a full column setter (e.g. ``df.loc[:, 'a'] = incompatible_value``) (:issue:`39584`)
Strings
^^^^^^^
@@ -830,10 +824,10 @@ Strings
- Bug in :meth:`Index.str.cat` always casting result to object dtype (:issue:`56157`)
- Bug in :meth:`Series.__mul__` for :class:`ArrowDtype` with ``pyarrow.string`` dtype and ``string[pyarrow]`` for the pyarrow backend (:issue:`51970`)
- Bug in :meth:`Series.str.find` when ``start < 0`` for :class:`ArrowDtype` with ``pyarrow.string`` (:issue:`56411`)
+- Bug in :meth:`Series.str.fullmatch` when ``dtype=pandas.ArrowDtype(pyarrow.string()))`` allows partial matches when regex ends in literal //$ (:issue:`56652`)
- Bug in :meth:`Series.str.replace` when ``n < 0`` for :class:`ArrowDtype` with ``pyarrow.string`` (:issue:`56404`)
- Bug in :meth:`Series.str.startswith` and :meth:`Series.str.endswith` with arguments of type ``tuple[str, ...]`` for :class:`ArrowDtype` with ``pyarrow.string`` dtype (:issue:`56579`)
- Bug in :meth:`Series.str.startswith` and :meth:`Series.str.endswith` with arguments of type ``tuple[str, ...]`` for ``string[pyarrow]`` (:issue:`54942`)
-- Bug in :meth:`str.fullmatch` when ``dtype=pandas.ArrowDtype(pyarrow.string()))`` allows partial matches when regex ends in literal //$ (:issue:`56652`)
- Bug in comparison operations for ``dtype="string[pyarrow_numpy]"`` raising if dtypes can't be compared (:issue:`56008`)
Interval
@@ -892,7 +886,6 @@ Plotting
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
-- Bug in :class:`.Rolling` where duplicate datetimelike indexes are treated as consecutive rather than equal with ``closed='left'`` and ``closed='neither'`` (:issue:`20712`)
- Bug in :meth:`.DataFrameGroupBy.idxmin`, :meth:`.DataFrameGroupBy.idxmax`, :meth:`.SeriesGroupBy.idxmin`, and :meth:`.SeriesGroupBy.idxmax` would not retain :class:`.Categorical` dtype when the index was a :class:`.CategoricalIndex` that contained NA values (:issue:`54234`)
- Bug in :meth:`.DataFrameGroupBy.transform` and :meth:`.SeriesGroupBy.transform` when ``observed=False`` and ``f="idxmin"`` or ``f="idxmax"`` would incorrectly raise on unobserved categories (:issue:`54234`)
- Bug in :meth:`.DataFrameGroupBy.value_counts` and :meth:`.SeriesGroupBy.value_counts` could result in incorrect sorting if the columns of the DataFrame or name of the Series are integers (:issue:`55951`)
@@ -906,6 +899,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrame.resample` when resampling on a :class:`ArrowDtype` of ``pyarrow.timestamp`` or ``pyarrow.duration`` type (:issue:`55989`)
- Bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`)
- Bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.MonthBegin` (:issue:`55271`)
+- Bug in :meth:`DataFrame.rolling` and :meth:`Series.rolling` where duplicate datetimelike indexes are treated as consecutive rather than equal with ``closed='left'`` and ``closed='neither'`` (:issue:`20712`)
- Bug in :meth:`DataFrame.rolling` and :meth:`Series.rolling` where either the ``index`` or ``on`` column was :class:`ArrowDtype` with ``pyarrow.timestamp`` type (:issue:`55849`)
Reshaping
@@ -927,16 +921,16 @@ Reshaping
Sparse
^^^^^^
-- Bug in :meth:`SparseArray.take` when using a different fill value than the array's fill value (:issue:`55181`)
+- Bug in :meth:`arrays.SparseArray.take` when using a different fill value than the array's fill value (:issue:`55181`)
Other
^^^^^
- :meth:`DataFrame.__dataframe__` did not support pyarrow large strings (:issue:`56702`)
- Bug in :func:`DataFrame.describe` when formatting percentiles in the resulting percentile 99.999% is rounded to 100% (:issue:`55765`)
+- Bug in :func:`api.interchange.from_dataframe` where it raised ``NotImplementedError`` when handling empty string columns (:issue:`56703`)
- Bug in :func:`cut` and :func:`qcut` with ``datetime64`` dtype values with non-nanosecond units incorrectly returning nanosecond-unit bins (:issue:`56101`)
- Bug in :func:`cut` incorrectly allowing cutting of timezone-aware datetimes with timezone-naive bins (:issue:`54964`)
- Bug in :func:`infer_freq` and :meth:`DatetimeIndex.inferred_freq` with weekly frequencies and non-nanosecond resolutions (:issue:`55609`)
-- Bug in :func:`pd.api.interchange.from_dataframe` where it raised ``NotImplementedError`` when handling empty string columns (:issue:`56703`)
- Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55009`)
- Bug in :meth:`DataFrame.from_dict` which would always sort the rows of the created :class:`DataFrame`. (:issue:`55683`)
- Bug in :meth:`DataFrame.sort_index` when passing ``axis="columns"`` and ``ignore_index=True`` raising a ``ValueError`` (:issue:`56478`)
@@ -944,10 +938,12 @@ Other
- Bug in rendering a :class:`Series` with a :class:`MultiIndex` when one of the index level's names is 0 not having that name displayed (:issue:`55415`)
- Bug in the error message when assigning an empty :class:`DataFrame` to a column (:issue:`55956`)
- Bug when time-like strings were being cast to :class:`ArrowDtype` with ``pyarrow.time64`` type (:issue:`56463`)
-- Fixed a spurious deprecation warning from ``numba`` >= 0.58.0 when passing a numpy ufunc in :class:`pandas.core.window.Rolling.apply` with ``engine="numba"`` (:issue:`55247`)
+- Fixed a spurious deprecation warning from ``numba`` >= 0.58.0 when passing a numpy ufunc in :class:`core.window.Rolling.apply` with ``engine="numba"`` (:issue:`55247`)
.. ---------------------------------------------------------------------------
.. _whatsnew_220.contributors:
Contributors
~~~~~~~~~~~~
+
+.. contributors:: v2.1.4..v2.2.0|HEAD
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56824 | 2024-01-11T03:59:57Z | 2024-01-17T21:53:34Z | 2024-01-17T21:53:33Z | 2024-01-17T22:29:15Z |
DOC: Change 2.3.0 to 3.0.0 | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 09d1ae08df57a..92b995851afb4 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -10,13 +10,13 @@ This is the list of changes to pandas between each release. For full details,
see the `commit logs <https://github.com/pandas-dev/pandas/commits/>`_. For install and
upgrade instructions, see :ref:`install`.
-Version 2.3
+Version 3.0
-----------
.. toctree::
:maxdepth: 2
- v2.3.0
+ v3.0.0
Version 2.2
diff --git a/doc/source/whatsnew/v2.3.0.rst b/doc/source/whatsnew/v3.0.0.rst
similarity index 87%
rename from doc/source/whatsnew/v2.3.0.rst
rename to doc/source/whatsnew/v3.0.0.rst
index 629b044f24f90..0c389719ae01a 100644
--- a/doc/source/whatsnew/v2.3.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -1,30 +1,30 @@
.. _whatsnew_230:
-What's new in 2.3.0 (Month XX, 2024)
+What's new in 3.0.0 (Month XX, 2024)
------------------------------------
-These are the changes in pandas 2.2.0. See :ref:`release` for a full changelog
+These are the changes in pandas 3.0.0. See :ref:`release` for a full changelog
including other versions of pandas.
{{ header }}
.. ---------------------------------------------------------------------------
-.. _whatsnew_230.enhancements:
+.. _whatsnew_300.enhancements:
Enhancements
~~~~~~~~~~~~
-.. _whatsnew_230.enhancements.enhancement1:
+.. _whatsnew_300.enhancements.enhancement1:
enhancement1
^^^^^^^^^^^^
-.. _whatsnew_230.enhancements.enhancement2:
+.. _whatsnew_300.enhancements.enhancement2:
enhancement2
^^^^^^^^^^^^
-.. _whatsnew_230.enhancements.other:
+.. _whatsnew_300.enhancements.other:
Other enhancements
^^^^^^^^^^^^^^^^^^
@@ -32,30 +32,30 @@ Other enhancements
-
.. ---------------------------------------------------------------------------
-.. _whatsnew_230.notable_bug_fixes:
+.. _whatsnew_300.notable_bug_fixes:
Notable bug fixes
~~~~~~~~~~~~~~~~~
These are bug fixes that might have notable behavior changes.
-.. _whatsnew_230.notable_bug_fixes.notable_bug_fix1:
+.. _whatsnew_300.notable_bug_fixes.notable_bug_fix1:
notable_bug_fix1
^^^^^^^^^^^^^^^^
-.. _whatsnew_230.notable_bug_fixes.notable_bug_fix2:
+.. _whatsnew_300.notable_bug_fixes.notable_bug_fix2:
notable_bug_fix2
^^^^^^^^^^^^^^^^
.. ---------------------------------------------------------------------------
-.. _whatsnew_230.api_breaking:
+.. _whatsnew_300.api_breaking:
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. _whatsnew_230.api_breaking.deps:
+.. _whatsnew_300.api_breaking.deps:
Increased minimum versions for dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -80,7 +80,7 @@ Optional libraries below the lowest tested version may still work, but are not c
See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for more.
-.. _whatsnew_230.api_breaking.other:
+.. _whatsnew_300.api_breaking.other:
Other API changes
^^^^^^^^^^^^^^^^^
@@ -88,7 +88,7 @@ Other API changes
-
.. ---------------------------------------------------------------------------
-.. _whatsnew_230.deprecations:
+.. _whatsnew_300.deprecations:
Deprecations
~~~~~~~~~~~~
@@ -97,7 +97,7 @@ Deprecations
-
.. ---------------------------------------------------------------------------
-.. _whatsnew_230.performance:
+.. _whatsnew_300.performance:
Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -109,7 +109,7 @@ Performance improvements
-
.. ---------------------------------------------------------------------------
-.. _whatsnew_230.bug_fixes:
+.. _whatsnew_300.bug_fixes:
Bug fixes
~~~~~~~~~
@@ -222,7 +222,7 @@ Other
-
.. ---------------------------------------------------------------------------
-.. _whatsnew_230.contributors:
+.. _whatsnew_300.contributors:
Contributors
~~~~~~~~~~~~
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56823 | 2024-01-11T03:21:10Z | 2024-01-22T00:26:05Z | 2024-01-22T00:26:05Z | 2024-01-22T00:26:18Z |
TST: Additions/updates to tests | diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py
index 6fa72bd48031c..f94702fce4c85 100644
--- a/pandas/tests/io/formats/style/test_style.py
+++ b/pandas/tests/io/formats/style/test_style.py
@@ -1586,3 +1586,15 @@ def test_output_buffer(mi_styler, format):
# gh 47053
with tm.ensure_clean(f"delete_me.{format}") as f:
getattr(mi_styler, f"to_{format}")(f)
+
+
+def test_deprecation_warning_for_usage_of_aaply_map_index_method_of_styler_object():
+ # 56717 https://github.com/pandas-dev/pandas/issues/56717
+ df = DataFrame([[1, 2], [3, 4]], index=["A", "B"])
+ msg = "Styler.applymap_index has been deprecated. Use Styler.map_index instead."
+
+ def color_b(s):
+ return "background-color:yellow;"
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ df.style.applymap_index(color_b, axis="columns")
| Wrote a test for https://github.com/pandas-dev/pandas/issues/56717.
The test function name is test_deprecation_warning_for_usage_of_aaply_map_index_method_of_styler_object.
The original function under test is Styler.applymap_index in the style module.
The new test was appended to the test_style module file.
The test checks for the FutureWarning.
- [ ] closes #56717
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/56822 | 2024-01-11T02:49:39Z | 2024-01-11T16:48:43Z | 2024-01-11T16:48:43Z | 2024-01-11T16:48:50Z |
TST: Additions/updates to tests | diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py
index 6fa72bd48031c..10bad77bfe32f 100644
--- a/pandas/tests/io/formats/style/test_style.py
+++ b/pandas/tests/io/formats/style/test_style.py
@@ -1586,3 +1586,14 @@ def test_output_buffer(mi_styler, format):
# gh 47053
with tm.ensure_clean(f"delete_me.{format}") as f:
getattr(mi_styler, f"to_{format}")(f)
+
+
+def test_deprecation_warning_for_usage_of_aaply_map_index_method_of_styler_object():
+ #56717 https://github.com/pandas-dev/pandas/issues/56717
+ df = DataFrame([[1,2], [3,4]], index=["A", "B"])
+ msg = ("Styler.applymap_index has been deprecated. Use Styler.map_index instead.")
+
+ def color_b(s):
+ return np.where(s == "B", "background-color: yellow;", "")
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ df.style.applymap_index(color_b)
| Wrote a test for https://github.com/pandas-dev/pandas/issues/56717.
The test function name is test_deprecation_warning_for_usage_of_aaply_map_index_method_of_styler_object.
The original function under test is Styler.applymap_index in the style module.
The new test was appended to the test_style module file.
The test checks for the FutureWarning.
- [ ] closes #56717
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) | https://api.github.com/repos/pandas-dev/pandas/pulls/56821 | 2024-01-11T01:40:37Z | 2024-01-11T02:22:26Z | null | 2024-01-11T02:23:07Z |
Backport PR #55327 on branch 2.2.x (COMPAT: Fix warning with numba >= 0.58.0) | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 4265447f05b8b..5de5bd58bd35f 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -944,6 +944,7 @@ Other
- Bug in rendering a :class:`Series` with a :class:`MultiIndex` when one of the index level's names is 0 not having that name displayed (:issue:`55415`)
- Bug in the error message when assigning an empty :class:`DataFrame` to a column (:issue:`55956`)
- Bug when time-like strings were being cast to :class:`ArrowDtype` with ``pyarrow.time64`` type (:issue:`56463`)
+- Fixed a spurious deprecation warning from ``numba`` >= 0.58.0 when passing a numpy ufunc in :class:`pandas.core.window.Rolling.apply` with ``engine="numba"`` (:issue:`55247`)
.. ---------------------------------------------------------------------------
.. _whatsnew_220.contributors:
diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py
index b8d489179338b..4825c9fee24b1 100644
--- a/pandas/core/util/numba_.py
+++ b/pandas/core/util/numba_.py
@@ -1,11 +1,14 @@
"""Common utilities for Numba operations"""
from __future__ import annotations
+import types
from typing import (
TYPE_CHECKING,
Callable,
)
+import numpy as np
+
from pandas.compat._optional import import_optional_dependency
from pandas.errors import NumbaUtilError
@@ -83,6 +86,12 @@ def jit_user_function(func: Callable) -> Callable:
if numba.extending.is_jitted(func):
# Don't jit a user passed jitted function
numba_func = func
+ elif getattr(np, func.__name__, False) is func or isinstance(
+ func, types.BuiltinFunctionType
+ ):
+ # Not necessary to jit builtins or np functions
+ # This will mess up register_jitable
+ numba_func = func
else:
numba_func = numba.extending.register_jitable(func)
diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py
index b1cc7ec186f19..139e1ff7f65fd 100644
--- a/pandas/tests/window/test_numba.py
+++ b/pandas/tests/window/test_numba.py
@@ -446,3 +446,10 @@ def test_table_method_ewm(self, data, method, axis, nogil, parallel, nopython):
engine_kwargs=engine_kwargs, engine="numba"
)
tm.assert_frame_equal(result, expected)
+
+
+@td.skip_if_no("numba")
+def test_npfunc_no_warnings():
+ df = DataFrame({"col1": [1, 2, 3, 4, 5]})
+ with tm.assert_produces_warning(False):
+ df.col1.rolling(2).apply(np.prod, raw=True, engine="numba")
| Backport PR #55327: COMPAT: Fix warning with numba >= 0.58.0 | https://api.github.com/repos/pandas-dev/pandas/pulls/56820 | 2024-01-11T01:33:04Z | 2024-01-11T04:29:35Z | 2024-01-11T04:29:35Z | 2024-01-11T04:29:35Z |
Backport PR #56818 on branch 2.2.x (CI: Fix failing builds) | diff --git a/pandas/tests/io/parser/common/test_chunksize.py b/pandas/tests/io/parser/common/test_chunksize.py
index d5dc723e2c7c5..9f42cf674b0a7 100644
--- a/pandas/tests/io/parser/common/test_chunksize.py
+++ b/pandas/tests/io/parser/common/test_chunksize.py
@@ -220,14 +220,9 @@ def test_chunks_have_consistent_numerical_type(all_parsers, monkeypatch):
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
# Coercions should work without warnings.
- warn = None
- if parser.engine == "pyarrow":
- warn = DeprecationWarning
- depr_msg = "Passing a BlockManager to DataFrame|make_block is deprecated"
- with tm.assert_produces_warning(warn, match=depr_msg, check_stacklevel=False):
- with monkeypatch.context() as m:
- m.setattr(libparsers, "DEFAULT_BUFFER_HEURISTIC", heuristic)
- result = parser.read_csv(StringIO(data))
+ with monkeypatch.context() as m:
+ m.setattr(libparsers, "DEFAULT_BUFFER_HEURISTIC", heuristic)
+ result = parser.read_csv(StringIO(data))
assert type(result.a[0]) is np.float64
assert result.a.dtype == float
@@ -251,12 +246,8 @@ def test_warn_if_chunks_have_mismatched_type(all_parsers):
buf = StringIO(data)
if parser.engine == "pyarrow":
- df = parser.read_csv_check_warnings(
- DeprecationWarning,
- "Passing a BlockManager to DataFrame is deprecated|"
- "make_block is deprecated",
+ df = parser.read_csv(
buf,
- check_stacklevel=False,
)
else:
df = parser.read_csv_check_warnings(
diff --git a/pandas/tests/io/parser/common/test_read_errors.py b/pandas/tests/io/parser/common/test_read_errors.py
index db8b586d22fc0..f5a724bad4fa2 100644
--- a/pandas/tests/io/parser/common/test_read_errors.py
+++ b/pandas/tests/io/parser/common/test_read_errors.py
@@ -130,14 +130,9 @@ def test_catch_too_many_names(all_parsers):
else "Number of passed names did not match "
"number of header fields in the file"
)
- depr_msg = "Passing a BlockManager to DataFrame is deprecated"
- warn = None
- if parser.engine == "pyarrow":
- warn = DeprecationWarning
- with tm.assert_produces_warning(warn, match=depr_msg, check_stacklevel=False):
- with pytest.raises(ValueError, match=msg):
- parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
@skip_pyarrow # CSV parse error: Empty CSV file or block
@@ -168,13 +163,7 @@ def test_suppress_error_output(all_parsers):
data = "a\n1\n1,2,3\n4\n5,6,7"
expected = DataFrame({"a": [1, 4]})
- warn = None
- if parser.engine == "pyarrow":
- warn = DeprecationWarning
- msg = "Passing a BlockManager to DataFrame|make_block is deprecated"
-
- with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
- result = parser.read_csv(StringIO(data), on_bad_lines="skip")
+ result = parser.read_csv(StringIO(data), on_bad_lines="skip")
tm.assert_frame_equal(result, expected)
| Backport PR #56818: CI: Fix failing builds | https://api.github.com/repos/pandas-dev/pandas/pulls/56819 | 2024-01-11T00:26:29Z | 2024-01-11T02:23:04Z | 2024-01-11T02:23:04Z | 2024-01-11T02:23:04Z |
CI: Fix failing builds | diff --git a/pandas/tests/io/parser/common/test_chunksize.py b/pandas/tests/io/parser/common/test_chunksize.py
index d5dc723e2c7c5..9f42cf674b0a7 100644
--- a/pandas/tests/io/parser/common/test_chunksize.py
+++ b/pandas/tests/io/parser/common/test_chunksize.py
@@ -220,14 +220,9 @@ def test_chunks_have_consistent_numerical_type(all_parsers, monkeypatch):
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
# Coercions should work without warnings.
- warn = None
- if parser.engine == "pyarrow":
- warn = DeprecationWarning
- depr_msg = "Passing a BlockManager to DataFrame|make_block is deprecated"
- with tm.assert_produces_warning(warn, match=depr_msg, check_stacklevel=False):
- with monkeypatch.context() as m:
- m.setattr(libparsers, "DEFAULT_BUFFER_HEURISTIC", heuristic)
- result = parser.read_csv(StringIO(data))
+ with monkeypatch.context() as m:
+ m.setattr(libparsers, "DEFAULT_BUFFER_HEURISTIC", heuristic)
+ result = parser.read_csv(StringIO(data))
assert type(result.a[0]) is np.float64
assert result.a.dtype == float
@@ -251,12 +246,8 @@ def test_warn_if_chunks_have_mismatched_type(all_parsers):
buf = StringIO(data)
if parser.engine == "pyarrow":
- df = parser.read_csv_check_warnings(
- DeprecationWarning,
- "Passing a BlockManager to DataFrame is deprecated|"
- "make_block is deprecated",
+ df = parser.read_csv(
buf,
- check_stacklevel=False,
)
else:
df = parser.read_csv_check_warnings(
diff --git a/pandas/tests/io/parser/common/test_read_errors.py b/pandas/tests/io/parser/common/test_read_errors.py
index db8b586d22fc0..f5a724bad4fa2 100644
--- a/pandas/tests/io/parser/common/test_read_errors.py
+++ b/pandas/tests/io/parser/common/test_read_errors.py
@@ -130,14 +130,9 @@ def test_catch_too_many_names(all_parsers):
else "Number of passed names did not match "
"number of header fields in the file"
)
- depr_msg = "Passing a BlockManager to DataFrame is deprecated"
- warn = None
- if parser.engine == "pyarrow":
- warn = DeprecationWarning
- with tm.assert_produces_warning(warn, match=depr_msg, check_stacklevel=False):
- with pytest.raises(ValueError, match=msg):
- parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
@skip_pyarrow # CSV parse error: Empty CSV file or block
@@ -168,13 +163,7 @@ def test_suppress_error_output(all_parsers):
data = "a\n1\n1,2,3\n4\n5,6,7"
expected = DataFrame({"a": [1, 4]})
- warn = None
- if parser.engine == "pyarrow":
- warn = DeprecationWarning
- msg = "Passing a BlockManager to DataFrame|make_block is deprecated"
-
- with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
- result = parser.read_csv(StringIO(data), on_bad_lines="skip")
+ result = parser.read_csv(StringIO(data), on_bad_lines="skip")
tm.assert_frame_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56818 | 2024-01-10T22:47:23Z | 2024-01-11T00:26:20Z | 2024-01-11T00:26:20Z | 2024-01-11T01:14:03Z |
PERF: Join non unique | diff --git a/doc/source/whatsnew/v2.3.0.rst b/doc/source/whatsnew/v2.3.0.rst
index 7b53ddb3923f0..2dc05ac8057c8 100644
--- a/doc/source/whatsnew/v2.3.0.rst
+++ b/doc/source/whatsnew/v2.3.0.rst
@@ -101,6 +101,7 @@ Deprecations
Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
+- Performance improvement in :meth:`DataFrame.join` when left and/or right are non-unique and ``how`` is ``"left"``, ``"right"``, or ``"inner"`` (:issue:`56817`)
- Performance improvement in :meth:`Index.take` when ``indices`` is a full range indexer from zero to length of index (:issue:`56806`)
-
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 99114b4865af0..c14a635dc7b87 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4809,11 +4809,18 @@ def _join_non_unique(
left_idx, right_idx = get_join_indexers_non_unique(
self._values, other._values, how=how, sort=sort
)
- mask = left_idx == -1
- join_idx = self.take(left_idx)
- right = other.take(right_idx)
- join_index = join_idx.putmask(mask, right)
+ if how == "right":
+ join_index = other.take(right_idx)
+ else:
+ join_index = self.take(left_idx)
+
+ if how == "outer":
+ mask = left_idx == -1
+ if mask.any():
+ right = other.take(right_idx)
+ join_index = join_index.putmask(mask, right)
+
if isinstance(join_index, ABCMultiIndex) and how == "outer":
# test_join_index_levels
join_index = join_index._sort_levels_monotonic()
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v2.3.0.rst` file if fixing a bug or adding a new feature.
Seeing ~10% improvement when `how` is "left", "right", or "inner".
```
import pandas as pd
import numpy as np
idx1 = pd.Index(np.tile(np.arange(1000), 1000))
idx2 = pd.Index(np.arange(100))
%timeit idx1.join(idx2, how="left")
# 103 ms ± 1.61 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) -> main
# 90.6 ms ± 504 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) -> PR
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/56817 | 2024-01-10T22:10:34Z | 2024-01-11T16:51:01Z | 2024-01-11T16:51:01Z | 2024-01-11T16:51:10Z |
Backport PR #56788 on branch 2.2.x (Bug: Interchange protocol implementation does not allow for empty string columns) | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index b5df0a319bc18..3a4c9438dbc21 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -936,6 +936,7 @@ Other
- Bug in :func:`cut` and :func:`qcut` with ``datetime64`` dtype values with non-nanosecond units incorrectly returning nanosecond-unit bins (:issue:`56101`)
- Bug in :func:`cut` incorrectly allowing cutting of timezone-aware datetimes with timezone-naive bins (:issue:`54964`)
- Bug in :func:`infer_freq` and :meth:`DatetimeIndex.inferred_freq` with weekly frequencies and non-nanosecond resolutions (:issue:`55609`)
+- Bug in :func:`pd.api.interchange.from_dataframe` where it raised ``NotImplementedError`` when handling empty string columns (:issue:`56703`)
- Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55009`)
- Bug in :meth:`DataFrame.from_dict` which would always sort the rows of the created :class:`DataFrame`. (:issue:`55683`)
- Bug in :meth:`DataFrame.sort_index` when passing ``axis="columns"`` and ``ignore_index=True`` raising a ``ValueError`` (:issue:`56478`)
@@ -944,7 +945,6 @@ Other
- Bug in the error message when assigning an empty :class:`DataFrame` to a column (:issue:`55956`)
- Bug when time-like strings were being cast to :class:`ArrowDtype` with ``pyarrow.time64`` type (:issue:`56463`)
-
.. ---------------------------------------------------------------------------
.. _whatsnew_220.contributors:
diff --git a/pandas/core/interchange/column.py b/pandas/core/interchange/column.py
index 7f524d6823f30..ee1b5cd34a7f7 100644
--- a/pandas/core/interchange/column.py
+++ b/pandas/core/interchange/column.py
@@ -116,7 +116,7 @@ def dtype(self) -> tuple[DtypeKind, int, str, str]:
Endianness.NATIVE,
)
elif is_string_dtype(dtype):
- if infer_dtype(self._col) == "string":
+ if infer_dtype(self._col) in ("string", "empty"):
return (
DtypeKind.STRING,
8,
diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py
index 27ea8ccdd17b1..c7b13f9fd7b2d 100644
--- a/pandas/tests/interchange/test_impl.py
+++ b/pandas/tests/interchange/test_impl.py
@@ -364,6 +364,14 @@ def test_interchange_from_corrected_buffer_dtypes(monkeypatch) -> None:
pd.api.interchange.from_dataframe(df)
+def test_empty_string_column():
+ # https://github.com/pandas-dev/pandas/issues/56703
+ df = pd.DataFrame({"a": []}, dtype=str)
+ df2 = df.__dataframe__()
+ result = pd.api.interchange.from_dataframe(df2)
+ tm.assert_frame_equal(df, result)
+
+
def test_large_string():
# GH#56702
pytest.importorskip("pyarrow")
| Backport PR #56788: Bug: Interchange protocol implementation does not allow for empty string columns | https://api.github.com/repos/pandas-dev/pandas/pulls/56816 | 2024-01-10T21:30:59Z | 2024-01-10T22:40:55Z | 2024-01-10T22:40:55Z | 2024-01-10T22:40:55Z |
Backport PR #56481 on branch 2.2.x (Revert "DEPR: make_block (#56422)") | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index b5df0a319bc18..498e267dea965 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -662,7 +662,6 @@ Other Deprecations
- Changed :meth:`Timedelta.resolution_string` to return ``h``, ``min``, ``s``, ``ms``, ``us``, and ``ns`` instead of ``H``, ``T``, ``S``, ``L``, ``U``, and ``N``, for compatibility with respective deprecations in frequency aliases (:issue:`52536`)
- Deprecated :attr:`offsets.Day.delta`, :attr:`offsets.Hour.delta`, :attr:`offsets.Minute.delta`, :attr:`offsets.Second.delta`, :attr:`offsets.Milli.delta`, :attr:`offsets.Micro.delta`, :attr:`offsets.Nano.delta`, use ``pd.Timedelta(obj)`` instead (:issue:`55498`)
- Deprecated :func:`pandas.api.types.is_interval` and :func:`pandas.api.types.is_period`, use ``isinstance(obj, pd.Interval)`` and ``isinstance(obj, pd.Period)`` instead (:issue:`55264`)
-- Deprecated :func:`pd.core.internals.api.make_block`, use public APIs instead (:issue:`40226`)
- Deprecated :func:`read_gbq` and :meth:`DataFrame.to_gbq`. Use ``pandas_gbq.read_gbq`` and ``pandas_gbq.to_gbq`` instead https://pandas-gbq.readthedocs.io/en/latest/api.html (:issue:`55525`)
- Deprecated :meth:`.DataFrameGroupBy.fillna` and :meth:`.SeriesGroupBy.fillna`; use :meth:`.DataFrameGroupBy.ffill`, :meth:`.DataFrameGroupBy.bfill` for forward and backward filling or :meth:`.DataFrame.fillna` to fill with a single value (or the Series equivalents) (:issue:`55718`)
- Deprecated :meth:`DateOffset.is_anchored`, use ``obj.n == 1`` for non-Tick subclasses (for Tick this was always False) (:issue:`55388`)
@@ -722,6 +721,7 @@ Other Deprecations
- Deprecated the extension test classes ``BaseNoReduceTests``, ``BaseBooleanReduceTests``, and ``BaseNumericReduceTests``, use ``BaseReduceTests`` instead (:issue:`54663`)
- Deprecated the option ``mode.data_manager`` and the ``ArrayManager``; only the ``BlockManager`` will be available in future versions (:issue:`55043`)
- Deprecated the previous implementation of :class:`DataFrame.stack`; specify ``future_stack=True`` to adopt the future version (:issue:`53515`)
+-
.. ---------------------------------------------------------------------------
.. _whatsnew_220.performance:
diff --git a/pandas/core/internals/api.py b/pandas/core/internals/api.py
index e5ef44d07061e..b0b3937ca47ea 100644
--- a/pandas/core/internals/api.py
+++ b/pandas/core/internals/api.py
@@ -9,12 +9,10 @@
from __future__ import annotations
from typing import TYPE_CHECKING
-import warnings
import numpy as np
from pandas._libs.internals import BlockPlacement
-from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import pandas_dtype
from pandas.core.dtypes.dtypes import (
@@ -52,14 +50,6 @@ def make_block(
- Block.make_block_same_class
- Block.__init__
"""
- warnings.warn(
- # GH#40226
- "make_block is deprecated and will be removed in a future version. "
- "Use public APIs instead.",
- DeprecationWarning,
- stacklevel=find_stack_level(),
- )
-
if dtype is not None:
dtype = pandas_dtype(dtype)
@@ -123,6 +113,7 @@ def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int
def __getattr__(name: str):
# GH#55139
+ import warnings
if name in [
"Block",
diff --git a/pandas/tests/internals/test_api.py b/pandas/tests/internals/test_api.py
index f816cef38b9ab..1251a6ae97a1c 100644
--- a/pandas/tests/internals/test_api.py
+++ b/pandas/tests/internals/test_api.py
@@ -68,9 +68,7 @@ def test_deprecations(name):
def test_make_block_2d_with_dti():
# GH#41168
dti = pd.date_range("2012", periods=3, tz="UTC")
- msg = "make_block is deprecated"
- with tm.assert_produces_warning(DeprecationWarning, match=msg):
- blk = api.make_block(dti, placement=[0])
+ blk = api.make_block(dti, placement=[0])
assert blk.shape == (1, 3)
assert blk.values.shape == (1, 3)
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 2265522bc7ecb..ce88bae6e02f2 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -1383,11 +1383,9 @@ def test_validate_ndim():
values = np.array([1.0, 2.0])
placement = BlockPlacement(slice(2))
msg = r"Wrong number of dimensions. values.ndim != ndim \[1 != 2\]"
- depr_msg = "make_block is deprecated"
with pytest.raises(ValueError, match=msg):
- with tm.assert_produces_warning(DeprecationWarning, match=depr_msg):
- make_block(values, placement, ndim=2)
+ make_block(values, placement, ndim=2)
def test_block_shape():
@@ -1402,12 +1400,8 @@ def test_make_block_no_pandas_array(block_maker):
# https://github.com/pandas-dev/pandas/pull/24866
arr = pd.arrays.NumpyExtensionArray(np.array([1, 2]))
- warn = None if block_maker is not make_block else DeprecationWarning
- msg = "make_block is deprecated and will be removed in a future version"
-
# NumpyExtensionArray, no dtype
- with tm.assert_produces_warning(warn, match=msg):
- result = block_maker(arr, BlockPlacement(slice(len(arr))), ndim=arr.ndim)
+ result = block_maker(arr, BlockPlacement(slice(len(arr))), ndim=arr.ndim)
assert result.dtype.kind in ["i", "u"]
if block_maker is make_block:
@@ -1415,16 +1409,14 @@ def test_make_block_no_pandas_array(block_maker):
assert result.is_extension is False
# NumpyExtensionArray, NumpyEADtype
- with tm.assert_produces_warning(warn, match=msg):
- result = block_maker(arr, slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim)
+ result = block_maker(arr, slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim)
assert result.dtype.kind in ["i", "u"]
assert result.is_extension is False
# new_block no longer taked dtype keyword
# ndarray, NumpyEADtype
- with tm.assert_produces_warning(warn, match=msg):
- result = block_maker(
- arr.to_numpy(), slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim
- )
+ result = block_maker(
+ arr.to_numpy(), slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim
+ )
assert result.dtype.kind in ["i", "u"]
assert result.is_extension is False
diff --git a/pandas/tests/io/parser/common/test_chunksize.py b/pandas/tests/io/parser/common/test_chunksize.py
index 9660b283a491b..d5dc723e2c7c5 100644
--- a/pandas/tests/io/parser/common/test_chunksize.py
+++ b/pandas/tests/io/parser/common/test_chunksize.py
@@ -233,7 +233,6 @@ def test_chunks_have_consistent_numerical_type(all_parsers, monkeypatch):
assert result.a.dtype == float
-@pytest.mark.filterwarnings("ignore:make_block is deprecated:FutureWarning")
def test_warn_if_chunks_have_mismatched_type(all_parsers):
warning_type = None
parser = all_parsers
diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py
index d8f362039ba13..623657b412682 100644
--- a/pandas/tests/io/parser/test_parse_dates.py
+++ b/pandas/tests/io/parser/test_parse_dates.py
@@ -33,12 +33,9 @@
from pandas.io.parsers import read_csv
-pytestmark = [
- pytest.mark.filterwarnings(
- "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
- ),
- pytest.mark.filterwarnings("ignore:make_block is deprecated:DeprecationWarning"),
-]
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
| Backport PR #56481: Revert "DEPR: make_block (#56422)" | https://api.github.com/repos/pandas-dev/pandas/pulls/56814 | 2024-01-10T19:07:48Z | 2024-01-11T01:14:18Z | 2024-01-11T01:14:18Z | 2024-01-11T01:14:41Z |
Backport PR #56594 on branch 2.2.x (DEPR: the method is_anchored() for offsets) | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 9a9ac769a4893..b5df0a319bc18 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -665,11 +665,13 @@ Other Deprecations
- Deprecated :func:`pd.core.internals.api.make_block`, use public APIs instead (:issue:`40226`)
- Deprecated :func:`read_gbq` and :meth:`DataFrame.to_gbq`. Use ``pandas_gbq.read_gbq`` and ``pandas_gbq.to_gbq`` instead https://pandas-gbq.readthedocs.io/en/latest/api.html (:issue:`55525`)
- Deprecated :meth:`.DataFrameGroupBy.fillna` and :meth:`.SeriesGroupBy.fillna`; use :meth:`.DataFrameGroupBy.ffill`, :meth:`.DataFrameGroupBy.bfill` for forward and backward filling or :meth:`.DataFrame.fillna` to fill with a single value (or the Series equivalents) (:issue:`55718`)
+- Deprecated :meth:`DateOffset.is_anchored`, use ``obj.n == 1`` for non-Tick subclasses (for Tick this was always False) (:issue:`55388`)
- Deprecated :meth:`DatetimeArray.__init__` and :meth:`TimedeltaArray.__init__`, use :func:`array` instead (:issue:`55623`)
- Deprecated :meth:`Index.format`, use ``index.astype(str)`` or ``index.map(formatter)`` instead (:issue:`55413`)
- Deprecated :meth:`Series.ravel`, the underlying array is already 1D, so ravel is not necessary (:issue:`52511`)
- Deprecated :meth:`Series.resample` and :meth:`DataFrame.resample` with a :class:`PeriodIndex` (and the 'convention' keyword), convert to :class:`DatetimeIndex` (with ``.to_timestamp()``) before resampling instead (:issue:`53481`)
- Deprecated :meth:`Series.view`, use :meth:`Series.astype` instead to change the dtype (:issue:`20251`)
+- Deprecated :meth:`offsets.Tick.is_anchored`, use ``False`` instead (:issue:`55388`)
- Deprecated ``core.internals`` members ``Block``, ``ExtensionBlock``, and ``DatetimeTZBlock``, use public APIs instead (:issue:`55139`)
- Deprecated ``year``, ``month``, ``quarter``, ``day``, ``hour``, ``minute``, and ``second`` keywords in the :class:`PeriodIndex` constructor, use :meth:`PeriodIndex.from_fields` instead (:issue:`55960`)
- Deprecated accepting a type as an argument in :meth:`Index.view`, call without any arguments instead (:issue:`55709`)
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index b3788b6003e67..3a339171d0da2 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -756,11 +756,14 @@ cdef class BaseOffset:
raise ValueError(f"{self} is a non-fixed frequency")
def is_anchored(self) -> bool:
- # TODO: Does this make sense for the general case? It would help
- # if there were a canonical docstring for what is_anchored means.
+ # GH#55388
"""
Return boolean whether the frequency is a unit frequency (n=1).
+ .. deprecated:: 2.2.0
+ is_anchored is deprecated and will be removed in a future version.
+ Use ``obj.n == 1`` instead.
+
Examples
--------
>>> pd.DateOffset().is_anchored()
@@ -768,6 +771,12 @@ cdef class BaseOffset:
>>> pd.DateOffset(2).is_anchored()
False
"""
+ warnings.warn(
+ f"{type(self).__name__}.is_anchored is deprecated and will be removed "
+ f"in a future version, please use \'obj.n == 1\' instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
return self.n == 1
# ------------------------------------------------------------------
@@ -954,6 +963,27 @@ cdef class Tick(SingleConstructorOffset):
return True
def is_anchored(self) -> bool:
+ # GH#55388
+ """
+ Return False.
+
+ .. deprecated:: 2.2.0
+ is_anchored is deprecated and will be removed in a future version.
+ Use ``False`` instead.
+
+ Examples
+ --------
+ >>> pd.offsets.Hour().is_anchored()
+ False
+ >>> pd.offsets.Hour(2).is_anchored()
+ False
+ """
+ warnings.warn(
+ f"{type(self).__name__}.is_anchored is deprecated and will be removed "
+ f"in a future version, please use False instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
return False
# This is identical to BaseOffset.__hash__, but has to be redefined here
@@ -2663,6 +2693,13 @@ cdef class QuarterOffset(SingleConstructorOffset):
return f"{self._prefix}-{month}"
def is_anchored(self) -> bool:
+ warnings.warn(
+ f"{type(self).__name__}.is_anchored is deprecated and will be removed "
+ f"in a future version, please use \'obj.n == 1 "
+ f"and obj.startingMonth is not None\' instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
return self.n == 1 and self.startingMonth is not None
def is_on_offset(self, dt: datetime) -> bool:
@@ -3308,6 +3345,13 @@ cdef class Week(SingleConstructorOffset):
self._cache = state.pop("_cache", {})
def is_anchored(self) -> bool:
+ warnings.warn(
+ f"{type(self).__name__}.is_anchored is deprecated and will be removed "
+ f"in a future version, please use \'obj.n == 1 "
+ f"and obj.weekday is not None\' instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
return self.n == 1 and self.weekday is not None
@apply_wraps
@@ -3597,6 +3641,12 @@ cdef class FY5253Mixin(SingleConstructorOffset):
self.variation = state.pop("variation")
def is_anchored(self) -> bool:
+ warnings.warn(
+ f"{type(self).__name__}.is_anchored is deprecated and will be removed "
+ f"in a future version, please use \'obj.n == 1\' instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
return (
self.n == 1 and self.startingMonth is not None and self.weekday is not None
)
diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py
index d4d4a09c44d13..e8de59f84bcc6 100644
--- a/pandas/tests/indexes/interval/test_interval_range.py
+++ b/pandas/tests/indexes/interval/test_interval_range.py
@@ -84,9 +84,7 @@ def test_constructor_timestamp(self, closed, name, freq, periods, tz):
tm.assert_index_equal(result, expected)
# GH 20976: linspace behavior defined from start/end/periods
- if not breaks.freq.is_anchored() and tz is None:
- # matches expected only for non-anchored offsets and tz naive
- # (anchored/DST transitions cause unequal spacing in expected)
+ if not breaks.freq.n == 1 and tz is None:
result = interval_range(
start=start, end=end, periods=periods, name=name, closed=closed
)
diff --git a/pandas/tests/tseries/offsets/test_business_quarter.py b/pandas/tests/tseries/offsets/test_business_quarter.py
index 44a7f16ab039d..6d7a115054b7f 100644
--- a/pandas/tests/tseries/offsets/test_business_quarter.py
+++ b/pandas/tests/tseries/offsets/test_business_quarter.py
@@ -9,6 +9,7 @@
import pytest
+import pandas._testing as tm
from pandas.tests.tseries.offsets.common import (
assert_is_on_offset,
assert_offset_equal,
@@ -54,9 +55,12 @@ def test_repr(self):
assert repr(BQuarterBegin(startingMonth=1)) == expected
def test_is_anchored(self):
- assert BQuarterBegin(startingMonth=1).is_anchored()
- assert BQuarterBegin().is_anchored()
- assert not BQuarterBegin(2, startingMonth=1).is_anchored()
+ msg = "BQuarterBegin.is_anchored is deprecated "
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ assert BQuarterBegin(startingMonth=1).is_anchored()
+ assert BQuarterBegin().is_anchored()
+ assert not BQuarterBegin(2, startingMonth=1).is_anchored()
def test_offset_corner_case(self):
# corner
@@ -177,9 +181,12 @@ def test_repr(self):
assert repr(BQuarterEnd(startingMonth=1)) == expected
def test_is_anchored(self):
- assert BQuarterEnd(startingMonth=1).is_anchored()
- assert BQuarterEnd().is_anchored()
- assert not BQuarterEnd(2, startingMonth=1).is_anchored()
+ msg = "BQuarterEnd.is_anchored is deprecated "
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ assert BQuarterEnd(startingMonth=1).is_anchored()
+ assert BQuarterEnd().is_anchored()
+ assert not BQuarterEnd(2, startingMonth=1).is_anchored()
def test_offset_corner_case(self):
# corner
diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py
index 7f8c34bc6832e..824e66a1ddef1 100644
--- a/pandas/tests/tseries/offsets/test_fiscal.py
+++ b/pandas/tests/tseries/offsets/test_fiscal.py
@@ -7,6 +7,7 @@
import pytest
from pandas import Timestamp
+import pandas._testing as tm
from pandas.tests.tseries.offsets.common import (
WeekDay,
assert_is_on_offset,
@@ -295,15 +296,18 @@ def test_apply(self):
class TestFY5253LastOfMonthQuarter:
def test_is_anchored(self):
- assert makeFY5253LastOfMonthQuarter(
- startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4
- ).is_anchored()
- assert makeFY5253LastOfMonthQuarter(
- weekday=WeekDay.SAT, startingMonth=3, qtr_with_extra_week=4
- ).is_anchored()
- assert not makeFY5253LastOfMonthQuarter(
- 2, startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4
- ).is_anchored()
+ msg = "FY5253Quarter.is_anchored is deprecated "
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ assert makeFY5253LastOfMonthQuarter(
+ startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4
+ ).is_anchored()
+ assert makeFY5253LastOfMonthQuarter(
+ weekday=WeekDay.SAT, startingMonth=3, qtr_with_extra_week=4
+ ).is_anchored()
+ assert not makeFY5253LastOfMonthQuarter(
+ 2, startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4
+ ).is_anchored()
def test_equality(self):
assert makeFY5253LastOfMonthQuarter(
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index ddf56e68b1611..62afb8b83d576 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -625,8 +625,11 @@ def test_default_constructor(self, dt):
assert (dt + DateOffset(2)) == datetime(2008, 1, 4)
def test_is_anchored(self):
- assert not DateOffset(2).is_anchored()
- assert DateOffset(1).is_anchored()
+ msg = "DateOffset.is_anchored is deprecated "
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ assert not DateOffset(2).is_anchored()
+ assert DateOffset(1).is_anchored()
def test_copy(self):
assert DateOffset(months=2).copy() == DateOffset(months=2)
diff --git a/pandas/tests/tseries/offsets/test_quarter.py b/pandas/tests/tseries/offsets/test_quarter.py
index d183645da507d..5fd3ba0a5fb87 100644
--- a/pandas/tests/tseries/offsets/test_quarter.py
+++ b/pandas/tests/tseries/offsets/test_quarter.py
@@ -9,6 +9,7 @@
import pytest
+import pandas._testing as tm
from pandas.tests.tseries.offsets.common import (
assert_is_on_offset,
assert_offset_equal,
@@ -53,9 +54,12 @@ def test_repr(self):
assert repr(QuarterBegin(startingMonth=1)) == expected
def test_is_anchored(self):
- assert QuarterBegin(startingMonth=1).is_anchored()
- assert QuarterBegin().is_anchored()
- assert not QuarterBegin(2, startingMonth=1).is_anchored()
+ msg = "QuarterBegin.is_anchored is deprecated "
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ assert QuarterBegin(startingMonth=1).is_anchored()
+ assert QuarterBegin().is_anchored()
+ assert not QuarterBegin(2, startingMonth=1).is_anchored()
def test_offset_corner_case(self):
# corner
@@ -161,9 +165,12 @@ def test_repr(self):
assert repr(QuarterEnd(startingMonth=1)) == expected
def test_is_anchored(self):
- assert QuarterEnd(startingMonth=1).is_anchored()
- assert QuarterEnd().is_anchored()
- assert not QuarterEnd(2, startingMonth=1).is_anchored()
+ msg = "QuarterEnd.is_anchored is deprecated "
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ assert QuarterEnd(startingMonth=1).is_anchored()
+ assert QuarterEnd().is_anchored()
+ assert not QuarterEnd(2, startingMonth=1).is_anchored()
def test_offset_corner_case(self):
# corner
diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py
index b68b91826bc6f..399b7038d3426 100644
--- a/pandas/tests/tseries/offsets/test_ticks.py
+++ b/pandas/tests/tseries/offsets/test_ticks.py
@@ -339,7 +339,10 @@ def test_tick_equalities(cls):
@pytest.mark.parametrize("cls", tick_classes)
def test_tick_offset(cls):
- assert not cls().is_anchored()
+ msg = f"{cls.__name__}.is_anchored is deprecated "
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ assert not cls().is_anchored()
@pytest.mark.parametrize("cls", tick_classes)
diff --git a/pandas/tests/tseries/offsets/test_week.py b/pandas/tests/tseries/offsets/test_week.py
index f42ff091af277..0cd6f769769ae 100644
--- a/pandas/tests/tseries/offsets/test_week.py
+++ b/pandas/tests/tseries/offsets/test_week.py
@@ -21,6 +21,7 @@
WeekOfMonth,
)
+import pandas._testing as tm
from pandas.tests.tseries.offsets.common import (
WeekDay,
assert_is_on_offset,
@@ -42,10 +43,13 @@ def test_corner(self):
Week(weekday=-1)
def test_is_anchored(self):
- assert Week(weekday=0).is_anchored()
- assert not Week().is_anchored()
- assert not Week(2, weekday=2).is_anchored()
- assert not Week(2).is_anchored()
+ msg = "Week.is_anchored is deprecated "
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ assert Week(weekday=0).is_anchored()
+ assert not Week().is_anchored()
+ assert not Week(2, weekday=2).is_anchored()
+ assert not Week(2).is_anchored()
offset_cases = []
# not business week
| Backport PR #56594: DEPR: the method is_anchored() for offsets | https://api.github.com/repos/pandas-dev/pandas/pulls/56813 | 2024-01-10T16:40:03Z | 2024-01-10T18:07:54Z | 2024-01-10T18:07:54Z | 2024-01-10T18:07:54Z |
BLD: Pin numpy on 2.2.x | diff --git a/pyproject.toml b/pyproject.toml
index 5e65edf81f9c7..68a3dca188464 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,9 +30,9 @@ authors = [
license = {file = 'LICENSE'}
requires-python = '>=3.9'
dependencies = [
- "numpy>=1.22.4; python_version<'3.11'",
- "numpy>=1.23.2; python_version=='3.11'",
- "numpy>=1.26.0; python_version>='3.12'",
+ "numpy>=1.22.4,<2; python_version<'3.11'",
+ "numpy>=1.23.2,<2; python_version=='3.11'",
+ "numpy>=1.26.0,<2; python_version>='3.12'",
"python-dateutil>=2.8.2",
"pytz>=2020.1",
"tzdata>=2022.7"
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56812 | 2024-01-10T16:21:09Z | 2024-01-10T17:53:27Z | 2024-01-10T17:53:27Z | 2024-01-10T17:53:29Z |
DOC: missing closing bracket | diff --git a/doc/source/development/contributing_docstring.rst b/doc/source/development/contributing_docstring.rst
index 87aecb6936c9c..e2881c1087e60 100644
--- a/doc/source/development/contributing_docstring.rst
+++ b/doc/source/development/contributing_docstring.rst
@@ -939,7 +939,7 @@ Each shared docstring will have a base template with variables, like
Finally, docstrings can also be appended to with the ``doc`` decorator.
In this example, we'll create a parent docstring normally (this is like
-``pandas.core.generic.NDFrame``. Then we'll have two children (like
+``pandas.core.generic.NDFrame``). Then we'll have two children (like
``pandas.core.series.Series`` and ``pandas.core.frame.DataFrame``). We'll
substitute the class names in this docstring.
| Added a missing closing bracket in "contributing_docstring.rst" | https://api.github.com/repos/pandas-dev/pandas/pulls/56811 | 2024-01-10T15:36:36Z | 2024-01-10T16:00:09Z | 2024-01-10T16:00:09Z | 2024-01-11T09:10:45Z |
Backport PR #56757 on branch 2.2.x (ENH: Implement interpolation for arrow and masked dtypes) | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index e244794664b34..51f3dc025864a 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -343,6 +343,7 @@ Other enhancements
- :meth:`ExtensionArray.duplicated` added to allow extension type implementations of the ``duplicated`` method (:issue:`55255`)
- :meth:`Series.ffill`, :meth:`Series.bfill`, :meth:`DataFrame.ffill`, and :meth:`DataFrame.bfill` have gained the argument ``limit_area``; 3rd party :class:`.ExtensionArray` authors need to add this argument to the method ``_pad_or_backfill`` (:issue:`56492`)
- Allow passing ``read_only``, ``data_only`` and ``keep_links`` arguments to openpyxl using ``engine_kwargs`` of :func:`read_excel` (:issue:`55027`)
+- Implement :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` for :class:`ArrowDtype` and masked dtypes (:issue:`56267`)
- Implement masked algorithms for :meth:`Series.value_counts` (:issue:`54984`)
- Implemented :meth:`Series.dt` methods and attributes for :class:`ArrowDtype` with ``pyarrow.duration`` type (:issue:`52284`)
- Implemented :meth:`Series.str.extract` for :class:`ArrowDtype` (:issue:`56268`)
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 3858ce4cf0ea1..a5ce46ed612f3 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -182,6 +182,7 @@ def floordiv_compat(
AxisInt,
Dtype,
FillnaOptions,
+ InterpolateOptions,
Iterator,
NpDtype,
NumpySorter,
@@ -2048,6 +2049,45 @@ def _maybe_convert_setitem_value(self, value):
raise TypeError(msg) from err
return value
+ def interpolate(
+ self,
+ *,
+ method: InterpolateOptions,
+ axis: int,
+ index,
+ limit,
+ limit_direction,
+ limit_area,
+ copy: bool,
+ **kwargs,
+ ) -> Self:
+ """
+ See NDFrame.interpolate.__doc__.
+ """
+ # NB: we return type(self) even if copy=False
+ mask = self.isna()
+ if self.dtype.kind == "f":
+ data = self._pa_array.to_numpy()
+ elif self.dtype.kind in "iu":
+ data = self.to_numpy(dtype="f8", na_value=0.0)
+ else:
+ raise NotImplementedError(
+ f"interpolate is not implemented for dtype={self.dtype}"
+ )
+
+ missing.interpolate_2d_inplace(
+ data,
+ method=method,
+ axis=0,
+ index=index,
+ limit=limit,
+ limit_direction=limit_direction,
+ limit_area=limit_area,
+ mask=mask,
+ **kwargs,
+ )
+ return type(self)(self._box_pa_array(pa.array(data, mask=mask)))
+
@classmethod
def _if_else(
cls,
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 545d45e450f3f..234d96e53a67c 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -22,6 +22,7 @@
AxisInt,
DtypeObj,
FillnaOptions,
+ InterpolateOptions,
NpDtype,
PositionalIndexer,
Scalar,
@@ -98,6 +99,7 @@
NumpySorter,
NumpyValueArrayLike,
)
+ from pandas.core.arrays import FloatingArray
from pandas.compat.numpy import function as nv
@@ -1491,6 +1493,58 @@ def all(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
else:
return self.dtype.na_value
+ def interpolate(
+ self,
+ *,
+ method: InterpolateOptions,
+ axis: int,
+ index,
+ limit,
+ limit_direction,
+ limit_area,
+ copy: bool,
+ **kwargs,
+ ) -> FloatingArray:
+ """
+ See NDFrame.interpolate.__doc__.
+ """
+ # NB: we return type(self) even if copy=False
+ if self.dtype.kind == "f":
+ if copy:
+ data = self._data.copy()
+ mask = self._mask.copy()
+ else:
+ data = self._data
+ mask = self._mask
+ elif self.dtype.kind in "iu":
+ copy = True
+ data = self._data.astype("f8")
+ mask = self._mask.copy()
+ else:
+ raise NotImplementedError(
+ f"interpolate is not implemented for dtype={self.dtype}"
+ )
+
+ missing.interpolate_2d_inplace(
+ data,
+ method=method,
+ axis=0,
+ index=index,
+ limit=limit,
+ limit_direction=limit_direction,
+ limit_area=limit_area,
+ mask=mask,
+ **kwargs,
+ )
+ if not copy:
+ return self # type: ignore[return-value]
+ if self.dtype.kind == "f":
+ return type(self)._simple_new(data, mask) # type: ignore[return-value]
+ else:
+ from pandas.core.arrays import FloatingArray
+
+ return FloatingArray._simple_new(data, mask)
+
def _accumulate(
self, name: str, *, skipna: bool = True, **kwargs
) -> BaseMaskedArray:
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index ff45662d0bdc8..c016aab8ad074 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -349,6 +349,7 @@ def interpolate_2d_inplace(
limit_direction: str = "forward",
limit_area: str | None = None,
fill_value: Any | None = None,
+ mask=None,
**kwargs,
) -> None:
"""
@@ -396,6 +397,7 @@ def func(yvalues: np.ndarray) -> None:
limit_area=limit_area_validated,
fill_value=fill_value,
bounds_error=False,
+ mask=mask,
**kwargs,
)
@@ -440,6 +442,7 @@ def _interpolate_1d(
fill_value: Any | None = None,
bounds_error: bool = False,
order: int | None = None,
+ mask=None,
**kwargs,
) -> None:
"""
@@ -453,8 +456,10 @@ def _interpolate_1d(
-----
Fills 'yvalues' in-place.
"""
-
- invalid = isna(yvalues)
+ if mask is not None:
+ invalid = mask
+ else:
+ invalid = isna(yvalues)
valid = ~invalid
if not valid.any():
@@ -531,7 +536,10 @@ def _interpolate_1d(
**kwargs,
)
- if is_datetimelike:
+ if mask is not None:
+ mask[:] = False
+ mask[preserve_nans] = True
+ elif is_datetimelike:
yvalues[preserve_nans] = NaT.value
else:
yvalues[preserve_nans] = np.nan
diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py
index e0641fcb65bd3..252b950004bea 100644
--- a/pandas/tests/frame/methods/test_interpolate.py
+++ b/pandas/tests/frame/methods/test_interpolate.py
@@ -508,8 +508,41 @@ def test_interpolate_empty_df(self):
assert result is None
tm.assert_frame_equal(df, expected)
- def test_interpolate_ea_raise(self):
+ def test_interpolate_ea(self, any_int_ea_dtype):
# GH#55347
- df = DataFrame({"a": [1, None, 2]}, dtype="Int64")
- with pytest.raises(NotImplementedError, match="does not implement"):
- df.interpolate()
+ df = DataFrame({"a": [1, None, None, None, 3]}, dtype=any_int_ea_dtype)
+ orig = df.copy()
+ result = df.interpolate(limit=2)
+ expected = DataFrame({"a": [1, 1.5, 2.0, None, 3]}, dtype="Float64")
+ tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(df, orig)
+
+ @pytest.mark.parametrize(
+ "dtype",
+ [
+ "Float64",
+ "Float32",
+ pytest.param("float32[pyarrow]", marks=td.skip_if_no("pyarrow")),
+ pytest.param("float64[pyarrow]", marks=td.skip_if_no("pyarrow")),
+ ],
+ )
+ def test_interpolate_ea_float(self, dtype):
+ # GH#55347
+ df = DataFrame({"a": [1, None, None, None, 3]}, dtype=dtype)
+ orig = df.copy()
+ result = df.interpolate(limit=2)
+ expected = DataFrame({"a": [1, 1.5, 2.0, None, 3]}, dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(df, orig)
+
+ @pytest.mark.parametrize(
+ "dtype",
+ ["int64", "uint64", "int32", "int16", "int8", "uint32", "uint16", "uint8"],
+ )
+ def test_interpolate_arrow(self, dtype):
+ # GH#55347
+ pytest.importorskip("pyarrow")
+ df = DataFrame({"a": [1, None, None, None, 3]}, dtype=dtype + "[pyarrow]")
+ result = df.interpolate(limit=2)
+ expected = DataFrame({"a": [1, 1.5, 2.0, None, 3]}, dtype="float64[pyarrow]")
+ tm.assert_frame_equal(result, expected)
| #56757 | https://api.github.com/repos/pandas-dev/pandas/pulls/56809 | 2024-01-10T12:13:16Z | 2024-01-10T16:27:15Z | 2024-01-10T16:27:15Z | 2024-01-10T16:27:53Z |
Removed describe, loc and iloc methods from code_checks.sh | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 16500aade0476..38dccff6e2bdc 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -151,10 +151,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.io.formats.style.Styler.background_gradient \
pandas.io.formats.style.Styler.text_gradient \
pandas.DataFrame.values \
- pandas.DataFrame.loc \
- pandas.DataFrame.iloc \
pandas.DataFrame.groupby \
- pandas.DataFrame.describe \
pandas.DataFrame.skew \
pandas.DataFrame.var \
pandas.DataFrame.idxmax \
| **PR Summary**
Checked if validation docstrings passes for :
- pandas.DataFrame.loc by running >python scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.loc
- pandas.DataFrame.iloc by running >python scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.iloc
- pandas.DataFrame.describe by running >python scripts/validate_docstrings.py --format=actions --errors=EX03 pandas.DataFrame.describe
*OUTPUT*
1. pandas.DataFrame.describe
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.describe" correct. :)`
```
2. pandas.DataFrame.iloc
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.iloc" correct. :)
```
3. pandas.DataFrame.iloc
```
################################################################################
################################## Validation ##################################
################################################################################
Docstring for "pandas.DataFrame.loc" correct. :)
```
**PR checklist**
- [ ] xref #56804
- [x] Tests passed
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56808 | 2024-01-10T11:47:08Z | 2024-01-10T18:34:26Z | 2024-01-10T18:34:26Z | 2024-01-10T18:34:59Z |
'Backport PR #56146: BUG raise pdep6 warning for loc full setter' | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index e244794664b34..9d577aa5ac426 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -817,6 +817,7 @@ Conversion
- Bug in :meth:`DataFrame.astype` when called with ``str`` on unpickled array - the array might change in-place (:issue:`54654`)
- Bug in :meth:`DataFrame.astype` where ``errors="ignore"`` had no effect for extension types (:issue:`54654`)
- Bug in :meth:`Series.convert_dtypes` not converting all NA column to ``null[pyarrow]`` (:issue:`55346`)
+- Bug in ``DataFrame.loc`` was not throwing "incompatible dtype warning" (see `PDEP6 <https://pandas.pydata.org/pdeps/0006-ban-upcasting.html>`_) when assigning a ``Series`` with a different dtype using a full column setter (e.g. ``df.loc[:, 'a'] = incompatible_value``) (:issue:`39584`)
Strings
^^^^^^^
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 4be7e17035128..934ba3a4d7f29 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -2141,6 +2141,26 @@ def _setitem_single_column(self, loc: int, value, plane_indexer) -> None:
# If we're setting an entire column and we can't do it inplace,
# then we can use value's dtype (or inferred dtype)
# instead of object
+ dtype = self.obj.dtypes.iloc[loc]
+ if dtype not in (np.void, object) and not self.obj.empty:
+ # - Exclude np.void, as that is a special case for expansion.
+ # We want to warn for
+ # df = pd.DataFrame({'a': [1, 2]})
+ # df.loc[:, 'a'] = .3
+ # but not for
+ # df = pd.DataFrame({'a': [1, 2]})
+ # df.loc[:, 'b'] = .3
+ # - Exclude `object`, as then no upcasting happens.
+ # - Exclude empty initial object with enlargement,
+ # as then there's nothing to be inconsistent with.
+ warnings.warn(
+ f"Setting an item of incompatible dtype is deprecated "
+ "and will raise in a future error of pandas. "
+ f"Value '{value}' has dtype incompatible with {dtype}, "
+ "please explicitly cast to a compatible dtype first.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
self.obj.isetitem(loc, value)
else:
# set value into the column (first attempting to operate inplace, then
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 06fd9ebe47eae..70a27300bd60f 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -499,6 +499,9 @@ def coerce_to_target_dtype(self, other, warn_on_upcast: bool = False) -> Block:
and is_integer_dtype(self.values.dtype)
and isna(other)
and other is not NaT
+ and not (
+ isinstance(other, (np.datetime64, np.timedelta64)) and np.isnat(other)
+ )
):
warn_on_upcast = False
elif (
diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py
index 2681c07f01990..479fa148f994a 100644
--- a/pandas/tests/copy_view/test_indexing.py
+++ b/pandas/tests/copy_view/test_indexing.py
@@ -1144,11 +1144,16 @@ def test_set_value_copy_only_necessary_column(
df_orig = df.copy()
view = df[:]
- if val == "a" and indexer[0] != slice(None):
+ if val == "a" and not warn_copy_on_write:
with tm.assert_produces_warning(
FutureWarning, match="Setting an item of incompatible dtype is deprecated"
):
indexer_func(df)[indexer] = val
+ if val == "a" and warn_copy_on_write:
+ with tm.assert_produces_warning(
+ FutureWarning, match="incompatible dtype|Setting a value on a view"
+ ):
+ indexer_func(df)[indexer] = val
else:
with tm.assert_cow_warning(warn_copy_on_write and val == 100):
indexer_func(df)[indexer] = val
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 97e7ae15c6c63..22d9c7f26a57c 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -949,7 +949,8 @@ def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
df2 = df.copy()
- df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
expected = df.reindex(columns=["A", "B"])
expected += 0.5
expected["C"] = df["C"]
@@ -1387,20 +1388,20 @@ def test_loc_expand_empty_frame_keep_midx_names(self):
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
- "val, idxr, warn",
+ "val, idxr",
[
- ("x", "a", None), # TODO: this should warn as well
- ("x", ["a"], None), # TODO: this should warn as well
- (1, "a", None), # TODO: this should warn as well
- (1, ["a"], FutureWarning),
+ ("x", "a"),
+ ("x", ["a"]),
+ (1, "a"),
+ (1, ["a"]),
],
)
- def test_loc_setitem_rhs_frame(self, idxr, val, warn):
+ def test_loc_setitem_rhs_frame(self, idxr, val):
# GH#47578
df = DataFrame({"a": [1, 2]})
with tm.assert_produces_warning(
- warn, match="Setting an item of incompatible dtype"
+ FutureWarning, match="Setting an item of incompatible dtype"
):
df.loc[:, idxr] = DataFrame({"a": [val, 11]}, index=[1, 2])
expected = DataFrame({"a": [np.nan, val]})
@@ -1996,7 +1997,7 @@ def _check_setitem_invalid(self, df, invalid, indexer, warn):
np.datetime64("NaT"),
np.timedelta64("NaT"),
]
- _indexers = [0, [0], slice(0, 1), [True, False, False]]
+ _indexers = [0, [0], slice(0, 1), [True, False, False], slice(None, None, None)]
@pytest.mark.parametrize(
"invalid", _invalid_scalars + [1, 1.0, np.int64(1), np.float64(1)]
@@ -2010,7 +2011,7 @@ def test_setitem_validation_scalar_bool(self, invalid, indexer):
@pytest.mark.parametrize("indexer", _indexers)
def test_setitem_validation_scalar_int(self, invalid, any_int_numpy_dtype, indexer):
df = DataFrame({"a": [1, 2, 3]}, dtype=any_int_numpy_dtype)
- if isna(invalid) and invalid is not pd.NaT:
+ if isna(invalid) and invalid is not pd.NaT and not np.isnat(invalid):
warn = None
else:
warn = FutureWarning
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index e802a56ecbc81..99233d3cd4cf3 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -1381,3 +1381,23 @@ def test_frame_setitem_empty_dataframe(self):
index=dti[:0],
)
tm.assert_frame_equal(df, expected)
+
+
+def test_full_setter_loc_incompatible_dtype():
+ # https://github.com/pandas-dev/pandas/issues/55791
+ df = DataFrame({"a": [1, 2]})
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, "a"] = True
+ expected = DataFrame({"a": [True, True]})
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame({"a": [1, 2]})
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, "a"] = {0: 3.5, 1: 4.5}
+ expected = DataFrame({"a": [3.5, 4.5]})
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame({"a": [1, 2]})
+ df.loc[:, "a"] = {0: 3, 1: 4}
+ expected = DataFrame({"a": [3, 4]})
+ tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/frame/methods/test_update.py b/pandas/tests/frame/methods/test_update.py
index 7c7a0d23ff75f..20ba550beeb30 100644
--- a/pandas/tests/frame/methods/test_update.py
+++ b/pandas/tests/frame/methods/test_update.py
@@ -160,11 +160,8 @@ def test_update_with_different_dtype(self, using_copy_on_write):
# GH#3217
df = DataFrame({"a": [1, 3], "b": [np.nan, 2]})
df["c"] = np.nan
- if using_copy_on_write:
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
df.update({"c": Series(["foo"], index=[0])})
- else:
- with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
- df["c"].update(Series(["foo"], index=[0]))
expected = DataFrame(
{
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 6e818d79d5ba8..acd0675fd43ec 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2857,7 +2857,7 @@ def test_dict_data_arrow_column_expansion(self, key_val, col_vals, col_type):
)
result = DataFrame({key_val: [1, 2]}, columns=cols)
expected = DataFrame([[1, np.nan], [2, np.nan]], columns=cols)
- expected.iloc[:, 1] = expected.iloc[:, 1].astype(object)
+ expected.isetitem(1, expected.iloc[:, 1].astype(object))
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 409eca42f404b..43dd3812e8b7d 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -535,7 +535,8 @@ def test_iloc_setitem_frame_duplicate_columns_multiple_blocks(
# if the assigned values cannot be held by existing integer arrays,
# we cast
- df.iloc[:, 0] = df.iloc[:, 0] + 0.5
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.iloc[:, 0] = df.iloc[:, 0] + 0.5
if not using_array_manager:
assert len(df._mgr.blocks) == 2
@@ -1471,6 +1472,7 @@ def test_iloc_setitem_pure_position_based(self):
def test_iloc_nullable_int64_size_1_nan(self):
# GH 31861
result = DataFrame({"a": ["test"], "b": [np.nan]})
- result.loc[:, "b"] = result.loc[:, "b"].astype("Int64")
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ result.loc[:, "b"] = result.loc[:, "b"].astype("Int64")
expected = DataFrame({"a": ["test"], "b": array([NA], dtype="Int64")})
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index fb0adc56c401b..61c44c8a2a8f4 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -584,7 +584,8 @@ def test_loc_setitem_consistency(self, frame_for_consistency, val):
}
)
df = frame_for_consistency.copy()
- df.loc[:, "date"] = val
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, "date"] = val
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_dt64_to_str(self, frame_for_consistency):
@@ -598,7 +599,8 @@ def test_loc_setitem_consistency_dt64_to_str(self, frame_for_consistency):
}
)
df = frame_for_consistency.copy()
- df.loc[:, "date"] = "foo"
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, "date"] = "foo"
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_dt64_to_float(self, frame_for_consistency):
@@ -611,14 +613,16 @@ def test_loc_setitem_consistency_dt64_to_float(self, frame_for_consistency):
}
)
df = frame_for_consistency.copy()
- df.loc[:, "date"] = 1.0
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, "date"] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_single_row(self):
# GH 15494
# setting on frame with single row
df = DataFrame({"date": Series([Timestamp("20180101")])})
- df.loc[:, "date"] = "string"
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, "date"] = "string"
expected = DataFrame({"date": Series(["string"])})
tm.assert_frame_equal(df, expected)
@@ -678,9 +682,10 @@ def test_loc_setitem_consistency_slice_column_len(self):
# timedelta64[m] -> float, so this cannot be done inplace, so
# no warning
- df.loc[:, ("Respondent", "Duration")] = df.loc[
- :, ("Respondent", "Duration")
- ] / Timedelta(60_000_000_000)
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ df.loc[:, ("Respondent", "Duration")] = df.loc[
+ :, ("Respondent", "Duration")
+ ] / Timedelta(60_000_000_000)
expected = Series(
[23.0, 12.0, 14.0, 36.0], index=df.index, name=("Respondent", "Duration")
@@ -1487,7 +1492,11 @@ def test_loc_setitem_datetimeindex_tz(self, idxer, tz_naive_fixture):
# if result started off with object dtype, then the .loc.__setitem__
# below would retain object dtype
result = DataFrame(index=idx, columns=["var"], dtype=np.float64)
- result.loc[:, idxer] = expected
+ with tm.assert_produces_warning(
+ FutureWarning if idxer == "var" else None, match="incompatible dtype"
+ ):
+ # See https://github.com/pandas-dev/pandas/issues/56223
+ result.loc[:, idxer] = expected
tm.assert_frame_equal(result, expected)
def test_loc_setitem_time_key(self, using_array_manager):
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 0eefb0b52c483..1da27ad173235 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -179,7 +179,7 @@ def test_frame_non_unique_columns(self, orient, data):
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
- expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
+ expected.isetitem(0, expected.iloc[:, 0].astype(np.int64) // 1000000)
elif orient == "split":
expected = df
expected.columns = ["x", "x.1"]
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index ab8d22e567d27..27959609422f3 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -2984,9 +2984,9 @@ def test_merge_empty_frames_column_order(left_empty, right_empty):
if left_empty and right_empty:
expected = expected.iloc[:0]
elif left_empty:
- expected.loc[:, "B"] = np.nan
+ expected["B"] = np.nan
elif right_empty:
- expected.loc[:, ["C", "D"]] = np.nan
+ expected[["C", "D"]] = np.nan
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index c52e47a812183..f4992b758af74 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -491,7 +491,7 @@ def _check_setitem_invalid(self, ser, invalid, indexer, warn):
np.datetime64("NaT"),
np.timedelta64("NaT"),
]
- _indexers = [0, [0], slice(0, 1), [True, False, False]]
+ _indexers = [0, [0], slice(0, 1), [True, False, False], slice(None, None, None)]
@pytest.mark.parametrize(
"invalid", _invalid_scalars + [1, 1.0, np.int64(1), np.float64(1)]
@@ -505,7 +505,7 @@ def test_setitem_validation_scalar_bool(self, invalid, indexer):
@pytest.mark.parametrize("indexer", _indexers)
def test_setitem_validation_scalar_int(self, invalid, any_int_numpy_dtype, indexer):
ser = Series([1, 2, 3], dtype=any_int_numpy_dtype)
- if isna(invalid) and invalid is not NaT:
+ if isna(invalid) and invalid is not NaT and not np.isnat(invalid):
warn = None
else:
warn = FutureWarning
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56807 | 2024-01-10T10:19:15Z | 2024-01-10T12:47:41Z | 2024-01-10T12:47:41Z | 2024-01-10T12:47:41Z |
PERF: Index.take to check for full range indices | diff --git a/doc/source/whatsnew/v2.3.0.rst b/doc/source/whatsnew/v2.3.0.rst
index 2b5280346b63b..7b53ddb3923f0 100644
--- a/doc/source/whatsnew/v2.3.0.rst
+++ b/doc/source/whatsnew/v2.3.0.rst
@@ -101,7 +101,7 @@ Deprecations
Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
--
+- Performance improvement in :meth:`Index.take` when ``indices`` is a full range indexer from zero to length of index (:issue:`56806`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 74c1f165ac06c..99114b4865af0 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1157,6 +1157,9 @@ def take(
indices = ensure_platform_int(indices)
allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)
+ if indices.ndim == 1 and lib.is_range_indexer(indices, len(self)):
+ return self.copy()
+
# Note: we discard fill_value and use self._na_value, only relevant
# in the case where allow_fill is True and fill_value is not None
values = self._values
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 5242706e0ce23..25bcc1f307082 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2247,6 +2247,9 @@ def take(
# only fill if we are passing a non-None fill_value
allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)
+ if indices.ndim == 1 and lib.is_range_indexer(indices, len(self)):
+ return self.copy()
+
na_value = -1
taken = [lab.take(indices) for lab in self.codes]
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v2.3.0.rst` file if fixing a bug or adding a new feature.
```
import pandas as pd
import numpy as np
N = 1_000_000
idx = pd.Index(np.arange(N))
indices = np.arange(N)
%timeit idx.take(indices)
# 11.1 ms ± 271 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) -> main
# 1.33 ms ± 279 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) -> PR
```
Motivating use-case:
```
idx1 = pd.Index(np.tile(np.arange(1000), 1000))
idx2 = pd.Index(np.arange(100))
%timeit idx1.join(idx2, how="left")
# 132 ms ± 1.43 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) -> main
# 110 ms ± 587 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) -> PR
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/56806 | 2024-01-10T03:57:55Z | 2024-01-10T21:34:23Z | 2024-01-10T21:34:23Z | 2024-01-11T00:05:20Z |
CI: Add fixture back in | diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py
index 5973f13c9d495..b1aa6b88bc4ee 100644
--- a/pandas/tests/reshape/merge/test_multi.py
+++ b/pandas/tests/reshape/merge/test_multi.py
@@ -94,6 +94,7 @@ def test_merge_on_multikey(self, left, right, join_type):
@pytest.mark.parametrize(
"infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
)
+ @pytest.mark.parametrize("sort", [True, False])
def test_left_join_multi_index(self, sort, infer_string):
with option_context("future.infer_string", infer_string):
icols = ["1st", "2nd", "3rd"]
|
cc @mroeschke | https://api.github.com/repos/pandas-dev/pandas/pulls/56803 | 2024-01-09T22:03:21Z | 2024-01-09T22:29:45Z | 2024-01-09T22:29:45Z | 2024-01-09T22:29:49Z |
BUG: Change the prompt on non list record path errors (PR: #56405) | diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py
index b1e2210f9d894..7e3e83d59c87c 100644
--- a/pandas/io/json/_normalize.py
+++ b/pandas/io/json/_normalize.py
@@ -427,8 +427,8 @@ def _pull_records(js: dict[str, Any], spec: list | str) -> list:
result = []
else:
raise TypeError(
- f"{js} has non list value {result} for path {spec}. "
- "Must be list or null."
+ f"Path must contain list or null, "
+ f"but got {type(result).__name__} at {repr(spec)}"
)
return result
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py
index 7914d40ea8aaa..0f33883feba3a 100644
--- a/pandas/tests/io/json/test_normalize.py
+++ b/pandas/tests/io/json/test_normalize.py
@@ -526,8 +526,8 @@ def test_non_list_record_path_errors(self, value):
test_input = {"state": "Texas", "info": parsed_value}
test_path = "info"
msg = (
- f"{test_input} has non list value {parsed_value} for path {test_path}. "
- "Must be list or null."
+ f"Path must contain list or null, "
+ f"but got {type(parsed_value).__name__} at 'info'"
)
with pytest.raises(TypeError, match=msg):
json_normalize([test_input], record_path=[test_path])
| - [x] closes #56405
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56802 | 2024-01-09T20:58:04Z | 2024-01-10T21:31:27Z | 2024-01-10T21:31:27Z | 2024-01-10T21:31:33Z |
Backport PR #56059 on branch 2.2.x (ENH: Add case_when method) | diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index af262f9e6c336..a4ea0ec396ceb 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -177,6 +177,7 @@ Reindexing / selection / label manipulation
:toctree: api/
Series.align
+ Series.case_when
Series.drop
Series.droplevel
Series.drop_duplicates
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 6a232365fbfeb..e244794664b34 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -188,6 +188,26 @@ For a full list of ADBC drivers and their development status, see the `ADBC Driv
Implementation Status <https://arrow.apache.org/adbc/current/driver/status.html>`_
documentation.
+.. _whatsnew_220.enhancements.case_when:
+
+Create a pandas Series based on one or more conditions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :meth:`Series.case_when` function has been added to create a Series object based on one or more conditions. (:issue:`39154`)
+
+.. ipython:: python
+
+ import pandas as pd
+
+ df = pd.DataFrame(dict(a=[1, 2, 3], b=[4, 5, 6]))
+ default=pd.Series('default', index=df.index)
+ default.case_when(
+ caselist=[
+ (df.a == 1, 'first'), # condition, replacement
+ (df.a.gt(1) & df.b.eq(5), 'second'), # condition, replacement
+ ],
+ )
+
.. _whatsnew_220.enhancements.to_numpy_ea:
``to_numpy`` for NumPy nullable and Arrow types converts to suitable NumPy dtype
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a6762dd1b48a2..83eb545b9b681 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -67,6 +67,9 @@
from pandas.core.dtypes.astype import astype_is_view
from pandas.core.dtypes.cast import (
LossySetitemError,
+ construct_1d_arraylike_from_scalar,
+ find_common_type,
+ infer_dtype_from,
maybe_box_native,
maybe_cast_pointwise_result,
)
@@ -84,7 +87,10 @@
CategoricalDtype,
ExtensionDtype,
)
-from pandas.core.dtypes.generic import ABCDataFrame
+from pandas.core.dtypes.generic import (
+ ABCDataFrame,
+ ABCSeries,
+)
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import (
isna,
@@ -113,6 +119,7 @@
from pandas.core.arrays.sparse import SparseAccessor
from pandas.core.arrays.string_ import StringDtype
from pandas.core.construction import (
+ array as pd_array,
extract_array,
sanitize_array,
)
@@ -5627,6 +5634,121 @@ def between(
return lmask & rmask
+ def case_when(
+ self,
+ caselist: list[
+ tuple[
+ ArrayLike | Callable[[Series], Series | np.ndarray | Sequence[bool]],
+ ArrayLike | Scalar | Callable[[Series], Series | np.ndarray],
+ ],
+ ],
+ ) -> Series:
+ """
+ Replace values where the conditions are True.
+
+ Parameters
+ ----------
+ caselist : A list of tuples of conditions and expected replacements
+ Takes the form: ``(condition0, replacement0)``,
+ ``(condition1, replacement1)``, ... .
+ ``condition`` should be a 1-D boolean array-like object
+ or a callable. If ``condition`` is a callable,
+ it is computed on the Series
+ and should return a boolean Series or array.
+ The callable must not change the input Series
+ (though pandas doesn`t check it). ``replacement`` should be a
+ 1-D array-like object, a scalar or a callable.
+ If ``replacement`` is a callable, it is computed on the Series
+ and should return a scalar or Series. The callable
+ must not change the input Series
+ (though pandas doesn`t check it).
+
+ .. versionadded:: 2.2.0
+
+ Returns
+ -------
+ Series
+
+ See Also
+ --------
+ Series.mask : Replace values where the condition is True.
+
+ Examples
+ --------
+ >>> c = pd.Series([6, 7, 8, 9], name='c')
+ >>> a = pd.Series([0, 0, 1, 2])
+ >>> b = pd.Series([0, 3, 4, 5])
+
+ >>> c.case_when(caselist=[(a.gt(0), a), # condition, replacement
+ ... (b.gt(0), b)])
+ 0 6
+ 1 3
+ 2 1
+ 3 2
+ Name: c, dtype: int64
+ """
+ if not isinstance(caselist, list):
+ raise TypeError(
+ f"The caselist argument should be a list; instead got {type(caselist)}"
+ )
+
+ if not caselist:
+ raise ValueError(
+ "provide at least one boolean condition, "
+ "with a corresponding replacement."
+ )
+
+ for num, entry in enumerate(caselist):
+ if not isinstance(entry, tuple):
+ raise TypeError(
+ f"Argument {num} must be a tuple; instead got {type(entry)}."
+ )
+ if len(entry) != 2:
+ raise ValueError(
+ f"Argument {num} must have length 2; "
+ "a condition and replacement; "
+ f"instead got length {len(entry)}."
+ )
+ caselist = [
+ (
+ com.apply_if_callable(condition, self),
+ com.apply_if_callable(replacement, self),
+ )
+ for condition, replacement in caselist
+ ]
+ default = self.copy()
+ conditions, replacements = zip(*caselist)
+ common_dtypes = [infer_dtype_from(arg)[0] for arg in [*replacements, default]]
+ if len(set(common_dtypes)) > 1:
+ common_dtype = find_common_type(common_dtypes)
+ updated_replacements = []
+ for condition, replacement in zip(conditions, replacements):
+ if is_scalar(replacement):
+ replacement = construct_1d_arraylike_from_scalar(
+ value=replacement, length=len(condition), dtype=common_dtype
+ )
+ elif isinstance(replacement, ABCSeries):
+ replacement = replacement.astype(common_dtype)
+ else:
+ replacement = pd_array(replacement, dtype=common_dtype)
+ updated_replacements.append(replacement)
+ replacements = updated_replacements
+ default = default.astype(common_dtype)
+
+ counter = reversed(range(len(conditions)))
+ for position, condition, replacement in zip(
+ counter, conditions[::-1], replacements[::-1]
+ ):
+ try:
+ default = default.mask(
+ condition, other=replacement, axis=0, inplace=False, level=None
+ )
+ except Exception as error:
+ raise ValueError(
+ f"Failed to apply condition{position} and replacement{position}."
+ ) from error
+ return default
+
# error: Cannot determine type of 'isna'
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type]
def isna(self) -> Series:
diff --git a/pandas/tests/series/methods/test_case_when.py b/pandas/tests/series/methods/test_case_when.py
new file mode 100644
index 0000000000000..7cb60a11644a3
--- /dev/null
+++ b/pandas/tests/series/methods/test_case_when.py
@@ -0,0 +1,148 @@
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ Series,
+ array as pd_array,
+ date_range,
+)
+import pandas._testing as tm
+
+
+@pytest.fixture
+def df():
+ """
+ base dataframe for testing
+ """
+ return DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
+
+
+def test_case_when_caselist_is_not_a_list(df):
+ """
+ Raise ValueError if caselist is not a list.
+ """
+ msg = "The caselist argument should be a list; "
+ msg += "instead got.+"
+ with pytest.raises(TypeError, match=msg): # GH39154
+ df["a"].case_when(caselist=())
+
+
+def test_case_when_no_caselist(df):
+ """
+ Raise ValueError if no caselist is provided.
+ """
+ msg = "provide at least one boolean condition, "
+ msg += "with a corresponding replacement."
+ with pytest.raises(ValueError, match=msg): # GH39154
+ df["a"].case_when([])
+
+
+def test_case_when_odd_caselist(df):
+ """
+ Raise ValueError if no of caselist is odd.
+ """
+ msg = "Argument 0 must have length 2; "
+ msg += "a condition and replacement; instead got length 3."
+
+ with pytest.raises(ValueError, match=msg):
+ df["a"].case_when([(df["a"].eq(1), 1, df.a.gt(1))])
+
+
+def test_case_when_raise_error_from_mask(df):
+ """
+ Raise Error from within Series.mask
+ """
+ msg = "Failed to apply condition0 and replacement0."
+ with pytest.raises(ValueError, match=msg):
+ df["a"].case_when([(df["a"].eq(1), [1, 2])])
+
+
+def test_case_when_single_condition(df):
+ """
+ Test output on a single condition.
+ """
+ result = Series([np.nan, np.nan, np.nan]).case_when([(df.a.eq(1), 1)])
+ expected = Series([1, np.nan, np.nan])
+ tm.assert_series_equal(result, expected)
+
+
+def test_case_when_multiple_conditions(df):
+ """
+ Test output when booleans are derived from a computation
+ """
+ result = Series([np.nan, np.nan, np.nan]).case_when(
+ [(df.a.eq(1), 1), (Series([False, True, False]), 2)]
+ )
+ expected = Series([1, 2, np.nan])
+ tm.assert_series_equal(result, expected)
+
+
+def test_case_when_multiple_conditions_replacement_list(df):
+ """
+ Test output when replacement is a list
+ """
+ result = Series([np.nan, np.nan, np.nan]).case_when(
+ [([True, False, False], 1), (df["a"].gt(1) & df["b"].eq(5), [1, 2, 3])]
+ )
+ expected = Series([1, 2, np.nan])
+ tm.assert_series_equal(result, expected)
+
+
+def test_case_when_multiple_conditions_replacement_extension_dtype(df):
+ """
+ Test output when replacement has an extension dtype
+ """
+ result = Series([np.nan, np.nan, np.nan]).case_when(
+ [
+ ([True, False, False], 1),
+ (df["a"].gt(1) & df["b"].eq(5), pd_array([1, 2, 3], dtype="Int64")),
+ ],
+ )
+ expected = Series([1, 2, np.nan], dtype="Float64")
+ tm.assert_series_equal(result, expected)
+
+
+def test_case_when_multiple_conditions_replacement_series(df):
+ """
+ Test output when replacement is a Series
+ """
+ result = Series([np.nan, np.nan, np.nan]).case_when(
+ [
+ (np.array([True, False, False]), 1),
+ (df["a"].gt(1) & df["b"].eq(5), Series([1, 2, 3])),
+ ],
+ )
+ expected = Series([1, 2, np.nan])
+ tm.assert_series_equal(result, expected)
+
+
+def test_case_when_non_range_index():
+ """
+ Test output if index is not RangeIndex
+ """
+ rng = np.random.default_rng(seed=123)
+ dates = date_range("1/1/2000", periods=8)
+ df = DataFrame(
+ rng.standard_normal(size=(8, 4)), index=dates, columns=["A", "B", "C", "D"]
+ )
+ result = Series(5, index=df.index, name="A").case_when([(df.A.gt(0), df.B)])
+ expected = df.A.mask(df.A.gt(0), df.B).where(df.A.gt(0), 5)
+ tm.assert_series_equal(result, expected)
+
+
+def test_case_when_callable():
+ """
+ Test output on a callable
+ """
+ # https://numpy.org/doc/stable/reference/generated/numpy.piecewise.html
+ x = np.linspace(-2.5, 2.5, 6)
+ ser = Series(x)
+ result = ser.case_when(
+ caselist=[
+ (lambda df: df < 0, lambda df: -df),
+ (lambda df: df >= 0, lambda df: df),
+ ]
+ )
+ expected = np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
+ tm.assert_series_equal(result, Series(expected))
| #56059 | https://api.github.com/repos/pandas-dev/pandas/pulls/56800 | 2024-01-09T13:03:53Z | 2024-01-09T23:46:09Z | 2024-01-09T23:46:09Z | 2024-01-09T23:46:12Z |
Backport PR #5644 on branch 2.2.x (BUG: merge not sorting for new string dtype) | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 36e677fa2a7a9..6a232365fbfeb 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -893,6 +893,7 @@ Reshaping
- Bug in :func:`merge_asof` when using a :class:`Timedelta` tolerance on a :class:`ArrowDtype` column (:issue:`56486`)
- Bug in :func:`merge` not raising when merging datetime columns with timedelta columns (:issue:`56455`)
- Bug in :func:`merge` not raising when merging string columns with numeric columns (:issue:`56441`)
+- Bug in :func:`merge` not sorting for new string dtype (:issue:`56442`)
- Bug in :func:`merge` returning columns in incorrect order when left and/or right is empty (:issue:`51929`)
- Bug in :meth:`DataFrame.melt` where an exception was raised if ``var_name`` was not a string (:issue:`55948`)
- Bug in :meth:`DataFrame.melt` where it would not preserve the datetime (:issue:`55254`)
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 320e4e33a29fb..410301b7697f2 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -2488,18 +2488,30 @@ def _factorize_keys(
.combine_chunks()
.dictionary_encode()
)
- length = len(dc.dictionary)
llab, rlab, count = (
- pc.fill_null(dc.indices[slice(len_lk)], length)
+ pc.fill_null(dc.indices[slice(len_lk)], -1)
.to_numpy()
.astype(np.intp, copy=False),
- pc.fill_null(dc.indices[slice(len_lk, None)], length)
+ pc.fill_null(dc.indices[slice(len_lk, None)], -1)
.to_numpy()
.astype(np.intp, copy=False),
len(dc.dictionary),
)
+
+ if sort:
+ uniques = dc.dictionary.to_numpy(zero_copy_only=False)
+ llab, rlab = _sort_labels(uniques, llab, rlab)
+
if dc.null_count > 0:
+ lmask = llab == -1
+ lany = lmask.any()
+ rmask = rlab == -1
+ rany = rmask.any()
+ if lany:
+ np.putmask(llab, lmask, count)
+ if rany:
+ np.putmask(rlab, rmask, count)
count += 1
return llab, rlab, count
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 5a1f47e341222..1d5ed2d7373ce 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -16,6 +16,7 @@
bdate_range,
concat,
merge,
+ option_context,
)
import pandas._testing as tm
@@ -563,24 +564,30 @@ def test_join_many_non_unique_index(self):
tm.assert_frame_equal(inner, left)
tm.assert_frame_equal(inner, right)
- def test_join_sort(self):
- left = DataFrame({"key": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]})
- right = DataFrame({"value2": ["a", "b", "c"]}, index=["bar", "baz", "foo"])
-
- joined = left.join(right, on="key", sort=True)
- expected = DataFrame(
- {
- "key": ["bar", "baz", "foo", "foo"],
- "value": [2, 3, 1, 4],
- "value2": ["a", "b", "c", "c"],
- },
- index=[1, 2, 0, 3],
- )
- tm.assert_frame_equal(joined, expected)
-
- # smoke test
- joined = left.join(right, on="key", sort=False)
- tm.assert_index_equal(joined.index, Index(range(4)), exact=True)
+ @pytest.mark.parametrize(
+ "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+ )
+ def test_join_sort(self, infer_string):
+ with option_context("future.infer_string", infer_string):
+ left = DataFrame(
+ {"key": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]}
+ )
+ right = DataFrame({"value2": ["a", "b", "c"]}, index=["bar", "baz", "foo"])
+
+ joined = left.join(right, on="key", sort=True)
+ expected = DataFrame(
+ {
+ "key": ["bar", "baz", "foo", "foo"],
+ "value": [2, 3, 1, 4],
+ "value2": ["a", "b", "c", "c"],
+ },
+ index=[1, 2, 0, 3],
+ )
+ tm.assert_frame_equal(joined, expected)
+
+ # smoke test
+ joined = left.join(right, on="key", sort=False)
+ tm.assert_index_equal(joined.index, Index(range(4)), exact=True)
def test_join_mixed_non_unique_index(self):
# GH 12814, unorderable types in py3 with a non-unique index
diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py
index 269d3a2b7078e..5973f13c9d495 100644
--- a/pandas/tests/reshape/merge/test_multi.py
+++ b/pandas/tests/reshape/merge/test_multi.py
@@ -1,6 +1,8 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
import pandas as pd
from pandas import (
DataFrame,
@@ -9,6 +11,7 @@
RangeIndex,
Series,
Timestamp,
+ option_context,
)
import pandas._testing as tm
from pandas.core.reshape.concat import concat
@@ -88,67 +91,70 @@ def test_merge_on_multikey(self, left, right, join_type):
tm.assert_frame_equal(result, expected)
- @pytest.mark.parametrize("sort", [False, True])
- def test_left_join_multi_index(self, sort):
- icols = ["1st", "2nd", "3rd"]
+ @pytest.mark.parametrize(
+ "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+ )
+ def test_left_join_multi_index(self, sort, infer_string):
+ with option_context("future.infer_string", infer_string):
+ icols = ["1st", "2nd", "3rd"]
- def bind_cols(df):
- iord = lambda a: 0 if a != a else ord(a)
- f = lambda ts: ts.map(iord) - ord("a")
- return f(df["1st"]) + f(df["3rd"]) * 1e2 + df["2nd"].fillna(0) * 10
+ def bind_cols(df):
+ iord = lambda a: 0 if a != a else ord(a)
+ f = lambda ts: ts.map(iord) - ord("a")
+ return f(df["1st"]) + f(df["3rd"]) * 1e2 + df["2nd"].fillna(0) * 10
- def run_asserts(left, right, sort):
- res = left.join(right, on=icols, how="left", sort=sort)
+ def run_asserts(left, right, sort):
+ res = left.join(right, on=icols, how="left", sort=sort)
- assert len(left) < len(res) + 1
- assert not res["4th"].isna().any()
- assert not res["5th"].isna().any()
+ assert len(left) < len(res) + 1
+ assert not res["4th"].isna().any()
+ assert not res["5th"].isna().any()
- tm.assert_series_equal(res["4th"], -res["5th"], check_names=False)
- result = bind_cols(res.iloc[:, :-2])
- tm.assert_series_equal(res["4th"], result, check_names=False)
- assert result.name is None
+ tm.assert_series_equal(res["4th"], -res["5th"], check_names=False)
+ result = bind_cols(res.iloc[:, :-2])
+ tm.assert_series_equal(res["4th"], result, check_names=False)
+ assert result.name is None
- if sort:
- tm.assert_frame_equal(res, res.sort_values(icols, kind="mergesort"))
+ if sort:
+ tm.assert_frame_equal(res, res.sort_values(icols, kind="mergesort"))
- out = merge(left, right.reset_index(), on=icols, sort=sort, how="left")
+ out = merge(left, right.reset_index(), on=icols, sort=sort, how="left")
- res.index = RangeIndex(len(res))
- tm.assert_frame_equal(out, res)
+ res.index = RangeIndex(len(res))
+ tm.assert_frame_equal(out, res)
- lc = list(map(chr, np.arange(ord("a"), ord("z") + 1)))
- left = DataFrame(
- np.random.default_rng(2).choice(lc, (50, 2)), columns=["1st", "3rd"]
- )
- # Explicit cast to float to avoid implicit cast when setting nan
- left.insert(
- 1,
- "2nd",
- np.random.default_rng(2).integers(0, 10, len(left)).astype("float"),
- )
+ lc = list(map(chr, np.arange(ord("a"), ord("z") + 1)))
+ left = DataFrame(
+ np.random.default_rng(2).choice(lc, (50, 2)), columns=["1st", "3rd"]
+ )
+ # Explicit cast to float to avoid implicit cast when setting nan
+ left.insert(
+ 1,
+ "2nd",
+ np.random.default_rng(2).integers(0, 10, len(left)).astype("float"),
+ )
- i = np.random.default_rng(2).permutation(len(left))
- right = left.iloc[i].copy()
+ i = np.random.default_rng(2).permutation(len(left))
+ right = left.iloc[i].copy()
- left["4th"] = bind_cols(left)
- right["5th"] = -bind_cols(right)
- right.set_index(icols, inplace=True)
+ left["4th"] = bind_cols(left)
+ right["5th"] = -bind_cols(right)
+ right.set_index(icols, inplace=True)
- run_asserts(left, right, sort)
+ run_asserts(left, right, sort)
- # inject some nulls
- left.loc[1::4, "1st"] = np.nan
- left.loc[2::5, "2nd"] = np.nan
- left.loc[3::6, "3rd"] = np.nan
- left["4th"] = bind_cols(left)
+ # inject some nulls
+ left.loc[1::4, "1st"] = np.nan
+ left.loc[2::5, "2nd"] = np.nan
+ left.loc[3::6, "3rd"] = np.nan
+ left["4th"] = bind_cols(left)
- i = np.random.default_rng(2).permutation(len(left))
- right = left.iloc[i, :-1]
- right["5th"] = -bind_cols(right)
- right.set_index(icols, inplace=True)
+ i = np.random.default_rng(2).permutation(len(left))
+ right = left.iloc[i, :-1]
+ right["5th"] = -bind_cols(right)
+ right.set_index(icols, inplace=True)
- run_asserts(left, right, sort)
+ run_asserts(left, right, sort)
@pytest.mark.parametrize("sort", [False, True])
def test_merge_right_vs_left(self, left, right, sort):
| #56442 | https://api.github.com/repos/pandas-dev/pandas/pulls/56799 | 2024-01-09T12:58:32Z | 2024-01-09T15:47:34Z | 2024-01-09T15:47:34Z | 2024-01-09T16:02:18Z |
Backport PR #56772 on branch 2.2.x (Support large strings in interchange protocol) | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index dbb11d3d0788d..36e677fa2a7a9 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -906,6 +906,7 @@ Sparse
Other
^^^^^
+- :meth:`DataFrame.__dataframe__` did not support pyarrow large strings (:issue:`56702`)
- Bug in :func:`DataFrame.describe` when formatting percentiles in the resulting percentile 99.999% is rounded to 100% (:issue:`55765`)
- Bug in :func:`cut` and :func:`qcut` with ``datetime64`` dtype values with non-nanosecond units incorrectly returning nanosecond-unit bins (:issue:`56101`)
- Bug in :func:`cut` incorrectly allowing cutting of timezone-aware datetimes with timezone-naive bins (:issue:`54964`)
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index ed5256922377a..e90e92fa0ee1c 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -2190,7 +2190,9 @@ def numpy_dtype(self) -> np.dtype:
# This can be removed if/when pyarrow addresses it:
# https://github.com/apache/arrow/issues/34462
return np.dtype(f"timedelta64[{self.pyarrow_dtype.unit}]")
- if pa.types.is_string(self.pyarrow_dtype):
+ if pa.types.is_string(self.pyarrow_dtype) or pa.types.is_large_string(
+ self.pyarrow_dtype
+ ):
# pa.string().to_pandas_dtype() = object which we don't want
return np.dtype(str)
try:
diff --git a/pandas/core/interchange/column.py b/pandas/core/interchange/column.py
index acfbc5d9e6c62..7f524d6823f30 100644
--- a/pandas/core/interchange/column.py
+++ b/pandas/core/interchange/column.py
@@ -301,12 +301,9 @@ def _get_data_buffer(
buffer = PandasBuffer(np.frombuffer(b, dtype="uint8"))
# Define the dtype for the returned buffer
- dtype = (
- DtypeKind.STRING,
- 8,
- ArrowCTypes.STRING,
- Endianness.NATIVE,
- ) # note: currently only support native endianness
+ # TODO: this will need correcting
+ # https://github.com/pandas-dev/pandas/issues/54781
+ dtype = self.dtype
else:
raise NotImplementedError(f"Data type {self._col.dtype} not handled yet")
diff --git a/pandas/core/interchange/utils.py b/pandas/core/interchange/utils.py
index 4ac063080e62d..2e73e560e5740 100644
--- a/pandas/core/interchange/utils.py
+++ b/pandas/core/interchange/utils.py
@@ -37,6 +37,7 @@
"float": "f", # float32
"double": "g", # float64
"string": "u",
+ "large_string": "U",
"binary": "z",
"time32[s]": "tts",
"time32[ms]": "ttm",
diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py
index 15c2b8d000b37..27ea8ccdd17b1 100644
--- a/pandas/tests/interchange/test_impl.py
+++ b/pandas/tests/interchange/test_impl.py
@@ -362,3 +362,12 @@ def test_interchange_from_corrected_buffer_dtypes(monkeypatch) -> None:
interchange.get_column_by_name = lambda _: column
monkeypatch.setattr(df, "__dataframe__", lambda allow_copy: interchange)
pd.api.interchange.from_dataframe(df)
+
+
+def test_large_string():
+ # GH#56702
+ pytest.importorskip("pyarrow")
+ df = pd.DataFrame({"a": ["x"]}, dtype="large_string[pyarrow]")
+ result = pd.api.interchange.from_dataframe(df.__dataframe__())
+ expected = pd.DataFrame({"a": ["x"]}, dtype="object")
+ tm.assert_frame_equal(result, expected)
| Backport PR #56772: Support large strings in interchange protocol | https://api.github.com/repos/pandas-dev/pandas/pulls/56795 | 2024-01-09T08:42:08Z | 2024-01-09T09:57:14Z | 2024-01-09T09:57:14Z | 2024-01-09T09:57:14Z |
Series.str.find fix for pd.ArrowDtype(pa.string()) | diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 392b4e3cc616a..7bab8c9395ac6 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -2364,20 +2364,26 @@ def _str_fullmatch(
return self._str_match(pat, case, flags, na)
def _str_find(self, sub: str, start: int = 0, end: int | None = None) -> Self:
- if start != 0 and end is not None:
+ if (start == 0 or start is None) and end is None:
+ result = pc.find_substring(self._pa_array, sub)
+ else:
+ if sub == "":
+ # GH 56792
+ result = self._apply_elementwise(lambda val: val.find(sub, start, end))
+ return type(self)(pa.chunked_array(result))
+ if start is None:
+ start_offset = 0
+ start = 0
+ elif start < 0:
+ start_offset = pc.add(start, pc.utf8_length(self._pa_array))
+ start_offset = pc.if_else(pc.less(start_offset, 0), 0, start_offset)
+ else:
+ start_offset = start
slices = pc.utf8_slice_codeunits(self._pa_array, start, stop=end)
result = pc.find_substring(slices, sub)
- not_found = pc.equal(result, -1)
- start_offset = max(0, start)
+ found = pc.not_equal(result, pa.scalar(-1, type=result.type))
offset_result = pc.add(result, start_offset)
- result = pc.if_else(not_found, result, offset_result)
- elif start == 0 and end is None:
- slices = self._pa_array
- result = pc.find_substring(slices, sub)
- else:
- raise NotImplementedError(
- f"find not implemented with {sub=}, {start=}, {end=}"
- )
+ result = pc.if_else(found, offset_result, -1)
return type(self)(result)
def _str_join(self, sep: str) -> Self:
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 6970c589dd36f..d3b2ea142df74 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -23,6 +23,7 @@
BytesIO,
StringIO,
)
+from itertools import combinations
import operator
import pickle
import re
@@ -1924,13 +1925,18 @@ def test_str_fullmatch(pat, case, na, exp):
@pytest.mark.parametrize(
- "sub, start, end, exp, exp_typ",
- [["ab", 0, None, [0, None], pa.int32()], ["bc", 1, 3, [1, None], pa.int64()]],
+ "sub, start, end, exp, exp_type",
+ [
+ ["ab", 0, None, [0, None], pa.int32()],
+ ["bc", 1, 3, [1, None], pa.int64()],
+ ["ab", 1, 3, [-1, None], pa.int64()],
+ ["ab", -3, -3, [-1, None], pa.int64()],
+ ],
)
-def test_str_find(sub, start, end, exp, exp_typ):
+def test_str_find(sub, start, end, exp, exp_type):
ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
result = ser.str.find(sub, start=start, end=end)
- expected = pd.Series(exp, dtype=ArrowDtype(exp_typ))
+ expected = pd.Series(exp, dtype=ArrowDtype(exp_type))
tm.assert_series_equal(result, expected)
@@ -1942,10 +1948,70 @@ def test_str_find_negative_start():
tm.assert_series_equal(result, expected)
-def test_str_find_notimplemented():
+def test_str_find_no_end():
ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
- with pytest.raises(NotImplementedError, match="find not implemented"):
- ser.str.find("ab", start=1)
+ if pa_version_under13p0:
+ # https://github.com/apache/arrow/issues/36311
+ with pytest.raises(pa.lib.ArrowInvalid, match="Negative buffer resize"):
+ ser.str.find("ab", start=1)
+ else:
+ result = ser.str.find("ab", start=1)
+ expected = pd.Series([-1, None], dtype="int64[pyarrow]")
+ tm.assert_series_equal(result, expected)
+
+
+def test_str_find_negative_start_negative_end():
+ # GH 56791
+ ser = pd.Series(["abcdefg", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.find(sub="d", start=-6, end=-3)
+ expected = pd.Series([3, None], dtype=ArrowDtype(pa.int64()))
+ tm.assert_series_equal(result, expected)
+
+
+def test_str_find_large_start():
+ # GH 56791
+ ser = pd.Series(["abcdefg", None], dtype=ArrowDtype(pa.string()))
+ if pa_version_under13p0:
+ # https://github.com/apache/arrow/issues/36311
+ with pytest.raises(pa.lib.ArrowInvalid, match="Negative buffer resize"):
+ ser.str.find(sub="d", start=16)
+ else:
+ result = ser.str.find(sub="d", start=16)
+ expected = pd.Series([-1, None], dtype=ArrowDtype(pa.int64()))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.skipif(
+ pa_version_under13p0, reason="https://github.com/apache/arrow/issues/36311"
+)
+@pytest.mark.parametrize("start", list(range(-15, 15)) + [None])
+@pytest.mark.parametrize("end", list(range(-15, 15)) + [None])
+@pytest.mark.parametrize(
+ "sub",
+ ["abcaadef"[x:y] for x, y in combinations(range(len("abcaadef") + 1), r=2)]
+ + [
+ "",
+ "az",
+ "abce",
+ ],
+)
+def test_str_find_e2e(start, end, sub):
+ s = pd.Series(
+ ["abcaadef", "abc", "abcdeddefgj8292", "ab", "a", ""],
+ dtype=ArrowDtype(pa.string()),
+ )
+ object_series = s.astype(pd.StringDtype())
+ result = s.str.find(sub, start, end)
+ expected = object_series.str.find(sub, start, end).astype(result.dtype)
+ tm.assert_series_equal(result, expected)
+
+
+def test_str_find_negative_start_negative_end_no_match():
+ # GH 56791
+ ser = pd.Series(["abcdefg", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.find(sub="d", start=-3, end=-6)
+ expected = pd.Series([-1, None], dtype=ArrowDtype(pa.int64()))
+ tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
| - [x] closes #56791(Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56792 | 2024-01-09T02:12:56Z | 2024-02-02T17:57:54Z | 2024-02-02T17:57:54Z | 2024-02-02T23:51:35Z |
Backport PR #56402 on branch 2.2.x (TST/CoW: expand test for chained inplace methods) | diff --git a/pandas/tests/copy_view/test_chained_assignment_deprecation.py b/pandas/tests/copy_view/test_chained_assignment_deprecation.py
index 80e38380ed27c..0a37f6b813e55 100644
--- a/pandas/tests/copy_view/test_chained_assignment_deprecation.py
+++ b/pandas/tests/copy_view/test_chained_assignment_deprecation.py
@@ -1,6 +1,7 @@
import numpy as np
import pytest
+from pandas.compat import PY311
from pandas.errors import (
ChainedAssignmentError,
SettingWithCopyWarning,
@@ -42,7 +43,9 @@ def test_methods_iloc_warn(using_copy_on_write):
("ffill", ()),
],
)
-def test_methods_iloc_getitem_item_cache(func, args, using_copy_on_write):
+def test_methods_iloc_getitem_item_cache(
+ func, args, using_copy_on_write, warn_copy_on_write
+):
# ensure we don't incorrectly raise chained assignment warning because
# of the item cache / iloc not setting the item cache
df_orig = DataFrame({"a": [1, 2, 3], "b": 1})
@@ -66,14 +69,74 @@ def test_methods_iloc_getitem_item_cache(func, args, using_copy_on_write):
ser = df["a"]
getattr(ser, func)(*args, inplace=True)
+ df = df_orig.copy()
+ df["a"] # populate the item_cache
+ # TODO(CoW-warn) because of the usage of *args, this doesn't warn on Py3.11+
+ if using_copy_on_write:
+ with tm.raises_chained_assignment_error(not PY311):
+ getattr(df["a"], func)(*args, inplace=True)
+ else:
+ with tm.assert_cow_warning(not PY311, match="A value"):
+ getattr(df["a"], func)(*args, inplace=True)
+
+ df = df_orig.copy()
+ ser = df["a"] # populate the item_cache and keep ref
+ if using_copy_on_write:
+ with tm.raises_chained_assignment_error(not PY311):
+ getattr(df["a"], func)(*args, inplace=True)
+ else:
+ # ideally also warns on the default mode, but the ser' _cacher
+ # messes up the refcount + even in warning mode this doesn't trigger
+ # the warning of Py3.1+ (see above)
+ with tm.assert_cow_warning(warn_copy_on_write and not PY311, match="A value"):
+ getattr(df["a"], func)(*args, inplace=True)
+
+
+def test_methods_iloc_getitem_item_cache_fillna(
+ using_copy_on_write, warn_copy_on_write
+):
+ # ensure we don't incorrectly raise chained assignment warning because
+ # of the item cache / iloc not setting the item cache
+ df_orig = DataFrame({"a": [1, 2, 3], "b": 1})
+
+ df = df_orig.copy()
+ ser = df.iloc[:, 0]
+ ser.fillna(1, inplace=True)
+
+ # parent that holds item_cache is dead, so don't increase ref count
+ df = df_orig.copy()
+ ser = df.copy()["a"]
+ ser.fillna(1, inplace=True)
+
+ df = df_orig.copy()
+ df["a"] # populate the item_cache
+ ser = df.iloc[:, 0] # iloc creates a new object
+ ser.fillna(1, inplace=True)
+
+ df = df_orig.copy()
+ df["a"] # populate the item_cache
+ ser = df["a"]
+ ser.fillna(1, inplace=True)
+
df = df_orig.copy()
df["a"] # populate the item_cache
if using_copy_on_write:
with tm.raises_chained_assignment_error():
- df["a"].fillna(0, inplace=True)
+ df["a"].fillna(1, inplace=True)
else:
with tm.assert_cow_warning(match="A value"):
- df["a"].fillna(0, inplace=True)
+ df["a"].fillna(1, inplace=True)
+
+ df = df_orig.copy()
+ ser = df["a"] # populate the item_cache and keep ref
+ if using_copy_on_write:
+ with tm.raises_chained_assignment_error():
+ df["a"].fillna(1, inplace=True)
+ else:
+ # TODO(CoW-warn) ideally also warns on the default mode, but the ser' _cacher
+ # messes up the refcount
+ with tm.assert_cow_warning(warn_copy_on_write, match="A value"):
+ df["a"].fillna(1, inplace=True)
# TODO(CoW-warn) expand the cases
| Backport PR #56402: TST/CoW: expand test for chained inplace methods | https://api.github.com/repos/pandas-dev/pandas/pulls/56790 | 2024-01-08T23:27:57Z | 2024-01-09T02:28:19Z | 2024-01-09T02:28:19Z | 2024-01-09T02:28:19Z |
DOC: add to to_offset the missing parameter is_period and examples | diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index b3788b6003e67..8ac59ea8cdde0 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -4734,6 +4734,10 @@ cpdef to_offset(freq, bint is_period=False):
Parameters
----------
freq : str, datetime.timedelta, BaseOffset or None
+ The frequency represented.
+ is_period : bool, default False
+ Convert string denoting period frequency to corresponding offsets
+ frequency if is_period=True.
Returns
-------
@@ -4768,6 +4772,17 @@ cpdef to_offset(freq, bint is_period=False):
>>> to_offset(pd.offsets.Hour())
<Hour>
+
+ Passing the parameter ``is_period`` equal to True, you can use a string
+ denoting period frequency:
+
+ >>> freq = to_offset(freq="ME", is_period=False)
+ >>> freq.rule_code
+ 'ME'
+
+ >>> freq = to_offset(freq="M", is_period=True)
+ >>> freq.rule_code
+ 'ME'
"""
if freq is None:
return None
| xref #52064
added to `to_offset` the missing parameter `is_period` and examples | https://api.github.com/repos/pandas-dev/pandas/pulls/56789 | 2024-01-08T22:45:46Z | 2024-01-09T13:40:38Z | 2024-01-09T13:40:38Z | 2024-01-11T00:05:19Z |
Bug: Interchange protocol implementation does not allow for empty string columns | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 9a9ac769a4893..2018c8936ee3c 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -934,6 +934,7 @@ Other
- Bug in :func:`cut` and :func:`qcut` with ``datetime64`` dtype values with non-nanosecond units incorrectly returning nanosecond-unit bins (:issue:`56101`)
- Bug in :func:`cut` incorrectly allowing cutting of timezone-aware datetimes with timezone-naive bins (:issue:`54964`)
- Bug in :func:`infer_freq` and :meth:`DatetimeIndex.inferred_freq` with weekly frequencies and non-nanosecond resolutions (:issue:`55609`)
+- Bug in :func:`pd.api.interchange.from_dataframe` where it raised ``NotImplementedError`` when handling empty string columns (:issue:`56703`)
- Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55009`)
- Bug in :meth:`DataFrame.from_dict` which would always sort the rows of the created :class:`DataFrame`. (:issue:`55683`)
- Bug in :meth:`DataFrame.sort_index` when passing ``axis="columns"`` and ``ignore_index=True`` raising a ``ValueError`` (:issue:`56478`)
@@ -942,7 +943,6 @@ Other
- Bug in the error message when assigning an empty :class:`DataFrame` to a column (:issue:`55956`)
- Bug when time-like strings were being cast to :class:`ArrowDtype` with ``pyarrow.time64`` type (:issue:`56463`)
-
.. ---------------------------------------------------------------------------
.. _whatsnew_220.contributors:
diff --git a/pandas/core/interchange/column.py b/pandas/core/interchange/column.py
index 7f524d6823f30..ee1b5cd34a7f7 100644
--- a/pandas/core/interchange/column.py
+++ b/pandas/core/interchange/column.py
@@ -116,7 +116,7 @@ def dtype(self) -> tuple[DtypeKind, int, str, str]:
Endianness.NATIVE,
)
elif is_string_dtype(dtype):
- if infer_dtype(self._col) == "string":
+ if infer_dtype(self._col) in ("string", "empty"):
return (
DtypeKind.STRING,
8,
diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py
index 4ba6eb8464261..2bc488fbb1dd1 100644
--- a/pandas/tests/interchange/test_impl.py
+++ b/pandas/tests/interchange/test_impl.py
@@ -355,6 +355,14 @@ def test_interchange_from_corrected_buffer_dtypes(monkeypatch) -> None:
pd.api.interchange.from_dataframe(df)
+def test_empty_string_column():
+ # https://github.com/pandas-dev/pandas/issues/56703
+ df = pd.DataFrame({"a": []}, dtype=str)
+ df2 = df.__dataframe__()
+ result = pd.api.interchange.from_dataframe(df2)
+ tm.assert_frame_equal(df, result)
+
+
def test_large_string():
# GH#56702
pytest.importorskip("pyarrow")
| - [x] closes #56703
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v2.2.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56788 | 2024-01-08T22:11:33Z | 2024-01-10T21:30:51Z | 2024-01-10T21:30:51Z | 2024-01-10T21:30:59Z |
TST/CLN: Test parametrizations 4 | diff --git a/pandas/tests/strings/test_find_replace.py b/pandas/tests/strings/test_find_replace.py
index cd4707ac405de..9f0994b968a47 100644
--- a/pandas/tests/strings/test_find_replace.py
+++ b/pandas/tests/strings/test_find_replace.py
@@ -515,13 +515,11 @@ def test_replace_compiled_regex_callable(any_string_dtype):
tm.assert_series_equal(result, expected)
-@pytest.mark.parametrize(
- "regex,expected", [(True, ["bao", "bao", np.nan]), (False, ["bao", "foo", np.nan])]
-)
-def test_replace_literal(regex, expected, any_string_dtype):
+@pytest.mark.parametrize("regex,expected_val", [(True, "bao"), (False, "foo")])
+def test_replace_literal(regex, expected_val, any_string_dtype):
# GH16808 literal replace (regex=False vs regex=True)
ser = Series(["f.o", "foo", np.nan], dtype=any_string_dtype)
- expected = Series(expected, dtype=any_string_dtype)
+ expected = Series(["bao", expected_val, np.nan], dtype=any_string_dtype)
result = ser.str.replace("f.", "ba", regex=regex)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/strings/test_split_partition.py b/pandas/tests/strings/test_split_partition.py
index 9ff1fc0e13ae9..452e5ec5cf939 100644
--- a/pandas/tests/strings/test_split_partition.py
+++ b/pandas/tests/strings/test_split_partition.py
@@ -190,23 +190,24 @@ def test_split_maxsplit(data, pat, any_string_dtype, n):
@pytest.mark.parametrize(
- "data, pat, expected",
+ "data, pat, expected_val",
[
(
["split once", "split once too!"],
None,
- Series({0: ["split", "once"], 1: ["split", "once too!"]}),
+ "once too!",
),
(
["split_once", "split_once_too!"],
"_",
- Series({0: ["split", "once"], 1: ["split", "once_too!"]}),
+ "once_too!",
),
],
)
-def test_split_no_pat_with_nonzero_n(data, pat, expected, any_string_dtype):
+def test_split_no_pat_with_nonzero_n(data, pat, expected_val, any_string_dtype):
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=1)
+ expected = Series({0: ["split", "once"], 1: ["split", expected_val]})
tm.assert_series_equal(expected, result, check_index_type=False)
@@ -533,37 +534,27 @@ def test_partition_series_stdlib(any_string_dtype, method):
@pytest.mark.parametrize(
- "method, expand, exp, exp_levels",
+ "method, exp",
[
[
"partition",
- False,
- np.array(
- [("a", "_", "b_c"), ("c", "_", "d_e"), ("f", "_", "g_h"), np.nan, None],
- dtype=object,
- ),
- 1,
+ [("a", "_", "b_c"), ("c", "_", "d_e"), ("f", "_", "g_h"), np.nan, None],
],
[
"rpartition",
- False,
- np.array(
- [("a_b", "_", "c"), ("c_d", "_", "e"), ("f_g", "_", "h"), np.nan, None],
- dtype=object,
- ),
- 1,
+ [("a_b", "_", "c"), ("c_d", "_", "e"), ("f_g", "_", "h"), np.nan, None],
],
],
)
-def test_partition_index(method, expand, exp, exp_levels):
+def test_partition_index(method, exp):
# https://github.com/pandas-dev/pandas/issues/23558
values = Index(["a_b_c", "c_d_e", "f_g_h", np.nan, None])
- result = getattr(values.str, method)("_", expand=expand)
- exp = Index(exp)
+ result = getattr(values.str, method)("_", expand=False)
+ exp = Index(np.array(exp, dtype=object), dtype=object)
tm.assert_index_equal(result, exp)
- assert result.nlevels == exp_levels
+ assert result.nlevels == 1
@pytest.mark.parametrize(
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py
index aeddc08e4b888..c99751dca6c9d 100644
--- a/pandas/tests/test_errors.py
+++ b/pandas/tests/test_errors.py
@@ -67,13 +67,7 @@ def test_catch_oob():
pd.Timestamp("15000101").as_unit("ns")
-@pytest.mark.parametrize(
- "is_local",
- [
- True,
- False,
- ],
-)
+@pytest.mark.parametrize("is_local", [True, False])
def test_catch_undefined_variable_error(is_local):
variable_name = "x"
if is_local:
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 576a6615bf694..9d364c2f86ac5 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -212,51 +212,54 @@ def test_to_datetime_format_YYYYMMDD_with_none(self, input_s):
[
# NaN before strings with invalid date values
[
- Series(["19801222", np.nan, "20010012", "10019999"]),
- Series([Timestamp("19801222"), np.nan, np.nan, np.nan]),
+ ["19801222", np.nan, "20010012", "10019999"],
+ [Timestamp("19801222"), np.nan, np.nan, np.nan],
],
# NaN after strings with invalid date values
[
- Series(["19801222", "20010012", "10019999", np.nan]),
- Series([Timestamp("19801222"), np.nan, np.nan, np.nan]),
+ ["19801222", "20010012", "10019999", np.nan],
+ [Timestamp("19801222"), np.nan, np.nan, np.nan],
],
# NaN before integers with invalid date values
[
- Series([20190813, np.nan, 20010012, 20019999]),
- Series([Timestamp("20190813"), np.nan, np.nan, np.nan]),
+ [20190813, np.nan, 20010012, 20019999],
+ [Timestamp("20190813"), np.nan, np.nan, np.nan],
],
# NaN after integers with invalid date values
[
- Series([20190813, 20010012, np.nan, 20019999]),
- Series([Timestamp("20190813"), np.nan, np.nan, np.nan]),
+ [20190813, 20010012, np.nan, 20019999],
+ [Timestamp("20190813"), np.nan, np.nan, np.nan],
],
],
)
def test_to_datetime_format_YYYYMMDD_overflow(self, input_s, expected):
# GH 25512
# format='%Y%m%d', errors='coerce'
+ input_s = Series(input_s)
result = to_datetime(input_s, format="%Y%m%d", errors="coerce")
+ expected = Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data, format, expected",
[
- ([pd.NA], "%Y%m%d%H%M%S", DatetimeIndex(["NaT"])),
- ([pd.NA], None, DatetimeIndex(["NaT"])),
+ ([pd.NA], "%Y%m%d%H%M%S", ["NaT"]),
+ ([pd.NA], None, ["NaT"]),
(
[pd.NA, "20210202202020"],
"%Y%m%d%H%M%S",
- DatetimeIndex(["NaT", "2021-02-02 20:20:20"]),
+ ["NaT", "2021-02-02 20:20:20"],
),
- (["201010", pd.NA], "%y%m%d", DatetimeIndex(["2020-10-10", "NaT"])),
- (["201010", pd.NA], "%d%m%y", DatetimeIndex(["2010-10-20", "NaT"])),
- ([None, np.nan, pd.NA], None, DatetimeIndex(["NaT", "NaT", "NaT"])),
- ([None, np.nan, pd.NA], "%Y%m%d", DatetimeIndex(["NaT", "NaT", "NaT"])),
+ (["201010", pd.NA], "%y%m%d", ["2020-10-10", "NaT"]),
+ (["201010", pd.NA], "%d%m%y", ["2010-10-20", "NaT"]),
+ ([None, np.nan, pd.NA], None, ["NaT", "NaT", "NaT"]),
+ ([None, np.nan, pd.NA], "%Y%m%d", ["NaT", "NaT", "NaT"]),
],
)
def test_to_datetime_with_NA(self, data, format, expected):
# GH#42957
result = to_datetime(data, format=format)
+ expected = DatetimeIndex(expected)
tm.assert_index_equal(result, expected)
def test_to_datetime_with_NA_with_warning(self):
@@ -422,12 +425,12 @@ def test_parse_nanoseconds_with_formula(self, cache, arg):
@pytest.mark.parametrize(
"value,fmt,expected",
[
- ["2009324", "%Y%W%w", Timestamp("2009-08-13")],
- ["2013020", "%Y%U%w", Timestamp("2013-01-13")],
+ ["2009324", "%Y%W%w", "2009-08-13"],
+ ["2013020", "%Y%U%w", "2013-01-13"],
],
)
def test_to_datetime_format_weeks(self, value, fmt, expected, cache):
- assert to_datetime(value, format=fmt, cache=cache) == expected
+ assert to_datetime(value, format=fmt, cache=cache) == Timestamp(expected)
@pytest.mark.parametrize(
"fmt,dates,expected_dates",
@@ -715,24 +718,20 @@ def test_to_datetime_mixed_datetime_and_string_with_format_mixed_offsets_utc_fal
[
pytest.param(
"%Y-%m-%d %H:%M:%S%z",
- Index(
- [
- Timestamp("2000-01-01 09:00:00+0100", tz="UTC+01:00"),
- Timestamp("2000-01-02 02:00:00+0200", tz="UTC+02:00"),
- NaT,
- ]
- ),
+ [
+ Timestamp("2000-01-01 09:00:00+0100", tz="UTC+01:00"),
+ Timestamp("2000-01-02 02:00:00+0200", tz="UTC+02:00"),
+ NaT,
+ ],
id="ISO8601, non-UTC",
),
pytest.param(
"%Y-%d-%m %H:%M:%S%z",
- Index(
- [
- Timestamp("2000-01-01 09:00:00+0100", tz="UTC+01:00"),
- Timestamp("2000-02-01 02:00:00+0200", tz="UTC+02:00"),
- NaT,
- ]
- ),
+ [
+ Timestamp("2000-01-01 09:00:00+0100", tz="UTC+01:00"),
+ Timestamp("2000-02-01 02:00:00+0200", tz="UTC+02:00"),
+ NaT,
+ ],
id="non-ISO8601, non-UTC",
),
],
@@ -747,6 +746,7 @@ def test_to_datetime_mixed_offsets_with_none_tz(self, fmt, expected):
format=fmt,
utc=False,
)
+ expected = Index(expected)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py
index c452382ec572b..7f1d13230302b 100644
--- a/pandas/tests/tools/test_to_numeric.py
+++ b/pandas/tests/tools/test_to_numeric.py
@@ -598,21 +598,12 @@ def test_downcast_float64_to_float32():
assert series.dtype == result.dtype
-@pytest.mark.parametrize(
- "ser,expected",
- [
- (
- Series([0, 9223372036854775808]),
- Series([0, 9223372036854775808], dtype=np.uint64),
- )
- ],
-)
-def test_downcast_uint64(ser, expected):
+def test_downcast_uint64():
# see gh-14422:
# BUG: to_numeric doesn't work uint64 numbers
-
+ ser = Series([0, 9223372036854775808])
result = to_numeric(ser, downcast="unsigned")
-
+ expected = Series([0, 9223372036854775808], dtype=np.uint64)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index 036462674c270..073533c38f430 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -98,13 +98,12 @@ def test_to_timedelta_oob_non_nano(self):
with pytest.raises(OutOfBoundsTimedelta, match=msg):
TimedeltaArray._from_sequence(arr, dtype="m8[s]")
- @pytest.mark.parametrize(
- "arg", [np.arange(10).reshape(2, 5), pd.DataFrame(np.arange(10).reshape(2, 5))]
- )
+ @pytest.mark.parametrize("box", [lambda x: x, pd.DataFrame])
@pytest.mark.parametrize("errors", ["ignore", "raise", "coerce"])
@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning")
- def test_to_timedelta_dataframe(self, arg, errors):
+ def test_to_timedelta_dataframe(self, box, errors):
# GH 11776
+ arg = box(np.arange(10).reshape(2, 5))
with pytest.raises(TypeError, match="1-d array"):
to_timedelta(arg, errors=errors)
diff --git a/pandas/tests/tslibs/test_array_to_datetime.py b/pandas/tests/tslibs/test_array_to_datetime.py
index 632d3b4cc3c84..3798e68b3780b 100644
--- a/pandas/tests/tslibs/test_array_to_datetime.py
+++ b/pandas/tests/tslibs/test_array_to_datetime.py
@@ -300,21 +300,14 @@ class SubDatetime(datetime):
pass
-@pytest.mark.parametrize(
- "data,expected",
- [
- ([SubDatetime(2000, 1, 1)], ["2000-01-01T00:00:00.000000000"]),
- ([datetime(2000, 1, 1)], ["2000-01-01T00:00:00.000000000"]),
- ([Timestamp(2000, 1, 1)], ["2000-01-01T00:00:00.000000000"]),
- ],
-)
-def test_datetime_subclass(data, expected):
+@pytest.mark.parametrize("klass", [SubDatetime, datetime, Timestamp])
+def test_datetime_subclass(klass):
# GH 25851
# ensure that subclassed datetime works with
# array_to_datetime
- arr = np.array(data, dtype=object)
+ arr = np.array([klass(2000, 1, 1)], dtype=object)
result, _ = tslib.array_to_datetime(arr)
- expected = np.array(expected, dtype="M8[ns]")
+ expected = np.array(["2000-01-01T00:00:00.000000000"], dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py
index 9d7a5e906c3c3..6a0b86cbd03ee 100644
--- a/pandas/tests/tslibs/test_conversion.py
+++ b/pandas/tests/tslibs/test_conversion.py
@@ -86,11 +86,12 @@ def test_tz_convert_single_matches_tz_convert(tz_aware_fixture, freq):
@pytest.mark.parametrize(
"arr",
[
- pytest.param(np.array([], dtype=np.int64), id="empty"),
- pytest.param(np.array([iNaT], dtype=np.int64), id="all_nat"),
+ pytest.param([], id="empty"),
+ pytest.param([iNaT], id="all_nat"),
],
)
def test_tz_convert_corner(arr):
+ arr = np.array([iNaT], dtype=np.int64)
result = tz_convert_from_utc(arr, timezones.maybe_get_tz("Asia/Tokyo"))
tm.assert_numpy_array_equal(result, arr)
diff --git a/pandas/tests/tslibs/test_liboffsets.py b/pandas/tests/tslibs/test_liboffsets.py
index c189a431146a7..6ffc065bb61cf 100644
--- a/pandas/tests/tslibs/test_liboffsets.py
+++ b/pandas/tests/tslibs/test_liboffsets.py
@@ -127,10 +127,8 @@ def test_get_day_of_month_error():
roll_qtrday(dt, n=3, month=11, day_opt=day_opt, modby=12)
-@pytest.mark.parametrize(
- "month",
- [3, 5], # (other.month % 3) < (month % 3) # (other.month % 3) > (month % 3)
-)
+@pytest.mark.parametrize("month", [3, 5])
+# (other.month % 3) < (month % 3) # (other.month % 3) > (month % 3)
@pytest.mark.parametrize("n", [4, -3])
def test_roll_qtr_day_not_mod_unequal(day_opt, month, n):
expected = {3: {-3: -2, 4: 4}, 5: {-3: -3, 4: 3}}
diff --git a/pandas/tests/util/test_assert_almost_equal.py b/pandas/tests/util/test_assert_almost_equal.py
index 4e692084f7352..1688e77ccd2d7 100644
--- a/pandas/tests/util/test_assert_almost_equal.py
+++ b/pandas/tests/util/test_assert_almost_equal.py
@@ -257,18 +257,14 @@ def test_assert_almost_equal_strings():
_assert_almost_equal_both("abc", "abc")
-@pytest.mark.parametrize(
- "a,b", [("abc", "abcd"), ("abc", "abd"), ("abc", 1), ("abc", [1])]
-)
-def test_assert_not_almost_equal_strings(a, b):
- _assert_not_almost_equal_both(a, b)
+@pytest.mark.parametrize("b", ["abcd", "abd", 1, [1]])
+def test_assert_not_almost_equal_strings(b):
+ _assert_not_almost_equal_both("abc", b)
-@pytest.mark.parametrize(
- "a,b", [([1, 2, 3], [1, 2, 3]), (np.array([1, 2, 3]), np.array([1, 2, 3]))]
-)
-def test_assert_almost_equal_iterables(a, b):
- _assert_almost_equal_both(a, b)
+@pytest.mark.parametrize("box", [list, np.array])
+def test_assert_almost_equal_iterables(box):
+ _assert_almost_equal_both(box([1, 2, 3]), box([1, 2, 3]))
@pytest.mark.parametrize(
diff --git a/pandas/tests/util/test_assert_categorical_equal.py b/pandas/tests/util/test_assert_categorical_equal.py
index d07bbcbc460a1..c171564574708 100644
--- a/pandas/tests/util/test_assert_categorical_equal.py
+++ b/pandas/tests/util/test_assert_categorical_equal.py
@@ -4,11 +4,9 @@
import pandas._testing as tm
-@pytest.mark.parametrize(
- "c",
- [Categorical([1, 2, 3, 4]), Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4, 5])],
-)
+@pytest.mark.parametrize("c", [None, [1, 2, 3, 4, 5]])
def test_categorical_equal(c):
+ c = Categorical([1, 2, 3, 4], categories=c)
tm.assert_categorical_equal(c, c)
diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py
index e244615ea4629..a3f4b091713f9 100644
--- a/pandas/tests/util/test_assert_frame_equal.py
+++ b/pandas/tests/util/test_assert_frame_equal.py
@@ -49,11 +49,13 @@ def test_frame_equal_row_order_mismatch(check_like, frame_or_series):
@pytest.mark.parametrize(
"df1,df2",
[
- (DataFrame({"A": [1, 2, 3]}), DataFrame({"A": [1, 2, 3, 4]})),
- (DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}), DataFrame({"A": [1, 2, 3]})),
+ ({"A": [1, 2, 3]}, {"A": [1, 2, 3, 4]}),
+ ({"A": [1, 2, 3], "B": [4, 5, 6]}, {"A": [1, 2, 3]}),
],
)
def test_frame_equal_shape_mismatch(df1, df2, frame_or_series):
+ df1 = DataFrame(df1)
+ df2 = DataFrame(df2)
msg = f"{frame_or_series.__name__} are different"
with pytest.raises(AssertionError, match=msg):
@@ -170,8 +172,8 @@ def test_frame_equal_block_mismatch(by_blocks_fixture, frame_or_series):
"df1,df2,msg",
[
(
- DataFrame({"A": ["á", "à", "ä"], "E": ["é", "è", "ë"]}),
- DataFrame({"A": ["á", "à", "ä"], "E": ["é", "è", "e̊"]}),
+ {"A": ["á", "à", "ä"], "E": ["é", "è", "ë"]},
+ {"A": ["á", "à", "ä"], "E": ["é", "è", "e̊"]},
"""{obj}\\.iloc\\[:, 1\\] \\(column name="E"\\) are different
{obj}\\.iloc\\[:, 1\\] \\(column name="E"\\) values are different \\(33\\.33333 %\\)
@@ -180,8 +182,8 @@ def test_frame_equal_block_mismatch(by_blocks_fixture, frame_or_series):
\\[right\\]: \\[é, è, e̊\\]""",
),
(
- DataFrame({"A": ["á", "à", "ä"], "E": ["é", "è", "ë"]}),
- DataFrame({"A": ["a", "a", "a"], "E": ["e", "e", "e"]}),
+ {"A": ["á", "à", "ä"], "E": ["é", "è", "ë"]},
+ {"A": ["a", "a", "a"], "E": ["e", "e", "e"]},
"""{obj}\\.iloc\\[:, 0\\] \\(column name="A"\\) are different
{obj}\\.iloc\\[:, 0\\] \\(column name="A"\\) values are different \\(100\\.0 %\\)
@@ -196,6 +198,8 @@ def test_frame_equal_unicode(df1, df2, msg, by_blocks_fixture, frame_or_series):
#
# Test ensures that `tm.assert_frame_equals` raises the right exception
# when comparing DataFrames containing differing unicode objects.
+ df1 = DataFrame(df1)
+ df2 = DataFrame(df2)
msg = msg.format(obj=frame_or_series.__name__)
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(
@@ -256,12 +260,7 @@ def test_assert_frame_equal_ignore_extension_dtype_mismatch_cross_class():
@pytest.mark.parametrize(
- "dtype",
- [
- ("timedelta64[ns]"),
- ("datetime64[ns, UTC]"),
- ("Period[D]"),
- ],
+ "dtype", ["timedelta64[ns]", "datetime64[ns, UTC]", "Period[D]"]
)
def test_assert_frame_equal_datetime_like_dtype_mismatch(dtype):
df1 = DataFrame({"a": []}, dtype=dtype)
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index 1e7fdd920e365..a54e0071aa006 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -222,12 +222,12 @@ def test_hash_pandas_series_diff_index(series):
assert not (a == b).all()
-@pytest.mark.parametrize(
- "obj", [Series([], dtype="float64"), Series([], dtype="object"), Index([])]
-)
-def test_hash_pandas_empty_object(obj, index):
+@pytest.mark.parametrize("klass", [Index, Series])
+@pytest.mark.parametrize("dtype", ["float64", "object"])
+def test_hash_pandas_empty_object(klass, dtype, index):
# These are by-definition the same with
# or without the index as the data is empty.
+ obj = klass([], dtype=dtype)
a = hash_pandas_object(obj, index=index)
b = hash_pandas_object(obj, index=index)
tm.assert_series_equal(a, b)
@@ -236,9 +236,9 @@ def test_hash_pandas_empty_object(obj, index):
@pytest.mark.parametrize(
"s1",
[
- Series(["a", "b", "c", "d"]),
- Series([1000, 2000, 3000, 4000]),
- Series(pd.date_range(0, periods=4)),
+ ["a", "b", "c", "d"],
+ [1000, 2000, 3000, 4000],
+ pd.date_range(0, periods=4),
],
)
@pytest.mark.parametrize("categorize", [True, False])
@@ -247,6 +247,7 @@ def test_categorical_consistency(s1, categorize):
#
# Check that categoricals hash consistent with their values,
# not codes. This should work for categoricals of any dtype.
+ s1 = Series(s1)
s2 = s1.astype("category").cat.set_categories(s1)
s3 = s2.cat.set_categories(list(reversed(s1)))
diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py
index 058e5ce36e53e..d7c72105a673b 100644
--- a/pandas/tests/window/test_ewm.py
+++ b/pandas/tests/window/test_ewm.py
@@ -271,67 +271,67 @@ def test_ewma_nan_handling():
"s, adjust, ignore_na, w",
[
(
- Series([np.nan, 1.0, 101.0]),
+ [np.nan, 1.0, 101.0],
True,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
- Series([np.nan, 1.0, 101.0]),
+ [np.nan, 1.0, 101.0],
True,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
- Series([np.nan, 1.0, 101.0]),
+ [np.nan, 1.0, 101.0],
False,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
- Series([np.nan, 1.0, 101.0]),
+ [np.nan, 1.0, 101.0],
False,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
- Series([1.0, np.nan, 101.0]),
+ [1.0, np.nan, 101.0],
True,
False,
[(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, 1.0],
),
(
- Series([1.0, np.nan, 101.0]),
+ [1.0, np.nan, 101.0],
True,
True,
[(1.0 - (1.0 / (1.0 + 2.0))), np.nan, 1.0],
),
(
- Series([1.0, np.nan, 101.0]),
+ [1.0, np.nan, 101.0],
False,
False,
[(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, (1.0 / (1.0 + 2.0))],
),
(
- Series([1.0, np.nan, 101.0]),
+ [1.0, np.nan, 101.0],
False,
True,
[(1.0 - (1.0 / (1.0 + 2.0))), np.nan, (1.0 / (1.0 + 2.0))],
),
(
- Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
+ [np.nan, 1.0, np.nan, np.nan, 101.0, np.nan],
True,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))) ** 3, np.nan, np.nan, 1.0, np.nan],
),
(
- Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
+ [np.nan, 1.0, np.nan, np.nan, 101.0, np.nan],
True,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), np.nan, np.nan, 1.0, np.nan],
),
(
- Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
+ [np.nan, 1.0, np.nan, np.nan, 101.0, np.nan],
False,
False,
[
@@ -344,7 +344,7 @@ def test_ewma_nan_handling():
],
),
(
- Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
+ [np.nan, 1.0, np.nan, np.nan, 101.0, np.nan],
False,
True,
[
@@ -357,7 +357,7 @@ def test_ewma_nan_handling():
],
),
(
- Series([1.0, np.nan, 101.0, 50.0]),
+ [1.0, np.nan, 101.0, 50.0],
True,
False,
[
@@ -368,7 +368,7 @@ def test_ewma_nan_handling():
],
),
(
- Series([1.0, np.nan, 101.0, 50.0]),
+ [1.0, np.nan, 101.0, 50.0],
True,
True,
[
@@ -379,7 +379,7 @@ def test_ewma_nan_handling():
],
),
(
- Series([1.0, np.nan, 101.0, 50.0]),
+ [1.0, np.nan, 101.0, 50.0],
False,
False,
[
@@ -391,7 +391,7 @@ def test_ewma_nan_handling():
],
),
(
- Series([1.0, np.nan, 101.0, 50.0]),
+ [1.0, np.nan, 101.0, 50.0],
False,
True,
[
@@ -405,6 +405,7 @@ def test_ewma_nan_handling():
)
def test_ewma_nan_handling_cases(s, adjust, ignore_na, w):
# GH 7603
+ s = Series(s)
expected = (s.multiply(w).cumsum() / Series(w).cumsum()).ffill()
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py
index 10d0afb5a2412..9174307cec5d1 100644
--- a/pandas/tests/window/test_expanding.py
+++ b/pandas/tests/window/test_expanding.py
@@ -127,7 +127,7 @@ def test_expanding_count_with_min_periods_exceeding_series_length(frame_or_serie
"df,expected,min_periods",
[
(
- DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
+ {"A": [1, 2, 3], "B": [4, 5, 6]},
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
@@ -136,7 +136,7 @@ def test_expanding_count_with_min_periods_exceeding_series_length(frame_or_serie
3,
),
(
- DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
+ {"A": [1, 2, 3], "B": [4, 5, 6]},
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
@@ -145,7 +145,7 @@ def test_expanding_count_with_min_periods_exceeding_series_length(frame_or_serie
2,
),
(
- DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
+ {"A": [1, 2, 3], "B": [4, 5, 6]},
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
@@ -153,10 +153,10 @@ def test_expanding_count_with_min_periods_exceeding_series_length(frame_or_serie
],
1,
),
- (DataFrame({"A": [1], "B": [4]}), [], 2),
- (DataFrame(), [({}, [])], 1),
+ ({"A": [1], "B": [4]}, [], 2),
+ (None, [({}, [])], 1),
(
- DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
+ {"A": [1, np.nan, 3], "B": [np.nan, 5, 6]},
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
@@ -165,7 +165,7 @@ def test_expanding_count_with_min_periods_exceeding_series_length(frame_or_serie
3,
),
(
- DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
+ {"A": [1, np.nan, 3], "B": [np.nan, 5, 6]},
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
@@ -174,7 +174,7 @@ def test_expanding_count_with_min_periods_exceeding_series_length(frame_or_serie
2,
),
(
- DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
+ {"A": [1, np.nan, 3], "B": [np.nan, 5, 6]},
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
@@ -186,6 +186,7 @@ def test_expanding_count_with_min_periods_exceeding_series_length(frame_or_serie
)
def test_iter_expanding_dataframe(df, expected, min_periods):
# GH 11704
+ df = DataFrame(df)
expected = [DataFrame(values, index=index) for (values, index) in expected]
for expected, actual in zip(expected, df.expanding(min_periods)):
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 7ab6e7863ad81..2a3a0a54d0767 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -721,7 +721,7 @@ def test_rolling_count_default_min_periods_with_null_values(frame_or_series):
"df,expected,window,min_periods",
[
(
- DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
+ {"A": [1, 2, 3], "B": [4, 5, 6]},
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
@@ -731,7 +731,7 @@ def test_rolling_count_default_min_periods_with_null_values(frame_or_series):
None,
),
(
- DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
+ {"A": [1, 2, 3], "B": [4, 5, 6]},
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
@@ -741,7 +741,7 @@ def test_rolling_count_default_min_periods_with_null_values(frame_or_series):
1,
),
(
- DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
+ {"A": [1, 2, 3], "B": [4, 5, 6]},
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
@@ -751,7 +751,7 @@ def test_rolling_count_default_min_periods_with_null_values(frame_or_series):
2,
),
(
- DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
+ {"A": [1, 2, 3], "B": [4, 5, 6]},
[
({"A": [1], "B": [4]}, [0]),
({"A": [2], "B": [5]}, [1]),
@@ -761,7 +761,7 @@ def test_rolling_count_default_min_periods_with_null_values(frame_or_series):
1,
),
(
- DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
+ {"A": [1, 2, 3], "B": [4, 5, 6]},
[
({"A": [1], "B": [4]}, [0]),
({"A": [2], "B": [5]}, [1]),
@@ -770,11 +770,11 @@ def test_rolling_count_default_min_periods_with_null_values(frame_or_series):
1,
0,
),
- (DataFrame({"A": [1], "B": [4]}), [], 2, None),
- (DataFrame({"A": [1], "B": [4]}), [], 2, 1),
- (DataFrame(), [({}, [])], 2, None),
+ ({"A": [1], "B": [4]}, [], 2, None),
+ ({"A": [1], "B": [4]}, [], 2, 1),
+ (None, [({}, [])], 2, None),
(
- DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
+ {"A": [1, np.nan, 3], "B": [np.nan, 5, 6]},
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
@@ -787,6 +787,7 @@ def test_rolling_count_default_min_periods_with_null_values(frame_or_series):
)
def test_iter_rolling_dataframe(df, expected, window, min_periods):
# GH 11704
+ df = DataFrame(df)
expected = [DataFrame(values, index=index) for (values, index) in expected]
for expected, actual in zip(expected, df.rolling(window, min_periods=min_periods)):
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/56787 | 2024-01-08T21:54:16Z | 2024-01-11T16:56:07Z | 2024-01-11T16:56:06Z | 2024-01-11T16:56:14Z |
Backport PR #56724 on branch 2.2.x (TST: Don't ignore tolerance for integer series) | diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index d0f38c85868d4..3de982498e996 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -10,6 +10,7 @@
import numpy as np
+from pandas._libs import lib
from pandas._libs.missing import is_matching_na
from pandas._libs.sparse import SparseIndex
import pandas._libs.testing as _testing
@@ -698,9 +699,9 @@ def assert_extension_array_equal(
right,
check_dtype: bool | Literal["equiv"] = True,
index_values=None,
- check_exact: bool = False,
- rtol: float = 1.0e-5,
- atol: float = 1.0e-8,
+ check_exact: bool | lib.NoDefault = lib.no_default,
+ rtol: float | lib.NoDefault = lib.no_default,
+ atol: float | lib.NoDefault = lib.no_default,
obj: str = "ExtensionArray",
) -> None:
"""
@@ -715,7 +716,12 @@ def assert_extension_array_equal(
index_values : Index | numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_exact : bool, default False
- Whether to compare number exactly. Only takes effect for float dtypes.
+ Whether to compare number exactly.
+
+ .. versionchanged:: 2.2.0
+
+ Defaults to True for integer dtypes if none of
+ ``check_exact``, ``rtol`` and ``atol`` are specified.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
atol : float, default 1e-8
@@ -739,6 +745,23 @@ def assert_extension_array_equal(
>>> b, c = a.array, a.array
>>> tm.assert_extension_array_equal(b, c)
"""
+ if (
+ check_exact is lib.no_default
+ and rtol is lib.no_default
+ and atol is lib.no_default
+ ):
+ check_exact = (
+ is_numeric_dtype(left.dtype)
+ and not is_float_dtype(left.dtype)
+ or is_numeric_dtype(right.dtype)
+ and not is_float_dtype(right.dtype)
+ )
+ elif check_exact is lib.no_default:
+ check_exact = False
+
+ rtol = rtol if rtol is not lib.no_default else 1.0e-5
+ atol = atol if atol is not lib.no_default else 1.0e-8
+
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
@@ -784,10 +807,7 @@ def assert_extension_array_equal(
left_valid = left[~left_na].to_numpy(dtype=object)
right_valid = right[~right_na].to_numpy(dtype=object)
- if check_exact or (
- (is_numeric_dtype(left.dtype) and not is_float_dtype(left.dtype))
- or (is_numeric_dtype(right.dtype) and not is_float_dtype(right.dtype))
- ):
+ if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj=obj, index_values=index_values
)
@@ -811,14 +831,14 @@ def assert_series_equal(
check_index_type: bool | Literal["equiv"] = "equiv",
check_series_type: bool = True,
check_names: bool = True,
- check_exact: bool = False,
+ check_exact: bool | lib.NoDefault = lib.no_default,
check_datetimelike_compat: bool = False,
check_categorical: bool = True,
check_category_order: bool = True,
check_freq: bool = True,
check_flags: bool = True,
- rtol: float = 1.0e-5,
- atol: float = 1.0e-8,
+ rtol: float | lib.NoDefault = lib.no_default,
+ atol: float | lib.NoDefault = lib.no_default,
obj: str = "Series",
*,
check_index: bool = True,
@@ -841,7 +861,12 @@ def assert_series_equal(
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
- Whether to compare number exactly. Only takes effect for float dtypes.
+ Whether to compare number exactly.
+
+ .. versionchanged:: 2.2.0
+
+ Defaults to True for integer dtypes if none of
+ ``check_exact``, ``rtol`` and ``atol`` are specified.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
@@ -877,6 +902,22 @@ def assert_series_equal(
>>> tm.assert_series_equal(a, b)
"""
__tracebackhide__ = True
+ if (
+ check_exact is lib.no_default
+ and rtol is lib.no_default
+ and atol is lib.no_default
+ ):
+ check_exact = (
+ is_numeric_dtype(left.dtype)
+ and not is_float_dtype(left.dtype)
+ or is_numeric_dtype(right.dtype)
+ and not is_float_dtype(right.dtype)
+ )
+ elif check_exact is lib.no_default:
+ check_exact = False
+
+ rtol = rtol if rtol is not lib.no_default else 1.0e-5
+ atol = atol if atol is not lib.no_default else 1.0e-8
if not check_index and check_like:
raise ValueError("check_like must be False if check_index is False")
@@ -931,10 +972,7 @@ def assert_series_equal(
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
- if check_exact or (
- (is_numeric_dtype(left.dtype) and not is_float_dtype(left.dtype))
- or (is_numeric_dtype(right.dtype) and not is_float_dtype(right.dtype))
- ):
+ if check_exact:
left_values = left._values
right_values = right._values
# Only check exact if dtype is numeric
@@ -1061,14 +1099,14 @@ def assert_frame_equal(
check_frame_type: bool = True,
check_names: bool = True,
by_blocks: bool = False,
- check_exact: bool = False,
+ check_exact: bool | lib.NoDefault = lib.no_default,
check_datetimelike_compat: bool = False,
check_categorical: bool = True,
check_like: bool = False,
check_freq: bool = True,
check_flags: bool = True,
- rtol: float = 1.0e-5,
- atol: float = 1.0e-8,
+ rtol: float | lib.NoDefault = lib.no_default,
+ atol: float | lib.NoDefault = lib.no_default,
obj: str = "DataFrame",
) -> None:
"""
@@ -1103,7 +1141,12 @@ def assert_frame_equal(
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
- Whether to compare number exactly. Only takes effect for float dtypes.
+ Whether to compare number exactly.
+
+ .. versionchanged:: 2.2.0
+
+ Defaults to True for integer dtypes if none of
+ ``check_exact``, ``rtol`` and ``atol`` are specified.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
@@ -1158,6 +1201,9 @@ def assert_frame_equal(
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
+ _rtol = rtol if rtol is not lib.no_default else 1.0e-5
+ _atol = atol if atol is not lib.no_default else 1.0e-8
+ _check_exact = check_exact if check_exact is not lib.no_default else False
# instance validation
_check_isinstance(left, right, DataFrame)
@@ -1181,11 +1227,11 @@ def assert_frame_equal(
right.index,
exact=check_index_type,
check_names=check_names,
- check_exact=check_exact,
+ check_exact=_check_exact,
check_categorical=check_categorical,
check_order=not check_like,
- rtol=rtol,
- atol=atol,
+ rtol=_rtol,
+ atol=_atol,
obj=f"{obj}.index",
)
@@ -1195,11 +1241,11 @@ def assert_frame_equal(
right.columns,
exact=check_column_type,
check_names=check_names,
- check_exact=check_exact,
+ check_exact=_check_exact,
check_categorical=check_categorical,
check_order=not check_like,
- rtol=rtol,
- atol=atol,
+ rtol=_rtol,
+ atol=_atol,
obj=f"{obj}.columns",
)
diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py
index c4ffc197298f0..784a0347cf92b 100644
--- a/pandas/tests/util/test_assert_series_equal.py
+++ b/pandas/tests/util/test_assert_series_equal.py
@@ -462,3 +462,15 @@ def test_ea_and_numpy_no_dtype_check(val, check_exact, dtype):
left = Series([1, 2, val], dtype=dtype)
right = Series(pd.array([1, 2, val]))
tm.assert_series_equal(left, right, check_dtype=False, check_exact=check_exact)
+
+
+def test_assert_series_equal_int_tol():
+ # GH#56646
+ left = Series([81, 18, 121, 38, 74, 72, 81, 81, 146, 81, 81, 170, 74, 74])
+ right = Series([72, 9, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72])
+ tm.assert_series_equal(left, right, rtol=1.5)
+
+ tm.assert_frame_equal(left.to_frame(), right.to_frame(), rtol=1.5)
+ tm.assert_extension_array_equal(
+ left.astype("Int64").values, right.astype("Int64").values, rtol=1.5
+ )
| Backport PR #56724: TST: Don't ignore tolerance for integer series | https://api.github.com/repos/pandas-dev/pandas/pulls/56786 | 2024-01-08T21:40:12Z | 2024-01-08T23:27:06Z | 2024-01-08T23:27:06Z | 2024-01-08T23:27:06Z |
Backport PR #56766 on branch 2.2.x (BUG: IntervalIndex.from_tuples raising with masked subtype) | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index b138e91b41661..21cab17fd58b2 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -816,6 +816,7 @@ Interval
- Bug in :class:`Interval` ``__repr__`` not displaying UTC offsets for :class:`Timestamp` bounds. Additionally the hour, minute and second components will now be shown (:issue:`55015`)
- Bug in :meth:`IntervalIndex.factorize` and :meth:`Series.factorize` with :class:`IntervalDtype` with datetime64 or timedelta64 intervals not preserving non-nanosecond units (:issue:`56099`)
- Bug in :meth:`IntervalIndex.from_arrays` when passed ``datetime64`` or ``timedelta64`` arrays with mismatched resolutions constructing an invalid ``IntervalArray`` object (:issue:`55714`)
+- Bug in :meth:`IntervalIndex.from_tuples` raising if subtype is a nullable extension dtype (:issue:`56765`)
- Bug in :meth:`IntervalIndex.get_indexer` with datetime or timedelta intervals incorrectly matching on integer targets (:issue:`47772`)
- Bug in :meth:`IntervalIndex.get_indexer` with timezone-aware datetime intervals incorrectly matching on a sequence of timezone-naive targets (:issue:`47772`)
- Bug in setting values on a :class:`Series` with an :class:`IntervalIndex` using a slice incorrectly raising (:issue:`54722`)
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 904c87c68e211..e69f996441703 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -79,6 +79,7 @@
unique,
value_counts_internal as value_counts,
)
+from pandas.core.arrays import ArrowExtensionArray
from pandas.core.arrays.base import (
ExtensionArray,
_extension_array_shared_docs,
@@ -370,11 +371,18 @@ def _ensure_simple_new_inputs(
right = ensure_wrapped_if_datetimelike(right)
right = extract_array(right, extract_numpy=True)
- lbase = getattr(left, "_ndarray", left).base
- rbase = getattr(right, "_ndarray", right).base
- if lbase is not None and lbase is rbase:
- # If these share data, then setitem could corrupt our IA
- right = right.copy()
+ if isinstance(left, ArrowExtensionArray) or isinstance(
+ right, ArrowExtensionArray
+ ):
+ pass
+ else:
+ lbase = getattr(left, "_ndarray", left)
+ lbase = getattr(lbase, "_data", lbase).base
+ rbase = getattr(right, "_ndarray", right)
+ rbase = getattr(rbase, "_data", rbase).base
+ if lbase is not None and lbase is rbase:
+ # If these share data, then setitem could corrupt our IA
+ right = right.copy()
dtype = IntervalDtype(left.dtype, closed=closed)
diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py
index 778c07b46e57c..e47a014f18045 100644
--- a/pandas/tests/indexes/interval/test_constructors.py
+++ b/pandas/tests/indexes/interval/test_constructors.py
@@ -3,6 +3,8 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
from pandas.core.dtypes.common import is_unsigned_integer_dtype
from pandas.core.dtypes.dtypes import IntervalDtype
@@ -517,3 +519,17 @@ def test_dtype_closed_mismatch():
with pytest.raises(ValueError, match=msg):
IntervalArray([], dtype=dtype, closed="neither")
+
+
+@pytest.mark.parametrize(
+ "dtype",
+ ["Float64", pytest.param("float64[pyarrow]", marks=td.skip_if_no("pyarrow"))],
+)
+def test_ea_dtype(dtype):
+ # GH#56765
+ bins = [(0.0, 0.4), (0.4, 0.6)]
+ interval_dtype = IntervalDtype(subtype=dtype, closed="left")
+ result = IntervalIndex.from_tuples(bins, closed="left", dtype=interval_dtype)
+ assert result.dtype == interval_dtype
+ expected = IntervalIndex.from_tuples(bins, closed="left").astype(interval_dtype)
+ tm.assert_index_equal(result, expected)
| Backport PR #56766: BUG: IntervalIndex.from_tuples raising with masked subtype | https://api.github.com/repos/pandas-dev/pandas/pulls/56785 | 2024-01-08T21:34:35Z | 2024-01-08T22:26:48Z | 2024-01-08T22:26:48Z | 2024-01-08T22:26:48Z |
Backport PR #56771 on branch 2.2.x (BUG: to_stata not handling ea dtypes correctly) | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index b138e91b41661..c5916c5df7596 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -847,6 +847,7 @@ I/O
- Bug in :func:`read_json` not handling dtype conversion properly if ``infer_string`` is set (:issue:`56195`)
- Bug in :meth:`DataFrame.to_excel`, with ``OdsWriter`` (``ods`` files) writing Boolean/string value (:issue:`54994`)
- Bug in :meth:`DataFrame.to_hdf` and :func:`read_hdf` with ``datetime64`` dtypes with non-nanosecond resolution failing to round-trip correctly (:issue:`55622`)
+- Bug in :meth:`DataFrame.to_stata` raising for extension dtypes (:issue:`54671`)
- Bug in :meth:`~pandas.read_excel` with ``engine="odf"`` (``ods`` files) when a string cell contains an annotation (:issue:`55200`)
- Bug in :meth:`~pandas.read_excel` with an ODS file without cached formatted cell for float values (:issue:`55219`)
- Bug where :meth:`DataFrame.to_json` would raise an ``OverflowError`` instead of a ``TypeError`` with unsupported NumPy types (:issue:`55403`)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index a4d8054ea4f8c..4abf9af185a01 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -47,9 +47,11 @@
)
from pandas.util._exceptions import find_stack_level
+from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.common import (
ensure_object,
is_numeric_dtype,
+ is_string_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
@@ -62,8 +64,6 @@
to_datetime,
to_timedelta,
)
-from pandas.core.arrays.boolean import BooleanDtype
-from pandas.core.arrays.integer import IntegerDtype
from pandas.core.frame import DataFrame
from pandas.core.indexes.base import Index
from pandas.core.indexes.range import RangeIndex
@@ -591,17 +591,22 @@ def _cast_to_stata_types(data: DataFrame) -> DataFrame:
for col in data:
# Cast from unsupported types to supported types
- is_nullable_int = isinstance(data[col].dtype, (IntegerDtype, BooleanDtype))
+ is_nullable_int = (
+ isinstance(data[col].dtype, ExtensionDtype)
+ and data[col].dtype.kind in "iub"
+ )
# We need to find orig_missing before altering data below
orig_missing = data[col].isna()
if is_nullable_int:
- missing_loc = data[col].isna()
- if missing_loc.any():
- # Replace with always safe value
- fv = 0 if isinstance(data[col].dtype, IntegerDtype) else False
- data.loc[missing_loc, col] = fv
+ fv = 0 if data[col].dtype.kind in "iu" else False
# Replace with NumPy-compatible column
- data[col] = data[col].astype(data[col].dtype.numpy_dtype)
+ data[col] = data[col].fillna(fv).astype(data[col].dtype.numpy_dtype)
+ elif isinstance(data[col].dtype, ExtensionDtype):
+ if getattr(data[col].dtype, "numpy_dtype", None) is not None:
+ data[col] = data[col].astype(data[col].dtype.numpy_dtype)
+ elif is_string_dtype(data[col].dtype):
+ data[col] = data[col].astype("object")
+
dtype = data[col].dtype
empty_df = data.shape[0] == 0
for c_data in conversion_data:
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 3e4e1a107da9d..6bd74faa8a3db 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -11,6 +11,8 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
import pandas as pd
from pandas import CategoricalDtype
import pandas._testing as tm
@@ -1921,6 +1923,41 @@ def test_writer_118_exceptions(self):
with pytest.raises(ValueError, match="You must use version 119"):
StataWriterUTF8(path, df, version=118)
+ @pytest.mark.parametrize(
+ "dtype_backend",
+ ["numpy_nullable", pytest.param("pyarrow", marks=td.skip_if_no("pyarrow"))],
+ )
+ def test_read_write_ea_dtypes(self, dtype_backend):
+ df = DataFrame(
+ {
+ "a": [1, 2, None],
+ "b": ["a", "b", "c"],
+ "c": [True, False, None],
+ "d": [1.5, 2.5, 3.5],
+ "e": pd.date_range("2020-12-31", periods=3, freq="D"),
+ },
+ index=pd.Index([0, 1, 2], name="index"),
+ )
+ df = df.convert_dtypes(dtype_backend=dtype_backend)
+ df.to_stata("test_stata.dta", version=118)
+
+ with tm.ensure_clean() as path:
+ df.to_stata(path)
+ written_and_read_again = self.read_dta(path)
+
+ expected = DataFrame(
+ {
+ "a": [1, 2, np.nan],
+ "b": ["a", "b", "c"],
+ "c": [1.0, 0, np.nan],
+ "d": [1.5, 2.5, 3.5],
+ "e": pd.date_range("2020-12-31", periods=3, freq="D"),
+ },
+ index=pd.Index([0, 1, 2], name="index", dtype=np.int32),
+ )
+
+ tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
+
@pytest.mark.parametrize("version", [105, 108, 111, 113, 114])
def test_backward_compat(version, datapath):
| Backport PR #56771: BUG: to_stata not handling ea dtypes correctly | https://api.github.com/repos/pandas-dev/pandas/pulls/56783 | 2024-01-08T21:18:07Z | 2024-01-08T22:24:40Z | 2024-01-08T22:24:40Z | 2024-01-08T22:24:41Z |
Backport PR #56767 on branch 2.2.x (BUG: Series.round raising for nullable bool dtype) | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index b138e91b41661..93b63f99ea399 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -790,6 +790,7 @@ Numeric
- Bug in :meth:`Series.__floordiv__` for :class:`ArrowDtype` with integral dtypes raising for large values (:issue:`56645`)
- Bug in :meth:`Series.pow` not filling missing values correctly (:issue:`55512`)
- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` matching float ``0.0`` with ``False`` and vice versa (:issue:`55398`)
+- Bug in :meth:`Series.round` raising for nullable boolean dtype (:issue:`55936`)
Conversion
^^^^^^^^^^
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index fc092ef6eb463..545d45e450f3f 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -403,6 +403,8 @@ def round(self, decimals: int = 0, *args, **kwargs):
DataFrame.round : Round values of a DataFrame.
Series.round : Round values of a Series.
"""
+ if self.dtype.kind == "b":
+ return self
nv.validate_round(args, kwargs)
values = np.round(self._data, decimals=decimals, **kwargs)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index e3b401cd3c88b..a6762dd1b48a2 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2788,13 +2788,11 @@ def round(self, decimals: int = 0, *args, **kwargs) -> Series:
dtype: float64
"""
nv.validate_round(args, kwargs)
- result = self._values.round(decimals)
- result = self._constructor(result, index=self.index, copy=False).__finalize__(
+ new_mgr = self._mgr.round(decimals=decimals, using_cow=using_copy_on_write())
+ return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(
self, method="round"
)
- return result
-
@overload
def quantile(
self, q: float = ..., interpolation: QuantileInterpolation = ...
diff --git a/pandas/tests/series/methods/test_round.py b/pandas/tests/series/methods/test_round.py
index 7f60c94f10e4f..c330b7a7dfbbb 100644
--- a/pandas/tests/series/methods/test_round.py
+++ b/pandas/tests/series/methods/test_round.py
@@ -63,3 +63,12 @@ def test_round_nat(self, method, freq, unit):
round_method = getattr(ser.dt, method)
result = round_method(freq)
tm.assert_series_equal(result, expected)
+
+ def test_round_ea_boolean(self):
+ # GH#55936
+ ser = Series([True, False], dtype="boolean")
+ expected = ser.copy()
+ result = ser.round(2)
+ tm.assert_series_equal(result, expected)
+ result.iloc[0] = False
+ tm.assert_series_equal(ser, expected)
| Backport PR #56767: BUG: Series.round raising for nullable bool dtype | https://api.github.com/repos/pandas-dev/pandas/pulls/56782 | 2024-01-08T21:15:24Z | 2024-01-08T22:24:22Z | 2024-01-08T22:24:22Z | 2024-01-08T22:24:22Z |
Update on_bad_lines logic in python_parser.py | diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py
index c1880eb815032..d58fa631d2f24 100644
--- a/pandas/io/parsers/python_parser.py
+++ b/pandas/io/parsers/python_parser.py
@@ -1030,7 +1030,7 @@ def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]:
for i, _content in iter_content:
actual_len = len(_content)
- if actual_len > col_len:
+ if actual_len != col_len:
if callable(self.on_bad_lines):
new_l = self.on_bad_lines(_content)
if new_l is not None:
| On_bad_lines only considers a bad line as "a line with too many fields" and silently pads NaNs for rows with missing fields. Either consider all rows with inconsistent fields as a bad line, as I am proposing here (tested only with our framework, and we log those error details as part of file validation), or also have a param like "allow_jagged_rows = true/false" so users can decide whether to pad or use this logic proposed.
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56781 | 2024-01-08T18:58:16Z | 2024-01-08T19:26:27Z | null | 2024-01-08T19:26:27Z |
Backport PR #56769 on branch 2.2.x (BUG: replace matching Floats with bools for ea dtypes) | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 2b436bc5d1855..b138e91b41661 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -789,6 +789,7 @@ Numeric
- Bug in :meth:`Series.__floordiv__` and :meth:`Series.__truediv__` for :class:`ArrowDtype` with integral dtypes raising for large divisors (:issue:`56706`)
- Bug in :meth:`Series.__floordiv__` for :class:`ArrowDtype` with integral dtypes raising for large values (:issue:`56645`)
- Bug in :meth:`Series.pow` not filling missing values correctly (:issue:`55512`)
+- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` matching float ``0.0`` with ``False`` and vice versa (:issue:`55398`)
Conversion
^^^^^^^^^^
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 5dd9aaf5fbb4a..ff45662d0bdc8 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -31,6 +31,7 @@
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
is_array_like,
+ is_bool_dtype,
is_numeric_dtype,
is_numeric_v_string_like,
is_object_dtype,
@@ -100,21 +101,34 @@ def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]:
# GH 21977
mask = np.zeros(arr.shape, dtype=bool)
- for x in nonna:
- if is_numeric_v_string_like(arr, x):
- # GH#29553 prevent numpy deprecation warnings
- pass
- else:
- if potential_na:
- new_mask = np.zeros(arr.shape, dtype=np.bool_)
- new_mask[arr_mask] = arr[arr_mask] == x
+ if (
+ is_numeric_dtype(arr.dtype)
+ and not is_bool_dtype(arr.dtype)
+ and is_bool_dtype(nonna.dtype)
+ ):
+ pass
+ elif (
+ is_bool_dtype(arr.dtype)
+ and is_numeric_dtype(nonna.dtype)
+ and not is_bool_dtype(nonna.dtype)
+ ):
+ pass
+ else:
+ for x in nonna:
+ if is_numeric_v_string_like(arr, x):
+ # GH#29553 prevent numpy deprecation warnings
+ pass
else:
- new_mask = arr == x
-
- if not isinstance(new_mask, np.ndarray):
- # usually BooleanArray
- new_mask = new_mask.to_numpy(dtype=bool, na_value=False)
- mask |= new_mask
+ if potential_na:
+ new_mask = np.zeros(arr.shape, dtype=np.bool_)
+ new_mask[arr_mask] = arr[arr_mask] == x
+ else:
+ new_mask = arr == x
+
+ if not isinstance(new_mask, np.ndarray):
+ # usually BooleanArray
+ new_mask = new_mask.to_numpy(dtype=bool, na_value=False)
+ mask |= new_mask
if na_mask.any():
mask |= isna(arr)
diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py
index 4330153c186ca..b0f4e233ba5eb 100644
--- a/pandas/tests/series/methods/test_replace.py
+++ b/pandas/tests/series/methods/test_replace.py
@@ -799,3 +799,15 @@ def test_replace_numeric_column_with_na(self, val):
ser.replace(to_replace=1, value=pd.NA, inplace=True)
tm.assert_series_equal(ser, expected)
+
+ def test_replace_ea_float_with_bool(self):
+ # GH#55398
+ ser = pd.Series([0.0], dtype="Float64")
+ expected = ser.copy()
+ result = ser.replace(False, 1.0)
+ tm.assert_series_equal(result, expected)
+
+ ser = pd.Series([False], dtype="boolean")
+ expected = ser.copy()
+ result = ser.replace(0.0, True)
+ tm.assert_series_equal(result, expected)
| Backport PR #56769: BUG: replace matching Floats with bools for ea dtypes | https://api.github.com/repos/pandas-dev/pandas/pulls/56780 | 2024-01-08T18:06:29Z | 2024-01-08T19:25:45Z | 2024-01-08T19:25:45Z | 2024-01-08T19:25:45Z |
Add Python 3.12 classifier to pyproject.toml (against 2.1.x) | diff --git a/pyproject.toml b/pyproject.toml
index 77b49d4474334..4b4df9fb29a11 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -49,6 +49,7 @@ classifiers = [
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
+ 'Programming Language :: Python :: 3.12',
'Topic :: Scientific/Engineering'
]
| Looking at https://pypi.org/project/pandas/, we were unsure if pandas supports Python 3.12 yet.
Based on multiple PRs such as https://github.com/pandas-dev/pandas/pull/56333, we assume 2.1.x also has support for Python 3.12.
(the checkboxes below should not apply for this minor doc fix?)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56776 | 2024-01-08T14:35:19Z | 2024-01-08T18:45:24Z | null | 2024-01-08T18:45:24Z |
Support large strings in interchange protocol | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 0b04a1d313a6d..0ddde35f35aca 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -901,6 +901,7 @@ Sparse
Other
^^^^^
+- :meth:`DataFrame.__dataframe__` did not support pyarrow large strings (:issue:`56702`)
- Bug in :func:`DataFrame.describe` when formatting percentiles in the resulting percentile 99.999% is rounded to 100% (:issue:`55765`)
- Bug in :func:`cut` and :func:`qcut` with ``datetime64`` dtype values with non-nanosecond units incorrectly returning nanosecond-unit bins (:issue:`56101`)
- Bug in :func:`cut` incorrectly allowing cutting of timezone-aware datetimes with timezone-naive bins (:issue:`54964`)
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index ed5256922377a..e90e92fa0ee1c 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -2190,7 +2190,9 @@ def numpy_dtype(self) -> np.dtype:
# This can be removed if/when pyarrow addresses it:
# https://github.com/apache/arrow/issues/34462
return np.dtype(f"timedelta64[{self.pyarrow_dtype.unit}]")
- if pa.types.is_string(self.pyarrow_dtype):
+ if pa.types.is_string(self.pyarrow_dtype) or pa.types.is_large_string(
+ self.pyarrow_dtype
+ ):
# pa.string().to_pandas_dtype() = object which we don't want
return np.dtype(str)
try:
diff --git a/pandas/core/interchange/column.py b/pandas/core/interchange/column.py
index acfbc5d9e6c62..7f524d6823f30 100644
--- a/pandas/core/interchange/column.py
+++ b/pandas/core/interchange/column.py
@@ -301,12 +301,9 @@ def _get_data_buffer(
buffer = PandasBuffer(np.frombuffer(b, dtype="uint8"))
# Define the dtype for the returned buffer
- dtype = (
- DtypeKind.STRING,
- 8,
- ArrowCTypes.STRING,
- Endianness.NATIVE,
- ) # note: currently only support native endianness
+ # TODO: this will need correcting
+ # https://github.com/pandas-dev/pandas/issues/54781
+ dtype = self.dtype
else:
raise NotImplementedError(f"Data type {self._col.dtype} not handled yet")
diff --git a/pandas/core/interchange/utils.py b/pandas/core/interchange/utils.py
index 4ac063080e62d..2e73e560e5740 100644
--- a/pandas/core/interchange/utils.py
+++ b/pandas/core/interchange/utils.py
@@ -37,6 +37,7 @@
"float": "f", # float32
"double": "g", # float64
"string": "u",
+ "large_string": "U",
"binary": "z",
"time32[s]": "tts",
"time32[ms]": "ttm",
diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py
index 6d8cc501ade6c..4ba6eb8464261 100644
--- a/pandas/tests/interchange/test_impl.py
+++ b/pandas/tests/interchange/test_impl.py
@@ -353,3 +353,12 @@ def test_interchange_from_corrected_buffer_dtypes(monkeypatch) -> None:
interchange.get_column_by_name = lambda _: column
monkeypatch.setattr(df, "__dataframe__", lambda allow_copy: interchange)
pd.api.interchange.from_dataframe(df)
+
+
+def test_large_string():
+ # GH#56702
+ pytest.importorskip("pyarrow")
+ df = pd.DataFrame({"a": ["x"]}, dtype="large_string[pyarrow]")
+ result = pd.api.interchange.from_dataframe(df.__dataframe__())
+ expected = pd.DataFrame({"a": ["x"]}, dtype="object")
+ tm.assert_frame_equal(result, expected)
| - [ ] closes #56702 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
cc @MarcoGorelli | https://api.github.com/repos/pandas-dev/pandas/pulls/56772 | 2024-01-07T23:25:07Z | 2024-01-09T08:41:08Z | 2024-01-09T08:41:08Z | 2024-01-09T08:42:26Z |
BUG: to_stata not handling ea dtypes correctly | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 0b04a1d313a6d..b90185ab9b4ea 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -846,6 +846,7 @@ I/O
- Bug in :func:`read_json` not handling dtype conversion properly if ``infer_string`` is set (:issue:`56195`)
- Bug in :meth:`DataFrame.to_excel`, with ``OdsWriter`` (``ods`` files) writing Boolean/string value (:issue:`54994`)
- Bug in :meth:`DataFrame.to_hdf` and :func:`read_hdf` with ``datetime64`` dtypes with non-nanosecond resolution failing to round-trip correctly (:issue:`55622`)
+- Bug in :meth:`DataFrame.to_stata` raising for extension dtypes (:issue:`54671`)
- Bug in :meth:`~pandas.read_excel` with ``engine="odf"`` (``ods`` files) when a string cell contains an annotation (:issue:`55200`)
- Bug in :meth:`~pandas.read_excel` with an ODS file without cached formatted cell for float values (:issue:`55219`)
- Bug where :meth:`DataFrame.to_json` would raise an ``OverflowError`` instead of a ``TypeError`` with unsupported NumPy types (:issue:`55403`)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index a4d8054ea4f8c..4abf9af185a01 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -47,9 +47,11 @@
)
from pandas.util._exceptions import find_stack_level
+from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.common import (
ensure_object,
is_numeric_dtype,
+ is_string_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
@@ -62,8 +64,6 @@
to_datetime,
to_timedelta,
)
-from pandas.core.arrays.boolean import BooleanDtype
-from pandas.core.arrays.integer import IntegerDtype
from pandas.core.frame import DataFrame
from pandas.core.indexes.base import Index
from pandas.core.indexes.range import RangeIndex
@@ -591,17 +591,22 @@ def _cast_to_stata_types(data: DataFrame) -> DataFrame:
for col in data:
# Cast from unsupported types to supported types
- is_nullable_int = isinstance(data[col].dtype, (IntegerDtype, BooleanDtype))
+ is_nullable_int = (
+ isinstance(data[col].dtype, ExtensionDtype)
+ and data[col].dtype.kind in "iub"
+ )
# We need to find orig_missing before altering data below
orig_missing = data[col].isna()
if is_nullable_int:
- missing_loc = data[col].isna()
- if missing_loc.any():
- # Replace with always safe value
- fv = 0 if isinstance(data[col].dtype, IntegerDtype) else False
- data.loc[missing_loc, col] = fv
+ fv = 0 if data[col].dtype.kind in "iu" else False
# Replace with NumPy-compatible column
- data[col] = data[col].astype(data[col].dtype.numpy_dtype)
+ data[col] = data[col].fillna(fv).astype(data[col].dtype.numpy_dtype)
+ elif isinstance(data[col].dtype, ExtensionDtype):
+ if getattr(data[col].dtype, "numpy_dtype", None) is not None:
+ data[col] = data[col].astype(data[col].dtype.numpy_dtype)
+ elif is_string_dtype(data[col].dtype):
+ data[col] = data[col].astype("object")
+
dtype = data[col].dtype
empty_df = data.shape[0] == 0
for c_data in conversion_data:
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 799b0a63feb53..11b53d711fce2 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -11,6 +11,8 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
import pandas as pd
from pandas import CategoricalDtype
import pandas._testing as tm
@@ -1919,6 +1921,41 @@ def test_writer_118_exceptions(self):
with pytest.raises(ValueError, match="You must use version 119"):
StataWriterUTF8(path, df, version=118)
+ @pytest.mark.parametrize(
+ "dtype_backend",
+ ["numpy_nullable", pytest.param("pyarrow", marks=td.skip_if_no("pyarrow"))],
+ )
+ def test_read_write_ea_dtypes(self, dtype_backend):
+ df = DataFrame(
+ {
+ "a": [1, 2, None],
+ "b": ["a", "b", "c"],
+ "c": [True, False, None],
+ "d": [1.5, 2.5, 3.5],
+ "e": pd.date_range("2020-12-31", periods=3, freq="D"),
+ },
+ index=pd.Index([0, 1, 2], name="index"),
+ )
+ df = df.convert_dtypes(dtype_backend=dtype_backend)
+ df.to_stata("test_stata.dta", version=118)
+
+ with tm.ensure_clean() as path:
+ df.to_stata(path)
+ written_and_read_again = self.read_dta(path)
+
+ expected = DataFrame(
+ {
+ "a": [1, 2, np.nan],
+ "b": ["a", "b", "c"],
+ "c": [1.0, 0, np.nan],
+ "d": [1.5, 2.5, 3.5],
+ "e": pd.date_range("2020-12-31", periods=3, freq="D"),
+ },
+ index=pd.Index([0, 1, 2], name="index", dtype=np.int32),
+ )
+
+ tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
+
@pytest.mark.parametrize("version", [105, 108, 111, 113, 114])
def test_backward_compat(version, datapath):
| - [ ] closes #54671 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56771 | 2024-01-07T21:01:31Z | 2024-01-08T21:17:58Z | 2024-01-08T21:17:58Z | 2024-01-08T21:32:57Z |
Backport PR #56761 on branch 2.2.x (BUG: fix subclass metadata preservation in groupby column selection) | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 0b04a1d313a6d..2b436bc5d1855 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -873,6 +873,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrame.asfreq` and :meth:`Series.asfreq` with a :class:`DatetimeIndex` with non-nanosecond resolution incorrectly converting to nanosecond resolution (:issue:`55958`)
- Bug in :meth:`DataFrame.ewm` when passed ``times`` with non-nanosecond ``datetime64`` or :class:`DatetimeTZDtype` dtype (:issue:`56262`)
- Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby` where grouping by a combination of ``Decimal`` and NA values would fail when ``sort=True`` (:issue:`54847`)
+- Bug in :meth:`DataFrame.groupby` for DataFrame subclasses when selecting a subset of columns to apply the function to (:issue:`56761`)
- Bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`)
- Bug in :meth:`DataFrame.resample` when resampling on a :class:`ArrowDtype` of ``pyarrow.timestamp`` or ``pyarrow.duration`` type (:issue:`55989`)
- Bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3e2e589440bd9..15ccbd602c9c8 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4016,7 +4016,9 @@ def _getitem_nocopy(self, key: list):
copy=False,
only_slice=True,
)
- return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes)
+ result = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes)
+ result = result.__finalize__(self)
+ return result
def __getitem__(self, key):
check_dict_or_set_indexers(key)
diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py
index bf809bd5db437..17ef6ee913463 100644
--- a/pandas/tests/groupby/test_groupby_subclass.py
+++ b/pandas/tests/groupby/test_groupby_subclass.py
@@ -69,6 +69,7 @@ def test_groupby_preserves_metadata():
def func(group):
assert isinstance(group, tm.SubclassedDataFrame)
assert hasattr(group, "testattr")
+ assert group.testattr == "hello"
return group.testattr
msg = "DataFrameGroupBy.apply operated on the grouping columns"
@@ -79,6 +80,13 @@ def func(group):
expected = tm.SubclassedSeries(["hello"] * 3, index=Index([7, 8, 9], name="c"))
tm.assert_series_equal(result, expected)
+ result = custom_df.groupby("c").apply(func, include_groups=False)
+ tm.assert_series_equal(result, expected)
+
+ # https://github.com/pandas-dev/pandas/pull/56761
+ result = custom_df.groupby("c")[["a", "b"]].apply(func)
+ tm.assert_series_equal(result, expected)
+
def func2(group):
assert isinstance(group, tm.SubclassedSeries)
assert hasattr(group, "testattr")
| Backport PR #56761: BUG: fix subclass metadata preservation in groupby column selection | https://api.github.com/repos/pandas-dev/pandas/pulls/56770 | 2024-01-07T19:43:03Z | 2024-01-07T21:02:02Z | 2024-01-07T21:02:02Z | 2024-01-07T21:02:02Z |
BUG: replace matching Floats with bools for ea dtypes | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 0b04a1d313a6d..6cac4bcdfa045 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -789,6 +789,7 @@ Numeric
- Bug in :meth:`Series.__floordiv__` and :meth:`Series.__truediv__` for :class:`ArrowDtype` with integral dtypes raising for large divisors (:issue:`56706`)
- Bug in :meth:`Series.__floordiv__` for :class:`ArrowDtype` with integral dtypes raising for large values (:issue:`56645`)
- Bug in :meth:`Series.pow` not filling missing values correctly (:issue:`55512`)
+- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` matching float ``0.0`` with ``False`` and vice versa (:issue:`55398`)
Conversion
^^^^^^^^^^
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index cd76883d50541..f9515bb55e266 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -31,6 +31,7 @@
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
is_array_like,
+ is_bool_dtype,
is_numeric_dtype,
is_numeric_v_string_like,
is_object_dtype,
@@ -100,21 +101,34 @@ def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]:
# GH 21977
mask = np.zeros(arr.shape, dtype=bool)
- for x in nonna:
- if is_numeric_v_string_like(arr, x):
- # GH#29553 prevent numpy deprecation warnings
- pass
- else:
- if potential_na:
- new_mask = np.zeros(arr.shape, dtype=np.bool_)
- new_mask[arr_mask] = arr[arr_mask] == x
+ if (
+ is_numeric_dtype(arr.dtype)
+ and not is_bool_dtype(arr.dtype)
+ and is_bool_dtype(nonna.dtype)
+ ):
+ pass
+ elif (
+ is_bool_dtype(arr.dtype)
+ and is_numeric_dtype(nonna.dtype)
+ and not is_bool_dtype(nonna.dtype)
+ ):
+ pass
+ else:
+ for x in nonna:
+ if is_numeric_v_string_like(arr, x):
+ # GH#29553 prevent numpy deprecation warnings
+ pass
else:
- new_mask = arr == x
-
- if not isinstance(new_mask, np.ndarray):
- # usually BooleanArray
- new_mask = new_mask.to_numpy(dtype=bool, na_value=False)
- mask |= new_mask
+ if potential_na:
+ new_mask = np.zeros(arr.shape, dtype=np.bool_)
+ new_mask[arr_mask] = arr[arr_mask] == x
+ else:
+ new_mask = arr == x
+
+ if not isinstance(new_mask, np.ndarray):
+ # usually BooleanArray
+ new_mask = new_mask.to_numpy(dtype=bool, na_value=False)
+ mask |= new_mask
if na_mask.any():
mask |= isna(arr)
diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py
index 4330153c186ca..b0f4e233ba5eb 100644
--- a/pandas/tests/series/methods/test_replace.py
+++ b/pandas/tests/series/methods/test_replace.py
@@ -799,3 +799,15 @@ def test_replace_numeric_column_with_na(self, val):
ser.replace(to_replace=1, value=pd.NA, inplace=True)
tm.assert_series_equal(ser, expected)
+
+ def test_replace_ea_float_with_bool(self):
+ # GH#55398
+ ser = pd.Series([0.0], dtype="Float64")
+ expected = ser.copy()
+ result = ser.replace(False, 1.0)
+ tm.assert_series_equal(result, expected)
+
+ ser = pd.Series([False], dtype="boolean")
+ expected = ser.copy()
+ result = ser.replace(0.0, True)
+ tm.assert_series_equal(result, expected)
| - [ ] closes #55398 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56769 | 2024-01-07T19:36:32Z | 2024-01-08T18:05:14Z | 2024-01-08T18:05:14Z | 2024-01-08T18:07:21Z |
Update check_test_naming.py | diff --git a/scripts/check_test_naming.py b/scripts/check_test_naming.py
index f9190643b3246..5cd4402286e3c 100644
--- a/scripts/check_test_naming.py
+++ b/scripts/check_test_naming.py
@@ -1,13 +1,3 @@
-"""
-Check that test names start with `test`, and that test classes start with `Test`.
-
-This is meant to be run as a pre-commit hook - to run it manually, you can do:
-
- pre-commit run check-test-naming --all-files
-
-NOTE: if this finds a false positive, you can add the comment `# not a test` to the
-class or function definition. Though hopefully that shouldn't be necessary.
-"""
from __future__ import annotations
import argparse
@@ -18,23 +8,18 @@ class or function definition. Though hopefully that shouldn't be necessary.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
- from collections.abc import (
- Iterator,
- Sequence,
- )
+ from collections.abc import Iterator, Sequence
PRAGMA = "# not a test"
-def _find_names(node: ast.Module) -> Iterator[str]:
+def find_names(node: ast.Module) -> Iterator[str]:
for _node in ast.walk(node):
- if isinstance(_node, ast.Name):
- yield _node.id
- elif isinstance(_node, ast.Attribute):
- yield _node.attr
+ if isinstance(_node, (ast.Name, ast.Attribute)):
+ yield _node.id if isinstance(_node, ast.Name) else _node.attr
-def _is_fixture(node: ast.expr) -> bool:
+def is_fixture(node: ast.expr) -> bool:
if isinstance(node, ast.Call):
node = node.func
return (
@@ -45,27 +30,22 @@ def _is_fixture(node: ast.expr) -> bool:
)
-def _is_register_dtype(node):
+def is_register_dtype(node):
return isinstance(node, ast.Name) and node.id == "register_extension_dtype"
-def is_misnamed_test_func(
- node: ast.expr | ast.stmt, names: Sequence[str], line: str
-) -> bool:
+def is_misnamed_test_func(node: ast.expr | ast.stmt, names: Sequence[str], line: str) -> bool:
return (
isinstance(node, ast.FunctionDef)
and not node.name.startswith("test")
and names.count(node.name) == 0
and not any(_is_fixture(decorator) for decorator in node.decorator_list)
and PRAGMA not in line
- and node.name
- not in ("teardown_method", "setup_method", "teardown_class", "setup_class")
+ and node.name not in ("teardown_method", "setup_method", "teardown_class", "setup_class")
)
-def is_misnamed_test_class(
- node: ast.expr | ast.stmt, names: Sequence[str], line: str
-) -> bool:
+def is_misnamed_test_class(node: ast.expr | ast.stmt, names: Sequence[str], line: str) -> bool:
return (
isinstance(node, ast.ClassDef)
and not node.name.startswith("Test")
@@ -78,47 +58,26 @@ def is_misnamed_test_class(
def main(content: str, file: str) -> int:
lines = content.splitlines()
tree = ast.parse(content)
- names = list(_find_names(tree))
+ names = list(find_names(tree))
ret = 0
+
for node in tree.body:
if is_misnamed_test_func(node, names, lines[node.lineno - 1]):
print(
- f"{file}:{node.lineno}:{node.col_offset} "
- "found test function which does not start with 'test'"
+ f"{file}:{node.lineno}:{node.col_offset} found test function which does not start with 'test'"
)
ret = 1
elif is_misnamed_test_class(node, names, lines[node.lineno - 1]):
print(
- f"{file}:{node.lineno}:{node.col_offset} "
- "found test class which does not start with 'Test'"
+ f"{file}:{node.lineno}:{node.col_offset} found test class which does not start with 'Test'"
)
ret = 1
- if (
- isinstance(node, ast.ClassDef)
- and names.count(node.name) == 0
- and not any(
- _is_register_dtype(decorator) for decorator in node.decorator_list
- )
- and PRAGMA not in lines[node.lineno - 1]
- ):
+
+ if isinstance(node, ast.ClassDef) and names.count(node.name) == 0 and not any(
+ _is_register_dtype(decorator) for decorator in node.decorator_list
+ ) and PRAGMA not in lines[node.lineno - 1]:
for _node in node.body:
if is_misnamed_test_func(_node, names, lines[_node.lineno - 1]):
- # It could be that this function is used somewhere by the
- # parent class. For example, there might be a base class
- # with
- #
- # class Foo:
- # def foo(self):
- # assert 1+1==2
- # def test_foo(self):
- # self.foo()
- #
- # and then some subclass overwrites `foo`. So, we check that
- # `self.foo` doesn't appear in any of the test classes.
- # Note some false negatives might get through, but that's OK.
- # This is good enough that has helped identify several examples
- # of tests not being run.
- assert isinstance(_node, ast.FunctionDef) # help mypy
should_continue = False
for _file in (Path("pandas") / "tests").rglob("*.py"):
with open(os.path.join(_file), encoding="utf-8") as fd:
@@ -126,14 +85,15 @@ def main(content: str, file: str) -> int:
if f"self.{_node.name}" in _content:
should_continue = True
break
+
if should_continue:
continue
print(
- f"{file}:{_node.lineno}:{_node.col_offset} "
- "found test function which does not start with 'test'"
+ f"{file}:{_node.lineno}:{_node.col_offset} found test function which does not start with 'test'"
)
ret = 1
+
return ret
| Changes made:
1. Removed unnecessary import of `annotations` from `__future__`.
2. Simplified the `find_names` function to handle both `ast.Name` and `ast.Attribute`.
3. Combined the `_is_fixture` and `_is_register_dtype` functions into `is_fixture` and `is_register_dtype`.
4. Replaced `any()` with a generator expression to check for conditions in a more concise way.
5. Changed function and variable names to follow snake_case naming convention.
6. Made the code more readable and removed redundant code.
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56768 | 2024-01-07T19:17:53Z | 2024-01-07T19:38:59Z | null | 2024-01-07T19:39:00Z |
BUG: Series.round raising for nullable bool dtype | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index b138e91b41661..93b63f99ea399 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -790,6 +790,7 @@ Numeric
- Bug in :meth:`Series.__floordiv__` for :class:`ArrowDtype` with integral dtypes raising for large values (:issue:`56645`)
- Bug in :meth:`Series.pow` not filling missing values correctly (:issue:`55512`)
- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` matching float ``0.0`` with ``False`` and vice versa (:issue:`55398`)
+- Bug in :meth:`Series.round` raising for nullable boolean dtype (:issue:`55936`)
Conversion
^^^^^^^^^^
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 9ce19ced2b356..a3ff616cb8f98 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -404,6 +404,8 @@ def round(self, decimals: int = 0, *args, **kwargs):
DataFrame.round : Round values of a DataFrame.
Series.round : Round values of a Series.
"""
+ if self.dtype.kind == "b":
+ return self
nv.validate_round(args, kwargs)
values = np.round(self._data, decimals=decimals, **kwargs)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 90073e21cfd66..8098d990a6a89 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2789,13 +2789,11 @@ def round(self, decimals: int = 0, *args, **kwargs) -> Series:
dtype: float64
"""
nv.validate_round(args, kwargs)
- result = self._values.round(decimals)
- result = self._constructor(result, index=self.index, copy=False).__finalize__(
+ new_mgr = self._mgr.round(decimals=decimals, using_cow=using_copy_on_write())
+ return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(
self, method="round"
)
- return result
-
@overload
def quantile(
self, q: float = ..., interpolation: QuantileInterpolation = ...
diff --git a/pandas/tests/series/methods/test_round.py b/pandas/tests/series/methods/test_round.py
index 7f60c94f10e4f..c330b7a7dfbbb 100644
--- a/pandas/tests/series/methods/test_round.py
+++ b/pandas/tests/series/methods/test_round.py
@@ -63,3 +63,12 @@ def test_round_nat(self, method, freq, unit):
round_method = getattr(ser.dt, method)
result = round_method(freq)
tm.assert_series_equal(result, expected)
+
+ def test_round_ea_boolean(self):
+ # GH#55936
+ ser = Series([True, False], dtype="boolean")
+ expected = ser.copy()
+ result = ser.round(2)
+ tm.assert_series_equal(result, expected)
+ result.iloc[0] = False
+ tm.assert_series_equal(ser, expected)
| - [ ] closes #55936 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56767 | 2024-01-07T18:16:26Z | 2024-01-08T21:15:15Z | 2024-01-08T21:15:15Z | 2024-01-08T21:33:10Z |
BUG: IntervalIndex.from_tuples raising with masked subtype | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 0b04a1d313a6d..8cabae95258c6 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -815,6 +815,7 @@ Interval
- Bug in :class:`Interval` ``__repr__`` not displaying UTC offsets for :class:`Timestamp` bounds. Additionally the hour, minute and second components will now be shown (:issue:`55015`)
- Bug in :meth:`IntervalIndex.factorize` and :meth:`Series.factorize` with :class:`IntervalDtype` with datetime64 or timedelta64 intervals not preserving non-nanosecond units (:issue:`56099`)
- Bug in :meth:`IntervalIndex.from_arrays` when passed ``datetime64`` or ``timedelta64`` arrays with mismatched resolutions constructing an invalid ``IntervalArray`` object (:issue:`55714`)
+- Bug in :meth:`IntervalIndex.from_tuples` raising if subtype is a nullable extension dtype (:issue:`56765`)
- Bug in :meth:`IntervalIndex.get_indexer` with datetime or timedelta intervals incorrectly matching on integer targets (:issue:`47772`)
- Bug in :meth:`IntervalIndex.get_indexer` with timezone-aware datetime intervals incorrectly matching on a sequence of timezone-naive targets (:issue:`47772`)
- Bug in setting values on a :class:`Series` with an :class:`IntervalIndex` using a slice incorrectly raising (:issue:`54722`)
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 96ee728d6dcb7..2dbc2a663c8a8 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -80,6 +80,7 @@
unique,
value_counts_internal as value_counts,
)
+from pandas.core.arrays import ArrowExtensionArray
from pandas.core.arrays.base import (
ExtensionArray,
_extension_array_shared_docs,
@@ -369,11 +370,18 @@ def _ensure_simple_new_inputs(
right = ensure_wrapped_if_datetimelike(right)
right = extract_array(right, extract_numpy=True)
- lbase = getattr(left, "_ndarray", left).base
- rbase = getattr(right, "_ndarray", right).base
- if lbase is not None and lbase is rbase:
- # If these share data, then setitem could corrupt our IA
- right = right.copy()
+ if isinstance(left, ArrowExtensionArray) or isinstance(
+ right, ArrowExtensionArray
+ ):
+ pass
+ else:
+ lbase = getattr(left, "_ndarray", left)
+ lbase = getattr(lbase, "_data", lbase).base
+ rbase = getattr(right, "_ndarray", right)
+ rbase = getattr(rbase, "_data", rbase).base
+ if lbase is not None and lbase is rbase:
+ # If these share data, then setitem could corrupt our IA
+ right = right.copy()
dtype = IntervalDtype(left.dtype, closed=closed)
diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py
index e9864723f026e..b0289ded55604 100644
--- a/pandas/tests/indexes/interval/test_constructors.py
+++ b/pandas/tests/indexes/interval/test_constructors.py
@@ -3,6 +3,8 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
from pandas.core.dtypes.common import is_unsigned_integer_dtype
from pandas.core.dtypes.dtypes import IntervalDtype
@@ -508,3 +510,17 @@ def test_dtype_closed_mismatch():
with pytest.raises(ValueError, match=msg):
IntervalArray([], dtype=dtype, closed="neither")
+
+
+@pytest.mark.parametrize(
+ "dtype",
+ ["Float64", pytest.param("float64[pyarrow]", marks=td.skip_if_no("pyarrow"))],
+)
+def test_ea_dtype(dtype):
+ # GH#56765
+ bins = [(0.0, 0.4), (0.4, 0.6)]
+ interval_dtype = IntervalDtype(subtype=dtype, closed="left")
+ result = IntervalIndex.from_tuples(bins, closed="left", dtype=interval_dtype)
+ assert result.dtype == interval_dtype
+ expected = IntervalIndex.from_tuples(bins, closed="left").astype(interval_dtype)
+ tm.assert_index_equal(result, expected)
| - [ ] closes #56765 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56766 | 2024-01-07T17:36:37Z | 2024-01-08T21:34:27Z | 2024-01-08T21:34:27Z | 2024-01-08T21:34:29Z |
Remove warning about unused groups | diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index 7c6dca3bad7d9..6302fb29d0ea4 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -1320,14 +1320,6 @@ def contains(
4 False
dtype: bool
"""
- if regex and re.compile(pat).groups:
- warnings.warn(
- "This pattern is interpreted as a regular expression, and has "
- "match groups. To actually get the groups, use str.extract.",
- UserWarning,
- stacklevel=find_stack_level(),
- )
-
result = self._data.array._str_contains(pat, case, flags, na, regex)
return self._wrap_result(result, fill_value=na, returns_string=False)
| This warning is annoying and serves to discourage use of named capturing groups (which are more maintainable) because users must either switch to extracting the groups (not always necessary) or replace their named groups with "(?:" (unnamed groups are harder to maintain because it's less clear what is their objective within a greater regex pattern).
If users need to specialize their regex patterns to each command, then they need to maintain multiple copies, some with unnamed groups, some without, just to silence some warning, also, if they remove the groups, then later on when they want to use them, they might have to figure out how to replace groups they removed just to silence a warning, and be frustrated.
If we remove this unnecessary warning, then we no longer discourage users who use named capturing groups, thus facilitating readability of the patterns, and portability to other contexts, such as debuggers or the "extract" method mentioned in the removed warning.
TL;DR: This warning doesn't need to exist, so I'm removing it. If there are tests which expect it, then they could also be removed. Code to silence the warning should still work, but could also be removed.
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56763 | 2024-01-07T15:28:08Z | 2024-01-07T17:00:26Z | null | 2024-01-07T17:00:27Z |
DOC: Avoid requesting data from s3 buckets from our docs | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index b3ad23e0d4104..bb5b4e056d527 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -1704,7 +1704,7 @@ option parameter:
.. code-block:: python
- storage_options = {"client_kwargs": {"endpoint_url": "http://127.0.0.1:5555"}}}
+ storage_options = {"client_kwargs": {"endpoint_url": "http://127.0.0.1:5555"}}
df = pd.read_json("s3://pandas-test/test-1", storage_options=storage_options)
More sample configurations and documentation can be found at `S3Fs documentation
@@ -3015,14 +3015,15 @@ Read in the content of the "books.xml" as instance of ``StringIO`` or
Even read XML from AWS S3 buckets such as NIH NCBI PMC Article Datasets providing
Biomedical and Life Science Jorurnals:
-.. ipython:: python
- :okwarning:
+.. code-block:: python
- df = pd.read_xml(
- "s3://pmc-oa-opendata/oa_comm/xml/all/PMC1236943.xml",
- xpath=".//journal-meta",
- )
- df
+ >>> df = pd.read_xml(
+ ... "s3://pmc-oa-opendata/oa_comm/xml/all/PMC1236943.xml",
+ ... xpath=".//journal-meta",
+ ...)
+ >>> df
+ journal-id journal-title issn publisher
+ 0 Cardiovasc Ultrasound Cardiovascular Ultrasound 1476-7120 NaN
With `lxml`_ as default ``parser``, you access the full-featured XML library
that extends Python's ElementTree API. One powerful tool is ability to query
| - [x] closes #56592
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/v2.3.0.rst` file if fixing a bug or adding a new feature.
ipython code chunk would make call to S3 bucket URL. Most often harmless and therefore not easy to replicate, but some users reported error when building html documentation (see issue #56592) when there was some access issue to the S3 bucket URL. Decision was to change to code block to avoid calls. | https://api.github.com/repos/pandas-dev/pandas/pulls/56762 | 2024-01-07T14:23:50Z | 2024-01-15T18:15:49Z | 2024-01-15T18:15:49Z | 2024-01-15T18:16:06Z |
BUG: fix subclass metadata preservation in groupby column selection | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 0b04a1d313a6d..2b436bc5d1855 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -873,6 +873,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrame.asfreq` and :meth:`Series.asfreq` with a :class:`DatetimeIndex` with non-nanosecond resolution incorrectly converting to nanosecond resolution (:issue:`55958`)
- Bug in :meth:`DataFrame.ewm` when passed ``times`` with non-nanosecond ``datetime64`` or :class:`DatetimeTZDtype` dtype (:issue:`56262`)
- Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby` where grouping by a combination of ``Decimal`` and NA values would fail when ``sort=True`` (:issue:`54847`)
+- Bug in :meth:`DataFrame.groupby` for DataFrame subclasses when selecting a subset of columns to apply the function to (:issue:`56761`)
- Bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`)
- Bug in :meth:`DataFrame.resample` when resampling on a :class:`ArrowDtype` of ``pyarrow.timestamp`` or ``pyarrow.duration`` type (:issue:`55989`)
- Bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 8db437ccec389..021c7b74adb7f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4016,7 +4016,9 @@ def _getitem_nocopy(self, key: list):
copy=False,
only_slice=True,
)
- return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes)
+ result = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes)
+ result = result.__finalize__(self)
+ return result
def __getitem__(self, key):
check_dict_or_set_indexers(key)
diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py
index bf809bd5db437..17ef6ee913463 100644
--- a/pandas/tests/groupby/test_groupby_subclass.py
+++ b/pandas/tests/groupby/test_groupby_subclass.py
@@ -69,6 +69,7 @@ def test_groupby_preserves_metadata():
def func(group):
assert isinstance(group, tm.SubclassedDataFrame)
assert hasattr(group, "testattr")
+ assert group.testattr == "hello"
return group.testattr
msg = "DataFrameGroupBy.apply operated on the grouping columns"
@@ -79,6 +80,13 @@ def func(group):
expected = tm.SubclassedSeries(["hello"] * 3, index=Index([7, 8, 9], name="c"))
tm.assert_series_equal(result, expected)
+ result = custom_df.groupby("c").apply(func, include_groups=False)
+ tm.assert_series_equal(result, expected)
+
+ # https://github.com/pandas-dev/pandas/pull/56761
+ result = custom_df.groupby("c")[["a", "b"]].apply(func)
+ tm.assert_series_equal(result, expected)
+
def func2(group):
assert isinstance(group, tm.SubclassedSeries)
assert hasattr(group, "testattr")
| This is a small regression that was introduced with https://github.com/pandas-dev/pandas/pull/51090 (in pandas 2.0): before that PR, the column subselection in the groupby code was done with a standard `__getitem__` call, while the PR introduced an optimized helper `_getitem_nocopy` to avoid the copy that a standard `__getitem__` does. However, that new helper forgot to add a `__finalize__` call after `_constructor` from a manager.
The reason we noticed this in geopandas (https://github.com/geopandas/geopandas/pull/3130) was to update our tests for the `apply` deprecation to include group columns (for which one alternative is to specifically select the non-group columns, but so that didn't work with recent pandas versions) | https://api.github.com/repos/pandas-dev/pandas/pulls/56761 | 2024-01-07T09:57:38Z | 2024-01-07T19:42:02Z | 2024-01-07T19:42:02Z | 2024-01-08T14:42:33Z |
TYP: Persist typing information for pipe args and kwargs | diff --git a/pandas/_typing.py b/pandas/_typing.py
index a80f9603493a7..fa9dc14bb4bd7 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -90,18 +90,29 @@
from typing import SupportsIndex
if sys.version_info >= (3, 10):
+ from typing import Concatenate # pyright: ignore[reportUnusedImport]
+ from typing import ParamSpec
from typing import TypeGuard # pyright: ignore[reportUnusedImport]
else:
- from typing_extensions import TypeGuard # pyright: ignore[reportUnusedImport]
+ from typing_extensions import ( # pyright: ignore[reportUnusedImport]
+ Concatenate,
+ ParamSpec,
+ TypeGuard,
+ )
+
+ P = ParamSpec("P")
if sys.version_info >= (3, 11):
from typing import Self # pyright: ignore[reportUnusedImport]
else:
from typing_extensions import Self # pyright: ignore[reportUnusedImport]
+
else:
npt: Any = None
+ ParamSpec: Any = None
Self: Any = None
TypeGuard: Any = None
+ Concatenate: Any = None
HashableT = TypeVar("HashableT", bound=Hashable)
MutableMappingT = TypeVar("MutableMappingT", bound=MutableMapping)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 7d864e02be54e..69b602feee3ea 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -24,6 +24,7 @@
TYPE_CHECKING,
Any,
Callable,
+ TypeVar,
cast,
overload,
)
@@ -51,7 +52,9 @@
from pandas._typing import (
AnyArrayLike,
ArrayLike,
+ Concatenate,
NpDtype,
+ P,
RandomState,
T,
)
@@ -463,8 +466,34 @@ def random_state(state: RandomState | None = None):
)
+_T = TypeVar("_T") # Secondary TypeVar for use in pipe's type hints
+
+
+@overload
+def pipe(
+ obj: _T,
+ func: Callable[Concatenate[_T, P], T],
+ *args: P.args,
+ **kwargs: P.kwargs,
+) -> T:
+ ...
+
+
+@overload
+def pipe(
+ obj: Any,
+ func: tuple[Callable[..., T], str],
+ *args: Any,
+ **kwargs: Any,
+) -> T:
+ ...
+
+
def pipe(
- obj, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs
+ obj: _T,
+ func: Callable[Concatenate[_T, P], T] | tuple[Callable[..., T], str],
+ *args: Any,
+ **kwargs: Any,
) -> T:
"""
Apply a function ``func`` to object ``obj`` either by passing obj as the
@@ -490,12 +519,13 @@ def pipe(
object : the return type of ``func``.
"""
if isinstance(func, tuple):
- func, target = func
+ # Assigning to func_ so pyright understands that it's a callable
+ func_, target = func
if target in kwargs:
msg = f"{target} is both the pipe target and a keyword argument"
raise ValueError(msg)
kwargs[target] = obj
- return func(*args, **kwargs)
+ return func_(*args, **kwargs)
else:
return func(obj, *args, **kwargs)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b37f22339fcfd..40ce9499d69c2 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -50,6 +50,7 @@
Axis,
AxisInt,
CompressionOptions,
+ Concatenate,
DtypeArg,
DtypeBackend,
DtypeObj,
@@ -213,6 +214,7 @@
)
from pandas._libs.tslibs import BaseOffset
+ from pandas._typing import P
from pandas import (
DataFrame,
@@ -6118,13 +6120,31 @@ def sample(
return result
+ @overload
+ def pipe(
+ self,
+ func: Callable[Concatenate[Self, P], T],
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> T:
+ ...
+
+ @overload
+ def pipe(
+ self,
+ func: tuple[Callable[..., T], str],
+ *args: Any,
+ **kwargs: Any,
+ ) -> T:
+ ...
+
@final
@doc(klass=_shared_doc_kwargs["klass"])
def pipe(
self,
- func: Callable[..., T] | tuple[Callable[..., T], str],
- *args,
- **kwargs,
+ func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str],
+ *args: Any,
+ **kwargs: Any,
) -> T:
r"""
Apply chainable functions that expect Series or DataFrames.
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index c9beaee55d608..f1ca05a312d2a 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -29,6 +29,7 @@ class providing the base-class of operations.
Union,
cast,
final,
+ overload,
)
import warnings
@@ -55,7 +56,6 @@ class providing the base-class of operations.
PositionalIndexer,
RandomState,
Scalar,
- T,
npt,
)
from pandas.compat.numpy import function as nv
@@ -147,7 +147,13 @@ class providing the base-class of operations.
)
if TYPE_CHECKING:
- from typing import Any
+ from pandas._typing import (
+ Any,
+ Concatenate,
+ P,
+ Self,
+ T,
+ )
from pandas.core.resample import Resampler
from pandas.core.window import (
@@ -988,6 +994,24 @@ def _selected_obj(self):
def _dir_additions(self) -> set[str]:
return self.obj._dir_additions()
+ @overload
+ def pipe(
+ self,
+ func: Callable[Concatenate[Self, P], T],
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> T:
+ ...
+
+ @overload
+ def pipe(
+ self,
+ func: tuple[Callable[..., T], str],
+ *args: Any,
+ **kwargs: Any,
+ ) -> T:
+ ...
+
@Substitution(
klass="GroupBy",
examples=dedent(
@@ -1013,9 +1037,9 @@ def _dir_additions(self) -> set[str]:
@Appender(_pipe_template)
def pipe(
self,
- func: Callable[..., T] | tuple[Callable[..., T], str],
- *args,
- **kwargs,
+ func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str],
+ *args: Any,
+ **kwargs: Any,
) -> T:
return com.pipe(self, func, *args, **kwargs)
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 31309777c154d..924f9e6d49040 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -9,6 +9,7 @@
cast,
final,
no_type_check,
+ overload,
)
import warnings
@@ -97,12 +98,16 @@
from collections.abc import Hashable
from pandas._typing import (
+ Any,
AnyArrayLike,
Axis,
AxisInt,
+ Concatenate,
Frequency,
IndexLabel,
InterpolateOptions,
+ P,
+ Self,
T,
TimedeltaConvertibleTypes,
TimeGrouperOrigin,
@@ -254,6 +259,24 @@ def _get_binner(self):
bin_grouper = BinGrouper(bins, binlabels, indexer=self._indexer)
return binner, bin_grouper
+ @overload
+ def pipe(
+ self,
+ func: Callable[Concatenate[Self, P], T],
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> T:
+ ...
+
+ @overload
+ def pipe(
+ self,
+ func: tuple[Callable[..., T], str],
+ *args: Any,
+ **kwargs: Any,
+ ) -> T:
+ ...
+
@final
@Substitution(
klass="Resampler",
@@ -278,9 +301,9 @@ def _get_binner(self):
@Appender(_pipe_template)
def pipe(
self,
- func: Callable[..., T] | tuple[Callable[..., T], str],
- *args,
- **kwargs,
+ func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str],
+ *args: Any,
+ **kwargs: Any,
) -> T:
return super().pipe(func, *args, **kwargs)
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index b62f7581ac220..db4a7e42f3bd3 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -9,7 +9,6 @@
import operator
from typing import (
TYPE_CHECKING,
- Any,
Callable,
overload,
)
@@ -66,15 +65,20 @@
from matplotlib.colors import Colormap
from pandas._typing import (
+ Any,
Axis,
AxisInt,
+ Concatenate,
FilePath,
IndexLabel,
IntervalClosedType,
Level,
+ P,
QuantileInterpolation,
Scalar,
+ Self,
StorageOptions,
+ T,
WriteBuffer,
WriteExcelBuffer,
)
@@ -3614,7 +3618,30 @@ class MyStyler(cls): # type: ignore[valid-type,misc]
return MyStyler
- def pipe(self, func: Callable, *args, **kwargs):
+ @overload
+ def pipe(
+ self,
+ func: Callable[Concatenate[Self, P], T],
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> T:
+ ...
+
+ @overload
+ def pipe(
+ self,
+ func: tuple[Callable[..., T], str],
+ *args: Any,
+ **kwargs: Any,
+ ) -> T:
+ ...
+
+ def pipe(
+ self,
+ func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str],
+ *args: Any,
+ **kwargs: Any,
+ ) -> T:
"""
Apply ``func(self, *args, **kwargs)``, and return the result.
| - [x] closes #56359
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Porting work done in:
- https://github.com/pandas-dev/pandas-stubs/pull/823
over to this repository.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56760 | 2024-01-07T05:12:19Z | 2024-01-15T16:01:44Z | 2024-01-15T16:01:44Z | 2024-01-15T18:37:46Z |
TST: assert that informative error is raised when offset not supported as a period frequency is passed to DataFrame.asfreq | diff --git a/pandas/tests/frame/methods/test_asfreq.py b/pandas/tests/frame/methods/test_asfreq.py
index 87d1745774487..f6b71626b6fee 100644
--- a/pandas/tests/frame/methods/test_asfreq.py
+++ b/pandas/tests/frame/methods/test_asfreq.py
@@ -8,6 +8,7 @@
from pandas import (
DataFrame,
DatetimeIndex,
+ PeriodIndex,
Series,
date_range,
period_range,
@@ -257,3 +258,28 @@ def test_asfreq_frequency_M_Q_Y_A_deprecated(self, freq, freq_depr):
with tm.assert_produces_warning(FutureWarning, match=depr_msg):
result = df.asfreq(freq=freq_depr)
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "freq, error_msg",
+ [
+ (
+ "2MS",
+ "MS is not supported as period frequency",
+ ),
+ (
+ offsets.MonthBegin(),
+ r"\<MonthBegin\> is not supported as period frequency",
+ ),
+ (
+ offsets.DateOffset(months=2),
+ r"\<DateOffset: months=2\> is not supported as period frequency",
+ ),
+ ],
+ )
+ def test_asfreq_unsupported_freq(self, freq, error_msg):
+ # https://github.com/pandas-dev/pandas/issues/56718
+ index = PeriodIndex(["2020-01-01", "2021-01-01"], freq="M")
+ df = DataFrame({"a": Series([0, 1], index=index)})
+
+ with pytest.raises(ValueError, match=error_msg):
+ df.asfreq(freq=freq)
| - [x] closes #56718 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature (python -m pytest pandas/tests/frame/methods/test_asfreq.py)
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] N/A: Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] N/A: Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56758 | 2024-01-07T02:37:05Z | 2024-02-12T21:00:41Z | 2024-02-12T21:00:41Z | 2024-02-12T21:00:41Z |
ENH: Implement interpolation for arrow and masked dtypes | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 0b04a1d313a6d..999f0ae212be7 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -323,6 +323,7 @@ Other enhancements
- :meth:`ExtensionArray.duplicated` added to allow extension type implementations of the ``duplicated`` method (:issue:`55255`)
- :meth:`Series.ffill`, :meth:`Series.bfill`, :meth:`DataFrame.ffill`, and :meth:`DataFrame.bfill` have gained the argument ``limit_area``; 3rd party :class:`.ExtensionArray` authors need to add this argument to the method ``_pad_or_backfill`` (:issue:`56492`)
- Allow passing ``read_only``, ``data_only`` and ``keep_links`` arguments to openpyxl using ``engine_kwargs`` of :func:`read_excel` (:issue:`55027`)
+- Implement :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` for :class:`ArrowDtype` and masked dtypes (:issue:`56267`)
- Implement masked algorithms for :meth:`Series.value_counts` (:issue:`54984`)
- Implemented :meth:`Series.dt` methods and attributes for :class:`ArrowDtype` with ``pyarrow.duration`` type (:issue:`52284`)
- Implemented :meth:`Series.str.extract` for :class:`ArrowDtype` (:issue:`56268`)
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 23b5c6385c13b..b9889a65feb34 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -184,6 +184,7 @@ def floordiv_compat(
AxisInt,
Dtype,
FillnaOptions,
+ InterpolateOptions,
Iterator,
NpDtype,
NumpySorter,
@@ -2068,6 +2069,45 @@ def _maybe_convert_setitem_value(self, value):
raise TypeError(msg) from err
return value
+ def interpolate(
+ self,
+ *,
+ method: InterpolateOptions,
+ axis: int,
+ index,
+ limit,
+ limit_direction,
+ limit_area,
+ copy: bool,
+ **kwargs,
+ ) -> Self:
+ """
+ See NDFrame.interpolate.__doc__.
+ """
+ # NB: we return type(self) even if copy=False
+ mask = self.isna()
+ if self.dtype.kind == "f":
+ data = self._pa_array.to_numpy()
+ elif self.dtype.kind in "iu":
+ data = self.to_numpy(dtype="f8", na_value=0.0)
+ else:
+ raise NotImplementedError(
+ f"interpolate is not implemented for dtype={self.dtype}"
+ )
+
+ missing.interpolate_2d_inplace(
+ data,
+ method=method,
+ axis=0,
+ index=index,
+ limit=limit,
+ limit_direction=limit_direction,
+ limit_area=limit_area,
+ mask=mask,
+ **kwargs,
+ )
+ return type(self)(self._box_pa_array(pa.array(data, mask=mask)))
+
@classmethod
def _if_else(
cls,
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 9ce19ced2b356..0ee090d992552 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -22,6 +22,7 @@
AxisInt,
DtypeObj,
FillnaOptions,
+ InterpolateOptions,
NpDtype,
PositionalIndexer,
Scalar,
@@ -99,6 +100,7 @@
NumpyValueArrayLike,
)
from pandas._libs.missing import NAType
+ from pandas.core.arrays import FloatingArray
from pandas.compat.numpy import function as nv
@@ -1519,6 +1521,58 @@ def all(
else:
return self.dtype.na_value
+ def interpolate(
+ self,
+ *,
+ method: InterpolateOptions,
+ axis: int,
+ index,
+ limit,
+ limit_direction,
+ limit_area,
+ copy: bool,
+ **kwargs,
+ ) -> FloatingArray:
+ """
+ See NDFrame.interpolate.__doc__.
+ """
+ # NB: we return type(self) even if copy=False
+ if self.dtype.kind == "f":
+ if copy:
+ data = self._data.copy()
+ mask = self._mask.copy()
+ else:
+ data = self._data
+ mask = self._mask
+ elif self.dtype.kind in "iu":
+ copy = True
+ data = self._data.astype("f8")
+ mask = self._mask.copy()
+ else:
+ raise NotImplementedError(
+ f"interpolate is not implemented for dtype={self.dtype}"
+ )
+
+ missing.interpolate_2d_inplace(
+ data,
+ method=method,
+ axis=0,
+ index=index,
+ limit=limit,
+ limit_direction=limit_direction,
+ limit_area=limit_area,
+ mask=mask,
+ **kwargs,
+ )
+ if not copy:
+ return self # type: ignore[return-value]
+ if self.dtype.kind == "f":
+ return type(self)._simple_new(data, mask) # type: ignore[return-value]
+ else:
+ from pandas.core.arrays import FloatingArray
+
+ return FloatingArray._simple_new(data, mask)
+
def _accumulate(
self, name: str, *, skipna: bool = True, **kwargs
) -> BaseMaskedArray:
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index cd76883d50541..ae2cdacf09e82 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -335,6 +335,7 @@ def interpolate_2d_inplace(
limit_direction: str = "forward",
limit_area: str | None = None,
fill_value: Any | None = None,
+ mask=None,
**kwargs,
) -> None:
"""
@@ -382,6 +383,7 @@ def func(yvalues: np.ndarray) -> None:
limit_area=limit_area_validated,
fill_value=fill_value,
bounds_error=False,
+ mask=mask,
**kwargs,
)
@@ -426,6 +428,7 @@ def _interpolate_1d(
fill_value: Any | None = None,
bounds_error: bool = False,
order: int | None = None,
+ mask=None,
**kwargs,
) -> None:
"""
@@ -439,8 +442,10 @@ def _interpolate_1d(
-----
Fills 'yvalues' in-place.
"""
-
- invalid = isna(yvalues)
+ if mask is not None:
+ invalid = mask
+ else:
+ invalid = isna(yvalues)
valid = ~invalid
if not valid.any():
@@ -517,7 +522,10 @@ def _interpolate_1d(
**kwargs,
)
- if is_datetimelike:
+ if mask is not None:
+ mask[:] = False
+ mask[preserve_nans] = True
+ elif is_datetimelike:
yvalues[preserve_nans] = NaT.value
else:
yvalues[preserve_nans] = np.nan
diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py
index e377fdd635bfe..5eb9aee2ffb15 100644
--- a/pandas/tests/frame/methods/test_interpolate.py
+++ b/pandas/tests/frame/methods/test_interpolate.py
@@ -498,8 +498,41 @@ def test_interpolate_empty_df(self):
assert result is None
tm.assert_frame_equal(df, expected)
- def test_interpolate_ea_raise(self):
+ def test_interpolate_ea(self, any_int_ea_dtype):
# GH#55347
- df = DataFrame({"a": [1, None, 2]}, dtype="Int64")
- with pytest.raises(NotImplementedError, match="does not implement"):
- df.interpolate()
+ df = DataFrame({"a": [1, None, None, None, 3]}, dtype=any_int_ea_dtype)
+ orig = df.copy()
+ result = df.interpolate(limit=2)
+ expected = DataFrame({"a": [1, 1.5, 2.0, None, 3]}, dtype="Float64")
+ tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(df, orig)
+
+ @pytest.mark.parametrize(
+ "dtype",
+ [
+ "Float64",
+ "Float32",
+ pytest.param("float32[pyarrow]", marks=td.skip_if_no("pyarrow")),
+ pytest.param("float64[pyarrow]", marks=td.skip_if_no("pyarrow")),
+ ],
+ )
+ def test_interpolate_ea_float(self, dtype):
+ # GH#55347
+ df = DataFrame({"a": [1, None, None, None, 3]}, dtype=dtype)
+ orig = df.copy()
+ result = df.interpolate(limit=2)
+ expected = DataFrame({"a": [1, 1.5, 2.0, None, 3]}, dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(df, orig)
+
+ @pytest.mark.parametrize(
+ "dtype",
+ ["int64", "uint64", "int32", "int16", "int8", "uint32", "uint16", "uint8"],
+ )
+ def test_interpolate_arrow(self, dtype):
+ # GH#55347
+ pytest.importorskip("pyarrow")
+ df = DataFrame({"a": [1, None, None, None, 3]}, dtype=dtype + "[pyarrow]")
+ result = df.interpolate(limit=2)
+ expected = DataFrame({"a": [1, 1.5, 2.0, None, 3]}, dtype="float64[pyarrow]")
+ tm.assert_frame_equal(result, expected)
| - [ ] closes #56267 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56757 | 2024-01-07T00:21:05Z | 2024-01-10T01:27:05Z | 2024-01-10T01:27:05Z | 2024-01-10T16:27:07Z |
Create function program | diff --git a/function program b/function program
new file mode 100644
index 0000000000000..ba90b7d802077
--- /dev/null
+++ b/function program
@@ -0,0 +1,26 @@
+#include<stdio.h>
+void postfix()
+{
+char symbol;
+precdurce token;
+int n=0;
+int top=;
+stack[0]=eos;
+for(token=get token(&symbol,&n);token!=eos;token=get token(&symbol&n))
+{
+if(token==operand)
+printf("%c",symbol);
+else if(token==rparen)
+{
+while(stack[top]!=lparon)
+printf("%c",pop());
+else
+{
+while(icp[token]<=isp[stack[top]])
+printf("%c",pop());
+}
+}
+while((tocken=pop())!=eos)
+printf("%c",token);
+}
+}
| - [x] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/56752 | 2024-01-06T14:39:12Z | 2024-01-06T14:51:24Z | null | 2024-01-06T14:51:25Z |
CI: Allow no argument in code_checks | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index e41f625e583c0..16500aade0476 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -16,12 +16,18 @@
set -uo pipefail
-[[ -z "$1" || "$1" == "code" || "$1" == "doctests" || "$1" == "docstrings" || "$1" == "single-docs" || "$1" == "notebooks" ]] || \
+if [[ -v 1 ]]; then
+ CHECK=$1
+else
+ # script will fail if it uses an unset variable (i.e. $1 is not provided)
+ CHECK=""
+fi
+
+[[ -z "$CHECK" || "$CHECK" == "code" || "$CHECK" == "doctests" || "$CHECK" == "docstrings" || "$CHECK" == "single-docs" || "$CHECK" == "notebooks" ]] || \
{ echo "Unknown command $1. Usage: $0 [code|doctests|docstrings|single-docs|notebooks]"; exit 9999; }
BASE_DIR="$(dirname $0)/.."
RET=0
-CHECK=$1
### CODE ###
if [[ -z "$CHECK" || "$CHECK" == "code" ]]; then
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
#56408 introduced `set -u`, meaning the script will fail when using any unset variable. This includes `$1`. | https://api.github.com/repos/pandas-dev/pandas/pulls/56751 | 2024-01-06T14:19:06Z | 2024-01-08T00:29:26Z | 2024-01-08T00:29:26Z | 2024-01-08T00:34:17Z |
DEPR: by_row="compat" in DataFrame.apply and Series.apply | diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 15e98cbb2a4d7..ad9b0d8d5b1d1 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -675,6 +675,7 @@ Other Deprecations
- Deprecated including the groups in computations when using :meth:`.DataFrameGroupBy.apply` and :meth:`.DataFrameGroupBy.resample`; pass ``include_groups=False`` to exclude the groups (:issue:`7155`)
- Deprecated indexing an :class:`Index` with a boolean indexer of length zero (:issue:`55820`)
- Deprecated not passing a tuple to :class:`.DataFrameGroupBy.get_group` or :class:`.SeriesGroupBy.get_group` when grouping by a length-1 list-like (:issue:`25971`)
+- Deprecated specifying ``by_row="compat"`` in :meth:`DataFrame.apply` and :meth:`Series.apply` (:issue:`53400`)
- Deprecated string ``AS`` denoting frequency in :class:`YearBegin` and strings ``AS-DEC``, ``AS-JAN``, etc. denoting annual frequencies with various fiscal year starts (:issue:`54275`)
- Deprecated string ``A`` denoting frequency in :class:`YearEnd` and strings ``A-DEC``, ``A-JAN``, etc. denoting annual frequencies with various fiscal year ends (:issue:`54275`)
- Deprecated string ``BAS`` denoting frequency in :class:`BYearBegin` and strings ``BAS-DEC``, ``BAS-JAN``, etc. denoting annual frequencies with various fiscal year starts (:issue:`54275`)
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 784e11415ade6..fb43bf6713847 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -1475,6 +1475,15 @@ def apply_compat(self):
try:
result = obj.apply(func, by_row="compat")
+ warnings.warn(
+ "apply operated row-by-row. This behavior is "
+ "deprecated and will be removed in a future version of pandas. To keep "
+ "the current behavior of operating row-by-row, use "
+ "map. To have apply operate on the entire Series, "
+ "pass by_row=False.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
except (ValueError, AttributeError, TypeError):
result = obj.apply(func, by_row=False)
return result
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 73b5804d8c168..2e128f33acdd5 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -10182,6 +10182,11 @@ def apply(
.. versionadded:: 2.1.0
+ .. versionchanged:: 2.2.0
+
+ Specifying ``by_row="compat"`` is deprecated and will be removed in
+ a future version of pandas. To operate row-by-row, use DataFrame.map.
+
engine : {'python', 'numba'}, default 'python'
Choose between the python (default) engine or the numba engine in apply.
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 487f57b7390a8..cff792e96d354 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4803,6 +4803,12 @@ def apply(
``by_row`` has no effect when ``func`` is a string.
.. versionadded:: 2.1.0
+
+ .. versionchanged:: 2.2.0
+
+ Specifying ``by_row="compat"`` is deprecated and will be removed in
+ a future version of pandas. To operate row-by-row, use Series.map.
+
**kwargs
Additional keyword arguments passed to func.
diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py
index 0839f005305a5..a14464cb71f6b 100644
--- a/pandas/tests/apply/test_frame_apply.py
+++ b/pandas/tests/apply/test_frame_apply.py
@@ -715,30 +715,33 @@ def test_infer_row_shape():
@pytest.mark.parametrize(
- "ops, by_row, expected",
+ "ops, by_row, warn, expected",
[
- ({"a": lambda x: x + 1}, "compat", DataFrame({"a": [2, 3]})),
- ({"a": lambda x: x + 1}, False, DataFrame({"a": [2, 3]})),
- ({"a": lambda x: x.sum()}, "compat", Series({"a": 3})),
- ({"a": lambda x: x.sum()}, False, Series({"a": 3})),
+ ({"a": lambda x: x + 1}, "compat", FutureWarning, DataFrame({"a": [2, 3]})),
+ ({"a": lambda x: x + 1}, False, None, DataFrame({"a": [2, 3]})),
+ ({"a": lambda x: x.sum()}, "compat", None, Series({"a": 3})),
+ ({"a": lambda x: x.sum()}, False, None, Series({"a": 3})),
(
{"a": ["sum", np.sum, lambda x: x.sum()]},
"compat",
+ None,
DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
),
(
{"a": ["sum", np.sum, lambda x: x.sum()]},
False,
+ None,
DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
),
- ({"a": lambda x: 1}, "compat", DataFrame({"a": [1, 1]})),
- ({"a": lambda x: 1}, False, Series({"a": 1})),
+ ({"a": lambda x: 1}, "compat", FutureWarning, DataFrame({"a": [1, 1]})),
+ ({"a": lambda x: 1}, False, None, Series({"a": 1})),
],
)
-def test_dictlike_lambda(ops, by_row, expected):
+def test_dictlike_lambda(ops, by_row, warn, expected):
# GH53601
df = DataFrame({"a": [1, 2]})
- result = df.apply(ops, by_row=by_row)
+ with tm.assert_produces_warning(warn, match="apply operated row-by-row"):
+ result = df.apply(ops, by_row=by_row)
tm.assert_equal(result, expected)
@@ -808,38 +811,53 @@ def test_with_dictlike_columns_with_infer():
@pytest.mark.parametrize(
- "ops, by_row, expected",
+ "ops, by_row, warn, expected",
[
- ([lambda x: x + 1], "compat", DataFrame({("a", "<lambda>"): [2, 3]})),
- ([lambda x: x + 1], False, DataFrame({("a", "<lambda>"): [2, 3]})),
- ([lambda x: x.sum()], "compat", DataFrame({"a": [3]}, index=["<lambda>"])),
- ([lambda x: x.sum()], False, DataFrame({"a": [3]}, index=["<lambda>"])),
+ (
+ [lambda x: x + 1],
+ "compat",
+ FutureWarning,
+ DataFrame({("a", "<lambda>"): [2, 3]}),
+ ),
+ ([lambda x: x + 1], False, None, DataFrame({("a", "<lambda>"): [2, 3]})),
+ (
+ [lambda x: x.sum()],
+ "compat",
+ None,
+ DataFrame({"a": [3]}, index=["<lambda>"]),
+ ),
+ ([lambda x: x.sum()], False, None, DataFrame({"a": [3]}, index=["<lambda>"])),
(
["sum", np.sum, lambda x: x.sum()],
"compat",
+ None,
DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
),
(
["sum", np.sum, lambda x: x.sum()],
False,
+ None,
DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", "<lambda>"]),
),
(
[lambda x: x + 1, lambda x: 3],
"compat",
+ FutureWarning,
DataFrame([[2, 3], [3, 3]], columns=[["a", "a"], ["<lambda>", "<lambda>"]]),
),
(
[lambda x: 2, lambda x: 3],
False,
+ None,
DataFrame({"a": [2, 3]}, ["<lambda>", "<lambda>"]),
),
],
)
-def test_listlike_lambda(ops, by_row, expected):
+def test_listlike_lambda(ops, by_row, warn, expected):
# GH53601
df = DataFrame({"a": [1, 2]})
- result = df.apply(ops, by_row=by_row)
+ with tm.assert_produces_warning(warn, match="apply operated row-by-row"):
+ result = df.apply(ops, by_row=by_row)
tm.assert_equal(result, expected)
@@ -1106,7 +1124,10 @@ def test_agg_transform(axis, float_frame):
tm.assert_frame_equal(result, expected)
# list-like
- result = float_frame.apply([np.sqrt], axis=axis)
+
+ msg = "apply operated row-by-row"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = float_frame.apply([np.sqrt], axis=axis)
expected = f_sqrt.copy()
if axis in {0, "index"}:
expected.columns = MultiIndex.from_product([float_frame.columns, ["sqrt"]])
@@ -1117,7 +1138,8 @@ def test_agg_transform(axis, float_frame):
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
- result = float_frame.apply([np.abs, np.sqrt], axis=axis)
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = float_frame.apply([np.abs, np.sqrt], axis=axis)
expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
if axis in {0, "index"}:
expected.columns = MultiIndex.from_product(
diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py
index df24fa08f48e1..7fe1000d5e7bd 100644
--- a/pandas/tests/apply/test_series_apply.py
+++ b/pandas/tests/apply/test_series_apply.py
@@ -287,6 +287,8 @@ def retrieve(targetRow, targetDF):
def test_transform(string_series, by_row):
# transforming functions
+ warn = FutureWarning if by_row == "compat" else None
+
with np.errstate(all="ignore"):
f_sqrt = np.sqrt(string_series)
f_abs = np.abs(string_series)
@@ -297,7 +299,9 @@ def test_transform(string_series, by_row):
tm.assert_series_equal(result, expected)
# list-like
- result = string_series.apply([np.sqrt], by_row=by_row)
+ msg = "apply operated row-by-row"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = string_series.apply([np.sqrt], by_row=by_row)
expected = f_sqrt.to_frame().copy()
expected.columns = ["sqrt"]
tm.assert_frame_equal(result, expected)
@@ -310,7 +314,8 @@ def test_transform(string_series, by_row):
# series and then concatting
expected = concat([f_sqrt, f_abs], axis=1)
expected.columns = ["sqrt", "absolute"]
- result = string_series.apply([np.sqrt, np.abs], by_row=by_row)
+ with tm.assert_produces_warning(warn, match=msg):
+ result = string_series.apply([np.sqrt, np.abs], by_row=by_row)
tm.assert_frame_equal(result, expected)
# dict, provide renaming
@@ -318,7 +323,8 @@ def test_transform(string_series, by_row):
expected.columns = ["foo", "bar"]
expected = expected.unstack().rename("series")
- result = string_series.apply({"foo": np.sqrt, "bar": np.abs}, by_row=by_row)
+ with tm.assert_produces_warning(warn, match=msg):
+ result = string_series.apply({"foo": np.sqrt, "bar": np.abs}, by_row=by_row)
tm.assert_series_equal(result.reindex_like(expected), expected)
@@ -617,10 +623,13 @@ def test_apply_dictlike_reducer(string_series, ops, how, kwargs, by_row):
)
def test_apply_listlike_transformer(string_series, ops, names, by_row):
# GH 39140
+ warn = FutureWarning if by_row == "compat" else None
with np.errstate(all="ignore"):
expected = concat([op(string_series) for op in ops], axis=1)
expected.columns = names
- result = string_series.apply(ops, by_row=by_row)
+ msg = "apply operated row-by-row"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = string_series.apply(ops, by_row=by_row)
tm.assert_frame_equal(result, expected)
@@ -634,7 +643,13 @@ def test_apply_listlike_transformer(string_series, ops, names, by_row):
def test_apply_listlike_lambda(ops, expected, by_row):
# GH53400
ser = Series([1, 2, 3])
- result = ser.apply(ops, by_row=by_row)
+ if by_row == "compat" and isinstance(expected, DataFrame):
+ warn = FutureWarning
+ else:
+ warn = None
+ msg = "apply operated row-by-row"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = ser.apply(ops, by_row=by_row)
tm.assert_equal(result, expected)
@@ -649,10 +664,13 @@ def test_apply_listlike_lambda(ops, expected, by_row):
)
def test_apply_dictlike_transformer(string_series, ops, by_row):
# GH 39140
+ warn = FutureWarning if by_row == "compat" else None
with np.errstate(all="ignore"):
expected = concat({name: op(string_series) for name, op in ops.items()})
expected.name = string_series.name
- result = string_series.apply(ops, by_row=by_row)
+ msg = "apply operated row-by-row"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = string_series.apply(ops, by_row=by_row)
tm.assert_series_equal(result, expected)
@@ -669,7 +687,13 @@ def test_apply_dictlike_transformer(string_series, ops, by_row):
def test_apply_dictlike_lambda(ops, by_row, expected):
# GH53400
ser = Series([1, 2, 3])
- result = ser.apply(ops, by_row=by_row)
+ if by_row == "compat" and len(expected) == 3:
+ warn = FutureWarning
+ else:
+ warn = None
+ msg = "apply operated row-by-row"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = ser.apply(ops, by_row=by_row)
tm.assert_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Followup to #53400. Fell off my radar that this needed to be deprecated. Will only be able to specify `by_row=False` in 3.0, and then the argument will be removed in 4.0.
cc @topper-123 | https://api.github.com/repos/pandas-dev/pandas/pulls/56750 | 2024-01-05T22:05:22Z | 2024-01-06T14:33:17Z | null | 2024-02-14T01:30:45Z |
TST/CLN: Remove unnecessary copies in tests | diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 11b53d711fce2..5c6377349304c 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -90,7 +90,12 @@ def test_read_empty_dta_with_dtypes(self, version):
"f64": np.array([0], dtype=np.float64),
}
)
- expected = empty_df_typed.copy()
+ # GH 7369, make sure can read a 0-obs dta file
+ with tm.ensure_clean() as path:
+ empty_df_typed.to_stata(path, write_index=False, version=version)
+ empty_reread = read_stata(path)
+
+ expected = empty_df_typed
# No uint# support. Downcast since values in range for int#
expected["u8"] = expected["u8"].astype(np.int8)
expected["u16"] = expected["u16"].astype(np.int16)
@@ -99,12 +104,8 @@ def test_read_empty_dta_with_dtypes(self, version):
expected["u64"] = expected["u64"].astype(np.int32)
expected["i64"] = expected["i64"].astype(np.int32)
- # GH 7369, make sure can read a 0-obs dta file
- with tm.ensure_clean() as path:
- empty_df_typed.to_stata(path, write_index=False, version=version)
- empty_reread = read_stata(path)
- tm.assert_frame_equal(expected, empty_reread)
- tm.assert_series_equal(expected.dtypes, empty_reread.dtypes)
+ tm.assert_frame_equal(expected, empty_reread)
+ tm.assert_series_equal(expected.dtypes, empty_reread.dtypes)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_read_index_col_none(self, version):
@@ -115,7 +116,7 @@ def test_read_index_col_none(self, version):
read_df = read_stata(path)
assert isinstance(read_df.index, pd.RangeIndex)
- expected = df.copy()
+ expected = df
expected["a"] = expected["a"].astype(np.int32)
tm.assert_frame_equal(read_df, expected, check_index_type=True)
@@ -325,7 +326,7 @@ def test_read_write_dta5(self):
original.to_stata(path, convert_dates=None)
written_and_read_again = self.read_dta(path)
- expected = original.copy()
+ expected = original
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
@@ -424,7 +425,7 @@ def test_read_write_dta11(self):
written_and_read_again = self.read_dta(path)
- expected = formatted.copy()
+ expected = formatted
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
@@ -462,7 +463,7 @@ def test_read_write_dta12(self, version):
written_and_read_again = self.read_dta(path)
- expected = formatted.copy()
+ expected = formatted
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
@@ -480,7 +481,7 @@ def test_read_write_dta13(self):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
- expected = formatted.copy()
+ expected = formatted
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
@@ -561,7 +562,7 @@ def test_numeric_column_names(self):
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
- expected = original.copy()
+ expected = original
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(expected, written_and_read_again)
@@ -579,7 +580,7 @@ def test_nan_to_missing_value(self, version):
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index("index")
- expected = original.copy()
+ expected = original
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(written_and_read_again, expected)
@@ -602,7 +603,7 @@ def test_string_no_dates(self):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
- expected = original.copy()
+ expected = original
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
@@ -619,7 +620,7 @@ def test_large_value_conversion(self):
written_and_read_again = self.read_dta(path)
- modified = original.copy()
+ modified = original
modified["s1"] = Series(modified["s1"], dtype=np.int16)
modified["s2"] = Series(modified["s2"], dtype=np.int32)
modified["s3"] = Series(modified["s3"], dtype=np.float64)
@@ -635,7 +636,7 @@ def test_dates_invalid_column(self):
written_and_read_again = self.read_dta(path)
- modified = original.copy()
+ modified = original
modified.columns = ["_0"]
modified.index = original.index.astype(np.int32)
tm.assert_frame_equal(written_and_read_again.set_index("index"), modified)
@@ -721,8 +722,15 @@ def test_bool_uint(self, byteorder, version):
{"s0": s0, "s1": s1, "s2": s2, "s3": s3, "s4": s4, "s5": s5, "s6": s6}
)
original.index.name = "index"
- expected = original.copy()
- expected.index = original.index.astype(np.int32)
+
+ with tm.ensure_clean() as path:
+ original.to_stata(path, byteorder=byteorder, version=version)
+ written_and_read_again = self.read_dta(path)
+
+ written_and_read_again = written_and_read_again.set_index("index")
+
+ expected = original
+ expected.index = expected.index.astype(np.int32)
expected_types = (
np.int8,
np.int8,
@@ -735,11 +743,6 @@ def test_bool_uint(self, byteorder, version):
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
- with tm.ensure_clean() as path:
- original.to_stata(path, byteorder=byteorder, version=version)
- written_and_read_again = self.read_dta(path)
-
- written_and_read_again = written_and_read_again.set_index("index")
tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self, datapath):
@@ -1000,18 +1003,19 @@ def test_categorical_writing(self, version):
"unlabeled",
],
)
- expected = original.copy()
+ with tm.ensure_clean() as path:
+ original.astype("category").to_stata(path, version=version)
+ written_and_read_again = self.read_dta(path)
- # these are all categoricals
- original = pd.concat(
- [original[col].astype("category") for col in original], axis=1
- )
+ res = written_and_read_again.set_index("index")
+
+ expected = original
expected.index = expected.index.set_names("index").astype(np.int32)
expected["incompletely_labeled"] = expected["incompletely_labeled"].apply(str)
expected["unlabeled"] = expected["unlabeled"].apply(str)
for col in expected:
- orig = expected[col].copy()
+ orig = expected[col]
cat = orig.astype("category")._values
cat = cat.as_ordered()
@@ -1022,11 +1026,6 @@ def test_categorical_writing(self, version):
expected[col] = cat
- with tm.ensure_clean() as path:
- original.to_stata(path, version=version)
- written_and_read_again = self.read_dta(path)
-
- res = written_and_read_again.set_index("index")
tm.assert_frame_equal(res, expected)
def test_categorical_warnings_and_errors(self):
@@ -1037,9 +1036,7 @@ def test_categorical_warnings_and_errors(self):
columns=["Too_long"],
)
- original = pd.concat(
- [original[col].astype("category") for col in original], axis=1
- )
+ original = original.astype("category")
with tm.ensure_clean() as path:
msg = (
"Stata value labels for a single variable must have "
@@ -1050,10 +1047,7 @@ def test_categorical_warnings_and_errors(self):
original = DataFrame.from_records(
[["a"], ["b"], ["c"], ["d"], [1]], columns=["Too_long"]
- )
- original = pd.concat(
- [original[col].astype("category") for col in original], axis=1
- )
+ ).astype("category")
with tm.assert_produces_warning(ValueLabelTypeMismatch):
original.to_stata(path)
@@ -1074,7 +1068,7 @@ def test_categorical_with_stata_missing_values(self, version):
res = written_and_read_again.set_index("index")
- expected = original.copy()
+ expected = original
for col in expected:
cat = expected[col]._values
new_cats = cat.remove_unused_categories().categories
@@ -1525,7 +1519,7 @@ def test_out_of_range_float(self):
reread = read_stata(path)
original["ColumnTooBig"] = original["ColumnTooBig"].astype(np.float64)
- expected = original.copy()
+ expected = original
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(reread.set_index("index"), expected)
@@ -1672,13 +1666,13 @@ def test_writer_117(self):
version=117,
)
written_and_read_again = self.read_dta(path)
- # original.index is np.int32, read index is np.int64
- tm.assert_frame_equal(
- written_and_read_again.set_index("index"),
- original,
- check_index_type=False,
- )
- tm.assert_frame_equal(original, copy)
+ # original.index is np.int32, read index is np.int64
+ tm.assert_frame_equal(
+ written_and_read_again.set_index("index"),
+ original,
+ check_index_type=False,
+ )
+ tm.assert_frame_equal(original, copy)
def test_convert_strl_name_swap(self):
original = DataFrame(
@@ -2052,7 +2046,7 @@ def test_compression(compression, version, use_dict, infer, compression_to_exten
fp = path
reread = read_stata(fp, index_col="index")
- expected = df.copy()
+ expected = df
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(reread, expected)
@@ -2078,7 +2072,7 @@ def test_compression_dict(method, file_ext):
fp = path
reread = read_stata(fp, index_col="index")
- expected = df.copy()
+ expected = df
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(reread, expected)
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index 6ba2ac0104e75..ab75dd7469b73 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -134,7 +134,7 @@ def test_resample_empty_series(freq, index, resample_method):
if resample_method == "ohlc":
expected = DataFrame(
- [], index=ser.index[:0].copy(), columns=["open", "high", "low", "close"]
+ [], index=ser.index[:0], columns=["open", "high", "low", "close"]
)
expected.index = _asfreq_compat(ser.index, freq)
tm.assert_frame_equal(result, expected, check_dtype=False)
@@ -167,7 +167,7 @@ def test_resample_nat_index_series(freq, resample_method):
if resample_method == "ohlc":
expected = DataFrame(
- [], index=ser.index[:0].copy(), columns=["open", "high", "low", "close"]
+ [], index=ser.index[:0], columns=["open", "high", "low", "close"]
)
tm.assert_frame_equal(result, expected, check_dtype=False)
else:
@@ -248,9 +248,7 @@ def test_resample_empty_dataframe(index, freq, resample_method):
if resample_method == "ohlc":
# TODO: no tests with len(df.columns) > 0
mi = MultiIndex.from_product([df.columns, ["open", "high", "low", "close"]])
- expected = DataFrame(
- [], index=df.index[:0].copy(), columns=mi, dtype=np.float64
- )
+ expected = DataFrame([], index=df.index[:0], columns=mi, dtype=np.float64)
expected.index = _asfreq_compat(df.index, freq)
elif resample_method != "size":
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 337c5ff53bd14..6b406a6e6f67a 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -397,10 +397,9 @@ def test_median_duplicate_columns():
columns=list("aaa"),
index=date_range("2012-01-01", periods=20, freq="s"),
)
- df2 = df.copy()
- df2.columns = ["a", "b", "c"]
- expected = df2.resample("5s").median()
result = df.resample("5s").median()
+ df.columns = ["a", "b", "c"]
+ expected = df.resample("5s").median()
expected.columns = result.columns
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py
index d8bc7974b4139..7174245ec16d8 100644
--- a/pandas/tests/reshape/concat/test_concat.py
+++ b/pandas/tests/reshape/concat/test_concat.py
@@ -412,7 +412,7 @@ def test_concat_bug_1719(self):
ts1 = Series(
np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
)
- ts2 = ts1.copy()[::2]
+ ts2 = ts1[::2]
# to join with union
# these two are of different length!
diff --git a/pandas/tests/reshape/concat/test_series.py b/pandas/tests/reshape/concat/test_series.py
index c12b835cb61e1..9a481fed384d3 100644
--- a/pandas/tests/reshape/concat/test_series.py
+++ b/pandas/tests/reshape/concat/test_series.py
@@ -30,11 +30,11 @@ def test_concat_series(self):
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
-
- ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
-
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
- exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
+ exp_index = MultiIndex(
+ levels=[[0, 1, 2], DatetimeIndex(ts.index.to_numpy(dtype="M8[ns]"))],
+ codes=exp_codes,
+ )
expected.index = exp_index
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 5a1f47e341222..21804d32e76ca 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -153,13 +153,12 @@ def test_join_on(self, target_source, infer_string):
target.join(source, on="E")
# overlap
- source_copy = source.copy()
msg = (
"You are trying to merge on float64 and object|string columns for key "
"'A'. If you wish to proceed you should use pd.concat"
)
with pytest.raises(ValueError, match=msg):
- target.join(source_copy, on="A")
+ target.join(source, on="A")
def test_join_on_fails_with_different_right_index(self):
df = DataFrame(
diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py
index bc02da0d5b97b..cc05f49550e9b 100644
--- a/pandas/tests/reshape/merge/test_multi.py
+++ b/pandas/tests/reshape/merge/test_multi.py
@@ -126,9 +126,7 @@ def run_asserts(left, right, sort):
"2nd",
np.random.default_rng(2).integers(0, 10, len(left)).astype("float"),
)
-
- i = np.random.default_rng(2).permutation(len(left))
- right = left.iloc[i].copy()
+ right = left.sample(frac=1, random_state=np.random.default_rng(2))
left["4th"] = bind_cols(left)
right["5th"] = -bind_cols(right)
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index ff9f927597956..eb858e06c15dd 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -349,13 +349,12 @@ def test_melt_missing_columns_raises(self):
df.melt(["a", "b", "not_here", "or_there"], ["c", "d"])
# Multiindex melt fails if column is missing from multilevel melt
- multi = df.copy()
- multi.columns = [list("ABCD"), list("abcd")]
+ df.columns = [list("ABCD"), list("abcd")]
with pytest.raises(KeyError, match=msg):
- multi.melt([("E", "a")], [("B", "b")])
+ df.melt([("E", "a")], [("B", "b")])
# Multiindex fails if column is missing from single level melt
with pytest.raises(KeyError, match=msg):
- multi.melt(["A"], ["F"], col_level=0)
+ df.melt(["A"], ["F"], col_level=0)
def test_melt_mixed_int_str_id_vars(self):
# GH 29718
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index fc1c80eb4dec6..a6e4b4f78e25a 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -430,7 +430,7 @@ def test_indexing():
result = ts["2001"]
tm.assert_series_equal(result, ts.iloc[:12])
- df = DataFrame({"A": ts.copy()})
+ df = DataFrame({"A": ts})
# GH#36179 pre-2.0 df["2001"] operated as slicing on rows. in 2.0 it behaves
# like any other key, so raises
diff --git a/pandas/tests/series/methods/test_cov_corr.py b/pandas/tests/series/methods/test_cov_corr.py
index bd60265582652..7a4d48fb76940 100644
--- a/pandas/tests/series/methods/test_cov_corr.py
+++ b/pandas/tests/series/methods/test_cov_corr.py
@@ -86,22 +86,24 @@ def test_corr(self, datetime_series, any_float_dtype):
index=date_range("2020-01-01", periods=10),
name="ts",
)
- B = A.copy()
- result = A.corr(B)
- expected, _ = stats.pearsonr(A, B)
+ result = A.corr(A)
+ expected, _ = stats.pearsonr(A, A)
tm.assert_almost_equal(result, expected)
def test_corr_rank(self):
stats = pytest.importorskip("scipy.stats")
# kendall and spearman
- A = Series(
+ B = Series(
np.arange(10, dtype=np.float64),
index=date_range("2020-01-01", periods=10),
name="ts",
)
- B = A.copy()
- A[-5:] = A[:5].copy()
+ A = Series(
+ np.concatenate([np.arange(5, dtype=np.float64)] * 2),
+ index=date_range("2020-01-01", periods=10),
+ name="ts",
+ )
result = A.corr(B, method="kendall")
expected = stats.kendalltau(A, B)[0]
tm.assert_almost_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_interpolate.py b/pandas/tests/series/methods/test_interpolate.py
index d854f0b787759..0f43c1bc72c45 100644
--- a/pandas/tests/series/methods/test_interpolate.py
+++ b/pandas/tests/series/methods/test_interpolate.py
@@ -224,8 +224,7 @@ def test_interpolate_index_values(self):
result = s.interpolate(method="index")
- expected = s.copy()
- bad = isna(expected.values)
+ bad = isna(s)
good = ~bad
expected = Series(
np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]
diff --git a/pandas/tests/series/methods/test_nlargest.py b/pandas/tests/series/methods/test_nlargest.py
index bf13ea04ca9f9..c37f57771e29d 100644
--- a/pandas/tests/series/methods/test_nlargest.py
+++ b/pandas/tests/series/methods/test_nlargest.py
@@ -177,7 +177,7 @@ def test_nlargest_nullable(self, any_numeric_ea_dtype):
arr = np.random.default_rng(2).standard_normal(10)
arr = arr.astype(dtype.lower(), copy=False)
- ser = Series(arr.copy(), dtype=dtype)
+ ser = Series(arr, dtype=dtype, copy=True)
ser[1] = pd.NA
result = ser.nlargest(5)
diff --git a/pandas/tests/series/methods/test_rank.py b/pandas/tests/series/methods/test_rank.py
index 4d48f290e6a44..776c5633cb4b3 100644
--- a/pandas/tests/series/methods/test_rank.py
+++ b/pandas/tests/series/methods/test_rank.py
@@ -106,7 +106,7 @@ def test_rank(self, datetime_series):
tm.assert_series_equal(iranks, exp)
iseries = Series(np.repeat(np.nan, 100))
- exp = iseries.copy()
+ exp = iseries
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
diff --git a/pandas/tests/series/methods/test_sort_values.py b/pandas/tests/series/methods/test_sort_values.py
index 00142c4d82327..cb83bc5833fba 100644
--- a/pandas/tests/series/methods/test_sort_values.py
+++ b/pandas/tests/series/methods/test_sort_values.py
@@ -92,8 +92,7 @@ def test_sort_values(self, datetime_series, using_copy_on_write):
s.sort_values(inplace=True)
def test_sort_values_categorical(self):
- c = Categorical(["a", "b", "b", "a"], ordered=False)
- cat = Series(c.copy())
+ cat = Series(Categorical(["a", "b", "b", "a"], ordered=False))
# sort in the categories order
expected = Series(
diff --git a/pandas/tests/series/methods/test_to_csv.py b/pandas/tests/series/methods/test_to_csv.py
index 1c17013d621c7..e292861012c8f 100644
--- a/pandas/tests/series/methods/test_to_csv.py
+++ b/pandas/tests/series/methods/test_to_csv.py
@@ -173,10 +173,10 @@ def test_to_csv_interval_index(self, using_infer_string):
s.to_csv(path, header=False)
result = self.read_csv(path, index_col=0)
- # can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
- expected = s.copy()
- if using_infer_string:
- expected.index = expected.index.astype("string[pyarrow_numpy]")
- else:
- expected.index = expected.index.astype(str)
- tm.assert_series_equal(result, expected)
+ # can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
+ expected = s
+ if using_infer_string:
+ expected.index = expected.index.astype("string[pyarrow_numpy]")
+ else:
+ expected.index = expected.index.astype(str)
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_unstack.py b/pandas/tests/series/methods/test_unstack.py
index 3c70e839c8e20..ad11827117209 100644
--- a/pandas/tests/series/methods/test_unstack.py
+++ b/pandas/tests/series/methods/test_unstack.py
@@ -148,7 +148,7 @@ def test_unstack_multi_index_categorical_values():
dti = ser.index.levels[0]
c = pd.Categorical(["foo"] * len(dti))
expected = DataFrame(
- {"A": c.copy(), "B": c.copy(), "C": c.copy(), "D": c.copy()},
+ {"A": c, "B": c, "C": c, "D": c},
columns=Index(list("ABCD"), name="minor"),
index=dti.rename("major"),
)
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index b40e2e99dae2e..5e52e4166b902 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -241,7 +241,7 @@ def test_add_corner_cases(self, datetime_series):
result = datetime_series + empty
assert np.isnan(result).all()
- result = empty + empty.copy()
+ result = empty + empty
assert len(result) == 0
def test_add_float_plus_int(self, datetime_series):
diff --git a/pandas/tests/series/test_logical_ops.py b/pandas/tests/series/test_logical_ops.py
index d9c94e871bd4b..0e6d4932102d1 100644
--- a/pandas/tests/series/test_logical_ops.py
+++ b/pandas/tests/series/test_logical_ops.py
@@ -396,11 +396,11 @@ def test_logical_ops_label_based(self, using_infer_string):
# vs empty
empty = Series([], dtype=object)
- result = a & empty.copy()
+ result = a & empty
expected = Series([False, False, False], list("abc"))
tm.assert_series_equal(result, expected)
- result = a | empty.copy()
+ result = a | empty
expected = Series([True, True, False], list("abc"))
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index de0338b39d91a..3fd771c7fe31a 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1788,7 +1788,6 @@ def test_scipy_compat(self, arr):
arr = np.array(arr)
mask = ~np.isfinite(arr)
- arr = arr.copy()
result = libalgos.rank_1d(arr)
arr[mask] = np.inf
exp = sp_stats.rankdata(arr)
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 71994d186163e..838fee1db878c 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -32,7 +32,7 @@ def _frame2():
def _mixed(_frame):
return DataFrame(
{
- "A": _frame["A"].copy(),
+ "A": _frame["A"],
"B": _frame["B"].astype("float32"),
"C": _frame["C"].astype("int64"),
"D": _frame["D"].astype("int32"),
@@ -44,7 +44,7 @@ def _mixed(_frame):
def _mixed2(_frame2):
return DataFrame(
{
- "A": _frame2["A"].copy(),
+ "A": _frame2["A"],
"B": _frame2["B"].astype("float32"),
"C": _frame2["C"].astype("int64"),
"D": _frame2["D"].astype("int32"),
@@ -78,22 +78,22 @@ def _integer2():
@pytest.fixture
def _array(_frame):
- return _frame["A"].values.copy()
+ return _frame["A"].to_numpy()
@pytest.fixture
def _array2(_frame2):
- return _frame2["A"].values.copy()
+ return _frame2["A"].to_numpy()
@pytest.fixture
def _array_mixed(_mixed):
- return _mixed["D"].values.copy()
+ return _mixed["D"].to_numpy()
@pytest.fixture
def _array_mixed2(_mixed2):
- return _mixed2["D"].values.copy()
+ return _mixed2["D"].to_numpy()
@pytest.mark.skipif(not expr.USE_NUMEXPR, reason="not using numexpr")
@@ -170,7 +170,7 @@ def test_run_binary(self, request, fixture, flex, comparison_op, monkeypatch):
df = request.getfixturevalue(fixture)
arith = comparison_op.__name__
with option_context("compute.use_numexpr", False):
- other = df.copy() + 1
+ other = df + 1
with monkeypatch.context() as m:
m.setattr(expr, "_MIN_ELEMENTS", 0)
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index 329fbac925539..132608d7df115 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -33,12 +33,10 @@ def left_right():
np.random.default_rng(2).integers(low, high, (n, 7)), columns=list("ABCDEFG")
)
left["left"] = left.sum(axis=1)
-
- # one-2-one match
- i = np.random.default_rng(2).permutation(len(left))
- right = left.iloc[i].copy()
+ right = left.sample(
+ frac=1, random_state=np.random.default_rng(2), ignore_index=True
+ )
right.columns = right.columns[:-1].tolist() + ["right"]
- right.index = np.arange(len(right))
right["right"] *= -1
return left, right
@@ -267,13 +265,12 @@ def test_int64_overflow_one_to_many_none_match(self, join_type, sort):
right["right"] = np.random.default_rng(2).standard_normal(len(right))
# shuffle left & right frames
- i = np.random.default_rng(5).permutation(len(left))
- left = left.iloc[i].copy()
- left.index = np.arange(len(left))
-
- i = np.random.default_rng(6).permutation(len(right))
- right = right.iloc[i].copy()
- right.index = np.arange(len(right))
+ left = left.sample(
+ frac=1, ignore_index=True, random_state=np.random.default_rng(5)
+ )
+ right = right.sample(
+ frac=1, ignore_index=True, random_state=np.random.default_rng(6)
+ )
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
@@ -307,13 +304,8 @@ def test_int64_overflow_one_to_many_none_match(self, join_type, sort):
for rv in rval
)
- def align(df):
- df = df.sort_values(df.columns.tolist())
- df.index = np.arange(len(df))
- return df
-
out = DataFrame(vals, columns=list("ABCDEFG") + ["left", "right"])
- out = align(out)
+ out = out.sort_values(out.columns.to_list(), ignore_index=True)
jmask = {
"left": out["left"].notna(),
@@ -323,19 +315,21 @@ def align(df):
}
mask = jmask[how]
- frame = align(out[mask].copy())
+ frame = out[mask].sort_values(out.columns.to_list(), ignore_index=True)
assert mask.all() ^ mask.any() or how == "outer"
res = merge(left, right, how=how, sort=sort)
if sort:
kcols = list("ABCDEFG")
tm.assert_frame_equal(
- res[kcols].copy(), res[kcols].sort_values(kcols, kind="mergesort")
+ res[kcols], res[kcols].sort_values(kcols, kind="mergesort")
)
# as in GH9092 dtypes break with outer/right join
# 2021-12-18: dtype does not break anymore
- tm.assert_frame_equal(frame, align(res))
+ tm.assert_frame_equal(
+ frame, res.sort_values(res.columns.to_list(), ignore_index=True)
+ )
@pytest.mark.parametrize(
diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py
index c452382ec572b..7f49c5f0f86fd 100644
--- a/pandas/tests/tools/test_to_numeric.py
+++ b/pandas/tests/tools/test_to_numeric.py
@@ -204,11 +204,9 @@ def test_numeric_df_columns(columns):
)
expected = DataFrame({"a": [1.2, 3.14, np.inf, 0.1], "b": [1.0, 2.0, 3.0, 4.0]})
+ df[columns] = df[columns].apply(to_numeric)
- df_copy = df.copy()
- df_copy[columns] = df_copy[columns].apply(to_numeric)
-
- tm.assert_frame_equal(df_copy, expected)
+ tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py
index 400bf10817ab8..48247cd480083 100644
--- a/pandas/tests/window/test_groupby.py
+++ b/pandas/tests/window/test_groupby.py
@@ -447,7 +447,7 @@ def get_window_bounds(
):
min_periods = self.window_size if min_periods is None else 0
end = np.arange(num_values, dtype=np.int64) + 1
- start = end.copy() - self.window_size
+ start = end - self.window_size
start[start < 0] = min_periods
return start, end
diff --git a/pandas/tests/window/test_pairwise.py b/pandas/tests/window/test_pairwise.py
index 3ceb58756bac6..6fae79ee70702 100644
--- a/pandas/tests/window/test_pairwise.py
+++ b/pandas/tests/window/test_pairwise.py
@@ -96,11 +96,10 @@ def test_flex_binary_frame(method, frame):
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
- frame2 = frame.copy()
frame2 = DataFrame(
- np.random.default_rng(2).standard_normal(frame2.shape),
- index=frame2.index,
- columns=frame2.columns,
+ np.random.default_rng(2).standard_normal(frame.shape),
+ index=frame.index,
+ columns=frame.columns,
)
res3 = getattr(frame.rolling(window=10), method)(frame2)
diff --git a/pandas/tests/window/test_win_type.py b/pandas/tests/window/test_win_type.py
index 5052019ddb726..5c785ed3fccb2 100644
--- a/pandas/tests/window/test_win_type.py
+++ b/pandas/tests/window/test_win_type.py
@@ -445,12 +445,12 @@ def test_cmov_window_regular_linear_range(win_types, step):
# GH 8238
pytest.importorskip("scipy")
vals = np.array(range(10), dtype=float)
- xp = vals.copy()
+ rs = Series(vals).rolling(5, win_type=win_types, center=True, step=step).mean()
+ xp = vals
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)[::step]
- rs = Series(vals).rolling(5, win_type=win_types, center=True, step=step).mean()
tm.assert_series_equal(xp, rs)
@@ -648,16 +648,15 @@ def test_cmov_window_special_linear_range(win_types_special, step):
}
vals = np.array(range(10), dtype=float)
- xp = vals.copy()
- xp[:2] = np.nan
- xp[-2:] = np.nan
- xp = Series(xp)[::step]
-
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True, step=step)
.mean(**kwds[win_types_special])
)
+ xp = vals
+ xp[:2] = np.nan
+ xp[-2:] = np.nan
+ xp = Series(xp)[::step]
tm.assert_series_equal(xp, rs)
| AFAICT these copies are not necessary or related to whats being tested | https://api.github.com/repos/pandas-dev/pandas/pulls/56749 | 2024-01-05T22:00:43Z | 2024-01-08T23:45:12Z | 2024-01-08T23:45:12Z | 2024-01-08T23:46:58Z |
TST: Scope pytest.raises closer to failing line | diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index ed3ea1b0bd0dc..7969e684f5b04 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -141,8 +141,8 @@ class TestEval:
def test_complex_cmp_ops(self, cmp1, cmp2, binop, lhs, rhs, engine, parser):
if parser == "python" and binop in ["and", "or"]:
msg = "'BoolOp' nodes are not implemented"
+ ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)"
with pytest.raises(NotImplementedError, match=msg):
- ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)"
pd.eval(ex, engine=engine, parser=parser)
return
@@ -161,9 +161,8 @@ def test_simple_cmp_ops(self, cmp_op, lhs, rhs, engine, parser):
if parser == "python" and cmp_op in ["in", "not in"]:
msg = "'(In|NotIn)' nodes are not implemented"
-
+ ex = f"lhs {cmp_op} rhs"
with pytest.raises(NotImplementedError, match=msg):
- ex = f"lhs {cmp_op} rhs"
pd.eval(ex, engine=engine, parser=parser)
return
@@ -193,8 +192,8 @@ def test_simple_cmp_ops(self, cmp_op, lhs, rhs, engine, parser):
def test_compound_invert_op(self, op, lhs, rhs, request, engine, parser):
if parser == "python" and op in ["in", "not in"]:
msg = "'(In|NotIn)' nodes are not implemented"
+ ex = f"~(lhs {op} rhs)"
with pytest.raises(NotImplementedError, match=msg):
- ex = f"~(lhs {op} rhs)"
pd.eval(ex, engine=engine, parser=parser)
return
diff --git a/pandas/tests/frame/methods/test_compare.py b/pandas/tests/frame/methods/test_compare.py
index a4d0a7068a3a6..75e60a4816902 100644
--- a/pandas/tests/frame/methods/test_compare.py
+++ b/pandas/tests/frame/methods/test_compare.py
@@ -168,25 +168,25 @@ def test_compare_multi_index(align_axis):
tm.assert_frame_equal(result, expected)
-def test_compare_unaligned_objects():
- # test DataFrames with different indices
+def test_compare_different_indices():
msg = (
r"Can only compare identically-labeled \(both index and columns\) DataFrame "
"objects"
)
+ df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"])
+ df2 = pd.DataFrame([1, 2, 3], index=["a", "b", "d"])
with pytest.raises(ValueError, match=msg):
- df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"])
- df2 = pd.DataFrame([1, 2, 3], index=["a", "b", "d"])
df1.compare(df2)
- # test DataFrames with different shapes
+
+def test_compare_different_shapes():
msg = (
r"Can only compare identically-labeled \(both index and columns\) DataFrame "
"objects"
)
+ df1 = pd.DataFrame(np.ones((3, 3)))
+ df2 = pd.DataFrame(np.zeros((2, 1)))
with pytest.raises(ValueError, match=msg):
- df1 = pd.DataFrame(np.ones((3, 3)))
- df2 = pd.DataFrame(np.zeros((2, 1)))
df1.compare(df2)
diff --git a/pandas/tests/frame/methods/test_sample.py b/pandas/tests/frame/methods/test_sample.py
index 6b3459fbdc035..e65225a33a479 100644
--- a/pandas/tests/frame/methods/test_sample.py
+++ b/pandas/tests/frame/methods/test_sample.py
@@ -111,12 +111,10 @@ def test_sample_invalid_weight_lengths(self, obj):
obj.sample(n=3, weights=[0, 1])
with pytest.raises(ValueError, match=msg):
- bad_weights = [0.5] * 11
- obj.sample(n=3, weights=bad_weights)
+ obj.sample(n=3, weights=[0.5] * 11)
with pytest.raises(ValueError, match="Fewer non-zero entries in p than size"):
- bad_weight_series = Series([0, 0, 0.2])
- obj.sample(n=4, weights=bad_weight_series)
+ obj.sample(n=4, weights=Series([0, 0, 0.2]))
def test_sample_negative_weights(self, obj):
# Check won't accept negative weights
diff --git a/pandas/tests/frame/methods/test_tz_convert.py b/pandas/tests/frame/methods/test_tz_convert.py
index bcb8e423980fd..90bec4dfb5be6 100644
--- a/pandas/tests/frame/methods/test_tz_convert.py
+++ b/pandas/tests/frame/methods/test_tz_convert.py
@@ -98,21 +98,23 @@ def test_tz_convert_and_localize(self, fn):
tm.assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
- # Bad Inputs
-
+ @pytest.mark.parametrize("fn", ["tz_localize", "tz_convert"])
+ def test_tz_convert_and_localize_bad_input(self, fn):
+ int_idx = Index(range(5))
+ l0 = date_range("20140701", periods=5, freq="D")
# Not DatetimeIndex / PeriodIndex
+ df = DataFrame(index=int_idx)
with pytest.raises(TypeError, match="DatetimeIndex"):
- df = DataFrame(index=int_idx)
getattr(df, fn)("US/Pacific")
# Not DatetimeIndex / PeriodIndex
+ df = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0]))
with pytest.raises(TypeError, match="DatetimeIndex"):
- df = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0]))
getattr(df, fn)("US/Pacific", level=0)
# Invalid level
+ df = DataFrame(index=l0)
with pytest.raises(ValueError, match="not valid"):
- df = DataFrame(index=l0)
getattr(df, fn)("US/Pacific", level=1)
@pytest.mark.parametrize("copy", [True, False])
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 5a69c26f2ab16..8198cc532d998 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -466,8 +466,8 @@ def numpystd(x):
# this uses column selection & renaming
msg = r"nested renamer is not supported"
+ d = {"C": "mean", "D": {"foo": "mean", "bar": "std"}}
with pytest.raises(SpecificationError, match=msg):
- d = {"C": "mean", "D": {"foo": "mean", "bar": "std"}}
grouped.aggregate(d)
# But without renaming, these functions are OK
diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py
index 0ebb88afb6c86..18a5d7db3753e 100644
--- a/pandas/tests/indexes/datetimes/test_partial_slicing.py
+++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py
@@ -450,8 +450,8 @@ def test_getitem_with_datestring_with_UTC_offset(self, start, end):
with pytest.raises(ValueError, match="Both dates must"):
df[start : end[:-4] + "1:00"]
+ df = df.tz_localize(None)
with pytest.raises(ValueError, match="The index must be timezone"):
- df = df.tz_localize(None)
df[start:end]
def test_slice_reduce_to_series(self):
diff --git a/pandas/tests/indexing/test_na_indexing.py b/pandas/tests/indexing/test_na_indexing.py
index 5364cfe852430..d4ad350a64e4d 100644
--- a/pandas/tests/indexing/test_na_indexing.py
+++ b/pandas/tests/indexing/test_na_indexing.py
@@ -54,7 +54,6 @@ def test_series_mask_boolean(values, dtype, mask, indexer_class, frame):
msg = "iLocation based boolean indexing cannot use an indexable as a mask"
with pytest.raises(ValueError, match=msg):
result = obj.iloc[mask]
- tm.assert_equal(result, expected)
else:
result = obj.iloc[mask]
tm.assert_equal(result, expected)
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index ef4cd402aaf24..a51334c03a302 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -53,8 +53,8 @@ def test_iat_set_ints(self, dtype, frame_or_series):
def test_iat_set_other(self, index, frame_or_series):
f = frame_or_series(range(len(index)), index=index)
msg = "iAt based indexing can only have integer indexers"
+ idx = next(generate_indices(f, False))
with pytest.raises(ValueError, match=msg):
- idx = next(generate_indices(f, False))
f.iat[idx] = 1
@pytest.mark.parametrize(
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 66dd893df51de..c9708bfea7106 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -381,8 +381,8 @@ def test_duplicate_ref_loc_failure(self):
msg = "Gaps in blk ref_locs"
+ mgr = BlockManager(blocks, axes)
with pytest.raises(AssertionError, match=msg):
- mgr = BlockManager(blocks, axes)
mgr._rebuild_blknos_and_blklocs()
blocks[0].mgr_locs = BlockPlacement(np.array([0]))
diff --git a/pandas/tests/io/excel/test_openpyxl.py b/pandas/tests/io/excel/test_openpyxl.py
index 2df9ec9e53516..c20c6daf92931 100644
--- a/pandas/tests/io/excel/test_openpyxl.py
+++ b/pandas/tests/io/excel/test_openpyxl.py
@@ -269,8 +269,8 @@ def test_if_sheet_exists_raises(ext, if_sheet_exists, msg):
# GH 40230
df = DataFrame({"fruit": ["pear"]})
with tm.ensure_clean(ext) as f:
+ df.to_excel(f, sheet_name="foo", engine="openpyxl")
with pytest.raises(ValueError, match=re.escape(msg)):
- df.to_excel(f, sheet_name="foo", engine="openpyxl")
with ExcelWriter(
f, engine="openpyxl", mode="a", if_sheet_exists=if_sheet_exists
) as writer:
diff --git a/pandas/tests/io/parser/dtypes/test_empty.py b/pandas/tests/io/parser/dtypes/test_empty.py
index f34385b190c5f..609c4cbe77fc8 100644
--- a/pandas/tests/io/parser/dtypes/test_empty.py
+++ b/pandas/tests/io/parser/dtypes/test_empty.py
@@ -125,8 +125,7 @@ def test_empty_with_dup_column_pass_dtype_by_indexes_raises(all_parsers):
expected.index = expected.index.astype(object)
with pytest.raises(ValueError, match="Duplicate names"):
- data = ""
- parser.read_csv(StringIO(data), names=["one", "one"], dtype={0: "u1", 1: "f"})
+ parser.read_csv(StringIO(""), names=["one", "one"], dtype={0: "u1", 1: "f"})
@pytest.mark.parametrize(
diff --git a/pandas/tests/io/parser/test_header.py b/pandas/tests/io/parser/test_header.py
index 0dbd4e3569ad6..d185e83bfc027 100644
--- a/pandas/tests/io/parser/test_header.py
+++ b/pandas/tests/io/parser/test_header.py
@@ -32,8 +32,7 @@ def test_read_with_bad_header(all_parsers):
msg = r"but only \d+ lines in file"
with pytest.raises(ValueError, match=msg):
- s = StringIO(",,")
- parser.read_csv(s, header=[10])
+ parser.read_csv(StringIO(",,"), header=[10])
def test_negative_header(all_parsers):
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 4e1f09b929224..f6c7f66abe5d3 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -309,13 +309,13 @@ def test_write_explicit(self, compression, get_random_path):
@pytest.mark.parametrize("compression", ["", "None", "bad", "7z"])
def test_write_explicit_bad(self, compression, get_random_path):
- with pytest.raises(ValueError, match="Unrecognized compression type"):
- with tm.ensure_clean(get_random_path) as path:
- df = DataFrame(
- 1.1 * np.arange(120).reshape((30, 4)),
- columns=Index(list("ABCD"), dtype=object),
- index=Index([f"i-{i}" for i in range(30)], dtype=object),
- )
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ with tm.ensure_clean(get_random_path) as path:
+ with pytest.raises(ValueError, match="Unrecognized compression type"):
df.to_pickle(path, compression=compression)
def test_write_infer(self, compression_ext, get_random_path):
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 3e4e1a107da9d..799b0a63feb53 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -957,20 +957,18 @@ def test_drop_column(self, datapath):
msg = "columns contains duplicate entries"
with pytest.raises(ValueError, match=msg):
- columns = ["byte_", "byte_"]
read_stata(
datapath("io", "data", "stata", "stata6_117.dta"),
convert_dates=True,
- columns=columns,
+ columns=["byte_", "byte_"],
)
msg = "The following columns were not found in the Stata data set: not_found"
with pytest.raises(ValueError, match=msg):
- columns = ["byte_", "int_", "long_", "not_found"]
read_stata(
datapath("io", "data", "stata", "stata6_117.dta"),
convert_dates=True,
- columns=columns,
+ columns=["byte_", "int_", "long_", "not_found"],
)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@@ -2196,16 +2194,16 @@ def test_non_categorical_value_labels():
assert reader_value_labels == expected
msg = "Can't create value labels for notY, it wasn't found in the dataset."
+ value_labels = {"notY": {7: "label1", 8: "label2"}}
with pytest.raises(KeyError, match=msg):
- value_labels = {"notY": {7: "label1", 8: "label2"}}
StataWriter(path, data, value_labels=value_labels)
msg = (
"Can't create value labels for Z, value labels "
"can only be applied to numeric columns."
)
+ value_labels = {"Z": {1: "a", 2: "k", 3: "j", 4: "i"}}
with pytest.raises(ValueError, match=msg):
- value_labels = {"Z": {1: "a", 2: "k", 3: "j", 4: "i"}}
StataWriter(path, data, value_labels=value_labels)
diff --git a/pandas/tests/io/xml/test_xml.py b/pandas/tests/io/xml/test_xml.py
index 6f429c1ecbf8a..5451f7b2f16f5 100644
--- a/pandas/tests/io/xml/test_xml.py
+++ b/pandas/tests/io/xml/test_xml.py
@@ -471,20 +471,22 @@ def test_empty_string_lxml(val):
r"None \(line 0\)",
]
)
+ if isinstance(val, str):
+ data = StringIO(val)
+ else:
+ data = BytesIO(val)
with pytest.raises(lxml_etree.XMLSyntaxError, match=msg):
- if isinstance(val, str):
- read_xml(StringIO(val), parser="lxml")
- else:
- read_xml(BytesIO(val), parser="lxml")
+ read_xml(data, parser="lxml")
@pytest.mark.parametrize("val", ["", b""])
def test_empty_string_etree(val):
+ if isinstance(val, str):
+ data = StringIO(val)
+ else:
+ data = BytesIO(val)
with pytest.raises(ParseError, match="no element found"):
- if isinstance(val, str):
- read_xml(StringIO(val), parser="etree")
- else:
- read_xml(BytesIO(val), parser="etree")
+ read_xml(data, parser="etree")
def test_wrong_file_path(parser):
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index 76f7fa1f22eec..2470aae78d701 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -663,8 +663,8 @@ def test_grouped_box_multiple_axes_ax_error(self, hist_df):
# GH 6970, GH 7069
df = hist_df
msg = "The number of passed axes must be 3, the same as the output plot"
+ _, axes = mpl.pyplot.subplots(2, 3)
with pytest.raises(ValueError, match=msg):
- fig, axes = mpl.pyplot.subplots(2, 3)
# pass different number of axes from required
with tm.assert_produces_warning(UserWarning):
axes = df.groupby("classroom").boxplot(ax=axes)
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index 4d17f87fdc7bc..0318abe7bdfac 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -655,8 +655,8 @@ def test_hist_with_nans_and_weights(self):
idxerror_weights = np.array([[0.3, 0.25], [0.45, 0.45]])
msg = "weights must have the same shape as data, or be a single column"
+ _, ax2 = mpl.pyplot.subplots()
with pytest.raises(ValueError, match=msg):
- _, ax2 = mpl.pyplot.subplots()
no_nan_df.plot.hist(ax=ax2, weights=idxerror_weights)
diff --git a/pandas/tests/reshape/test_util.py b/pandas/tests/reshape/test_util.py
index 4d0be7464cb3d..d2971db3d7aa2 100644
--- a/pandas/tests/reshape/test_util.py
+++ b/pandas/tests/reshape/test_util.py
@@ -72,8 +72,8 @@ def test_exceed_product_space(self):
# GH31355: raise useful error when produce space is too large
msg = "Product space too large to allocate arrays!"
+ dims = [np.arange(0, 22, dtype=np.int16) for i in range(12)] + [
+ (np.arange(15128, dtype=np.int16)),
+ ]
with pytest.raises(ValueError, match=msg):
- dims = [np.arange(0, 22, dtype=np.int16) for i in range(12)] + [
- (np.arange(15128, dtype=np.int16)),
- ]
cartesian_product(X=dims)
diff --git a/pandas/tests/series/methods/test_between.py b/pandas/tests/series/methods/test_between.py
index 3913419038876..e67eafbd118ce 100644
--- a/pandas/tests/series/methods/test_between.py
+++ b/pandas/tests/series/methods/test_between.py
@@ -70,6 +70,6 @@ def test_between_error_args(self, inclusive):
"'left', 'right', or 'neither'."
)
+ series = Series(date_range("1/1/2000", periods=10))
with pytest.raises(ValueError, match=value_error_msg):
- series = Series(date_range("1/1/2000", periods=10))
series.between(left, right, inclusive=inclusive)
diff --git a/pandas/tests/series/methods/test_compare.py b/pandas/tests/series/methods/test_compare.py
index 304045e46702b..2a57d5139b62c 100644
--- a/pandas/tests/series/methods/test_compare.py
+++ b/pandas/tests/series/methods/test_compare.py
@@ -99,19 +99,19 @@ def test_compare_multi_index():
tm.assert_series_equal(result, expected)
-def test_compare_unaligned_objects():
- # test Series with different indices
+def test_compare_different_indices():
msg = "Can only compare identically-labeled Series objects"
+ ser1 = pd.Series([1, 2, 3], index=["a", "b", "c"])
+ ser2 = pd.Series([1, 2, 3], index=["a", "b", "d"])
with pytest.raises(ValueError, match=msg):
- ser1 = pd.Series([1, 2, 3], index=["a", "b", "c"])
- ser2 = pd.Series([1, 2, 3], index=["a", "b", "d"])
ser1.compare(ser2)
- # test Series with different lengths
+
+def test_compare_different_lengths():
msg = "Can only compare identically-labeled Series objects"
+ ser1 = pd.Series([1, 2, 3])
+ ser2 = pd.Series([1, 2, 3, 4])
with pytest.raises(ValueError, match=msg):
- ser1 = pd.Series([1, 2, 3])
- ser2 = pd.Series([1, 2, 3, 4])
ser1.compare(ser2)
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 4a012f34ddc3b..806a498b98853 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -2254,12 +2254,12 @@ def test_dataframe_coerce(self, cache):
expected = Series([Timestamp("20150204 00:00:00"), NaT])
tm.assert_series_equal(result, expected)
- def test_dataframe_extra_keys_raisesm(self, df, cache):
+ def test_dataframe_extra_keys_raises(self, df, cache):
# extra columns
msg = r"extra keys have been passed to the datetime assemblage: \[foo\]"
+ df2 = df.copy()
+ df2["foo"] = 1
with pytest.raises(ValueError, match=msg):
- df2 = df.copy()
- df2["foo"] = 1
to_datetime(df2, cache=cache)
@pytest.mark.parametrize(
diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
index 104acc1d527cb..4f91e56a7d82b 100644
--- a/pandas/tests/window/test_base_indexer.py
+++ b/pandas/tests/window/test_base_indexer.py
@@ -157,13 +157,13 @@ def test_rolling_forward_window(
indexer = FixedForwardWindowIndexer(window_size=3)
match = "Forward-looking windows can't have center=True"
+ rolling = frame_or_series(values).rolling(window=indexer, center=True)
with pytest.raises(ValueError, match=match):
- rolling = frame_or_series(values).rolling(window=indexer, center=True)
getattr(rolling, func)()
match = "Forward-looking windows don't support setting the closed argument"
+ rolling = frame_or_series(values).rolling(window=indexer, closed="right")
with pytest.raises(ValueError, match=match):
- rolling = frame_or_series(values).rolling(window=indexer, closed="right")
getattr(rolling, func)()
rolling = frame_or_series(values).rolling(window=indexer, min_periods=2, step=step)
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/56746 | 2024-01-05T18:52:08Z | 2024-01-05T20:22:18Z | 2024-01-05T20:22:18Z | 2024-01-05T20:22:21Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.