title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
Doc: fix RT03 pandas.timedelta_range and pandas.util.hash_pandas_object | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index c4e43b88a0097..d01db76a8fab3 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -858,9 +858,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.read_sas\
pandas.read_spss\
pandas.read_stata\
- pandas.set_eng_float_format\
- pandas.timedelta_range\
- pandas.util.hash_pandas_object # There should be no backslash in the final line, please keep this comment in the last ignored function
+ pandas.set_eng_float_format # There should be no backslash in the final line, please keep this comment in the last ignored function
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Partially validate docstrings (SA01)' ; echo $MSG
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 4a4b0ac1444d6..6a2c04b0ddf51 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -304,6 +304,7 @@ def timedelta_range(
Returns
-------
TimedeltaIndex
+ Fixed frequency, with day as the default.
Notes
-----
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index f7e9ff220eded..3b9dd40a92ce8 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -106,7 +106,8 @@ def hash_pandas_object(
Returns
-------
- Series of uint64, same length as the object
+ Series of uint64
+ Same length as the object.
Examples
--------
| All RT03 Errors resolved in the following cases:
1. scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.timedelta_range
2. scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.util.hash_pandas_object
- [x] xref #57416
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/57799 | 2024-03-10T20:04:00Z | 2024-03-10T22:44:35Z | 2024-03-10T22:44:35Z | 2024-03-10T23:08:13Z |
DOC: Remove RT03 docstring errors for selected methods | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index c4e43b88a0097..c994975c1a08e 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -623,18 +623,12 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.DataFrame.mean\
pandas.DataFrame.median\
pandas.DataFrame.min\
- pandas.DataFrame.pop\
pandas.DataFrame.prod\
pandas.DataFrame.product\
- pandas.DataFrame.reindex\
- pandas.DataFrame.reorder_levels\
pandas.DataFrame.sem\
pandas.DataFrame.skew\
pandas.DataFrame.std\
pandas.DataFrame.sum\
- pandas.DataFrame.swapaxes\
- pandas.DataFrame.to_numpy\
- pandas.DataFrame.to_orc\
pandas.DataFrame.to_parquet\
pandas.DataFrame.unstack\
pandas.DataFrame.value_counts\
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 88fa1148c0dfc..2a6daf4bab937 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1883,6 +1883,7 @@ def to_numpy(
Returns
-------
numpy.ndarray
+ The NumPy array representing the values in the DataFrame.
See Also
--------
@@ -2930,7 +2931,7 @@ def to_orc(
engine_kwargs: dict[str, Any] | None = None,
) -> bytes | None:
"""
- Write a DataFrame to the ORC format.
+ Write a DataFrame to the Optimized Row Columnar (ORC) format.
.. versionadded:: 1.5.0
@@ -2957,7 +2958,8 @@ def to_orc(
Returns
-------
- bytes if no path argument is provided else None
+ bytes if no ``path`` argument is provided else None
+ Bytes object with DataFrame data if ``path`` is not specified else None.
Raises
------
@@ -2977,6 +2979,8 @@ def to_orc(
Notes
-----
+ * Find more information on ORC
+ `here <https://en.wikipedia.org/wiki/Apache_ORC>`__.
* Before using this function you should read the :ref:`user guide about
ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`.
* This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_
@@ -5473,7 +5477,7 @@ def rename(
def pop(self, item: Hashable) -> Series:
"""
- Return item and drop from frame. Raise KeyError if not found.
+ Return item and drop it from DataFrame. Raise KeyError if not found.
Parameters
----------
@@ -5483,6 +5487,7 @@ def pop(self, item: Hashable) -> Series:
Returns
-------
Series
+ Series representing the item that is dropped.
Examples
--------
@@ -7612,7 +7617,9 @@ def swaplevel(self, i: Axis = -2, j: Axis = -1, axis: Axis = 0) -> DataFrame:
def reorder_levels(self, order: Sequence[int | str], axis: Axis = 0) -> DataFrame:
"""
- Rearrange index levels using input order. May not drop or duplicate levels.
+ Rearrange index or column levels using input ``order``.
+
+ May not drop or duplicate levels.
Parameters
----------
@@ -7625,6 +7632,7 @@ def reorder_levels(self, order: Sequence[int | str], axis: Axis = 0) -> DataFram
Returns
-------
DataFrame
+ DataFrame with indices or columns with reordered levels.
Examples
--------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5119e799e6de1..bf10a36ea7dda 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -583,7 +583,7 @@ def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]:
@final
def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]:
"""
- Return the special character free column resolvers of a dataframe.
+ Return the special character free column resolvers of a DataFrame.
Column names with special characters are 'cleaned up' so that they can
be referred to by backtick quoting.
@@ -5077,7 +5077,8 @@ def reindex(
Returns
-------
- {klass} with changed index.
+ {klass}
+ {klass} with changed index.
See Also
--------
@@ -5095,7 +5096,7 @@ def reindex(
We *highly* recommend using keyword arguments to clarify your
intent.
- Create a dataframe with some fictional data.
+ Create a DataFrame with some fictional data.
>>> index = ["Firefox", "Chrome", "Safari", "IE10", "Konqueror"]
>>> columns = ["http_status", "response_time"]
@@ -5112,9 +5113,9 @@ def reindex(
IE10 404 0.08
Konqueror 301 1.00
- Create a new index and reindex the dataframe. By default
+ Create a new index and reindex the DataFrame. By default
values in the new index that do not have corresponding
- records in the dataframe are assigned ``NaN``.
+ records in the DataFrame are assigned ``NaN``.
>>> new_index = ["Safari", "Iceweasel", "Comodo Dragon", "IE10", "Chrome"]
>>> df.reindex(new_index)
@@ -5167,7 +5168,7 @@ def reindex(
Konqueror 301 NaN
To further illustrate the filling functionality in
- ``reindex``, we will create a dataframe with a
+ ``reindex``, we will create a DataFrame with a
monotonically increasing index (for example, a sequence
of dates).
@@ -5184,7 +5185,7 @@ def reindex(
2010-01-05 89.0
2010-01-06 88.0
- Suppose we decide to expand the dataframe to cover a wider
+ Suppose we decide to expand the DataFrame to cover a wider
date range.
>>> date_index2 = pd.date_range("12/29/2009", periods=10, freq="D")
@@ -5222,12 +5223,12 @@ def reindex(
2010-01-06 88.0
2010-01-07 NaN
- Please note that the ``NaN`` value present in the original dataframe
+ Please note that the ``NaN`` value present in the original DataFrame
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
- does not look at dataframe values, but only compares the original and
+ does not look at DataFrame values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
- in the original dataframe, use the ``fillna()`` method.
+ in the original DataFrame, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
"""
@@ -8373,7 +8374,7 @@ def clip(
See Also
--------
Series.clip : Trim values at input threshold in series.
- DataFrame.clip : Trim values at input threshold in dataframe.
+ DataFrame.clip : Trim values at input threshold in DataFrame.
numpy.clip : Clip (limit) the values in an array.
Examples
@@ -10909,7 +10910,7 @@ def describe(
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
- return only an analysis of numeric columns. If the dataframe consists
+ return only an analysis of numeric columns. If the DataFrame consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
@@ -12052,7 +12053,7 @@ def last_valid_index(self) -> Hashable:
**DataFrames**
-Create a dataframe from a dictionary.
+Create a DataFrame from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
| Resolve all RT03 errors for the following cases:
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.pop
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.reindex
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.reorder_levels
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.swapaxes - deprecated in favor of .transpose, which already has valid docstring
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.to_numpy
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.to_orc
xref DOC: fix RT03 errors in docstrings DOC: fix RT03 errors in docstrings #57416
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/57797 | 2024-03-10T15:03:33Z | 2024-03-10T22:44:03Z | 2024-03-10T22:44:03Z | 2024-03-10T22:44:09Z |
Fix issue with Tempita recompilation | diff --git a/pandas/_libs/meson.build b/pandas/_libs/meson.build
index c27386743c6e9..7621915ebcfdb 100644
--- a/pandas/_libs/meson.build
+++ b/pandas/_libs/meson.build
@@ -54,25 +54,37 @@ _intervaltree_helper = custom_target('intervaltree_helper_pxi',
py, tempita, '@INPUT@', '-o', '@OUTDIR@'
]
)
-_khash_primitive_helper_dep = declare_dependency(sources: _khash_primitive_helper)
+
+_algos_pxi_dep = declare_dependency(sources: [_algos_take_helper, _algos_common_helper])
+_khash_pxi_dep = declare_dependency(sources: _khash_primitive_helper)
+_hashtable_pxi_dep = declare_dependency(
+ sources: [_hashtable_class_helper, _hashtable_func_helper]
+)
+_index_pxi_dep = declare_dependency(sources: _index_class_helper)
+_intervaltree_pxi_dep = declare_dependency(sources: _intervaltree_helper)
+_sparse_pxi_dep = declare_dependency(sources: _sparse_op_helper)
+
subdir('tslibs')
libs_sources = {
# Dict of extension name -> dict of {sources, include_dirs, and deps}
# numpy include dir is implicitly included
- 'algos': {'sources': ['algos.pyx', _algos_common_helper, _algos_take_helper], 'deps': _khash_primitive_helper_dep},
+ 'algos': {'sources': ['algos.pyx'],
+ 'deps': [_khash_pxi_dep, _algos_pxi_dep]},
'arrays': {'sources': ['arrays.pyx']},
'groupby': {'sources': ['groupby.pyx']},
'hashing': {'sources': ['hashing.pyx']},
- 'hashtable': {'sources': ['hashtable.pyx', _hashtable_class_helper, _hashtable_func_helper], 'deps': _khash_primitive_helper_dep},
- 'index': {'sources': ['index.pyx', _index_class_helper], 'deps': _khash_primitive_helper_dep},
+ 'hashtable': {'sources': ['hashtable.pyx'],
+ 'deps': [_khash_pxi_dep, _hashtable_pxi_dep]},
+ 'index': {'sources': ['index.pyx'],
+ 'deps': [_khash_pxi_dep, _index_pxi_dep]},
'indexing': {'sources': ['indexing.pyx']},
'internals': {'sources': ['internals.pyx']},
- 'interval': {'sources': ['interval.pyx', _intervaltree_helper],
- 'deps': _khash_primitive_helper_dep},
- 'join': {'sources': ['join.pyx', _khash_primitive_helper],
- 'deps': _khash_primitive_helper_dep},
+ 'interval': {'sources': ['interval.pyx'],
+ 'deps': [_khash_pxi_dep, _intervaltree_pxi_dep]},
+ 'join': {'sources': ['join.pyx'],
+ 'deps': [_khash_pxi_dep]},
'lib': {'sources': ['lib.pyx', 'src/parser/tokenizer.c']},
'missing': {'sources': ['missing.pyx']},
'pandas_datetime': {'sources': ['src/vendored/numpy/datetime/np_datetime.c',
@@ -83,7 +95,7 @@ libs_sources = {
'src/parser/io.c',
'src/parser/pd_parser.c']},
'parsers': {'sources': ['parsers.pyx', 'src/parser/tokenizer.c', 'src/parser/io.c'],
- 'deps': _khash_primitive_helper_dep},
+ 'deps': [_khash_pxi_dep]},
'json': {'sources': ['src/vendored/ujson/python/ujson.c',
'src/vendored/ujson/python/objToJSON.c',
'src/vendored/ujson/python/JSONtoObj.c',
@@ -95,7 +107,8 @@ libs_sources = {
'reshape': {'sources': ['reshape.pyx']},
'sas': {'sources': ['sas.pyx']},
'byteswap': {'sources': ['byteswap.pyx']},
- 'sparse': {'sources': ['sparse.pyx', _sparse_op_helper]},
+ 'sparse': {'sources': ['sparse.pyx'],
+ 'deps': [_sparse_pxi_dep]},
'tslib': {'sources': ['tslib.pyx']},
'testing': {'sources': ['testing.pyx']},
'writers': {'sources': ['writers.pyx']}
| xref https://github.com/mesonbuild/meson-python/issues/589#issuecomment-1987206217 @rhshadrach I think you mentioned this originally on Slack | https://api.github.com/repos/pandas-dev/pandas/pulls/57796 | 2024-03-10T14:10:26Z | 2024-03-13T23:29:49Z | 2024-03-13T23:29:49Z | 2024-03-18T23:09:59Z |
Fix some typing errors | diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 26c44c2613cb2..28985a1380bee 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -64,7 +64,7 @@
# our Unpickler sub-class to override methods and some dispatcher
# functions for compat and uses a non-public class of the pickle module.
class Unpickler(pickle._Unpickler):
- def find_class(self, module, name):
+ def find_class(self, module: str, name: str) -> Any:
key = (module, name)
module, name = _class_locations_map.get(key, key)
return super().find_class(module, name)
diff --git a/pandas/core/array_algos/datetimelike_accumulations.py b/pandas/core/array_algos/datetimelike_accumulations.py
index 8737381890ae0..55942f2c9350d 100644
--- a/pandas/core/array_algos/datetimelike_accumulations.py
+++ b/pandas/core/array_algos/datetimelike_accumulations.py
@@ -18,7 +18,7 @@ def _cum_func(
values: np.ndarray,
*,
skipna: bool = True,
-):
+) -> np.ndarray:
"""
Accumulations for 1D datetimelike arrays.
@@ -61,9 +61,9 @@ def cumsum(values: np.ndarray, *, skipna: bool = True) -> np.ndarray:
return _cum_func(np.cumsum, values, skipna=skipna)
-def cummin(values: np.ndarray, *, skipna: bool = True):
+def cummin(values: np.ndarray, *, skipna: bool = True) -> np.ndarray:
return _cum_func(np.minimum.accumulate, values, skipna=skipna)
-def cummax(values: np.ndarray, *, skipna: bool = True):
+def cummax(values: np.ndarray, *, skipna: bool = True) -> np.ndarray:
return _cum_func(np.maximum.accumulate, values, skipna=skipna)
diff --git a/pandas/core/array_algos/masked_accumulations.py b/pandas/core/array_algos/masked_accumulations.py
index fb4fbd53772de..b31d32a606eed 100644
--- a/pandas/core/array_algos/masked_accumulations.py
+++ b/pandas/core/array_algos/masked_accumulations.py
@@ -22,7 +22,7 @@ def _cum_func(
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
-):
+) -> tuple[np.ndarray, npt.NDArray[np.bool_]]:
"""
Accumulations for 1D masked array.
@@ -74,17 +74,25 @@ def _cum_func(
return values, mask
-def cumsum(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
+def cumsum(
+ values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True
+) -> tuple[np.ndarray, npt.NDArray[np.bool_]]:
return _cum_func(np.cumsum, values, mask, skipna=skipna)
-def cumprod(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
+def cumprod(
+ values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True
+) -> tuple[np.ndarray, npt.NDArray[np.bool_]]:
return _cum_func(np.cumprod, values, mask, skipna=skipna)
-def cummin(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
+def cummin(
+ values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True
+) -> tuple[np.ndarray, npt.NDArray[np.bool_]]:
return _cum_func(np.minimum.accumulate, values, mask, skipna=skipna)
-def cummax(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
+def cummax(
+ values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True
+) -> tuple[np.ndarray, npt.NDArray[np.bool_]]:
return _cum_func(np.maximum.accumulate, values, mask, skipna=skipna)
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index d9a8b4dfd95fd..1a5d0842d6eee 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -37,7 +37,7 @@
"""
-def use_bottleneck_cb(key) -> None:
+def use_bottleneck_cb(key: str) -> None:
from pandas.core import nanops
nanops.set_use_bottleneck(cf.get_option(key))
@@ -51,7 +51,7 @@ def use_bottleneck_cb(key) -> None:
"""
-def use_numexpr_cb(key) -> None:
+def use_numexpr_cb(key: str) -> None:
from pandas.core.computation import expressions
expressions.set_use_numexpr(cf.get_option(key))
@@ -65,7 +65,7 @@ def use_numexpr_cb(key) -> None:
"""
-def use_numba_cb(key) -> None:
+def use_numba_cb(key: str) -> None:
from pandas.core.util import numba_
numba_.set_use_numba(cf.get_option(key))
@@ -287,7 +287,7 @@ def use_numba_cb(key) -> None:
"""
-def table_schema_cb(key) -> None:
+def table_schema_cb(key: str) -> None:
from pandas.io.formats.printing import enable_data_resource_formatter
enable_data_resource_formatter(cf.get_option(key))
@@ -612,7 +612,7 @@ def is_terminal() -> bool:
"""
-def register_plotting_backend_cb(key) -> None:
+def register_plotting_backend_cb(key: str | None) -> None:
if key == "matplotlib":
# We defer matplotlib validation, since it's the default
return
@@ -626,7 +626,7 @@ def register_plotting_backend_cb(key) -> None:
"backend",
defval="matplotlib",
doc=plotting_backend_doc,
- validator=register_plotting_backend_cb,
+ validator=register_plotting_backend_cb, # type: ignore[arg-type]
)
@@ -638,7 +638,7 @@ def register_plotting_backend_cb(key) -> None:
"""
-def register_converter_cb(key) -> None:
+def register_converter_cb(key: str) -> None:
from pandas.plotting import (
deregister_matplotlib_converters,
register_matplotlib_converters,
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index f702d5a60e86f..3a34481ab3f33 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -42,7 +42,7 @@
)
-def _is_nonempty(x, axis) -> bool:
+def _is_nonempty(x: ArrayLike, axis: AxisInt) -> bool:
# filter empty arrays
# 1-d dtypes always are included here
if x.ndim <= axis:
diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py
index 6ab98cf4fe55e..037ca81477677 100644
--- a/pandas/core/groupby/categorical.py
+++ b/pandas/core/groupby/categorical.py
@@ -50,7 +50,7 @@ def recode_for_groupby(
# In cases with c.ordered, this is equivalent to
# return c.remove_unused_categories(), c
- unique_codes = unique1d(c.codes)
+ unique_codes = unique1d(c.codes) # type: ignore[no-untyped-call]
take_codes = unique_codes[unique_codes != -1]
if sort:
@@ -74,7 +74,7 @@ def recode_for_groupby(
# xref GH:46909: Re-ordering codes faster than using (set|add|reorder)_categories
all_codes = np.arange(c.categories.nunique())
# GH 38140: exclude nan from indexer for categories
- unique_notnan_codes = unique1d(c.codes[c.codes != -1])
+ unique_notnan_codes = unique1d(c.codes[c.codes != -1]) # type: ignore[no-untyped-call]
if sort:
unique_notnan_codes = np.sort(unique_notnan_codes)
if len(all_codes) > len(unique_notnan_codes):
diff --git a/pandas/core/ops/mask_ops.py b/pandas/core/ops/mask_ops.py
index 427ae2fb87e55..86a93e5a3ca2b 100644
--- a/pandas/core/ops/mask_ops.py
+++ b/pandas/core/ops/mask_ops.py
@@ -190,6 +190,6 @@ def kleene_and(
return result, mask
-def raise_for_nan(value, method: str) -> None:
+def raise_for_nan(value: object, method: str) -> None:
if lib.is_float(value) and np.isnan(value):
raise ValueError(f"Cannot perform logical '{method}' with floating NaN")
diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py
index 99a790388f3f1..d76593b41a996 100644
--- a/pandas/io/formats/console.py
+++ b/pandas/io/formats/console.py
@@ -63,7 +63,7 @@ def in_interactive_session() -> bool:
"""
from pandas import get_option
- def check_main():
+ def check_main() -> bool:
try:
import __main__ as main
except ModuleNotFoundError:
diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py
index dc18ef2fcd4fc..d3f4072b2ff08 100644
--- a/pandas/io/formats/css.py
+++ b/pandas/io/formats/css.py
@@ -36,7 +36,7 @@ def _side_expander(prop_fmt: str) -> Callable:
function: Return to call when a 'border(-{side}): {value}' string is encountered
"""
- def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:
+ def expand(self, prop: str, value: str) -> Generator[tuple[str, str], None, None]:
"""
Expand shorthand property into side-specific property (top, right, bottom, left)
@@ -81,7 +81,7 @@ def _border_expander(side: str = "") -> Callable:
if side != "":
side = f"-{side}"
- def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:
+ def expand(self, prop: str, value: str) -> Generator[tuple[str, str], None, None]:
"""
Expand border into color, style, and width tuples
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index c3101683b9962..9374b3c7af38f 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1835,7 +1835,7 @@ def _do_select_columns(self, data: DataFrame, columns: Sequence[str]) -> DataFra
fmtlist = []
lbllist = []
for col in columns:
- i = data.columns.get_loc(col)
+ i = data.columns.get_loc(col) # type: ignore[no-untyped-call]
dtyplist.append(self._dtyplist[i])
typlist.append(self._typlist[i])
fmtlist.append(self._fmtlist[i])
@@ -2155,7 +2155,7 @@ def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int:
def _dtype_to_default_stata_fmt(
- dtype, column: Series, dta_version: int = 114, force_strl: bool = False
+ dtype: np.dtype, column: Series, dta_version: int = 114, force_strl: bool = False
) -> str:
"""
Map numpy dtype to stata's default format for this type. Not terribly
@@ -3467,7 +3467,7 @@ def _write_characteristics(self) -> None:
self._update_map("characteristics")
self._write_bytes(self._tag(b"", "characteristics"))
- def _write_data(self, records) -> None:
+ def _write_data(self, records: np.rec.recarray) -> None:
self._update_map("data")
self._write_bytes(b"<data>")
self._write_bytes(records.tobytes())
diff --git a/pandas/util/__init__.py b/pandas/util/__init__.py
index da109a514433f..dfab207635fec 100644
--- a/pandas/util/__init__.py
+++ b/pandas/util/__init__.py
@@ -25,5 +25,5 @@ def __getattr__(key: str):
raise AttributeError(f"module 'pandas.util' has no attribute '{key}'")
-def __dir__():
+def __dir__() -> list[str]:
return list(globals().keys()) + ["hash_array", "hash_pandas_object"]
diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index e39c2f7badb1d..6cdd96996cea6 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -33,7 +33,7 @@ def _get_commit_hash() -> str | None:
except ImportError:
from pandas._version import get_versions
- versions = get_versions()
+ versions = get_versions() # type: ignore[no-untyped-call]
return versions["full-revisionid"]
diff --git a/pyproject.toml b/pyproject.toml
index 56d5f59e10a4f..bbcaa73b55ff8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -567,13 +567,9 @@ module = [
"pandas._config.config", # TODO
"pandas._libs.*",
"pandas._testing.*", # TODO
- "pandas.arrays", # TODO
"pandas.compat.numpy.function", # TODO
"pandas.compat.compressors", # TODO
- "pandas.compat.pickle_compat", # TODO
"pandas.core._numba.executor", # TODO
- "pandas.core.array_algos.datetimelike_accumulations", # TODO
- "pandas.core.array_algos.masked_accumulations", # TODO
"pandas.core.array_algos.masked_reductions", # TODO
"pandas.core.array_algos.putmask", # TODO
"pandas.core.array_algos.quantile", # TODO
@@ -588,7 +584,6 @@ module = [
"pandas.core.dtypes.dtypes", # TODO
"pandas.core.dtypes.generic", # TODO
"pandas.core.dtypes.missing", # TODO
- "pandas.core.groupby.categorical", # TODO
"pandas.core.groupby.generic", # TODO
"pandas.core.groupby.grouper", # TODO
"pandas.core.groupby.groupby", # TODO
@@ -603,7 +598,6 @@ module = [
"pandas.core.ops.array_ops", # TODO
"pandas.core.ops.common", # TODO
"pandas.core.ops.invalid", # TODO
- "pandas.core.ops.mask_ops", # TODO
"pandas.core.ops.missing", # TODO
"pandas.core.reshape.*", # TODO
"pandas.core.strings.*", # TODO
@@ -620,7 +614,6 @@ module = [
"pandas.core.arraylike", # TODO
"pandas.core.base", # TODO
"pandas.core.common", # TODO
- "pandas.core.config_init", # TODO
"pandas.core.construction", # TODO
"pandas.core.flags", # TODO
"pandas.core.frame", # TODO
@@ -642,11 +635,9 @@ module = [
"pandas.io.excel._pyxlsb", # TODO
"pandas.io.excel._xlrd", # TODO
"pandas.io.excel._xlsxwriter", # TODO
- "pandas.io.formats.console", # TODO
"pandas.io.formats.css", # TODO
"pandas.io.formats.excel", # TODO
"pandas.io.formats.format", # TODO
- "pandas.io.formats.info", # TODO
"pandas.io.formats.printing", # TODO
"pandas.io.formats.style", # TODO
"pandas.io.formats.style_render", # TODO
@@ -661,7 +652,6 @@ module = [
"pandas.io.parquet", # TODO
"pandas.io.pytables", # TODO
"pandas.io.sql", # TODO
- "pandas.io.stata", # TODO
"pandas.io.xml", # TODO
"pandas.plotting.*", # TODO
"pandas.tests.*",
@@ -669,7 +659,6 @@ module = [
"pandas.tseries.holiday", # TODO
"pandas.util._decorators", # TODO
"pandas.util._doctools", # TODO
- "pandas.util._print_versions", # TODO
"pandas.util._test_decorators", # TODO
"pandas.util._validators", # TODO
"pandas.util", # TODO
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/57795 | 2024-03-10T09:26:50Z | 2024-03-10T22:43:11Z | 2024-03-10T22:43:11Z | 2024-03-11T05:40:23Z |
CLN: remove deprecated strings 'BA', 'BAS', 'AS' denoting frequencies for timeseries | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index 16be9e0a4fc34..65b68f16b9bf2 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -203,7 +203,10 @@ Removal of prior version deprecations/changes
- Enforced deprecation of :meth:`.DataFrameGroupBy.get_group` and :meth:`.SeriesGroupBy.get_group` allowing the ``name`` argument to be a non-tuple when grouping by a list of length 1 (:issue:`54155`)
- Enforced deprecation of ``axis=None`` acting the same as ``axis=0`` in the DataFrame reductions ``sum``, ``prod``, ``std``, ``var``, and ``sem``, passing ``axis=None`` will now reduce over both axes; this is particularly the case when doing e.g. ``numpy.sum(df)`` (:issue:`21597`)
- Enforced deprecation of passing a dictionary to :meth:`SeriesGroupBy.agg` (:issue:`52268`)
+- Enforced deprecation of string ``AS`` denoting frequency in :class:`YearBegin` and strings ``AS-DEC``, ``AS-JAN``, etc. denoting annual frequencies with various fiscal year starts (:issue:`57793`)
- Enforced deprecation of string ``A`` denoting frequency in :class:`YearEnd` and strings ``A-DEC``, ``A-JAN``, etc. denoting annual frequencies with various fiscal year ends (:issue:`57699`)
+- Enforced deprecation of string ``BAS`` denoting frequency in :class:`BYearBegin` and strings ``BAS-DEC``, ``BAS-JAN``, etc. denoting annual frequencies with various fiscal year starts (:issue:`57793`)
+- Enforced deprecation of string ``BA`` denoting frequency in :class:`BYearEnd` and strings ``BA-DEC``, ``BA-JAN``, etc. denoting annual frequencies with various fiscal year ends (:issue:`57793`)
- Enforced deprecation of the behavior of :func:`concat` when ``len(keys) != len(objs)`` would truncate to the shorter of the two. Now this raises a ``ValueError`` (:issue:`43485`)
- Enforced silent-downcasting deprecation for :ref:`all relevant methods <whatsnew_220.silent_downcasting>` (:issue:`54710`)
- In :meth:`DataFrame.stack`, the default value of ``future_stack`` is now ``True``; specifying ``False`` will raise a ``FutureWarning`` (:issue:`55448`)
diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx
index 6a81681369fb7..906842d322e91 100644
--- a/pandas/_libs/tslibs/dtypes.pyx
+++ b/pandas/_libs/tslibs/dtypes.pyx
@@ -286,19 +286,6 @@ cdef dict c_OFFSET_DEPR_FREQSTR = {
"BY-SEP": "BYE-SEP",
"BY-OCT": "BYE-OCT",
"BY-NOV": "BYE-NOV",
- "BA": "BYE",
- "BA-DEC": "BYE-DEC",
- "BA-JAN": "BYE-JAN",
- "BA-FEB": "BYE-FEB",
- "BA-MAR": "BYE-MAR",
- "BA-APR": "BYE-APR",
- "BA-MAY": "BYE-MAY",
- "BA-JUN": "BYE-JUN",
- "BA-JUL": "BYE-JUL",
- "BA-AUG": "BYE-AUG",
- "BA-SEP": "BYE-SEP",
- "BA-OCT": "BYE-OCT",
- "BA-NOV": "BYE-NOV",
"BM": "BME",
"CBM": "CBME",
"SM": "SME",
@@ -323,45 +310,6 @@ cdef dict c_REVERSE_OFFSET_DEPR_FREQSTR = {
# Map deprecated resolution abbreviations to correct resolution abbreviations
cdef dict c_DEPR_ABBREVS = {
- "BA": "BY",
- "BA-DEC": "BY-DEC",
- "BA-JAN": "BY-JAN",
- "BA-FEB": "BY-FEB",
- "BA-MAR": "BY-MAR",
- "BA-APR": "BY-APR",
- "BA-MAY": "BY-MAY",
- "BA-JUN": "BY-JUN",
- "BA-JUL": "BY-JUL",
- "BA-AUG": "BY-AUG",
- "BA-SEP": "BY-SEP",
- "BA-OCT": "BY-OCT",
- "BA-NOV": "BY-NOV",
- "AS": "YS",
- "AS-DEC": "YS-DEC",
- "AS-JAN": "YS-JAN",
- "AS-FEB": "YS-FEB",
- "AS-MAR": "YS-MAR",
- "AS-APR": "YS-APR",
- "AS-MAY": "YS-MAY",
- "AS-JUN": "YS-JUN",
- "AS-JUL": "YS-JUL",
- "AS-AUG": "YS-AUG",
- "AS-SEP": "YS-SEP",
- "AS-OCT": "YS-OCT",
- "AS-NOV": "YS-NOV",
- "BAS": "BYS",
- "BAS-DEC": "BYS-DEC",
- "BAS-JAN": "BYS-JAN",
- "BAS-FEB": "BYS-FEB",
- "BAS-MAR": "BYS-MAR",
- "BAS-APR": "BYS-APR",
- "BAS-MAY": "BYS-MAY",
- "BAS-JUN": "BYS-JUN",
- "BAS-JUL": "BYS-JUL",
- "BAS-AUG": "BYS-AUG",
- "BAS-SEP": "BYS-SEP",
- "BAS-OCT": "BYS-OCT",
- "BAS-NOV": "BYS-NOV",
"H": "h",
"BH": "bh",
"CBH": "cbh",
diff --git a/pandas/tests/frame/methods/test_asfreq.py b/pandas/tests/frame/methods/test_asfreq.py
index ffb14a1008b9e..fb288e19c6e82 100644
--- a/pandas/tests/frame/methods/test_asfreq.py
+++ b/pandas/tests/frame/methods/test_asfreq.py
@@ -242,10 +242,9 @@ def test_asfreq_2ME(self, freq, freq_half):
("2BQE-SEP", "2BQ-SEP"),
("1YE", "1Y"),
("2YE-MAR", "2Y-MAR"),
- ("2BYE-MAR", "2BA-MAR"),
],
)
- def test_asfreq_frequency_M_Q_Y_A_deprecated(self, freq, freq_depr):
+ def test_asfreq_frequency_M_Q_Y_deprecated(self, freq, freq_depr):
# GH#9586, #55978
depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed "
f"in a future version, please use '{freq[1:]}' instead."
@@ -282,11 +281,18 @@ def test_asfreq_unsupported_freq(self, freq, error_msg):
with pytest.raises(ValueError, match=error_msg):
df.asfreq(freq=freq)
- def test_asfreq_frequency_A_raises(self):
- msg = "Invalid frequency: 2A"
+ @pytest.mark.parametrize(
+ "freq, freq_depr",
+ [
+ ("2YE", "2A"),
+ ("2BYE-MAR", "2BA-MAR"),
+ ],
+ )
+ def test_asfreq_frequency_A_BA_raises(self, freq, freq_depr):
+ msg = f"Invalid frequency: {freq_depr}"
- index = date_range("1/1/2000", periods=4, freq="2ME")
+ index = date_range("1/1/2000", periods=4, freq=freq)
df = DataFrame({"s": Series([0.0, 1.0, 2.0, 3.0], index=index)})
with pytest.raises(ValueError, match=msg):
- df.asfreq(freq="2A")
+ df.asfreq(freq=freq_depr)
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index f7fc64d4b0163..84a616f05cd63 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -1,6 +1,5 @@
import datetime as dt
from datetime import date
-import re
import numpy as np
import pytest
@@ -158,42 +157,9 @@ def test_CBH_deprecated(self):
tm.assert_index_equal(result, expected)
- @pytest.mark.parametrize(
- "freq_depr, expected_values, expected_freq",
- [
- (
- "AS-AUG",
- ["2021-08-01", "2022-08-01", "2023-08-01"],
- "YS-AUG",
- ),
- (
- "1BAS-MAY",
- ["2021-05-03", "2022-05-02", "2023-05-01"],
- "1BYS-MAY",
- ),
- ],
- )
- def test_AS_BAS_deprecated(self, freq_depr, expected_values, expected_freq):
- # GH#55479
- freq_msg = re.split("[0-9]*", freq_depr, maxsplit=1)[1]
- msg = f"'{freq_msg}' is deprecated and will be removed in a future version."
-
- with tm.assert_produces_warning(FutureWarning, match=msg):
- expected = date_range(
- dt.datetime(2020, 12, 1), dt.datetime(2023, 12, 1), freq=freq_depr
- )
- result = DatetimeIndex(
- expected_values,
- dtype="datetime64[ns]",
- freq=expected_freq,
- )
-
- tm.assert_index_equal(result, expected)
-
@pytest.mark.parametrize(
"freq, expected_values, freq_depr",
[
- ("2BYE-MAR", ["2016-03-31"], "2BA-MAR"),
("2BYE-JUN", ["2016-06-30"], "2BY-JUN"),
("2BME", ["2016-02-29", "2016-04-29", "2016-06-30"], "2BM"),
("2BQE", ["2016-03-31"], "2BQ"),
@@ -214,3 +180,10 @@ def test_BM_BQ_BY_deprecated(self, freq, expected_values, freq_depr):
)
tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize("freq", ["2BA-MAR", "1BAS-MAY", "2AS-AUG"])
+ def test_BA_BAS_raises(self, freq):
+ msg = f"Invalid frequency: {freq}"
+
+ with pytest.raises(ValueError, match=msg):
+ date_range(start="2016-02-21", end="2016-08-21", freq=freq)
| xref #55479
remove deprecated strings `‘BA, ‘BAS', ‘AS', ‘BA-DEC' ,‘BAS-DEC', ‘AS-DEC',` etc. denoting frequencies for timeseries
| https://api.github.com/repos/pandas-dev/pandas/pulls/57793 | 2024-03-10T00:15:59Z | 2024-03-11T14:56:02Z | 2024-03-11T14:56:02Z | 2024-03-11T14:56:03Z |
Migrate ruff config to the latest format | diff --git a/doc/sphinxext/announce.py b/doc/sphinxext/announce.py
index 89361eb75606c..66e999e251c5e 100755
--- a/doc/sphinxext/announce.py
+++ b/doc/sphinxext/announce.py
@@ -32,6 +32,7 @@
$ ./scripts/announce.py $GITHUB v1.11.0..v1.11.1 > announce.rst
"""
+
import codecs
import os
import re
diff --git a/doc/sphinxext/contributors.py b/doc/sphinxext/contributors.py
index c2b21e40cadad..06f205b5cc3ce 100644
--- a/doc/sphinxext/contributors.py
+++ b/doc/sphinxext/contributors.py
@@ -14,6 +14,7 @@
While the v0.23.1 tag does not exist, that will use the HEAD of the
branch as the end of the revision range.
"""
+
from announce import build_components
from docutils import nodes
from docutils.parsers.rst import Directive
diff --git a/pandas/util/version/__init__.py b/pandas/util/version/__init__.py
index 3a5efbbb09c1e..153424e339c45 100644
--- a/pandas/util/version/__init__.py
+++ b/pandas/util/version/__init__.py
@@ -131,7 +131,7 @@ class InvalidVersion(ValueError):
Examples
--------
- >>> pd.util.version.Version('1.')
+ >>> pd.util.version.Version("1.")
Traceback (most recent call last):
InvalidVersion: Invalid version: '1.'
"""
diff --git a/pyproject.toml b/pyproject.toml
index 5a06e22f4be9b..56d5f59e10a4f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -185,6 +185,8 @@ environment = {CFLAGS="-g0"}
line-length = 88
target-version = "py310"
fix = true
+
+[tool.ruff.lint]
unfixable = []
typing-modules = ["pandas._typing"]
@@ -271,8 +273,8 @@ ignore = [
"PLW0603",
# Use `typing.NamedTuple` instead of `collections.namedtuple`
"PYI024",
- # No builtin `eval()` allowed
- "PGH001",
+ # Use of possibly insecure function; consider using ast.literal_eval
+ "S307",
# while int | float can be shortened to float, the former is more explicit
"PYI041",
# incorrect-dict-iterator, flags valid Series.items usage
@@ -345,7 +347,7 @@ exclude = [
[tool.ruff.lint.flake8-import-conventions.aliases]
"pandas.core.construction.array" = "pd_array"
-[tool.ruff.per-file-ignores]
+[tool.ruff.lint.per-file-ignores]
# relative imports allowed for asv_bench
"asv_bench/*" = ["TID", "NPY002"]
# to be enabled gradually
| Resolves these warnings (available if running `ruff` directly) and fix some formatting errors discovered in the new config that's arguably should be equivalent to the old one.
```bash
warning: The top-level linter settings are deprecated in favour of their counterparts in the `lint` section. Please update the following options in `pyproject.toml`:
- 'ignore' -> 'lint.ignore'
- 'select' -> 'lint.select'
- 'typing-modules' -> 'lint.typing-modules'
- 'unfixable' -> 'lint.unfixable'
- 'per-file-ignores' -> 'lint.per-file-ignores'
warning: `PGH001` has been remapped to `S307`.
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/57791 | 2024-03-09T21:49:25Z | 2024-03-09T22:19:03Z | 2024-03-09T22:19:03Z | 2024-03-17T07:56:12Z |
Small refactoring | diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 36edf6116609b..424af58958f04 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -904,13 +904,7 @@ def _build_names_mapper(
a list of column names with duplicate names replaced by dummy names
"""
-
- def get_duplicates(names):
- seen: set = set()
- return {name for name in names if name not in seen}
-
- shared_names = set(rownames).intersection(set(colnames))
- dup_names = get_duplicates(rownames) | get_duplicates(colnames) | shared_names
+ dup_names = set(rownames) | set(colnames)
rownames_mapper = {
f"row_{i}": name for i, name in enumerate(rownames) if name in dup_names
| - [x] closes #52229(Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/57789 | 2024-03-09T07:01:18Z | 2024-03-09T19:51:00Z | 2024-03-09T19:51:00Z | 2024-03-09T21:13:33Z |
API: Revert 57042 - MultiIndex.names|codes|levels returns tuples | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index 66c209efb740b..da0b29760e498 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -147,7 +147,6 @@ See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for mor
Other API changes
^^^^^^^^^^^^^^^^^
- 3rd party ``py.path`` objects are no longer explicitly supported in IO methods. Use :py:class:`pathlib.Path` objects instead (:issue:`57091`)
-- :attr:`MultiIndex.codes`, :attr:`MultiIndex.levels`, and :attr:`MultiIndex.names` now returns a ``tuple`` instead of a ``FrozenList`` (:issue:`53531`)
- :func:`read_table`'s ``parse_dates`` argument defaults to ``None`` to improve consistency with :func:`read_csv` (:issue:`57476`)
- Made ``dtype`` a required argument in :meth:`ExtensionArray._from_sequence_of_strings` (:issue:`56519`)
- Updated :meth:`DataFrame.to_excel` so that the output spreadsheet has no styling. Custom styling can still be done using :meth:`Styler.to_excel` (:issue:`54154`)
diff --git a/pandas/_libs/index.pyi b/pandas/_libs/index.pyi
index 8cd135c944dc6..12a5bf245977e 100644
--- a/pandas/_libs/index.pyi
+++ b/pandas/_libs/index.pyi
@@ -73,13 +73,13 @@ class MaskedUInt8Engine(MaskedIndexEngine): ...
class MaskedBoolEngine(MaskedUInt8Engine): ...
class BaseMultiIndexCodesEngine:
- levels: tuple[np.ndarray]
+ levels: list[np.ndarray]
offsets: np.ndarray # ndarray[uint64_t, ndim=1]
def __init__(
self,
- levels: tuple[Index, ...], # all entries hashable
- labels: tuple[np.ndarray], # all entries integer-dtyped
+ levels: list[Index], # all entries hashable
+ labels: list[np.ndarray], # all entries integer-dtyped
offsets: np.ndarray, # np.ndarray[np.uint64, ndim=1]
) -> None: ...
def get_indexer(self, target: npt.NDArray[np.object_]) -> npt.NDArray[np.intp]: ...
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index c91e4233ef540..bc37405b25a16 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -5609,7 +5609,7 @@ def _insert_quantile_level(idx: Index, qs: npt.NDArray[np.float64]) -> MultiInde
idx = cast(MultiIndex, idx)
levels = list(idx.levels) + [lev]
codes = [np.repeat(x, nqs) for x in idx.codes] + [np.tile(lev_codes, len(idx))]
- mi = MultiIndex(levels=levels, codes=codes, names=list(idx.names) + [None])
+ mi = MultiIndex(levels=levels, codes=codes, names=idx.names + [None])
else:
nidx = len(idx)
idx_codes = coerce_indexer_dtype(np.arange(nidx), idx)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 734711942b9f9..d5517a210b39d 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -174,6 +174,7 @@
disallow_ndim_indexing,
is_valid_positional_slice,
)
+from pandas.core.indexes.frozen import FrozenList
from pandas.core.missing import clean_reindex_fill_method
from pandas.core.ops import get_op_result_name
from pandas.core.sorting import (
@@ -1726,8 +1727,8 @@ def _get_default_index_names(
return names
- def _get_names(self) -> tuple[Hashable | None, ...]:
- return (self.name,)
+ def _get_names(self) -> FrozenList:
+ return FrozenList((self.name,))
def _set_names(self, values, *, level=None) -> None:
"""
@@ -1821,7 +1822,7 @@ def set_names(self, names, *, level=None, inplace: bool = False) -> Self | None:
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
- names=('species', 'year'))
+ names=['species', 'year'])
When renaming levels with a dict, levels can not be passed.
@@ -1830,7 +1831,7 @@ def set_names(self, names, *, level=None, inplace: bool = False) -> Self | None:
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
- names=('snake', 'year'))
+ names=['snake', 'year'])
"""
if level is not None and not isinstance(self, ABCMultiIndex):
raise ValueError("Level must be None for non-MultiIndex")
@@ -1915,13 +1916,13 @@ def rename(self, name, *, inplace: bool = False) -> Self | None:
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
- names=('kind', 'year'))
+ names=['kind', 'year'])
>>> idx.rename(["species", "year"])
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
- names=('species', 'year'))
+ names=['species', 'year'])
>>> idx.rename("species")
Traceback (most recent call last):
TypeError: Must pass list-like as `names`.
@@ -2085,22 +2086,22 @@ def droplevel(self, level: IndexLabel = 0):
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
- names=('x', 'y', 'z'))
+ names=['x', 'y', 'z'])
>>> mi.droplevel()
MultiIndex([(3, 5),
(4, 6)],
- names=('y', 'z'))
+ names=['y', 'z'])
>>> mi.droplevel(2)
MultiIndex([(1, 3),
(2, 4)],
- names=('x', 'y'))
+ names=['x', 'y'])
>>> mi.droplevel("z")
MultiIndex([(1, 3),
(2, 4)],
- names=('x', 'y'))
+ names=['x', 'y'])
>>> mi.droplevel(["x", "y"])
Index([5, 6], dtype='int64', name='z')
@@ -4437,9 +4438,7 @@ def _join_level(
"""
from pandas.core.indexes.multi import MultiIndex
- def _get_leaf_sorter(
- labels: tuple[np.ndarray, ...] | list[np.ndarray],
- ) -> npt.NDArray[np.intp]:
+ def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]:
"""
Returns sorter for the inner most level while preserving the
order of higher levels.
@@ -6184,13 +6183,13 @@ def isin(self, values, level=None) -> npt.NDArray[np.bool_]:
array([ True, False, False])
>>> midx = pd.MultiIndex.from_arrays(
- ... [[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
+ ... [[1, 2, 3], ["red", "blue", "green"]], names=["number", "color"]
... )
>>> midx
MultiIndex([(1, 'red'),
(2, 'blue'),
(3, 'green')],
- names=('number', 'color'))
+ names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
@@ -7178,7 +7177,7 @@ def ensure_index_from_sequences(sequences, names=None) -> Index:
>>> ensure_index_from_sequences([["a", "a"], ["a", "b"]], names=["L1", "L2"])
MultiIndex([('a', 'a'),
('a', 'b')],
- names=('L1', 'L2'))
+ names=['L1', 'L2'])
See Also
--------
diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py
new file mode 100644
index 0000000000000..c559c529586b5
--- /dev/null
+++ b/pandas/core/indexes/frozen.py
@@ -0,0 +1,121 @@
+"""
+frozen (immutable) data structures to support MultiIndexing
+
+These are used for:
+
+- .names (FrozenList)
+
+"""
+
+from __future__ import annotations
+
+from typing import (
+ TYPE_CHECKING,
+ NoReturn,
+)
+
+from pandas.core.base import PandasObject
+
+from pandas.io.formats.printing import pprint_thing
+
+if TYPE_CHECKING:
+ from pandas._typing import Self
+
+
+class FrozenList(PandasObject, list):
+ """
+ Container that doesn't allow setting item *but*
+ because it's technically hashable, will be used
+ for lookups, appropriately, etc.
+ """
+
+ # Side note: This has to be of type list. Otherwise,
+ # it messes up PyTables type checks.
+
+ def union(self, other) -> FrozenList:
+ """
+ Returns a FrozenList with other concatenated to the end of self.
+
+ Parameters
+ ----------
+ other : array-like
+ The array-like whose elements we are concatenating.
+
+ Returns
+ -------
+ FrozenList
+ The collection difference between self and other.
+ """
+ if isinstance(other, tuple):
+ other = list(other)
+ return type(self)(super().__add__(other))
+
+ def difference(self, other) -> FrozenList:
+ """
+ Returns a FrozenList with elements from other removed from self.
+
+ Parameters
+ ----------
+ other : array-like
+ The array-like whose elements we are removing self.
+
+ Returns
+ -------
+ FrozenList
+ The collection difference between self and other.
+ """
+ other = set(other)
+ temp = [x for x in self if x not in other]
+ return type(self)(temp)
+
+ # TODO: Consider deprecating these in favor of `union` (xref gh-15506)
+ # error: Incompatible types in assignment (expression has type
+ # "Callable[[FrozenList, Any], FrozenList]", base class "list" defined the
+ # type as overloaded function)
+ __add__ = __iadd__ = union # type: ignore[assignment]
+
+ def __getitem__(self, n):
+ if isinstance(n, slice):
+ return type(self)(super().__getitem__(n))
+ return super().__getitem__(n)
+
+ def __radd__(self, other) -> Self:
+ if isinstance(other, tuple):
+ other = list(other)
+ return type(self)(other + list(self))
+
+ def __eq__(self, other: object) -> bool:
+ if isinstance(other, (tuple, FrozenList)):
+ other = list(other)
+ return super().__eq__(other)
+
+ __req__ = __eq__
+
+ def __mul__(self, other) -> Self:
+ return type(self)(super().__mul__(other))
+
+ __imul__ = __mul__
+
+ def __reduce__(self):
+ return type(self), (list(self),)
+
+ # error: Signature of "__hash__" incompatible with supertype "list"
+ def __hash__(self) -> int: # type: ignore[override]
+ return hash(tuple(self))
+
+ def _disabled(self, *args, **kwargs) -> NoReturn:
+ """
+ This method will not function because object is immutable.
+ """
+ raise TypeError(f"'{type(self).__name__}' does not support mutable operations.")
+
+ def __str__(self) -> str:
+ return pprint_thing(self, quote_strings=True, escape_chars=("\t", "\r", "\n"))
+
+ def __repr__(self) -> str:
+ return f"{type(self).__name__}({self!s})"
+
+ __setitem__ = __setslice__ = _disabled # type: ignore[assignment]
+ __delitem__ = __delslice__ = _disabled
+ pop = append = extend = _disabled
+ remove = sort = insert = _disabled # type: ignore[assignment]
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index d6149bcd6fdac..4affa1337aa2a 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -101,6 +101,7 @@
ensure_index,
get_unanimous_names,
)
+from pandas.core.indexes.frozen import FrozenList
from pandas.core.ops.invalid import make_invalid_op
from pandas.core.sorting import (
get_group_index,
@@ -299,7 +300,7 @@ class MultiIndex(Index):
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
- names=('number', 'color'))
+ names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
@@ -309,9 +310,9 @@ class MultiIndex(Index):
# initialize to zero-length tuples to make everything work
_typ = "multiindex"
- _names: tuple[Hashable | None, ...] = ()
- _levels: tuple[Index, ...] = ()
- _codes: tuple[np.ndarray, ...] = ()
+ _names: list[Hashable | None] = []
+ _levels = FrozenList()
+ _codes = FrozenList()
_comparables = ["names"]
sortorder: int | None
@@ -347,7 +348,7 @@ def __new__(
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
- result._names = (None,) * len(levels)
+ result._names = [None] * len(levels)
if names is not None:
# handles name validation
result._set_names(names)
@@ -389,16 +390,16 @@ def _validate_codes(self, level: Index, code: np.ndarray) -> np.ndarray:
def _verify_integrity(
self,
- codes: tuple | None = None,
- levels: tuple | None = None,
+ codes: list | None = None,
+ levels: list | None = None,
levels_to_verify: list[int] | range | None = None,
- ) -> tuple:
+ ) -> FrozenList:
"""
Parameters
----------
- codes : optional tuple
+ codes : optional list
Codes to check for validity. Defaults to current codes.
- levels : optional tuple
+ levels : optional list
Levels to check for validity. Defaults to current levels.
levels_to_validate: optional list
Specifies the levels to verify.
@@ -462,7 +463,7 @@ def _verify_integrity(
else:
result_codes.append(codes[i])
- new_codes = tuple(result_codes)
+ new_codes = FrozenList(result_codes)
return new_codes
@classmethod
@@ -505,7 +506,7 @@ def from_arrays(
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
- names=('number', 'color'))
+ names=['number', 'color'])
"""
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
@@ -576,7 +577,7 @@ def from_tuples(
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
- names=('number', 'color'))
+ names=['number', 'color'])
"""
if not is_list_like(tuples):
raise TypeError("Input must be a list / sequence of tuple-likes.")
@@ -659,7 +660,7 @@ def from_product(
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
- names=('number', 'color'))
+ names=['number', 'color'])
"""
from pandas.core.reshape.util import cartesian_product
@@ -728,7 +729,7 @@ def from_frame(
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
- names=('a', 'b'))
+ names=['a', 'b'])
Using explicit names, instead of the column names
@@ -737,7 +738,7 @@ def from_frame(
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
- names=('state', 'observation'))
+ names=['state', 'observation'])
"""
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
@@ -760,9 +761,7 @@ def _values(self) -> np.ndarray:
vals = index
if isinstance(vals.dtype, CategoricalDtype):
vals = cast("CategoricalIndex", vals)
- # Incompatible types in assignment (expression has type
- # "ExtensionArray | ndarray[Any, Any]", variable has type "Index")
- vals = vals._data._internal_get_values() # type: ignore[assignment]
+ vals = vals._data._internal_get_values()
if isinstance(vals.dtype, ExtensionDtype) or lib.is_np_dtype(
vals.dtype, "mM"
@@ -812,7 +811,7 @@ def dtypes(self) -> Series:
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
- names=('number', 'color'))
+ names=['number', 'color'])
>>> idx.dtypes
number int64
color object
@@ -838,7 +837,7 @@ def size(self) -> int:
# Levels Methods
@cache_readonly
- def levels(self) -> tuple[Index, ...]:
+ def levels(self) -> FrozenList:
"""
Levels of the MultiIndex.
@@ -871,8 +870,7 @@ def levels(self) -> tuple[Index, ...]:
dog 4
>>> leg_num.index.levels
- (Index(['mammal'], dtype='object', name='Category'),
- Index(['cat', 'dog', 'goat', 'human'], dtype='object', name='Animals'))
+ FrozenList([['mammal'], ['cat', 'dog', 'goat', 'human']])
MultiIndex levels will not change even if the DataFrame using the MultiIndex
does not contain all them anymore.
@@ -887,8 +885,7 @@ def levels(self) -> tuple[Index, ...]:
dog 4
>>> large_leg_num.index.levels
- (Index(['mammal'], dtype='object', name='Category'),
- Index(['cat', 'dog', 'goat', 'human'], dtype='object', name='Animals'))
+ FrozenList([['mammal'], ['cat', 'dog', 'goat', 'human']])
"""
# Use cache_readonly to ensure that self.get_locs doesn't repeatedly
# create new IndexEngine
@@ -897,7 +894,7 @@ def levels(self) -> tuple[Index, ...]:
for level in result:
# disallow midx.levels[0].name = "foo"
level._no_setting_name = True
- return tuple(result)
+ return FrozenList(result)
def _set_levels(
self,
@@ -920,14 +917,16 @@ def _set_levels(
raise ValueError("Length of levels must match length of level.")
if level is None:
- new_levels = tuple(ensure_index(lev, copy=copy)._view() for lev in levels)
+ new_levels = FrozenList(
+ ensure_index(lev, copy=copy)._view() for lev in levels
+ )
level_numbers: range | list[int] = range(len(new_levels))
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_levels_list = list(self._levels)
for lev_num, lev in zip(level_numbers, levels):
new_levels_list[lev_num] = ensure_index(lev, copy=copy)._view()
- new_levels = tuple(new_levels_list)
+ new_levels = FrozenList(new_levels_list)
if verify_integrity:
new_codes = self._verify_integrity(
@@ -936,7 +935,7 @@ def _set_levels(
self._codes = new_codes
names = self.names
- self._levels: tuple[Index, ...] = new_levels
+ self._levels = new_levels
if any(names):
self._set_names(names)
@@ -981,7 +980,7 @@ def set_levels(
(2, 'two'),
(3, 'one'),
(3, 'two')],
- names=('foo', 'bar'))
+ names=['foo', 'bar'])
>>> idx.set_levels([["a", "b", "c"], [1, 2]])
MultiIndex([('a', 1),
@@ -990,7 +989,7 @@ def set_levels(
('b', 2),
('c', 1),
('c', 2)],
- names=('foo', 'bar'))
+ names=['foo', 'bar'])
>>> idx.set_levels(["a", "b", "c"], level=0)
MultiIndex([('a', 'one'),
('a', 'two'),
@@ -998,7 +997,7 @@ def set_levels(
('b', 'two'),
('c', 'one'),
('c', 'two')],
- names=('foo', 'bar'))
+ names=['foo', 'bar'])
>>> idx.set_levels(["a", "b"], level="bar")
MultiIndex([(1, 'a'),
(1, 'b'),
@@ -1006,7 +1005,7 @@ def set_levels(
(2, 'b'),
(3, 'a'),
(3, 'b')],
- names=('foo', 'bar'))
+ names=['foo', 'bar'])
If any of the levels passed to ``set_levels()`` exceeds the
existing length, all of the values from that argument will
@@ -1020,10 +1019,10 @@ def set_levels(
('b', 2),
('c', 1),
('c', 2)],
- names=('foo', 'bar'))
+ names=['foo', 'bar'])
>>> idx.set_levels([["a", "b", "c"], [1, 2, 3, 4]], level=[0, 1]).levels
- (Index(['a', 'b', 'c'], dtype='object', name='foo'), Index([1, 2, 3, 4], dtype='int64', name='bar'))
- """ # noqa: E501
+ FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])
+ """
if isinstance(levels, Index):
pass
@@ -1076,7 +1075,7 @@ def levshape(self) -> Shape:
# Codes Methods
@property
- def codes(self) -> tuple:
+ def codes(self) -> FrozenList:
"""
Codes of the MultiIndex.
@@ -1098,7 +1097,7 @@ def codes(self) -> tuple:
>>> arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
>>> mi = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
>>> mi.codes
- (array([0, 0, 1, 1], dtype=int8), array([1, 0, 1, 0], dtype=int8))
+ FrozenList([[0, 0, 1, 1], [1, 0, 1, 0]])
"""
return self._codes
@@ -1119,7 +1118,7 @@ def _set_codes(
level_numbers: list[int] | range
if level is None:
- new_codes = tuple(
+ new_codes = FrozenList(
_coerce_indexer_frozen(level_codes, lev, copy=copy).view()
for lev, level_codes in zip(self._levels, codes)
)
@@ -1132,7 +1131,7 @@ def _set_codes(
new_codes_list[lev_num] = _coerce_indexer_frozen(
level_codes, lev, copy=copy
)
- new_codes = tuple(new_codes_list)
+ new_codes = FrozenList(new_codes_list)
if verify_integrity:
new_codes = self._verify_integrity(
@@ -1173,32 +1172,32 @@ def set_codes(
(1, 'two'),
(2, 'one'),
(2, 'two')],
- names=('foo', 'bar'))
+ names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
- names=('foo', 'bar'))
+ names=['foo', 'bar'])
>>> idx.set_codes([1, 0, 1, 0], level=0)
MultiIndex([(2, 'one'),
(1, 'two'),
(2, 'one'),
(1, 'two')],
- names=('foo', 'bar'))
+ names=['foo', 'bar'])
>>> idx.set_codes([0, 0, 1, 1], level="bar")
MultiIndex([(1, 'one'),
(1, 'one'),
(2, 'two'),
(2, 'two')],
- names=('foo', 'bar'))
+ names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
- names=('foo', 'bar'))
+ names=['foo', 'bar'])
"""
level, codes = _require_listlike(level, codes, "Codes")
@@ -1451,7 +1450,6 @@ def _format_multi(
if len(self) == 0:
return []
- formatted: Iterable
stringified_levels = []
for lev, level_codes in zip(self.levels, self.codes):
na = _get_na_rep(lev.dtype)
@@ -1476,9 +1474,7 @@ def _format_multi(
stringified_levels.append(formatted)
result_levels = []
- # Incompatible types in assignment (expression has type "Iterable[Any]",
- # variable has type "Index")
- for lev, lev_name in zip(stringified_levels, self.names): # type: ignore[assignment]
+ for lev, lev_name in zip(stringified_levels, self.names):
level = []
if include_names:
@@ -1510,8 +1506,8 @@ def _format_multi(
# --------------------------------------------------------------------
# Names Methods
- def _get_names(self) -> tuple[Hashable | None, ...]:
- return self._names
+ def _get_names(self) -> FrozenList:
+ return FrozenList(self._names)
def _set_names(self, names, *, level=None, validate: bool = True) -> None:
"""
@@ -1558,7 +1554,6 @@ def _set_names(self, names, *, level=None, validate: bool = True) -> None:
level = [self._get_level_number(lev) for lev in level]
# set the name
- new_names = list(self._names)
for lev, name in zip(level, names):
if name is not None:
# GH 20527
@@ -1567,8 +1562,7 @@ def _set_names(self, names, *, level=None, validate: bool = True) -> None:
raise TypeError(
f"{type(self).__name__}.name must be a hashable type"
)
- new_names[lev] = name
- self._names = tuple(new_names)
+ self._names[lev] = name
# If .levels has been accessed, the names in our cache will be stale.
self._reset_cache()
@@ -1587,9 +1581,9 @@ def _set_names(self, names, *, level=None, validate: bool = True) -> None:
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
- names=('x', 'y', 'z'))
+ names=['x', 'y', 'z'])
>>> mi.names
- ('x', 'y', 'z')
+ FrozenList(['x', 'y', 'z'])
""",
)
@@ -2063,7 +2057,7 @@ def remove_unused_levels(self) -> MultiIndex:
>>> mi2 = mi[2:].remove_unused_levels()
>>> mi2.levels
- (RangeIndex(start=1, stop=2, step=1), Index(['a', 'b'], dtype='object'))
+ FrozenList([[1], ['a', 'b']])
"""
new_levels = []
new_codes = []
@@ -2337,13 +2331,13 @@ def drop( # type: ignore[override]
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
- names=('number', 'color'))
+ names=['number', 'color'])
>>> idx.drop([(1, "green"), (2, "purple")])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'purple'),
(2, 'green')],
- names=('number', 'color'))
+ names=['number', 'color'])
We can also drop from a specific level.
@@ -2351,12 +2345,12 @@ def drop( # type: ignore[override]
MultiIndex([(0, 'purple'),
(1, 'purple'),
(2, 'purple')],
- names=('number', 'color'))
+ names=['number', 'color'])
>>> idx.drop([1, 2], level=0)
MultiIndex([(0, 'green'),
(0, 'purple')],
- names=('number', 'color'))
+ names=['number', 'color'])
"""
if level is not None:
return self._drop_from_level(codes, level, errors)
@@ -2497,17 +2491,17 @@ def reorder_levels(self, order) -> MultiIndex:
>>> mi
MultiIndex([(1, 3),
(2, 4)],
- names=('x', 'y'))
+ names=['x', 'y'])
>>> mi.reorder_levels(order=[1, 0])
MultiIndex([(3, 1),
(4, 2)],
- names=('y', 'x'))
+ names=['y', 'x'])
>>> mi.reorder_levels(order=["y", "x"])
MultiIndex([(3, 1),
(4, 2)],
- names=('y', 'x'))
+ names=['y', 'x'])
"""
order = [self._get_level_number(i) for i in order]
result = self._reorder_ilevels(order)
@@ -2876,9 +2870,7 @@ def _partial_tup_index(self, tup: tuple, side: Literal["left", "right"] = "left"
if lab not in lev and not isna(lab):
# short circuit
try:
- # Argument 1 to "searchsorted" has incompatible type "Index";
- # expected "ExtensionArray | ndarray[Any, Any]"
- loc = algos.searchsorted(lev, lab, side=side) # type: ignore[arg-type]
+ loc = algos.searchsorted(lev, lab, side=side)
except TypeError as err:
# non-comparable e.g. test_slice_locs_with_type_mismatch
raise TypeError(f"Level type mismatch: {lab}") from err
@@ -3546,7 +3538,7 @@ def _reorder_indexer(
k_codes = self.levels[i].get_indexer(k)
k_codes = k_codes[k_codes >= 0] # Filter absent keys
# True if the given codes are not ordered
- need_sort = bool((k_codes[:-1] > k_codes[1:]).any())
+ need_sort = (k_codes[:-1] > k_codes[1:]).any()
else:
need_sort = True
elif isinstance(k, slice):
@@ -3979,7 +3971,7 @@ def isin(self, values, level=None) -> npt.NDArray[np.bool_]:
__invert__ = make_invalid_op("__invert__")
-def _lexsort_depth(codes: tuple[np.ndarray], nlevels: int) -> int:
+def _lexsort_depth(codes: list[np.ndarray], nlevels: int) -> int:
"""Count depth (up to a maximum of `nlevels`) with which codes are lexsorted."""
int64_codes = [ensure_int64(level_codes) for level_codes in codes]
for k in range(nlevels, 0, -1):
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 43077e7aeecb4..4392f54d9c442 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1681,9 +1681,8 @@ def _wrap_result(self, result):
if self.kind == "period" and not isinstance(result.index, PeriodIndex):
if isinstance(result.index, MultiIndex):
# GH 24103 - e.g. groupby resample
- new_level = result.index.levels[-1]
- if not isinstance(new_level, PeriodIndex):
- new_level = new_level.to_period(self.freq) # type: ignore[attr-defined]
+ if not isinstance(result.index.levels[-1], PeriodIndex):
+ new_level = result.index.levels[-1].to_period(self.freq)
result.index = result.index.set_levels(new_level, level=-1)
else:
result.index = result.index.to_period(self.freq)
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index f51a833e5f906..b4720306094e9 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -237,7 +237,7 @@ def melt(
else:
mdata[col] = np.tile(id_data._values, num_cols_adjusted)
- mcolumns = id_vars + list(var_name) + [value_name]
+ mcolumns = id_vars + var_name + [value_name]
if frame.shape[1] > 0 and not any(
not isinstance(dt, np.dtype) and dt._supports_2d for dt in frame.dtypes
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index dcb638cfee97b..19e53a883d1e2 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -106,6 +106,7 @@
from pandas import DataFrame
from pandas.core import groupby
from pandas.core.arrays import DatetimeArray
+ from pandas.core.indexes.frozen import FrozenList
_factorizers = {
np.int64: libhashtable.Int64Factorizer,
@@ -1803,7 +1804,7 @@ def restore_dropped_levels_multijoin(
join_index: Index,
lindexer: npt.NDArray[np.intp],
rindexer: npt.NDArray[np.intp],
-) -> tuple[tuple, tuple, tuple]:
+) -> tuple[FrozenList, FrozenList, FrozenList]:
"""
*this is an internal non-public method*
@@ -1835,7 +1836,7 @@ def restore_dropped_levels_multijoin(
levels of combined multiindexes
labels : np.ndarray[np.intp]
labels of combined multiindexes
- names : tuple[Hashable]
+ names : List[Hashable]
names of combined multiindex levels
"""
@@ -1877,11 +1878,12 @@ def _convert_to_multiindex(index: Index) -> MultiIndex:
else:
restore_codes = algos.take_nd(codes, indexer, fill_value=-1)
- join_levels = join_levels + (restore_levels,)
- join_codes = join_codes + (restore_codes,)
- join_names = join_names + (dropped_level_name,)
+ # error: Cannot determine type of "__add__"
+ join_levels = join_levels + [restore_levels] # type: ignore[has-type]
+ join_codes = join_codes + [restore_codes] # type: ignore[has-type]
+ join_names = join_names + [dropped_level_name]
- return tuple(join_levels), tuple(join_codes), tuple(join_names)
+ return join_levels, join_codes, join_names
class _OrderedMerge(_MergeOperation):
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index b62f550662f5d..e0126d439a79c 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -397,7 +397,11 @@ def _all_key(key):
if isinstance(piece.index, MultiIndex):
# We are adding an empty level
transformed_piece.index = MultiIndex.from_tuples(
- [all_key], names=piece.index.names + (None,)
+ [all_key],
+ names=piece.index.names
+ + [
+ None,
+ ],
)
else:
transformed_piece.index = Index([all_key], name=piece.index.name)
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 574e6839070be..01cc85ceff181 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -62,6 +62,7 @@
)
from pandas.core.arrays import ExtensionArray
+ from pandas.core.indexes.frozen import FrozenList
class _Unstacker:
@@ -349,15 +350,21 @@ def get_new_columns(self, value_columns: Index | None):
width = len(value_columns)
propagator = np.repeat(np.arange(width), stride)
- new_levels: tuple[Index, ...]
+ new_levels: FrozenList | list[Index]
if isinstance(value_columns, MultiIndex):
- new_levels = value_columns.levels + (self.removed_level_full,)
+ # error: Cannot determine type of "__add__" [has-type]
+ new_levels = value_columns.levels + ( # type: ignore[has-type]
+ self.removed_level_full,
+ )
new_names = value_columns.names + (self.removed_name,)
new_codes = [lab.take(propagator) for lab in value_columns.codes]
else:
- new_levels = (value_columns, self.removed_level_full)
+ new_levels = [
+ value_columns,
+ self.removed_level_full,
+ ]
new_names = [value_columns.name, self.removed_name]
new_codes = [propagator]
@@ -987,26 +994,27 @@ def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame:
# Construct the correct MultiIndex by combining the frame's index and
# stacked columns.
+ index_levels: list | FrozenList
if isinstance(frame.index, MultiIndex):
index_levels = frame.index.levels
- index_codes = tuple(np.tile(frame.index.codes, (1, ratio)))
+ index_codes = list(np.tile(frame.index.codes, (1, ratio)))
else:
codes, uniques = factorize(frame.index, use_na_sentinel=False)
- # Incompatible types in assignment (expression has type
- # "tuple[ndarray[Any, Any] | Index]", variable has type "tuple[Index, ...]")
- index_levels = (uniques,) # type: ignore[assignment]
- index_codes = tuple(np.tile(codes, (1, ratio)))
+ index_levels = [uniques]
+ index_codes = list(np.tile(codes, (1, ratio)))
if isinstance(ordered_stack_cols, MultiIndex):
column_levels = ordered_stack_cols.levels
column_codes = ordered_stack_cols.drop_duplicates().codes
else:
- column_levels = (ordered_stack_cols.unique(),)
- column_codes = (factorize(ordered_stack_cols_unique, use_na_sentinel=False)[0],)
- column_codes = tuple(np.repeat(codes, len(frame)) for codes in column_codes)
+ column_levels = [ordered_stack_cols.unique()]
+ column_codes = [factorize(ordered_stack_cols_unique, use_na_sentinel=False)[0]]
+ # error: Incompatible types in assignment (expression has type "list[ndarray[Any,
+ # dtype[Any]]]", variable has type "FrozenList")
+ column_codes = [np.repeat(codes, len(frame)) for codes in column_codes] # type: ignore[assignment]
result.index = MultiIndex(
levels=index_levels + column_levels,
codes=index_codes + column_codes,
- names=frame.index.names + ordered_stack_cols.names,
+ names=frame.index.names + list(ordered_stack_cols.names),
verify_integrity=False,
)
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index ef115e350462f..d274c1d7a5aff 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -3608,7 +3608,7 @@ def str_extractall(arr, pat, flags: int = 0) -> DataFrame:
from pandas import MultiIndex
- index = MultiIndex.from_tuples(index_list, names=arr.index.names + ("match",))
+ index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"])
dtype = _result_dtype(arr)
result = arr._constructor_expanddim(
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 07998cdbd40b5..db6078ae636e3 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -820,12 +820,12 @@ def _apply_pairwise(
else:
idx_codes, idx_levels = factorize(result.index)
result_codes = [idx_codes]
- result_levels = [idx_levels] # type: ignore[list-item]
+ result_levels = [idx_levels]
result_names = [result.index.name]
- # 3) Create the resulting index by combining 1) + 2)
+ # 3) Create the resulting index by combining 1) + 2)
result_codes = groupby_codes + result_codes
- result_levels = groupby_levels + result_levels # type: ignore[assignment]
+ result_levels = groupby_levels + result_levels
result_names = self._grouper.names + result_names
result_index = MultiIndex(
diff --git a/pandas/tests/frame/methods/test_rename_axis.py b/pandas/tests/frame/methods/test_rename_axis.py
index 908a3f728c749..dd4a77c6509b8 100644
--- a/pandas/tests/frame/methods/test_rename_axis.py
+++ b/pandas/tests/frame/methods/test_rename_axis.py
@@ -60,15 +60,15 @@ def test_rename_axis_mapper(self):
# Test for renaming index using dict
result = df.rename_axis(index={"ll": "foo"})
- assert result.index.names == ("foo", "nn")
+ assert result.index.names == ["foo", "nn"]
# Test for renaming index using a function
result = df.rename_axis(index=str.upper, axis=0)
- assert result.index.names == ("LL", "NN")
+ assert result.index.names == ["LL", "NN"]
# Test for renaming index providing complete list
result = df.rename_axis(index=["foo", "goo"])
- assert result.index.names == ("foo", "goo")
+ assert result.index.names == ["foo", "goo"]
# Test for changing index and columns at same time
sdf = df.reset_index().set_index("nn").drop(columns=["ll", "y"])
diff --git a/pandas/tests/frame/methods/test_set_index.py b/pandas/tests/frame/methods/test_set_index.py
index a1968c6c694d5..198cab0e91eab 100644
--- a/pandas/tests/frame/methods/test_set_index.py
+++ b/pandas/tests/frame/methods/test_set_index.py
@@ -163,7 +163,7 @@ def test_set_index_names(self):
)
df.index.name = "name"
- assert df.set_index(df.index).index.names == ("name",)
+ assert df.set_index(df.index).index.names == ["name"]
mi = MultiIndex.from_arrays(df[["A", "B"]].T.values, names=["A", "B"])
mi2 = MultiIndex.from_arrays(
@@ -172,7 +172,7 @@ def test_set_index_names(self):
df = df.set_index(["A", "B"])
- assert df.set_index(df.index).index.names == ("A", "B")
+ assert df.set_index(df.index).index.names == ["A", "B"]
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
@@ -292,7 +292,7 @@ def test_set_index_pass_single_array(
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
- expected.index.names = [index_name] + list(name) if append else name
+ expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
@@ -464,12 +464,12 @@ def test_set_index_datetime(self):
df = df.set_index("label", append=True)
tm.assert_index_equal(df.index.levels[0], expected)
tm.assert_index_equal(df.index.levels[1], Index(["a", "b"], name="label"))
- assert df.index.names == ("datetime", "label")
+ assert df.index.names == ["datetime", "label"]
df = df.swaplevel(0, 1)
tm.assert_index_equal(df.index.levels[0], Index(["a", "b"], name="label"))
tm.assert_index_equal(df.index.levels[1], expected)
- assert df.index.names == ("label", "datetime")
+ assert df.index.names == ["label", "datetime"]
df = DataFrame(np.random.default_rng(2).random(6))
idx1 = DatetimeIndex(
diff --git a/pandas/tests/frame/methods/test_sort_values.py b/pandas/tests/frame/methods/test_sort_values.py
index b856a7ff5d26b..c146dcc9c2d71 100644
--- a/pandas/tests/frame/methods/test_sort_values.py
+++ b/pandas/tests/frame/methods/test_sort_values.py
@@ -857,7 +857,7 @@ def test_sort_index_level_and_column_label(
)
# Get index levels from df_idx
- levels = list(df_idx.index.names)
+ levels = df_idx.index.names
# Compute expected by sorting on columns and the setting index
expected = df_none.sort_values(
@@ -875,7 +875,7 @@ def test_sort_column_level_and_index_label(
# GH#14353
# Get levels from df_idx
- levels = list(df_idx.index.names)
+ levels = df_idx.index.names
# Compute expected by sorting on axis=0, setting index levels, and then
# transposing. For some cases this will result in a frame with
diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py
index 09235f154b188..03db284d892e3 100644
--- a/pandas/tests/frame/test_stack_unstack.py
+++ b/pandas/tests/frame/test_stack_unstack.py
@@ -805,7 +805,7 @@ def test_unstack_multi_level_cols(self):
[[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]
),
)
- assert df.unstack(["i2", "i1"]).columns.names[-2:] == ("i2", "i1")
+ assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
def test_unstack_multi_level_rows_and_cols(self):
# PH 28306: Unstack df with multi level cols and rows
@@ -1848,7 +1848,7 @@ def test_stack_unstack_preserve_names(
unstacked = frame.unstack()
assert unstacked.index.name == "first"
- assert unstacked.columns.names == ("exp", "second")
+ assert unstacked.columns.names == ["exp", "second"]
restacked = unstacked.stack(future_stack=future_stack)
assert restacked.index.names == frame.index.names
diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py
index 81676a5d8520a..1d0f491529b56 100644
--- a/pandas/tests/generic/test_frame.py
+++ b/pandas/tests/generic/test_frame.py
@@ -35,15 +35,15 @@ def test_set_axis_name_mi(self, func):
columns=MultiIndex.from_tuples([("C", x) for x in list("xyz")]),
)
- level_names = ("L1", "L2")
+ level_names = ["L1", "L2"]
result = methodcaller(func, level_names)(df)
assert result.index.names == level_names
- assert result.columns.names == (None, None)
+ assert result.columns.names == [None, None]
result = methodcaller(func, level_names, axis=1)(df)
- assert result.columns.names == level_names
- assert result.index.names == (None, None)
+ assert result.columns.names == ["L1", "L2"]
+ assert result.index.names == [None, None]
def test_nonzero_single_element(self):
df = DataFrame([[False, False]])
diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py
index cbaf064c379ea..7dcdcd96cce51 100644
--- a/pandas/tests/generic/test_series.py
+++ b/pandas/tests/generic/test_series.py
@@ -24,9 +24,9 @@ def test_set_axis_name_mi(self, func):
result = methodcaller(func, ["L1", "L2"])(ser)
assert ser.index.name is None
- assert ser.index.names == ("l1", "l2")
+ assert ser.index.names == ["l1", "l2"]
assert result.index.name is None
- assert result.index.names == ("L1", "L2")
+        assert result.index.names == ["L1", "L2"]
def test_set_axis_name_raises(self):
ser = Series([1])
diff --git a/pandas/tests/groupby/methods/test_quantile.py b/pandas/tests/groupby/methods/test_quantile.py
index 9b825b73c26c0..af0deba138469 100644
--- a/pandas/tests/groupby/methods/test_quantile.py
+++ b/pandas/tests/groupby/methods/test_quantile.py
@@ -454,8 +454,5 @@ def test_groupby_quantile_nonmulti_levels_order():
tm.assert_series_equal(result, expected)
# We need to check that index levels are not sorted
- tm.assert_index_equal(
- result.index.levels[0], Index(["B", "A"], dtype=object, name="cat1")
- )
- tm.assert_index_equal(result.index.levels[1], Index([0.2, 0.8]))
- assert isinstance(result.index.levels, tuple)
+ expected_levels = pd.core.indexes.frozen.FrozenList([["B", "A"], [0.2, 0.8]])
+ tm.assert_equal(result.index.levels, expected_levels)
diff --git a/pandas/tests/groupby/methods/test_value_counts.py b/pandas/tests/groupby/methods/test_value_counts.py
index a8d359f3206c2..be52b4a591c26 100644
--- a/pandas/tests/groupby/methods/test_value_counts.py
+++ b/pandas/tests/groupby/methods/test_value_counts.py
@@ -108,7 +108,7 @@ def rebuild_index(df):
gr = df.groupby(keys, sort=isort)
right = gr["3rd"].apply(Series.value_counts, **kwargs)
- right.index.names = tuple(list(right.index.names[:-1]) + ["3rd"])
+ right.index.names = right.index.names[:-1] + ["3rd"]
# https://github.com/pandas-dev/pandas/issues/49909
right = right.rename(name)
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 9bd2c22788fac..1a2589fe94ea5 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -987,7 +987,7 @@ def test_apply_multi_level_name(category):
).set_index(["A", "B"])
result = df.groupby("B", observed=False).apply(lambda x: x.sum())
tm.assert_frame_equal(result, expected)
- assert df.index.names == ("A", "B")
+ assert df.index.names == ["A", "B"]
def test_groupby_apply_datetime_result_dtypes(using_infer_string):
diff --git a/pandas/tests/indexes/multi/test_astype.py b/pandas/tests/indexes/multi/test_astype.py
index c993f425fa132..29908537fbe59 100644
--- a/pandas/tests/indexes/multi/test_astype.py
+++ b/pandas/tests/indexes/multi/test_astype.py
@@ -11,7 +11,7 @@ def test_astype(idx):
actual = idx.astype("O")
tm.assert_copy(actual.levels, expected.levels)
tm.assert_copy(actual.codes, expected.codes)
- assert actual.names == expected.names
+ assert actual.names == list(expected.names)
with pytest.raises(TypeError, match="^Setting.*dtype.*object"):
idx.astype(np.dtype(int))
diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py
index 2b16f2c4c095d..38e0920b7004e 100644
--- a/pandas/tests/indexes/multi/test_constructors.py
+++ b/pandas/tests/indexes/multi/test_constructors.py
@@ -27,7 +27,7 @@ def test_constructor_single_level():
assert isinstance(result, MultiIndex)
expected = Index(["foo", "bar", "baz", "qux"], name="first")
tm.assert_index_equal(result.levels[0], expected)
- assert result.names == ("first",)
+ assert result.names == ["first"]
def test_constructor_no_levels():
@@ -277,7 +277,7 @@ def test_from_arrays_empty():
assert isinstance(result, MultiIndex)
expected = Index([], name="A")
tm.assert_index_equal(result.levels[0], expected)
- assert result.names == ("A",)
+ assert result.names == ["A"]
# N levels
for N in [2, 3]:
@@ -424,7 +424,7 @@ def test_from_product_empty_one_level():
result = MultiIndex.from_product([[]], names=["A"])
expected = Index([], name="A")
tm.assert_index_equal(result.levels[0], expected)
- assert result.names == ("A",)
+ assert result.names == ["A"]
@pytest.mark.parametrize(
@@ -712,7 +712,7 @@ def test_from_frame_dtype_fidelity():
@pytest.mark.parametrize(
- "names_in,names_out", [(None, (("L1", "x"), ("L2", "y"))), (["x", "y"], ("x", "y"))]
+ "names_in,names_out", [(None, [("L1", "x"), ("L2", "y")]), (["x", "y"], ["x", "y"])]
)
def test_from_frame_valid_names(names_in, names_out):
# GH 22420
@@ -812,13 +812,13 @@ def test_constructor_with_tz():
result = MultiIndex.from_arrays([index, columns])
- assert result.names == ("dt1", "dt2")
+ assert result.names == ["dt1", "dt2"]
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
result = MultiIndex.from_arrays([Series(index), Series(columns)])
- assert result.names == ("dt1", "dt2")
+ assert result.names == ["dt1", "dt2"]
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
diff --git a/pandas/tests/indexes/multi/test_copy.py b/pandas/tests/indexes/multi/test_copy.py
index 14d327093500e..2e09a580f9528 100644
--- a/pandas/tests/indexes/multi/test_copy.py
+++ b/pandas/tests/indexes/multi/test_copy.py
@@ -70,7 +70,7 @@ def test_copy_method(deep):
@pytest.mark.parametrize(
"kwarg, value",
[
- ("names", ("third", "fourth")),
+ ("names", ["third", "fourth"]),
],
)
def test_copy_method_kwargs(deep, kwarg, value):
diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py
index 622520f45f904..1bbeedac3fb10 100644
--- a/pandas/tests/indexes/multi/test_duplicates.py
+++ b/pandas/tests/indexes/multi/test_duplicates.py
@@ -112,7 +112,7 @@ def test_duplicate_multiindex_codes():
mi.set_levels([["A", "B", "A", "A", "B"], [2, 1, 3, -2, 5]])
-@pytest.mark.parametrize("names", [("a", "b", "a"), (1, 1, 2), (1, "a", 1)])
+@pytest.mark.parametrize("names", [["a", "b", "a"], [1, 1, 2], [1, "a", 1]])
def test_duplicate_level_names(names):
# GH18872, GH19029
mi = MultiIndex.from_product([[0, 1]] * 3, names=names)
diff --git a/pandas/tests/indexes/multi/test_formats.py b/pandas/tests/indexes/multi/test_formats.py
index cc6a33c22503d..6ea42349bd04a 100644
--- a/pandas/tests/indexes/multi/test_formats.py
+++ b/pandas/tests/indexes/multi/test_formats.py
@@ -56,14 +56,14 @@ def test_repr_max_seq_items_equal_to_n(self, idx):
('baz', 'two'),
('qux', 'one'),
('qux', 'two')],
- names=('first', 'second'))"""
+ names=['first', 'second'])"""
assert result == expected
def test_repr(self, idx):
result = idx[:1].__repr__()
expected = """\
MultiIndex([('foo', 'one')],
- names=('first', 'second'))"""
+ names=['first', 'second'])"""
assert result == expected
result = idx.__repr__()
@@ -74,7 +74,7 @@ def test_repr(self, idx):
('baz', 'two'),
('qux', 'one'),
('qux', 'two')],
- names=('first', 'second'))"""
+ names=['first', 'second'])"""
assert result == expected
with pd.option_context("display.max_seq_items", 5):
@@ -85,7 +85,7 @@ def test_repr(self, idx):
...
('qux', 'one'),
('qux', 'two')],
- names=('first', 'second'), length=6)"""
+ names=['first', 'second'], length=6)"""
assert result == expected
# display.max_seq_items == 1
@@ -94,7 +94,7 @@ def test_repr(self, idx):
expected = """\
MultiIndex([...
('qux', 'two')],
- names=('first', ...), length=6)"""
+ names=['first', ...], length=6)"""
assert result == expected
def test_rjust(self):
@@ -105,7 +105,7 @@ def test_rjust(self):
result = mi[:1].__repr__()
expected = """\
MultiIndex([('a', 9, '2000-01-01 00:00:00')],
- names=('a', 'b', 'dti'))"""
+ names=['a', 'b', 'dti'])"""
assert result == expected
result = mi[::500].__repr__()
@@ -114,7 +114,7 @@ def test_rjust(self):
( 'a', 9, '2000-01-01 00:08:20'),
('abc', 10, '2000-01-01 00:16:40'),
('abc', 10, '2000-01-01 00:25:00')],
- names=('a', 'b', 'dti'))"""
+ names=['a', 'b', 'dti'])"""
assert result == expected
result = mi.__repr__()
@@ -140,7 +140,7 @@ def test_rjust(self):
('abc', 10, '2000-01-01 00:33:17'),
('abc', 10, '2000-01-01 00:33:18'),
('abc', 10, '2000-01-01 00:33:19')],
- names=('a', 'b', 'dti'), length=2000)"""
+ names=['a', 'b', 'dti'], length=2000)"""
assert result == expected
def test_tuple_width(self):
@@ -152,7 +152,7 @@ def test_tuple_width(self):
mi = MultiIndex.from_arrays(levels, names=names)
result = mi[:1].__repr__()
expected = """MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)],
- names=('a', 'b', 'dti_1', 'dti_2', 'dti_3'))""" # noqa: E501
+ names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])""" # noqa: E501
assert result == expected
result = mi[:10].__repr__()
@@ -167,7 +167,7 @@ def test_tuple_width(self):
('a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),
('a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),
('a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...)],
- names=('a', 'b', 'dti_1', 'dti_2', 'dti_3'))"""
+ names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])"""
assert result == expected
result = mi.__repr__()
@@ -193,7 +193,7 @@ def test_tuple_width(self):
('abc', 10, '2000-01-01 00:33:17', '2000-01-01 00:33:17', ...),
('abc', 10, '2000-01-01 00:33:18', '2000-01-01 00:33:18', ...),
('abc', 10, '2000-01-01 00:33:19', '2000-01-01 00:33:19', ...)],
- names=('a', 'b', 'dti_1', 'dti_2', 'dti_3'), length=2000)"""
+ names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)"""
assert result == expected
def test_multiindex_long_element(self):
diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py
index d17b0aae953cd..dd4bba42eda6f 100644
--- a/pandas/tests/indexes/multi/test_get_set.py
+++ b/pandas/tests/indexes/multi/test_get_set.py
@@ -101,16 +101,16 @@ def test_get_level_number_out_of_bounds(multiindex_dataframe_random_data):
def test_set_name_methods(idx):
# so long as these are synonyms, we don't need to test set_names
- index_names = ("first", "second")
+ index_names = ["first", "second"]
assert idx.rename == idx.set_names
- new_names = tuple(name + "SUFFIX" for name in index_names)
+ new_names = [name + "SUFFIX" for name in index_names]
ind = idx.set_names(new_names)
assert idx.names == index_names
assert ind.names == new_names
msg = "Length of names must match number of levels in MultiIndex"
with pytest.raises(ValueError, match=msg):
ind.set_names(new_names + new_names)
- new_names2 = tuple(name + "SUFFIX2" for name in new_names)
+ new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
@@ -118,11 +118,11 @@ def test_set_name_methods(idx):
# set names for specific level (# GH7792)
ind = idx.set_names(new_names[0], level=0)
assert idx.names == index_names
- assert ind.names == (new_names[0], index_names[1])
+ assert ind.names == [new_names[0], index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
- assert ind.names == (new_names2[0], index_names[1])
+ assert ind.names == [new_names2[0], index_names[1]]
# set names for multiple levels
ind = idx.set_names(new_names, level=[0, 1])
diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py
index f6d960bd41925..d570e911bf584 100644
--- a/pandas/tests/indexes/multi/test_integrity.py
+++ b/pandas/tests/indexes/multi/test_integrity.py
@@ -216,9 +216,7 @@ def test_can_hold_identifiers(idx):
def test_metadata_immutable(idx):
levels, codes = idx.levels, idx.codes
# shouldn't be able to set at either the top level or base level
- mutable_regex = re.compile(
- "does not support mutable operations|does not support item assignment"
- )
+ mutable_regex = re.compile("does not support mutable operations")
with pytest.raises(TypeError, match=mutable_regex):
levels[0] = levels[0]
with pytest.raises(TypeError, match=mutable_regex):
diff --git a/pandas/tests/indexes/multi/test_names.py b/pandas/tests/indexes/multi/test_names.py
index aff9ebfb1c1e3..45f19b4d70fb9 100644
--- a/pandas/tests/indexes/multi/test_names.py
+++ b/pandas/tests/indexes/multi/test_names.py
@@ -60,20 +60,20 @@ def test_copy_names():
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
- assert multi_idx.names == ("MyName1", "MyName2")
- assert multi_idx1.names == ("MyName1", "MyName2")
+ assert multi_idx.names == ["MyName1", "MyName2"]
+ assert multi_idx1.names == ["MyName1", "MyName2"]
multi_idx2 = multi_idx.copy(names=["NewName1", "NewName2"])
assert multi_idx.equals(multi_idx2)
- assert multi_idx.names == ("MyName1", "MyName2")
- assert multi_idx2.names == ("NewName1", "NewName2")
+ assert multi_idx.names == ["MyName1", "MyName2"]
+ assert multi_idx2.names == ["NewName1", "NewName2"]
multi_idx3 = multi_idx.copy(name=["NewName1", "NewName2"])
assert multi_idx.equals(multi_idx3)
- assert multi_idx.names == ("MyName1", "MyName2")
- assert multi_idx3.names == ("NewName1", "NewName2")
+ assert multi_idx.names == ["MyName1", "MyName2"]
+ assert multi_idx3.names == ["NewName1", "NewName2"]
# gh-35592
with pytest.raises(ValueError, match="Length of new names must be 2, got 1"):
@@ -85,8 +85,8 @@ def test_copy_names():
def test_names(idx):
# names are assigned in setup
- assert idx.names == ("first", "second")
- level_names = tuple(level.name for level in idx.levels)
+ assert idx.names == ["first", "second"]
+ level_names = [level.name for level in idx.levels]
assert level_names == idx.names
# setting bad names on existing
diff --git a/pandas/tests/indexes/multi/test_reindex.py b/pandas/tests/indexes/multi/test_reindex.py
index d949a390bd97f..d1b4fe8b98760 100644
--- a/pandas/tests/indexes/multi/test_reindex.py
+++ b/pandas/tests/indexes/multi/test_reindex.py
@@ -12,13 +12,13 @@
def test_reindex(idx):
result, indexer = idx.reindex(list(idx[:4]))
assert isinstance(result, MultiIndex)
- assert result.names == ("first", "second")
+ assert result.names == ["first", "second"]
assert [level.name for level in result.levels] == ["first", "second"]
result, indexer = idx.reindex(list(idx))
assert isinstance(result, MultiIndex)
assert indexer is None
- assert result.names == ("first", "second")
+ assert result.names == ["first", "second"]
assert [level.name for level in result.levels] == ["first", "second"]
@@ -52,27 +52,27 @@ def test_reindex_preserves_names_when_target_is_list_or_ndarray(idx):
other_dtype = MultiIndex.from_product([[1, 2], [3, 4]])
# list & ndarray cases
- assert idx.reindex([])[0].names == (None, None)
- assert idx.reindex(np.array([]))[0].names == (None, None)
- assert idx.reindex(target.tolist())[0].names == (None, None)
- assert idx.reindex(target.values)[0].names == (None, None)
- assert idx.reindex(other_dtype.tolist())[0].names == (None, None)
- assert idx.reindex(other_dtype.values)[0].names == (None, None)
+ assert idx.reindex([])[0].names == [None, None]
+ assert idx.reindex(np.array([]))[0].names == [None, None]
+ assert idx.reindex(target.tolist())[0].names == [None, None]
+ assert idx.reindex(target.values)[0].names == [None, None]
+ assert idx.reindex(other_dtype.tolist())[0].names == [None, None]
+ assert idx.reindex(other_dtype.values)[0].names == [None, None]
idx.names = ["foo", "bar"]
- assert idx.reindex([])[0].names == ("foo", "bar")
- assert idx.reindex(np.array([]))[0].names == ("foo", "bar")
- assert idx.reindex(target.tolist())[0].names == ("foo", "bar")
- assert idx.reindex(target.values)[0].names == ("foo", "bar")
- assert idx.reindex(other_dtype.tolist())[0].names == ("foo", "bar")
- assert idx.reindex(other_dtype.values)[0].names == ("foo", "bar")
+ assert idx.reindex([])[0].names == ["foo", "bar"]
+ assert idx.reindex(np.array([]))[0].names == ["foo", "bar"]
+ assert idx.reindex(target.tolist())[0].names == ["foo", "bar"]
+ assert idx.reindex(target.values)[0].names == ["foo", "bar"]
+ assert idx.reindex(other_dtype.tolist())[0].names == ["foo", "bar"]
+ assert idx.reindex(other_dtype.values)[0].names == ["foo", "bar"]
def test_reindex_lvl_preserves_names_when_target_is_list_or_array():
# GH7774
idx = MultiIndex.from_product([[0, 1], ["a", "b"]], names=["foo", "bar"])
- assert idx.reindex([], level=0)[0].names == ("foo", "bar")
- assert idx.reindex([], level=1)[0].names == ("foo", "bar")
+ assert idx.reindex([], level=0)[0].names == ["foo", "bar"]
+ assert idx.reindex([], level=1)[0].names == ["foo", "bar"]
def test_reindex_lvl_preserves_type_if_target_is_empty_list_or_array(
diff --git a/pandas/tests/indexes/multi/test_reshape.py b/pandas/tests/indexes/multi/test_reshape.py
index 1bf91a09ee754..06dbb33aadf97 100644
--- a/pandas/tests/indexes/multi/test_reshape.py
+++ b/pandas/tests/indexes/multi/test_reshape.py
@@ -23,7 +23,7 @@ def test_insert(idx):
exp0 = Index(list(idx.levels[0]) + ["abc"], name="first")
tm.assert_index_equal(new_index.levels[0], exp0)
- assert new_index.names == ("first", "second")
+ assert new_index.names == ["first", "second"]
exp1 = Index(list(idx.levels[1]) + ["three"], name="second")
tm.assert_index_equal(new_index.levels[1], exp1)
diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py
index 15076b8705bdc..9354984538c58 100644
--- a/pandas/tests/indexes/multi/test_setops.py
+++ b/pandas/tests/indexes/multi/test_setops.py
@@ -121,7 +121,7 @@ def test_multiindex_symmetric_difference():
idx2 = idx.copy().rename(["A", "B"])
result = idx.symmetric_difference(idx2)
- assert result.names == (None, None)
+ assert result.names == [None, None]
def test_empty(idx):
diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py
index a5a678af4aba7..3d21ee8a57716 100644
--- a/pandas/tests/indexes/multi/test_sorting.py
+++ b/pandas/tests/indexes/multi/test_sorting.py
@@ -13,6 +13,7 @@
Timestamp,
)
import pandas._testing as tm
+from pandas.core.indexes.frozen import FrozenList
def test_sortlevel(idx):
@@ -285,9 +286,8 @@ def test_remove_unused_levels_with_nan():
idx = idx.set_levels(["a", np.nan], level="id1")
idx = idx.remove_unused_levels()
result = idx.levels
- expected = (Index(["a", np.nan], name="id1"), Index([4], name="id2"))
- for res, exp in zip(result, expected):
- tm.assert_index_equal(res, exp)
+ expected = FrozenList([["a", np.nan], [4]])
+ assert str(result) == str(expected)
def test_sort_values_nan():
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 484f647c7a8f9..3a2d04d3ffdc2 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -908,7 +908,7 @@ def test_isin_level_kwarg_bad_level_raises(self, index):
@pytest.mark.parametrize("label", [1.0, "foobar", "xyzzy", np.nan])
def test_isin_level_kwarg_bad_label_raises(self, label, index):
if isinstance(index, MultiIndex):
- index = index.rename(("foo", "bar") + index.names[2:])
+ index = index.rename(["foo", "bar"] + index.names[2:])
msg = f"'Level {label} not found'"
else:
index = index.rename("foo")
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index 732f7cc624f86..b6e1c3698c258 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -119,7 +119,7 @@ def test_set_name_methods(self, index_flat):
# should return None
assert res is None
assert index.name == new_name
- assert index.names == (new_name,)
+ assert index.names == [new_name]
with pytest.raises(ValueError, match="Level must be None"):
index.set_names("a", level=0)
@@ -127,7 +127,7 @@ def test_set_name_methods(self, index_flat):
name = ("A", "B")
index.rename(name, inplace=True)
assert index.name == name
- assert index.names == (name,)
+ assert index.names == [name]
@pytest.mark.xfail
def test_set_names_single_label_no_level(self, index_flat):
diff --git a/pandas/tests/indexes/test_frozen.py b/pandas/tests/indexes/test_frozen.py
new file mode 100644
index 0000000000000..ace66b5b06a51
--- /dev/null
+++ b/pandas/tests/indexes/test_frozen.py
@@ -0,0 +1,113 @@
+import re
+
+import pytest
+
+from pandas.core.indexes.frozen import FrozenList
+
+
+@pytest.fixture
+def lst():
+ return [1, 2, 3, 4, 5]
+
+
+@pytest.fixture
+def container(lst):
+ return FrozenList(lst)
+
+
+@pytest.fixture
+def unicode_container():
+ return FrozenList(["\u05d0", "\u05d1", "c"])
+
+
+class TestFrozenList:
+ def check_mutable_error(self, *args, **kwargs):
+ # Pass whatever function you normally would to pytest.raises
+ # (after the Exception kind).
+ mutable_regex = re.compile("does not support mutable operations")
+ msg = "'(_s)?re.(SRE_)?Pattern' object is not callable"
+ with pytest.raises(TypeError, match=msg):
+ mutable_regex(*args, **kwargs)
+
+ def test_no_mutable_funcs(self, container):
+ def setitem():
+ container[0] = 5
+
+ self.check_mutable_error(setitem)
+
+ def setslice():
+ container[1:2] = 3
+
+ self.check_mutable_error(setslice)
+
+ def delitem():
+ del container[0]
+
+ self.check_mutable_error(delitem)
+
+ def delslice():
+ del container[0:3]
+
+ self.check_mutable_error(delslice)
+
+ mutable_methods = ("extend", "pop", "remove", "insert")
+
+ for meth in mutable_methods:
+ self.check_mutable_error(getattr(container, meth))
+
+ def test_slicing_maintains_type(self, container, lst):
+ result = container[1:2]
+ expected = lst[1:2]
+ self.check_result(result, expected)
+
+ def check_result(self, result, expected):
+ assert isinstance(result, FrozenList)
+ assert result == expected
+
+ def test_string_methods_dont_fail(self, container):
+ repr(container)
+ str(container)
+ bytes(container)
+
+ def test_tricky_container(self, unicode_container):
+ repr(unicode_container)
+ str(unicode_container)
+
+ def test_add(self, container, lst):
+ result = container + (1, 2, 3)
+ expected = FrozenList(lst + [1, 2, 3])
+ self.check_result(result, expected)
+
+ result = (1, 2, 3) + container
+ expected = FrozenList([1, 2, 3] + lst)
+ self.check_result(result, expected)
+
+ def test_iadd(self, container, lst):
+ q = r = container
+
+ q += [5]
+ self.check_result(q, lst + [5])
+
+ # Other shouldn't be mutated.
+ self.check_result(r, lst)
+
+ def test_union(self, container, lst):
+ result = container.union((1, 2, 3))
+ expected = FrozenList(lst + [1, 2, 3])
+ self.check_result(result, expected)
+
+ def test_difference(self, container):
+ result = container.difference([2])
+ expected = FrozenList([1, 3, 4, 5])
+ self.check_result(result, expected)
+
+ def test_difference_dupe(self):
+ result = FrozenList([1, 2, 3, 2]).difference([2])
+ expected = FrozenList([1, 3])
+ self.check_result(result, expected)
+
+ def test_tricky_container_to_bytes_raises(self, unicode_container):
+ # GH 26447
+ msg = "^'str' object cannot be interpreted as an integer$"
+ with pytest.raises(TypeError, match=msg):
+ bytes(unicode_container)
diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py
index 78f701fff6e29..dbfabf7666d25 100644
--- a/pandas/tests/indexing/multiindex/test_partial.py
+++ b/pandas/tests/indexing/multiindex/test_partial.py
@@ -150,7 +150,7 @@ def test_getitem_intkey_leading_level(
# GH#33355 dont fall-back to positional when leading level is int
ymd = multiindex_year_month_day_dataframe_random_data
levels = ymd.index.levels
- ymd.index = ymd.index.set_levels((levels[0].astype(dtype),) + levels[1:])
+ ymd.index = ymd.index.set_levels([levels[0].astype(dtype)] + levels[1:])
ser = ymd["A"]
mi = ser.index
assert isinstance(mi, MultiIndex)
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index afc9974c75e6a..a728f6ec6ca9a 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -115,7 +115,7 @@ def test_multiindex(self, df_schema, using_infer_string):
{"name": "C", "type": "datetime"},
{"name": "D", "type": "duration"},
],
- "primaryKey": ("level_0", "level_1"),
+ "primaryKey": ["level_0", "level_1"],
}
if using_infer_string:
expected["fields"][0] = {
@@ -128,7 +128,7 @@ def test_multiindex(self, df_schema, using_infer_string):
df.index.names = ["idx0", None]
expected["fields"][0]["name"] = "idx0"
- expected["primaryKey"] = ("idx0", "level_1")
+ expected["primaryKey"] = ["idx0", "level_1"]
result = build_table_schema(df, version=False)
assert result == expected
@@ -598,21 +598,21 @@ def test_categorical(self):
(pd.Index([1], name="myname"), "myname", "name"),
(
pd.MultiIndex.from_product([("a", "b"), ("c", "d")]),
- ("level_0", "level_1"),
+ ["level_0", "level_1"],
"names",
),
(
pd.MultiIndex.from_product(
[("a", "b"), ("c", "d")], names=["n1", "n2"]
),
- ("n1", "n2"),
+ ["n1", "n2"],
"names",
),
(
pd.MultiIndex.from_product(
[("a", "b"), ("c", "d")], names=["n1", None]
),
- ("n1", "level_1"),
+ ["n1", "level_1"],
"names",
),
],
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index e62df0bc1c977..471f7b8958ee4 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -1024,7 +1024,7 @@ def test_columns_multiindex_modified(tmp_path, setup_path):
df.index.name = "letters"
df = df.set_index(keys="E", append=True)
- data_columns = list(df.index.names) + df.columns.tolist()
+ data_columns = df.index.names + df.columns.tolist()
path = tmp_path / setup_path
df.to_hdf(
path,
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 67b1311a5a798..3083fa24ba8b5 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -2346,18 +2346,15 @@ def test_read_table_index_col(conn, request, test_frame1):
sql.to_sql(test_frame1, "test_frame", conn)
result = sql.read_sql_table("test_frame", conn, index_col="index")
- assert result.index.names == ("index",)
+ assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", conn, index_col=["A", "B"])
- assert result.index.names == ("A", "B")
+ assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", conn, index_col=["A", "B"], columns=["C", "D"]
)
- assert result.index.names == (
- "A",
- "B",
- )
+ assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py
index 92e756756547d..2f9fd1eb421d4 100644
--- a/pandas/tests/reshape/concat/test_concat.py
+++ b/pandas/tests/reshape/concat/test_concat.py
@@ -125,7 +125,7 @@ def test_concat_keys_specific_levels(self):
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
- assert result.columns.names == ("group_key", None)
+ assert result.columns.names == ["group_key", None]
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py
index 33d9a721df6b7..7ae2fffa04205 100644
--- a/pandas/tests/reshape/merge/test_multi.py
+++ b/pandas/tests/reshape/merge/test_multi.py
@@ -814,10 +814,12 @@ def test_join_multi_levels2(self):
class TestJoinMultiMulti:
def test_join_multi_multi(self, left_multi, right_multi, join_type, on_cols_multi):
+ left_names = left_multi.index.names
+ right_names = right_multi.index.names
if join_type == "right":
- level_order = ["Origin", "Destination", "Period", "LinkType", "TripPurp"]
+ level_order = right_names + left_names.difference(right_names)
else:
- level_order = ["Origin", "Destination", "Period", "TripPurp", "LinkType"]
+ level_order = left_names + right_names.difference(left_names)
# Multi-index join tests
expected = (
merge(
@@ -839,10 +841,12 @@ def test_join_multi_empty_frames(
left_multi = left_multi.drop(columns=left_multi.columns)
right_multi = right_multi.drop(columns=right_multi.columns)
+ left_names = left_multi.index.names
+ right_names = right_multi.index.names
if join_type == "right":
- level_order = ["Origin", "Destination", "Period", "LinkType", "TripPurp"]
+ level_order = right_names + left_names.difference(right_names)
else:
- level_order = ["Origin", "Destination", "Period", "TripPurp", "LinkType"]
+ level_order = left_names + right_names.difference(left_names)
expected = (
merge(
diff --git a/pandas/tests/reshape/test_crosstab.py b/pandas/tests/reshape/test_crosstab.py
index c4af63fe5cc81..070c756e8c928 100644
--- a/pandas/tests/reshape/test_crosstab.py
+++ b/pandas/tests/reshape/test_crosstab.py
@@ -135,7 +135,7 @@ def test_crosstab_margins(self):
result = crosstab(a, [b, c], rownames=["a"], colnames=("b", "c"), margins=True)
assert result.index.names == ("a",)
- assert result.columns.names == ("b", "c")
+ assert result.columns.names == ["b", "c"]
all_cols = result["All", ""]
exp_cols = df.groupby(["a"]).size().astype("i8")
@@ -173,7 +173,7 @@ def test_crosstab_margins_set_margin_name(self):
)
assert result.index.names == ("a",)
- assert result.columns.names == ("b", "c")
+ assert result.columns.names == ["b", "c"]
all_cols = result["TOTAL", ""]
exp_cols = df.groupby(["a"]).size().astype("i8")
diff --git a/pandas/tests/series/methods/test_rename_axis.py b/pandas/tests/series/methods/test_rename_axis.py
index 60175242a06b5..58c095d697ede 100644
--- a/pandas/tests/series/methods/test_rename_axis.py
+++ b/pandas/tests/series/methods/test_rename_axis.py
@@ -15,13 +15,13 @@ def test_rename_axis_mapper(self):
ser = Series(list(range(len(mi))), index=mi)
result = ser.rename_axis(index={"ll": "foo"})
- assert result.index.names == ("foo", "nn")
+ assert result.index.names == ["foo", "nn"]
result = ser.rename_axis(index=str.upper, axis=0)
- assert result.index.names == ("LL", "NN")
+ assert result.index.names == ["LL", "NN"]
result = ser.rename_axis(index=["foo", "goo"])
- assert result.index.names == ("foo", "goo")
+ assert result.index.names == ["foo", "goo"]
with pytest.raises(TypeError, match="unexpected"):
ser.rename_axis(columns="wrong")
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 9c2b9a76bbb83..bcecd1b2d5eec 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -205,6 +205,18 @@ class MyList(list):
val = MyList([True])
assert com.is_bool_indexer(val)
+ def test_frozenlist(self):
+ # GH#42461
+ data = {"col1": [1, 2], "col2": [3, 4]}
+ df = pd.DataFrame(data=data)
+
+ frozen = df.index.names[1:]
+ assert not com.is_bool_indexer(frozen)
+
+ result = df[frozen]
+ expected = df[[]]
+ tm.assert_frame_equal(result, expected)
+
@pytest.mark.parametrize("with_exception", [True, False])
def test_temp_setattr(with_exception):
diff --git a/pandas/tests/util/test_assert_index_equal.py b/pandas/tests/util/test_assert_index_equal.py
index 78ff774c188fe..dc6efdcec380e 100644
--- a/pandas/tests/util/test_assert_index_equal.py
+++ b/pandas/tests/util/test_assert_index_equal.py
@@ -198,8 +198,8 @@ def test_index_equal_names(name1, name2):
msg = f"""Index are different
Attribute "names" are different
-\\[left\\]: \\({name1},\\)
-\\[right\\]: \\({name2},\\)"""
+\\[left\\]: \\[{name1}\\]
+\\[right\\]: \\[{name2}\\]"""
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2)
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 47bfc219d0fe9..85821ed2cfb6f 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -589,7 +589,7 @@ def test_multi_index_names():
result = df.rolling(3).cov()
tm.assert_index_equal(result.columns, df.columns)
- assert result.index.names == (None, "1", "2")
+ assert result.index.names == [None, "1", "2"]
def test_rolling_axis_sum():
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Manual revert of #57042
Closes #57607
The behavior described in that issue seems quite undesirable, especially for a breaking change.
cc @mroeschke | https://api.github.com/repos/pandas-dev/pandas/pulls/57788 | 2024-03-09T03:18:34Z | 2024-04-11T15:38:21Z | 2024-04-11T15:38:21Z | 2024-04-11T21:31:05Z |
PERF: Categorical(range).categories returns RangeIndex instead of Index | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index ca2ca07ff2fae..94501c4fddfd9 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -256,6 +256,7 @@ Removal of prior version deprecations/changes
Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
+- :attr:`Categorical.categories` returns a :class:`RangeIndex` columns instead of an :class:`Index` if the constructed ``values`` was a ``range``. (:issue:`57787`)
- :func:`concat` returns a :class:`RangeIndex` level in the :class:`MultiIndex` result when ``keys`` is a ``range`` or :class:`RangeIndex` (:issue:`57542`)
- :meth:`RangeIndex.append` returns a :class:`RangeIndex` instead of a :class:`Index` when appending values that could continue the :class:`RangeIndex` (:issue:`57467`)
- :meth:`Series.str.extract` returns a :class:`RangeIndex` columns instead of an :class:`Index` column when possible (:issue:`57542`)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index af8dc08c1ec26..7710d2930ddae 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -431,6 +431,10 @@ def __init__(
if isinstance(vdtype, CategoricalDtype):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
+ elif isinstance(values, range):
+ from pandas.core.indexes.range import RangeIndex
+
+ values = RangeIndex(values)
elif not isinstance(values, (ABCIndex, ABCSeries, ExtensionArray)):
values = com.convert_to_list_like(values)
if isinstance(values, list) and len(values) == 0:
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index 03678fb64d3e9..857b14e2a2558 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -24,6 +24,7 @@
IntervalIndex,
MultiIndex,
NaT,
+ RangeIndex,
Series,
Timestamp,
date_range,
@@ -779,3 +780,17 @@ def test_constructor_preserves_freq(self):
result = cat.categories.freq
assert expected == result
+
+ @pytest.mark.parametrize(
+ "values, categories",
+ [
+ [range(5), None],
+ [range(4), range(5)],
+ [[0, 1, 2, 3], range(5)],
+ [[], range(5)],
+ ],
+ )
+ def test_range_values_preserves_rangeindex_categories(self, values, categories):
+ result = Categorical(values=values, categories=categories).categories
+ expected = RangeIndex(range(5))
+ tm.assert_index_equal(result, expected, exact=True)
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/57787 | 2024-03-08T23:48:13Z | 2024-03-11T03:18:09Z | 2024-03-11T03:18:09Z | 2024-03-11T04:32:33Z |
PERF: Allow ensure_index_from_sequence to return RangeIndex | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index c2df773326dc9..3c01778e05f3d 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -7154,6 +7154,43 @@ def shape(self) -> Shape:
return (len(self),)
+def maybe_sequence_to_range(sequence) -> Any | range:
+ """
+ Convert a 1D, non-pandas sequence to a range if possible.
+
+ Returns the input if not possible.
+
+ Parameters
+ ----------
+ sequence : 1D sequence
+ names : sequence of str
+
+ Returns
+ -------
+ Any : input or range
+ """
+ if isinstance(sequence, (ABCSeries, Index)):
+ return sequence
+ np_sequence = np.asarray(sequence)
+ if np_sequence.dtype.kind != "i" or len(np_sequence) == 1:
+ return sequence
+ elif len(np_sequence) == 0:
+ return range(0)
+ diff = np_sequence[1] - np_sequence[0]
+ if diff == 0:
+ return sequence
+ elif len(np_sequence) == 2:
+ return range(np_sequence[0], np_sequence[1] + diff, diff)
+ maybe_range_indexer, remainder = np.divmod(np_sequence - np_sequence[0], diff)
+ if (
+ lib.is_range_indexer(maybe_range_indexer, len(maybe_range_indexer))
+ and not remainder.any()
+ ):
+ return range(np_sequence[0], np_sequence[-1] + diff, diff)
+ else:
+ return sequence
+
+
def ensure_index_from_sequences(sequences, names=None) -> Index:
"""
Construct an index from sequences of data.
@@ -7172,8 +7209,8 @@ def ensure_index_from_sequences(sequences, names=None) -> Index:
Examples
--------
- >>> ensure_index_from_sequences([[1, 2, 3]], names=["name"])
- Index([1, 2, 3], dtype='int64', name='name')
+ >>> ensure_index_from_sequences([[1, 2, 4]], names=["name"])
+ Index([1, 2, 4], dtype='int64', name='name')
>>> ensure_index_from_sequences([["a", "a"], ["a", "b"]], names=["L1", "L2"])
MultiIndex([('a', 'a'),
@@ -7189,8 +7226,9 @@ def ensure_index_from_sequences(sequences, names=None) -> Index:
if len(sequences) == 1:
if names is not None:
names = names[0]
- return Index(sequences[0], name=names)
+ return Index(maybe_sequence_to_range(sequences[0]), name=names)
else:
+ # TODO: Apply maybe_sequence_to_range to sequences?
return MultiIndex.from_arrays(sequences, names=names)
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 82bf8d7c70c7e..c573828a22032 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -29,7 +29,6 @@
doc,
)
-from pandas.core.dtypes import missing
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.common import (
ensure_platform_int,
@@ -475,28 +474,13 @@ def _shallow_copy(self, values, name: Hashable = no_default):
if values.dtype.kind == "i" and values.ndim == 1:
# GH 46675 & 43885: If values is equally spaced, return a
# more memory-compact RangeIndex instead of Index with 64-bit dtype
- if len(values) == 0:
- return type(self)._simple_new(_empty_range, name=name)
- elif len(values) == 1:
+ if len(values) == 1:
start = values[0]
new_range = range(start, start + self.step, self.step)
return type(self)._simple_new(new_range, name=name)
- diff = values[1] - values[0]
- if not missing.isna(diff) and diff != 0:
- if len(values) == 2:
- # Can skip is_range_indexer check
- new_range = range(values[0], values[-1] + diff, diff)
- return type(self)._simple_new(new_range, name=name)
- else:
- maybe_range_indexer, remainder = np.divmod(values - values[0], diff)
- if (
- lib.is_range_indexer(
- maybe_range_indexer, len(maybe_range_indexer)
- )
- and not remainder.any()
- ):
- new_range = range(values[0], values[-1] + diff, diff)
- return type(self)._simple_new(new_range, name=name)
+ maybe_range = ibase.maybe_sequence_to_range(values)
+ if isinstance(maybe_range, range):
+ return type(self)._simple_new(maybe_range, name=name)
return self._constructor._simple_new(values, name=name)
def _view(self) -> Self:
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 4c703c3af944b..beee14197bfb8 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1514,8 +1514,10 @@ class TestIndexUtils:
@pytest.mark.parametrize(
"data, names, expected",
[
- ([[1, 2, 3]], None, Index([1, 2, 3])),
- ([[1, 2, 3]], ["name"], Index([1, 2, 3], name="name")),
+ ([[1, 2, 4]], None, Index([1, 2, 4])),
+ ([[1, 2, 4]], ["name"], Index([1, 2, 4], name="name")),
+ ([[1, 2, 3]], None, RangeIndex(1, 4)),
+ ([[1, 2, 3]], ["name"], RangeIndex(1, 4, name="name")),
(
[["a", "a"], ["c", "d"]],
None,
@@ -1530,7 +1532,7 @@ class TestIndexUtils:
)
def test_ensure_index_from_sequences(self, data, names, expected):
result = ensure_index_from_sequences(data, names)
- tm.assert_index_equal(result, expected)
+ tm.assert_index_equal(result, expected, exact=True)
def test_ensure_index_mixed_closed_intervals(self):
# GH27172
| Discovered in https://github.com/pandas-dev/pandas/pull/57441
Builds on https://github.com/pandas-dev/pandas/pull/57752
| https://api.github.com/repos/pandas-dev/pandas/pulls/57786 | 2024-03-08T23:15:11Z | 2024-03-19T20:01:32Z | 2024-03-19T20:01:32Z | 2024-03-19T20:37:37Z |
DOC: Resolve RT03 errors in several methods #2 | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 1c37d9bf1c4b3..5437b4c66e300 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -628,13 +628,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.DataFrame.mean\
pandas.DataFrame.median\
pandas.DataFrame.min\
- pandas.DataFrame.nsmallest\
- pandas.DataFrame.nunique\
- pandas.DataFrame.pipe\
- pandas.DataFrame.plot.box\
- pandas.DataFrame.plot.density\
- pandas.DataFrame.plot.kde\
- pandas.DataFrame.plot.scatter\
pandas.DataFrame.pop\
pandas.DataFrame.prod\
pandas.DataFrame.product\
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 25501ff245e46..88fa1148c0dfc 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7432,6 +7432,7 @@ def nsmallest(
Returns
-------
DataFrame
+ DataFrame with the first `n` rows ordered by `columns` in ascending order.
See Also
--------
@@ -11898,6 +11899,7 @@ def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series:
Returns
-------
Series
+ Series with counts of unique values per row or column, depending on `axis`.
See Also
--------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5c8842162007d..1227f157a9fef 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5839,7 +5839,8 @@ def pipe(
Returns
-------
- the return type of ``func``.
+ The return type of ``func``.
+ The result of applying ``func`` to the Series or DataFrame.
See Also
--------
@@ -5855,7 +5856,7 @@ def pipe(
Examples
--------
- Constructing a income DataFrame from a dictionary.
+ Constructing an income DataFrame from a dictionary.
>>> data = [[8000, 1000], [9500, np.nan], [5000, 2000]]
>>> df = pd.DataFrame(data, columns=["Salary", "Others"])
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index edd619c264c7a..c9d1e5a376bfd 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -1335,6 +1335,7 @@ def box(self, by: IndexLabel | None = None, **kwargs) -> PlotAccessor:
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
+ The matplotlib axes containing the box plot.
See Also
--------
@@ -1466,6 +1467,7 @@ def kde(
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
+ The matplotlib axes containing the KDE plot.
See Also
--------
@@ -1745,6 +1747,7 @@ def scatter(
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
+ The matplotlib axes containing the scatter plot.
See Also
--------
| Resolve all RT03 errors for the following cases:
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.nsmallest
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.nunique
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.pipe
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.plot.box
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.plot.density
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.plot.kde
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.plot.scatter
- xref DOC: fix RT03 errors in docstrings #57416
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/57785 | 2024-03-08T21:54:56Z | 2024-03-08T23:50:31Z | 2024-03-08T23:50:31Z | 2024-03-08T23:50:38Z |
Backport PR #57780 on branch 2.2.x (COMPAT: Adapt to Numpy 2.0 dtype changes) | diff --git a/pandas/_libs/src/datetime/pd_datetime.c b/pandas/_libs/src/datetime/pd_datetime.c
index 19de51be6e1b2..4c1969f6d9f57 100644
--- a/pandas/_libs/src/datetime/pd_datetime.c
+++ b/pandas/_libs/src/datetime/pd_datetime.c
@@ -20,6 +20,9 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
#include <Python.h>
#include "datetime.h"
+/* Need to import_array for np_datetime.c (for NumPy 1.x support only) */
+#define PY_ARRAY_UNIQUE_SYMBOL PANDAS_DATETIME_NUMPY
+#include "numpy/ndarrayobject.h"
#include "pandas/datetime/pd_datetime.h"
#include "pandas/portable.h"
@@ -255,5 +258,6 @@ static struct PyModuleDef pandas_datetimemodule = {
PyMODINIT_FUNC PyInit_pandas_datetime(void) {
PyDateTime_IMPORT;
+ import_array();
return PyModuleDef_Init(&pandas_datetimemodule);
}
diff --git a/pandas/_libs/src/vendored/numpy/datetime/np_datetime.c b/pandas/_libs/src/vendored/numpy/datetime/np_datetime.c
index 277d01807f2f3..934c54fafb634 100644
--- a/pandas/_libs/src/vendored/numpy/datetime/np_datetime.c
+++ b/pandas/_libs/src/vendored/numpy/datetime/np_datetime.c
@@ -16,8 +16,6 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
// Licence at LICENSES/NUMPY_LICENSE
-#define NO_IMPORT
-
#ifndef NPY_NO_DEPRECATED_API
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#endif // NPY_NO_DEPRECATED_API
@@ -25,7 +23,10 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
#include <Python.h>
#include "pandas/vendored/numpy/datetime/np_datetime.h"
-#include <numpy/ndarraytypes.h>
+
+#define NO_IMPORT_ARRAY
+#define PY_ARRAY_UNIQUE_SYMBOL PANDAS_DATETIME_NUMPY
+#include <numpy/ndarrayobject.h>
#include <numpy/npy_common.h>
#if defined(_WIN32)
@@ -1070,5 +1071,8 @@ void pandas_timedelta_to_timedeltastruct(npy_timedelta td,
*/
PyArray_DatetimeMetaData
get_datetime_metadata_from_dtype(PyArray_Descr *dtype) {
- return (((PyArray_DatetimeDTypeMetaData *)dtype->c_metadata)->meta);
+#if NPY_ABI_VERSION < 0x02000000
+#define PyDataType_C_METADATA(dtype) ((dtype)->c_metadata)
+#endif
+ return ((PyArray_DatetimeDTypeMetaData *)PyDataType_C_METADATA(dtype))->meta;
}
| Backport PR #57780: COMPAT: Adapt to Numpy 2.0 dtype changes | https://api.github.com/repos/pandas-dev/pandas/pulls/57784 | 2024-03-08T20:28:21Z | 2024-03-08T21:36:00Z | 2024-03-08T21:36:00Z | 2024-03-08T21:36:00Z |
Fix SparseDtype comparison | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index ca2ca07ff2fae..16be9e0a4fc34 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -280,6 +280,7 @@ Performance improvements
Bug fixes
~~~~~~~~~
+- Fixed bug in :class:`SparseDtype` for equal comparison with na fill value. (:issue:`54770`)
- Fixed bug in :meth:`DataFrame.join` inconsistently setting result index name (:issue:`55815`)
- Fixed bug in :meth:`DataFrame.to_string` that raised ``StopIteration`` with nested DataFrames. (:issue:`16098`)
- Fixed bug in :meth:`DataFrame.update` bool dtype being converted to object (:issue:`55509`)
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 2bb2556c88204..f94d32a3b8547 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -1705,17 +1705,15 @@ def __eq__(self, other: object) -> bool:
if isinstance(other, type(self)):
subtype = self.subtype == other.subtype
- if self._is_na_fill_value:
+ if self._is_na_fill_value or other._is_na_fill_value:
# this case is complicated by two things:
# SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
# SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
# i.e. we want to treat any floating-point NaN as equal, but
# not a floating-point NaN and a datetime NaT.
- fill_value = (
- other._is_na_fill_value
- and isinstance(self.fill_value, type(other.fill_value))
- or isinstance(other.fill_value, type(self.fill_value))
- )
+ fill_value = isinstance(
+ self.fill_value, type(other.fill_value)
+ ) or isinstance(other.fill_value, type(self.fill_value))
else:
with warnings.catch_warnings():
# Ignore spurious numpy warning
diff --git a/pandas/tests/arrays/sparse/test_dtype.py b/pandas/tests/arrays/sparse/test_dtype.py
index 6fcbfe96a3df7..6f0d41333f2fd 100644
--- a/pandas/tests/arrays/sparse/test_dtype.py
+++ b/pandas/tests/arrays/sparse/test_dtype.py
@@ -68,6 +68,14 @@ def test_nans_equal():
assert b == a
+def test_nans_not_equal():
+ # GH 54770
+ a = SparseDtype(float, 0)
+ b = SparseDtype(float, pd.NA)
+ assert a != b
+ assert b != a
+
+
with warnings.catch_warnings():
msg = "Allowing arbitrary scalar fill_value in SparseDtype is deprecated"
warnings.filterwarnings("ignore", msg, category=FutureWarning)
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index c3a1d584170fb..cbca306ab0041 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -240,10 +240,6 @@ def test_fillna_limit_backfill(self, data_missing):
super().test_fillna_limit_backfill(data_missing)
def test_fillna_no_op_returns_copy(self, data, request):
- if np.isnan(data.fill_value):
- request.applymarker(
- pytest.mark.xfail(reason="returns array with different fill value")
- )
super().test_fillna_no_op_returns_copy(data)
@pytest.mark.xfail(reason="Unsupported")
@@ -400,6 +396,8 @@ def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
"rmul",
"floordiv",
"rfloordiv",
+ "truediv",
+ "rtruediv",
"pow",
"mod",
"rmod",
| - [x] closes #54770(Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/57783 | 2024-03-08T18:49:49Z | 2024-03-09T20:09:43Z | 2024-03-09T20:09:43Z | 2024-03-09T21:13:09Z |
DOC: Resolve RT03 errors for selected methods | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 1c37d9bf1c4b3..d37f4bcf44ee4 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -614,15 +614,10 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
MSG='Partially validate docstrings (RT03)' ; echo $MSG
$BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=RT03 --ignore_functions \
- pandas.DataFrame.expanding\
- pandas.DataFrame.filter\
- pandas.DataFrame.first_valid_index\
- pandas.DataFrame.get\
pandas.DataFrame.hist\
pandas.DataFrame.infer_objects\
pandas.DataFrame.kurt\
pandas.DataFrame.kurtosis\
- pandas.DataFrame.last_valid_index\
pandas.DataFrame.mask\
pandas.DataFrame.max\
pandas.DataFrame.mean\
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5c8842162007d..e65764b56428b 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4183,15 +4183,19 @@ def get(self, key, default=None):
"""
Get item from object for given key (ex: DataFrame column).
- Returns default value if not found.
+ Returns ``default`` value if not found.
Parameters
----------
key : object
+ Key for which item should be returned.
+ default : object, default None
+ Default value to return if key is not found.
Returns
-------
same type as items contained in object
+ Item for given key or ``default`` value, if key is not found.
Examples
--------
@@ -5362,10 +5366,11 @@ def filter(
axis: Axis | None = None,
) -> Self:
"""
- Subset the dataframe rows or columns according to the specified index labels.
+ Subset the DataFrame or Series according to the specified index labels.
- Note that this routine does not filter a dataframe on its
- contents. The filter is applied to the labels of the index.
+ For DataFrame, filter rows or columns depending on ``axis`` argument.
+ Note that this routine does not filter based on content.
+ The filter is applied to the labels of the index.
Parameters
----------
@@ -5378,11 +5383,13 @@ def filter(
axis : {0 or 'index', 1 or 'columns', None}, default None
The axis to filter on, expressed either as an index (int)
or axis name (str). By default this is the info axis, 'columns' for
- DataFrame. For `Series` this parameter is unused and defaults to `None`.
+ ``DataFrame``. For ``Series`` this parameter is unused and defaults to
+ ``None``.
Returns
-------
- same type as input object
+ Same type as caller
+ The filtered subset of the DataFrame or Series.
See Also
--------
@@ -11744,11 +11751,15 @@ def _find_valid_index(self, *, how: str) -> Hashable:
@doc(position="first", klass=_shared_doc_kwargs["klass"])
def first_valid_index(self) -> Hashable:
"""
- Return index for {position} non-NA value or None, if no non-NA value is found.
+ Return index for {position} non-missing value or None, if no value is found.
+
+ See the :ref:`User Guide <missing_data>` for more information
+ on which values are considered missing.
Returns
-------
type of index
+ Index of {position} non-missing value.
Examples
--------
| Resolve all RT03 errors for the following cases:
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.expanding
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.filter
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.first_valid_index
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.last_valid_index
scripts/validate_docstrings.py --format=actions --errors=RT03 pandas.DataFrame.get
- xref DOC: fix RT03 errors in docstrings #57416
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/57782 | 2024-03-08T18:15:20Z | 2024-03-08T22:47:25Z | 2024-03-08T22:47:25Z | 2024-03-08T22:47:32Z |
COMPAT: Adapt to Numpy 2.0 dtype changes | diff --git a/pandas/_libs/src/datetime/pd_datetime.c b/pandas/_libs/src/datetime/pd_datetime.c
index 19de51be6e1b2..4c1969f6d9f57 100644
--- a/pandas/_libs/src/datetime/pd_datetime.c
+++ b/pandas/_libs/src/datetime/pd_datetime.c
@@ -20,6 +20,9 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
#include <Python.h>
#include "datetime.h"
+/* Need to import_array for np_datetime.c (for NumPy 1.x support only) */
+#define PY_ARRAY_UNIQUE_SYMBOL PANDAS_DATETIME_NUMPY
+#include "numpy/ndarrayobject.h"
#include "pandas/datetime/pd_datetime.h"
#include "pandas/portable.h"
@@ -255,5 +258,6 @@ static struct PyModuleDef pandas_datetimemodule = {
PyMODINIT_FUNC PyInit_pandas_datetime(void) {
PyDateTime_IMPORT;
+ import_array();
return PyModuleDef_Init(&pandas_datetimemodule);
}
diff --git a/pandas/_libs/src/vendored/numpy/datetime/np_datetime.c b/pandas/_libs/src/vendored/numpy/datetime/np_datetime.c
index 277d01807f2f3..934c54fafb634 100644
--- a/pandas/_libs/src/vendored/numpy/datetime/np_datetime.c
+++ b/pandas/_libs/src/vendored/numpy/datetime/np_datetime.c
@@ -16,8 +16,6 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
// Licence at LICENSES/NUMPY_LICENSE
-#define NO_IMPORT
-
#ifndef NPY_NO_DEPRECATED_API
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#endif // NPY_NO_DEPRECATED_API
@@ -25,7 +23,10 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
#include <Python.h>
#include "pandas/vendored/numpy/datetime/np_datetime.h"
-#include <numpy/ndarraytypes.h>
+
+#define NO_IMPORT_ARRAY
+#define PY_ARRAY_UNIQUE_SYMBOL PANDAS_DATETIME_NUMPY
+#include <numpy/ndarrayobject.h>
#include <numpy/npy_common.h>
#if defined(_WIN32)
@@ -1070,5 +1071,8 @@ void pandas_timedelta_to_timedeltastruct(npy_timedelta td,
*/
PyArray_DatetimeMetaData
get_datetime_metadata_from_dtype(PyArray_Descr *dtype) {
- return (((PyArray_DatetimeDTypeMetaData *)dtype->c_metadata)->meta);
+#if NPY_ABI_VERSION < 0x02000000
+#define PyDataType_C_METADATA(dtype) ((dtype)->c_metadata)
+#endif
+ return ((PyArray_DatetimeDTypeMetaData *)PyDataType_C_METADATA(dtype))->meta;
}
| Based on Thomas Li's work, but wanted to try and see that this is right.
Replaces gh-13466. | https://api.github.com/repos/pandas-dev/pandas/pulls/57780 | 2024-03-08T17:34:01Z | 2024-03-08T20:27:52Z | 2024-03-08T20:27:52Z | 2024-03-08T20:49:47Z |
Fix rank method with nullable int | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index cd6977f43d322..1146d7fb1ae4a 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -281,6 +281,7 @@ Bug fixes
- Fixed bug in :meth:`DataFrame.to_string` that raised ``StopIteration`` with nested DataFrames. (:issue:`16098`)
- Fixed bug in :meth:`DataFrame.update` bool dtype being converted to object (:issue:`55509`)
- Fixed bug in :meth:`Series.diff` allowing non-integer values for the ``periods`` argument. (:issue:`56607`)
+- Fixed bug in :meth:`Series.rank` that doesn't preserve missing values for nullable integers when ``na_option='keep'``. (:issue:`56976`)
Categorical
^^^^^^^^^^^
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 399be217af9d1..86831f072bb8f 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -2206,7 +2206,7 @@ def _rank(
raise NotImplementedError
return rank(
- self._values_for_argsort(),
+ self,
axis=axis,
method=method,
na_option=na_option,
diff --git a/pandas/tests/series/methods/test_rank.py b/pandas/tests/series/methods/test_rank.py
index 776c5633cb4b3..2d7fde130ce70 100644
--- a/pandas/tests/series/methods/test_rank.py
+++ b/pandas/tests/series/methods/test_rank.py
@@ -234,6 +234,16 @@ def test_rank_categorical(self):
tm.assert_series_equal(na_ser.rank(na_option="bottom", pct=True), exp_bot)
tm.assert_series_equal(na_ser.rank(na_option="keep", pct=True), exp_keep)
+ def test_rank_nullable_integer(self):
+ # GH 56976
+ exp = Series([np.nan, 2, np.nan, 3, 3, 2, 3, 1])
+ exp = exp.astype("Int64")
+ result = exp.rank(na_option="keep")
+
+ expected = Series([np.nan, 2.5, np.nan, 5.0, 5.0, 2.5, 5.0, 1.0])
+
+ tm.assert_series_equal(result, expected)
+
def test_rank_signature(self):
s = Series([0, 1])
s.rank(method="average")
| We can't pass only `self._values_for_argsort()` to `rank` as the missing data info will be lost. Fortunately, passing `self` directly to `rank` seems to work just fine.
- [x] closes #56976 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/57779 | 2024-03-08T12:23:59Z | 2024-03-08T21:37:04Z | 2024-03-08T21:37:04Z | 2024-03-17T07:53:37Z |
DOC: Fix description for pd.concat sort argument | diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 88323e5304cc4..f4df4fbb54827 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -203,10 +203,10 @@ def concat(
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation.
sort : bool, default False
- Sort non-concatenation axis if it is not already aligned. One exception to
- this is when the non-concatentation axis is a DatetimeIndex and join='outer'
- and the axis is not already aligned. In that case, the non-concatenation
- axis is always sorted lexicographically.
+ Sort non-concatenation axis. One exception to this is when the
+ non-concatentation axis is a DatetimeIndex and join='outer' and the axis is
+ not already aligned. In that case, the non-concatenation axis is always
+ sorted lexicographically.
copy : bool, default True
If False, do not copy data unnecessarily.
| - [x] closes #57753
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/57776 | 2024-03-08T01:28:07Z | 2024-03-08T17:52:32Z | 2024-03-08T17:52:32Z | 2024-03-08T22:25:57Z |
Enforce numpydoc's GL05 | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index c3fe73acabcbf..1c37d9bf1c4b3 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -65,8 +65,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (EX01, EX03, EX04, GL01, GL02, GL03, GL04, GL06, GL07, GL09, GL10, PD01, PR03, PR04, PR05, PR06, PR08, PR09, PR10, RT01, RT02, RT04, RT05, SA02, SA03, SA04, SA05, SS01, SS02, SS03, SS04, SS05, SS06)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX01,EX03,EX04,GL01,GL02,GL03,GL04,GL06,GL07,GL09,GL10,PD01,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT02,RT04,RT05,SA02,SA03,SA04,SA05,SS01,SS02,SS03,SS04,SS05,SS06
+ MSG='Validate docstrings (EX01, EX03, EX04, GL01, GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, PD01, PR03, PR04, PR05, PR06, PR08, PR09, PR10, RT01, RT02, RT04, RT05, SA02, SA03, SA04, SA05, SS01, SS02, SS03, SS04, SS05, SS06)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX01,EX03,EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,PD01,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT02,RT04,RT05,SA02,SA03,SA04,SA05,SS01,SS02,SS03,SS04,SS05,SS06
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Partially validate docstrings (PR02)' ; echo $MSG
| This error code was shadowed by another with the same name in `pandas`. The old error code was renamed to `PD01` in https://github.com/pandas-dev/pandas/pull/57767
I think `numpydoc`'s GL05 might also be interesting to enforce
```
"GL05": 'Tabs found at the start of line "{line_with_tabs}", please use '
"whitespace only",
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/57772 | 2024-03-07T22:15:45Z | 2024-03-07T23:41:37Z | 2024-03-07T23:41:37Z | 2024-03-17T07:53:34Z |
PERF: RangeIndex.__getitem__ with integers return RangeIndex | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index 157b87c93e729..8408f609bdc99 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -255,11 +255,11 @@ Performance improvements
- Performance improvement in :meth:`Index.join` by propagating cached attributes in cases where the result matches one of the inputs (:issue:`57023`)
- Performance improvement in :meth:`Index.take` when ``indices`` is a full range indexer from zero to length of index (:issue:`56806`)
- Performance improvement in :meth:`MultiIndex.equals` for equal length indexes (:issue:`56990`)
-- Performance improvement in :meth:`RangeIndex.__getitem__` with a boolean mask returning a :class:`RangeIndex` instead of a :class:`Index` when possible. (:issue:`57588`)
+- Performance improvement in :meth:`RangeIndex.__getitem__` with a boolean mask or integers returning a :class:`RangeIndex` instead of a :class:`Index` when possible. (:issue:`57588`)
- Performance improvement in :meth:`RangeIndex.append` when appending the same index (:issue:`57252`)
-- Performance improvement in :meth:`RangeIndex.join` returning a :class:`RangeIndex` instead of a :class:`Index` when possible. (:issue:`57651`)
-- Performance improvement in :meth:`RangeIndex.reindex` returning a :class:`RangeIndex` instead of a :class:`Index` when possible. (:issue:`57647`)
-- Performance improvement in :meth:`RangeIndex.take` returning a :class:`RangeIndex` instead of a :class:`Index` when possible. (:issue:`57445`)
+- Performance improvement in :meth:`RangeIndex.join` returning a :class:`RangeIndex` instead of a :class:`Index` when possible. (:issue:`57651`, :issue:`57752`)
+- Performance improvement in :meth:`RangeIndex.reindex` returning a :class:`RangeIndex` instead of a :class:`Index` when possible. (:issue:`57647`, :issue:`57752`)
+- Performance improvement in :meth:`RangeIndex.take` returning a :class:`RangeIndex` instead of a :class:`Index` when possible. (:issue:`57445`, :issue:`57752`)
- Performance improvement in ``DataFrameGroupBy.__len__`` and ``SeriesGroupBy.__len__`` (:issue:`57595`)
- Performance improvement in indexing operations for string dtypes (:issue:`56997`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index c72c5fa019bd7..4e5cc986b7325 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4241,7 +4241,6 @@ def join(
return self._join_via_get_indexer(other, how, sort)
- @final
def _join_empty(
self, other: Index, how: JoinHow, sort: bool
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 0781a86e5d57e..3fc3f7b4d50bb 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -472,18 +472,31 @@ def _shallow_copy(self, values, name: Hashable = no_default):
if values.dtype.kind == "f":
return Index(values, name=name, dtype=np.float64)
- if values.dtype.kind == "i" and values.ndim == 1 and len(values) > 1:
+ if values.dtype.kind == "i" and values.ndim == 1:
# GH 46675 & 43885: If values is equally spaced, return a
# more memory-compact RangeIndex instead of Index with 64-bit dtype
+ if len(values) == 0:
+ return type(self)._simple_new(_empty_range, name=name)
+ elif len(values) == 1:
+ start = values[0]
+ new_range = range(start, start + self.step, self.step)
+ return type(self)._simple_new(new_range, name=name)
diff = values[1] - values[0]
if not missing.isna(diff) and diff != 0:
- maybe_range_indexer, remainder = np.divmod(values - values[0], diff)
- if (
- lib.is_range_indexer(maybe_range_indexer, len(maybe_range_indexer))
- and not remainder.any()
- ):
+ if len(values) == 2:
+ # Can skip is_range_indexer check
new_range = range(values[0], values[-1] + diff, diff)
return type(self)._simple_new(new_range, name=name)
+ else:
+ maybe_range_indexer, remainder = np.divmod(values - values[0], diff)
+ if (
+ lib.is_range_indexer(
+ maybe_range_indexer, len(maybe_range_indexer)
+ )
+ and not remainder.any()
+ ):
+ new_range = range(values[0], values[-1] + diff, diff)
+ return type(self)._simple_new(new_range, name=name)
return self._constructor._simple_new(values, name=name)
def _view(self) -> Self:
@@ -897,12 +910,19 @@ def symmetric_difference(
result = result.rename(result_name)
return result
+ def _join_empty(
+ self, other: Index, how: JoinHow, sort: bool
+ ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
+ if other.dtype.kind == "i":
+ other = self._shallow_copy(other._values, name=other.name)
+ return super()._join_empty(other, how=how, sort=sort)
+
def _join_monotonic(
self, other: Index, how: JoinHow = "left"
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
# This currently only gets called for the monotonic increasing case
if not isinstance(other, type(self)):
- maybe_ri = self._shallow_copy(other._values)
+ maybe_ri = self._shallow_copy(other._values, name=other.name)
if not isinstance(maybe_ri, type(self)):
return super()._join_monotonic(other, how=how)
other = maybe_ri
@@ -1078,6 +1098,8 @@ def __getitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
+ if key is Ellipsis:
+ key = slice(None)
if isinstance(key, slice):
return self._getitem_slice(key)
elif is_integer(key):
@@ -1097,17 +1119,20 @@ def __getitem__(self, key):
)
elif com.is_bool_indexer(key):
if isinstance(getattr(key, "dtype", None), ExtensionDtype):
- np_key = key.to_numpy(dtype=bool, na_value=False)
+ key = key.to_numpy(dtype=bool, na_value=False)
else:
- np_key = np.asarray(key, dtype=bool)
- check_array_indexer(self._range, np_key) # type: ignore[arg-type]
+ key = np.asarray(key, dtype=bool)
+ check_array_indexer(self._range, key) # type: ignore[arg-type]
# Short circuit potential _shallow_copy check
- if np_key.all():
+ if key.all():
return self._simple_new(self._range, name=self.name)
- elif not np_key.any():
+ elif not key.any():
return self._simple_new(_empty_range, name=self.name)
- return self.take(np.flatnonzero(np_key))
- return super().__getitem__(key)
+ key = np.flatnonzero(key)
+ try:
+ return self.take(key)
+ except (TypeError, ValueError):
+ return super().__getitem__(key)
def _getitem_slice(self, slobj: slice) -> Self:
"""
diff --git a/pandas/tests/indexes/ranges/test_join.py b/pandas/tests/indexes/ranges/test_join.py
index ca3af607c0a38..09db30b1d4c51 100644
--- a/pandas/tests/indexes/ranges/test_join.py
+++ b/pandas/tests/indexes/ranges/test_join.py
@@ -207,9 +207,15 @@ def test_join_self(self, join_type):
[-1, -1, 0, 1],
"outer",
],
+ [RangeIndex(2), RangeIndex(0), RangeIndex(2), None, [-1, -1], "left"],
+ [RangeIndex(2), RangeIndex(0), RangeIndex(0), [], None, "right"],
+ [RangeIndex(2), RangeIndex(0), RangeIndex(0), [], None, "inner"],
+ [RangeIndex(2), RangeIndex(0), RangeIndex(2), None, [-1, -1], "outer"],
],
)
-@pytest.mark.parametrize("right_type", [RangeIndex, lambda x: Index(list(x))])
+@pytest.mark.parametrize(
+ "right_type", [RangeIndex, lambda x: Index(list(x), dtype=x.dtype)]
+)
def test_join_preserves_rangeindex(
left, right, expected, expected_lidx, expected_ridx, how, right_type
):
diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py
index 8c24ce5d699d5..3040b4c13dc17 100644
--- a/pandas/tests/indexes/ranges/test_range.py
+++ b/pandas/tests/indexes/ranges/test_range.py
@@ -608,6 +608,26 @@ def test_range_index_rsub_by_const(self):
tm.assert_index_equal(result, expected)
+def test_reindex_1_value_returns_rangeindex():
+ ri = RangeIndex(0, 10, 2, name="foo")
+ result, result_indexer = ri.reindex([2])
+ expected = RangeIndex(2, 4, 2, name="foo")
+ tm.assert_index_equal(result, expected, exact=True)
+
+ expected_indexer = np.array([1], dtype=np.intp)
+ tm.assert_numpy_array_equal(result_indexer, expected_indexer)
+
+
+def test_reindex_empty_returns_rangeindex():
+ ri = RangeIndex(0, 10, 2, name="foo")
+ result, result_indexer = ri.reindex([])
+ expected = RangeIndex(0, 0, 2, name="foo")
+ tm.assert_index_equal(result, expected, exact=True)
+
+ expected_indexer = np.array([], dtype=np.intp)
+ tm.assert_numpy_array_equal(result_indexer, expected_indexer)
+
+
def test_append_non_rangeindex_return_rangeindex():
ri = RangeIndex(1)
result = ri.append(Index([1]))
@@ -653,6 +673,21 @@ def test_take_return_rangeindex():
tm.assert_index_equal(result, expected, exact=True)
+@pytest.mark.parametrize(
+ "rng, exp_rng",
+ [
+ [range(5), range(3, 4)],
+ [range(0, -10, -2), range(-6, -8, -2)],
+ [range(0, 10, 2), range(6, 8, 2)],
+ ],
+)
+def test_take_1_value_returns_rangeindex(rng, exp_rng):
+ ri = RangeIndex(rng, name="foo")
+ result = ri.take([3])
+ expected = RangeIndex(exp_rng, name="foo")
+ tm.assert_index_equal(result, expected, exact=True)
+
+
def test_append_one_nonempty_preserve_step():
expected = RangeIndex(0, -1, -1)
result = RangeIndex(0).append([expected])
@@ -695,3 +730,25 @@ def test_getitem_boolmask_wrong_length():
ri = RangeIndex(4, name="foo")
with pytest.raises(IndexError, match="Boolean index has wrong length"):
ri[[True]]
+
+
+def test_getitem_integers_return_rangeindex():
+ result = RangeIndex(0, 10, 2, name="foo")[[0, -1]]
+ expected = RangeIndex(start=0, stop=16, step=8, name="foo")
+ tm.assert_index_equal(result, expected, exact=True)
+
+ result = RangeIndex(0, 10, 2, name="foo")[[3]]
+ expected = RangeIndex(start=6, stop=8, step=2, name="foo")
+ tm.assert_index_equal(result, expected, exact=True)
+
+
+def test_getitem_empty_return_rangeindex():
+ result = RangeIndex(0, 10, 2, name="foo")[[]]
+ expected = RangeIndex(start=0, stop=0, step=1, name="foo")
+ tm.assert_index_equal(result, expected, exact=True)
+
+
+def test_getitem_integers_return_index():
+ result = RangeIndex(0, 10, 2, name="foo")[[0, 1, -1]]
+ expected = Index([0, 2, 8], dtype="int64", name="foo")
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 9c33d15c01cd6..61a3a7fbe87f2 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -508,7 +508,7 @@ def test_loc_getitem_list_with_fail(self):
s.loc[[2]]
- msg = f"\"None of [Index([3], dtype='{np.dtype(int)}')] are in the [index]"
+ msg = "None of [RangeIndex(start=3, stop=4, step=1)] are in the [index]"
with pytest.raises(KeyError, match=re.escape(msg)):
s.loc[[3]]
diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py
index 529d6d789596f..72abbcec63357 100644
--- a/pandas/tests/io/pytables/test_append.py
+++ b/pandas/tests/io/pytables/test_append.py
@@ -967,6 +967,8 @@ def test_append_to_multiple_min_itemsize(setup_path):
}
)
expected = df.iloc[[0]]
+ # Reading/writing RangeIndex info is not supported yet
+ expected.index = Index(list(range(len(expected.index))))
with ensure_clean_store(setup_path) as store:
store.append_to_multiple(
| Builds on #57752
Discovered in #57441 | https://api.github.com/repos/pandas-dev/pandas/pulls/57770 | 2024-03-07T21:13:56Z | 2024-03-12T23:51:14Z | 2024-03-12T23:51:14Z | 2024-03-12T23:51:17Z |
Backport PR #57665 on branch 2.2.x (BUG: interchange protocol with nullable datatypes a non-null validity) | diff --git a/doc/source/whatsnew/v2.2.2.rst b/doc/source/whatsnew/v2.2.2.rst
index 058f7aebcd538..96f210ce6b7b9 100644
--- a/doc/source/whatsnew/v2.2.2.rst
+++ b/doc/source/whatsnew/v2.2.2.rst
@@ -13,6 +13,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
+- :meth:`DataFrame.__dataframe__` was producing incorrect data buffers when the a column's type was a pandas nullable on with missing values (:issue:`56702`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/interchange/column.py b/pandas/core/interchange/column.py
index e273ecad8b51e..7b39403ca1916 100644
--- a/pandas/core/interchange/column.py
+++ b/pandas/core/interchange/column.py
@@ -190,6 +190,10 @@ def describe_categorical(self):
@property
def describe_null(self):
+ if isinstance(self._col.dtype, BaseMaskedDtype):
+ column_null_dtype = ColumnNullType.USE_BYTEMASK
+ null_value = 1
+ return column_null_dtype, null_value
kind = self.dtype[0]
try:
null, value = _NULL_DESCRIPTION[kind]
@@ -290,7 +294,13 @@ def _get_data_buffer(
if self.dtype[0] == DtypeKind.DATETIME and len(self.dtype[2]) > 4:
np_arr = self._col.dt.tz_convert(None).to_numpy()
else:
- np_arr = self._col.to_numpy()
+ arr = self._col.array
+ if isinstance(self._col.dtype, BaseMaskedDtype):
+ np_arr = arr._data # type: ignore[attr-defined]
+ elif isinstance(self._col.dtype, ArrowDtype):
+ raise NotImplementedError("ArrowDtype not handled yet")
+ else:
+ np_arr = arr._ndarray # type: ignore[attr-defined]
buffer = PandasBuffer(np_arr, allow_copy=self._allow_copy)
dtype = self.dtype
elif self.dtype[0] == DtypeKind.CATEGORICAL:
@@ -328,6 +338,12 @@ def _get_validity_buffer(self) -> tuple[PandasBuffer, Any]:
"""
null, invalid = self.describe_null
+ if isinstance(self._col.dtype, BaseMaskedDtype):
+ mask = self._col.array._mask # type: ignore[attr-defined]
+ buffer = PandasBuffer(mask)
+ dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE)
+ return buffer, dtype
+
if self.dtype[0] == DtypeKind.STRING:
# For now, use byte array as the mask.
# TODO: maybe store as bit array to save space?..
diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py
index d47b533f92235..a1dedb6be456c 100644
--- a/pandas/tests/interchange/test_impl.py
+++ b/pandas/tests/interchange/test_impl.py
@@ -9,7 +9,6 @@
is_platform_windows,
)
from pandas.compat.numpy import np_version_lt1p23
-import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
@@ -404,17 +403,50 @@ def test_non_str_names_w_duplicates():
pd.api.interchange.from_dataframe(dfi, allow_copy=False)
-@pytest.mark.parametrize(
- "dtype", ["Int8", pytest.param("Int8[pyarrow]", marks=td.skip_if_no("pyarrow"))]
-)
-def test_nullable_integers(dtype: str) -> None:
+def test_nullable_integers() -> None:
+ # https://github.com/pandas-dev/pandas/issues/55069
+ df = pd.DataFrame({"a": [1]}, dtype="Int8")
+ expected = pd.DataFrame({"a": [1]}, dtype="int8")
+ result = pd.api.interchange.from_dataframe(df.__dataframe__())
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.xfail(reason="https://github.com/pandas-dev/pandas/issues/57664")
+def test_nullable_integers_pyarrow() -> None:
# https://github.com/pandas-dev/pandas/issues/55069
- df = pd.DataFrame({"a": [1]}, dtype=dtype)
+ df = pd.DataFrame({"a": [1]}, dtype="Int8[pyarrow]")
expected = pd.DataFrame({"a": [1]}, dtype="int8")
result = pd.api.interchange.from_dataframe(df.__dataframe__())
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize(
+ ("data", "dtype", "expected_dtype"),
+ [
+ ([1, 2, None], "Int64", "int64"),
+ (
+ [1, 2, None],
+ "UInt64",
+ "uint64",
+ ),
+ ([1.0, 2.25, None], "Float32", "float32"),
+ ],
+)
+def test_pandas_nullable_w_missing_values(
+ data: list, dtype: str, expected_dtype: str
+) -> None:
+ # https://github.com/pandas-dev/pandas/issues/57643
+ pytest.importorskip("pyarrow", "11.0.0")
+ import pyarrow.interchange as pai
+
+ df = pd.DataFrame({"a": data}, dtype=dtype)
+ result = pai.from_dataframe(df.__dataframe__())["a"]
+ assert result.type == expected_dtype
+ assert result[0].as_py() == data[0]
+ assert result[1].as_py() == data[1]
+ assert result[2].as_py() is None
+
+
def test_empty_dataframe():
# https://github.com/pandas-dev/pandas/issues/56700
df = pd.DataFrame({"a": []}, dtype="int8")
| backports #57665 | https://api.github.com/repos/pandas-dev/pandas/pulls/57769 | 2024-03-07T21:08:40Z | 2024-03-08T07:02:16Z | 2024-03-08T07:02:16Z | 2024-03-08T07:02:16Z |
PERF: Return RangeIndex columns instead of Index for str.partition with ArrowDtype | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index cd6977f43d322..37d4d09159d36 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -256,6 +256,7 @@ Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
- :meth:`RangeIndex.append` returns a :class:`RangeIndex` instead of a :class:`Index` when appending values that could continue the :class:`RangeIndex` (:issue:`57467`)
- :meth:`Series.str.extract` returns a :class:`RangeIndex` columns instead of an :class:`Index` column when possible (:issue:`57542`)
+- :meth:`Series.str.partition` with :class:`ArrowDtype` returns a :class:`RangeIndex` columns instead of an :class:`Index` column when possible (:issue:`57768`)
- Performance improvement in :class:`DataFrame` when ``data`` is a ``dict`` and ``columns`` is specified (:issue:`24368`)
- Performance improvement in :meth:`DataFrame.join` for sorted but non-unique indexes (:issue:`56941`)
- Performance improvement in :meth:`DataFrame.join` when left and/or right are non-unique and ``how`` is ``"left"``, ``"right"``, or ``"inner"`` (:issue:`56817`)
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index 0dddfc4f4c4c1..6a03e6b1f5ab0 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -321,10 +321,8 @@ def _wrap_result(
new_values.append(row)
pa_type = result._pa_array.type
result = ArrowExtensionArray(pa.array(new_values, type=pa_type))
- if name is not None:
- labels = name
- else:
- labels = range(max_len)
+ if name is None:
+ name = range(max_len)
result = (
pa.compute.list_flatten(result._pa_array)
.to_numpy()
@@ -332,7 +330,7 @@ def _wrap_result(
)
result = {
label: ArrowExtensionArray(pa.array(res))
- for label, res in zip(labels, result.T)
+ for label, res in zip(name, result.T)
}
elif is_object_dtype(result):
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 5d634c9aeb14f..4168bed918db7 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -2267,9 +2267,11 @@ def test_str_partition():
ser = pd.Series(["abcba", None], dtype=ArrowDtype(pa.string()))
result = ser.str.partition("b")
expected = pd.DataFrame(
- [["a", "b", "cba"], [None, None, None]], dtype=ArrowDtype(pa.string())
+ [["a", "b", "cba"], [None, None, None]],
+ dtype=ArrowDtype(pa.string()),
+ columns=pd.RangeIndex(3),
)
- tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, expected, check_column_type=True)
result = ser.str.partition("b", expand=False)
expected = pd.Series(ArrowExtensionArray(pa.array([["a", "b", "cba"], None])))
@@ -2277,9 +2279,11 @@ def test_str_partition():
result = ser.str.rpartition("b")
expected = pd.DataFrame(
- [["abc", "b", "a"], [None, None, None]], dtype=ArrowDtype(pa.string())
+ [["abc", "b", "a"], [None, None, None]],
+ dtype=ArrowDtype(pa.string()),
+ columns=pd.RangeIndex(3),
)
- tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, expected, check_column_type=True)
result = ser.str.rpartition("b", expand=False)
expected = pd.Series(ArrowExtensionArray(pa.array([["abc", "b", "a"], None])))
| Discovered in https://github.com/pandas-dev/pandas/pull/57441
| https://api.github.com/repos/pandas-dev/pandas/pulls/57768 | 2024-03-07T20:51:14Z | 2024-03-11T03:22:43Z | 2024-03-11T03:22:43Z | 2024-03-11T04:32:42Z |
Validate docstring error code | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 5bbad800b7aa9..c3fe73acabcbf 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -65,8 +65,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (EX01, EX03, EX04, GL01, GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, PR03, PR04, PR05, PR06, PR08, PR09, PR10, RT01, RT02, RT04, RT05, SA02, SA03, SA04, SA05, SS01, SS02, SS03, SS04, SS05, SS06)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX01,EX03,EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT02,RT04,RT05,SA02,SA03,SA04,SA05,SS01,SS02,SS03,SS04,SS05,SS06
+ MSG='Validate docstrings (EX01, EX03, EX04, GL01, GL02, GL03, GL04, GL06, GL07, GL09, GL10, PD01, PR03, PR04, PR05, PR06, PR08, PR09, PR10, RT01, RT02, RT04, RT05, SA02, SA03, SA04, SA05, SS01, SS02, SS03, SS04, SS05, SS06)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX01,EX03,EX04,GL01,GL02,GL03,GL04,GL06,GL07,GL09,GL10,PD01,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT02,RT04,RT05,SA02,SA03,SA04,SA05,SS01,SS02,SS03,SS04,SS05,SS06
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Partially validate docstrings (PR02)' ; echo $MSG
diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index baa27d14acc8c..ea44bd3fcc4cf 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -420,7 +420,6 @@ def test_no_exit_status_noerrors_for_validate_all(self, monkeypatch) -> None:
assert exit_status == 0
def test_exit_status_for_validate_all_json(self, monkeypatch) -> None:
- print("EXECUTED")
monkeypatch.setattr(
validate_docstrings,
"validate_all",
@@ -471,6 +470,15 @@ def test_errors_param_filters_errors(self, monkeypatch) -> None:
},
},
)
+ monkeypatch.setattr(
+ validate_docstrings,
+ "ERROR_MSGS",
+ {
+ "ER01": "err desc",
+ "ER02": "err desc",
+ "ER03": "err desc",
+ },
+ )
exit_status = validate_docstrings.main(
func_name=None,
prefix=None,
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index a4d53d360a12b..6138afba4d880 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -29,6 +29,7 @@
import matplotlib.pyplot as plt
from numpydoc.docscrape import get_doc_object
from numpydoc.validate import (
+ ERROR_MSGS as NUMPYDOC_ERROR_MSGS,
Validator,
validate,
)
@@ -56,7 +57,7 @@
ERROR_MSGS = {
"GL04": "Private classes ({mentioned_private_classes}) should not be "
"mentioned in public docstrings",
- "GL05": "Use 'array-like' rather than 'array_like' in docstrings.",
+ "PD01": "Use 'array-like' rather than 'array_like' in docstrings.",
"SA05": "{reference_name} in `See Also` section does not need `pandas` "
"prefix, use {right_reference} instead.",
"EX03": "flake8 error: line {line_number}, col {col_number}: {error_code} "
@@ -239,7 +240,6 @@ def pandas_validate(func_name: str):
doc_obj = get_doc_object(func_obj, doc=func_obj.__doc__)
doc = PandasDocstring(func_name, doc_obj)
result = validate(doc_obj)
-
mentioned_errs = doc.mentioned_private_classes
if mentioned_errs:
result["errors"].append(
@@ -277,7 +277,7 @@ def pandas_validate(func_name: str):
)
if doc.non_hyphenated_array_like():
- result["errors"].append(pandas_error("GL05"))
+ result["errors"].append(pandas_error("PD01"))
plt.close("all")
return result
@@ -400,11 +400,19 @@ def header(title, width=80, char="#") -> str:
sys.stderr.write(header("Doctests"))
sys.stderr.write(result["examples_errs"])
+def validate_error_codes(errors):
+ overlapped_errors = set(NUMPYDOC_ERROR_MSGS).intersection(set(ERROR_MSGS))
+ assert not overlapped_errors, f"{overlapped_errors} is overlapped."
+ all_errors = set(NUMPYDOC_ERROR_MSGS).union(set(ERROR_MSGS))
+ nonexistent_errors = set(errors) - all_errors
+ assert not nonexistent_errors, f"{nonexistent_errors} don't exist."
+
def main(func_name, prefix, errors, output_format, ignore_deprecated, ignore_functions):
"""
Main entry point. Call the validation for one or for all docstrings.
"""
+ validate_error_codes(errors)
if func_name is None:
return print_validate_all_results(
prefix,
| This PR does 2 things
- Validate that all `pandas` error codes are not overlapped with `numpydoc`'s
- Validate that all input error codes are valid
I wonder if this is a welcome change? Currently, I can detect 1 duplicated error code. | https://api.github.com/repos/pandas-dev/pandas/pulls/57767 | 2024-03-07T19:05:31Z | 2024-03-07T21:15:50Z | 2024-03-07T21:15:50Z | 2024-03-07T21:32:41Z |
Bump ruff to latest version | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 201820c6a8b28..190ea32203807 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -19,7 +19,7 @@ ci:
skip: [pylint, pyright, mypy]
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.13
+ rev: v0.3.1
hooks:
- id: ruff
args: [--exit-non-zero-on-fix]
diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index 86da26bead64d..15e691d46f693 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -3,6 +3,7 @@
lower-level methods directly on Index and subclasses, see index_object.py,
indexing_engine.py, and index_cached.py
"""
+
from datetime import datetime
import warnings
diff --git a/asv_bench/benchmarks/libs.py b/asv_bench/benchmarks/libs.py
index 3419163bcfe09..7da2d27d98dbb 100644
--- a/asv_bench/benchmarks/libs.py
+++ b/asv_bench/benchmarks/libs.py
@@ -5,6 +5,7 @@
If a PR does not edit anything in _libs/, then it is unlikely that the
benchmarks will be affected.
"""
+
import numpy as np
from pandas._libs.lib import (
diff --git a/asv_bench/benchmarks/package.py b/asv_bench/benchmarks/package.py
index 257c82cba8878..f8b51a523dab8 100644
--- a/asv_bench/benchmarks/package.py
+++ b/asv_bench/benchmarks/package.py
@@ -1,6 +1,7 @@
"""
Benchmarks for pandas at the package-level.
"""
+
import subprocess
import sys
diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py
index ccd86cae06d58..3b8b60e790380 100644
--- a/asv_bench/benchmarks/period.py
+++ b/asv_bench/benchmarks/period.py
@@ -2,6 +2,7 @@
Period benchmarks with non-tslibs dependencies. See
benchmarks.tslibs.period for benchmarks that rely only on tslibs.
"""
+
from pandas import (
DataFrame,
Period,
diff --git a/asv_bench/benchmarks/tslibs/offsets.py b/asv_bench/benchmarks/tslibs/offsets.py
index 1f48ec504acf1..55bd3c31c055c 100644
--- a/asv_bench/benchmarks/tslibs/offsets.py
+++ b/asv_bench/benchmarks/tslibs/offsets.py
@@ -2,6 +2,7 @@
offsets benchmarks that rely only on tslibs. See benchmarks.offset for
offsets benchmarks that rely on other parts of pandas.
"""
+
from datetime import datetime
import numpy as np
diff --git a/asv_bench/benchmarks/tslibs/resolution.py b/asv_bench/benchmarks/tslibs/resolution.py
index 44f288c7de216..6317d299379d3 100644
--- a/asv_bench/benchmarks/tslibs/resolution.py
+++ b/asv_bench/benchmarks/tslibs/resolution.py
@@ -17,6 +17,7 @@
df.loc[key] = (val.average, val.stdev)
"""
+
import numpy as np
try:
diff --git a/asv_bench/benchmarks/tslibs/timedelta.py b/asv_bench/benchmarks/tslibs/timedelta.py
index 2daf1861eb80a..dcc73aefc6c7a 100644
--- a/asv_bench/benchmarks/tslibs/timedelta.py
+++ b/asv_bench/benchmarks/tslibs/timedelta.py
@@ -2,6 +2,7 @@
Timedelta benchmarks that rely only on tslibs. See benchmarks.timedeltas for
Timedelta benchmarks that rely on other parts of pandas.
"""
+
import datetime
import numpy as np
diff --git a/asv_bench/benchmarks/tslibs/tslib.py b/asv_bench/benchmarks/tslibs/tslib.py
index 97ec80201dd16..4a011d4bb3f06 100644
--- a/asv_bench/benchmarks/tslibs/tslib.py
+++ b/asv_bench/benchmarks/tslibs/tslib.py
@@ -15,6 +15,7 @@
val = %timeit -o tr.time_ints_to_pydatetime(box, size, tz)
df.loc[key] = (val.average, val.stdev)
"""
+
from datetime import (
timedelta,
timezone,
diff --git a/doc/make.py b/doc/make.py
index c9588ffb80517..02deb5002fea1 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -11,6 +11,7 @@
$ python make.py html
$ python make.py latex
"""
+
import argparse
import csv
import importlib
diff --git a/pandas/_config/__init__.py b/pandas/_config/__init__.py
index c43d59654b44c..e1999dd536999 100644
--- a/pandas/_config/__init__.py
+++ b/pandas/_config/__init__.py
@@ -5,6 +5,7 @@
importing `dates` and `display` ensures that keys needed by _libs
are initialized.
"""
+
__all__ = [
"config",
"detect_console_encoding",
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index 9decc7eecf033..ebf2ba2510aa4 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -688,15 +688,13 @@ def _build_option_description(k: str) -> str:
@overload
def pp_options_list(
keys: Iterable[str], *, width: int = ..., _print: Literal[False] = ...
-) -> str:
- ...
+) -> str: ...
@overload
def pp_options_list(
keys: Iterable[str], *, width: int = ..., _print: Literal[True]
-) -> None:
- ...
+) -> None: ...
def pp_options_list(
diff --git a/pandas/_config/dates.py b/pandas/_config/dates.py
index b37831f96eb73..2d9f5d390dc9c 100644
--- a/pandas/_config/dates.py
+++ b/pandas/_config/dates.py
@@ -1,6 +1,7 @@
"""
config for datetime formatting
"""
+
from __future__ import annotations
from pandas._config import config as cf
diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py
index 69a56d3911316..61d88c43f0e4a 100644
--- a/pandas/_config/localization.py
+++ b/pandas/_config/localization.py
@@ -3,6 +3,7 @@
Name `localization` is chosen to avoid overlap with builtin `locale` module.
"""
+
from __future__ import annotations
from contextlib import contextmanager
diff --git a/pandas/_testing/_hypothesis.py b/pandas/_testing/_hypothesis.py
index 4e584781122a3..b7fc175b10d17 100644
--- a/pandas/_testing/_hypothesis.py
+++ b/pandas/_testing/_hypothesis.py
@@ -1,6 +1,7 @@
"""
Hypothesis data generator helpers.
"""
+
from datetime import datetime
from hypothesis import strategies as st
diff --git a/pandas/_testing/compat.py b/pandas/_testing/compat.py
index cc352ba7b8f2f..722ba61a3227f 100644
--- a/pandas/_testing/compat.py
+++ b/pandas/_testing/compat.py
@@ -1,6 +1,7 @@
"""
Helpers for sharing tests between DataFrame/Series
"""
+
from __future__ import annotations
from typing import TYPE_CHECKING
diff --git a/pandas/_typing.py b/pandas/_typing.py
index d7325fed93d62..f868a92554b39 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -140,30 +140,22 @@
class SequenceNotStr(Protocol[_T_co]):
@overload
- def __getitem__(self, index: SupportsIndex, /) -> _T_co:
- ...
+ def __getitem__(self, index: SupportsIndex, /) -> _T_co: ...
@overload
- def __getitem__(self, index: slice, /) -> Sequence[_T_co]:
- ...
+ def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ...
- def __contains__(self, value: object, /) -> bool:
- ...
+ def __contains__(self, value: object, /) -> bool: ...
- def __len__(self) -> int:
- ...
+ def __len__(self) -> int: ...
- def __iter__(self) -> Iterator[_T_co]:
- ...
+ def __iter__(self) -> Iterator[_T_co]: ...
- def index(self, value: Any, start: int = ..., stop: int = ..., /) -> int:
- ...
+ def index(self, value: Any, start: int = ..., stop: int = ..., /) -> int: ...
- def count(self, value: Any, /) -> int:
- ...
+ def count(self, value: Any, /) -> int: ...
- def __reversed__(self) -> Iterator[_T_co]:
- ...
+ def __reversed__(self) -> Iterator[_T_co]: ...
ListLike = Union[AnyArrayLike, SequenceNotStr, range]
@@ -317,13 +309,11 @@ def flush(self) -> Any:
class ReadPickleBuffer(ReadBuffer[bytes], Protocol):
- def readline(self) -> bytes:
- ...
+ def readline(self) -> bytes: ...
class WriteExcelBuffer(WriteBuffer[bytes], Protocol):
- def truncate(self, size: int | None = ...) -> int:
- ...
+ def truncate(self, size: int | None = ...) -> int: ...
class ReadCsvBuffer(ReadBuffer[AnyStr_co], Protocol):
diff --git a/pandas/_version.py b/pandas/_version.py
index 08a7111324e3b..7bd9da2bb1cfa 100644
--- a/pandas/_version.py
+++ b/pandas/_version.py
@@ -358,9 +358,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
- pieces[
- "error"
- ] = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'"
+ pieces["error"] = (
+ f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'"
+ )
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
diff --git a/pandas/api/__init__.py b/pandas/api/__init__.py
index a0d42b6541fdf..9b007e8fe8da4 100644
--- a/pandas/api/__init__.py
+++ b/pandas/api/__init__.py
@@ -1,4 +1,5 @@
-""" public toolkit API """
+"""public toolkit API"""
+
from pandas.api import (
extensions,
indexers,
diff --git a/pandas/arrays/__init__.py b/pandas/arrays/__init__.py
index a11755275d00e..bcf295fd6b490 100644
--- a/pandas/arrays/__init__.py
+++ b/pandas/arrays/__init__.py
@@ -3,6 +3,7 @@
See :ref:`extending.extension-types` for more.
"""
+
from pandas.core.arrays import (
ArrowExtensionArray,
ArrowStringArray,
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 738442fab8c70..1c08df80df477 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -7,6 +7,7 @@
Other items:
* platform checker
"""
+
from __future__ import annotations
import os
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index 26beca6a0e4b6..f9273ba4bbc62 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -91,8 +91,7 @@ def import_optional_dependency(
min_version: str | None = ...,
*,
errors: Literal["raise"] = ...,
-) -> types.ModuleType:
- ...
+) -> types.ModuleType: ...
@overload
@@ -102,8 +101,7 @@ def import_optional_dependency(
min_version: str | None = ...,
*,
errors: Literal["warn", "ignore"],
-) -> types.ModuleType | None:
- ...
+) -> types.ModuleType | None: ...
def import_optional_dependency(
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index 7fc4b8d1d9b10..54a12c76a230b 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -1,4 +1,5 @@
-""" support numpy compatibility across versions """
+"""support numpy compatibility across versions"""
+
import warnings
import numpy as np
diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py
index 4df30f7f4a8a7..9432635f62a35 100644
--- a/pandas/compat/numpy/function.py
+++ b/pandas/compat/numpy/function.py
@@ -15,6 +15,7 @@
methods that are spread throughout the codebase. This module will make it
easier to adjust to future upstream changes in the analogous numpy signatures.
"""
+
from __future__ import annotations
from typing import (
@@ -179,13 +180,11 @@ def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs)
@overload
-def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None:
- ...
+def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None: ...
@overload
-def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT:
- ...
+def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT: ...
def validate_clip_with_axis(
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index f4698bee5cb02..26c44c2613cb2 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -1,6 +1,7 @@
"""
Pickle compatibility to pandas version 1.0
"""
+
from __future__ import annotations
import contextlib
diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py
index beb4814914101..5c9e885f8e9f5 100644
--- a/pandas/compat/pyarrow.py
+++ b/pandas/compat/pyarrow.py
@@ -1,4 +1,4 @@
-""" support pyarrow compatibility across versions """
+"""support pyarrow compatibility across versions"""
from __future__ import annotations
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 5cdb3b59698f5..c9f7ea2096008 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -17,6 +17,7 @@
- Dtypes
- Misc
"""
+
from __future__ import annotations
from collections import abc
diff --git a/pandas/core/_numba/kernels/mean_.py b/pandas/core/_numba/kernels/mean_.py
index 4ed9e8cb2bf50..cc10bd003af7e 100644
--- a/pandas/core/_numba/kernels/mean_.py
+++ b/pandas/core/_numba/kernels/mean_.py
@@ -6,6 +6,7 @@
Mirrors pandas/_libs/window/aggregation.pyx
"""
+
from __future__ import annotations
from typing import TYPE_CHECKING
diff --git a/pandas/core/_numba/kernels/min_max_.py b/pandas/core/_numba/kernels/min_max_.py
index c9803980e64a6..59d36732ebae6 100644
--- a/pandas/core/_numba/kernels/min_max_.py
+++ b/pandas/core/_numba/kernels/min_max_.py
@@ -6,6 +6,7 @@
Mirrors pandas/_libs/window/aggregation.pyx
"""
+
from __future__ import annotations
from typing import TYPE_CHECKING
diff --git a/pandas/core/_numba/kernels/sum_.py b/pandas/core/_numba/kernels/sum_.py
index 94db84267ceec..76f4e22b43c4b 100644
--- a/pandas/core/_numba/kernels/sum_.py
+++ b/pandas/core/_numba/kernels/sum_.py
@@ -6,6 +6,7 @@
Mirrors pandas/_libs/window/aggregation.pyx
"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/core/_numba/kernels/var_.py b/pandas/core/_numba/kernels/var_.py
index c63d0b90b0fc3..69aec4d6522c4 100644
--- a/pandas/core/_numba/kernels/var_.py
+++ b/pandas/core/_numba/kernels/var_.py
@@ -6,6 +6,7 @@
Mirrors pandas/_libs/window/aggregation.pyx
"""
+
from __future__ import annotations
from typing import TYPE_CHECKING
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index 39a5ffd947009..99b5053ce250c 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -4,6 +4,7 @@
that can be mixed into or pinned onto other pandas classes.
"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 3672cdb13d4a3..774bbbe2463e9 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -2,6 +2,7 @@
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
+
from __future__ import annotations
import decimal
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index d9d95c96ba0fe..f2fb503be86f5 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -1300,9 +1300,10 @@ def apply_with_numba(self) -> dict[int, Any]:
# Convert from numba dict to regular dict
# Our isinstance checks in the df constructor don't pass for numbas typed dict
- with set_numba_data(self.obj.index) as index, set_numba_data(
- self.columns
- ) as columns:
+ with (
+ set_numba_data(self.obj.index) as index,
+ set_numba_data(self.columns) as columns,
+ ):
res = dict(nb_func(self.values, columns, index))
return res
diff --git a/pandas/core/array_algos/masked_reductions.py b/pandas/core/array_algos/masked_reductions.py
index 335fa1afc0f4e..3784689995802 100644
--- a/pandas/core/array_algos/masked_reductions.py
+++ b/pandas/core/array_algos/masked_reductions.py
@@ -2,6 +2,7 @@
masked_reductions.py is for reduction algorithms using a mask-based approach
for missing values.
"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py
index f65d2d20e028e..464a4d552af68 100644
--- a/pandas/core/array_algos/putmask.py
+++ b/pandas/core/array_algos/putmask.py
@@ -1,6 +1,7 @@
"""
EA-compatible analogue to np.putmask
"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py
index 7293a46eb9a60..6cc867c60fd82 100644
--- a/pandas/core/array_algos/replace.py
+++ b/pandas/core/array_algos/replace.py
@@ -1,6 +1,7 @@
"""
Methods used by Block.replace and related methods.
"""
+
from __future__ import annotations
import operator
diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py
index ac674e31586e7..ca2c7a3b9664f 100644
--- a/pandas/core/array_algos/take.py
+++ b/pandas/core/array_algos/take.py
@@ -41,8 +41,7 @@ def take_nd(
axis: AxisInt = ...,
fill_value=...,
allow_fill: bool = ...,
-) -> np.ndarray:
- ...
+) -> np.ndarray: ...
@overload
@@ -52,8 +51,7 @@ def take_nd(
axis: AxisInt = ...,
fill_value=...,
allow_fill: bool = ...,
-) -> ArrayLike:
- ...
+) -> ArrayLike: ...
def take_nd(
diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py
index dde1b8a35e2f0..1fa610f35f56b 100644
--- a/pandas/core/arraylike.py
+++ b/pandas/core/arraylike.py
@@ -4,6 +4,7 @@
Index
ExtensionArray
"""
+
from __future__ import annotations
import operator
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 83d2b6f1ca84f..c1d0ade572e8a 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -267,15 +267,13 @@ def _validate_setitem_value(self, value):
return value
@overload
- def __getitem__(self, key: ScalarIndexer) -> Any:
- ...
+ def __getitem__(self, key: ScalarIndexer) -> Any: ...
@overload
def __getitem__(
self,
key: SequenceIndexer | PositionalIndexerTuple,
- ) -> Self:
- ...
+ ) -> Self: ...
def __getitem__(
self,
diff --git a/pandas/core/arrays/_ranges.py b/pandas/core/arrays/_ranges.py
index 3e89391324ad4..600ddc7f717a8 100644
--- a/pandas/core/arrays/_ranges.py
+++ b/pandas/core/arrays/_ranges.py
@@ -2,6 +2,7 @@
Helper functions to generate range-like data for DatetimeArray
(and possibly TimedeltaArray/PeriodArray)
"""
+
from __future__ import annotations
from typing import TYPE_CHECKING
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index cddccd7b45a3e..aaf43662ebde2 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -852,12 +852,10 @@ def isna(self) -> npt.NDArray[np.bool_]:
return self._pa_array.is_null().to_numpy()
@overload
- def any(self, *, skipna: Literal[True] = ..., **kwargs) -> bool:
- ...
+ def any(self, *, skipna: Literal[True] = ..., **kwargs) -> bool: ...
@overload
- def any(self, *, skipna: bool, **kwargs) -> bool | NAType:
- ...
+ def any(self, *, skipna: bool, **kwargs) -> bool | NAType: ...
def any(self, *, skipna: bool = True, **kwargs) -> bool | NAType:
"""
@@ -918,12 +916,10 @@ def any(self, *, skipna: bool = True, **kwargs) -> bool | NAType:
return self._reduce("any", skipna=skipna, **kwargs)
@overload
- def all(self, *, skipna: Literal[True] = ..., **kwargs) -> bool:
- ...
+ def all(self, *, skipna: Literal[True] = ..., **kwargs) -> bool: ...
@overload
- def all(self, *, skipna: bool, **kwargs) -> bool | NAType:
- ...
+ def all(self, *, skipna: bool, **kwargs) -> bool | NAType: ...
def all(self, *, skipna: bool = True, **kwargs) -> bool | NAType:
"""
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index a0da3518f8e5e..399be217af9d1 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -6,6 +6,7 @@
This is an experimental API and subject to breaking changes
without warning.
"""
+
from __future__ import annotations
import operator
@@ -397,12 +398,10 @@ def _from_factorized(cls, values, original):
# Must be a Sequence
# ------------------------------------------------------------------------
@overload
- def __getitem__(self, item: ScalarIndexer) -> Any:
- ...
+ def __getitem__(self, item: ScalarIndexer) -> Any: ...
@overload
- def __getitem__(self, item: SequenceIndexer) -> Self:
- ...
+ def __getitem__(self, item: SequenceIndexer) -> Self: ...
def __getitem__(self, item: PositionalIndexer) -> Self | Any:
"""
@@ -648,16 +647,13 @@ def nbytes(self) -> int:
# ------------------------------------------------------------------------
@overload
- def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
- ...
+ def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: ...
@overload
- def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
- ...
+ def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: ...
@overload
- def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
- ...
+ def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: ...
def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
"""
@@ -2411,23 +2407,19 @@ def _groupby_op(
class ExtensionArraySupportsAnyAll(ExtensionArray):
@overload
- def any(self, *, skipna: Literal[True] = ...) -> bool:
- ...
+ def any(self, *, skipna: Literal[True] = ...) -> bool: ...
@overload
- def any(self, *, skipna: bool) -> bool | NAType:
- ...
+ def any(self, *, skipna: bool) -> bool | NAType: ...
def any(self, *, skipna: bool = True) -> bool | NAType:
raise AbstractMethodError(self)
@overload
- def all(self, *, skipna: Literal[True] = ...) -> bool:
- ...
+ def all(self, *, skipna: Literal[True] = ...) -> bool: ...
@overload
- def all(self, *, skipna: bool) -> bool | NAType:
- ...
+ def all(self, *, skipna: bool) -> bool | NAType: ...
def all(self, *, skipna: bool = True) -> bool | NAType:
raise AbstractMethodError(self)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index f37513b2bc8fd..af8dc08c1ec26 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -554,16 +554,13 @@ def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self:
return res
@overload
- def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
- ...
+ def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: ...
@overload
- def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
- ...
+ def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: ...
@overload
- def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
- ...
+ def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: ...
def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
"""
@@ -1975,14 +1972,12 @@ def sort_values(
inplace: Literal[False] = ...,
ascending: bool = ...,
na_position: str = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def sort_values(
self, *, inplace: Literal[True], ascending: bool = ..., na_position: str = ...
- ) -> None:
- ...
+ ) -> None: ...
def sort_values(
self,
@@ -2667,12 +2662,10 @@ def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
return algorithms.isin(self.codes, code_values)
@overload
- def _replace(self, *, to_replace, value, inplace: Literal[False] = ...) -> Self:
- ...
+ def _replace(self, *, to_replace, value, inplace: Literal[False] = ...) -> Self: ...
@overload
- def _replace(self, *, to_replace, value, inplace: Literal[True]) -> None:
- ...
+ def _replace(self, *, to_replace, value, inplace: Literal[True]) -> None: ...
def _replace(self, *, to_replace, value, inplace: bool = False) -> Self | None:
from pandas import Index
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 14967bb81125d..dd7274c3d79f7 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -362,15 +362,13 @@ def __array__(
return self._ndarray
@overload
- def __getitem__(self, key: ScalarIndexer) -> DTScalarOrNaT:
- ...
+ def __getitem__(self, key: ScalarIndexer) -> DTScalarOrNaT: ...
@overload
def __getitem__(
self,
key: SequenceIndexer | PositionalIndexerTuple,
- ) -> Self:
- ...
+ ) -> Self: ...
def __getitem__(self, key: PositionalIndexer2D) -> Self | DTScalarOrNaT:
"""
@@ -498,20 +496,16 @@ def astype(self, dtype, copy: bool = True):
return np.asarray(self, dtype=dtype)
@overload
- def view(self) -> Self:
- ...
+ def view(self) -> Self: ...
@overload
- def view(self, dtype: Literal["M8[ns]"]) -> DatetimeArray:
- ...
+ def view(self, dtype: Literal["M8[ns]"]) -> DatetimeArray: ...
@overload
- def view(self, dtype: Literal["m8[ns]"]) -> TimedeltaArray:
- ...
+ def view(self, dtype: Literal["m8[ns]"]) -> TimedeltaArray: ...
@overload
- def view(self, dtype: Dtype | None = ...) -> ArrayLike:
- ...
+ def view(self, dtype: Dtype | None = ...) -> ArrayLike: ...
# pylint: disable-next=useless-parent-delegation
def view(self, dtype: Dtype | None = None) -> ArrayLike:
@@ -2527,13 +2521,11 @@ def ensure_arraylike_for_datetimelike(
@overload
-def validate_periods(periods: None) -> None:
- ...
+def validate_periods(periods: None) -> None: ...
@overload
-def validate_periods(periods: int | float) -> int:
- ...
+def validate_periods(periods: int | float) -> int: ...
def validate_periods(periods: int | float | None) -> int | None:
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 4ef5c04461ce9..931f19a7901bd 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -103,13 +103,11 @@
@overload
-def tz_to_dtype(tz: tzinfo, unit: str = ...) -> DatetimeTZDtype:
- ...
+def tz_to_dtype(tz: tzinfo, unit: str = ...) -> DatetimeTZDtype: ...
@overload
-def tz_to_dtype(tz: None, unit: str = ...) -> np.dtype[np.datetime64]:
- ...
+def tz_to_dtype(tz: None, unit: str = ...) -> np.dtype[np.datetime64]: ...
def tz_to_dtype(
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 5e7e7e949169b..1ea32584403ba 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -706,12 +706,10 @@ def __len__(self) -> int:
return len(self._left)
@overload
- def __getitem__(self, key: ScalarIndexer) -> IntervalOrNA:
- ...
+ def __getitem__(self, key: ScalarIndexer) -> IntervalOrNA: ...
@overload
- def __getitem__(self, key: SequenceIndexer) -> Self:
- ...
+ def __getitem__(self, key: SequenceIndexer) -> Self: ...
def __getitem__(self, key: PositionalIndexer) -> Self | IntervalOrNA:
key = check_array_indexer(self, key)
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index cf9ba3c3dbad5..108202f5e510b 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -175,12 +175,10 @@ def dtype(self) -> BaseMaskedDtype:
raise AbstractMethodError(self)
@overload
- def __getitem__(self, item: ScalarIndexer) -> Any:
- ...
+ def __getitem__(self, item: ScalarIndexer) -> Any: ...
@overload
- def __getitem__(self, item: SequenceIndexer) -> Self:
- ...
+ def __getitem__(self, item: SequenceIndexer) -> Self: ...
def __getitem__(self, item: PositionalIndexer) -> Self | Any:
item = check_array_indexer(self, item)
@@ -535,16 +533,13 @@ def tolist(self) -> list:
return self.to_numpy(dtype=dtype, na_value=libmissing.NA).tolist()
@overload
- def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
- ...
+ def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: ...
@overload
- def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
- ...
+ def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: ...
@overload
- def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
- ...
+ def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: ...
def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
dtype = pandas_dtype(dtype)
@@ -1342,14 +1337,12 @@ def map(self, mapper, na_action=None):
@overload
def any(
self, *, skipna: Literal[True] = ..., axis: AxisInt | None = ..., **kwargs
- ) -> np.bool_:
- ...
+ ) -> np.bool_: ...
@overload
def any(
self, *, skipna: bool, axis: AxisInt | None = ..., **kwargs
- ) -> np.bool_ | NAType:
- ...
+ ) -> np.bool_ | NAType: ...
def any(
self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs
@@ -1437,14 +1430,12 @@ def any(
@overload
def all(
self, *, skipna: Literal[True] = ..., axis: AxisInt | None = ..., **kwargs
- ) -> np.bool_:
- ...
+ ) -> np.bool_: ...
@overload
def all(
self, *, skipna: bool, axis: AxisInt | None = ..., **kwargs
- ) -> np.bool_ | NAType:
- ...
+ ) -> np.bool_ | NAType: ...
def all(
self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 73cc8e4345d3c..d05f857f46179 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -1124,13 +1124,11 @@ def period_array(
@overload
-def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT:
- ...
+def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT: ...
@overload
-def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset:
- ...
+def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset: ...
def validate_dtype_freq(
diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py
index 6608fcce2cd62..58199701647d1 100644
--- a/pandas/core/arrays/sparse/accessor.py
+++ b/pandas/core/arrays/sparse/accessor.py
@@ -1,4 +1,5 @@
"""Sparse accessor"""
+
from __future__ import annotations
from typing import TYPE_CHECKING
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 9b1d4d70ee32e..8d94662ab4303 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -1,6 +1,7 @@
"""
SparseArray data structure
"""
+
from __future__ import annotations
from collections import abc
@@ -930,15 +931,13 @@ def value_counts(self, dropna: bool = True) -> Series:
# Indexing
# --------
@overload
- def __getitem__(self, key: ScalarIndexer) -> Any:
- ...
+ def __getitem__(self, key: ScalarIndexer) -> Any: ...
@overload
def __getitem__(
self,
key: SequenceIndexer | tuple[int | ellipsis, ...],
- ) -> Self:
- ...
+ ) -> Self: ...
def __getitem__(
self,
@@ -1916,13 +1915,11 @@ def _make_sparse(
@overload
-def make_sparse_index(length: int, indices, kind: Literal["block"]) -> BlockIndex:
- ...
+def make_sparse_index(length: int, indices, kind: Literal["block"]) -> BlockIndex: ...
@overload
-def make_sparse_index(length: int, indices, kind: Literal["integer"]) -> IntIndex:
- ...
+def make_sparse_index(length: int, indices, kind: Literal["integer"]) -> IntIndex: ...
def make_sparse_index(length: int, indices, kind: SparseIndexKind) -> SparseIndex:
diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py
index 31e09c923d933..cc9fd2d5fb8b0 100644
--- a/pandas/core/arrays/sparse/scipy_sparse.py
+++ b/pandas/core/arrays/sparse/scipy_sparse.py
@@ -3,6 +3,7 @@
Currently only includes to_coo helpers.
"""
+
from __future__ import annotations
from typing import TYPE_CHECKING
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 4556b9ab4d4c9..33b37319675ae 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1319,8 +1319,7 @@ def searchsorted( # type: ignore[overload-overlap]
value: ScalarLike_co,
side: Literal["left", "right"] = ...,
sorter: NumpySorter = ...,
- ) -> np.intp:
- ...
+ ) -> np.intp: ...
@overload
def searchsorted(
@@ -1328,8 +1327,7 @@ def searchsorted(
value: npt.ArrayLike | ExtensionArray,
side: Literal["left", "right"] = ...,
sorter: NumpySorter = ...,
- ) -> npt.NDArray[np.intp]:
- ...
+ ) -> npt.NDArray[np.intp]: ...
@doc(_shared_docs["searchsorted"], klass="Index")
def searchsorted(
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 5f37f3de578e8..77e986a26fbe9 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -3,6 +3,7 @@
Note: pandas.core.common is *not* part of the public API.
"""
+
from __future__ import annotations
import builtins
@@ -227,8 +228,7 @@ def asarray_tuplesafe(
@overload
-def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike:
- ...
+def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike: ...
def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = None) -> ArrayLike:
@@ -422,15 +422,13 @@ def standardize_mapping(into):
@overload
-def random_state(state: np.random.Generator) -> np.random.Generator:
- ...
+def random_state(state: np.random.Generator) -> np.random.Generator: ...
@overload
def random_state(
state: int | np.ndarray | np.random.BitGenerator | np.random.RandomState | None,
-) -> np.random.RandomState:
- ...
+) -> np.random.RandomState: ...
def random_state(state: RandomState | None = None):
@@ -477,8 +475,7 @@ def pipe(
func: Callable[Concatenate[_T, P], T],
*args: P.args,
**kwargs: P.kwargs,
-) -> T:
- ...
+) -> T: ...
@overload
@@ -487,8 +484,7 @@ def pipe(
func: tuple[Callable[..., T], str],
*args: Any,
**kwargs: Any,
-) -> T:
- ...
+) -> T: ...
def pipe(
diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py
index 2a48bb280a35f..c5562fb0284b7 100644
--- a/pandas/core/computation/align.py
+++ b/pandas/core/computation/align.py
@@ -1,6 +1,7 @@
"""
Core eval alignment algorithms.
"""
+
from __future__ import annotations
from functools import (
diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py
index a3a05a9d75c6e..5db05ebe33efd 100644
--- a/pandas/core/computation/engines.py
+++ b/pandas/core/computation/engines.py
@@ -1,6 +1,7 @@
"""
Engine classes for :func:`~pandas.eval`
"""
+
from __future__ import annotations
import abc
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 6c234b40d27e6..c949cfd1bc657 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -1,6 +1,7 @@
"""
Top level ``eval`` module.
"""
+
from __future__ import annotations
import tokenize
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index f0aa7363d2644..a8123a898b4fe 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -1,6 +1,7 @@
"""
:func:`~pandas.eval` parsers.
"""
+
from __future__ import annotations
import ast
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index 17a68478196da..e2acd9a2c97c2 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -5,6 +5,7 @@
Offer fast expression evaluation through numexpr
"""
+
from __future__ import annotations
import operator
diff --git a/pandas/core/computation/parsing.py b/pandas/core/computation/parsing.py
index 4cfa0f2baffd5..8fbf8936d31ef 100644
--- a/pandas/core/computation/parsing.py
+++ b/pandas/core/computation/parsing.py
@@ -1,6 +1,7 @@
"""
:func:`~pandas.eval` source string parsing functions
"""
+
from __future__ import annotations
from io import StringIO
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index cec8a89abc0b2..39511048abf49 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -1,4 +1,5 @@
-""" manage PyTables query interface via Expressions """
+"""manage PyTables query interface via Expressions"""
+
from __future__ import annotations
import ast
diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py
index 7e553ca448218..7b31e03e58b4b 100644
--- a/pandas/core/computation/scope.py
+++ b/pandas/core/computation/scope.py
@@ -1,6 +1,7 @@
"""
Module for scope operations
"""
+
from __future__ import annotations
from collections import ChainMap
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 38cc5a9ab10e6..d9a8b4dfd95fd 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -9,6 +9,7 @@
module is imported, register them here rather than in the module.
"""
+
from __future__ import annotations
import os
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index af2aea11dcf6d..e6d99ab773db9 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -4,6 +4,7 @@
These should not depend on core.internals.
"""
+
from __future__ import annotations
from collections.abc import Sequence
@@ -402,15 +403,13 @@ def array(
@overload
def extract_array(
obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ...
-) -> ArrayLike:
- ...
+) -> ArrayLike: ...
@overload
def extract_array(
obj: T, extract_numpy: bool = ..., extract_range: bool = ...
-) -> T | ArrayLike:
- ...
+) -> T | ArrayLike: ...
def extract_array(
diff --git a/pandas/core/dtypes/astype.py b/pandas/core/dtypes/astype.py
index f5579082c679b..bdb16aa202297 100644
--- a/pandas/core/dtypes/astype.py
+++ b/pandas/core/dtypes/astype.py
@@ -2,6 +2,7 @@
Functions for implementing 'astype' methods according to pandas conventions,
particularly ones that differ from numpy.
"""
+
from __future__ import annotations
import inspect
@@ -42,15 +43,13 @@
@overload
def _astype_nansafe(
arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ...
-) -> np.ndarray:
- ...
+) -> np.ndarray: ...
@overload
def _astype_nansafe(
arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ...
-) -> ExtensionArray:
- ...
+) -> ExtensionArray: ...
def _astype_nansafe(
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 41407704dfc8a..2f8e59cd6e89c 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -1,6 +1,7 @@
"""
Extend pandas with custom array types.
"""
+
from __future__ import annotations
from typing import (
@@ -97,8 +98,7 @@ class property**.
>>> class ExtensionDtype:
... def __from_arrow__(
... self, array: pyarrow.Array | pyarrow.ChunkedArray
- ... ) -> ExtensionArray:
- ... ...
+ ... ) -> ExtensionArray: ...
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
@@ -528,22 +528,18 @@ def register(self, dtype: type_t[ExtensionDtype]) -> None:
self.dtypes.append(dtype)
@overload
- def find(self, dtype: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]:
- ...
+ def find(self, dtype: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]: ...
@overload
- def find(self, dtype: ExtensionDtypeT) -> ExtensionDtypeT:
- ...
+ def find(self, dtype: ExtensionDtypeT) -> ExtensionDtypeT: ...
@overload
- def find(self, dtype: str) -> ExtensionDtype | None:
- ...
+ def find(self, dtype: str) -> ExtensionDtype | None: ...
@overload
def find(
self, dtype: npt.DTypeLike
- ) -> type_t[ExtensionDtype] | ExtensionDtype | None:
- ...
+ ) -> type_t[ExtensionDtype] | ExtensionDtype | None: ...
def find(
self, dtype: type_t[ExtensionDtype] | ExtensionDtype | npt.DTypeLike
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 01b7d500179bf..a130983337f64 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -247,13 +247,15 @@ def _disallow_mismatched_datetimelike(value, dtype: DtypeObj) -> None:
@overload
-def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray:
- ...
+def maybe_downcast_to_dtype(
+ result: np.ndarray, dtype: str | np.dtype
+) -> np.ndarray: ...
@overload
-def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike:
- ...
+def maybe_downcast_to_dtype(
+ result: ExtensionArray, dtype: str | np.dtype
+) -> ArrayLike: ...
def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike:
@@ -317,15 +319,13 @@ def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLi
@overload
def maybe_downcast_numeric(
result: np.ndarray, dtype: np.dtype, do_round: bool = False
-) -> np.ndarray:
- ...
+) -> np.ndarray: ...
@overload
def maybe_downcast_numeric(
result: ExtensionArray, dtype: DtypeObj, do_round: bool = False
-) -> ArrayLike:
- ...
+) -> ArrayLike: ...
def maybe_downcast_numeric(
@@ -513,13 +513,11 @@ def _maybe_cast_to_extension_array(
@overload
-def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype:
- ...
+def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype: ...
@overload
-def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype:
- ...
+def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype: ...
def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj:
@@ -1418,18 +1416,15 @@ def np_find_common_type(*dtypes: np.dtype) -> np.dtype:
@overload
-def find_common_type(types: list[np.dtype]) -> np.dtype:
- ...
+def find_common_type(types: list[np.dtype]) -> np.dtype: ...
@overload
-def find_common_type(types: list[ExtensionDtype]) -> DtypeObj:
- ...
+def find_common_type(types: list[ExtensionDtype]) -> DtypeObj: ...
@overload
-def find_common_type(types: list[DtypeObj]) -> DtypeObj:
- ...
+def find_common_type(types: list[DtypeObj]) -> DtypeObj: ...
def find_common_type(types):
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 15c51b98aea0b..aa621fea6c39a 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1,6 +1,7 @@
"""
Common type operations.
"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 7d5e88b502a00..f702d5a60e86f 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -1,6 +1,7 @@
"""
Utility functions related to concat.
"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 27b9c0dec2796..2bb2556c88204 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -1,6 +1,7 @@
"""
Define extension dtypes.
"""
+
from __future__ import annotations
from datetime import (
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py
index 8abde2ab7010f..8d3d86217dedf 100644
--- a/pandas/core/dtypes/generic.py
+++ b/pandas/core/dtypes/generic.py
@@ -1,4 +1,5 @@
-""" define generic base classes for pandas objects """
+"""define generic base classes for pandas objects"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index 02189dd10e5b8..f042911b53d2b 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -1,4 +1,4 @@
-""" basic inference routines """
+"""basic inference routines"""
from __future__ import annotations
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 97efb5db9baa9..f127c736e745a 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -1,6 +1,7 @@
"""
missing types & inference
"""
+
from __future__ import annotations
from decimal import Decimal
@@ -68,31 +69,28 @@
@overload
-def isna(obj: Scalar | Pattern | NAType | NaTType) -> bool:
- ...
+def isna(obj: Scalar | Pattern | NAType | NaTType) -> bool: ...
@overload
def isna(
obj: ArrayLike | Index | list,
-) -> npt.NDArray[np.bool_]:
- ...
+) -> npt.NDArray[np.bool_]: ...
@overload
-def isna(obj: NDFrameT) -> NDFrameT:
- ...
+def isna(obj: NDFrameT) -> NDFrameT: ...
# handle unions
@overload
-def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
- ...
+def isna(
+ obj: NDFrameT | ArrayLike | Index | list,
+) -> NDFrameT | npt.NDArray[np.bool_]: ...
@overload
-def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
- ...
+def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ...
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
@@ -285,31 +283,28 @@ def _isna_recarray_dtype(values: np.rec.recarray) -> npt.NDArray[np.bool_]:
@overload
-def notna(obj: Scalar | Pattern | NAType | NaTType) -> bool:
- ...
+def notna(obj: Scalar | Pattern | NAType | NaTType) -> bool: ...
@overload
def notna(
obj: ArrayLike | Index | list,
-) -> npt.NDArray[np.bool_]:
- ...
+) -> npt.NDArray[np.bool_]: ...
@overload
-def notna(obj: NDFrameT) -> NDFrameT:
- ...
+def notna(obj: NDFrameT) -> NDFrameT: ...
# handle unions
@overload
-def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]:
- ...
+def notna(
+ obj: NDFrameT | ArrayLike | Index | list,
+) -> NDFrameT | npt.NDArray[np.bool_]: ...
@overload
-def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
- ...
+def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ...
def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3ab40c1aeb64b..25501ff245e46 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8,6 +8,7 @@
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
+
from __future__ import annotations
import collections
@@ -1216,8 +1217,7 @@ def to_string(
min_rows: int | None = ...,
max_colwidth: int | None = ...,
encoding: str | None = ...,
- ) -> str:
- ...
+ ) -> str: ...
@overload
def to_string(
@@ -1242,8 +1242,7 @@ def to_string(
min_rows: int | None = ...,
max_colwidth: int | None = ...,
encoding: str | None = ...,
- ) -> None:
- ...
+ ) -> None: ...
@Substitution(
header_type="bool or list of str",
@@ -1573,12 +1572,10 @@ def __len__(self) -> int:
return len(self.index)
@overload
- def dot(self, other: Series) -> Series:
- ...
+ def dot(self, other: Series) -> Series: ...
@overload
- def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame:
- ...
+ def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ...
def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:
"""
@@ -1699,12 +1696,10 @@ def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:
raise TypeError(f"unsupported type: {type(other)}")
@overload
- def __matmul__(self, other: Series) -> Series:
- ...
+ def __matmul__(self, other: Series) -> Series: ...
@overload
- def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:
- ...
+ def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ...
def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:
"""
@@ -1930,8 +1925,7 @@ def to_dict(
*,
into: type[MutableMappingT] | MutableMappingT,
index: bool = ...,
- ) -> MutableMappingT:
- ...
+ ) -> MutableMappingT: ...
@overload
def to_dict(
@@ -1940,8 +1934,7 @@ def to_dict(
*,
into: type[MutableMappingT] | MutableMappingT,
index: bool = ...,
- ) -> list[MutableMappingT]:
- ...
+ ) -> list[MutableMappingT]: ...
@overload
def to_dict(
@@ -1950,8 +1943,7 @@ def to_dict(
*,
into: type[dict] = ...,
index: bool = ...,
- ) -> dict:
- ...
+ ) -> dict: ...
@overload
def to_dict(
@@ -1960,8 +1952,7 @@ def to_dict(
*,
into: type[dict] = ...,
index: bool = ...,
- ) -> list[dict]:
- ...
+ ) -> list[dict]: ...
# error: Incompatible default for argument "into" (default has type "type
# [dict[Any, Any]]", argument has type "type[MutableMappingT] | MutableMappingT")
@@ -2697,8 +2688,7 @@ def to_markdown(
index: bool = ...,
storage_options: StorageOptions | None = ...,
**kwargs,
- ) -> str:
- ...
+ ) -> str: ...
@overload
def to_markdown(
@@ -2709,8 +2699,7 @@ def to_markdown(
index: bool = ...,
storage_options: StorageOptions | None = ...,
**kwargs,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def to_markdown(
@@ -2721,8 +2710,7 @@ def to_markdown(
index: bool = ...,
storage_options: StorageOptions | None = ...,
**kwargs,
- ) -> str | None:
- ...
+ ) -> str | None: ...
@doc(
Series.to_markdown,
@@ -2785,8 +2773,7 @@ def to_parquet(
partition_cols: list[str] | None = ...,
storage_options: StorageOptions = ...,
**kwargs,
- ) -> bytes:
- ...
+ ) -> bytes: ...
@overload
def to_parquet(
@@ -2799,8 +2786,7 @@ def to_parquet(
partition_cols: list[str] | None = ...,
storage_options: StorageOptions = ...,
**kwargs,
- ) -> None:
- ...
+ ) -> None: ...
@doc(storage_options=_shared_docs["storage_options"])
def to_parquet(
@@ -2913,8 +2899,7 @@ def to_orc(
engine: Literal["pyarrow"] = ...,
index: bool | None = ...,
engine_kwargs: dict[str, Any] | None = ...,
- ) -> bytes:
- ...
+ ) -> bytes: ...
@overload
def to_orc(
@@ -2924,8 +2909,7 @@ def to_orc(
engine: Literal["pyarrow"] = ...,
index: bool | None = ...,
engine_kwargs: dict[str, Any] | None = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def to_orc(
@@ -2935,8 +2919,7 @@ def to_orc(
engine: Literal["pyarrow"] = ...,
index: bool | None = ...,
engine_kwargs: dict[str, Any] | None = ...,
- ) -> bytes | None:
- ...
+ ) -> bytes | None: ...
def to_orc(
self,
@@ -3053,8 +3036,7 @@ def to_html(
table_id: str | None = ...,
render_links: bool = ...,
encoding: str | None = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def to_html(
@@ -3083,8 +3065,7 @@ def to_html(
table_id: str | None = ...,
render_links: bool = ...,
encoding: str | None = ...,
- ) -> str:
- ...
+ ) -> str: ...
@Substitution(
header_type="bool",
@@ -3225,8 +3206,7 @@ def to_xml(
stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = ...,
compression: CompressionOptions = ...,
storage_options: StorageOptions | None = ...,
- ) -> str:
- ...
+ ) -> str: ...
@overload
def to_xml(
@@ -3248,8 +3228,7 @@ def to_xml(
stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = ...,
compression: CompressionOptions = ...,
storage_options: StorageOptions | None = ...,
- ) -> None:
- ...
+ ) -> None: ...
@doc(
storage_options=_shared_docs["storage_options"],
@@ -4384,16 +4363,17 @@ def _get_item(self, item: Hashable) -> Series:
# Unsorted
@overload
- def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame:
- ...
+ def query(
+ self, expr: str, *, inplace: Literal[False] = ..., **kwargs
+ ) -> DataFrame: ...
@overload
- def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None:
- ...
+ def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ...
@overload
- def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None:
- ...
+ def query(
+ self, expr: str, *, inplace: bool = ..., **kwargs
+ ) -> DataFrame | None: ...
def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None:
"""
@@ -4554,12 +4534,10 @@ def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | No
return result
@overload
- def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any:
- ...
+ def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ...
@overload
- def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None:
- ...
+ def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ...
def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None:
"""
@@ -5114,8 +5092,7 @@ def drop(
level: Level = ...,
inplace: Literal[True],
errors: IgnoreRaise = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def drop(
@@ -5128,8 +5105,7 @@ def drop(
level: Level = ...,
inplace: Literal[False] = ...,
errors: IgnoreRaise = ...,
- ) -> DataFrame:
- ...
+ ) -> DataFrame: ...
@overload
def drop(
@@ -5142,8 +5118,7 @@ def drop(
level: Level = ...,
inplace: bool = ...,
errors: IgnoreRaise = ...,
- ) -> DataFrame | None:
- ...
+ ) -> DataFrame | None: ...
def drop(
self,
@@ -5325,8 +5300,7 @@ def rename(
inplace: Literal[True],
level: Level = ...,
errors: IgnoreRaise = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def rename(
@@ -5340,8 +5314,7 @@ def rename(
inplace: Literal[False] = ...,
level: Level = ...,
errors: IgnoreRaise = ...,
- ) -> DataFrame:
- ...
+ ) -> DataFrame: ...
@overload
def rename(
@@ -5355,8 +5328,7 @@ def rename(
inplace: bool = ...,
level: Level = ...,
errors: IgnoreRaise = ...,
- ) -> DataFrame | None:
- ...
+ ) -> DataFrame | None: ...
def rename(
self,
@@ -5549,14 +5521,12 @@ def pop(self, item: Hashable) -> Series:
@overload
def _replace_columnwise(
self, mapping: dict[Hashable, tuple[Any, Any]], inplace: Literal[True], regex
- ) -> None:
- ...
+ ) -> None: ...
@overload
def _replace_columnwise(
self, mapping: dict[Hashable, tuple[Any, Any]], inplace: Literal[False], regex
- ) -> Self:
- ...
+ ) -> Self: ...
def _replace_columnwise(
self, mapping: dict[Hashable, tuple[Any, Any]], inplace: bool, regex
@@ -5710,8 +5680,7 @@ def set_index(
append: bool = ...,
inplace: Literal[False] = ...,
verify_integrity: bool = ...,
- ) -> DataFrame:
- ...
+ ) -> DataFrame: ...
@overload
def set_index(
@@ -5722,8 +5691,7 @@ def set_index(
append: bool = ...,
inplace: Literal[True],
verify_integrity: bool = ...,
- ) -> None:
- ...
+ ) -> None: ...
def set_index(
self,
@@ -5943,8 +5911,7 @@ def reset_index(
col_fill: Hashable = ...,
allow_duplicates: bool | lib.NoDefault = ...,
names: Hashable | Sequence[Hashable] | None = None,
- ) -> DataFrame:
- ...
+ ) -> DataFrame: ...
@overload
def reset_index(
@@ -5957,8 +5924,7 @@ def reset_index(
col_fill: Hashable = ...,
allow_duplicates: bool | lib.NoDefault = ...,
names: Hashable | Sequence[Hashable] | None = None,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def reset_index(
@@ -5971,8 +5937,7 @@ def reset_index(
col_fill: Hashable = ...,
allow_duplicates: bool | lib.NoDefault = ...,
names: Hashable | Sequence[Hashable] | None = None,
- ) -> DataFrame | None:
- ...
+ ) -> DataFrame | None: ...
def reset_index(
self,
@@ -6258,8 +6223,7 @@ def dropna(
subset: IndexLabel = ...,
inplace: Literal[False] = ...,
ignore_index: bool = ...,
- ) -> DataFrame:
- ...
+ ) -> DataFrame: ...
@overload
def dropna(
@@ -6271,8 +6235,7 @@ def dropna(
subset: IndexLabel = ...,
inplace: Literal[True],
ignore_index: bool = ...,
- ) -> None:
- ...
+ ) -> None: ...
def dropna(
self,
@@ -6445,8 +6408,7 @@ def drop_duplicates(
keep: DropKeep = ...,
inplace: Literal[True],
ignore_index: bool = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def drop_duplicates(
@@ -6456,8 +6418,7 @@ def drop_duplicates(
keep: DropKeep = ...,
inplace: Literal[False] = ...,
ignore_index: bool = ...,
- ) -> DataFrame:
- ...
+ ) -> DataFrame: ...
@overload
def drop_duplicates(
@@ -6467,8 +6428,7 @@ def drop_duplicates(
keep: DropKeep = ...,
inplace: bool = ...,
ignore_index: bool = ...,
- ) -> DataFrame | None:
- ...
+ ) -> DataFrame | None: ...
def drop_duplicates(
self,
@@ -6727,8 +6687,7 @@ def sort_values(
na_position: NaPosition = ...,
ignore_index: bool = ...,
key: ValueKeyFunc = ...,
- ) -> DataFrame:
- ...
+ ) -> DataFrame: ...
@overload
def sort_values(
@@ -6742,8 +6701,7 @@ def sort_values(
na_position: str = ...,
ignore_index: bool = ...,
key: ValueKeyFunc = ...,
- ) -> None:
- ...
+ ) -> None: ...
def sort_values(
self,
@@ -7023,8 +6981,7 @@ def sort_index(
sort_remaining: bool = ...,
ignore_index: bool = ...,
key: IndexKeyFunc = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def sort_index(
@@ -7039,8 +6996,7 @@ def sort_index(
sort_remaining: bool = ...,
ignore_index: bool = ...,
key: IndexKeyFunc = ...,
- ) -> DataFrame:
- ...
+ ) -> DataFrame: ...
@overload
def sort_index(
@@ -7055,8 +7011,7 @@ def sort_index(
sort_remaining: bool = ...,
ignore_index: bool = ...,
key: IndexKeyFunc = ...,
- ) -> DataFrame | None:
- ...
+ ) -> DataFrame | None: ...
def sort_index(
self,
@@ -11356,8 +11311,7 @@ def any(
bool_only: bool = ...,
skipna: bool = ...,
**kwargs,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def any(
@@ -11367,8 +11321,7 @@ def any(
bool_only: bool = ...,
skipna: bool = ...,
**kwargs,
- ) -> bool:
- ...
+ ) -> bool: ...
@overload
def any(
@@ -11378,8 +11331,7 @@ def any(
bool_only: bool = ...,
skipna: bool = ...,
**kwargs,
- ) -> Series | bool:
- ...
+ ) -> Series | bool: ...
@doc(make_doc("any", ndim=2))
def any(
@@ -11405,8 +11357,7 @@ def all(
bool_only: bool = ...,
skipna: bool = ...,
**kwargs,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def all(
@@ -11416,8 +11367,7 @@ def all(
bool_only: bool = ...,
skipna: bool = ...,
**kwargs,
- ) -> bool:
- ...
+ ) -> bool: ...
@overload
def all(
@@ -11427,8 +11377,7 @@ def all(
bool_only: bool = ...,
skipna: bool = ...,
**kwargs,
- ) -> Series | bool:
- ...
+ ) -> Series | bool: ...
@doc(make_doc("all", ndim=2))
def all(
@@ -11454,8 +11403,7 @@ def min(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def min(
@@ -11465,8 +11413,7 @@ def min(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Any:
- ...
+ ) -> Any: ...
@overload
def min(
@@ -11476,8 +11423,7 @@ def min(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series | Any:
- ...
+ ) -> Series | Any: ...
@doc(make_doc("min", ndim=2))
def min(
@@ -11503,8 +11449,7 @@ def max(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def max(
@@ -11514,8 +11459,7 @@ def max(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Any:
- ...
+ ) -> Any: ...
@overload
def max(
@@ -11525,8 +11469,7 @@ def max(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series | Any:
- ...
+ ) -> Series | Any: ...
@doc(make_doc("max", ndim=2))
def max(
@@ -11592,8 +11535,7 @@ def mean(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def mean(
@@ -11603,8 +11545,7 @@ def mean(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Any:
- ...
+ ) -> Any: ...
@overload
def mean(
@@ -11614,8 +11555,7 @@ def mean(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series | Any:
- ...
+ ) -> Series | Any: ...
@doc(make_doc("mean", ndim=2))
def mean(
@@ -11641,8 +11581,7 @@ def median(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def median(
@@ -11652,8 +11591,7 @@ def median(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Any:
- ...
+ ) -> Any: ...
@overload
def median(
@@ -11663,8 +11601,7 @@ def median(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series | Any:
- ...
+ ) -> Series | Any: ...
@doc(make_doc("median", ndim=2))
def median(
@@ -11691,8 +11628,7 @@ def sem(
ddof: int = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def sem(
@@ -11703,8 +11639,7 @@ def sem(
ddof: int = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Any:
- ...
+ ) -> Any: ...
@overload
def sem(
@@ -11715,8 +11650,7 @@ def sem(
ddof: int = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series | Any:
- ...
+ ) -> Series | Any: ...
@doc(make_doc("sem", ndim=2))
def sem(
@@ -11744,8 +11678,7 @@ def var(
ddof: int = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def var(
@@ -11756,8 +11689,7 @@ def var(
ddof: int = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Any:
- ...
+ ) -> Any: ...
@overload
def var(
@@ -11768,8 +11700,7 @@ def var(
ddof: int = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series | Any:
- ...
+ ) -> Series | Any: ...
@doc(make_doc("var", ndim=2))
def var(
@@ -11797,8 +11728,7 @@ def std(
ddof: int = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def std(
@@ -11809,8 +11739,7 @@ def std(
ddof: int = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Any:
- ...
+ ) -> Any: ...
@overload
def std(
@@ -11821,8 +11750,7 @@ def std(
ddof: int = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series | Any:
- ...
+ ) -> Series | Any: ...
@doc(make_doc("std", ndim=2))
def std(
@@ -11849,8 +11777,7 @@ def skew(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def skew(
@@ -11860,8 +11787,7 @@ def skew(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Any:
- ...
+ ) -> Any: ...
@overload
def skew(
@@ -11871,8 +11797,7 @@ def skew(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series | Any:
- ...
+ ) -> Series | Any: ...
@doc(make_doc("skew", ndim=2))
def skew(
@@ -11898,8 +11823,7 @@ def kurt(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def kurt(
@@ -11909,8 +11833,7 @@ def kurt(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Any:
- ...
+ ) -> Any: ...
@overload
def kurt(
@@ -11920,8 +11843,7 @@ def kurt(
skipna: bool = ...,
numeric_only: bool = ...,
**kwargs,
- ) -> Series | Any:
- ...
+ ) -> Series | Any: ...
@doc(make_doc("kurt", ndim=2))
def kurt(
@@ -12187,8 +12109,7 @@ def quantile(
numeric_only: bool = ...,
interpolation: QuantileInterpolation = ...,
method: Literal["single", "table"] = ...,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def quantile(
@@ -12198,8 +12119,7 @@ def quantile(
numeric_only: bool = ...,
interpolation: QuantileInterpolation = ...,
method: Literal["single", "table"] = ...,
- ) -> Series | DataFrame:
- ...
+ ) -> Series | DataFrame: ...
@overload
def quantile(
@@ -12209,8 +12129,7 @@ def quantile(
numeric_only: bool = ...,
interpolation: QuantileInterpolation = ...,
method: Literal["single", "table"] = ...,
- ) -> Series | DataFrame:
- ...
+ ) -> Series | DataFrame: ...
def quantile(
self,
@@ -12841,9 +12760,9 @@ def values(self) -> np.ndarray:
def _from_nested_dict(
data: Mapping[HashableT, Mapping[HashableT2, T]],
) -> collections.defaultdict[HashableT2, dict[HashableT, T]]:
- new_data: collections.defaultdict[
- HashableT2, dict[HashableT, T]
- ] = collections.defaultdict(dict)
+ new_data: collections.defaultdict[HashableT2, dict[HashableT, T]] = (
+ collections.defaultdict(dict)
+ )
for index, s in data.items():
for col, v in s.items():
new_data[col][index] = v
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index bfbe257911d0a..5c8842162007d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -723,16 +723,15 @@ def set_axis(
return self._set_axis_nocheck(labels, axis, inplace=False)
@overload
- def _set_axis_nocheck(self, labels, axis: Axis, inplace: Literal[False]) -> Self:
- ...
+ def _set_axis_nocheck(
+ self, labels, axis: Axis, inplace: Literal[False]
+ ) -> Self: ...
@overload
- def _set_axis_nocheck(self, labels, axis: Axis, inplace: Literal[True]) -> None:
- ...
+ def _set_axis_nocheck(self, labels, axis: Axis, inplace: Literal[True]) -> None: ...
@overload
- def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool) -> Self | None:
- ...
+ def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool) -> Self | None: ...
@final
def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool) -> Self | None:
@@ -953,8 +952,7 @@ def _rename(
inplace: Literal[False] = ...,
level: Level | None = ...,
errors: str = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def _rename(
@@ -968,8 +966,7 @@ def _rename(
inplace: Literal[True],
level: Level | None = ...,
errors: str = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def _rename(
@@ -983,8 +980,7 @@ def _rename(
inplace: bool,
level: Level | None = ...,
errors: str = ...,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
@final
def _rename(
@@ -1067,8 +1063,7 @@ def rename_axis(
axis: Axis = ...,
copy: bool | None = ...,
inplace: Literal[False] = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def rename_axis(
@@ -1080,8 +1075,7 @@ def rename_axis(
axis: Axis = ...,
copy: bool | None = ...,
inplace: Literal[True],
- ) -> None:
- ...
+ ) -> None: ...
@overload
def rename_axis(
@@ -1093,8 +1087,7 @@ def rename_axis(
axis: Axis = ...,
copy: bool | None = ...,
inplace: bool = ...,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
def rename_axis(
self,
@@ -1266,16 +1259,17 @@ class name
@overload
def _set_axis_name(
self, name, axis: Axis = ..., *, inplace: Literal[False] = ...
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
- def _set_axis_name(self, name, axis: Axis = ..., *, inplace: Literal[True]) -> None:
- ...
+ def _set_axis_name(
+ self, name, axis: Axis = ..., *, inplace: Literal[True]
+ ) -> None: ...
@overload
- def _set_axis_name(self, name, axis: Axis = ..., *, inplace: bool) -> Self | None:
- ...
+ def _set_axis_name(
+ self, name, axis: Axis = ..., *, inplace: bool
+ ) -> Self | None: ...
@final
def _set_axis_name(
@@ -3200,8 +3194,7 @@ def to_latex(
caption: str | tuple[str, str] | None = ...,
label: str | None = ...,
position: str | None = ...,
- ) -> str:
- ...
+ ) -> str: ...
@overload
def to_latex(
@@ -3228,8 +3221,7 @@ def to_latex(
caption: str | tuple[str, str] | None = ...,
label: str | None = ...,
position: str | None = ...,
- ) -> None:
- ...
+ ) -> None: ...
@final
def to_latex(
@@ -3610,8 +3602,7 @@ def to_csv(
decimal: str = ...,
errors: OpenFileErrors = ...,
storage_options: StorageOptions = ...,
- ) -> str:
- ...
+ ) -> str: ...
@overload
def to_csv(
@@ -3638,8 +3629,7 @@ def to_csv(
decimal: str = ...,
errors: OpenFileErrors = ...,
storage_options: StorageOptions = ...,
- ) -> None:
- ...
+ ) -> None: ...
@final
@doc(
@@ -4403,8 +4393,7 @@ def drop(
level: Level | None = ...,
inplace: Literal[True],
errors: IgnoreRaise = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def drop(
@@ -4417,8 +4406,7 @@ def drop(
level: Level | None = ...,
inplace: Literal[False] = ...,
errors: IgnoreRaise = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def drop(
@@ -4431,8 +4419,7 @@ def drop(
level: Level | None = ...,
inplace: bool = ...,
errors: IgnoreRaise = ...,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
def drop(
self,
@@ -4726,8 +4713,7 @@ def sort_values(
na_position: NaPosition = ...,
ignore_index: bool = ...,
key: ValueKeyFunc = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def sort_values(
@@ -4740,8 +4726,7 @@ def sort_values(
na_position: NaPosition = ...,
ignore_index: bool = ...,
key: ValueKeyFunc = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def sort_values(
@@ -4754,8 +4739,7 @@ def sort_values(
na_position: NaPosition = ...,
ignore_index: bool = ...,
key: ValueKeyFunc = ...,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
def sort_values(
self,
@@ -4925,8 +4909,7 @@ def sort_index(
sort_remaining: bool = ...,
ignore_index: bool = ...,
key: IndexKeyFunc = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def sort_index(
@@ -4941,8 +4924,7 @@ def sort_index(
sort_remaining: bool = ...,
ignore_index: bool = ...,
key: IndexKeyFunc = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def sort_index(
@@ -4957,8 +4939,7 @@ def sort_index(
sort_remaining: bool = ...,
ignore_index: bool = ...,
key: IndexKeyFunc = ...,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
def sort_index(
self,
@@ -5822,8 +5803,7 @@ def pipe(
func: Callable[Concatenate[Self, P], T],
*args: P.args,
**kwargs: P.kwargs,
- ) -> T:
- ...
+ ) -> T: ...
@overload
def pipe(
@@ -5831,8 +5811,7 @@ def pipe(
func: tuple[Callable[..., T], str],
*args: Any,
**kwargs: Any,
- ) -> T:
- ...
+ ) -> T: ...
@final
@doc(klass=_shared_doc_kwargs["klass"])
@@ -6773,8 +6752,7 @@ def fillna(
axis: Axis | None = ...,
inplace: Literal[False] = ...,
limit: int | None = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def fillna(
@@ -6785,8 +6763,7 @@ def fillna(
axis: Axis | None = ...,
inplace: Literal[True],
limit: int | None = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def fillna(
@@ -6797,8 +6774,7 @@ def fillna(
axis: Axis | None = ...,
inplace: bool = ...,
limit: int | None = ...,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
@final
@doc(
@@ -7066,8 +7042,7 @@ def ffill(
inplace: Literal[False] = ...,
limit: None | int = ...,
limit_area: Literal["inside", "outside"] | None = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def ffill(
@@ -7077,8 +7052,7 @@ def ffill(
inplace: Literal[True],
limit: None | int = ...,
limit_area: Literal["inside", "outside"] | None = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def ffill(
@@ -7088,8 +7062,7 @@ def ffill(
inplace: bool = ...,
limit: None | int = ...,
limit_area: Literal["inside", "outside"] | None = ...,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
@final
@doc(
@@ -7198,8 +7171,7 @@ def bfill(
inplace: Literal[False] = ...,
limit: None | int = ...,
limit_area: Literal["inside", "outside"] | None = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def bfill(
@@ -7208,8 +7180,7 @@ def bfill(
axis: None | Axis = ...,
inplace: Literal[True],
limit: None | int = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def bfill(
@@ -7219,8 +7190,7 @@ def bfill(
inplace: bool = ...,
limit: None | int = ...,
limit_area: Literal["inside", "outside"] | None = ...,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
@final
@doc(
@@ -7338,8 +7308,7 @@ def replace(
limit: int | None = ...,
regex: bool = ...,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def replace(
@@ -7351,8 +7320,7 @@ def replace(
limit: int | None = ...,
regex: bool = ...,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def replace(
@@ -7364,8 +7332,7 @@ def replace(
limit: int | None = ...,
regex: bool = ...,
method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
@final
@doc(
@@ -7626,8 +7593,7 @@ def interpolate(
limit_direction: Literal["forward", "backward", "both"] | None = ...,
limit_area: Literal["inside", "outside"] | None = ...,
**kwargs,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def interpolate(
@@ -7640,8 +7606,7 @@ def interpolate(
limit_direction: Literal["forward", "backward", "both"] | None = ...,
limit_area: Literal["inside", "outside"] | None = ...,
**kwargs,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def interpolate(
@@ -7654,8 +7619,7 @@ def interpolate(
limit_direction: Literal["forward", "backward", "both"] | None = ...,
limit_area: Literal["inside", "outside"] | None = ...,
**kwargs,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
@final
def interpolate(
@@ -8332,8 +8296,7 @@ def clip(
axis: Axis | None = ...,
inplace: Literal[False] = ...,
**kwargs,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def clip(
@@ -8344,8 +8307,7 @@ def clip(
axis: Axis | None = ...,
inplace: Literal[True],
**kwargs,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def clip(
@@ -8356,8 +8318,7 @@ def clip(
axis: Axis | None = ...,
inplace: bool = ...,
**kwargs,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
@final
def clip(
@@ -9722,8 +9683,7 @@ def _where(
inplace: Literal[False] = ...,
axis: Axis | None = ...,
level=...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def _where(
@@ -9734,8 +9694,7 @@ def _where(
inplace: Literal[True],
axis: Axis | None = ...,
level=...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def _where(
@@ -9746,8 +9705,7 @@ def _where(
inplace: bool,
axis: Axis | None = ...,
level=...,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
@final
def _where(
@@ -9909,8 +9867,7 @@ def where(
inplace: Literal[False] = ...,
axis: Axis | None = ...,
level: Level = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def where(
@@ -9921,8 +9878,7 @@ def where(
inplace: Literal[True],
axis: Axis | None = ...,
level: Level = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def where(
@@ -9933,8 +9889,7 @@ def where(
inplace: bool = ...,
axis: Axis | None = ...,
level: Level = ...,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
@final
@doc(
@@ -10115,8 +10070,7 @@ def mask(
inplace: Literal[False] = ...,
axis: Axis | None = ...,
level: Level = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def mask(
@@ -10127,8 +10081,7 @@ def mask(
inplace: Literal[True],
axis: Axis | None = ...,
level: Level = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def mask(
@@ -10139,8 +10092,7 @@ def mask(
inplace: bool = ...,
axis: Axis | None = ...,
level: Level = ...,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
@final
@doc(
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 8b776dc7a9f79..bad9749b5ecee 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -1,6 +1,7 @@
"""
Provide basic components for groupby.
"""
+
from __future__ import annotations
import dataclasses
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index d48592d1a61cb..64f55c1df4309 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -5,6 +5,7 @@
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
+
from __future__ import annotations
from collections import abc
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index c294ab855e003..46831b922d24e 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -6,6 +6,7 @@ class providing the base-class of operations.
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
+
from __future__ import annotations
from collections.abc import (
@@ -802,8 +803,7 @@ def pipe(
func: Callable[Concatenate[Self, P], T],
*args: P.args,
**kwargs: P.kwargs,
- ) -> T:
- ...
+ ) -> T: ...
@overload
def pipe(
@@ -811,8 +811,7 @@ def pipe(
func: tuple[Callable[..., T], str],
*args: Any,
**kwargs: Any,
- ) -> T:
- ...
+ ) -> T: ...
@Substitution(
klass="GroupBy",
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 1cf6df426f8b7..3040f9c64beff 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -2,6 +2,7 @@
Provide user facing operators for doing the split part of the
split-apply-combine paradigm.
"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/core/groupby/numba_.py b/pandas/core/groupby/numba_.py
index 3b7a58e87603e..b22fc9248eeca 100644
--- a/pandas/core/groupby/numba_.py
+++ b/pandas/core/groupby/numba_.py
@@ -1,4 +1,5 @@
"""Common utilities for Numba operations with groupby ops"""
+
from __future__ import annotations
import functools
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index fc5747595ad02..acf4c7bebf52d 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -5,6 +5,7 @@
operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
"""
+
from __future__ import annotations
import collections
diff --git a/pandas/core/indexers/objects.py b/pandas/core/indexers/objects.py
index 3dd256e9ce45d..2e6bcda520aba 100644
--- a/pandas/core/indexers/objects.py
+++ b/pandas/core/indexers/objects.py
@@ -1,4 +1,5 @@
"""Indexer objects for computing start/end window bounds for rolling operations"""
+
from __future__ import annotations
from datetime import timedelta
diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py
index 78dbe3a1ca632..b089be3469d87 100644
--- a/pandas/core/indexers/utils.py
+++ b/pandas/core/indexers/utils.py
@@ -1,6 +1,7 @@
"""
Low-dependency indexing utilities.
"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index 5881f5e040370..59d6e313a2d93 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -1,6 +1,7 @@
"""
datetimelike delegation
"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index c72c5fa019bd7..052ecbafa686a 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1767,16 +1767,13 @@ def _set_names(self, values, *, level=None) -> None:
names = property(fset=_set_names, fget=_get_names)
@overload
- def set_names(self, names, *, level=..., inplace: Literal[False] = ...) -> Self:
- ...
+ def set_names(self, names, *, level=..., inplace: Literal[False] = ...) -> Self: ...
@overload
- def set_names(self, names, *, level=..., inplace: Literal[True]) -> None:
- ...
+ def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: ...
@overload
- def set_names(self, names, *, level=..., inplace: bool = ...) -> Self | None:
- ...
+ def set_names(self, names, *, level=..., inplace: bool = ...) -> Self | None: ...
def set_names(self, names, *, level=None, inplace: bool = False) -> Self | None:
"""
@@ -1883,12 +1880,10 @@ def set_names(self, names, *, level=None, inplace: bool = False) -> Self | None:
return None
@overload
- def rename(self, name, *, inplace: Literal[False] = ...) -> Self:
- ...
+ def rename(self, name, *, inplace: Literal[False] = ...) -> Self: ...
@overload
- def rename(self, name, *, inplace: Literal[True]) -> None:
- ...
+ def rename(self, name, *, inplace: Literal[True]) -> None: ...
def rename(self, name, *, inplace: bool = False) -> Self | None:
"""
@@ -4110,8 +4105,7 @@ def join(
level: Level = ...,
return_indexers: Literal[True],
sort: bool = ...,
- ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
- ...
+ ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ...
@overload
def join(
@@ -4122,8 +4116,7 @@ def join(
level: Level = ...,
return_indexers: Literal[False] = ...,
sort: bool = ...,
- ) -> Index:
- ...
+ ) -> Index: ...
@overload
def join(
@@ -4134,8 +4127,9 @@ def join(
level: Level = ...,
return_indexers: bool = ...,
sort: bool = ...,
- ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
- ...
+ ) -> (
+ Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]
+ ): ...
@final
@_maybe_return_indexers
@@ -5452,8 +5446,7 @@ def sort_values(
ascending: bool = ...,
na_position: NaPosition = ...,
key: Callable | None = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def sort_values(
@@ -5463,8 +5456,7 @@ def sort_values(
ascending: bool = ...,
na_position: NaPosition = ...,
key: Callable | None = ...,
- ) -> tuple[Self, np.ndarray]:
- ...
+ ) -> tuple[Self, np.ndarray]: ...
@overload
def sort_values(
@@ -5474,8 +5466,7 @@ def sort_values(
ascending: bool = ...,
na_position: NaPosition = ...,
key: Callable | None = ...,
- ) -> Self | tuple[Self, np.ndarray]:
- ...
+ ) -> Self | tuple[Self, np.ndarray]: ...
def sort_values(
self,
@@ -5872,20 +5863,17 @@ def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None:
@overload
def _get_indexer_non_comparable(
self, target: Index, method, unique: Literal[True] = ...
- ) -> npt.NDArray[np.intp]:
- ...
+ ) -> npt.NDArray[np.intp]: ...
@overload
def _get_indexer_non_comparable(
self, target: Index, method, unique: Literal[False]
- ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
- ...
+ ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
@overload
def _get_indexer_non_comparable(
self, target: Index, method, unique: bool = True
- ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
- ...
+ ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
@final
def _get_indexer_non_comparable(
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index a17b585fb1166..7e8d808769bc1 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -1,6 +1,7 @@
"""
Base and utility classes for tseries type pandas objects.
"""
+
from __future__ import annotations
from abc import (
@@ -148,8 +149,7 @@ def freqstr(self) -> str:
@cache_readonly
@abstractmethod
- def _resolution_obj(self) -> Resolution:
- ...
+ def _resolution_obj(self) -> Resolution: ...
@cache_readonly
@doc(DatetimeLikeArrayMixin.resolution)
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index d6fbeb9043bc6..fc806a3546571 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -1,6 +1,7 @@
"""
Shared methods for Index subclasses backed by ExtensionArray.
"""
+
from __future__ import annotations
from inspect import signature
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index ea3e848356ab5..36f181110eccd 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1,4 +1,5 @@
-""" define the IntervalIndex """
+"""define the IntervalIndex"""
+
from __future__ import annotations
from operator import (
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 0781a86e5d57e..24f53f16e1985 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -578,8 +578,7 @@ def sort_values(
ascending: bool = ...,
na_position: NaPosition = ...,
key: Callable | None = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def sort_values(
@@ -589,8 +588,7 @@ def sort_values(
ascending: bool = ...,
na_position: NaPosition = ...,
key: Callable | None = ...,
- ) -> tuple[Self, np.ndarray | RangeIndex]:
- ...
+ ) -> tuple[Self, np.ndarray | RangeIndex]: ...
@overload
def sort_values(
@@ -600,8 +598,7 @@ def sort_values(
ascending: bool = ...,
na_position: NaPosition = ...,
key: Callable | None = ...,
- ) -> Self | tuple[Self, np.ndarray | RangeIndex]:
- ...
+ ) -> Self | tuple[Self, np.ndarray | RangeIndex]: ...
def sort_values(
self,
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index a929687544876..4a4b0ac1444d6 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -1,4 +1,5 @@
-""" implement the TimedeltaIndex """
+"""implement the TimedeltaIndex"""
+
from __future__ import annotations
from typing import TYPE_CHECKING
diff --git a/pandas/core/interchange/from_dataframe.py b/pandas/core/interchange/from_dataframe.py
index b296e6016a1ac..a952887d7eed2 100644
--- a/pandas/core/interchange/from_dataframe.py
+++ b/pandas/core/interchange/from_dataframe.py
@@ -468,8 +468,7 @@ def set_nulls(
col: Column,
validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None,
allow_modify_inplace: bool = ...,
-) -> np.ndarray:
- ...
+) -> np.ndarray: ...
@overload
@@ -478,8 +477,7 @@ def set_nulls(
col: Column,
validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None,
allow_modify_inplace: bool = ...,
-) -> pd.Series:
- ...
+) -> pd.Series: ...
@overload
@@ -488,8 +486,7 @@ def set_nulls(
col: Column,
validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None,
allow_modify_inplace: bool = ...,
-) -> np.ndarray | pd.Series:
- ...
+) -> np.ndarray | pd.Series: ...
def set_nulls(
diff --git a/pandas/core/internals/api.py b/pandas/core/internals/api.py
index b0b3937ca47ea..d6e1e8b38dfe3 100644
--- a/pandas/core/internals/api.py
+++ b/pandas/core/internals/api.py
@@ -6,6 +6,7 @@
2) Use only functions exposed here (or in core.internals)
"""
+
from __future__ import annotations
from typing import TYPE_CHECKING
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index 6bc3556902e80..93f1674fbd328 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -2,6 +2,7 @@
Functions for preparing various inputs passed to the DataFrame or Series
constructors before passing them to a BlockManager.
"""
+
from __future__ import annotations
from collections import abc
diff --git a/pandas/core/methods/describe.py b/pandas/core/methods/describe.py
index 337b2f7952213..b69c9dbdaf6fd 100644
--- a/pandas/core/methods/describe.py
+++ b/pandas/core/methods/describe.py
@@ -3,6 +3,7 @@
Method NDFrame.describe() delegates actual execution to function describe_ndframe().
"""
+
from __future__ import annotations
from abc import (
diff --git a/pandas/core/methods/to_dict.py b/pandas/core/methods/to_dict.py
index a88cf88ead66e..a5833514a9799 100644
--- a/pandas/core/methods/to_dict.py
+++ b/pandas/core/methods/to_dict.py
@@ -59,8 +59,7 @@ def to_dict(
*,
into: type[MutableMappingT] | MutableMappingT,
index: bool = ...,
-) -> MutableMappingT:
- ...
+) -> MutableMappingT: ...
@overload
@@ -70,8 +69,7 @@ def to_dict(
*,
into: type[MutableMappingT] | MutableMappingT,
index: bool = ...,
-) -> list[MutableMappingT]:
- ...
+) -> list[MutableMappingT]: ...
@overload
@@ -81,8 +79,7 @@ def to_dict(
*,
into: type[dict] = ...,
index: bool = ...,
-) -> dict:
- ...
+) -> dict: ...
@overload
@@ -92,8 +89,7 @@ def to_dict(
*,
into: type[dict] = ...,
index: bool = ...,
-) -> list[dict]:
- ...
+) -> list[dict]: ...
# error: Incompatible default for argument "into" (default has type "type[dict
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index cdc2ff6c51b06..3a5bf64520d75 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -1,6 +1,7 @@
"""
Routines for filling missing data.
"""
+
from __future__ import annotations
from functools import wraps
@@ -141,8 +142,7 @@ def clean_fill_method(
method: Literal["ffill", "pad", "bfill", "backfill"],
*,
allow_nearest: Literal[False] = ...,
-) -> Literal["pad", "backfill"]:
- ...
+) -> Literal["pad", "backfill"]: ...
@overload
@@ -150,8 +150,7 @@ def clean_fill_method(
method: Literal["ffill", "pad", "bfill", "backfill", "nearest"],
*,
allow_nearest: Literal[True],
-) -> Literal["pad", "backfill", "nearest"]:
- ...
+) -> Literal["pad", "backfill", "nearest"]: ...
def clean_fill_method(
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index ae889a7fdbc24..34a0bb1f45e2c 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -3,6 +3,7 @@
This is not a public API.
"""
+
from __future__ import annotations
from pandas.core.ops.array_ops import (
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
index 034a231f04488..810e30d369729 100644
--- a/pandas/core/ops/array_ops.py
+++ b/pandas/core/ops/array_ops.py
@@ -2,6 +2,7 @@
Functions for arithmetic and comparison operations on NumPy arrays and
ExtensionArrays.
"""
+
from __future__ import annotations
import datetime
diff --git a/pandas/core/ops/common.py b/pandas/core/ops/common.py
index fa085a1f0262b..d19ac6246e1cd 100644
--- a/pandas/core/ops/common.py
+++ b/pandas/core/ops/common.py
@@ -1,6 +1,7 @@
"""
Boilerplate functions used in defining binary operations.
"""
+
from __future__ import annotations
from functools import wraps
diff --git a/pandas/core/ops/dispatch.py b/pandas/core/ops/dispatch.py
index a939fdd3d041e..ebafc432dd89b 100644
--- a/pandas/core/ops/dispatch.py
+++ b/pandas/core/ops/dispatch.py
@@ -1,6 +1,7 @@
"""
Functions for defining unary operations.
"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/core/ops/docstrings.py b/pandas/core/ops/docstrings.py
index 5e97d1b67d826..8063a52a02163 100644
--- a/pandas/core/ops/docstrings.py
+++ b/pandas/core/ops/docstrings.py
@@ -1,6 +1,7 @@
"""
Templating for ops docstrings
"""
+
from __future__ import annotations
@@ -419,12 +420,12 @@ def make_flex_doc(op_name: str, typ: str) -> str:
if reverse_op is not None:
_op_descriptions[reverse_op] = _op_descriptions[key].copy()
_op_descriptions[reverse_op]["reverse"] = key
- _op_descriptions[key][
- "see_also_desc"
- ] = f"Reverse of the {_op_descriptions[key]['desc']} operator, {_py_num_ref}"
- _op_descriptions[reverse_op][
- "see_also_desc"
- ] = f"Element-wise {_op_descriptions[key]['desc']}, {_py_num_ref}"
+ _op_descriptions[key]["see_also_desc"] = (
+ f"Reverse of the {_op_descriptions[key]['desc']} operator, {_py_num_ref}"
+ )
+ _op_descriptions[reverse_op]["see_also_desc"] = (
+ f"Element-wise {_op_descriptions[key]['desc']}, {_py_num_ref}"
+ )
_flex_doc_SERIES = """
Return {desc} of series and other, element-wise (binary operator `{op_name}`).
diff --git a/pandas/core/ops/invalid.py b/pandas/core/ops/invalid.py
index 8af95de285938..7b3af99ee1a95 100644
--- a/pandas/core/ops/invalid.py
+++ b/pandas/core/ops/invalid.py
@@ -1,6 +1,7 @@
"""
Templates for invalid operations.
"""
+
from __future__ import annotations
import operator
diff --git a/pandas/core/ops/mask_ops.py b/pandas/core/ops/mask_ops.py
index e5d0626ad9119..427ae2fb87e55 100644
--- a/pandas/core/ops/mask_ops.py
+++ b/pandas/core/ops/mask_ops.py
@@ -1,6 +1,7 @@
"""
Ops for masked arrays.
"""
+
from __future__ import annotations
from typing import TYPE_CHECKING
diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py
index 0404da189dfa5..a4e9e5305f74d 100644
--- a/pandas/core/ops/missing.py
+++ b/pandas/core/ops/missing.py
@@ -21,6 +21,7 @@
3) divmod behavior consistent with 1) and 2).
"""
+
from __future__ import annotations
import operator
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 4c87af9ff14c7..43077e7aeecb4 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -257,8 +257,7 @@ def pipe(
func: Callable[Concatenate[Self, P], T],
*args: P.args,
**kwargs: P.kwargs,
- ) -> T:
- ...
+ ) -> T: ...
@overload
def pipe(
@@ -266,8 +265,7 @@ def pipe(
func: tuple[Callable[..., T], str],
*args: Any,
**kwargs: Any,
- ) -> T:
- ...
+ ) -> T: ...
@final
@Substitution(
@@ -2355,15 +2353,13 @@ def _set_grouper(
@overload
def _take_new_index(
obj: DataFrame, indexer: npt.NDArray[np.intp], new_index: Index
-) -> DataFrame:
- ...
+) -> DataFrame: ...
@overload
def _take_new_index(
obj: Series, indexer: npt.NDArray[np.intp], new_index: Index
-) -> Series:
- ...
+) -> Series: ...
def _take_new_index(
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 88323e5304cc4..8758ba3a475a6 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -1,6 +1,7 @@
"""
Concat routines.
"""
+
from __future__ import annotations
from collections import abc
@@ -80,8 +81,7 @@ def concat(
verify_integrity: bool = ...,
sort: bool = ...,
copy: bool | None = ...,
-) -> DataFrame:
- ...
+) -> DataFrame: ...
@overload
@@ -97,8 +97,7 @@ def concat(
verify_integrity: bool = ...,
sort: bool = ...,
copy: bool | None = ...,
-) -> Series:
- ...
+) -> Series: ...
@overload
@@ -114,8 +113,7 @@ def concat(
verify_integrity: bool = ...,
sort: bool = ...,
copy: bool | None = ...,
-) -> DataFrame | Series:
- ...
+) -> DataFrame | Series: ...
@overload
@@ -131,8 +129,7 @@ def concat(
verify_integrity: bool = ...,
sort: bool = ...,
copy: bool | None = ...,
-) -> DataFrame:
- ...
+) -> DataFrame: ...
@overload
@@ -148,8 +145,7 @@ def concat(
verify_integrity: bool = ...,
sort: bool = ...,
copy: bool | None = ...,
-) -> DataFrame | Series:
- ...
+) -> DataFrame | Series: ...
def concat(
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index d54bfec389a38..8ea2ac24e13c8 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1,6 +1,7 @@
"""
SQL-style merge routines
"""
+
from __future__ import annotations
from collections.abc import (
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index c770acb638b46..b28010c13d6dd 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -490,15 +490,13 @@ def _unstack_multiple(
@overload
-def unstack(obj: Series, level, fill_value=..., sort: bool = ...) -> DataFrame:
- ...
+def unstack(obj: Series, level, fill_value=..., sort: bool = ...) -> DataFrame: ...
@overload
def unstack(
obj: Series | DataFrame, level, fill_value=..., sort: bool = ...
-) -> Series | DataFrame:
- ...
+) -> Series | DataFrame: ...
def unstack(
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 82c697306edb2..1499afbde56d3 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -1,6 +1,7 @@
"""
Quantilization functions and related stuff
"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/core/roperator.py b/pandas/core/roperator.py
index 2f320f4e9c6b9..9ea4bea41cdea 100644
--- a/pandas/core/roperator.py
+++ b/pandas/core/roperator.py
@@ -2,6 +2,7 @@
Reversed Operations not available in the stdlib operator module.
Defining these instead of using lambdas allows us to reference them by name.
"""
+
from __future__ import annotations
import operator
diff --git a/pandas/core/sample.py b/pandas/core/sample.py
index eebbed3512c4e..5b1c4b6a331f5 100644
--- a/pandas/core/sample.py
+++ b/pandas/core/sample.py
@@ -1,6 +1,7 @@
"""
Module containing utilities for NDFrame.sample() and .GroupBy.sample()
"""
+
from __future__ import annotations
from typing import TYPE_CHECKING
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d7aed54da9014..699ff413efb91 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1,6 +1,7 @@
"""
Data structure for 1-dimensional cross-sectional and time series data
"""
+
from __future__ import annotations
from collections.abc import (
@@ -1282,8 +1283,7 @@ def reset_index(
name: Level = ...,
inplace: Literal[False] = ...,
allow_duplicates: bool = ...,
- ) -> DataFrame:
- ...
+ ) -> DataFrame: ...
@overload
def reset_index(
@@ -1294,8 +1294,7 @@ def reset_index(
name: Level = ...,
inplace: Literal[False] = ...,
allow_duplicates: bool = ...,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def reset_index(
@@ -1306,8 +1305,7 @@ def reset_index(
name: Level = ...,
inplace: Literal[True],
allow_duplicates: bool = ...,
- ) -> None:
- ...
+ ) -> None: ...
def reset_index(
self,
@@ -1487,8 +1485,7 @@ def to_string(
name=...,
max_rows: int | None = ...,
min_rows: int | None = ...,
- ) -> str:
- ...
+ ) -> str: ...
@overload
def to_string(
@@ -1504,8 +1501,7 @@ def to_string(
name=...,
max_rows: int | None = ...,
min_rows: int | None = ...,
- ) -> None:
- ...
+ ) -> None: ...
@deprecate_nonkeyword_arguments(
version="3.0.0", allowed_args=["self", "buf"], name="to_string"
@@ -1603,8 +1599,7 @@ def to_markdown(
index: bool = ...,
storage_options: StorageOptions | None = ...,
**kwargs,
- ) -> str:
- ...
+ ) -> str: ...
@overload
def to_markdown(
@@ -1615,8 +1610,7 @@ def to_markdown(
index: bool = ...,
storage_options: StorageOptions | None = ...,
**kwargs,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def to_markdown(
@@ -1627,8 +1621,7 @@ def to_markdown(
index: bool = ...,
storage_options: StorageOptions | None = ...,
**kwargs,
- ) -> str | None:
- ...
+ ) -> str | None: ...
@doc(
klass=_shared_doc_kwargs["klass"],
@@ -1759,12 +1752,10 @@ def keys(self) -> Index:
@overload
def to_dict(
self, *, into: type[MutableMappingT] | MutableMappingT
- ) -> MutableMappingT:
- ...
+ ) -> MutableMappingT: ...
@overload
- def to_dict(self, *, into: type[dict] = ...) -> dict:
- ...
+ def to_dict(self, *, into: type[dict] = ...) -> dict: ...
# error: Incompatible default for argument "into" (default has type "type[
# dict[Any, Any]]", argument has type "type[MutableMappingT] | MutableMappingT")
@@ -2140,20 +2131,17 @@ def drop_duplicates(
keep: DropKeep = ...,
inplace: Literal[False] = ...,
ignore_index: bool = ...,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def drop_duplicates(
self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ...
- ) -> None:
- ...
+ ) -> None: ...
@overload
def drop_duplicates(
self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ...
- ) -> Series | None:
- ...
+ ) -> Series | None: ...
def drop_duplicates(
self,
@@ -2539,24 +2527,21 @@ def round(self, decimals: int = 0, *args, **kwargs) -> Series:
@overload
def quantile(
self, q: float = ..., interpolation: QuantileInterpolation = ...
- ) -> float:
- ...
+ ) -> float: ...
@overload
def quantile(
self,
q: Sequence[float] | AnyArrayLike,
interpolation: QuantileInterpolation = ...,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def quantile(
self,
q: float | Sequence[float] | AnyArrayLike = ...,
interpolation: QuantileInterpolation = ...,
- ) -> float | Series:
- ...
+ ) -> float | Series: ...
def quantile(
self,
@@ -3369,8 +3354,7 @@ def sort_values(
na_position: NaPosition = ...,
ignore_index: bool = ...,
key: ValueKeyFunc = ...,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def sort_values(
@@ -3383,8 +3367,7 @@ def sort_values(
na_position: NaPosition = ...,
ignore_index: bool = ...,
key: ValueKeyFunc = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def sort_values(
@@ -3397,8 +3380,7 @@ def sort_values(
na_position: NaPosition = ...,
ignore_index: bool = ...,
key: ValueKeyFunc = ...,
- ) -> Series | None:
- ...
+ ) -> Series | None: ...
def sort_values(
self,
@@ -3607,8 +3589,7 @@ def sort_index(
sort_remaining: bool = ...,
ignore_index: bool = ...,
key: IndexKeyFunc = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def sort_index(
@@ -3623,8 +3604,7 @@ def sort_index(
sort_remaining: bool = ...,
ignore_index: bool = ...,
key: IndexKeyFunc = ...,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def sort_index(
@@ -3639,8 +3619,7 @@ def sort_index(
sort_remaining: bool = ...,
ignore_index: bool = ...,
key: IndexKeyFunc = ...,
- ) -> Series | None:
- ...
+ ) -> Series | None: ...
def sort_index(
self,
@@ -4668,8 +4647,7 @@ def rename(
inplace: Literal[True],
level: Level | None = ...,
errors: IgnoreRaise = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def rename(
@@ -4681,8 +4659,7 @@ def rename(
inplace: Literal[False] = ...,
level: Level | None = ...,
errors: IgnoreRaise = ...,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def rename(
@@ -4694,8 +4671,7 @@ def rename(
inplace: bool = ...,
level: Level | None = ...,
errors: IgnoreRaise = ...,
- ) -> Series | None:
- ...
+ ) -> Series | None: ...
def rename(
self,
@@ -4874,8 +4850,7 @@ def rename_axis(
axis: Axis = ...,
copy: bool = ...,
inplace: Literal[True],
- ) -> None:
- ...
+ ) -> None: ...
@overload
def rename_axis(
@@ -4886,8 +4861,7 @@ def rename_axis(
axis: Axis = ...,
copy: bool = ...,
inplace: Literal[False] = ...,
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def rename_axis(
@@ -4898,8 +4872,7 @@ def rename_axis(
axis: Axis = ...,
copy: bool = ...,
inplace: bool = ...,
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
def rename_axis(
self,
@@ -4989,8 +4962,7 @@ def drop(
level: Level | None = ...,
inplace: Literal[True],
errors: IgnoreRaise = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def drop(
@@ -5003,8 +4975,7 @@ def drop(
level: Level | None = ...,
inplace: Literal[False] = ...,
errors: IgnoreRaise = ...,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def drop(
@@ -5017,8 +4988,7 @@ def drop(
level: Level | None = ...,
inplace: bool = ...,
errors: IgnoreRaise = ...,
- ) -> Series | None:
- ...
+ ) -> Series | None: ...
def drop(
self,
@@ -5172,20 +5142,17 @@ def info(
@overload
def _replace_single(
self, to_replace, method: str, inplace: Literal[False], limit
- ) -> Self:
- ...
+ ) -> Self: ...
@overload
def _replace_single(
self, to_replace, method: str, inplace: Literal[True], limit
- ) -> None:
- ...
+ ) -> None: ...
@overload
def _replace_single(
self, to_replace, method: str, inplace: bool, limit
- ) -> Self | None:
- ...
+ ) -> Self | None: ...
# TODO(3.0): this can be removed once GH#33302 deprecation is enforced
def _replace_single(
@@ -5591,8 +5558,7 @@ def dropna(
inplace: Literal[False] = ...,
how: AnyAll | None = ...,
ignore_index: bool = ...,
- ) -> Series:
- ...
+ ) -> Series: ...
@overload
def dropna(
@@ -5602,8 +5568,7 @@ def dropna(
inplace: Literal[True],
how: AnyAll | None = ...,
ignore_index: bool = ...,
- ) -> None:
- ...
+ ) -> None: ...
def dropna(
self,
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 92ca014e30c1a..7034de365b0c1 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -1,4 +1,5 @@
-""" miscellaneous sorting / groupby utilities """
+"""miscellaneous sorting / groupby utilities"""
+
from __future__ import annotations
from collections import defaultdict
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index c416db4083f9a..b8b1d39d4eb20 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -622,8 +622,7 @@ def to_datetime(
unit: str | None = ...,
origin=...,
cache: bool = ...,
-) -> Timestamp:
- ...
+) -> Timestamp: ...
@overload
@@ -638,8 +637,7 @@ def to_datetime(
unit: str | None = ...,
origin=...,
cache: bool = ...,
-) -> Series:
- ...
+) -> Series: ...
@overload
@@ -654,8 +652,7 @@ def to_datetime(
unit: str | None = ...,
origin=...,
cache: bool = ...,
-) -> DatetimeIndex:
- ...
+) -> DatetimeIndex: ...
def to_datetime(
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index 5f3963c3d405e..409a27ea64488 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -1,6 +1,7 @@
"""
timedelta support tools
"""
+
from __future__ import annotations
from typing import (
@@ -53,8 +54,7 @@ def to_timedelta(
arg: str | float | timedelta,
unit: UnitChoices | None = ...,
errors: DateTimeErrorChoices = ...,
-) -> Timedelta:
- ...
+) -> Timedelta: ...
@overload
@@ -62,8 +62,7 @@ def to_timedelta(
arg: Series,
unit: UnitChoices | None = ...,
errors: DateTimeErrorChoices = ...,
-) -> Series:
- ...
+) -> Series: ...
@overload
@@ -71,8 +70,7 @@ def to_timedelta(
arg: list | tuple | range | ArrayLike | Index,
unit: UnitChoices | None = ...,
errors: DateTimeErrorChoices = ...,
-) -> TimedeltaIndex:
- ...
+) -> TimedeltaIndex: ...
def to_timedelta(
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index 4933de3212581..f7e9ff220eded 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -1,6 +1,7 @@
"""
data hash pandas / numpy objects
"""
+
from __future__ import annotations
import itertools
diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py
index 4825c9fee24b1..a6079785e7475 100644
--- a/pandas/core/util/numba_.py
+++ b/pandas/core/util/numba_.py
@@ -1,4 +1,5 @@
"""Common utilities for Numba operations"""
+
from __future__ import annotations
import types
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index fc8eddca09c84..004a3555f0212 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -1,4 +1,5 @@
"""Common utility functions for rolling operations"""
+
from __future__ import annotations
from collections import defaultdict
diff --git a/pandas/core/window/doc.py b/pandas/core/window/doc.py
index c3ccb471c973e..cdb670ee218b4 100644
--- a/pandas/core/window/doc.py
+++ b/pandas/core/window/doc.py
@@ -1,4 +1,5 @@
"""Any shareable docstring components for rolling/expanding/ewm"""
+
from __future__ import annotations
from textwrap import dedent
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index ac2c10447dee9..52eb8cf45d170 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -2,6 +2,7 @@
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
+
from __future__ import annotations
import copy
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 6d124bec72137..402bbdb872a18 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -1,6 +1,7 @@
"""
Expose public exceptions & warnings
"""
+
from __future__ import annotations
import ctypes
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py
index 8e8b22967ea01..aa20ec237e968 100644
--- a/pandas/io/clipboards.py
+++ b/pandas/io/clipboards.py
@@ -1,4 +1,5 @@
-""" io on the clipboard """
+"""io on the clipboard"""
+
from __future__ import annotations
from io import StringIO
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 682780a409a8b..3544883afedd6 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -1,4 +1,5 @@
"""Common IO api utilities"""
+
from __future__ import annotations
from abc import (
@@ -176,13 +177,11 @@ def is_url(url: object) -> bool:
@overload
-def _expand_user(filepath_or_buffer: str) -> str:
- ...
+def _expand_user(filepath_or_buffer: str) -> str: ...
@overload
-def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT:
- ...
+def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT: ...
def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT:
@@ -234,15 +233,15 @@ def validate_header_arg(header: object) -> None:
@overload
-def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str:
- ...
+def stringify_path(
+ filepath_or_buffer: FilePath, convert_file_like: bool = ...
+) -> str: ...
@overload
def stringify_path(
filepath_or_buffer: BaseBufferT, convert_file_like: bool = ...
-) -> BaseBufferT:
- ...
+) -> BaseBufferT: ...
def stringify_path(
@@ -627,8 +626,7 @@ def get_handle(
is_text: Literal[False],
errors: str | None = ...,
storage_options: StorageOptions = ...,
-) -> IOHandles[bytes]:
- ...
+) -> IOHandles[bytes]: ...
@overload
@@ -642,8 +640,7 @@ def get_handle(
is_text: Literal[True] = ...,
errors: str | None = ...,
storage_options: StorageOptions = ...,
-) -> IOHandles[str]:
- ...
+) -> IOHandles[str]: ...
@overload
@@ -657,8 +654,7 @@ def get_handle(
is_text: bool = ...,
errors: str | None = ...,
storage_options: StorageOptions = ...,
-) -> IOHandles[str] | IOHandles[bytes]:
- ...
+) -> IOHandles[str] | IOHandles[bytes]: ...
@doc(compression_options=_shared_docs["compression_options"] % "path_or_buf")
@@ -953,8 +949,7 @@ class _BufferedWriter(BytesIO, ABC): # type: ignore[misc]
buffer = BytesIO()
@abstractmethod
- def write_to_buffer(self) -> None:
- ...
+ def write_to_buffer(self) -> None: ...
def close(self) -> None:
if self.closed:
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index d77a955e41b00..2977f62b4d3c5 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -406,8 +406,7 @@ def read_excel(
skipfooter: int = ...,
storage_options: StorageOptions = ...,
dtype_backend: DtypeBackend | lib.NoDefault = ...,
-) -> DataFrame:
- ...
+) -> DataFrame: ...
@overload
@@ -445,8 +444,7 @@ def read_excel(
skipfooter: int = ...,
storage_options: StorageOptions = ...,
dtype_backend: DtypeBackend | lib.NoDefault = ...,
-) -> dict[IntStrT, DataFrame]:
- ...
+) -> dict[IntStrT, DataFrame]: ...
@doc(storage_options=_shared_docs["storage_options"])
@@ -1369,7 +1367,7 @@ def close(self) -> None:
b"\x09\x00\x04\x00\x07\x00\x10\x00", # BIFF2
b"\x09\x02\x06\x00\x00\x00\x10\x00", # BIFF3
b"\x09\x04\x06\x00\x00\x00\x10\x00", # BIFF4
- b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", # Compound File Binary
+ b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1", # Compound File Binary
)
ZIP_SIGNATURE = b"PK\x03\x04"
PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,)))
diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py
index bc7dca2d95b6b..cdb22a57399ed 100644
--- a/pandas/io/excel/_odswriter.py
+++ b/pandas/io/excel/_odswriter.py
@@ -238,12 +238,10 @@ def _make_table_cell(self, cell) -> tuple[object, Any]:
)
@overload
- def _process_style(self, style: dict[str, Any]) -> str:
- ...
+ def _process_style(self, style: dict[str, Any]) -> str: ...
@overload
- def _process_style(self, style: None) -> None:
- ...
+ def _process_style(self, style: None) -> None: ...
def _process_style(self, style: dict[str, Any] | None) -> str | None:
"""Convert a style dictionary to a OpenDocument style sheet
diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py
index 95d43f60a22c5..f879f16aa5dc8 100644
--- a/pandas/io/excel/_util.py
+++ b/pandas/io/excel/_util.py
@@ -161,23 +161,19 @@ def _range2cols(areas: str) -> list[int]:
@overload
-def maybe_convert_usecols(usecols: str | list[int]) -> list[int]:
- ...
+def maybe_convert_usecols(usecols: str | list[int]) -> list[int]: ...
@overload
-def maybe_convert_usecols(usecols: list[str]) -> list[str]:
- ...
+def maybe_convert_usecols(usecols: list[str]) -> list[str]: ...
@overload
-def maybe_convert_usecols(usecols: usecols_func) -> usecols_func:
- ...
+def maybe_convert_usecols(usecols: usecols_func) -> usecols_func: ...
@overload
-def maybe_convert_usecols(usecols: None) -> None:
- ...
+def maybe_convert_usecols(usecols: None) -> None: ...
def maybe_convert_usecols(
@@ -212,13 +208,11 @@ def maybe_convert_usecols(
@overload
-def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]:
- ...
+def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]: ...
@overload
-def validate_freeze_panes(freeze_panes: None) -> Literal[False]:
- ...
+def validate_freeze_panes(freeze_panes: None) -> Literal[False]: ...
def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool:
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index 89cb044511a25..b42dbaa579ee7 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -1,4 +1,5 @@
-""" feather-format compat """
+"""feather-format compat"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py
index 2a6cbe0762903..99a790388f3f1 100644
--- a/pandas/io/formats/console.py
+++ b/pandas/io/formats/console.py
@@ -1,6 +1,7 @@
"""
Internal module for console introspection
"""
+
from __future__ import annotations
from shutil import get_terminal_size
diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py
index 0c6885d789f15..dc18ef2fcd4fc 100644
--- a/pandas/io/formats/css.py
+++ b/pandas/io/formats/css.py
@@ -1,6 +1,7 @@
"""
Utilities for interpreting CSS from Stylers for formatting non-HTML outputs.
"""
+
from __future__ import annotations
import re
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index ee7739df49389..b6c6112b05ab3 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -1,6 +1,7 @@
"""
Utilities for conversion to writer-agnostic Excel representation.
"""
+
from __future__ import annotations
from collections.abc import (
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 04d5fcae1a50d..8566751b9f33e 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -2,6 +2,7 @@
Internal module for formatting output data in csv, html, xml,
and latex files. This module also applies to display formatting.
"""
+
from __future__ import annotations
from collections.abc import (
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py
index 794ce77b3b45e..adaeed017d7bf 100644
--- a/pandas/io/formats/html.py
+++ b/pandas/io/formats/html.py
@@ -1,6 +1,7 @@
"""
Module for formatting output data in HTML.
"""
+
from __future__ import annotations
from textwrap import dedent
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index 1119cb0ba9b9d..b30351e14332d 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -1,6 +1,7 @@
"""
Printing tools.
"""
+
from __future__ import annotations
from collections.abc import (
diff --git a/pandas/io/formats/string.py b/pandas/io/formats/string.py
index cdad388592717..ca41726de08cf 100644
--- a/pandas/io/formats/string.py
+++ b/pandas/io/formats/string.py
@@ -1,6 +1,7 @@
"""
Module for formatting output data in console (to string).
"""
+
from __future__ import annotations
from shutil import get_terminal_size
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 08a3edd30c311..7247e11be874e 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -1,6 +1,7 @@
"""
Module for applying conditional formatting to DataFrames and Series.
"""
+
from __future__ import annotations
from contextlib import contextmanager
@@ -618,8 +619,7 @@ def to_latex(
environment: str | None = ...,
encoding: str | None = ...,
convert_css: bool = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def to_latex(
@@ -641,8 +641,7 @@ def to_latex(
environment: str | None = ...,
encoding: str | None = ...,
convert_css: bool = ...,
- ) -> str:
- ...
+ ) -> str: ...
def to_latex(
self,
@@ -1234,8 +1233,7 @@ def to_html(
doctype_html: bool = ...,
exclude_styles: bool = ...,
**kwargs,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def to_html(
@@ -1254,8 +1252,7 @@ def to_html(
doctype_html: bool = ...,
exclude_styles: bool = ...,
**kwargs,
- ) -> str:
- ...
+ ) -> str: ...
@Substitution(buf=buffering_args, encoding=encoding_args)
def to_html(
@@ -1414,8 +1411,7 @@ def to_string(
max_rows: int | None = ...,
max_columns: int | None = ...,
delimiter: str = ...,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def to_string(
@@ -1428,8 +1424,7 @@ def to_string(
max_rows: int | None = ...,
max_columns: int | None = ...,
delimiter: str = ...,
- ) -> str:
- ...
+ ) -> str: ...
@Substitution(buf=buffering_args, encoding=encoding_args)
def to_string(
@@ -3629,8 +3624,7 @@ def pipe(
func: Callable[Concatenate[Self, P], T],
*args: P.args,
**kwargs: P.kwargs,
- ) -> T:
- ...
+ ) -> T: ...
@overload
def pipe(
@@ -3638,8 +3632,7 @@ def pipe(
func: tuple[Callable[..., T], str],
*args: Any,
**kwargs: Any,
- ) -> T:
- ...
+ ) -> T: ...
def pipe(
self,
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index fe03ba519629d..2c93dbe74eace 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -314,9 +314,9 @@ def _translate(
max_cols,
)
- self.cellstyle_map_columns: DefaultDict[
- tuple[CSSPair, ...], list[str]
- ] = defaultdict(list)
+ self.cellstyle_map_columns: DefaultDict[tuple[CSSPair, ...], list[str]] = (
+ defaultdict(list)
+ )
head = self._translate_header(sparse_cols, max_cols)
d.update({"head": head})
@@ -329,9 +329,9 @@ def _translate(
self.cellstyle_map: DefaultDict[tuple[CSSPair, ...], list[str]] = defaultdict(
list
)
- self.cellstyle_map_index: DefaultDict[
- tuple[CSSPair, ...], list[str]
- ] = defaultdict(list)
+ self.cellstyle_map_index: DefaultDict[tuple[CSSPair, ...], list[str]] = (
+ defaultdict(list)
+ )
body: list = self._translate_body(idx_lengths, max_rows, max_cols)
d.update({"body": body})
@@ -776,9 +776,9 @@ def _generate_body_row(
)
if self.cell_ids:
- header_element[
- "id"
- ] = f"{self.css['level']}{c}_{self.css['row']}{r}" # id is given
+ header_element["id"] = (
+ f"{self.css['level']}{c}_{self.css['row']}{r}" # id is given
+ )
if (
header_element_visible
and (r, c) in self.ctx_index
diff --git a/pandas/io/formats/xml.py b/pandas/io/formats/xml.py
index e55561902d4d3..702430642a597 100644
--- a/pandas/io/formats/xml.py
+++ b/pandas/io/formats/xml.py
@@ -1,6 +1,7 @@
"""
:mod:`pandas.io.formats.xml` is a module for formatting data in XML.
"""
+
from __future__ import annotations
import codecs
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 0e1426d31f0ee..8f4028c1ead3a 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -107,8 +107,7 @@ def to_json(
indent: int = ...,
storage_options: StorageOptions = ...,
mode: Literal["a", "w"] = ...,
-) -> None:
- ...
+) -> None: ...
@overload
@@ -127,8 +126,7 @@ def to_json(
indent: int = ...,
storage_options: StorageOptions = ...,
mode: Literal["a", "w"] = ...,
-) -> str:
- ...
+) -> str: ...
def to_json(
@@ -415,8 +413,7 @@ def read_json(
storage_options: StorageOptions = ...,
dtype_backend: DtypeBackend | lib.NoDefault = ...,
engine: JSONEngine = ...,
-) -> JsonReader[Literal["frame"]]:
- ...
+) -> JsonReader[Literal["frame"]]: ...
@overload
@@ -440,8 +437,7 @@ def read_json(
storage_options: StorageOptions = ...,
dtype_backend: DtypeBackend | lib.NoDefault = ...,
engine: JSONEngine = ...,
-) -> JsonReader[Literal["series"]]:
- ...
+) -> JsonReader[Literal["series"]]: ...
@overload
@@ -465,8 +461,7 @@ def read_json(
storage_options: StorageOptions = ...,
dtype_backend: DtypeBackend | lib.NoDefault = ...,
engine: JSONEngine = ...,
-) -> Series:
- ...
+) -> Series: ...
@overload
@@ -490,8 +485,7 @@ def read_json(
storage_options: StorageOptions = ...,
dtype_backend: DtypeBackend | lib.NoDefault = ...,
engine: JSONEngine = ...,
-) -> DataFrame:
- ...
+) -> DataFrame: ...
@doc(
@@ -922,16 +916,13 @@ def _combine_lines(self, lines) -> str:
)
@overload
- def read(self: JsonReader[Literal["frame"]]) -> DataFrame:
- ...
+ def read(self: JsonReader[Literal["frame"]]) -> DataFrame: ...
@overload
- def read(self: JsonReader[Literal["series"]]) -> Series:
- ...
+ def read(self: JsonReader[Literal["series"]]) -> Series: ...
@overload
- def read(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series:
- ...
+ def read(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: ...
def read(self) -> DataFrame | Series:
"""
@@ -1016,16 +1007,15 @@ def __iter__(self) -> Self:
return self
@overload
- def __next__(self: JsonReader[Literal["frame"]]) -> DataFrame:
- ...
+ def __next__(self: JsonReader[Literal["frame"]]) -> DataFrame: ...
@overload
- def __next__(self: JsonReader[Literal["series"]]) -> Series:
- ...
+ def __next__(self: JsonReader[Literal["series"]]) -> Series: ...
@overload
- def __next__(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series:
- ...
+ def __next__(
+ self: JsonReader[Literal["frame", "series"]],
+ ) -> DataFrame | Series: ...
def __next__(self) -> DataFrame | Series:
if self.nrows and self.nrows_seen >= self.nrows:
diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py
index f784004487646..ef717dd9b7ef8 100644
--- a/pandas/io/json/_normalize.py
+++ b/pandas/io/json/_normalize.py
@@ -53,8 +53,7 @@ def nested_to_record(
sep: str = ...,
level: int = ...,
max_level: int | None = ...,
-) -> dict[str, Any]:
- ...
+) -> dict[str, Any]: ...
@overload
@@ -64,8 +63,7 @@ def nested_to_record(
sep: str = ...,
level: int = ...,
max_level: int | None = ...,
-) -> list[dict[str, Any]]:
- ...
+) -> list[dict[str, Any]]: ...
def nested_to_record(
diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py
index a3b912dec66fd..d4b412404c308 100644
--- a/pandas/io/json/_table_schema.py
+++ b/pandas/io/json/_table_schema.py
@@ -3,6 +3,7 @@
https://specs.frictionlessdata.io/table-schema/
"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/io/orc.py b/pandas/io/orc.py
index ed9bc21075e73..9e9a43644f694 100644
--- a/pandas/io/orc.py
+++ b/pandas/io/orc.py
@@ -1,4 +1,5 @@
-""" orc compat """
+"""orc compat"""
+
from __future__ import annotations
import io
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 8052da25f0368..08983ceed44e5 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -1,4 +1,5 @@
-""" parquet compat """
+"""parquet compat"""
+
from __future__ import annotations
import io
diff --git a/pandas/io/parsers/arrow_parser_wrapper.py b/pandas/io/parsers/arrow_parser_wrapper.py
index 85b6afeec1ab9..f8263a65ef5c7 100644
--- a/pandas/io/parsers/arrow_parser_wrapper.py
+++ b/pandas/io/parsers/arrow_parser_wrapper.py
@@ -99,9 +99,9 @@ def _get_pyarrow_options(self) -> None:
if callable(on_bad_lines):
self.parse_options["invalid_row_handler"] = on_bad_lines
elif on_bad_lines == ParserBase.BadLineHandleMethod.ERROR:
- self.parse_options[
- "invalid_row_handler"
- ] = None # PyArrow raises an exception by default
+ self.parse_options["invalid_row_handler"] = (
+ None # PyArrow raises an exception by default
+ )
elif on_bad_lines == ParserBase.BadLineHandleMethod.WARN:
def handle_warning(invalid_row) -> str:
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py
index 70a90a3e37d62..7b06c6b6b0d39 100644
--- a/pandas/io/parsers/base_parser.py
+++ b/pandas/io/parsers/base_parser.py
@@ -859,16 +859,14 @@ def _do_date_conversions(
self,
names: Index,
data: DataFrame,
- ) -> tuple[Sequence[Hashable] | Index, DataFrame]:
- ...
+ ) -> tuple[Sequence[Hashable] | Index, DataFrame]: ...
@overload
def _do_date_conversions(
self,
names: Sequence[Hashable],
data: Mapping[Hashable, ArrayLike],
- ) -> tuple[Sequence[Hashable], Mapping[Hashable, ArrayLike]]:
- ...
+ ) -> tuple[Sequence[Hashable], Mapping[Hashable, ArrayLike]]: ...
@final
def _do_date_conversions(
@@ -927,14 +925,12 @@ def _evaluate_usecols(
self,
usecols: Callable[[Hashable], object],
names: Iterable[Hashable],
- ) -> set[int]:
- ...
+ ) -> set[int]: ...
@overload
def _evaluate_usecols(
self, usecols: SequenceT, names: Iterable[Hashable]
- ) -> SequenceT:
- ...
+ ) -> SequenceT: ...
@final
def _evaluate_usecols(
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index 8995faa7ad346..539d9abf84f90 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -3,6 +3,7 @@
GH#48849 provides a convenient way of deprecating keyword arguments
"""
+
from __future__ import annotations
from collections import (
@@ -111,9 +112,9 @@ class _read_shared(TypedDict, Generic[HashableT], total=False):
skiprows: list[int] | int | Callable[[Hashable], bool] | None
skipfooter: int
nrows: int | None
- na_values: Hashable | Iterable[Hashable] | Mapping[
- Hashable, Iterable[Hashable]
- ] | None
+ na_values: (
+ Hashable | Iterable[Hashable] | Mapping[Hashable, Iterable[Hashable]] | None
+ )
keep_default_na: bool
na_filter: bool
verbose: bool | lib.NoDefault
@@ -568,18 +569,15 @@ class _DeprecationConfig(NamedTuple):
@overload
-def validate_integer(name: str, val: None, min_val: int = ...) -> None:
- ...
+def validate_integer(name: str, val: None, min_val: int = ...) -> None: ...
@overload
-def validate_integer(name: str, val: float, min_val: int = ...) -> int:
- ...
+def validate_integer(name: str, val: float, min_val: int = ...) -> int: ...
@overload
-def validate_integer(name: str, val: int | None, min_val: int = ...) -> int | None:
- ...
+def validate_integer(name: str, val: int | None, min_val: int = ...) -> int | None: ...
def validate_integer(
@@ -691,8 +689,7 @@ def read_csv(
iterator: Literal[True],
chunksize: int | None = ...,
**kwds: Unpack[_read_shared[HashableT]],
-) -> TextFileReader:
- ...
+) -> TextFileReader: ...
@overload
@@ -702,8 +699,7 @@ def read_csv(
iterator: bool = ...,
chunksize: int,
**kwds: Unpack[_read_shared[HashableT]],
-) -> TextFileReader:
- ...
+) -> TextFileReader: ...
@overload
@@ -713,8 +709,7 @@ def read_csv(
iterator: Literal[False] = ...,
chunksize: None = ...,
**kwds: Unpack[_read_shared[HashableT]],
-) -> DataFrame:
- ...
+) -> DataFrame: ...
@overload
@@ -724,8 +719,7 @@ def read_csv(
iterator: bool = ...,
chunksize: int | None = ...,
**kwds: Unpack[_read_shared[HashableT]],
-) -> DataFrame | TextFileReader:
- ...
+) -> DataFrame | TextFileReader: ...
@Appender(
@@ -896,8 +890,7 @@ def read_table(
iterator: Literal[True],
chunksize: int | None = ...,
**kwds: Unpack[_read_shared[HashableT]],
-) -> TextFileReader:
- ...
+) -> TextFileReader: ...
@overload
@@ -907,8 +900,7 @@ def read_table(
iterator: bool = ...,
chunksize: int,
**kwds: Unpack[_read_shared[HashableT]],
-) -> TextFileReader:
- ...
+) -> TextFileReader: ...
@overload
@@ -918,8 +910,7 @@ def read_table(
iterator: Literal[False] = ...,
chunksize: None = ...,
**kwds: Unpack[_read_shared[HashableT]],
-) -> DataFrame:
- ...
+) -> DataFrame: ...
@overload
@@ -929,8 +920,7 @@ def read_table(
iterator: bool = ...,
chunksize: int | None = ...,
**kwds: Unpack[_read_shared[HashableT]],
-) -> DataFrame | TextFileReader:
- ...
+) -> DataFrame | TextFileReader: ...
@Appender(
@@ -1097,8 +1087,7 @@ def read_fwf(
iterator: Literal[True],
chunksize: int | None = ...,
**kwds: Unpack[_read_shared[HashableT]],
-) -> TextFileReader:
- ...
+) -> TextFileReader: ...
@overload
@@ -1111,8 +1100,7 @@ def read_fwf(
iterator: bool = ...,
chunksize: int,
**kwds: Unpack[_read_shared[HashableT]],
-) -> TextFileReader:
- ...
+) -> TextFileReader: ...
@overload
@@ -1125,8 +1113,7 @@ def read_fwf(
iterator: Literal[False] = ...,
chunksize: None = ...,
**kwds: Unpack[_read_shared[HashableT]],
-) -> DataFrame:
- ...
+) -> DataFrame: ...
def read_fwf(
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index d37c77182d3fe..f0441f583bea2 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -1,4 +1,5 @@
-""" pickle compat """
+"""pickle compat"""
+
from __future__ import annotations
import pickle
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 5703f626e3b04..e804c1b751d4a 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2,6 +2,7 @@
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
+
from __future__ import annotations
from contextlib import suppress
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index 275fad2a565bf..49287ddf5ff38 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -13,6 +13,7 @@
Reference for binary data compression:
http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
"""
+
from __future__ import annotations
from collections import abc
diff --git a/pandas/io/sas/sas_constants.py b/pandas/io/sas/sas_constants.py
index 62c17bd03927e..8da7becd76d3b 100644
--- a/pandas/io/sas/sas_constants.py
+++ b/pandas/io/sas/sas_constants.py
@@ -181,36 +181,36 @@ class SASIndex:
subheader_signature_to_index: Final = {
- b"\xF7\xF7\xF7\xF7": SASIndex.row_size_index,
- b"\x00\x00\x00\x00\xF7\xF7\xF7\xF7": SASIndex.row_size_index,
- b"\xF7\xF7\xF7\xF7\x00\x00\x00\x00": SASIndex.row_size_index,
- b"\xF7\xF7\xF7\xF7\xFF\xFF\xFB\xFE": SASIndex.row_size_index,
- b"\xF6\xF6\xF6\xF6": SASIndex.column_size_index,
- b"\x00\x00\x00\x00\xF6\xF6\xF6\xF6": SASIndex.column_size_index,
- b"\xF6\xF6\xF6\xF6\x00\x00\x00\x00": SASIndex.column_size_index,
- b"\xF6\xF6\xF6\xF6\xFF\xFF\xFB\xFE": SASIndex.column_size_index,
- b"\x00\xFC\xFF\xFF": SASIndex.subheader_counts_index,
- b"\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index,
- b"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.subheader_counts_index,
- b"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index,
- b"\xFD\xFF\xFF\xFF": SASIndex.column_text_index,
- b"\xFF\xFF\xFF\xFD": SASIndex.column_text_index,
- b"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_text_index,
- b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD": SASIndex.column_text_index,
- b"\xFF\xFF\xFF\xFF": SASIndex.column_name_index,
- b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_name_index,
- b"\xFC\xFF\xFF\xFF": SASIndex.column_attributes_index,
- b"\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index,
- b"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_attributes_index,
- b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index,
- b"\xFE\xFB\xFF\xFF": SASIndex.format_and_label_index,
- b"\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index,
- b"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.format_and_label_index,
- b"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index,
- b"\xFE\xFF\xFF\xFF": SASIndex.column_list_index,
- b"\xFF\xFF\xFF\xFE": SASIndex.column_list_index,
- b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_list_index,
- b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": SASIndex.column_list_index,
+ b"\xf7\xf7\xf7\xf7": SASIndex.row_size_index,
+ b"\x00\x00\x00\x00\xf7\xf7\xf7\xf7": SASIndex.row_size_index,
+ b"\xf7\xf7\xf7\xf7\x00\x00\x00\x00": SASIndex.row_size_index,
+ b"\xf7\xf7\xf7\xf7\xff\xff\xfb\xfe": SASIndex.row_size_index,
+ b"\xf6\xf6\xf6\xf6": SASIndex.column_size_index,
+ b"\x00\x00\x00\x00\xf6\xf6\xf6\xf6": SASIndex.column_size_index,
+ b"\xf6\xf6\xf6\xf6\x00\x00\x00\x00": SASIndex.column_size_index,
+ b"\xf6\xf6\xf6\xf6\xff\xff\xfb\xfe": SASIndex.column_size_index,
+ b"\x00\xfc\xff\xff": SASIndex.subheader_counts_index,
+ b"\xff\xff\xfc\x00": SASIndex.subheader_counts_index,
+ b"\x00\xfc\xff\xff\xff\xff\xff\xff": SASIndex.subheader_counts_index,
+ b"\xff\xff\xff\xff\xff\xff\xfc\x00": SASIndex.subheader_counts_index,
+ b"\xfd\xff\xff\xff": SASIndex.column_text_index,
+ b"\xff\xff\xff\xfd": SASIndex.column_text_index,
+ b"\xfd\xff\xff\xff\xff\xff\xff\xff": SASIndex.column_text_index,
+ b"\xff\xff\xff\xff\xff\xff\xff\xfd": SASIndex.column_text_index,
+ b"\xff\xff\xff\xff": SASIndex.column_name_index,
+ b"\xff\xff\xff\xff\xff\xff\xff\xff": SASIndex.column_name_index,
+ b"\xfc\xff\xff\xff": SASIndex.column_attributes_index,
+ b"\xff\xff\xff\xfc": SASIndex.column_attributes_index,
+ b"\xfc\xff\xff\xff\xff\xff\xff\xff": SASIndex.column_attributes_index,
+ b"\xff\xff\xff\xff\xff\xff\xff\xfc": SASIndex.column_attributes_index,
+ b"\xfe\xfb\xff\xff": SASIndex.format_and_label_index,
+ b"\xff\xff\xfb\xfe": SASIndex.format_and_label_index,
+ b"\xfe\xfb\xff\xff\xff\xff\xff\xff": SASIndex.format_and_label_index,
+ b"\xff\xff\xff\xff\xff\xff\xfb\xfe": SASIndex.format_and_label_index,
+ b"\xfe\xff\xff\xff": SASIndex.column_list_index,
+ b"\xff\xff\xff\xfe": SASIndex.column_list_index,
+ b"\xfe\xff\xff\xff\xff\xff\xff\xff": SASIndex.column_list_index,
+ b"\xff\xff\xff\xff\xff\xff\xff\xfe": SASIndex.column_list_index,
}
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 11b2ed0ee7316..adba9bf117a8e 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -7,6 +7,7 @@
https://support.sas.com/content/dam/SAS/support/en/technical-papers/record-layout-of-a-sas-version-5-or-6-data-set-in-sas-transport-xport-format.pdf
"""
+
from __future__ import annotations
from collections import abc
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index ca5a75057fd34..f14943d1e0fce 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -1,6 +1,7 @@
"""
Read SAS sas7bdat or xport files.
"""
+
from __future__ import annotations
from abc import (
@@ -38,12 +39,10 @@ class ReaderBase(ABC):
"""
@abstractmethod
- def read(self, nrows: int | None = None) -> DataFrame:
- ...
+ def read(self, nrows: int | None = None) -> DataFrame: ...
@abstractmethod
- def close(self) -> None:
- ...
+ def close(self) -> None: ...
def __enter__(self) -> Self:
return self
@@ -67,8 +66,7 @@ def read_sas(
chunksize: int = ...,
iterator: bool = ...,
compression: CompressionOptions = ...,
-) -> ReaderBase:
- ...
+) -> ReaderBase: ...
@overload
@@ -81,8 +79,7 @@ def read_sas(
chunksize: None = ...,
iterator: bool = ...,
compression: CompressionOptions = ...,
-) -> DataFrame | ReaderBase:
- ...
+) -> DataFrame | ReaderBase: ...
@doc(decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer")
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index c0d69472598f1..b80487abbc4ab 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -242,8 +242,7 @@ def read_sql_table(
columns: list[str] | None = ...,
chunksize: None = ...,
dtype_backend: DtypeBackend | lib.NoDefault = ...,
-) -> DataFrame:
- ...
+) -> DataFrame: ...
@overload
@@ -257,8 +256,7 @@ def read_sql_table(
columns: list[str] | None = ...,
chunksize: int = ...,
dtype_backend: DtypeBackend | lib.NoDefault = ...,
-) -> Iterator[DataFrame]:
- ...
+) -> Iterator[DataFrame]: ...
def read_sql_table(
@@ -374,8 +372,7 @@ def read_sql_query(
chunksize: None = ...,
dtype: DtypeArg | None = ...,
dtype_backend: DtypeBackend | lib.NoDefault = ...,
-) -> DataFrame:
- ...
+) -> DataFrame: ...
@overload
@@ -389,8 +386,7 @@ def read_sql_query(
chunksize: int = ...,
dtype: DtypeArg | None = ...,
dtype_backend: DtypeBackend | lib.NoDefault = ...,
-) -> Iterator[DataFrame]:
- ...
+) -> Iterator[DataFrame]: ...
def read_sql_query(
@@ -511,8 +507,7 @@ def read_sql(
chunksize: None = ...,
dtype_backend: DtypeBackend | lib.NoDefault = ...,
dtype: DtypeArg | None = None,
-) -> DataFrame:
- ...
+) -> DataFrame: ...
@overload
@@ -527,8 +522,7 @@ def read_sql(
chunksize: int = ...,
dtype_backend: DtypeBackend | lib.NoDefault = ...,
dtype: DtypeArg | None = None,
-) -> Iterator[DataFrame]:
- ...
+) -> Iterator[DataFrame]: ...
def read_sql(
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 37ea940b3938a..c3101683b9962 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -9,6 +9,7 @@
You can find more information on http://presbrey.mit.edu/PyDTA and
https://www.statsmodels.org/devel/
"""
+
from __future__ import annotations
from collections import abc
diff --git a/pandas/plotting/__init__.py b/pandas/plotting/__init__.py
index 55c861e384d67..c7a4c1eacfcae 100644
--- a/pandas/plotting/__init__.py
+++ b/pandas/plotting/__init__.py
@@ -55,6 +55,7 @@
For the discussion about the API see
https://github.com/pandas-dev/pandas/issues/26747.
"""
+
from pandas.plotting._core import (
PlotAccessor,
boxplot,
diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py
index 45a077a6151cf..d725d53bd21ec 100644
--- a/pandas/plotting/_matplotlib/style.py
+++ b/pandas/plotting/_matplotlib/style.py
@@ -35,8 +35,7 @@ def get_standard_colors(
color_type: str = ...,
*,
color: dict[str, Color],
-) -> dict[str, Color]:
- ...
+) -> dict[str, Color]: ...
@overload
@@ -46,8 +45,7 @@ def get_standard_colors(
color_type: str = ...,
*,
color: Color | Sequence[Color] | None = ...,
-) -> list[Color]:
- ...
+) -> list[Color]: ...
@overload
@@ -57,8 +55,7 @@ def get_standard_colors(
color_type: str = ...,
*,
color: dict[str, Color] | Color | Sequence[Color] | None = ...,
-) -> dict[str, Color] | list[Color]:
- ...
+) -> dict[str, Color] | list[Color]: ...
def get_standard_colors(
diff --git a/pandas/testing.py b/pandas/testing.py
index 841b55df48556..0445fa5b5efc0 100644
--- a/pandas/testing.py
+++ b/pandas/testing.py
@@ -2,7 +2,6 @@
Public testing utility functions.
"""
-
from pandas._testing import (
assert_extension_array_equal,
assert_frame_equal,
diff --git a/pandas/tests/arithmetic/common.py b/pandas/tests/arithmetic/common.py
index b608df1554154..d7a8b0510b50f 100644
--- a/pandas/tests/arithmetic/common.py
+++ b/pandas/tests/arithmetic/common.py
@@ -1,6 +1,7 @@
"""
Assertion helpers for arithmetic tests.
"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/arrays/interval/test_overlaps.py b/pandas/tests/arrays/interval/test_overlaps.py
index 4853bec51106c..5a48cf024ec0d 100644
--- a/pandas/tests/arrays/interval/test_overlaps.py
+++ b/pandas/tests/arrays/interval/test_overlaps.py
@@ -1,4 +1,5 @@
"""Tests for Interval-Interval operations, such as overlaps, contains, etc."""
+
import numpy as np
import pytest
diff --git a/pandas/tests/arrays/masked/test_arrow_compat.py b/pandas/tests/arrays/masked/test_arrow_compat.py
index 7a89656bd5aa0..5f73370554473 100644
--- a/pandas/tests/arrays/masked/test_arrow_compat.py
+++ b/pandas/tests/arrays/masked/test_arrow_compat.py
@@ -161,7 +161,7 @@ def test_pyarrow_array_to_numpy_and_mask(np_dtype_to_arrays):
# Add offset to the buffer.
offset = b"\x00" * (pa_array.type.bit_width // 8)
data_buffer_offset = pa.py_buffer(offset + data_buffer_bytes)
- mask_buffer_offset = pa.py_buffer(b"\x0E")
+ mask_buffer_offset = pa.py_buffer(b"\x0e")
pa_array_offset = pa.Array.from_buffers(
type=pa_array.type,
length=len(pa_array),
diff --git a/pandas/tests/arrays/masked_shared.py b/pandas/tests/arrays/masked_shared.py
index 3e74402263cf9..545b14af2c98b 100644
--- a/pandas/tests/arrays/masked_shared.py
+++ b/pandas/tests/arrays/masked_shared.py
@@ -1,6 +1,7 @@
"""
Tests shared by MaskedArray subclasses.
"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/arrays/numpy_/test_numpy.py b/pandas/tests/arrays/numpy_/test_numpy.py
index 5112ce262f771..e86eb014465e1 100644
--- a/pandas/tests/arrays/numpy_/test_numpy.py
+++ b/pandas/tests/arrays/numpy_/test_numpy.py
@@ -2,6 +2,7 @@
Additional tests for NumpyExtensionArray that aren't covered by
the interface tests.
"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index 4b82d43158b88..597b407a29c94 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -2,6 +2,7 @@
This module tests the functionality of StringArray and ArrowStringArray.
Tests for the str accessors are in pandas/tests/strings/test_string_array.py
"""
+
import operator
import numpy as np
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 8f0576cc65a27..3f2723d258710 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -1,6 +1,7 @@
"""
Tests for DatetimeArray
"""
+
from __future__ import annotations
from datetime import timedelta
diff --git a/pandas/tests/arrays/test_ndarray_backed.py b/pandas/tests/arrays/test_ndarray_backed.py
index 1fe7cc9b03e8a..2af59a03a5b3e 100644
--- a/pandas/tests/arrays/test_ndarray_backed.py
+++ b/pandas/tests/arrays/test_ndarray_backed.py
@@ -1,6 +1,7 @@
"""
Tests for subclasses of NDArrayBackedExtensionArray
"""
+
import numpy as np
from pandas import (
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index d54b15fbe6633..96a67591f6c78 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -3,6 +3,7 @@
related to inference and not otherwise tested in types/test_common.py
"""
+
import collections
from collections import namedtuple
from collections.abc import Iterator
@@ -239,8 +240,9 @@ def test_is_list_like_generic():
# is_list_like was yielding false positives for Generic classes in python 3.11
T = TypeVar("T")
- class MyDataFrame(DataFrame, Generic[T]):
- ...
+ # https://github.com/pylint-dev/pylint/issues/9398
+ # pylint: disable=multiple-statements
+ class MyDataFrame(DataFrame, Generic[T]): ...
tstc = MyDataFrame[int]
tst = MyDataFrame[int]({"x": [1, 2, 3]})
diff --git a/pandas/tests/extension/array_with_attr/array.py b/pandas/tests/extension/array_with_attr/array.py
index 2789d51ec2ce3..4f65424ece145 100644
--- a/pandas/tests/extension/array_with_attr/array.py
+++ b/pandas/tests/extension/array_with_attr/array.py
@@ -2,6 +2,7 @@
Test extension array that has custom attribute information (not stored on the dtype).
"""
+
from __future__ import annotations
import numbers
diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py
index 6efaa95aef1b5..cfbc365568403 100644
--- a/pandas/tests/extension/base/__init__.py
+++ b/pandas/tests/extension/base/__init__.py
@@ -34,6 +34,7 @@ class TestMyDtype(BaseDtypeTests):
wherever the test requires it. You're free to implement additional tests.
"""
+
from pandas.tests.extension.base.accumulate import BaseAccumulateTests
from pandas.tests.extension.base.casting import BaseCastingTests
from pandas.tests.extension.base.constructors import BaseConstructorsTests
diff --git a/pandas/tests/extension/base/dim2.py b/pandas/tests/extension/base/dim2.py
index 4da9fe8917d55..8c7d8ff491cd3 100644
--- a/pandas/tests/extension/base/dim2.py
+++ b/pandas/tests/extension/base/dim2.py
@@ -1,6 +1,7 @@
"""
Tests for 2D compatibility.
"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/extension/base/index.py b/pandas/tests/extension/base/index.py
index 72c4ebfb5d84a..e7bfebec92287 100644
--- a/pandas/tests/extension/base/index.py
+++ b/pandas/tests/extension/base/index.py
@@ -1,6 +1,7 @@
"""
Tests for Indexes backed by arbitrary ExtensionArrays.
"""
+
import pandas as pd
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index e43b50322bb92..3a4391edc99ef 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -11,6 +11,7 @@
in that case. We *want* the dictionaries to be treated as scalars, so we
hack around pandas by using UserDicts.
"""
+
from __future__ import annotations
from collections import (
diff --git a/pandas/tests/extension/list/array.py b/pandas/tests/extension/list/array.py
index b3bb35c9396f4..da53bdcb4e37e 100644
--- a/pandas/tests/extension/list/array.py
+++ b/pandas/tests/extension/list/array.py
@@ -3,6 +3,7 @@
The ListArray stores an ndarray of lists.
"""
+
from __future__ import annotations
import numbers
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 5d634c9aeb14f..6c3706881624f 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -10,6 +10,7 @@
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
+
from __future__ import annotations
from datetime import (
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index bd4ab5077c6e8..09662f7b793a9 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -13,6 +13,7 @@
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
+
import string
import numpy as np
diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py
index 6352bf76f96bb..06e85f5c92913 100644
--- a/pandas/tests/extension/test_datetime.py
+++ b/pandas/tests/extension/test_datetime.py
@@ -13,6 +13,7 @@
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/extension/test_extension.py b/pandas/tests/extension/test_extension.py
index 1ed626cd51080..456f4863b1c31 100644
--- a/pandas/tests/extension/test_extension.py
+++ b/pandas/tests/extension/test_extension.py
@@ -1,6 +1,7 @@
"""
Tests for behavior if an author does *not* implement EA methods.
"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py
index 98dd1c5cb615f..6900d6d67f9d9 100644
--- a/pandas/tests/extension/test_interval.py
+++ b/pandas/tests/extension/test_interval.py
@@ -13,6 +13,7 @@
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
+
from __future__ import annotations
from typing import TYPE_CHECKING
diff --git a/pandas/tests/extension/test_masked.py b/pandas/tests/extension/test_masked.py
index 651f783b44d1f..5481e50de10bb 100644
--- a/pandas/tests/extension/test_masked.py
+++ b/pandas/tests/extension/test_masked.py
@@ -13,6 +13,7 @@
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
+
import warnings
import numpy as np
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 3f54f6cbbba69..ca79c13ed44e4 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -15,6 +15,7 @@
Note: we do not bother with base.BaseIndexTests because NumpyExtensionArray
will never be held in an Index.
"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py
index 2d1d213322bac..142bad6db4f95 100644
--- a/pandas/tests/extension/test_period.py
+++ b/pandas/tests/extension/test_period.py
@@ -13,6 +13,7 @@
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
+
from __future__ import annotations
from typing import TYPE_CHECKING
diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py
index 2d5a134f8560a..c09d4d315451f 100644
--- a/pandas/tests/extension/test_string.py
+++ b/pandas/tests/extension/test_string.py
@@ -13,6 +13,7 @@
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
+
from __future__ import annotations
import string
diff --git a/pandas/tests/frame/indexing/test_coercion.py b/pandas/tests/frame/indexing/test_coercion.py
index ba0d8613b6228..f55605d1ffa12 100644
--- a/pandas/tests/frame/indexing/test_coercion.py
+++ b/pandas/tests/frame/indexing/test_coercion.py
@@ -4,6 +4,7 @@
For the most part, these should be multi-column DataFrames, otherwise
we would share the tests with Series.
"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/frame/indexing/test_insert.py b/pandas/tests/frame/indexing/test_insert.py
index 26eba5f49bd39..b530cb98ef46c 100644
--- a/pandas/tests/frame/indexing/test_insert.py
+++ b/pandas/tests/frame/indexing/test_insert.py
@@ -3,6 +3,7 @@
confused with tests with "insert" in their names that are really testing
__setitem__.
"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/frame/methods/test_first_valid_index.py b/pandas/tests/frame/methods/test_first_valid_index.py
index 2e27f1aa71700..5855be2373ae2 100644
--- a/pandas/tests/frame/methods/test_first_valid_index.py
+++ b/pandas/tests/frame/methods/test_first_valid_index.py
@@ -1,6 +1,7 @@
"""
Includes test for last_valid_index.
"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/frame/methods/test_nlargest.py b/pandas/tests/frame/methods/test_nlargest.py
index 8a7b985c98069..7b6a0487c296a 100644
--- a/pandas/tests/frame/methods/test_nlargest.py
+++ b/pandas/tests/frame/methods/test_nlargest.py
@@ -2,6 +2,7 @@
Note: for naming purposes, most tests are title with as e.g. "test_nlargest_foo"
but are implicitly also testing nsmallest_foo.
"""
+
from string import ascii_lowercase
import numpy as np
diff --git a/pandas/tests/frame/test_npfuncs.py b/pandas/tests/frame/test_npfuncs.py
index 6b5c469403130..e9a241202d156 100644
--- a/pandas/tests/frame/test_npfuncs.py
+++ b/pandas/tests/frame/test_npfuncs.py
@@ -1,6 +1,7 @@
"""
Tests for np.foo applied to DataFrame, not necessarily ufuncs.
"""
+
import numpy as np
from pandas import (
diff --git a/pandas/tests/generic/test_duplicate_labels.py b/pandas/tests/generic/test_duplicate_labels.py
index 6c108847c2bc6..cfa3cabbc1747 100644
--- a/pandas/tests/generic/test_duplicate_labels.py
+++ b/pandas/tests/generic/test_duplicate_labels.py
@@ -1,4 +1,5 @@
"""Tests dealing with the NDFrame.allows_duplicates."""
+
import operator
import numpy as np
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py
index fd815c85a89b3..f2eecbe86926b 100644
--- a/pandas/tests/generic/test_finalize.py
+++ b/pandas/tests/generic/test_finalize.py
@@ -1,6 +1,7 @@
"""
An exhaustive list of pandas methods exercising NDFrame.__finalize__.
"""
+
import operator
import re
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index d8f832002dac6..2b9df1b7079da 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -1,6 +1,7 @@
"""
test .agg behavior / note that .apply is tested generally in test_groupby.py
"""
+
import datetime
import functools
from functools import partial
diff --git a/pandas/tests/groupby/methods/test_value_counts.py b/pandas/tests/groupby/methods/test_value_counts.py
index 24990e64bb51c..a8d359f3206c2 100644
--- a/pandas/tests/groupby/methods/test_value_counts.py
+++ b/pandas/tests/groupby/methods/test_value_counts.py
@@ -4,7 +4,6 @@
and proper parameter handling
"""
-
import numpy as np
import pytest
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 36b5a6f638418..699fffe5d0488 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -1,6 +1,7 @@
"""
test where we are determining what we are grouping, or getting groups
"""
+
from datetime import (
date,
timedelta,
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index aba3b2f27c633..ea556d043be2d 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -1,6 +1,7 @@
"""
test with the TimeGrouper / grouping with datetimes
"""
+
from datetime import (
datetime,
timedelta,
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index c9ff4608c6563..e91ca64bb8970 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -1,4 +1,5 @@
-""" test with the .transform """
+"""test with the .transform"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/indexes/base_class/test_reshape.py b/pandas/tests/indexes/base_class/test_reshape.py
index 814a6a516904b..e17e39a334acc 100644
--- a/pandas/tests/indexes/base_class/test_reshape.py
+++ b/pandas/tests/indexes/base_class/test_reshape.py
@@ -1,6 +1,7 @@
"""
Tests for ndarray-like method on the base Index class
"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/indexes/categorical/test_formats.py b/pandas/tests/indexes/categorical/test_formats.py
index 74e738a543300..491db3a63cc0d 100644
--- a/pandas/tests/indexes/categorical/test_formats.py
+++ b/pandas/tests/indexes/categorical/test_formats.py
@@ -1,6 +1,7 @@
"""
Tests for CategoricalIndex.__repr__ and related methods.
"""
+
import pytest
from pandas._config import using_pyarrow_string_dtype
diff --git a/pandas/tests/indexes/datetimelike_/test_equals.py b/pandas/tests/indexes/datetimelike_/test_equals.py
index fc9fbd33d0d28..08134d9f3efb4 100644
--- a/pandas/tests/indexes/datetimelike_/test_equals.py
+++ b/pandas/tests/indexes/datetimelike_/test_equals.py
@@ -1,6 +1,7 @@
"""
Tests shared for DatetimeIndex/TimedeltaIndex/PeriodIndex
"""
+
from datetime import (
datetime,
timedelta,
diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py
index bb2c3d921ea1f..173b32b12e2d1 100644
--- a/pandas/tests/indexes/datetimes/test_partial_slicing.py
+++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py
@@ -1,4 +1,4 @@
-""" test partial slicing on Series/Frame """
+"""test partial slicing on Series/Frame"""
from datetime import datetime
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index daa5b346eb4ec..0c8bdbdd2fb22 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -1,6 +1,7 @@
"""
Tests for DatetimeIndex timezone-related methods
"""
+
from datetime import (
datetime,
timedelta,
diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py
index 10204cfb78e89..284e219fd20e4 100644
--- a/pandas/tests/indexes/test_any_index.py
+++ b/pandas/tests/indexes/test_any_index.py
@@ -1,6 +1,7 @@
"""
Tests that can be parametrized over _any_ Index object.
"""
+
import re
import numpy as np
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index d7ef2d39e8df6..eb0010066a7f6 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -3,6 +3,7 @@
any index subclass except for MultiIndex. Makes use of the `index_flat`
fixture defined in pandas/conftest.py.
"""
+
from copy import (
copy,
deepcopy,
diff --git a/pandas/tests/indexes/test_datetimelike.py b/pandas/tests/indexes/test_datetimelike.py
index 330ea50dc1373..7ec73070836b8 100644
--- a/pandas/tests/indexes/test_datetimelike.py
+++ b/pandas/tests/indexes/test_datetimelike.py
@@ -1,4 +1,4 @@
-""" generic datetimelike tests """
+"""generic datetimelike tests"""
import numpy as np
import pytest
diff --git a/pandas/tests/indexes/test_index_new.py b/pandas/tests/indexes/test_index_new.py
index 2e61340023948..21cb0b8723d59 100644
--- a/pandas/tests/indexes/test_index_new.py
+++ b/pandas/tests/indexes/test_index_new.py
@@ -1,6 +1,7 @@
"""
Tests for the Index constructor conducting inference.
"""
+
from datetime import (
datetime,
timedelta,
diff --git a/pandas/tests/indexes/test_indexing.py b/pandas/tests/indexes/test_indexing.py
index e6716239cca5a..1bbffcee3b671 100644
--- a/pandas/tests/indexes/test_indexing.py
+++ b/pandas/tests/indexes/test_indexing.py
@@ -14,6 +14,7 @@
The corresponding tests.indexes.[index_type].test_indexing files
contain tests for the corresponding methods specific to those Index subclasses.
"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index 27b54ea66f0ac..9a3471fe526c1 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -2,6 +2,7 @@
The tests in this package are to ensure the proper resultant dtypes of
set operations.
"""
+
from datetime import datetime
import operator
diff --git a/pandas/tests/indexes/test_subclass.py b/pandas/tests/indexes/test_subclass.py
index c3287e1ddcddc..a8ba8c3090cf2 100644
--- a/pandas/tests/indexes/test_subclass.py
+++ b/pandas/tests/indexes/test_subclass.py
@@ -1,6 +1,7 @@
"""
Tests involving custom Index subclasses
"""
+
import numpy as np
from pandas import (
diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 2af76f69a4300..a33fb1e6979ec 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -1,4 +1,5 @@
-""" common utilities """
+"""common utilities"""
+
from __future__ import annotations
from typing import (
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 8650a1afb383d..172aa9878caec 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -1,4 +1,4 @@
-""" test positional based indexing with iloc """
+"""test positional based indexing with iloc"""
from datetime import datetime
import re
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 45ec968714aff..60a3ccf0b7483 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -1,4 +1,4 @@
-""" test fancy indexing & misc """
+"""test fancy indexing & misc"""
import array
from datetime import datetime
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 9c33d15c01cd6..7112b866018a2 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1,4 +1,5 @@
-""" test label based indexing with loc """
+"""test label based indexing with loc"""
+
from collections import namedtuple
from datetime import (
date,
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index a51334c03a302..730fe584d7f07 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -1,4 +1,5 @@
-""" test scalar indexing, including at and iat """
+"""test scalar indexing, including at and iat"""
+
from datetime import (
datetime,
timedelta,
diff --git a/pandas/tests/interchange/test_spec_conformance.py b/pandas/tests/interchange/test_spec_conformance.py
index 7c02379c11853..55e42ed2023cd 100644
--- a/pandas/tests/interchange/test_spec_conformance.py
+++ b/pandas/tests/interchange/test_spec_conformance.py
@@ -2,6 +2,7 @@
A verbatim copy (vendored) of the spec tests.
Taken from https://github.com/data-apis/dataframe-api
"""
+
import ctypes
import math
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index d3ddc13c1497e..508fc47d0920b 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -1257,18 +1257,18 @@ def test_engine_kwargs(self, engine, tmp_excel):
}
if PY310:
- msgs[
- "openpyxl"
- ] = "Workbook.__init__() got an unexpected keyword argument 'foo'"
- msgs[
- "xlsxwriter"
- ] = "Workbook.__init__() got an unexpected keyword argument 'foo'"
+ msgs["openpyxl"] = (
+ "Workbook.__init__() got an unexpected keyword argument 'foo'"
+ )
+ msgs["xlsxwriter"] = (
+ "Workbook.__init__() got an unexpected keyword argument 'foo'"
+ )
# Handle change in error message for openpyxl (write and append mode)
if engine == "openpyxl" and not os.path.exists(tmp_excel):
- msgs[
- "openpyxl"
- ] = r"load_workbook() got an unexpected keyword argument 'foo'"
+ msgs["openpyxl"] = (
+ r"load_workbook() got an unexpected keyword argument 'foo'"
+ )
with pytest.raises(TypeError, match=re.escape(msgs[engine])):
df.to_excel(
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 43e94b8c55589..b12cfc6876a8e 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -2,6 +2,7 @@
Tests for the file pandas.io.formats.format, *not* tests for general formatting
of pandas objects.
"""
+
from datetime import datetime
from io import StringIO
import re
diff --git a/pandas/tests/io/formats/test_to_excel.py b/pandas/tests/io/formats/test_to_excel.py
index 927a9f4961f6f..3b782713eed6c 100644
--- a/pandas/tests/io/formats/test_to_excel.py
+++ b/pandas/tests/io/formats/test_to_excel.py
@@ -2,6 +2,7 @@
ExcelFormatter is tested implicitly in pandas/tests/io/excel
"""
+
import string
import pytest
diff --git a/pandas/tests/io/json/test_deprecated_kwargs.py b/pandas/tests/io/json/test_deprecated_kwargs.py
index cc88fc3ba1826..9da682c90a285 100644
--- a/pandas/tests/io/json/test_deprecated_kwargs.py
+++ b/pandas/tests/io/json/test_deprecated_kwargs.py
@@ -1,6 +1,7 @@
"""
Tests for the deprecated keyword arguments for `read_json`.
"""
+
from io import StringIO
import pandas as pd
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index b6fa90edbf106..afc9974c75e6a 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -1,4 +1,5 @@
"""Tests for Table Schema integration."""
+
from collections import OrderedDict
from io import StringIO
import json
diff --git a/pandas/tests/io/parser/common/test_chunksize.py b/pandas/tests/io/parser/common/test_chunksize.py
index 9f42cf674b0a7..cdf4d6ae77f91 100644
--- a/pandas/tests/io/parser/common/test_chunksize.py
+++ b/pandas/tests/io/parser/common/test_chunksize.py
@@ -2,6 +2,7 @@
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
+
from io import StringIO
import numpy as np
diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py
index 7ffc49e941c14..485680d9de48c 100644
--- a/pandas/tests/io/parser/common/test_common_basic.py
+++ b/pandas/tests/io/parser/common/test_common_basic.py
@@ -2,6 +2,7 @@
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
+
from datetime import datetime
from inspect import signature
from io import StringIO
diff --git a/pandas/tests/io/parser/common/test_data_list.py b/pandas/tests/io/parser/common/test_data_list.py
index 3b0ff9e08d349..bf9293ddd841d 100644
--- a/pandas/tests/io/parser/common/test_data_list.py
+++ b/pandas/tests/io/parser/common/test_data_list.py
@@ -2,6 +2,7 @@
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
+
import csv
from io import StringIO
diff --git a/pandas/tests/io/parser/common/test_decimal.py b/pandas/tests/io/parser/common/test_decimal.py
index 4ceca037f589a..eb6c97097e5fb 100644
--- a/pandas/tests/io/parser/common/test_decimal.py
+++ b/pandas/tests/io/parser/common/test_decimal.py
@@ -2,6 +2,7 @@
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
+
from io import StringIO
import pytest
diff --git a/pandas/tests/io/parser/common/test_file_buffer_url.py b/pandas/tests/io/parser/common/test_file_buffer_url.py
index b03e31c21fc81..c93c80a7bb084 100644
--- a/pandas/tests/io/parser/common/test_file_buffer_url.py
+++ b/pandas/tests/io/parser/common/test_file_buffer_url.py
@@ -2,6 +2,7 @@
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
+
from io import (
BytesIO,
StringIO,
diff --git a/pandas/tests/io/parser/common/test_float.py b/pandas/tests/io/parser/common/test_float.py
index 6069c23936297..4e0b61577f9e7 100644
--- a/pandas/tests/io/parser/common/test_float.py
+++ b/pandas/tests/io/parser/common/test_float.py
@@ -2,6 +2,7 @@
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
+
from io import StringIO
import numpy as np
diff --git a/pandas/tests/io/parser/common/test_index.py b/pandas/tests/io/parser/common/test_index.py
index 7cdaac1a284cd..2fcc80f58ae30 100644
--- a/pandas/tests/io/parser/common/test_index.py
+++ b/pandas/tests/io/parser/common/test_index.py
@@ -2,6 +2,7 @@
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
+
from datetime import datetime
from io import StringIO
import os
diff --git a/pandas/tests/io/parser/common/test_inf.py b/pandas/tests/io/parser/common/test_inf.py
index dba952b1f9ebd..657aa3278a442 100644
--- a/pandas/tests/io/parser/common/test_inf.py
+++ b/pandas/tests/io/parser/common/test_inf.py
@@ -2,6 +2,7 @@
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
+
from io import StringIO
import pytest
diff --git a/pandas/tests/io/parser/common/test_ints.py b/pandas/tests/io/parser/common/test_ints.py
index e77958b0e9acc..9322e8d54f5b8 100644
--- a/pandas/tests/io/parser/common/test_ints.py
+++ b/pandas/tests/io/parser/common/test_ints.py
@@ -2,6 +2,7 @@
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
+
from io import StringIO
import numpy as np
diff --git a/pandas/tests/io/parser/common/test_iterator.py b/pandas/tests/io/parser/common/test_iterator.py
index a521c84aa007d..091edb67f6e19 100644
--- a/pandas/tests/io/parser/common/test_iterator.py
+++ b/pandas/tests/io/parser/common/test_iterator.py
@@ -2,6 +2,7 @@
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
+
from io import StringIO
import pytest
diff --git a/pandas/tests/io/parser/common/test_read_errors.py b/pandas/tests/io/parser/common/test_read_errors.py
index f5a724bad4fa2..0827f64dccf46 100644
--- a/pandas/tests/io/parser/common/test_read_errors.py
+++ b/pandas/tests/io/parser/common/test_read_errors.py
@@ -2,6 +2,7 @@
Tests that work on the Python, C and PyArrow engines but do not have a
specific classification into the other test modules.
"""
+
import codecs
import csv
from io import StringIO
@@ -57,9 +58,12 @@ def test_bad_stream_exception(all_parsers, csv_dir_path):
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
- with open(path, "rb") as handle, codecs.StreamRecoder(
- handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
- ) as stream:
+ with (
+ open(path, "rb") as handle,
+ codecs.StreamRecoder(
+ handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
+ ) as stream,
+ ):
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
diff --git a/pandas/tests/io/parser/common/test_verbose.py b/pandas/tests/io/parser/common/test_verbose.py
index fede54643d2dd..c5490afba1e04 100644
--- a/pandas/tests/io/parser/common/test_verbose.py
+++ b/pandas/tests/io/parser/common/test_verbose.py
@@ -2,6 +2,7 @@
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
+
from io import StringIO
import pytest
diff --git a/pandas/tests/io/parser/dtypes/test_categorical.py b/pandas/tests/io/parser/dtypes/test_categorical.py
index f4aff14a5ce32..15cbac54ff8d9 100644
--- a/pandas/tests/io/parser/dtypes/test_categorical.py
+++ b/pandas/tests/io/parser/dtypes/test_categorical.py
@@ -2,6 +2,7 @@
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
+
from io import StringIO
import os
diff --git a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
index 70fd0b02cc79d..d45368dece6d2 100644
--- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
+++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
@@ -2,6 +2,7 @@
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
+
from collections import defaultdict
from io import StringIO
diff --git a/pandas/tests/io/parser/dtypes/test_empty.py b/pandas/tests/io/parser/dtypes/test_empty.py
index 609c4cbe77fc8..ebc61e7f0ca2b 100644
--- a/pandas/tests/io/parser/dtypes/test_empty.py
+++ b/pandas/tests/io/parser/dtypes/test_empty.py
@@ -2,6 +2,7 @@
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
+
from io import StringIO
import numpy as np
diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py
index 27d7bc0bb6c07..090235c862a2a 100644
--- a/pandas/tests/io/parser/test_c_parser_only.py
+++ b/pandas/tests/io/parser/test_c_parser_only.py
@@ -4,6 +4,7 @@
these tests out of this module as soon as the Python parser can accept
further arguments when parsing.
"""
+
from decimal import Decimal
from io import (
BytesIO,
@@ -509,7 +510,7 @@ def __next__(self):
def test_buffer_rd_bytes_bad_unicode(c_parser_only):
# see gh-22748
- t = BytesIO(b"\xB0")
+ t = BytesIO(b"\xb0")
t = TextIOWrapper(t, encoding="ascii", errors="surrogateescape")
msg = "'utf-8' codec can't encode character"
with pytest.raises(UnicodeError, match=msg):
diff --git a/pandas/tests/io/parser/test_comment.py b/pandas/tests/io/parser/test_comment.py
index abaeeb86476da..ca8df520b171e 100644
--- a/pandas/tests/io/parser/test_comment.py
+++ b/pandas/tests/io/parser/test_comment.py
@@ -2,6 +2,7 @@
Tests that comments are properly handled during parsing
for all of the parsers defined in parsers.py
"""
+
from io import StringIO
import numpy as np
diff --git a/pandas/tests/io/parser/test_converters.py b/pandas/tests/io/parser/test_converters.py
index b6b882b4ec432..7986df62a6b6f 100644
--- a/pandas/tests/io/parser/test_converters.py
+++ b/pandas/tests/io/parser/test_converters.py
@@ -2,6 +2,7 @@
Tests column conversion functionality during parsing
for all of the parsers defined in parsers.py
"""
+
from io import StringIO
from dateutil.parser import parse
diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py
index 31b7e9df1e0ec..5df8c3d27bf84 100644
--- a/pandas/tests/io/parser/test_encoding.py
+++ b/pandas/tests/io/parser/test_encoding.py
@@ -2,6 +2,7 @@
Tests encoding functionality during parsing
for all of the parsers defined in parsers.py
"""
+
from io import (
BytesIO,
TextIOWrapper,
diff --git a/pandas/tests/io/parser/test_index_col.py b/pandas/tests/io/parser/test_index_col.py
index ba15d061b2deb..24d0a7626723e 100644
--- a/pandas/tests/io/parser/test_index_col.py
+++ b/pandas/tests/io/parser/test_index_col.py
@@ -3,6 +3,7 @@
is properly handled or inferred during parsing for all of
the parsers defined in parsers.py
"""
+
from io import StringIO
import numpy as np
diff --git a/pandas/tests/io/parser/test_mangle_dupes.py b/pandas/tests/io/parser/test_mangle_dupes.py
index 1d245f81f027c..61d328138da96 100644
--- a/pandas/tests/io/parser/test_mangle_dupes.py
+++ b/pandas/tests/io/parser/test_mangle_dupes.py
@@ -3,6 +3,7 @@
CSV engine. In general, the expected result is that they are either thoroughly
de-duplicated (if mangling requested) or ignored otherwise.
"""
+
from io import StringIO
import pytest
diff --git a/pandas/tests/io/parser/test_multi_thread.py b/pandas/tests/io/parser/test_multi_thread.py
index da9b9bddd30cd..7fac67df44ca2 100644
--- a/pandas/tests/io/parser/test_multi_thread.py
+++ b/pandas/tests/io/parser/test_multi_thread.py
@@ -2,6 +2,7 @@
Tests multithreading behaviour for reading and
parsing files for each parser defined in parsers.py
"""
+
from contextlib import ExitStack
from io import BytesIO
from multiprocessing.pool import ThreadPool
diff --git a/pandas/tests/io/parser/test_na_values.py b/pandas/tests/io/parser/test_na_values.py
index 6ebfc8f337c10..ba0e3033321e4 100644
--- a/pandas/tests/io/parser/test_na_values.py
+++ b/pandas/tests/io/parser/test_na_values.py
@@ -2,6 +2,7 @@
Tests that NA values are properly handled during
parsing for all of the parsers defined in parsers.py
"""
+
from io import StringIO
import numpy as np
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index 9351387dfc337..f63cc3d56bf89 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -2,6 +2,7 @@
Tests parsers ability to read and parse non-local files
and hence require a network connection to be read.
"""
+
from io import BytesIO
import logging
import re
diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py
index dc3c527e82202..c0ea5936164a1 100644
--- a/pandas/tests/io/parser/test_python_parser_only.py
+++ b/pandas/tests/io/parser/test_python_parser_only.py
@@ -4,6 +4,7 @@
these tests out of this module as soon as the C parser can accept further
arguments when parsing.
"""
+
from __future__ import annotations
import csv
diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py
index 1b3d1d41bc1c9..6aeed2377a3aa 100644
--- a/pandas/tests/io/parser/test_textreader.py
+++ b/pandas/tests/io/parser/test_textreader.py
@@ -2,6 +2,7 @@
Tests the TextReader class in parsers.pyx, which
is integral to the C engine in parsers.py
"""
+
from io import (
BytesIO,
StringIO,
diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py
index 3e52e9b68735d..8d4c28bd61fa1 100644
--- a/pandas/tests/io/parser/test_unsupported.py
+++ b/pandas/tests/io/parser/test_unsupported.py
@@ -6,6 +6,7 @@
Ultimately, the goal is to remove test cases from this
test suite as new feature support is added to the parsers.
"""
+
from io import StringIO
import os
from pathlib import Path
diff --git a/pandas/tests/io/parser/usecols/test_parse_dates.py b/pandas/tests/io/parser/usecols/test_parse_dates.py
index bc66189ca064e..75efe87c408c0 100644
--- a/pandas/tests/io/parser/usecols/test_parse_dates.py
+++ b/pandas/tests/io/parser/usecols/test_parse_dates.py
@@ -2,6 +2,7 @@
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
+
from io import StringIO
import pytest
diff --git a/pandas/tests/io/parser/usecols/test_strings.py b/pandas/tests/io/parser/usecols/test_strings.py
index 0d51c2cb3cdb4..1538bd4e805f7 100644
--- a/pandas/tests/io/parser/usecols/test_strings.py
+++ b/pandas/tests/io/parser/usecols/test_strings.py
@@ -2,6 +2,7 @@
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
+
from io import StringIO
import pytest
diff --git a/pandas/tests/io/parser/usecols/test_usecols_basic.py b/pandas/tests/io/parser/usecols/test_usecols_basic.py
index 214070b1ac5f2..d55066d2d70bb 100644
--- a/pandas/tests/io/parser/usecols/test_usecols_basic.py
+++ b/pandas/tests/io/parser/usecols/test_usecols_basic.py
@@ -2,6 +2,7 @@
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
+
from io import StringIO
import numpy as np
diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py
index 529d6d789596f..cc61d8bca7de3 100644
--- a/pandas/tests/io/pytables/test_append.py
+++ b/pandas/tests/io/pytables/test_append.py
@@ -940,8 +940,9 @@ def test_append_to_multiple_dropna_false(setup_path):
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
- with ensure_clean_store(setup_path) as store, pd.option_context(
- "io.hdf.dropna_table", True
+ with (
+ ensure_clean_store(setup_path) as store,
+ pd.option_context("io.hdf.dropna_table", True),
):
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index 886bff332a420..f5880d8a894f8 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -1,6 +1,7 @@
"""
Tests for the pandas.io.common functionalities
"""
+
import codecs
import errno
from functools import partial
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index c8b5b690ae118..893728748f276 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -1,4 +1,5 @@
-""" test feather-format compat """
+"""test feather-format compat"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py
index 0ce6a8bf82cd8..a4cf257296b09 100644
--- a/pandas/tests/io/test_gcs.py
+++ b/pandas/tests/io/test_gcs.py
@@ -115,15 +115,17 @@ def assert_equal_zip_safe(result: bytes, expected: bytes, compression: str):
"""
if compression == "zip":
# Only compare the CRC checksum of the file contents
- with zipfile.ZipFile(BytesIO(result)) as exp, zipfile.ZipFile(
- BytesIO(expected)
- ) as res:
+ with (
+ zipfile.ZipFile(BytesIO(result)) as exp,
+ zipfile.ZipFile(BytesIO(expected)) as res,
+ ):
for res_info, exp_info in zip(res.infolist(), exp.infolist()):
assert res_info.CRC == exp_info.CRC
elif compression == "tar":
- with tarfile.open(fileobj=BytesIO(result)) as tar_exp, tarfile.open(
- fileobj=BytesIO(expected)
- ) as tar_res:
+ with (
+ tarfile.open(fileobj=BytesIO(result)) as tar_exp,
+ tarfile.open(fileobj=BytesIO(expected)) as tar_res,
+ ):
for tar_res_info, tar_exp_info in zip(
tar_res.getmembers(), tar_exp.getmembers()
):
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 2251fa20f0b63..2c0f19dc74ed2 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -1461,8 +1461,7 @@ def seekable(self):
return True
# GH 49036 pylint checks for presence of __next__ for iterators
- def __next__(self):
- ...
+ def __next__(self): ...
def __iter__(self) -> Iterator:
# `is_file_like` depends on the presence of
diff --git a/pandas/tests/io/test_http_headers.py b/pandas/tests/io/test_http_headers.py
index 550637a50c1c4..dfae294a147a2 100644
--- a/pandas/tests/io/test_http_headers.py
+++ b/pandas/tests/io/test_http_headers.py
@@ -1,6 +1,7 @@
"""
Tests for the pandas custom headers in http(s) requests
"""
+
from functools import partial
import gzip
from io import BytesIO
diff --git a/pandas/tests/io/test_orc.py b/pandas/tests/io/test_orc.py
index b4a8c713d99ab..de6d46492e916 100644
--- a/pandas/tests/io/test_orc.py
+++ b/pandas/tests/io/test_orc.py
@@ -1,4 +1,5 @@
-""" test orc compat """
+"""test orc compat"""
+
import datetime
from decimal import Decimal
from io import BytesIO
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 3cba7b7da347e..55be48eb572fd 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -1,4 +1,5 @@
-""" test parquet compat """
+"""test parquet compat"""
+
import datetime
from decimal import Decimal
from io import BytesIO
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index ed8d4371e0f3a..1420e24858ffb 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -10,6 +10,7 @@
3. Move the created pickle to "data/legacy_pickle/<version>" directory.
"""
+
from __future__ import annotations
from array import array
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 42a9e84218a81..9078ca865042d 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -419,7 +419,7 @@ def test_read_write_dta11(self):
[(1, 2, 3, 4)],
columns=[
"good",
- "b\u00E4d",
+ "b\u00e4d",
"8number",
"astringwithmorethan32characters______",
],
@@ -1368,7 +1368,7 @@ def test_invalid_variable_label_encoding(self, version, mixed_frame):
)
def test_write_variable_label_errors(self, mixed_frame):
- values = ["\u03A1", "\u0391", "\u039D", "\u0394", "\u0391", "\u03A3"]
+ values = ["\u03a1", "\u0391", "\u039d", "\u0394", "\u0391", "\u03a3"]
variable_labels_utf8 = {
"a": "City Rank",
diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
index 7e0b8dc7282e4..25669ce75953f 100644
--- a/pandas/tests/plotting/frame/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -1,4 +1,5 @@
-""" Test cases for DataFrame.plot """
+"""Test cases for DataFrame.plot"""
+
from datetime import (
date,
datetime,
@@ -204,7 +205,7 @@ def test_plot_multiindex_unicode(self):
columns=columns,
index=index,
)
- _check_plot_works(df.plot, title="\u03A3")
+ _check_plot_works(df.plot, title="\u03a3")
@pytest.mark.slow
@pytest.mark.parametrize("layout", [None, (-1, 1)])
diff --git a/pandas/tests/plotting/frame/test_frame_color.py b/pandas/tests/plotting/frame/test_frame_color.py
index 4f14f1e43cf29..76d3b20aaa2c6 100644
--- a/pandas/tests/plotting/frame/test_frame_color.py
+++ b/pandas/tests/plotting/frame/test_frame_color.py
@@ -1,4 +1,5 @@
-""" Test cases for DataFrame.plot """
+"""Test cases for DataFrame.plot"""
+
import re
import numpy as np
diff --git a/pandas/tests/plotting/frame/test_frame_groupby.py b/pandas/tests/plotting/frame/test_frame_groupby.py
index f1924185a3df1..b7e147bbabde5 100644
--- a/pandas/tests/plotting/frame/test_frame_groupby.py
+++ b/pandas/tests/plotting/frame/test_frame_groupby.py
@@ -1,4 +1,4 @@
-""" Test cases for DataFrame.plot """
+"""Test cases for DataFrame.plot"""
import pytest
diff --git a/pandas/tests/plotting/frame/test_frame_subplots.py b/pandas/tests/plotting/frame/test_frame_subplots.py
index fb34592b288af..16853114d93cd 100644
--- a/pandas/tests/plotting/frame/test_frame_subplots.py
+++ b/pandas/tests/plotting/frame/test_frame_subplots.py
@@ -1,4 +1,4 @@
-""" Test cases for DataFrame.plot """
+"""Test cases for DataFrame.plot"""
import string
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index abafad5b1d7da..2dd45a9abc7a5 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -1,4 +1,4 @@
-""" Test cases for .boxplot method """
+"""Test cases for .boxplot method"""
import itertools
import string
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 5d44c399ee726..2eb44ef4771e0 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1,4 +1,5 @@
-""" Test cases for time series specific (freq conversion, etc) """
+"""Test cases for time series specific (freq conversion, etc)"""
+
from datetime import (
date,
datetime,
diff --git a/pandas/tests/plotting/test_groupby.py b/pandas/tests/plotting/test_groupby.py
index 5ebf93510a615..0cb125d822fd1 100644
--- a/pandas/tests/plotting/test_groupby.py
+++ b/pandas/tests/plotting/test_groupby.py
@@ -1,5 +1,4 @@
-""" Test cases for GroupBy.plot """
-
+"""Test cases for GroupBy.plot"""
import numpy as np
import pytest
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index 0318abe7bdfac..511c1dd7761d5 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -1,4 +1,5 @@
-""" Test cases for .hist method """
+"""Test cases for .hist method"""
+
import re
import numpy as np
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index cfb657c2a800f..d593ddbbaa0b8 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -1,4 +1,5 @@
-""" Test cases for misc plot functions """
+"""Test cases for misc plot functions"""
+
import os
import numpy as np
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 2b2f2f3b84307..9fbc20e10f5c1 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -1,4 +1,5 @@
-""" Test cases for Series.plot """
+"""Test cases for Series.plot"""
+
from datetime import datetime
from itertools import chain
diff --git a/pandas/tests/reductions/test_stat_reductions.py b/pandas/tests/reductions/test_stat_reductions.py
index a6aaeba1dc3a8..60fcf8cbc142c 100644
--- a/pandas/tests/reductions/test_stat_reductions.py
+++ b/pandas/tests/reductions/test_stat_reductions.py
@@ -1,6 +1,7 @@
"""
Tests for statistical reductions of 2nd moment or higher: var, skew, kurt, ...
"""
+
import inspect
import numpy as np
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index f3edaffdb315d..96721f11cb2d6 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -1,6 +1,7 @@
"""
Tests for scalar Timedelta arithmetic ops
"""
+
from datetime import (
datetime,
timedelta,
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index d4398f66e6f89..06a0f3324c2cf 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -1,4 +1,5 @@
-""" test the scalar Timedelta """
+"""test the scalar Timedelta"""
+
from datetime import timedelta
import sys
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index 44a16e51f2c47..ea970433464fc 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -1,4 +1,4 @@
-""" test the scalar Timestamp """
+"""test the scalar Timestamp"""
import calendar
from datetime import (
diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py
index cb2a35be907cd..8f2ee3ef45075 100644
--- a/pandas/tests/scalar/timestamp/test_timezones.py
+++ b/pandas/tests/scalar/timestamp/test_timezones.py
@@ -1,6 +1,7 @@
"""
Tests for Timestamp timezone-related methods
"""
+
from datetime import datetime
from pandas._libs.tslibs import timezones
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index a6e4b4f78e25a..e0ca4bf64ea91 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -1,6 +1,7 @@
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
+
from datetime import (
datetime,
timedelta,
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 01c775e492888..fac543ac450a5 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -1,6 +1,7 @@
"""
Series.__getitem__ test classes are organized by the type of key passed.
"""
+
from datetime import (
date,
datetime,
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 5c36877e5ac86..a629d18131306 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -1,4 +1,5 @@
-""" test get/set & misc """
+"""test get/set & misc"""
+
from datetime import timedelta
import re
diff --git a/pandas/tests/series/methods/test_isna.py b/pandas/tests/series/methods/test_isna.py
index 7e324aa86a052..92bf2945cc0d1 100644
--- a/pandas/tests/series/methods/test_isna.py
+++ b/pandas/tests/series/methods/test_isna.py
@@ -1,6 +1,7 @@
"""
We also test Series.notna in this file.
"""
+
import numpy as np
from pandas import (
diff --git a/pandas/tests/series/methods/test_item.py b/pandas/tests/series/methods/test_item.py
index 8e8c33619d564..e927fa69db358 100644
--- a/pandas/tests/series/methods/test_item.py
+++ b/pandas/tests/series/methods/test_item.py
@@ -2,6 +2,7 @@
Series.item method, mainly testing that we get python scalars as opposed to
numpy scalars.
"""
+
import pytest
from pandas import (
diff --git a/pandas/tests/series/methods/test_nlargest.py b/pandas/tests/series/methods/test_nlargest.py
index c37f57771e29d..56b7cf42a798d 100644
--- a/pandas/tests/series/methods/test_nlargest.py
+++ b/pandas/tests/series/methods/test_nlargest.py
@@ -2,6 +2,7 @@
Note: for naming purposes, most tests are title with as e.g. "test_nlargest_foo"
but are implicitly also testing nsmallest_foo.
"""
+
import numpy as np
import pytest
diff --git a/pandas/tests/series/methods/test_set_name.py b/pandas/tests/series/methods/test_set_name.py
index cbc8ebde7a8ab..137207053c225 100644
--- a/pandas/tests/series/methods/test_set_name.py
+++ b/pandas/tests/series/methods/test_set_name.py
@@ -14,7 +14,7 @@ def test_set_name(self):
def test_set_name_attribute(self):
ser = Series([1, 2, 3])
ser2 = Series([1, 2, 3], name="bar")
- for name in [7, 7.0, "name", datetime(2001, 1, 1), (1,), "\u05D0"]:
+ for name in [7, 7.0, "name", datetime(2001, 1, 1), (1,), "\u05d0"]:
ser.name = name
assert ser.name == name
ser2.name = name
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index b00074c04257e..68737e86f0c6a 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1587,7 +1587,7 @@ def test_NaT_cast(self):
tm.assert_series_equal(result, expected)
def test_constructor_name_hashable(self):
- for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05D0"]:
+ for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05d0"]:
for data in [[1, 2, 3], np.ones(3), {"a": 0, "b": 1}]:
s = Series(data, name=n)
assert s.name == n
diff --git a/pandas/tests/series/test_formats.py b/pandas/tests/series/test_formats.py
index 1a3b46ec8196a..c001e0f9b028a 100644
--- a/pandas/tests/series/test_formats.py
+++ b/pandas/tests/series/test_formats.py
@@ -114,13 +114,13 @@ def test_datetime(self, datetime_series):
1,
1.2,
"foo",
- "\u03B1\u03B2\u03B3",
+ "\u03b1\u03b2\u03b3",
"loooooooooooooooooooooooooooooooooooooooooooooooooooong",
("foo", "bar", "baz"),
(1, 2),
("foo", 1, 2.3),
- ("\u03B1", "\u03B2", "\u03B3"),
- ("\u03B1", "bar"),
+ ("\u03b1", "\u03b2", "\u03b3"),
+ ("\u03b1", "bar"),
],
)
def test_various_names(self, name, string_series):
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index 824e550e0f03b..a4fd29878a2d1 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -1,6 +1,7 @@
"""
Testing that we work in the downstream packages
"""
+
import array
from functools import partial
import subprocess
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 42764c121e3d2..9d93a05cf1761 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -1,4 +1,4 @@
-""" test to_datetime """
+"""test to_datetime"""
import calendar
from collections import deque
diff --git a/pandas/tests/tseries/offsets/common.py b/pandas/tests/tseries/offsets/common.py
index efb010addad22..e0838cceb4c7b 100644
--- a/pandas/tests/tseries/offsets/common.py
+++ b/pandas/tests/tseries/offsets/common.py
@@ -1,6 +1,7 @@
"""
Assertion helpers and base class for offsets tests
"""
+
from __future__ import annotations
diff --git a/pandas/tests/tseries/offsets/test_business_day.py b/pandas/tests/tseries/offsets/test_business_day.py
index 7db1921369023..b1ab5fc64804b 100644
--- a/pandas/tests/tseries/offsets/test_business_day.py
+++ b/pandas/tests/tseries/offsets/test_business_day.py
@@ -1,6 +1,7 @@
"""
Tests for offsets.BDay
"""
+
from __future__ import annotations
from datetime import (
diff --git a/pandas/tests/tseries/offsets/test_business_hour.py b/pandas/tests/tseries/offsets/test_business_hour.py
index f01406fb50d23..1b488dc9a47d4 100644
--- a/pandas/tests/tseries/offsets/test_business_hour.py
+++ b/pandas/tests/tseries/offsets/test_business_hour.py
@@ -1,6 +1,7 @@
"""
Tests for offsets.BusinessHour
"""
+
from __future__ import annotations
from datetime import (
diff --git a/pandas/tests/tseries/offsets/test_business_month.py b/pandas/tests/tseries/offsets/test_business_month.py
index a14451e60aa89..3ae2a115d46f7 100644
--- a/pandas/tests/tseries/offsets/test_business_month.py
+++ b/pandas/tests/tseries/offsets/test_business_month.py
@@ -3,6 +3,7 @@
- BMonthBegin
- BMonthEnd
"""
+
from __future__ import annotations
from datetime import datetime
diff --git a/pandas/tests/tseries/offsets/test_business_quarter.py b/pandas/tests/tseries/offsets/test_business_quarter.py
index 3e87ab3e6d397..ab3e55c4989fb 100644
--- a/pandas/tests/tseries/offsets/test_business_quarter.py
+++ b/pandas/tests/tseries/offsets/test_business_quarter.py
@@ -3,6 +3,7 @@
- BQuarterBegin
- BQuarterEnd
"""
+
from __future__ import annotations
from datetime import datetime
diff --git a/pandas/tests/tseries/offsets/test_business_year.py b/pandas/tests/tseries/offsets/test_business_year.py
index 3b7a1025cc19c..cf12b166b30e4 100644
--- a/pandas/tests/tseries/offsets/test_business_year.py
+++ b/pandas/tests/tseries/offsets/test_business_year.py
@@ -3,6 +3,7 @@
- BYearBegin
- BYearEnd
"""
+
from __future__ import annotations
from datetime import datetime
diff --git a/pandas/tests/tseries/offsets/test_custom_business_day.py b/pandas/tests/tseries/offsets/test_custom_business_day.py
index 519fb712d0415..d2f309dd3f33c 100644
--- a/pandas/tests/tseries/offsets/test_custom_business_day.py
+++ b/pandas/tests/tseries/offsets/test_custom_business_day.py
@@ -1,6 +1,7 @@
"""
Tests for offsets.CustomBusinessDay / CDay
"""
+
from datetime import (
datetime,
timedelta,
diff --git a/pandas/tests/tseries/offsets/test_custom_business_hour.py b/pandas/tests/tseries/offsets/test_custom_business_hour.py
index 0335f415e2ec2..360ed70fa5b9e 100644
--- a/pandas/tests/tseries/offsets/test_custom_business_hour.py
+++ b/pandas/tests/tseries/offsets/test_custom_business_hour.py
@@ -1,6 +1,7 @@
"""
Tests for offsets.CustomBusinessHour
"""
+
from __future__ import annotations
from datetime import (
diff --git a/pandas/tests/tseries/offsets/test_custom_business_month.py b/pandas/tests/tseries/offsets/test_custom_business_month.py
index b74b210c3b191..fd6565e3908f3 100644
--- a/pandas/tests/tseries/offsets/test_custom_business_month.py
+++ b/pandas/tests/tseries/offsets/test_custom_business_month.py
@@ -4,6 +4,7 @@
- CustomBusinessMonthBegin
- CustomBusinessMonthEnd
"""
+
from __future__ import annotations
from datetime import (
diff --git a/pandas/tests/tseries/offsets/test_dst.py b/pandas/tests/tseries/offsets/test_dst.py
index a355b947fc540..8ff80536fc69e 100644
--- a/pandas/tests/tseries/offsets/test_dst.py
+++ b/pandas/tests/tseries/offsets/test_dst.py
@@ -1,6 +1,7 @@
"""
Tests for DateOffset additions over Daylight Savings Time
"""
+
from datetime import timedelta
import pytest
diff --git a/pandas/tests/tseries/offsets/test_easter.py b/pandas/tests/tseries/offsets/test_easter.py
index d11a72cc1b9d5..ada72d94434a3 100644
--- a/pandas/tests/tseries/offsets/test_easter.py
+++ b/pandas/tests/tseries/offsets/test_easter.py
@@ -2,6 +2,7 @@
Tests for the following offsets:
- Easter
"""
+
from __future__ import annotations
from datetime import datetime
diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py
index 208f8f550086a..f442b363f737d 100644
--- a/pandas/tests/tseries/offsets/test_fiscal.py
+++ b/pandas/tests/tseries/offsets/test_fiscal.py
@@ -1,6 +1,7 @@
"""
Tests for Fiscal Year and Fiscal Quarter offset classes
"""
+
from datetime import datetime
from dateutil.relativedelta import relativedelta
diff --git a/pandas/tests/tseries/offsets/test_index.py b/pandas/tests/tseries/offsets/test_index.py
index 7a62944556d11..4fb9815ba92bb 100644
--- a/pandas/tests/tseries/offsets/test_index.py
+++ b/pandas/tests/tseries/offsets/test_index.py
@@ -1,6 +1,7 @@
"""
Tests for offset behavior with indices.
"""
+
import pytest
from pandas import (
diff --git a/pandas/tests/tseries/offsets/test_month.py b/pandas/tests/tseries/offsets/test_month.py
index 2b643999c3ad3..4dd494d0872a1 100644
--- a/pandas/tests/tseries/offsets/test_month.py
+++ b/pandas/tests/tseries/offsets/test_month.py
@@ -5,6 +5,7 @@
- MonthBegin
- MonthEnd
"""
+
from __future__ import annotations
from datetime import datetime
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index fabffa708687b..1e5bfa6033216 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -1,6 +1,7 @@
"""
Tests of pandas.tseries.offsets
"""
+
from __future__ import annotations
from datetime import (
diff --git a/pandas/tests/tseries/offsets/test_offsets_properties.py b/pandas/tests/tseries/offsets/test_offsets_properties.py
index 1b4fa9292c403..99a6a583dd3e9 100644
--- a/pandas/tests/tseries/offsets/test_offsets_properties.py
+++ b/pandas/tests/tseries/offsets/test_offsets_properties.py
@@ -7,6 +7,7 @@
You may wish to consult the previous version for inspiration on further
tests, or when trying to pin down the bugs exposed by the tests below.
"""
+
from hypothesis import (
assume,
given,
diff --git a/pandas/tests/tseries/offsets/test_quarter.py b/pandas/tests/tseries/offsets/test_quarter.py
index d3872b7ce9537..b92ff9d39a3ca 100644
--- a/pandas/tests/tseries/offsets/test_quarter.py
+++ b/pandas/tests/tseries/offsets/test_quarter.py
@@ -3,6 +3,7 @@
- QuarterBegin
- QuarterEnd
"""
+
from __future__ import annotations
from datetime import datetime
diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py
index 07e434e883c04..c8fbdfa11991a 100644
--- a/pandas/tests/tseries/offsets/test_ticks.py
+++ b/pandas/tests/tseries/offsets/test_ticks.py
@@ -1,6 +1,7 @@
"""
Tests for offsets.Tick and subclasses
"""
+
from datetime import (
datetime,
timedelta,
diff --git a/pandas/tests/tseries/offsets/test_week.py b/pandas/tests/tseries/offsets/test_week.py
index f9a8755dc6336..d34a7953121c1 100644
--- a/pandas/tests/tseries/offsets/test_week.py
+++ b/pandas/tests/tseries/offsets/test_week.py
@@ -4,6 +4,7 @@
- WeekOfMonth
- LastWeekOfMonth
"""
+
from __future__ import annotations
from datetime import (
diff --git a/pandas/tests/tseries/offsets/test_year.py b/pandas/tests/tseries/offsets/test_year.py
index 28cbdcf6abecc..9d2a0b20e1e7c 100644
--- a/pandas/tests/tseries/offsets/test_year.py
+++ b/pandas/tests/tseries/offsets/test_year.py
@@ -3,6 +3,7 @@
- YearBegin
- YearEnd
"""
+
from __future__ import annotations
from datetime import datetime
diff --git a/pandas/tests/tslibs/test_liboffsets.py b/pandas/tests/tslibs/test_liboffsets.py
index 6ffc065bb61cf..f311284b9dc63 100644
--- a/pandas/tests/tslibs/test_liboffsets.py
+++ b/pandas/tests/tslibs/test_liboffsets.py
@@ -1,6 +1,7 @@
"""
Tests for helper functions in the cython tslibs.offsets
"""
+
from datetime import datetime
import pytest
diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py
index 4dd9d7b20be69..d1b0595dd50e6 100644
--- a/pandas/tests/tslibs/test_parsing.py
+++ b/pandas/tests/tslibs/test_parsing.py
@@ -1,6 +1,7 @@
"""
Tests for Timestamp parsing, aimed at pandas/_libs/tslibs/parsing.pyx
"""
+
from datetime import datetime
import re
diff --git a/pandas/tests/util/test_assert_produces_warning.py b/pandas/tests/util/test_assert_produces_warning.py
index 88e9f0d8fccee..80e3264690f81 100644
--- a/pandas/tests/util/test_assert_produces_warning.py
+++ b/pandas/tests/util/test_assert_produces_warning.py
@@ -1,6 +1,7 @@
-""""
+""" "
Test module for testing ``pandas._testing.assert_produces_warning``.
"""
+
import warnings
import pytest
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index 78626781289c4..d4a79cae61772 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -23,6 +23,7 @@ def test_foo():
For more information, refer to the ``pytest`` documentation on ``skipif``.
"""
+
from __future__ import annotations
import locale
diff --git a/pandas/util/_tester.py b/pandas/util/_tester.py
index 7cfddef7ddff8..494f306ec807d 100644
--- a/pandas/util/_tester.py
+++ b/pandas/util/_tester.py
@@ -1,6 +1,7 @@
"""
Entrypoint for testing from the top-level namespace.
"""
+
from __future__ import annotations
import os
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index 1c2d6e2d38ab2..9aab19fe340ec 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -2,6 +2,7 @@
Module that contains many useful utilities
for validating data or function arguments
"""
+
from __future__ import annotations
from collections.abc import (
@@ -341,13 +342,11 @@ def validate_percentile(q: float | Iterable[float]) -> np.ndarray:
@overload
-def validate_ascending(ascending: BoolishT) -> BoolishT:
- ...
+def validate_ascending(ascending: BoolishT) -> BoolishT: ...
@overload
-def validate_ascending(ascending: Sequence[BoolishT]) -> list[BoolishT]:
- ...
+def validate_ascending(ascending: Sequence[BoolishT]) -> list[BoolishT]: ...
def validate_ascending(
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index a4d53d360a12b..876e386b83abd 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -193,7 +193,7 @@ def validate_pep8(self):
"flake8",
"--format=%(row)d\t%(col)d\t%(code)s\t%(text)s",
"--max-line-length=88",
- "--ignore=E203,E3,W503,W504,E402,E731,E128,E124",
+ "--ignore=E203,E3,W503,W504,E402,E731,E128,E124,E704",
file.name,
]
response = subprocess.run(cmd, capture_output=True, check=False, text=True)
diff --git a/web/pandas_web.py b/web/pandas_web.py
index 8d4f7d311b716..aac07433f2712 100755
--- a/web/pandas_web.py
+++ b/web/pandas_web.py
@@ -23,6 +23,7 @@
The rest of the items in the file will be added directly to the context.
"""
+
import argparse
import collections
import datetime
| Bump `ruff` to the latest version in a separate PR because of the big formatting change. | https://api.github.com/repos/pandas-dev/pandas/pulls/57766 | 2024-03-07T17:23:25Z | 2024-03-07T22:55:10Z | 2024-03-07T22:55:10Z | 2024-03-07T22:56:38Z |
BUG: PyArrow dtypes were not supported in the interchange protocol | diff --git a/doc/source/whatsnew/v2.2.2.rst b/doc/source/whatsnew/v2.2.2.rst
index 96f210ce6b7b9..54084abab7817 100644
--- a/doc/source/whatsnew/v2.2.2.rst
+++ b/doc/source/whatsnew/v2.2.2.rst
@@ -14,6 +14,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- :meth:`DataFrame.__dataframe__` was producing incorrect data buffers when the a column's type was a pandas nullable on with missing values (:issue:`56702`)
+- :meth:`DataFrame.__dataframe__` was producing incorrect data buffers when the a column's type was a pyarrow nullable on with missing values (:issue:`57664`)
-
.. ---------------------------------------------------------------------------
@@ -21,7 +22,8 @@ Fixed regressions
Bug fixes
~~~~~~~~~
--
+- :meth:`DataFrame.__dataframe__` was showing bytemask instead of bitmask for ``'string[pyarrow]'`` validity buffer (:issue:`57762`)
+- :meth:`DataFrame.__dataframe__` was showing non-null validity buffer (instead of ``None``) ``'string[pyarrow]'`` without missing values (:issue:`57761`)
.. ---------------------------------------------------------------------------
.. _whatsnew_222.other:
diff --git a/pandas/core/interchange/buffer.py b/pandas/core/interchange/buffer.py
index 5c97fc17d7070..5d24325e67f62 100644
--- a/pandas/core/interchange/buffer.py
+++ b/pandas/core/interchange/buffer.py
@@ -12,6 +12,7 @@
if TYPE_CHECKING:
import numpy as np
+ import pyarrow as pa
class PandasBuffer(Buffer):
@@ -76,3 +77,60 @@ def __repr__(self) -> str:
)
+ ")"
)
+
+
+class PandasBufferPyarrow(Buffer):
+ """
+ Data in the buffer is guaranteed to be contiguous in memory.
+ """
+
+ def __init__(
+ self,
+ buffer: pa.Buffer,
+ *,
+ length: int,
+ ) -> None:
+ """
+ Handle pyarrow chunked arrays.
+ """
+ self._buffer = buffer
+ self._length = length
+
+ @property
+ def bufsize(self) -> int:
+ """
+ Buffer size in bytes.
+ """
+ return self._buffer.size
+
+ @property
+ def ptr(self) -> int:
+ """
+ Pointer to start of the buffer as an integer.
+ """
+ return self._buffer.address
+
+ def __dlpack__(self) -> Any:
+ """
+ Represent this structure as DLPack interface.
+ """
+ raise NotImplementedError()
+
+ def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
+ """
+ Device type and device ID for where the data in the buffer resides.
+ """
+ return (DlpackDeviceType.CPU, None)
+
+ def __repr__(self) -> str:
+ return (
+ "PandasBuffer[pyarrow]("
+ + str(
+ {
+ "bufsize": self.bufsize,
+ "ptr": self.ptr,
+ "device": "CPU",
+ }
+ )
+ + ")"
+ )
diff --git a/pandas/core/interchange/column.py b/pandas/core/interchange/column.py
index bf20f0b5433cd..c27a9d8141712 100644
--- a/pandas/core/interchange/column.py
+++ b/pandas/core/interchange/column.py
@@ -1,6 +1,9 @@
from __future__ import annotations
-from typing import Any
+from typing import (
+ TYPE_CHECKING,
+ Any,
+)
import numpy as np
@@ -9,15 +12,18 @@
from pandas.errors import NoBufferPresent
from pandas.util._decorators import cache_readonly
-from pandas.core.dtypes.dtypes import (
+from pandas.core.dtypes.dtypes import BaseMaskedDtype
+
+import pandas as pd
+from pandas import (
ArrowDtype,
- BaseMaskedDtype,
DatetimeTZDtype,
)
-
-import pandas as pd
from pandas.api.types import is_string_dtype
-from pandas.core.interchange.buffer import PandasBuffer
+from pandas.core.interchange.buffer import (
+ PandasBuffer,
+ PandasBufferPyarrow,
+)
from pandas.core.interchange.dataframe_protocol import (
Column,
ColumnBuffers,
@@ -30,6 +36,9 @@
dtype_to_arrow_c_fmt,
)
+if TYPE_CHECKING:
+ from pandas.core.interchange.dataframe_protocol import Buffer
+
_NP_KINDS = {
"i": DtypeKind.INT,
"u": DtypeKind.UINT,
@@ -157,6 +166,16 @@ def _dtype_from_pandasdtype(self, dtype) -> tuple[DtypeKind, int, str, str]:
else:
byteorder = dtype.byteorder
+ if dtype == "bool[pyarrow]":
+ # return early to avoid the `* 8` below, as this is a bitmask
+ # rather than a bytemask
+ return (
+ kind,
+ dtype.itemsize, # pyright: ignore[reportAttributeAccessIssue]
+ ArrowCTypes.BOOL,
+ byteorder,
+ )
+
return kind, dtype.itemsize * 8, dtype_to_arrow_c_fmt(dtype), byteorder
@property
@@ -194,6 +213,12 @@ def describe_null(self):
column_null_dtype = ColumnNullType.USE_BYTEMASK
null_value = 1
return column_null_dtype, null_value
+ if isinstance(self._col.dtype, ArrowDtype):
+ # We already rechunk (if necessary / allowed) upon initialization, so this
+ # is already single-chunk by the time we get here.
+ if self._col.array._pa_array.chunks[0].buffers()[0] is None: # type: ignore[attr-defined]
+ return ColumnNullType.NON_NULLABLE, None
+ return ColumnNullType.USE_BITMASK, 0
kind = self.dtype[0]
try:
null, value = _NULL_DESCRIPTION[kind]
@@ -278,10 +303,11 @@ def get_buffers(self) -> ColumnBuffers:
def _get_data_buffer(
self,
- ) -> tuple[PandasBuffer, Any]: # Any is for self.dtype tuple
+ ) -> tuple[Buffer, tuple[DtypeKind, int, str, str]]:
"""
Return the buffer containing the data and the buffer's associated dtype.
"""
+ buffer: Buffer
if self.dtype[0] == DtypeKind.DATETIME:
# self.dtype[2] is an ArrowCTypes.TIMESTAMP where the tz will make
# it longer than 4 characters
@@ -302,15 +328,22 @@ def _get_data_buffer(
DtypeKind.FLOAT,
DtypeKind.BOOL,
):
+ dtype = self.dtype
arr = self._col.array
+ if isinstance(self._col.dtype, ArrowDtype):
+ # We already rechunk (if necessary / allowed) upon initialization, so
+ # this is already single-chunk by the time we get here.
+ arr = arr._pa_array.chunks[0] # type: ignore[attr-defined]
+ buffer = PandasBufferPyarrow(
+ arr.buffers()[1], # type: ignore[attr-defined]
+ length=len(arr),
+ )
+ return buffer, dtype
if isinstance(self._col.dtype, BaseMaskedDtype):
np_arr = arr._data # type: ignore[attr-defined]
- elif isinstance(self._col.dtype, ArrowDtype):
- raise NotImplementedError("ArrowDtype not handled yet")
else:
np_arr = arr._ndarray # type: ignore[attr-defined]
buffer = PandasBuffer(np_arr, allow_copy=self._allow_copy)
- dtype = self.dtype
elif self.dtype[0] == DtypeKind.CATEGORICAL:
codes = self._col.values._codes
buffer = PandasBuffer(codes, allow_copy=self._allow_copy)
@@ -343,13 +376,26 @@ def _get_data_buffer(
return buffer, dtype
- def _get_validity_buffer(self) -> tuple[PandasBuffer, Any]:
+ def _get_validity_buffer(self) -> tuple[Buffer, Any] | None:
"""
Return the buffer containing the mask values indicating missing data and
the buffer's associated dtype.
Raises NoBufferPresent if null representation is not a bit or byte mask.
"""
null, invalid = self.describe_null
+ buffer: Buffer
+ if isinstance(self._col.dtype, ArrowDtype):
+ # We already rechunk (if necessary / allowed) upon initialization, so this
+ # is already single-chunk by the time we get here.
+ arr = self._col.array._pa_array.chunks[0] # type: ignore[attr-defined]
+ dtype = (DtypeKind.BOOL, 1, ArrowCTypes.BOOL, Endianness.NATIVE)
+ if arr.buffers()[0] is None:
+ return None
+ buffer = PandasBufferPyarrow(
+ arr.buffers()[0],
+ length=len(arr),
+ )
+ return buffer, dtype
if isinstance(self._col.dtype, BaseMaskedDtype):
mask = self._col.array._mask # type: ignore[attr-defined]
diff --git a/pandas/core/interchange/dataframe.py b/pandas/core/interchange/dataframe.py
index 1ffe0e8e8dbb0..1abacddfc7e3b 100644
--- a/pandas/core/interchange/dataframe.py
+++ b/pandas/core/interchange/dataframe.py
@@ -5,6 +5,7 @@
from pandas.core.interchange.column import PandasColumn
from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg
+from pandas.core.interchange.utils import maybe_rechunk
if TYPE_CHECKING:
from collections.abc import (
@@ -34,6 +35,10 @@ def __init__(self, df: DataFrame, allow_copy: bool = True) -> None:
"""
self._df = df.rename(columns=str, copy=False)
self._allow_copy = allow_copy
+ for i, _col in enumerate(self._df.columns):
+ rechunked = maybe_rechunk(self._df.iloc[:, i], allow_copy=allow_copy)
+ if rechunked is not None:
+ self._df.isetitem(i, rechunked)
def __dataframe__(
self, nan_as_null: bool = False, allow_copy: bool = True
diff --git a/pandas/core/interchange/from_dataframe.py b/pandas/core/interchange/from_dataframe.py
index a952887d7eed2..4575837fb12fc 100644
--- a/pandas/core/interchange/from_dataframe.py
+++ b/pandas/core/interchange/from_dataframe.py
@@ -298,13 +298,14 @@ def string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:
null_pos = None
if null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK):
- assert buffers["validity"], "Validity buffers cannot be empty for masks"
- valid_buff, valid_dtype = buffers["validity"]
- null_pos = buffer_to_ndarray(
- valid_buff, valid_dtype, offset=col.offset, length=col.size()
- )
- if sentinel_val == 0:
- null_pos = ~null_pos
+ validity = buffers["validity"]
+ if validity is not None:
+ valid_buff, valid_dtype = validity
+ null_pos = buffer_to_ndarray(
+ valid_buff, valid_dtype, offset=col.offset, length=col.size()
+ )
+ if sentinel_val == 0:
+ null_pos = ~null_pos
# Assemble the strings from the code units
str_list: list[None | float | str] = [None] * col.size()
@@ -516,6 +517,8 @@ def set_nulls(
np.ndarray or pd.Series
Data with the nulls being set.
"""
+ if validity is None:
+ return data
null_kind, sentinel_val = col.describe_null
null_pos = None
diff --git a/pandas/core/interchange/utils.py b/pandas/core/interchange/utils.py
index 2e73e560e5740..2a19dd5046aa3 100644
--- a/pandas/core/interchange/utils.py
+++ b/pandas/core/interchange/utils.py
@@ -16,6 +16,8 @@
DatetimeTZDtype,
)
+import pandas as pd
+
if typing.TYPE_CHECKING:
from pandas._typing import DtypeObj
@@ -145,3 +147,29 @@ def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str:
raise NotImplementedError(
f"Conversion of {dtype} to Arrow C format string is not implemented."
)
+
+
+def maybe_rechunk(series: pd.Series, *, allow_copy: bool) -> pd.Series | None:
+ """
+ Rechunk a multi-chunk pyarrow array into a single-chunk array, if necessary.
+
+ - Returns `None` if the input series is not backed by a multi-chunk pyarrow array
+ (and so doesn't need rechunking)
+ - Returns a single-chunk-backed-Series if the input is backed by a multi-chunk
+ pyarrow array and `allow_copy` is `True`.
+ - Raises a `RuntimeError` if `allow_copy` is `False` and input is a
+ based by a multi-chunk pyarrow array.
+ """
+ if not isinstance(series.dtype, pd.ArrowDtype):
+ return None
+ chunked_array = series.array._pa_array # type: ignore[attr-defined]
+ if len(chunked_array.chunks) == 1:
+ return None
+ if not allow_copy:
+ raise RuntimeError(
+ "Found multi-chunk pyarrow array, but `allow_copy` is False. "
+ "Please rechunk the array before calling this function, or set "
+ "`allow_copy=True`."
+ )
+ arr = chunked_array.combine_chunks()
+ return pd.Series(arr, dtype=series.dtype, name=series.name, index=series.index)
diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py
index 94b2da894ad0f..83574e8630d6f 100644
--- a/pandas/tests/interchange/test_impl.py
+++ b/pandas/tests/interchange/test_impl.py
@@ -1,4 +1,7 @@
-from datetime import datetime
+from datetime import (
+ datetime,
+ timezone,
+)
import numpy as np
import pytest
@@ -291,6 +294,27 @@ def test_multi_chunk_pyarrow() -> None:
pd.api.interchange.from_dataframe(table, allow_copy=False)
+def test_multi_chunk_column() -> None:
+ pytest.importorskip("pyarrow", "11.0.0")
+ ser = pd.Series([1, 2, None], dtype="Int64[pyarrow]")
+ df = pd.concat([ser, ser], ignore_index=True).to_frame("a")
+ df_orig = df.copy()
+ with pytest.raises(
+ RuntimeError, match="Found multi-chunk pyarrow array, but `allow_copy` is False"
+ ):
+ pd.api.interchange.from_dataframe(df.__dataframe__(allow_copy=False))
+ result = pd.api.interchange.from_dataframe(df.__dataframe__(allow_copy=True))
+ # Interchange protocol defaults to creating numpy-backed columns, so currently this
+ # is 'float64'.
+ expected = pd.DataFrame({"a": [1.0, 2.0, None, 1.0, 2.0, None]}, dtype="float64")
+ tm.assert_frame_equal(result, expected)
+
+ # Check that the rechunking we did didn't modify the original DataFrame.
+ tm.assert_frame_equal(df, df_orig)
+ assert len(df["a"].array._pa_array.chunks) == 2
+ assert len(df_orig["a"].array._pa_array.chunks) == 2
+
+
def test_timestamp_ns_pyarrow():
# GH 56712
pytest.importorskip("pyarrow", "11.0.0")
@@ -416,42 +440,60 @@ def test_non_str_names_w_duplicates():
pd.api.interchange.from_dataframe(dfi, allow_copy=False)
-def test_nullable_integers() -> None:
- # https://github.com/pandas-dev/pandas/issues/55069
- df = pd.DataFrame({"a": [1]}, dtype="Int8")
- expected = pd.DataFrame({"a": [1]}, dtype="int8")
- result = pd.api.interchange.from_dataframe(df.__dataframe__())
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.xfail(reason="https://github.com/pandas-dev/pandas/issues/57664")
-def test_nullable_integers_pyarrow() -> None:
- # https://github.com/pandas-dev/pandas/issues/55069
- df = pd.DataFrame({"a": [1]}, dtype="Int8[pyarrow]")
- expected = pd.DataFrame({"a": [1]}, dtype="int8")
- result = pd.api.interchange.from_dataframe(df.__dataframe__())
- tm.assert_frame_equal(result, expected)
-
-
@pytest.mark.parametrize(
("data", "dtype", "expected_dtype"),
[
([1, 2, None], "Int64", "int64"),
+ ([1, 2, None], "Int64[pyarrow]", "int64"),
+ ([1, 2, None], "Int8", "int8"),
+ ([1, 2, None], "Int8[pyarrow]", "int8"),
(
[1, 2, None],
"UInt64",
"uint64",
),
+ (
+ [1, 2, None],
+ "UInt64[pyarrow]",
+ "uint64",
+ ),
([1.0, 2.25, None], "Float32", "float32"),
+ ([1.0, 2.25, None], "Float32[pyarrow]", "float32"),
+ ([True, False, None], "boolean[pyarrow]", "bool"),
+ (["much ado", "about", None], "string[pyarrow_numpy]", "large_string"),
+ (["much ado", "about", None], "string[pyarrow]", "large_string"),
+ (
+ [datetime(2020, 1, 1), datetime(2020, 1, 2), None],
+ "timestamp[ns][pyarrow]",
+ "timestamp[ns]",
+ ),
+ (
+ [datetime(2020, 1, 1), datetime(2020, 1, 2), None],
+ "timestamp[us][pyarrow]",
+ "timestamp[us]",
+ ),
+ (
+ [
+ datetime(2020, 1, 1, tzinfo=timezone.utc),
+ datetime(2020, 1, 2, tzinfo=timezone.utc),
+ None,
+ ],
+ "timestamp[us, Asia/Kathmandu][pyarrow]",
+ "timestamp[us, tz=Asia/Kathmandu]",
+ ),
],
)
-def test_pandas_nullable_w_missing_values(
+def test_pandas_nullable_with_missing_values(
data: list, dtype: str, expected_dtype: str
) -> None:
# https://github.com/pandas-dev/pandas/issues/57643
- pytest.importorskip("pyarrow", "11.0.0")
+ # https://github.com/pandas-dev/pandas/issues/57664
+ pa = pytest.importorskip("pyarrow", "11.0.0")
import pyarrow.interchange as pai
+ if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]":
+ expected_dtype = pa.timestamp("us", "Asia/Kathmandu")
+
df = pd.DataFrame({"a": data}, dtype=dtype)
result = pai.from_dataframe(df.__dataframe__())["a"]
assert result.type == expected_dtype
@@ -460,6 +502,86 @@ def test_pandas_nullable_w_missing_values(
assert result[2].as_py() is None
+@pytest.mark.parametrize(
+ ("data", "dtype", "expected_dtype"),
+ [
+ ([1, 2, 3], "Int64", "int64"),
+ ([1, 2, 3], "Int64[pyarrow]", "int64"),
+ ([1, 2, 3], "Int8", "int8"),
+ ([1, 2, 3], "Int8[pyarrow]", "int8"),
+ (
+ [1, 2, 3],
+ "UInt64",
+ "uint64",
+ ),
+ (
+ [1, 2, 3],
+ "UInt64[pyarrow]",
+ "uint64",
+ ),
+ ([1.0, 2.25, 5.0], "Float32", "float32"),
+ ([1.0, 2.25, 5.0], "Float32[pyarrow]", "float32"),
+ ([True, False, False], "boolean[pyarrow]", "bool"),
+ (["much ado", "about", "nothing"], "string[pyarrow_numpy]", "large_string"),
+ (["much ado", "about", "nothing"], "string[pyarrow]", "large_string"),
+ (
+ [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)],
+ "timestamp[ns][pyarrow]",
+ "timestamp[ns]",
+ ),
+ (
+ [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)],
+ "timestamp[us][pyarrow]",
+ "timestamp[us]",
+ ),
+ (
+ [
+ datetime(2020, 1, 1, tzinfo=timezone.utc),
+ datetime(2020, 1, 2, tzinfo=timezone.utc),
+ datetime(2020, 1, 3, tzinfo=timezone.utc),
+ ],
+ "timestamp[us, Asia/Kathmandu][pyarrow]",
+ "timestamp[us, tz=Asia/Kathmandu]",
+ ),
+ ],
+)
+def test_pandas_nullable_without_missing_values(
+ data: list, dtype: str, expected_dtype: str
+) -> None:
+ # https://github.com/pandas-dev/pandas/issues/57643
+ pa = pytest.importorskip("pyarrow", "11.0.0")
+ import pyarrow.interchange as pai
+
+ if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]":
+ expected_dtype = pa.timestamp("us", "Asia/Kathmandu")
+
+ df = pd.DataFrame({"a": data}, dtype=dtype)
+ result = pai.from_dataframe(df.__dataframe__())["a"]
+ assert result.type == expected_dtype
+ assert result[0].as_py() == data[0]
+ assert result[1].as_py() == data[1]
+ assert result[2].as_py() == data[2]
+
+
+def test_string_validity_buffer() -> None:
+ # https://github.com/pandas-dev/pandas/issues/57761
+ pytest.importorskip("pyarrow", "11.0.0")
+ df = pd.DataFrame({"a": ["x"]}, dtype="large_string[pyarrow]")
+ result = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"]
+ assert result is None
+
+
+def test_string_validity_buffer_no_missing() -> None:
+ # https://github.com/pandas-dev/pandas/issues/57762
+ pytest.importorskip("pyarrow", "11.0.0")
+ df = pd.DataFrame({"a": ["x", None]}, dtype="large_string[pyarrow]")
+ validity = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"]
+ assert validity is not None
+ result = validity[1]
+ expected = (DtypeKind.BOOL, 1, ArrowCTypes.BOOL, "=")
+ assert result == expected
+
+
def test_empty_dataframe():
# https://github.com/pandas-dev/pandas/issues/56700
df = pd.DataFrame({"a": []}, dtype="int8")
| fixes a few things:
- closes #57762
- closes #57761
- closes #57664
| https://api.github.com/repos/pandas-dev/pandas/pulls/57764 | 2024-03-07T14:26:05Z | 2024-03-20T21:10:55Z | 2024-03-20T21:10:55Z | 2024-04-10T12:08:23Z |
Backport PR #57759 on branch 2.2.x (DOC: add whatsnew for v2.2.2) | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 3a2ab4c17d1bd..34a2845290d5a 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -16,6 +16,7 @@ Version 2.2
.. toctree::
:maxdepth: 2
+ v2.2.2
v2.2.1
v2.2.0
diff --git a/doc/source/whatsnew/v2.2.2.rst b/doc/source/whatsnew/v2.2.2.rst
new file mode 100644
index 0000000000000..058f7aebcd538
--- /dev/null
+++ b/doc/source/whatsnew/v2.2.2.rst
@@ -0,0 +1,36 @@
+.. _whatsnew_222:
+
+What's new in 2.2.2 (April XX, 2024)
+---------------------------------------
+
+These are the changes in pandas 2.2.2. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_222.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+-
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_222.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+-
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_222.other:
+
+Other
+~~~~~
+-
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_222.contributors:
+
+Contributors
+~~~~~~~~~~~~
| Backport PR #57759: DOC: add whatsnew for v2.2.2 | https://api.github.com/repos/pandas-dev/pandas/pulls/57763 | 2024-03-07T12:52:13Z | 2024-03-07T17:13:45Z | 2024-03-07T17:13:44Z | 2024-03-07T17:13:45Z |
CLN: enforce deprecation of the `method` keyword on `df.fillna` | diff --git a/doc/source/whatsnew/v0.10.0.rst b/doc/source/whatsnew/v0.10.0.rst
index be50c34d7d14c..905583c708905 100644
--- a/doc/source/whatsnew/v0.10.0.rst
+++ b/doc/source/whatsnew/v0.10.0.rst
@@ -242,18 +242,42 @@ labeled the aggregated group with the end of the interval: the next day).
- Calling ``fillna`` on Series or DataFrame with no arguments is no longer
valid code. You must either specify a fill value or an interpolation method:
-.. ipython:: python
- :okwarning:
+.. code-block:: ipython
- s = pd.Series([np.nan, 1.0, 2.0, np.nan, 4])
- s
- s.fillna(0)
- s.fillna(method="pad")
+ In [6]: s = pd.Series([np.nan, 1.0, 2.0, np.nan, 4])
+
+ In [7]: s
+ Out[7]:
+ 0 NaN
+ 1 1.0
+ 2 2.0
+ 3 NaN
+ 4 4.0
+ dtype: float64
+
+ In [8]: s.fillna(0)
+ Out[8]:
+ 0 0.0
+ 1 1.0
+ 2 2.0
+ 3 0.0
+ 4 4.0
+ dtype: float64
+
+ In [9]: s.fillna(method="pad")
+ Out[9]:
+ 0 NaN
+ 1 1.0
+ 2 2.0
+ 3 2.0
+ 4 4.0
+ dtype: float64
Convenience methods ``ffill`` and ``bfill`` have been added:
.. ipython:: python
+ s = pd.Series([np.nan, 1.0, 2.0, np.nan, 4])
s.ffill()
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index 7802ef4798659..1aee3862b6f00 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -229,6 +229,7 @@ Removal of prior version deprecations/changes
- Removed ``year``, ``month``, ``quarter``, ``day``, ``hour``, ``minute``, and ``second`` keywords in the :class:`PeriodIndex` constructor, use :meth:`PeriodIndex.from_fields` instead (:issue:`55960`)
- Removed deprecated argument ``obj`` in :meth:`.DataFrameGroupBy.get_group` and :meth:`.SeriesGroupBy.get_group` (:issue:`53545`)
- Removed deprecated behavior of :meth:`Series.agg` using :meth:`Series.apply` (:issue:`53325`)
+- Removed deprecated keyword ``method`` on :meth:`Series.fillna`, :meth:`DataFrame.fillna` (:issue:`57760`)
- Removed option ``mode.use_inf_as_na``, convert inf entries to ``NaN`` before instead (:issue:`51684`)
- Removed support for :class:`DataFrame` in :meth:`DataFrame.from_records`(:issue:`51697`)
- Removed support for ``errors="ignore"`` in :func:`to_datetime`, :func:`to_timedelta` and :func:`to_numeric` (:issue:`55734`)
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 5cdb3b59698f5..1d8411c706f90 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -172,10 +172,6 @@ def pytest_collection_modifyitems(items, config) -> None:
"DataFrameGroupBy.fillna",
"DataFrameGroupBy.fillna with 'method' is deprecated",
),
- (
- "DataFrameGroupBy.fillna",
- "DataFrame.fillna with 'method' is deprecated",
- ),
("read_parquet", "Passing a BlockManager to DataFrame is deprecated"),
]
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e501858e73872..8f73003216a41 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6769,7 +6769,6 @@ def fillna(
self,
value: Hashable | Mapping | Series | DataFrame = ...,
*,
- method: FillnaOptions | None = ...,
axis: Axis | None = ...,
inplace: Literal[False] = ...,
limit: int | None = ...,
@@ -6781,7 +6780,6 @@ def fillna(
self,
value: Hashable | Mapping | Series | DataFrame = ...,
*,
- method: FillnaOptions | None = ...,
axis: Axis | None = ...,
inplace: Literal[True],
limit: int | None = ...,
@@ -6793,7 +6791,6 @@ def fillna(
self,
value: Hashable | Mapping | Series | DataFrame = ...,
*,
- method: FillnaOptions | None = ...,
axis: Axis | None = ...,
inplace: bool = ...,
limit: int | None = ...,
@@ -6809,13 +6806,12 @@ def fillna(
self,
value: Hashable | Mapping | Series | DataFrame | None = None,
*,
- method: FillnaOptions | None = None,
axis: Axis | None = None,
inplace: bool = False,
limit: int | None = None,
) -> Self | None:
"""
- Fill NA/NaN values using the specified method.
+ Fill NA/NaN values with `value`.
Parameters
----------
@@ -6825,15 +6821,6 @@ def fillna(
each index (for a Series) or column (for a DataFrame). Values not
in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
- method : {{'backfill', 'bfill', 'ffill', None}}, default None
- Method to use for filling holes in reindexed Series:
-
- * ffill: propagate last valid observation forward to next valid.
- * backfill / bfill: use next valid observation to fill gap.
-
- .. deprecated:: 2.1.0
- Use ffill or bfill instead.
-
axis : {axes_single_arg}
Axis along which to fill missing values. For `Series`
this parameter is unused and defaults to 0.
@@ -6842,12 +6829,8 @@ def fillna(
other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default None
- If method is specified, this is the maximum number of consecutive
- NaN values to forward/backward fill. In other words, if there is
- a gap with more than this number of consecutive NaNs, it will only
- be partially filled. If method is not specified, this is the
- maximum number of entries along the entire axis where NaNs will be
- filled. Must be greater than 0 if not None.
+ This is the maximum number of entries along the entire axis
+ where NaNs will be filled. Must be greater than 0 if not None.
Returns
-------
@@ -6932,14 +6915,10 @@ def fillna(
stacklevel=2,
)
- value, method = validate_fillna_kwargs(value, method)
- if method is not None:
- warnings.warn(
- f"{type(self).__name__}.fillna with 'method' is deprecated and "
- "will raise in a future version. Use obj.ffill() or obj.bfill() "
- "instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
+ if isinstance(value, (list, tuple)):
+ raise TypeError(
+ '"value" parameter must be a scalar or dict, but '
+ f'you passed a "{type(value).__name__}"'
)
# set the default here, so functions examining the signaure
@@ -6949,15 +6928,7 @@ def fillna(
axis = self._get_axis_number(axis)
if value is None:
- return self._pad_or_backfill(
- # error: Argument 1 to "_pad_or_backfill" of "NDFrame" has
- # incompatible type "Optional[Literal['backfill', 'bfill', 'ffill',
- # 'pad']]"; expected "Literal['ffill', 'bfill', 'pad', 'backfill']"
- method, # type: ignore[arg-type]
- axis=axis,
- limit=limit,
- inplace=inplace,
- )
+ raise ValueError("Must specify a fill 'value'.")
else:
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py
index b9cba2fc52728..328c6cd6164fb 100644
--- a/pandas/tests/extension/base/missing.py
+++ b/pandas/tests/extension/base/missing.py
@@ -68,9 +68,6 @@ def test_fillna_scalar(self, data_missing):
expected = data_missing.fillna(valid)
tm.assert_extension_array_equal(result, expected)
- @pytest.mark.filterwarnings(
- "ignore:Series.fillna with 'method' is deprecated:FutureWarning"
- )
def test_fillna_limit_pad(self, data_missing):
arr = data_missing.take([1, 0, 0, 0, 1])
result = pd.Series(arr).ffill(limit=2)
@@ -99,12 +96,9 @@ def test_ffill_limit_area(
expected = pd.Series(data_missing.take(expected_ilocs))
tm.assert_series_equal(result, expected)
- @pytest.mark.filterwarnings(
- "ignore:Series.fillna with 'method' is deprecated:FutureWarning"
- )
def test_fillna_limit_backfill(self, data_missing):
arr = data_missing.take([1, 0, 0, 0, 1])
- result = pd.Series(arr).fillna(method="backfill", limit=2)
+ result = pd.Series(arr).bfill(limit=2)
expected = pd.Series(data_missing.take([1, 0, 1, 1, 1]))
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 816b7ace69300..bed3ec62f43da 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -187,15 +187,6 @@ def test_ffill_limit_area(
)
def test_fillna_limit_backfill(self, data_missing):
- msg = "Series.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(
- FutureWarning,
- match=msg,
- check_stacklevel=False,
- raise_on_extra_warnings=False,
- ):
- super().test_fillna_limit_backfill(data_missing)
-
msg = "ExtensionArray.fillna 'method' keyword is deprecated"
with tm.assert_produces_warning(
DeprecationWarning,
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index c3a1d584170fb..d8fa1fa0ed5cf 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -234,11 +234,6 @@ def test_isna(self, data_missing):
expected = SparseArray([False, False], fill_value=False, dtype=expected_dtype)
tm.assert_equal(sarr.isna(), expected)
- def test_fillna_limit_backfill(self, data_missing):
- warns = FutureWarning
- with tm.assert_produces_warning(warns, check_stacklevel=False):
- super().test_fillna_limit_backfill(data_missing)
-
def test_fillna_no_op_returns_copy(self, data, request):
if np.isnan(data.fill_value):
request.applymarker(
diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py
index ee660d8b03b40..81f66cfd48b0a 100644
--- a/pandas/tests/frame/methods/test_fillna.py
+++ b/pandas/tests/frame/methods/test_fillna.py
@@ -58,20 +58,15 @@ def test_fillna_datetime(self, datetime_frame):
zero_filled = datetime_frame.fillna(0)
assert (zero_filled.loc[zero_filled.index[:5], "A"] == 0).all()
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- padded = datetime_frame.fillna(method="pad")
+ padded = datetime_frame.ffill()
assert np.isnan(padded.loc[padded.index[:5], "A"]).all()
assert (
padded.loc[padded.index[-5:], "A"] == padded.loc[padded.index[-5], "A"]
).all()
- msg = "Must specify a fill 'value' or 'method'"
+ msg = "Must specify a fill 'value'"
with pytest.raises(ValueError, match=msg):
datetime_frame.fillna()
- msg = "Cannot specify both 'value' and 'method'"
- with pytest.raises(ValueError, match=msg):
- datetime_frame.fillna(5, method="ffill")
@pytest.mark.xfail(using_pyarrow_string_dtype(), reason="can't fill 0 in string")
def test_fillna_mixed_type(self, float_string_frame):
@@ -80,9 +75,7 @@ def test_fillna_mixed_type(self, float_string_frame):
mf.loc[mf.index[-10:], "A"] = np.nan
# TODO: make stronger assertion here, GH 25640
mf.fillna(value=0)
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- mf.fillna(method="pad")
+ mf.ffill()
def test_fillna_mixed_float(self, mixed_float_frame):
# mixed numeric (but no float16)
@@ -90,10 +83,7 @@ def test_fillna_mixed_float(self, mixed_float_frame):
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype={"C": None})
-
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = mf.fillna(method="pad")
+ result = mf.ffill()
_check_mixed_float(result, dtype={"C": None})
def test_fillna_different_dtype(self, using_infer_string):
@@ -159,9 +149,7 @@ def test_fillna_tzaware(self):
]
}
)
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = df.fillna(method="pad")
+ res = df.ffill()
tm.assert_frame_equal(res, exp)
df = DataFrame({"A": [NaT, Timestamp("2012-11-11 00:00:00+01:00")]})
@@ -173,9 +161,7 @@ def test_fillna_tzaware(self):
]
}
)
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = df.fillna(method="bfill")
+ res = df.bfill()
tm.assert_frame_equal(res, exp)
def test_fillna_tzaware_different_column(self):
@@ -187,9 +173,7 @@ def test_fillna_tzaware_different_column(self):
"B": [1, 2, np.nan, np.nan],
}
)
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.fillna(method="pad")
+ result = df.ffill()
expected = DataFrame(
{
"A": date_range("20130101", periods=4, tz="US/Eastern"),
@@ -220,9 +204,7 @@ def test_na_actions_categorical(self):
with pytest.raises(TypeError, match=msg):
df.fillna(value={"cats": 4, "vals": "c"})
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = df.fillna(method="pad")
+ res = df.ffill()
tm.assert_frame_equal(res, df_exp_fill)
# dropna
@@ -368,19 +350,14 @@ def test_ffill(self, datetime_frame):
datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan
datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- alt = datetime_frame.fillna(method="ffill")
+ alt = datetime_frame.ffill()
tm.assert_frame_equal(datetime_frame.ffill(), alt)
def test_bfill(self, datetime_frame):
datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan
datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- alt = datetime_frame.fillna(method="bfill")
-
+ alt = datetime_frame.bfill()
tm.assert_frame_equal(datetime_frame.bfill(), alt)
def test_frame_pad_backfill_limit(self):
@@ -389,16 +366,13 @@ def test_frame_pad_backfill_limit(self):
result = df[:2].reindex(index, method="pad", limit=5)
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- expected = df[:2].reindex(index).fillna(method="pad")
+ expected = df[:2].reindex(index).ffill()
expected.iloc[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method="backfill", limit=5)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- expected = df[-2:].reindex(index).fillna(method="backfill")
+ expected = df[-2:].reindex(index).bfill()
expected.iloc[:3] = np.nan
tm.assert_frame_equal(result, expected)
@@ -407,21 +381,16 @@ def test_frame_fillna_limit(self):
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)), index=index)
result = df[:2].reindex(index)
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = result.fillna(method="pad", limit=5)
+ result = result.ffill(limit=5)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- expected = df[:2].reindex(index).fillna(method="pad")
+ expected = df[:2].reindex(index).ffill()
expected.iloc[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = result.fillna(method="backfill", limit=5)
+ result = result.bfill(limit=5)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- expected = df[-2:].reindex(index).fillna(method="backfill")
+ expected = df[-2:].reindex(index).bfill()
expected.iloc[:3] = np.nan
tm.assert_frame_equal(result, expected)
@@ -465,13 +434,10 @@ def test_fillna_inplace(self):
df.loc[:4, 1] = np.nan
df.loc[-4:, 3] = np.nan
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- expected = df.fillna(method="ffill")
+ expected = df.ffill()
assert expected is not df
- with tm.assert_produces_warning(FutureWarning, match=msg):
- df.fillna(method="ffill", inplace=True)
+ df.ffill(inplace=True)
tm.assert_frame_equal(df, expected)
def test_fillna_dict_series(self):
@@ -542,24 +508,15 @@ def test_fillna_columns(self):
arr[:, ::2] = np.nan
df = DataFrame(arr)
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.fillna(method="ffill", axis=1)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- expected = df.T.fillna(method="pad").T
+ result = df.ffill(axis=1)
+ expected = df.T.ffill().T
tm.assert_frame_equal(result, expected)
df.insert(6, "foo", 5)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.fillna(method="ffill", axis=1)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- expected = df.astype(float).fillna(method="ffill", axis=1)
+ result = df.ffill(axis=1)
+ expected = df.astype(float).ffill(axis=1)
tm.assert_frame_equal(result, expected)
- def test_fillna_invalid_method(self, float_frame):
- with pytest.raises(ValueError, match="ffil"):
- float_frame.fillna(method="ffil")
-
def test_fillna_invalid_value(self, float_frame):
# list
msg = '"value" parameter must be a scalar or dict, but you passed a "{}"'
@@ -580,9 +537,7 @@ def test_fillna_col_reordering(self):
cols = ["COL." + str(i) for i in range(5, 0, -1)]
data = np.random.default_rng(2).random((20, 5))
df = DataFrame(index=range(20), columns=cols, data=data)
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- filled = df.fillna(method="ffill")
+ filled = df.ffill()
assert df.columns.tolist() == filled.columns.tolist()
@pytest.mark.xfail(using_pyarrow_string_dtype(), reason="can't fill 0 in string")
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index 6ca6cbad02d51..eb6d649c296fc 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -757,11 +757,6 @@ def test_replace_for_new_dtypes(self, datetime_frame):
tsframe.loc[tsframe.index[:5], "A"] = np.nan
tsframe.loc[tsframe.index[-5:], "A"] = np.nan
tsframe.loc[tsframe.index[:5], "B"] = np.nan
- msg = "DataFrame.fillna with 'method' is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- # TODO: what is this even testing?
- result = tsframe.fillna(method="bfill")
- tm.assert_frame_equal(result, tsframe.fillna(method="bfill"))
@pytest.mark.parametrize(
"frame, to_replace, value, expected",
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py
index fd815c85a89b3..d25d909adea8e 100644
--- a/pandas/tests/generic/test_finalize.py
+++ b/pandas/tests/generic/test_finalize.py
@@ -89,7 +89,6 @@
(pd.DataFrame, frame_data, operator.methodcaller("rename", columns={"A": "a"})),
(pd.DataFrame, frame_data, operator.methodcaller("rename", index=lambda x: x)),
(pd.DataFrame, frame_data, operator.methodcaller("fillna", "A")),
- (pd.DataFrame, frame_data, operator.methodcaller("fillna", method="ffill")),
(pd.DataFrame, frame_data, operator.methodcaller("set_index", "A")),
(pd.DataFrame, frame_data, operator.methodcaller("reset_index")),
(pd.DataFrame, frame_data, operator.methodcaller("isna")),
@@ -375,9 +374,6 @@ def idfn(x):
return str(x)
-@pytest.mark.filterwarnings(
- "ignore:DataFrame.fillna with 'method' is deprecated:FutureWarning",
-)
@pytest.mark.parametrize("ndframe_method", _all_methods, ids=lambda x: idfn(x[-1]))
def test_finalize_called(ndframe_method):
cls, init_args, method = ndframe_method
diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py
index a458b31480375..0965d36e4827d 100644
--- a/pandas/tests/series/methods/test_fillna.py
+++ b/pandas/tests/series/methods/test_fillna.py
@@ -25,14 +25,11 @@
from pandas.core.arrays import period_array
-@pytest.mark.filterwarnings(
- "ignore:(Series|DataFrame).fillna with 'method' is deprecated:FutureWarning"
-)
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT._value], dtype="M8[ns]")
- filled = series.fillna(method="pad")
+ filled = series.ffill()
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
@@ -42,7 +39,7 @@ def test_fillna_nat(self):
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
- filled = df.fillna(method="pad")
+ filled = df.ffill()
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
@@ -50,7 +47,7 @@ def test_fillna_nat(self):
series = Series([NaT._value, 0, 1, 2], dtype="M8[ns]")
- filled = series.fillna(method="bfill")
+ filled = series.bfill()
filled2 = series.fillna(value=series[1])
expected = series.copy()
@@ -60,39 +57,30 @@ def test_fillna_nat(self):
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
- filled = df.fillna(method="bfill")
+ filled = df.bfill()
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
- def test_fillna_value_or_method(self, datetime_series):
- msg = "Cannot specify both 'value' and 'method'"
- with pytest.raises(ValueError, match=msg):
- datetime_series.fillna(value=0, method="ffill")
-
def test_fillna(self):
ts = Series(
[0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("2020-01-01", periods=5)
)
- tm.assert_series_equal(ts, ts.fillna(method="ffill"))
+ tm.assert_series_equal(ts, ts.ffill())
ts.iloc[2] = np.nan
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
- tm.assert_series_equal(ts.fillna(method="ffill"), exp)
+ tm.assert_series_equal(ts.ffill(), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
- tm.assert_series_equal(ts.fillna(method="backfill"), exp)
+ tm.assert_series_equal(ts.bfill(), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
- msg = "Must specify a fill 'value' or 'method'"
- with pytest.raises(ValueError, match=msg):
- ts.fillna()
-
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
@@ -395,7 +383,7 @@ def test_datetime64_fillna_backfill(self):
],
dtype="M8[ns]",
)
- result = ser.fillna(method="backfill")
+ result = ser.bfill()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
@@ -615,7 +603,7 @@ def test_fillna_dt64tz_with_method(self):
Timestamp("2012-11-11 00:00:00+01:00"),
]
)
- tm.assert_series_equal(ser.fillna(method="pad"), exp)
+ tm.assert_series_equal(ser.ffill(), exp)
ser = Series([NaT, Timestamp("2012-11-11 00:00:00+01:00")])
exp = Series(
@@ -624,7 +612,7 @@ def test_fillna_dt64tz_with_method(self):
Timestamp("2012-11-11 00:00:00+01:00"),
]
)
- tm.assert_series_equal(ser.fillna(method="bfill"), exp)
+ tm.assert_series_equal(ser.bfill(), exp)
def test_fillna_pytimedelta(self):
# GH#8209
@@ -807,12 +795,6 @@ def test_fillna_f32_upcast_with_dict(self):
# ---------------------------------------------------------------
# Invalid Usages
- def test_fillna_invalid_method(self, datetime_series):
- try:
- datetime_series.fillna(method="ffil")
- except ValueError as inst:
- assert "ffil" in str(inst)
-
def test_fillna_listlike_invalid(self):
ser = Series(np.random.default_rng(2).integers(-100, 100, 50))
msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
@@ -834,9 +816,8 @@ def test_fillna_method_and_limit_invalid(self):
]
)
for limit in [-1, 0, 1.0, 2.0]:
- for method in ["backfill", "bfill", "pad", "ffill", None]:
- with pytest.raises(ValueError, match=msg):
- ser.fillna(1, limit=limit, method=method)
+ with pytest.raises(ValueError, match=msg):
+ ser.fillna(1, limit=limit)
def test_fillna_datetime64_with_timezone_tzinfo(self):
# https://github.com/pandas-dev/pandas/issues/38851
@@ -877,46 +858,29 @@ def test_fillna_categorical_accept_same_type(
tm.assert_categorical_equal(result, expected)
-@pytest.mark.filterwarnings(
- "ignore:Series.fillna with 'method' is deprecated:FutureWarning"
-)
class TestFillnaPad:
def test_fillna_bug(self):
ser = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])
- filled = ser.fillna(method="ffill")
+ filled = ser.ffill()
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0], ser.index)
tm.assert_series_equal(filled, expected)
- filled = ser.fillna(method="bfill")
+ filled = ser.bfill()
expected = Series([1.0, 1.0, 3.0, 3.0, np.nan], ser.index)
tm.assert_series_equal(filled, expected)
- def test_ffill(self):
- ts = Series(
- [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("2020-01-01", periods=5)
- )
- ts.iloc[2] = np.nan
- tm.assert_series_equal(ts.ffill(), ts.fillna(method="ffill"))
-
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH#14956
series = Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
tm.assert_series_equal(series, result)
- def test_bfill(self):
- ts = Series(
- [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("2020-01-01", periods=5)
- )
- ts.iloc[2] = np.nan
- tm.assert_series_equal(ts.bfill(), ts.fillna(method="bfill"))
-
def test_pad_nan(self):
x = Series(
[np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"], dtype=float
)
- return_value = x.fillna(method="pad", inplace=True)
+ return_value = x.ffill(inplace=True)
assert return_value is None
expected = Series(
@@ -930,16 +894,16 @@ def test_series_fillna_limit(self):
s = Series(np.random.default_rng(2).standard_normal(10), index=index)
result = s[:2].reindex(index)
- result = result.fillna(method="pad", limit=5)
+ result = result.ffill(limit=5)
- expected = s[:2].reindex(index).fillna(method="pad")
+ expected = s[:2].reindex(index).ffill()
expected[-3:] = np.nan
tm.assert_series_equal(result, expected)
result = s[-2:].reindex(index)
- result = result.fillna(method="bfill", limit=5)
+ result = result.bfill(limit=5)
- expected = s[-2:].reindex(index).fillna(method="backfill")
+ expected = s[-2:].reindex(index).bfill()
expected[:3] = np.nan
tm.assert_series_equal(result, expected)
@@ -949,21 +913,21 @@ def test_series_pad_backfill_limit(self):
result = s[:2].reindex(index, method="pad", limit=5)
- expected = s[:2].reindex(index).fillna(method="pad")
+ expected = s[:2].reindex(index).ffill()
expected[-3:] = np.nan
tm.assert_series_equal(result, expected)
result = s[-2:].reindex(index, method="backfill", limit=5)
- expected = s[-2:].reindex(index).fillna(method="backfill")
+ expected = s[-2:].reindex(index).bfill()
expected[:3] = np.nan
tm.assert_series_equal(result, expected)
def test_fillna_int(self):
ser = Series(np.random.default_rng(2).integers(-100, 100, 50))
- return_value = ser.fillna(method="ffill", inplace=True)
+ return_value = ser.ffill(inplace=True)
assert return_value is None
- tm.assert_series_equal(ser.fillna(method="ffill", inplace=False), ser)
+ tm.assert_series_equal(ser.ffill(inplace=False), ser)
def test_datetime64tz_fillna_round_issue(self):
# GH#14872
| xref #53496
enforced deprecation of the `method` keyword on `df.fillna/ser.fillna` | https://api.github.com/repos/pandas-dev/pandas/pulls/57760 | 2024-03-07T09:45:25Z | 2024-03-14T16:19:19Z | 2024-03-14T16:19:19Z | 2024-03-14T21:08:20Z |
DOC: add whatsnew for v2.2.2 | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 5d0e3f3291114..1a1ecdd0effee 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -25,6 +25,7 @@ Version 2.2
.. toctree::
:maxdepth: 2
+ v2.2.2
v2.2.1
v2.2.0
diff --git a/doc/source/whatsnew/v2.2.2.rst b/doc/source/whatsnew/v2.2.2.rst
new file mode 100644
index 0000000000000..058f7aebcd538
--- /dev/null
+++ b/doc/source/whatsnew/v2.2.2.rst
@@ -0,0 +1,36 @@
+.. _whatsnew_222:
+
+What's new in 2.2.2 (April XX, 2024)
+---------------------------------------
+
+These are the changes in pandas 2.2.2. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_222.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+-
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_222.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+-
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_222.other:
+
+Other
+~~~~~
+-
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_222.contributors:
+
+Contributors
+~~~~~~~~~~~~
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index a88bd4c42edec..d48592d1a61cb 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -20,7 +20,6 @@
Union,
cast,
)
-import warnings
import numpy as np
@@ -32,7 +31,6 @@
Substitution,
doc,
)
-from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
ensure_int64,
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/57759 | 2024-03-07T09:28:42Z | 2024-03-07T12:51:10Z | 2024-03-07T12:51:10Z | 2024-03-07T12:51:10Z |
BUG: DataFrame Interchange Protocol errors on Boolean columns | diff --git a/doc/source/whatsnew/v2.2.2.rst b/doc/source/whatsnew/v2.2.2.rst
index 54084abab7817..2a48403d9a318 100644
--- a/doc/source/whatsnew/v2.2.2.rst
+++ b/doc/source/whatsnew/v2.2.2.rst
@@ -22,6 +22,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
+- :meth:`DataFrame.__dataframe__` was producing incorrect data buffers when the column's type was nullable boolean (:issue:`55332`)
- :meth:`DataFrame.__dataframe__` was showing bytemask instead of bitmask for ``'string[pyarrow]'`` validity buffer (:issue:`57762`)
- :meth:`DataFrame.__dataframe__` was showing non-null validity buffer (instead of ``None``) ``'string[pyarrow]'`` without missing values (:issue:`57761`)
diff --git a/pandas/core/interchange/utils.py b/pandas/core/interchange/utils.py
index 2a19dd5046aa3..fd1c7c9639242 100644
--- a/pandas/core/interchange/utils.py
+++ b/pandas/core/interchange/utils.py
@@ -144,6 +144,9 @@ def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str:
elif isinstance(dtype, DatetimeTZDtype):
return ArrowCTypes.TIMESTAMP.format(resolution=dtype.unit[0], tz=dtype.tz)
+ elif isinstance(dtype, pd.BooleanDtype):
+ return ArrowCTypes.BOOL
+
raise NotImplementedError(
f"Conversion of {dtype} to Arrow C format string is not implemented."
)
diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py
index 83574e8630d6f..60e05c2c65124 100644
--- a/pandas/tests/interchange/test_impl.py
+++ b/pandas/tests/interchange/test_impl.py
@@ -459,6 +459,7 @@ def test_non_str_names_w_duplicates():
),
([1.0, 2.25, None], "Float32", "float32"),
([1.0, 2.25, None], "Float32[pyarrow]", "float32"),
+ ([True, False, None], "boolean", "bool"),
([True, False, None], "boolean[pyarrow]", "bool"),
(["much ado", "about", None], "string[pyarrow_numpy]", "large_string"),
(["much ado", "about", None], "string[pyarrow]", "large_string"),
@@ -521,6 +522,7 @@ def test_pandas_nullable_with_missing_values(
),
([1.0, 2.25, 5.0], "Float32", "float32"),
([1.0, 2.25, 5.0], "Float32[pyarrow]", "float32"),
+ ([True, False, False], "boolean", "bool"),
([True, False, False], "boolean[pyarrow]", "bool"),
(["much ado", "about", "nothing"], "string[pyarrow_numpy]", "large_string"),
(["much ado", "about", "nothing"], "string[pyarrow]", "large_string"),
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
closes #55332
~~needs rebasing onto #57764~~ | https://api.github.com/repos/pandas-dev/pandas/pulls/57758 | 2024-03-07T08:38:04Z | 2024-03-27T17:48:16Z | 2024-03-27T17:48:16Z | 2024-03-27T17:48:23Z |
CLN: Enforce deprecation of passing a dict to SeriesGroupBy.agg | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index e68a935fe6fd3..4f690e9339f6b 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -201,6 +201,7 @@ Removal of prior version deprecations/changes
- Changed the default value of ``observed`` in :meth:`DataFrame.groupby` and :meth:`Series.groupby` to ``True`` (:issue:`51811`)
- Enforced deprecation disallowing parsing datetimes with mixed time zones unless user passes ``utc=True`` to :func:`to_datetime` (:issue:`57275`)
- Enforced deprecation of ``axis=None`` acting the same as ``axis=0`` in the DataFrame reductions ``sum``, ``prod``, ``std``, ``var``, and ``sem``, passing ``axis=None`` will now reduce over both axes; this is particularly the case when doing e.g. ``numpy.sum(df)`` (:issue:`21597`)
+- Enforced deprecation of passing a dictionary to :meth:`SeriesGroupBy.agg` (:issue:`52268`)
- Enforced silent-downcasting deprecation for :ref:`all relevant methods <whatsnew_220.silent_downcasting>` (:issue:`54710`)
- In :meth:`DataFrame.stack`, the default value of ``future_stack`` is now ``True``; specifying ``False`` will raise a ``FutureWarning`` (:issue:`55448`)
- Methods ``apply``, ``agg``, and ``transform`` will no longer replace NumPy functions (e.g. ``np.sum``) and built-in functions (e.g. ``min``) with the equivalent pandas implementation; use string aliases (e.g. ``"sum"`` and ``"min"``) if you desire to use the pandas implementation (:issue:`53974`)
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 52fd7735b533e..fc2fc366e18db 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -384,23 +384,9 @@ def _python_agg_general(self, func, *args, **kwargs):
def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame:
if isinstance(arg, dict):
- if self.as_index:
- # GH 15931
- raise SpecificationError("nested renamer is not supported")
- else:
- # GH#50684 - This accidentally worked in 1.x
- msg = (
- "Passing a dictionary to SeriesGroupBy.agg is deprecated "
- "and will raise in a future version of pandas. Pass a list "
- "of aggregations instead."
- )
- warnings.warn(
- message=msg,
- category=FutureWarning,
- stacklevel=find_stack_level(),
- )
- arg = list(arg.items())
- elif any(isinstance(x, (tuple, list)) for x in arg):
+ raise SpecificationError("nested renamer is not supported")
+
+ if any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
else:
# list of functions / function names
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 5d44f11393c93..d8f832002dac6 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -1015,10 +1015,9 @@ def test_groupby_as_index_agg(df):
expected3 = grouped["C"].sum()
expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
- msg = "Passing a dictionary to SeriesGroupBy.agg is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result3 = grouped["C"].agg({"Q": "sum"})
- tm.assert_frame_equal(result3, expected3)
+ msg = "nested renamer is not supported"
+ with pytest.raises(SpecificationError, match=msg):
+ grouped["C"].agg({"Q": "sum"})
# GH7115 & GH8112 & GH8582
df = DataFrame(
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 2961369936717..04a3516fd9af7 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -9,6 +9,8 @@
import numpy as np
import pytest
+from pandas.errors import SpecificationError
+
import pandas as pd
from pandas import (
CategoricalIndex,
@@ -530,12 +532,10 @@ def test_multiindex_negative_level(self, multiindex_dataframe_random_data):
).sum()
tm.assert_frame_equal(result, expected)
- def test_multifunc_select_col_integer_cols(self, df):
+ def test_agg_with_dict_raises(self, df):
df.columns = np.arange(len(df.columns))
-
- # it works!
- msg = "Passing a dictionary to SeriesGroupBy.agg is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
+ msg = "nested renamer is not supported"
+ with pytest.raises(SpecificationError, match=msg):
df.groupby(1, as_index=False)[2].agg({"Q": np.mean})
def test_multiindex_columns_empty_level(self):
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Ref: #52268 | https://api.github.com/repos/pandas-dev/pandas/pulls/57757 | 2024-03-07T03:12:29Z | 2024-03-07T04:40:31Z | 2024-03-07T04:40:30Z | 2024-03-07T21:09:12Z |
CLN: Enforce deprecation of DataFrameGroupBy.dtypes and Grouper attrs | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index e68a935fe6fd3..6748edcb4ce49 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -243,6 +243,8 @@ Removal of prior version deprecations/changes
- Removed the ``ordinal`` keyword in :class:`PeriodIndex`, use :meth:`PeriodIndex.from_ordinals` instead (:issue:`55960`)
- Removed unused arguments ``*args`` and ``**kwargs`` in :class:`Resampler` methods (:issue:`50977`)
- Unrecognized timezones when parsing strings to datetimes now raises a ``ValueError`` (:issue:`51477`)
+- Removed the :class:`Grouper` attributes ``ax``, ``groups``, ``indexer``, and ``obj`` (:issue:`51206`, :issue:`51182`)
+- Removed the attribute ``dtypes`` from :class:`.DataFrameGroupBy` (:issue:`51997`)
.. ---------------------------------------------------------------------------
.. _whatsnew_300.performance:
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 3f776cf75d43a..8b776dc7a9f79 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -90,7 +90,6 @@ class OutputKey:
"corr",
"cov",
"describe",
- "dtypes",
"expanding",
"ewm",
"filter",
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 52fd7735b533e..e65f44f1acefb 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -2720,22 +2720,6 @@ def hist(
)
return result
- @property
- @doc(DataFrame.dtypes.__doc__)
- def dtypes(self) -> Series:
- # GH#51045
- warnings.warn(
- f"{type(self).__name__}.dtypes is deprecated and will be removed in "
- "a future version. Check the dtypes on the base object instead",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
-
- # error: Incompatible return value type (got "DataFrame", expected "Series")
- return self._python_apply_general( # type: ignore[return-value]
- lambda df: df.dtypes, self._selected_obj
- )
-
def corrwith(
self,
other: DataFrame | Series,
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 1578bde0781ef..1cf6df426f8b7 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -8,14 +8,12 @@
TYPE_CHECKING,
final,
)
-import warnings
import numpy as np
from pandas._libs.tslibs import OutOfBoundsDatetime
from pandas.errors import InvalidIndexError
from pandas.util._decorators import cache_readonly
-from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_list_like,
@@ -387,56 +385,6 @@ def _set_grouper(
self._gpr_index = ax
return obj, ax, indexer
- @final
- @property
- def ax(self) -> Index:
- warnings.warn(
- f"{type(self).__name__}.ax is deprecated and will be removed in a "
- "future version. Use Resampler.ax instead",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- index = self._gpr_index
- if index is None:
- raise ValueError("_set_grouper must be called before ax is accessed")
- return index
-
- @final
- @property
- def indexer(self):
- warnings.warn(
- f"{type(self).__name__}.indexer is deprecated and will be removed "
- "in a future version. Use Resampler.indexer instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- return self._indexer_deprecated
-
- @final
- @property
- def obj(self):
- # TODO(3.0): enforcing these deprecations on Grouper should close
- # GH#25564, GH#41930
- warnings.warn(
- f"{type(self).__name__}.obj is deprecated and will be removed "
- "in a future version. Use GroupBy.indexer instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- return self._obj_deprecated
-
- @final
- @property
- def groups(self):
- warnings.warn(
- f"{type(self).__name__}.groups is deprecated and will be removed "
- "in a future version. Use GroupBy.groups instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- # error: "None" has no attribute "groups"
- return self._grouper_deprecated.groups # type: ignore[attr-defined]
-
@final
def __repr__(self) -> str:
attrs_list = (
diff --git a/pandas/tests/groupby/test_api.py b/pandas/tests/groupby/test_api.py
index 4b0f7890e1aa7..b5fdf058d1ab0 100644
--- a/pandas/tests/groupby/test_api.py
+++ b/pandas/tests/groupby/test_api.py
@@ -80,7 +80,6 @@ def test_tab_completion(multiindex_dataframe_random_data):
"corr",
"corrwith",
"cov",
- "dtypes",
"ndim",
"diff",
"idxmax",
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 52c93d566bc73..50071bc68923c 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2828,9 +2828,6 @@ def test_groupby_selection_other_methods(df):
g_exp = df[["C"]].groupby(df["A"])
# methods which aren't just .foo()
- msg = "DataFrameGroupBy.dtypes is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- tm.assert_frame_equal(g.dtypes, g_exp.dtypes)
tm.assert_frame_equal(g.apply(lambda x: x.sum()), g_exp.apply(lambda x: x.sum()))
tm.assert_frame_equal(g.resample("D").mean(), g_exp.resample("D").mean())
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 2961369936717..f060486223d8a 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -1128,28 +1128,3 @@ def test_grouping_by_key_is_in_axis():
result = gb.sum()
expected = DataFrame({"a": [1, 2], "b": [1, 2], "c": [7, 5]})
tm.assert_frame_equal(result, expected)
-
-
-def test_grouper_groups():
- # GH#51182 check Grouper.groups does not raise AttributeError
- df = DataFrame({"a": [1, 2, 3], "b": 1})
- grper = Grouper(key="a")
- gb = df.groupby(grper)
-
- msg = "Use GroupBy.groups instead"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = grper.groups
- assert res is gb.groups
-
- msg = "Grouper.obj is deprecated and will be removed"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = grper.obj
- assert res is gb.obj
-
- msg = "Use Resampler.ax instead"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- grper.ax
-
- msg = "Grouper.indexer is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- grper.indexer
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Ref: #51206, #51182, and #51997 | https://api.github.com/repos/pandas-dev/pandas/pulls/57756 | 2024-03-07T02:57:27Z | 2024-03-07T04:41:13Z | 2024-03-07T04:41:13Z | 2024-03-07T21:09:05Z |
PERF/CLN: Preserve `concat(keys=range)` RangeIndex level in the result | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index 9cae456f21ccf..ca2ca07ff2fae 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -204,6 +204,7 @@ Removal of prior version deprecations/changes
- Enforced deprecation of ``axis=None`` acting the same as ``axis=0`` in the DataFrame reductions ``sum``, ``prod``, ``std``, ``var``, and ``sem``, passing ``axis=None`` will now reduce over both axes; this is particularly the case when doing e.g. ``numpy.sum(df)`` (:issue:`21597`)
- Enforced deprecation of passing a dictionary to :meth:`SeriesGroupBy.agg` (:issue:`52268`)
- Enforced deprecation of string ``A`` denoting frequency in :class:`YearEnd` and strings ``A-DEC``, ``A-JAN``, etc. denoting annual frequencies with various fiscal year ends (:issue:`57699`)
+- Enforced deprecation of the behavior of :func:`concat` when ``len(keys) != len(objs)`` would truncate to the shorter of the two. Now this raises a ``ValueError`` (:issue:`43485`)
- Enforced silent-downcasting deprecation for :ref:`all relevant methods <whatsnew_220.silent_downcasting>` (:issue:`54710`)
- In :meth:`DataFrame.stack`, the default value of ``future_stack`` is now ``True``; specifying ``False`` will raise a ``FutureWarning`` (:issue:`55448`)
- Iterating over a :class:`.DataFrameGroupBy` or :class:`.SeriesGroupBy` will return tuples of length 1 for the groups when grouping by ``level`` a list of length 1 (:issue:`50064`)
@@ -255,6 +256,7 @@ Removal of prior version deprecations/changes
Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
+- :func:`concat` returns a :class:`RangeIndex` level in the :class:`MultiIndex` result when ``keys`` is a ``range`` or :class:`RangeIndex` (:issue:`57542`)
- :meth:`RangeIndex.append` returns a :class:`RangeIndex` instead of a :class:`Index` when appending values that could continue the :class:`RangeIndex` (:issue:`57467`)
- :meth:`Series.str.extract` returns a :class:`RangeIndex` columns instead of an :class:`Index` column when possible (:issue:`57542`)
- Performance improvement in :class:`DataFrame` when ``data`` is a ``dict`` and ``columns`` is specified (:issue:`24368`)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 46831b922d24e..40d4cabb352a1 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1202,7 +1202,7 @@ def _concat_objects(
else:
# GH5610, returns a MI, with the first level being a
# range index
- keys = list(range(len(values)))
+ keys = RangeIndex(len(values))
result = concat(values, axis=0, keys=keys)
elif not not_indexed_same:
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 4df9cdf5d7b2c..1f0fe0542a0c0 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -12,12 +12,10 @@
cast,
overload,
)
-import warnings
import numpy as np
from pandas.util._decorators import cache_readonly
-from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_bool,
@@ -493,32 +491,27 @@ def _clean_keys_and_objs(
objs_list = list(com.not_none(*objs_list))
else:
# GH#1649
- clean_keys = []
+ key_indices = []
clean_objs = []
if is_iterator(keys):
keys = list(keys)
if len(keys) != len(objs_list):
# GH#43485
- warnings.warn(
- "The behavior of pd.concat with len(keys) != len(objs) is "
- "deprecated. In a future version this will raise instead of "
- "truncating to the smaller of the two sequences",
- FutureWarning,
- stacklevel=find_stack_level(),
+ raise ValueError(
+ f"The length of the keys ({len(keys)}) must match "
+ f"the length of the objects to concatenate ({len(objs_list)})"
)
- for k, v in zip(keys, objs_list):
- if v is None:
- continue
- clean_keys.append(k)
- clean_objs.append(v)
+ for i, obj in enumerate(objs_list):
+ if obj is not None:
+ key_indices.append(i)
+ clean_objs.append(obj)
objs_list = clean_objs
- if isinstance(keys, MultiIndex):
- # TODO: retain levels?
- keys = type(keys).from_tuples(clean_keys, names=keys.names)
- else:
- name = getattr(keys, "name", None)
- keys = Index(clean_keys, name=name, dtype=getattr(keys, "dtype", None))
+ if not isinstance(keys, Index):
+ keys = Index(keys)
+
+ if len(key_indices) < len(keys):
+ keys = keys.take(key_indices)
if len(objs_list) == 0:
raise ValueError("All objects passed were None")
diff --git a/pandas/tests/groupby/methods/test_describe.py b/pandas/tests/groupby/methods/test_describe.py
index 3b6fbfa0a31fc..0f5fc915f9523 100644
--- a/pandas/tests/groupby/methods/test_describe.py
+++ b/pandas/tests/groupby/methods/test_describe.py
@@ -90,20 +90,22 @@ def test_frame_describe_multikey(tsframe):
def test_frame_describe_tupleindex():
# GH 14848 - regression from 0.19.0 to 0.19.1
- df1 = DataFrame(
+ name = "k"
+ df = DataFrame(
{
"x": [1, 2, 3, 4, 5] * 3,
- "y": [10, 20, 30, 40, 50] * 3,
- "z": [100, 200, 300, 400, 500] * 3,
+ name: [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5,
}
)
- df1["k"] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
- df2 = df1.rename(columns={"k": "key"})
- msg = "Names should be list-like for a MultiIndex"
- with pytest.raises(ValueError, match=msg):
- df1.groupby("k").describe()
- with pytest.raises(ValueError, match=msg):
- df2.groupby("key").describe()
+ result = df.groupby(name).describe()
+ expected = DataFrame(
+ [[5.0, 3.0, 1.581139, 1.0, 2.0, 3.0, 4.0, 5.0]] * 3,
+ index=Index([(0, 0, 1), (0, 1, 0), (1, 0, 0)], tupleize_cols=False, name=name),
+ columns=MultiIndex.from_arrays(
+ [["x"] * 8, ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
def test_frame_describe_unstacked_format():
diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py
index e104b99370f07..cf11bf237f615 100644
--- a/pandas/tests/reshape/concat/test_concat.py
+++ b/pandas/tests/reshape/concat/test_concat.py
@@ -17,6 +17,7 @@
Index,
MultiIndex,
PeriodIndex,
+ RangeIndex,
Series,
concat,
date_range,
@@ -395,6 +396,29 @@ def test_concat_keys_with_none(self):
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
+ @pytest.mark.parametrize("klass", [range, RangeIndex])
+ @pytest.mark.parametrize("include_none", [True, False])
+ def test_concat_preserves_rangeindex(self, klass, include_none):
+ df = DataFrame([1, 2])
+ df2 = DataFrame([3, 4])
+ data = [df, None, df2, None] if include_none else [df, df2]
+ keys_length = 4 if include_none else 2
+ result = concat(data, keys=klass(keys_length))
+ expected = DataFrame(
+ [1, 2, 3, 4],
+ index=MultiIndex(
+ levels=(
+ RangeIndex(start=0, stop=keys_length, step=keys_length / 2),
+ RangeIndex(start=0, stop=2, step=1),
+ ),
+ codes=(
+ np.array([0, 0, 1, 1], dtype=np.int8),
+ np.array([0, 1, 0, 1], dtype=np.int8),
+ ),
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
def test_concat_bug_1719(self):
ts1 = Series(
np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
@@ -705,7 +729,7 @@ def test_concat_multiindex_with_empty_rangeindex():
# GH#41234
mi = MultiIndex.from_tuples([("B", 1), ("C", 1)])
df1 = DataFrame([[1, 2]], columns=mi)
- df2 = DataFrame(index=[1], columns=pd.RangeIndex(0))
+ df2 = DataFrame(index=[1], columns=RangeIndex(0))
result = concat([df1, df2])
expected = DataFrame([[1, 2], [np.nan, np.nan]], columns=mi)
@@ -830,14 +854,14 @@ def test_concat_mismatched_keys_length():
sers = [ser + n for n in range(4)]
keys = ["A", "B", "C"]
- msg = r"The behavior of pd.concat with len\(keys\) != len\(objs\) is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
+ msg = r"The length of the keys"
+ with pytest.raises(ValueError, match=msg):
concat(sers, keys=keys, axis=1)
- with tm.assert_produces_warning(FutureWarning, match=msg):
+ with pytest.raises(ValueError, match=msg):
concat(sers, keys=keys, axis=0)
- with tm.assert_produces_warning(FutureWarning, match=msg):
+ with pytest.raises(ValueError, match=msg):
concat((x for x in sers), keys=(y for y in keys), axis=1)
- with tm.assert_produces_warning(FutureWarning, match=msg):
+ with pytest.raises(ValueError, match=msg):
concat((x for x in sers), keys=(y for y in keys), axis=0)
| Enforces #43485
Optimization found in #57441 | https://api.github.com/repos/pandas-dev/pandas/pulls/57755 | 2024-03-07T02:00:07Z | 2024-03-08T22:46:31Z | 2024-03-08T22:46:31Z | 2024-03-08T22:46:36Z |
DOC: fixing GL08 errors for pandas.Series.dt | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 7f4911037cff9..fcbd0a855dcc8 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -155,7 +155,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.Period.ordinal\
pandas.PeriodIndex.freq\
pandas.PeriodIndex.qyear\
- pandas.Series.dt\
pandas.Series.dt.as_unit\
pandas.Series.dt.freq\
pandas.Series.dt.qyear\
@@ -437,6 +436,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.Series.cat.rename_categories\
pandas.Series.cat.reorder_categories\
pandas.Series.cat.set_categories\
+ pandas.Series.dt `# Accessors are implemented as classes, but we do not document the Parameters section` \
pandas.Series.dt.as_unit\
pandas.Series.dt.ceil\
pandas.Series.dt.day_name\
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index 59d6e313a2d93..2bb234e174563 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -572,6 +572,44 @@ class PeriodProperties(Properties):
class CombinedDatetimelikeProperties(
DatetimeProperties, TimedeltaProperties, PeriodProperties
):
+ """
+ Accessor object for Series values' datetime-like, timedelta and period properties.
+
+ See Also
+ --------
+ DatetimeIndex : Index of datetime64 data.
+
+ Examples
+ --------
+ >>> dates = pd.Series(
+ ... ["2024-01-01", "2024-01-15", "2024-02-5"], dtype="datetime64[ns]"
+ ... )
+ >>> dates.dt.day
+ 0 1
+ 1 15
+ 2 5
+ dtype: int32
+ >>> dates.dt.month
+ 0 1
+ 1 1
+ 2 2
+ dtype: int32
+
+ >>> dates = pd.Series(
+ ... ["2024-01-01", "2024-01-15", "2024-02-5"], dtype="datetime64[ns, UTC]"
+ ... )
+ >>> dates.dt.day
+ 0 1
+ 1 15
+ 2 5
+ dtype: int32
+ >>> dates.dt.month
+ 0 1
+ 1 1
+ 2 2
+ dtype: int32
+ """
+
def __new__(cls, data: Series): # pyright: ignore[reportInconsistentConstructor]
# CombinedDatetimelikeProperties isn't really instantiated. Instead
# we need to choose which parent (datetime or timedelta) is
| Resolved GL08 error for `scripts/validate_docstrings.py --format=actions --errors=GL08 pandas.Series.dt`
xref: #57443
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/57751 | 2024-03-06T17:40:15Z | 2024-03-17T00:36:47Z | 2024-03-17T00:36:47Z | 2024-03-17T08:13:18Z |
CLN: Enforce deprecation of groupby.idxmin/idxmax with skipna=False not raising | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index fae7edba057ec..470f25d3860be 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -189,6 +189,7 @@ Other Deprecations
Removal of prior version deprecations/changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- :class:`.DataFrameGroupBy.idxmin`, :class:`.DataFrameGroupBy.idxmax`, :class:`.SeriesGroupBy.idxmin`, and :class:`.SeriesGroupBy.idxmax` will now raise a ``ValueError`` when used with ``skipna=False`` and an NA value is encountered (:issue:`10694`)
- :func:`read_excel`, :func:`read_json`, :func:`read_html`, and :func:`read_xml` no longer accept raw string or byte representation of the data. That type of data must be wrapped in a :py:class:`StringIO` or :py:class:`BytesIO` (:issue:`53767`)
- :meth:`Series.dt.to_pydatetime` now returns a :class:`Series` of :py:class:`datetime.datetime` objects (:issue:`52459`)
- :meth:`SeriesGroupBy.agg` no longer pins the name of the group to the input passed to the provided ``func`` (:issue:`51703`)
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 9449e6d7abdec..52fd7735b533e 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1179,8 +1179,7 @@ def idxmin(self, skipna: bool = True) -> Series:
Parameters
----------
skipna : bool, default True
- Exclude NA/null values. If the entire Series is NA, the result
- will be NA.
+ Exclude NA values.
Returns
-------
@@ -1190,7 +1189,7 @@ def idxmin(self, skipna: bool = True) -> Series:
Raises
------
ValueError
- If the Series is empty.
+ If the Series is empty or skipna=False and any value is NA.
See Also
--------
@@ -1233,8 +1232,7 @@ def idxmax(self, skipna: bool = True) -> Series:
Parameters
----------
skipna : bool, default True
- Exclude NA/null values. If the entire Series is NA, the result
- will be NA.
+ Exclude NA values.
Returns
-------
@@ -1244,7 +1242,7 @@ def idxmax(self, skipna: bool = True) -> Series:
Raises
------
ValueError
- If the Series is empty.
+ If the Series is empty or skipna=False and any value is NA.
See Also
--------
@@ -2165,13 +2163,10 @@ def idxmax(
"""
Return index of first occurrence of maximum in each group.
- NA/null values are excluded.
-
Parameters
----------
skipna : bool, default True
- Exclude NA/null values. If an entire row/column is NA, the result
- will be NA.
+ Exclude NA values.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
@@ -2185,7 +2180,7 @@ def idxmax(
Raises
------
ValueError
- * If the row/column is empty
+ * If a column is empty or skipna=False and any value is NA.
See Also
--------
@@ -2230,13 +2225,10 @@ def idxmin(
"""
Return index of first occurrence of minimum in each group.
- NA/null values are excluded.
-
Parameters
----------
skipna : bool, default True
- Exclude NA/null values. If an entire row/column is NA, the result
- will be NA.
+ Exclude NA values.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
@@ -2250,7 +2242,7 @@ def idxmin(
Raises
------
ValueError
- * If the row/column is empty
+ * If a column is empty or skipna=False and any value is NA.
See Also
--------
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 61168f71f4924..d90ef41058a2b 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -5553,15 +5553,11 @@ def _idxmax_idxmin(
f"Can't get {how} of an empty group due to unobserved categories. "
"Specify observed=True in groupby instead."
)
- elif not skipna:
- if self._obj_with_exclusions.isna().any(axis=None):
- warnings.warn(
- f"The behavior of {type(self).__name__}.{how} with all-NA "
- "values, or any-NA and skipna=False, is deprecated. In a future "
- "version this will raise ValueError",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
+ elif not skipna and self._obj_with_exclusions.isna().any(axis=None):
+ raise ValueError(
+ f"{type(self).__name__}.{how} with skipna=False encountered an NA "
+ f"value."
+ )
result = self._agg_general(
numeric_only=numeric_only,
diff --git a/pandas/tests/groupby/test_reductions.py b/pandas/tests/groupby/test_reductions.py
index 2037ded9f20e6..edc94b2beeec1 100644
--- a/pandas/tests/groupby/test_reductions.py
+++ b/pandas/tests/groupby/test_reductions.py
@@ -291,16 +291,14 @@ def test_idxmin_idxmax_extremes_skipna(skipna, how, float_numpy_dtype):
)
gb = df.groupby("a")
- warn = None if skipna else FutureWarning
- msg = f"The behavior of DataFrameGroupBy.{how} with all-NA values"
- with tm.assert_produces_warning(warn, match=msg):
- result = getattr(gb, how)(skipna=skipna)
- if skipna:
- values = [1, 3, 4, 6, np.nan]
- else:
- values = np.nan
+ if not skipna:
+ msg = f"DataFrameGroupBy.{how} with skipna=False"
+ with pytest.raises(ValueError, match=msg):
+ getattr(gb, how)(skipna=skipna)
+ return
+ result = getattr(gb, how)(skipna=skipna)
expected = DataFrame(
- {"b": values}, index=pd.Index(range(1, 6), name="a", dtype="intp")
+ {"b": [1, 3, 4, 6, np.nan]}, index=pd.Index(range(1, 6), name="a", dtype="intp")
)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index db327cc689afe..0b4dfb41ab9cc 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -1525,10 +1525,11 @@ def test_idxmin_idxmax_transform_args(how, skipna, numeric_only):
# GH#55268 - ensure *args are passed through when calling transform
df = DataFrame({"a": [1, 1, 1, 2], "b": [3.0, 4.0, np.nan, 6.0], "c": list("abcd")})
gb = df.groupby("a")
- warn = None if skipna else FutureWarning
- msg = f"The behavior of DataFrameGroupBy.{how} with .* any-NA and skipna=False"
- with tm.assert_produces_warning(warn, match=msg):
+ if skipna:
result = gb.transform(how, skipna, numeric_only)
- with tm.assert_produces_warning(warn, match=msg):
expected = gb.transform(how, skipna=skipna, numeric_only=numeric_only)
- tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, expected)
+ else:
+ msg = f"DataFrameGroupBy.{how} with skipna=False encountered an NA value"
+ with pytest.raises(ValueError, match=msg):
+ gb.transform(how, skipna, numeric_only)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Ref: #54234
In the original deprecation, I missed the case of all NA values. Opened https://github.com/pandas-dev/pandas/issues/57745 to track. | https://api.github.com/repos/pandas-dev/pandas/pulls/57746 | 2024-03-06T04:01:24Z | 2024-03-06T23:58:10Z | 2024-03-06T23:58:10Z | 2024-03-06T23:59:05Z |
CLN: Enforce deprecation of groupby.quantile supporting bool dtype | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index fae7edba057ec..98e872221722c 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -202,6 +202,7 @@ Removal of prior version deprecations/changes
- In :meth:`DataFrame.stack`, the default value of ``future_stack`` is now ``True``; specifying ``False`` will raise a ``FutureWarning`` (:issue:`55448`)
- Methods ``apply``, ``agg``, and ``transform`` will no longer replace NumPy functions (e.g. ``np.sum``) and built-in functions (e.g. ``min``) with the equivalent pandas implementation; use string aliases (e.g. ``"sum"`` and ``"min"``) if you desire to use the pandas implementation (:issue:`53974`)
- Passing both ``freq`` and ``fill_value`` in :meth:`DataFrame.shift` and :meth:`Series.shift` and :meth:`.DataFrameGroupBy.shift` now raises a ``ValueError`` (:issue:`54818`)
+- Removed :meth:`.DataFrameGroupBy.quantile` and :meth:`.SeriesGroupBy.quantile` supporting bool dtype (:issue:`53975`)
- Removed :meth:`DateOffset.is_anchored` and :meth:`offsets.Tick.is_anchored` (:issue:`56594`)
- Removed ``DataFrame.applymap``, ``Styler.applymap`` and ``Styler.applymap_index`` (:issue:`52364`)
- Removed ``DataFrame.bool`` and ``Series.bool`` (:issue:`51756`)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 61168f71f4924..251ec17790ef2 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -4272,16 +4272,8 @@ def pre_processor(vals: ArrayLike) -> tuple[np.ndarray, DtypeObj | None]:
elif is_bool_dtype(vals.dtype) and isinstance(vals, ExtensionArray):
out = vals.to_numpy(dtype=float, na_value=np.nan)
elif is_bool_dtype(vals.dtype):
- # GH#51424 deprecate to match Series/DataFrame behavior
- warnings.warn(
- f"Allowing bool dtype in {type(self).__name__}.quantile is "
- "deprecated and will raise in a future version, matching "
- "the Series/DataFrame behavior. Cast to uint8 dtype before "
- "calling quantile instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- out = np.asarray(vals)
+ # GH#51424 remove to match Series/DataFrame behavior
+ raise TypeError("Cannot use quantile with bool dtype")
elif needs_i8_conversion(vals.dtype):
inference = vals.dtype
# In this case we need to delay the casting until after the
diff --git a/pandas/tests/groupby/test_numeric_only.py b/pandas/tests/groupby/test_numeric_only.py
index 1b435fd55d05e..55a79863f206b 100644
--- a/pandas/tests/groupby/test_numeric_only.py
+++ b/pandas/tests/groupby/test_numeric_only.py
@@ -368,18 +368,11 @@ def test_deprecate_numeric_only_series(dtype, groupby_func, request):
msg = "cannot be performed against 'object' dtypes"
else:
msg = "is not supported for object dtype"
- warn = FutureWarning if groupby_func == "fillna" else None
- warn_msg = "DataFrameGroupBy.fillna is deprecated"
- with tm.assert_produces_warning(warn, match=warn_msg):
- with pytest.raises(TypeError, match=msg):
- method(*args)
+ with pytest.raises(TypeError, match=msg):
+ method(*args)
elif dtype is object:
- warn = FutureWarning if groupby_func == "fillna" else None
- warn_msg = "SeriesGroupBy.fillna is deprecated"
- with tm.assert_produces_warning(warn, match=warn_msg):
- result = method(*args)
- with tm.assert_produces_warning(warn, match=warn_msg):
- expected = expected_method(*args)
+ result = method(*args)
+ expected = expected_method(*args)
if groupby_func in obj_result:
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
@@ -419,12 +412,10 @@ def test_deprecate_numeric_only_series(dtype, groupby_func, request):
with pytest.raises(TypeError, match=msg):
method(*args, numeric_only=True)
elif dtype == bool and groupby_func == "quantile":
- msg = "Allowing bool dtype in SeriesGroupBy.quantile"
- with tm.assert_produces_warning(FutureWarning, match=msg):
+ msg = "Cannot use quantile with bool dtype"
+ with pytest.raises(TypeError, match=msg):
# GH#51424
- result = method(*args, numeric_only=True)
- expected = method(*args, numeric_only=False)
- tm.assert_series_equal(result, expected)
+ method(*args, numeric_only=False)
else:
result = method(*args, numeric_only=True)
expected = method(*args, numeric_only=False)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Ref: #53975 | https://api.github.com/repos/pandas-dev/pandas/pulls/57744 | 2024-03-06T03:28:08Z | 2024-03-07T00:02:39Z | 2024-03-07T00:02:39Z | 2024-03-07T02:16:13Z |
CLN: Enforce deprecation get_group with tuples of length 1 | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index 157b87c93e729..700256389441f 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -198,9 +198,11 @@ Removal of prior version deprecations/changes
- All arguments in :meth:`Series.to_dict` are now keyword only (:issue:`56493`)
- Changed the default value of ``observed`` in :meth:`DataFrame.groupby` and :meth:`Series.groupby` to ``True`` (:issue:`51811`)
- Enforced deprecation disallowing parsing datetimes with mixed time zones unless user passes ``utc=True`` to :func:`to_datetime` (:issue:`57275`)
+- Enforced deprecation of :meth:`.DataFrameGroupBy.get_group` and :meth:`.SeriesGroupBy.get_group` allowing the ``name`` argument to be a non-tuple when grouping by a list of length 1 (:issue:`54155`)
- Enforced deprecation of ``axis=None`` acting the same as ``axis=0`` in the DataFrame reductions ``sum``, ``prod``, ``std``, ``var``, and ``sem``, passing ``axis=None`` will now reduce over both axes; this is particularly the case when doing e.g. ``numpy.sum(df)`` (:issue:`21597`)
- Enforced silent-downcasting deprecation for :ref:`all relevant methods <whatsnew_220.silent_downcasting>` (:issue:`54710`)
- In :meth:`DataFrame.stack`, the default value of ``future_stack`` is now ``True``; specifying ``False`` will raise a ``FutureWarning`` (:issue:`55448`)
+- Iterating over a :class:`.DataFrameGroupBy` or :class:`.SeriesGroupBy` will return tuples of length 1 for the groups when grouping by ``level`` a list of length 1 (:issue:`50064`)
- Methods ``apply``, ``agg``, and ``transform`` will no longer replace NumPy functions (e.g. ``np.sum``) and built-in functions (e.g. ``min``) with the equivalent pandas implementation; use string aliases (e.g. ``"sum"`` and ``"min"``) if you desire to use the pandas implementation (:issue:`53974`)
- Passing both ``freq`` and ``fill_value`` in :meth:`DataFrame.shift` and :meth:`Series.shift` and :meth:`.DataFrameGroupBy.shift` now raises a ``ValueError`` (:issue:`54818`)
- Removed :meth:`DateOffset.is_anchored` and :meth:`offsets.Tick.is_anchored` (:issue:`56594`)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 61168f71f4924..d5d57a4f7402f 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -920,17 +920,9 @@ def get_group(self, name) -> DataFrame | Series:
):
# GH#25971
if isinstance(name, tuple) and len(name) == 1:
- # Allow users to pass tuples of length 1 to silence warning
name = name[0]
- elif not isinstance(name, tuple):
- warnings.warn(
- "When grouping with a length-1 list-like, "
- "you will need to pass a length-1 tuple to get_group in a future "
- "version of pandas. Pass `(name,)` instead of `name` to silence "
- "this warning.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
+ else:
+ raise KeyError(name)
inds = self._get_index(name)
if not len(inds):
@@ -1016,18 +1008,11 @@ def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]:
keys = self.keys
level = self.level
result = self._grouper.get_iterator(self._selected_obj)
- # error: Argument 1 to "len" has incompatible type "Hashable"; expected "Sized"
- if is_list_like(level) and len(level) == 1: # type: ignore[arg-type]
- # GH 51583
- warnings.warn(
- "Creating a Groupby object with a length-1 list-like "
- "level parameter will yield indexes as tuples in a future version. "
- "To keep indexes as scalars, create Groupby objects with "
- "a scalar level parameter instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- if isinstance(keys, list) and len(keys) == 1:
+ # mypy: Argument 1 to "len" has incompatible type "Hashable"; expected "Sized"
+ if (
+ (is_list_like(level) and len(level) == 1) # type: ignore[arg-type]
+ or (isinstance(keys, list) and len(keys) == 1)
+ ):
# GH#42795 - when keys is a list, return tuples even when length is 1
result = (((key,), group) for key, group in result)
return result
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 76c8a6fdb9570..6b8eda13aa3e6 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -252,11 +252,7 @@ def test_level_get_group(observed):
names=["Index1", "Index2"],
),
)
- msg = "you will need to pass a length-1 tuple"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- # GH#25971 - warn when not passing a length-1 tuple
- result = g.get_group("a")
-
+ result = g.get_group(("a",))
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 686279f25939a..c5727f922ae69 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2533,19 +2533,14 @@ def test_groupby_string_dtype():
@pytest.mark.parametrize(
"level_arg, multiindex", [([0], False), ((0,), False), ([0], True), ((0,), True)]
)
-def test_single_element_listlike_level_grouping_deprecation(level_arg, multiindex):
+def test_single_element_listlike_level_grouping(level_arg, multiindex):
# GH 51583
df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"])
if multiindex:
df = df.set_index(["a", "b"])
- depr_msg = (
- "Creating a Groupby object with a length-1 list-like "
- "level parameter will yield indexes as tuples in a future version. "
- "To keep indexes as scalars, create Groupby objects with "
- "a scalar level parameter instead."
- )
- with tm.assert_produces_warning(FutureWarning, match=depr_msg):
- [key for key, _ in df.groupby(level=level_arg)]
+ result = [key for key, _ in df.groupby(level=level_arg)]
+ expected = [(1,), (2,)] if multiindex else [("x",), ("y",)]
+ assert result == expected
@pytest.mark.parametrize("func", ["sum", "cumsum", "cumprod", "prod"])
@@ -2887,22 +2882,18 @@ def test_groupby_series_with_datetimeindex_month_name():
"kwarg, value, name, warn",
[
("by", "a", 1, None),
- ("by", ["a"], 1, FutureWarning),
("by", ["a"], (1,), None),
("level", 0, 1, None),
- ("level", [0], 1, FutureWarning),
("level", [0], (1,), None),
],
)
-def test_depr_get_group_len_1_list_likes(test_series, kwarg, value, name, warn):
+def test_get_group_len_1_list_likes(test_series, kwarg, value, name, warn):
# GH#25971
obj = DataFrame({"b": [3, 4, 5]}, index=Index([1, 1, 2], name="a"))
if test_series:
obj = obj["b"]
gb = obj.groupby(**{kwarg: value})
- msg = "you will need to pass a length-1 tuple"
- with tm.assert_produces_warning(warn, match=msg):
- result = gb.get_group(name)
+ result = gb.get_group(name)
if test_series:
expected = Series([3, 4], index=Index([1, 1], name="a"), name="b")
else:
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Ref: #54155 & #50064 | https://api.github.com/repos/pandas-dev/pandas/pulls/57743 | 2024-03-06T02:55:35Z | 2024-03-07T04:42:40Z | 2024-03-07T04:42:40Z | 2024-03-07T21:08:57Z |
CLN: Enforce deprecation of method and limit in pct_change methods | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index fae7edba057ec..26d300680a34d 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -226,6 +226,7 @@ Removal of prior version deprecations/changes
- Removed ``read_gbq`` and ``DataFrame.to_gbq``. Use ``pandas_gbq.read_gbq`` and ``pandas_gbq.to_gbq`` instead https://pandas-gbq.readthedocs.io/en/latest/api.html (:issue:`55525`)
- Removed ``use_nullable_dtypes`` from :func:`read_parquet` (:issue:`51853`)
- Removed ``year``, ``month``, ``quarter``, ``day``, ``hour``, ``minute``, and ``second`` keywords in the :class:`PeriodIndex` constructor, use :meth:`PeriodIndex.from_fields` instead (:issue:`55960`)
+- Removed argument ``limit`` from :meth:`DataFrame.pct_change`, :meth:`Series.pct_change`, :meth:`.DataFrameGroupBy.pct_change`, and :meth:`.SeriesGroupBy.pct_change`; the argument ``method`` must be set to ``None`` and will be removed in a future version of pandas (:issue:`53520`)
- Removed deprecated argument ``obj`` in :meth:`.DataFrameGroupBy.get_group` and :meth:`.SeriesGroupBy.get_group` (:issue:`53545`)
- Removed deprecated behavior of :meth:`Series.agg` using :meth:`Series.apply` (:issue:`53325`)
- Removed option ``mode.use_inf_as_na``, convert inf entries to ``NaN`` before instead (:issue:`51684`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1bc6b7a3eea03..68578098ca609 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -11120,8 +11120,7 @@ def describe(
def pct_change(
self,
periods: int = 1,
- fill_method: FillnaOptions | None | lib.NoDefault = lib.no_default,
- limit: int | None | lib.NoDefault = lib.no_default,
+ fill_method: None = None,
freq=None,
**kwargs,
) -> Self:
@@ -11143,17 +11142,12 @@ def pct_change(
----------
periods : int, default 1
Periods to shift for forming percent change.
- fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad'
- How to handle NAs **before** computing percent changes.
+ fill_method : None
+ Must be None. This argument will be removed in a future version of pandas.
.. deprecated:: 2.1
All options of `fill_method` are deprecated except `fill_method=None`.
- limit : int, default None
- The number of consecutive NAs to fill before stopping.
-
- .. deprecated:: 2.1
-
freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'ME' or BDay()).
**kwargs
@@ -11260,52 +11254,18 @@ def pct_change(
APPL -0.252395 -0.011860 NaN
"""
# GH#53491
- if fill_method not in (lib.no_default, None) or limit is not lib.no_default:
- warnings.warn(
- "The 'fill_method' keyword being not None and the 'limit' keyword in "
- f"{type(self).__name__}.pct_change are deprecated and will be removed "
- "in a future version. Either fill in any non-leading NA values prior "
- "to calling pct_change or specify 'fill_method=None' to not fill NA "
- "values.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- if fill_method is lib.no_default:
- if limit is lib.no_default:
- cols = self.items() if self.ndim == 2 else [(None, self)]
- for _, col in cols:
- if len(col) > 0:
- mask = col.isna().values
- mask = mask[np.argmax(~mask) :]
- if mask.any():
- warnings.warn(
- "The default fill_method='pad' in "
- f"{type(self).__name__}.pct_change is deprecated and "
- "will be removed in a future version. Either fill in "
- "any non-leading NA values prior to calling pct_change "
- "or specify 'fill_method=None' to not fill NA values.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- break
- fill_method = "pad"
- if limit is lib.no_default:
- limit = None
+ if fill_method is not None:
+ raise ValueError(f"fill_method must be None; got {fill_method=}.")
axis = self._get_axis_number(kwargs.pop("axis", "index"))
- if fill_method is None:
- data = self
- else:
- data = self._pad_or_backfill(fill_method, axis=axis, limit=limit)
-
- shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs)
+ shifted = self.shift(periods=periods, freq=freq, axis=axis, **kwargs)
# Unsupported left operand type for / ("Self")
- rs = data / shifted - 1 # type: ignore[operator]
+ rs = self / shifted - 1 # type: ignore[operator]
if freq is not None:
# Shift method is implemented differently when freq is not None
# We want to restore the original index
rs = rs.loc[~rs.index.duplicated()]
- rs = rs.reindex_like(data)
+ rs = rs.reindex_like(self)
return rs.__finalize__(self, method="pct_change")
@final
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 61168f71f4924..e39523346616c 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -45,7 +45,6 @@ class providing the base-class of operations.
AnyArrayLike,
ArrayLike,
DtypeObj,
- FillnaOptions,
IndexLabel,
IntervalClosedType,
NDFrameT,
@@ -5147,8 +5146,7 @@ def diff(
def pct_change(
self,
periods: int = 1,
- fill_method: FillnaOptions | None | lib.NoDefault = lib.no_default,
- limit: int | None | lib.NoDefault = lib.no_default,
+ fill_method: None = None,
freq=None,
):
"""
@@ -5161,19 +5159,11 @@ def pct_change(
a period of 1 means adjacent elements are compared, whereas a period
of 2 compares every other element.
- fill_method : FillnaOptions or None, default None
- Specifies how to handle missing values after the initial shift
- operation necessary for percentage change calculation. Users are
- encouraged to handle missing values manually in future versions.
- Valid options are:
- - A FillnaOptions value ('ffill', 'bfill') for forward or backward filling.
- - None to avoid filling.
- Note: Usage is discouraged due to impending deprecation.
+ fill_method : None
+ Must be None. This argument will be removed in a future version of pandas.
- limit : int or None, default None
- The maximum number of consecutive NA values to fill, based on the chosen
- `fill_method`. Address NaN values prior to using `pct_change` as this
- parameter is nearing deprecation.
+ .. deprecated:: 2.1
+ All options of `fill_method` are deprecated except `fill_method=None`.
freq : str, pandas offset object, or None, default None
The frequency increment for time series data (e.g., 'M' for month-end).
@@ -5227,49 +5217,24 @@ def pct_change(
goldfish 0.2 0.125
"""
# GH#53491
- if fill_method not in (lib.no_default, None) or limit is not lib.no_default:
- warnings.warn(
- "The 'fill_method' keyword being not None and the 'limit' keyword in "
- f"{type(self).__name__}.pct_change are deprecated and will be removed "
- "in a future version. Either fill in any non-leading NA values prior "
- "to calling pct_change or specify 'fill_method=None' to not fill NA "
- "values.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- if fill_method is lib.no_default:
- if limit is lib.no_default and any(
- grp.isna().values.any() for _, grp in self
- ):
- warnings.warn(
- "The default fill_method='ffill' in "
- f"{type(self).__name__}.pct_change is deprecated and will "
- "be removed in a future version. Either fill in any "
- "non-leading NA values prior to calling pct_change or "
- "specify 'fill_method=None' to not fill NA values.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- fill_method = "ffill"
- if limit is lib.no_default:
- limit = None
+ if fill_method is not None:
+ raise ValueError(f"fill_method must be None; got {fill_method=}.")
# TODO(GH#23918): Remove this conditional for SeriesGroupBy when
# GH#23918 is fixed
if freq is not None:
f = lambda x: x.pct_change(
periods=periods,
- fill_method=fill_method,
- limit=limit,
freq=freq,
axis=0,
)
return self._python_apply_general(f, self._selected_obj, is_transform=True)
if fill_method is None: # GH30463
- fill_method = "ffill"
- limit = 0
- filled = getattr(self, fill_method)(limit=limit)
+ op = "ffill"
+ else:
+ op = fill_method
+ filled = getattr(self, op)(limit=0)
fill_grp = filled.groupby(self._grouper.codes, group_keys=self.group_keys)
shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
diff --git a/pandas/tests/frame/methods/test_pct_change.py b/pandas/tests/frame/methods/test_pct_change.py
index 92b66e12d4356..7d4197577228e 100644
--- a/pandas/tests/frame/methods/test_pct_change.py
+++ b/pandas/tests/frame/methods/test_pct_change.py
@@ -10,30 +10,17 @@
class TestDataFramePctChange:
@pytest.mark.parametrize(
- "periods, fill_method, limit, exp",
+ "periods, exp",
[
- (1, "ffill", None, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, 0]),
- (1, "ffill", 1, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, np.nan]),
- (1, "bfill", None, [np.nan, 0, 0, 1, 1, 1.5, np.nan, np.nan]),
- (1, "bfill", 1, [np.nan, np.nan, 0, 1, 1, 1.5, np.nan, np.nan]),
- (-1, "ffill", None, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, 0, np.nan]),
- (-1, "ffill", 1, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, np.nan, np.nan]),
- (-1, "bfill", None, [0, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
- (-1, "bfill", 1, [np.nan, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
+ (1, [np.nan, np.nan, np.nan, 1, 1, 1.5, np.nan, np.nan]),
+ (-1, [np.nan, np.nan, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
],
)
- def test_pct_change_with_nas(
- self, periods, fill_method, limit, exp, frame_or_series
- ):
+ def test_pct_change_with_nas(self, periods, exp, frame_or_series):
vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
obj = frame_or_series(vals)
- msg = (
- "The 'fill_method' keyword being not None and the 'limit' keyword in "
- f"{type(obj).__name__}.pct_change are deprecated"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = obj.pct_change(periods=periods, fill_method=fill_method, limit=limit)
+ res = obj.pct_change(periods=periods)
tm.assert_equal(res, frame_or_series(exp))
def test_pct_change_numeric(self):
@@ -45,40 +32,28 @@ def test_pct_change_numeric(self):
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
- msg = (
- "The 'fill_method' keyword being not None and the 'limit' keyword in "
- "DataFrame.pct_change are deprecated"
- )
-
for axis in range(2):
- expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(axis=axis) - 1
-
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = pnl.pct_change(axis=axis, fill_method="pad")
+ expected = pnl / pnl.shift(axis=axis) - 1
+ result = pnl.pct_change(axis=axis)
tm.assert_frame_equal(result, expected)
def test_pct_change(self, datetime_frame):
- msg = (
- "The 'fill_method' keyword being not None and the 'limit' keyword in "
- "DataFrame.pct_change are deprecated"
- )
-
- rs = datetime_frame.pct_change(fill_method=None)
+ rs = datetime_frame.pct_change()
tm.assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1)
rs = datetime_frame.pct_change(2)
filled = datetime_frame.ffill()
tm.assert_frame_equal(rs, filled / filled.shift(2) - 1)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- rs = datetime_frame.pct_change(fill_method="bfill", limit=1)
- filled = datetime_frame.bfill(limit=1)
- tm.assert_frame_equal(rs, filled / filled.shift(1) - 1)
+ rs = datetime_frame.pct_change()
+ tm.assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1)
rs = datetime_frame.pct_change(freq="5D")
- filled = datetime_frame.ffill()
tm.assert_frame_equal(
- rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled)
+ rs,
+ (datetime_frame / datetime_frame.shift(freq="5D") - 1).reindex_like(
+ datetime_frame
+ ),
)
def test_pct_change_shift_over_nas(self):
@@ -86,75 +61,45 @@ def test_pct_change_shift_over_nas(self):
df = DataFrame({"a": s, "b": s})
- msg = "The default fill_method='pad' in DataFrame.pct_change is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- chg = df.pct_change()
-
- expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
+ chg = df.pct_change()
+ expected = Series([np.nan, 0.5, np.nan, np.nan, 0.2])
edf = DataFrame({"a": expected, "b": expected})
tm.assert_frame_equal(chg, edf)
@pytest.mark.parametrize(
- "freq, periods, fill_method, limit",
+ "freq, periods",
[
- ("5B", 5, None, None),
- ("3B", 3, None, None),
- ("3B", 3, "bfill", None),
- ("7B", 7, "pad", 1),
- ("7B", 7, "bfill", 3),
- ("14B", 14, None, None),
+ ("5B", 5),
+ ("3B", 3),
+ ("14B", 14),
],
)
def test_pct_change_periods_freq(
- self, datetime_frame, freq, periods, fill_method, limit
+ self,
+ datetime_frame,
+ freq,
+ periods,
):
- msg = (
- "The 'fill_method' keyword being not None and the 'limit' keyword in "
- "DataFrame.pct_change are deprecated"
- )
-
# GH#7292
- with tm.assert_produces_warning(FutureWarning, match=msg):
- rs_freq = datetime_frame.pct_change(
- freq=freq, fill_method=fill_method, limit=limit
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- rs_periods = datetime_frame.pct_change(
- periods, fill_method=fill_method, limit=limit
- )
+ rs_freq = datetime_frame.pct_change(freq=freq)
+ rs_periods = datetime_frame.pct_change(periods)
tm.assert_frame_equal(rs_freq, rs_periods)
empty_ts = DataFrame(index=datetime_frame.index, columns=datetime_frame.columns)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- rs_freq = empty_ts.pct_change(
- freq=freq, fill_method=fill_method, limit=limit
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- rs_periods = empty_ts.pct_change(
- periods, fill_method=fill_method, limit=limit
- )
+ rs_freq = empty_ts.pct_change(freq=freq)
+ rs_periods = empty_ts.pct_change(periods)
tm.assert_frame_equal(rs_freq, rs_periods)
-@pytest.mark.parametrize("fill_method", ["pad", "ffill", None])
-def test_pct_change_with_duplicated_indices(fill_method):
+def test_pct_change_with_duplicated_indices():
# GH30463
data = DataFrame(
{0: [np.nan, 1, 2, 3, 9, 18], 1: [0, 1, np.nan, 3, 9, 18]}, index=["a", "b"] * 3
)
- warn = None if fill_method is None else FutureWarning
- msg = (
- "The 'fill_method' keyword being not None and the 'limit' keyword in "
- "DataFrame.pct_change are deprecated"
- )
- with tm.assert_produces_warning(warn, match=msg):
- result = data.pct_change(fill_method=fill_method)
+ result = data.pct_change()
- if fill_method is None:
- second_column = [np.nan, np.inf, np.nan, np.nan, 2.0, 1.0]
- else:
- second_column = [np.nan, np.inf, 0.0, 2.0, 2.0, 1.0]
+ second_column = [np.nan, np.inf, np.nan, np.nan, 2.0, 1.0]
expected = DataFrame(
{0: [np.nan, np.nan, 1.0, 0.5, 2.0, 1.0], 1: second_column},
index=["a", "b"] * 3,
@@ -162,7 +107,7 @@ def test_pct_change_with_duplicated_indices(fill_method):
tm.assert_frame_equal(result, expected)
-def test_pct_change_none_beginning_no_warning():
+def test_pct_change_none_beginning():
# GH#54481
df = DataFrame(
[
diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py
index 54efe163f077e..68030c394d606 100644
--- a/pandas/tests/groupby/test_groupby_dropna.py
+++ b/pandas/tests/groupby/test_groupby_dropna.py
@@ -584,14 +584,8 @@ def test_categorical_reducers(reduction_func, observed, sort, as_index, index_ki
tm.assert_equal(result, expected)
-def test_categorical_transformers(
- request, transformation_func, observed, sort, as_index
-):
+def test_categorical_transformers(transformation_func, observed, sort, as_index):
# GH#36327
- if transformation_func == "fillna":
- msg = "GH#49651 fillna may incorrectly reorders results when dropna=False"
- request.applymarker(pytest.mark.xfail(reason=msg, strict=False))
-
values = np.append(np.random.default_rng(2).choice([1, 2, None], size=19), None)
df = pd.DataFrame(
{"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(20)}
@@ -621,12 +615,7 @@ def test_categorical_transformers(
)
gb_dropna = df.groupby("x", dropna=True, observed=observed, sort=sort)
- msg = "The default fill_method='ffill' in DataFrameGroupBy.pct_change is deprecated"
- if transformation_func == "pct_change":
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = getattr(gb_keepna, "pct_change")(*args)
- else:
- result = getattr(gb_keepna, transformation_func)(*args)
+ result = getattr(gb_keepna, transformation_func)(*args)
expected = getattr(gb_dropna, transformation_func)(*args)
for iloc, value in zip(
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index db327cc689afe..a5b6e19d8b065 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -344,31 +344,12 @@ def mock_op(x):
test_op = lambda x: x.transform(transformation_func)
mock_op = lambda x: getattr(x, transformation_func)()
- if transformation_func == "pct_change":
- msg = "The default fill_method='pad' in DataFrame.pct_change is deprecated"
- groupby_msg = (
- "The default fill_method='ffill' in DataFrameGroupBy.pct_change "
- "is deprecated"
- )
- warn = FutureWarning
- groupby_warn = FutureWarning
- elif transformation_func == "fillna":
- msg = ""
- groupby_msg = "DataFrameGroupBy.fillna is deprecated"
- warn = None
- groupby_warn = FutureWarning
- else:
- msg = groupby_msg = ""
- warn = groupby_warn = None
-
- with tm.assert_produces_warning(groupby_warn, match=groupby_msg):
- result = test_op(df.groupby("A"))
+ result = test_op(df.groupby("A"))
# pass the group in same order as iterating `for ... in df.groupby(...)`
# but reorder to match df's index since this is a transform
groups = [df[["B"]].iloc[4:6], df[["B"]].iloc[6:], df[["B"]].iloc[:4]]
- with tm.assert_produces_warning(warn, match=msg):
- expected = concat([mock_op(g) for g in groups]).sort_index()
+ expected = concat([mock_op(g) for g in groups]).sort_index()
# sort_index does not preserve the freq
expected = expected.set_axis(df.index)
@@ -917,9 +898,7 @@ def test_pad_stable_sorting(fill_method):
],
)
@pytest.mark.parametrize("periods", [1, -1])
-@pytest.mark.parametrize("fill_method", ["ffill", "bfill", None])
-@pytest.mark.parametrize("limit", [None, 1])
-def test_pct_change(frame_or_series, freq, periods, fill_method, limit):
+def test_pct_change(frame_or_series, freq, periods):
# GH 21200, 21621, 30463
vals = [3, np.nan, np.nan, np.nan, 1, 2, 4, 10, np.nan, 4]
keys = ["a", "b"]
@@ -927,8 +906,6 @@ def test_pct_change(frame_or_series, freq, periods, fill_method, limit):
df = DataFrame({"key": key_v, "vals": vals * 2})
df_g = df
- if fill_method is not None:
- df_g = getattr(df.groupby("key"), fill_method)(limit=limit)
grp = df_g.groupby(df.key)
expected = grp["vals"].obj / grp["vals"].shift(periods) - 1
@@ -940,14 +917,7 @@ def test_pct_change(frame_or_series, freq, periods, fill_method, limit):
else:
expected = expected.to_frame("vals")
- msg = (
- "The 'fill_method' keyword being not None and the 'limit' keyword in "
- f"{type(gb).__name__}.pct_change are deprecated"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = gb.pct_change(
- periods=periods, fill_method=fill_method, limit=limit, freq=freq
- )
+ result = gb.pct_change(periods=periods, freq=freq)
tm.assert_equal(result, expected)
@@ -1360,7 +1330,7 @@ def test_null_group_str_reducer(request, dropna, reduction_func):
tm.assert_equal(result, expected)
-def test_null_group_str_transformer(request, dropna, transformation_func):
+def test_null_group_str_transformer(dropna, transformation_func):
# GH 17093
df = DataFrame({"A": [1, 1, np.nan], "B": [1, 2, 2]}, index=[1, 2, 3])
args = get_groupby_method_args(transformation_func, df)
@@ -1385,21 +1355,7 @@ def test_null_group_str_transformer(request, dropna, transformation_func):
# ngroup/cumcount always returns a Series as it counts the groups, not values
expected = expected["B"].rename(None)
- if transformation_func == "pct_change" and not dropna:
- warn = FutureWarning
- msg = (
- "The default fill_method='ffill' in DataFrameGroupBy.pct_change "
- "is deprecated"
- )
- elif transformation_func == "fillna":
- warn = FutureWarning
- msg = "DataFrameGroupBy.fillna is deprecated"
- else:
- warn = None
- msg = ""
- with tm.assert_produces_warning(warn, match=msg):
- result = gb.transform(transformation_func, *args)
-
+ result = gb.transform(transformation_func, *args)
tm.assert_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_pct_change.py b/pandas/tests/series/methods/test_pct_change.py
index 6c80e711c3684..6279cf64818b8 100644
--- a/pandas/tests/series/methods/test_pct_change.py
+++ b/pandas/tests/series/methods/test_pct_change.py
@@ -10,23 +10,13 @@
class TestSeriesPctChange:
def test_pct_change(self, datetime_series):
- msg = (
- "The 'fill_method' keyword being not None and the 'limit' keyword in "
- "Series.pct_change are deprecated"
- )
-
- rs = datetime_series.pct_change(fill_method=None)
+ rs = datetime_series.pct_change()
tm.assert_series_equal(rs, datetime_series / datetime_series.shift(1) - 1)
rs = datetime_series.pct_change(2)
filled = datetime_series.ffill()
tm.assert_series_equal(rs, filled / filled.shift(2) - 1)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- rs = datetime_series.pct_change(fill_method="bfill", limit=1)
- filled = datetime_series.bfill(limit=1)
- tm.assert_series_equal(rs, filled / filled.shift(1) - 1)
-
rs = datetime_series.pct_change(freq="5D")
filled = datetime_series.ffill()
tm.assert_series_equal(
@@ -45,69 +35,27 @@ def test_pct_change_with_duplicate_axis(self):
def test_pct_change_shift_over_nas(self):
s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
-
- msg = "The default fill_method='pad' in Series.pct_change is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- chg = s.pct_change()
-
- expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
+ chg = s.pct_change()
+ expected = Series([np.nan, 0.5, np.nan, np.nan, 0.2])
tm.assert_series_equal(chg, expected)
- @pytest.mark.parametrize(
- "freq, periods, fill_method, limit",
- [
- ("5B", 5, None, None),
- ("3B", 3, None, None),
- ("3B", 3, "bfill", None),
- ("7B", 7, "pad", 1),
- ("7B", 7, "bfill", 3),
- ("14B", 14, None, None),
- ],
- )
- def test_pct_change_periods_freq(
- self, freq, periods, fill_method, limit, datetime_series
- ):
- msg = (
- "The 'fill_method' keyword being not None and the 'limit' keyword in "
- "Series.pct_change are deprecated"
- )
-
+ @pytest.mark.parametrize("freq, periods", [("5B", 5), ("3B", 3), ("14B", 14)])
+ def test_pct_change_periods_freq(self, freq, periods, datetime_series):
# GH#7292
- with tm.assert_produces_warning(FutureWarning, match=msg):
- rs_freq = datetime_series.pct_change(
- freq=freq, fill_method=fill_method, limit=limit
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- rs_periods = datetime_series.pct_change(
- periods, fill_method=fill_method, limit=limit
- )
+ rs_freq = datetime_series.pct_change(freq=freq)
+ rs_periods = datetime_series.pct_change(periods)
tm.assert_series_equal(rs_freq, rs_periods)
empty_ts = Series(index=datetime_series.index, dtype=object)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- rs_freq = empty_ts.pct_change(
- freq=freq, fill_method=fill_method, limit=limit
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- rs_periods = empty_ts.pct_change(
- periods, fill_method=fill_method, limit=limit
- )
+ rs_freq = empty_ts.pct_change(freq=freq)
+ rs_periods = empty_ts.pct_change(periods)
tm.assert_series_equal(rs_freq, rs_periods)
-@pytest.mark.parametrize("fill_method", ["pad", "ffill", None])
-def test_pct_change_with_duplicated_indices(fill_method):
+def test_pct_change_with_duplicated_indices():
# GH30463
s = Series([np.nan, 1, 2, 3, 9, 18], index=["a", "b"] * 3)
-
- warn = None if fill_method is None else FutureWarning
- msg = (
- "The 'fill_method' keyword being not None and the 'limit' keyword in "
- "Series.pct_change are deprecated"
- )
- with tm.assert_produces_warning(warn, match=msg):
- result = s.pct_change(fill_method=fill_method)
-
+ result = s.pct_change()
expected = Series([np.nan, np.nan, 1.0, 0.5, 2.0, 1.0], index=["a", "b"] * 3)
tm.assert_series_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Ref: #53520 | https://api.github.com/repos/pandas-dev/pandas/pulls/57742 | 2024-03-06T02:37:11Z | 2024-03-06T23:59:42Z | 2024-03-06T23:59:42Z | 2024-03-07T00:00:16Z |
CLN: Enforce deprecation of groupby with as_index=False excluding out-of-axis groupings | diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index fae7edba057ec..a757ffb2d9665 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -190,6 +190,7 @@ Other Deprecations
Removal of prior version deprecations/changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- :func:`read_excel`, :func:`read_json`, :func:`read_html`, and :func:`read_xml` no longer accept raw string or byte representation of the data. That type of data must be wrapped in a :py:class:`StringIO` or :py:class:`BytesIO` (:issue:`53767`)
+- :meth:`DataFrame.groupby` with ``as_index=False`` and aggregation methods will no longer exclude from the result the groupings that do not arise from the input (:issue:`49519`)
- :meth:`Series.dt.to_pydatetime` now returns a :class:`Series` of :py:class:`datetime.datetime` objects (:issue:`52459`)
- :meth:`SeriesGroupBy.agg` no longer pins the name of the group to the input passed to the provided ``func`` (:issue:`51703`)
- All arguments except ``name`` in :meth:`Index.rename` are now keyword only (:issue:`56493`)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 61168f71f4924..72f03bbe66263 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1287,34 +1287,43 @@ def _set_result_index_ordered(
return result
@final
- def _insert_inaxis_grouper(self, result: Series | DataFrame) -> DataFrame:
+ def _insert_inaxis_grouper(
+ self, result: Series | DataFrame, qs: npt.NDArray[np.float64] | None = None
+ ) -> DataFrame:
if isinstance(result, Series):
result = result.to_frame()
+ n_groupings = len(self._grouper.groupings)
+
+ if qs is not None:
+ result.insert(
+ 0, f"level_{n_groupings}", np.tile(qs, len(result) // len(qs))
+ )
+
# zip in reverse so we can always insert at loc 0
- columns = result.columns
- for name, lev, in_axis in zip(
- reversed(self._grouper.names),
- reversed(self._grouper.get_group_levels()),
- reversed([grp.in_axis for grp in self._grouper.groupings]),
+ for level, (name, lev, in_axis) in enumerate(
+ zip(
+ reversed(self._grouper.names),
+ reversed(self._grouper.get_group_levels()),
+ reversed([grp.in_axis for grp in self._grouper.groupings]),
+ )
):
+ if name is None:
+ # Behave the same as .reset_index() when a level is unnamed
+ name = (
+ "index"
+ if n_groupings == 1 and qs is None
+ else f"level_{n_groupings - level - 1}"
+ )
+
# GH #28549
# When using .apply(-), name will be in columns already
- if name not in columns:
- if in_axis:
+ if name not in result.columns:
+ # if in_axis:
+ if qs is None:
result.insert(0, name, lev)
else:
- msg = (
- "A grouping was used that is not in the columns of the "
- "DataFrame and so was excluded from the result. This grouping "
- "will be included in a future version of pandas. Add the "
- "grouping as a column of the DataFrame to silence this warning."
- )
- warnings.warn(
- message=msg,
- category=FutureWarning,
- stacklevel=find_stack_level(),
- )
+ result.insert(0, name, Index(np.repeat(lev, len(qs))))
return result
@@ -1341,18 +1350,17 @@ def _wrap_aggregated_output(
if not self.as_index:
# `not self.as_index` is only relevant for DataFrameGroupBy,
# enforced in __init__
- result = self._insert_inaxis_grouper(result)
+ result = self._insert_inaxis_grouper(result, qs=qs)
result = result._consolidate()
- index = Index(range(self._grouper.ngroups))
+ result.index = RangeIndex(len(result))
else:
index = self._grouper.result_index
-
- if qs is not None:
- # We get here with len(qs) != 1 and not self.as_index
- # in test_pass_args_kwargs
- index = _insert_quantile_level(index, qs)
- result.index = index
+ if qs is not None:
+ # We get here with len(qs) != 1 and not self.as_index
+ # in test_pass_args_kwargs
+ index = _insert_quantile_level(index, qs)
+ result.index = index
return result
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 255784e8bf24d..45a9540a1a0b9 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -1250,10 +1250,7 @@ def test_pass_args_kwargs_duplicate_columns(tsframe, as_index):
tsframe.columns = ["A", "B", "A", "C"]
gb = tsframe.groupby(lambda x: x.month, as_index=as_index)
- warn = None if as_index else FutureWarning
- msg = "A grouping .* was excluded from the result"
- with tm.assert_produces_warning(warn, match=msg):
- res = gb.agg(np.percentile, 80, axis=0)
+ res = gb.agg(np.percentile, 80, axis=0)
ex_data = {
1: tsframe[tsframe.index.month == 1].quantile(0.8),
@@ -1261,7 +1258,7 @@ def test_pass_args_kwargs_duplicate_columns(tsframe, as_index):
}
expected = DataFrame(ex_data).T
if not as_index:
- # TODO: try to get this more consistent?
+ expected.insert(0, "index", [1, 2])
expected.index = Index(range(2))
tm.assert_frame_equal(res, expected)
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 76c8a6fdb9570..467d932f1c45f 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -779,24 +779,27 @@ def test_as_index():
# function grouper
f = lambda r: df.loc[r, "A"]
- msg = "A grouping .* was excluded from the result"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.groupby(["cat", f], as_index=False, observed=True).sum()
+ result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
+ "level_1": [10, 11],
"A": [10, 22],
"B": [101, 205],
},
- columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
- msg = "A grouping .* was excluded from the result"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.groupby(["cat", s], as_index=False, observed=True).sum()
+ result = df.groupby(["cat", s], as_index=False, observed=True).sum()
+ expected = DataFrame(
+ {
+ "cat": ["a", "b"],
+ "A": [10, 22],
+ "B": [101, 205],
+ },
+ )
tm.assert_frame_equal(result, expected)
# is original index dropped?
@@ -1852,7 +1855,7 @@ def test_category_order_reducer(
request, as_index, sort, observed, reduction_func, index_kind, ordered
):
# GH#48749
- if reduction_func == "corrwith" and not as_index:
+ if reduction_func == "corrwith" and not as_index and index_kind != "single":
msg = "GH#49950 - corrwith with as_index=False may not have grouping column"
request.applymarker(pytest.mark.xfail(reason=msg))
elif index_kind != "range" and not as_index:
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 686279f25939a..52c93d566bc73 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -103,26 +103,22 @@ def f(x, q=None, axis=0):
# DataFrame
for as_index in [True, False]:
df_grouped = tsframe.groupby(lambda x: x.month, as_index=as_index)
- warn = None if as_index else FutureWarning
- msg = "A grouping .* was excluded from the result"
- with tm.assert_produces_warning(warn, match=msg):
- agg_result = df_grouped.agg(np.percentile, 80, axis=0)
- with tm.assert_produces_warning(warn, match=msg):
- apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
- with tm.assert_produces_warning(warn, match=msg):
- expected = df_grouped.quantile(0.8)
+ agg_result = df_grouped.agg(np.percentile, 80, axis=0)
+ apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
+ expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
apply_result = df_grouped.apply(DataFrame.quantile, [0.4, 0.8])
- with tm.assert_produces_warning(warn, match=msg):
- expected_seq = df_grouped.quantile([0.4, 0.8])
+ expected_seq = df_grouped.quantile([0.4, 0.8])
+ if not as_index:
+ # apply treats the op as a transform; .quantile knows it's a reduction
+ apply_result = apply_result.reset_index()
+ apply_result["level_0"] = [1, 1, 2, 2]
tm.assert_frame_equal(apply_result, expected_seq, check_names=False)
- with tm.assert_produces_warning(warn, match=msg):
- agg_result = df_grouped.agg(f, q=80)
- with tm.assert_produces_warning(warn, match=msg):
- apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
+ agg_result = df_grouped.agg(f, q=80)
+ apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py
index 54efe163f077e..9e2f9285d06ec 100644
--- a/pandas/tests/groupby/test_groupby_dropna.py
+++ b/pandas/tests/groupby/test_groupby_dropna.py
@@ -552,11 +552,6 @@ def test_categorical_reducers(reduction_func, observed, sort, as_index, index_ki
expected = expected.set_index(["x", "x2"])
else:
expected = expected.set_index("x")
- elif index_kind != "range" and reduction_func != "size":
- # size, unlike other methods, has the desired behavior in GH#49519
- expected = expected.drop(columns="x")
- if index_kind == "multi":
- expected = expected.drop(columns="x2")
if reduction_func in ("idxmax", "idxmin") and index_kind != "range":
# expected was computed with a RangeIndex; need to translate to index values
values = expected["y"].values.tolist()
@@ -572,13 +567,7 @@ def test_categorical_reducers(reduction_func, observed, sort, as_index, index_ki
if as_index:
expected = expected["size"].rename(None)
- if as_index or index_kind == "range" or reduction_func == "size":
- warn = None
- else:
- warn = FutureWarning
- msg = "A grouping .* was excluded from the result"
- with tm.assert_produces_warning(warn, match=msg):
- result = getattr(gb_keepna, reduction_func)(*args)
+ result = getattr(gb_keepna, reduction_func)(*args)
# size will return a Series, others are DataFrame
tm.assert_equal(result, expected)
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 8474d4c1d2d1c..2961369936717 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -1125,12 +1125,8 @@ def test_grouping_by_key_is_in_axis():
assert not gb._grouper.groupings[0].in_axis
assert gb._grouper.groupings[1].in_axis
- # Currently only in-axis groupings are including in the result when as_index=False;
- # This is likely to change in the future.
- msg = "A grouping .* was excluded from the result"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = gb.sum()
- expected = DataFrame({"b": [1, 2], "c": [7, 5]})
+ result = gb.sum()
+ expected = DataFrame({"a": [1, 2], "b": [1, 2], "c": [7, 5]})
tm.assert_frame_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Ref: #49519 | https://api.github.com/repos/pandas-dev/pandas/pulls/57741 | 2024-03-06T02:03:31Z | 2024-03-07T00:01:46Z | 2024-03-07T00:01:46Z | 2024-03-07T02:16:18Z |
Backport PR #57172: MAINT: Adjust the codebase to the new 's keyword meaning | diff --git a/pandas/core/array_algos/quantile.py b/pandas/core/array_algos/quantile.py
index ee6f00b219a15..5c933294fb944 100644
--- a/pandas/core/array_algos/quantile.py
+++ b/pandas/core/array_algos/quantile.py
@@ -102,7 +102,7 @@ def quantile_with_mask(
interpolation=interpolation,
)
- result = np.array(result, copy=False)
+ result = np.asarray(result)
result = result.T
return result
@@ -201,9 +201,9 @@ def _nanpercentile(
]
if values.dtype.kind == "f":
# preserve itemsize
- result = np.array(result, dtype=values.dtype, copy=False).T
+ result = np.asarray(result, dtype=values.dtype).T
else:
- result = np.array(result, copy=False).T
+ result = np.asarray(result).T
if (
result.dtype != values.dtype
and not mask.all()
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index f8b07bd73d315..f2b8aa75ca5bf 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -656,7 +656,9 @@ def __arrow_array__(self, type=None):
"""Convert myself to a pyarrow ChunkedArray."""
return self._pa_array
- def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
+ def __array__(
+ self, dtype: NpDtype | None = None, copy: bool | None = None
+ ) -> np.ndarray:
"""Correctly construct numpy arrays when passed to `np.asarray()`."""
return self.to_numpy(dtype=dtype)
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index ea0e2e54e3339..abfe2369b0d0d 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -719,7 +719,10 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
return TimedeltaArray._from_sequence(self, dtype=dtype, copy=copy)
- return np.array(self, dtype=dtype, copy=copy)
+ if not copy:
+ return np.asarray(self, dtype=dtype)
+ else:
+ return np.array(self, dtype=dtype, copy=copy)
def isna(self) -> np.ndarray | ExtensionArraySupportsAnyAll:
"""
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index b87c5375856dc..f191f7277743f 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1636,7 +1636,9 @@ def _validate_codes_for_dtype(cls, codes, *, dtype: CategoricalDtype) -> np.ndar
# -------------------------------------------------------------
@ravel_compat
- def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
+ def __array__(
+ self, dtype: NpDtype | None = None, copy: bool | None = None
+ ) -> np.ndarray:
"""
The numpy array interface.
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index a0e0a1434e871..1042a1b3fde61 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -351,7 +351,9 @@ def _formatter(self, boxed: bool = False):
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
- def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
+ def __array__(
+ self, dtype: NpDtype | None = None, copy: bool | None = None
+ ) -> np.ndarray:
# used for Timedelta/DatetimeArray, overwritten by PeriodArray
if is_object_dtype(dtype):
return np.array(list(self), dtype=object)
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 02656b655a0c6..a146220d249e2 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -635,12 +635,12 @@ def _resolution_obj(self) -> Resolution:
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype=None, copy=None) -> np.ndarray:
if dtype is None and self.tz:
# The default for tz-aware is object, to preserve tz info
dtype = object
- return super().__array__(dtype=dtype)
+ return super().__array__(dtype=dtype, copy=copy)
def __iter__(self) -> Iterator:
"""
@@ -2393,7 +2393,7 @@ def objects_to_datetime64(
assert errors in ["raise", "ignore", "coerce"]
# if str-dtype, convert
- data = np.array(data, copy=False, dtype=np.object_)
+ data = np.asarray(data, dtype=np.object_)
result, tz_parsed = tslib.array_to_datetime(
data,
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index e69f996441703..91db7f11bcbe0 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1567,7 +1567,9 @@ def is_non_overlapping_monotonic(self) -> bool:
# ---------------------------------------------------------------------
# Conversion
- def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
+ def __array__(
+ self, dtype: NpDtype | None = None, copy: bool | None = None
+ ) -> np.ndarray:
"""
Return the IntervalArray's data as a numpy array of Interval
objects (with dtype='object')
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 320d8cb10b8c2..d7e816b9d3781 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -593,7 +593,9 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
__array_priority__ = 1000 # higher than ndarray so ops dispatch to us
- def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
+ def __array__(
+ self, dtype: NpDtype | None = None, copy: bool | None = None
+ ) -> np.ndarray:
"""
the array interface, return my values
We return an object array here to preserve our scalar values
diff --git a/pandas/core/arrays/numeric.py b/pandas/core/arrays/numeric.py
index 210450e868698..68fa7fcb6573c 100644
--- a/pandas/core/arrays/numeric.py
+++ b/pandas/core/arrays/numeric.py
@@ -159,7 +159,10 @@ def _coerce_to_data_and_mask(
return values, mask, dtype, inferred_type
original = values
- values = np.array(values, copy=copy)
+ if not copy:
+ values = np.asarray(values)
+ else:
+ values = np.array(values, copy=copy)
inferred_type = None
if values.dtype == object or is_string_dtype(values.dtype):
inferred_type = lib.infer_dtype(values, skipna=True)
@@ -168,7 +171,10 @@ def _coerce_to_data_and_mask(
raise TypeError(f"{values.dtype} cannot be converted to {name}")
elif values.dtype.kind == "b" and checker(dtype):
- values = np.array(values, dtype=default_dtype, copy=copy)
+ if not copy:
+ values = np.asarray(values, dtype=default_dtype)
+ else:
+ values = np.array(values, dtype=default_dtype, copy=copy)
elif values.dtype.kind not in "iuf":
name = dtype_cls.__name__.strip("_")
@@ -207,9 +213,9 @@ def _coerce_to_data_and_mask(
inferred_type not in ["floating", "mixed-integer-float"]
and not mask.any()
):
- values = np.array(original, dtype=dtype, copy=False)
+ values = np.asarray(original, dtype=dtype)
else:
- values = np.array(original, dtype="object", copy=False)
+ values = np.asarray(original, dtype="object")
# we copy as need to coerce here
if mask.any():
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index d83a37088daec..07eb91e0cb13b 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -150,7 +150,9 @@ def dtype(self) -> NumpyEADtype:
# ------------------------------------------------------------------------
# NumPy Array Interface
- def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
+ def __array__(
+ self, dtype: NpDtype | None = None, copy: bool | None = None
+ ) -> np.ndarray:
return np.asarray(self._ndarray, dtype=dtype)
def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index d635eb4a25df3..c1229e27ab51a 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -256,7 +256,10 @@ def __init__(
raise raise_on_incompatible(values, dtype.freq)
values, dtype = values._ndarray, values.dtype
- values = np.array(values, dtype="int64", copy=copy)
+ if not copy:
+ values = np.asarray(values, dtype="int64")
+ else:
+ values = np.array(values, dtype="int64", copy=copy)
if dtype is None:
raise ValueError("dtype is not specified and cannot be inferred")
dtype = cast(PeriodDtype, dtype)
@@ -400,7 +403,9 @@ def freq(self) -> BaseOffset:
def freqstr(self) -> str:
return freq_to_period_freqstr(self.freq.n, self.freq.name)
- def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
+ def __array__(
+ self, dtype: NpDtype | None = None, copy: bool | None = None
+ ) -> np.ndarray:
if dtype == "i8":
return self.asi8
elif dtype == bool:
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 98d84d899094b..82fcfa74ec7d2 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -551,7 +551,9 @@ def from_spmatrix(cls, data: spmatrix) -> Self:
return cls._simple_new(arr, index, dtype)
- def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
+ def __array__(
+ self, dtype: NpDtype | None = None, copy: bool | None = None
+ ) -> np.ndarray:
fill_value = self.fill_value
if self.sp_index.ngaps == 0:
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 1b885a2bdcd47..e9260a3ec50a2 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -1072,7 +1072,10 @@ def sequence_to_td64ns(
# This includes datetime64-dtype, see GH#23539, GH#29794
raise TypeError(f"dtype {data.dtype} cannot be converted to timedelta64[ns]")
- data = np.array(data, copy=copy)
+ if not copy:
+ data = np.asarray(data)
+ else:
+ data = np.array(data, copy=copy)
assert data.dtype.kind == "m"
assert data.dtype != "m8" # i.e. not unit-less
@@ -1150,7 +1153,7 @@ def _objects_to_td64ns(data, unit=None, errors: DateTimeErrorChoices = "raise"):
higher level.
"""
# coerce Index to np.ndarray, converting string-dtype if necessary
- values = np.array(data, dtype=np.object_, copy=False)
+ values = np.asarray(data, dtype=np.object_)
result = array_to_timedelta64(values, unit=unit, errors=errors)
return result.view("timedelta64[ns]")
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index d41a9c80a10ec..f8250ae475a10 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -626,7 +626,10 @@ def sanitize_array(
elif hasattr(data, "__array__"):
# e.g. dask array GH#38645
- data = np.array(data, copy=copy)
+ if not copy:
+ data = np.asarray(data)
+ else:
+ data = np.array(data, copy=copy)
return sanitize_array(
data,
index=index,
@@ -744,8 +747,11 @@ def _sanitize_str_dtypes(
# GH#19853: If data is a scalar, result has already the result
if not lib.is_scalar(data):
if not np.all(isna(data)):
- data = np.array(data, dtype=dtype, copy=False)
- result = np.array(data, dtype=object, copy=copy)
+ data = np.asarray(data, dtype=dtype)
+ if not copy:
+ result = np.asarray(data, dtype=object)
+ else:
+ result = np.array(data, dtype=object, copy=copy)
return result
@@ -810,6 +816,8 @@ def _try_cast(
# this will raise if we have e.g. floats
subarr = maybe_cast_to_integer_array(arr, dtype)
+ elif not copy:
+ subarr = np.asarray(arr, dtype=dtype)
else:
subarr = np.array(arr, dtype=dtype, copy=copy)
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 690af6b0ebdc7..7dd81ec59bc49 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1501,7 +1501,10 @@ def construct_2d_arraylike_from_scalar(
# Attempt to coerce to a numpy array
try:
- arr = np.array(value, dtype=dtype, copy=copy)
+ if not copy:
+ arr = np.asarray(value, dtype=dtype)
+ else:
+ arr = np.array(value, dtype=dtype, copy=copy)
except (ValueError, TypeError) as err:
raise TypeError(
f"DataFrame constructor called with incompatible data and dtype: {err}"
@@ -1651,7 +1654,7 @@ def maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.n
"out-of-bound Python int",
DeprecationWarning,
)
- casted = np.array(arr, dtype=dtype, copy=False)
+ casted = np.asarray(arr, dtype=dtype)
else:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 655a53997620a..c341ff9dff7e6 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -632,7 +632,7 @@ def infer_fill_value(val):
"""
if not is_list_like(val):
val = [val]
- val = np.array(val, copy=False)
+ val = np.asarray(val)
if val.dtype.kind in "mM":
return np.array("NaT", dtype=val.dtype)
elif val.dtype == object:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index c09989fe87fb0..5c510d98596df 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1980,7 +1980,7 @@ def to_numpy(
dtype = np.dtype(dtype)
result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value)
if result.dtype is not dtype:
- result = np.array(result, dtype=dtype, copy=False)
+ result = np.asarray(result, dtype=dtype)
return result
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1dd471a09f1b1..2a86f75badecd 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2145,7 +2145,9 @@ def empty(self) -> bool_t:
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__: int = 1000
- def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:
+ def __array__(
+ self, dtype: npt.DTypeLike | None = None, copy: bool_t | None = None
+ ) -> np.ndarray:
values = self._values
arr = np.asarray(values, dtype=dtype)
if (
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 4b3d1a9e006dc..6822c2c99427e 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -912,7 +912,7 @@ def __len__(self) -> int:
"""
return len(self._data)
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype=None, copy=None) -> np.ndarray:
"""
The array interface, return my values.
"""
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 02a841a2075fd..091ddbcc099be 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -774,7 +774,7 @@ def _values(self) -> np.ndarray:
):
vals = vals.astype(object)
- vals = np.array(vals, copy=False)
+ vals = np.asarray(vals)
vals = algos.take_nd(vals, codes, fill_value=index._na_value)
values.append(vals)
@@ -1309,7 +1309,7 @@ def copy( # type: ignore[override]
new_index._id = self._id
return new_index
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype=None, copy=None) -> np.ndarray:
"""the array interface, return my values"""
return self.values
@@ -3397,7 +3397,7 @@ def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
locs = (level_codes >= idx.start) & (level_codes < idx.stop)
return locs
- locs = np.array(level_codes == idx, dtype=bool, copy=False)
+ locs = np.asarray(level_codes == idx, dtype=bool)
if not locs.any():
# The label is present in self.levels[level] but unused:
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 220bb1133dfd5..2e0e04717373f 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1682,6 +1682,8 @@ def as_array(
na_value=na_value,
copy=copy,
).reshape(blk.shape)
+ elif not copy:
+ arr = np.asarray(blk.values, dtype=dtype)
else:
arr = np.array(blk.values, dtype=dtype, copy=copy)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index c6a905cbb6ec1..236085c2a62e1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -971,7 +971,9 @@ def view(self, dtype: Dtype | None = None) -> Series:
# ----------------------------------------------------------------------
# NDArray Compat
- def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:
+ def __array__(
+ self, dtype: npt.DTypeLike | None = None, copy: bool | None = None
+ ) -> np.ndarray:
"""
Return the values as a NumPy array.
@@ -984,6 +986,9 @@ def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:
The dtype to use for the resulting NumPy array. By default,
the dtype is inferred from the data.
+ copy : bool or None, optional
+ Unused.
+
Returns
-------
numpy.ndarray
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 1139519d2bcd3..13c2f10785124 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -4065,7 +4065,7 @@ def _create_axes(
if isinstance(data_converted.dtype, CategoricalDtype):
ordered = data_converted.ordered
meta = "category"
- metadata = np.array(data_converted.categories, copy=False).ravel()
+ metadata = np.asarray(data_converted.categories).ravel()
data, dtype_name = _get_data_and_dtype_name(data_converted)
diff --git a/pandas/tests/arrays/integer/test_arithmetic.py b/pandas/tests/arrays/integer/test_arithmetic.py
index d979dd445a61a..8acd298f37a07 100644
--- a/pandas/tests/arrays/integer/test_arithmetic.py
+++ b/pandas/tests/arrays/integer/test_arithmetic.py
@@ -197,6 +197,7 @@ def test_error_invalid_values(data, all_arithmetic_operators, using_infer_string
"Addition/subtraction of integers and integer-arrays with Timestamp",
"has no kernel",
"not implemented",
+ "The 'out' kwarg is necessary. Use numpy.strings.multiply without it.",
]
)
with pytest.raises(errs, match=msg):
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 82524ea115019..7f85c891afeed 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -12,6 +12,7 @@
Timestamp,
)
from pandas._libs.tslibs.dtypes import freq_to_period_freqstr
+from pandas.compat.numpy import np_version_gt2
import pandas as pd
from pandas import (
@@ -638,13 +639,14 @@ def test_round(self, arr1d):
def test_array_interface(self, datetime_index):
arr = datetime_index._data
+ copy_false = None if np_version_gt2 else False
# default asarray gives the same underlying data (for tz naive)
result = np.asarray(arr)
expected = arr._ndarray
assert result is expected
tm.assert_numpy_array_equal(result, expected)
- result = np.array(arr, copy=False)
+ result = np.array(arr, copy=copy_false)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
@@ -653,7 +655,7 @@ def test_array_interface(self, datetime_index):
expected = arr._ndarray
assert result is expected
tm.assert_numpy_array_equal(result, expected)
- result = np.array(arr, dtype="datetime64[ns]", copy=False)
+ result = np.array(arr, dtype="datetime64[ns]", copy=copy_false)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="datetime64[ns]")
@@ -696,6 +698,7 @@ def test_array_tz(self, arr1d):
# GH#23524
arr = arr1d
dti = self.index_cls(arr1d)
+ copy_false = None if np_version_gt2 else False
expected = dti.asi8.view("M8[ns]")
result = np.array(arr, dtype="M8[ns]")
@@ -704,17 +707,18 @@ def test_array_tz(self, arr1d):
result = np.array(arr, dtype="datetime64[ns]")
tm.assert_numpy_array_equal(result, expected)
- # check that we are not making copies when setting copy=False
- result = np.array(arr, dtype="M8[ns]", copy=False)
+ # check that we are not making copies when setting copy=copy_false
+ result = np.array(arr, dtype="M8[ns]", copy=copy_false)
assert result.base is expected.base
assert result.base is not None
- result = np.array(arr, dtype="datetime64[ns]", copy=False)
+ result = np.array(arr, dtype="datetime64[ns]", copy=copy_false)
assert result.base is expected.base
assert result.base is not None
def test_array_i8_dtype(self, arr1d):
arr = arr1d
dti = self.index_cls(arr1d)
+ copy_false = None if np_version_gt2 else False
expected = dti.asi8
result = np.array(arr, dtype="i8")
@@ -723,8 +727,8 @@ def test_array_i8_dtype(self, arr1d):
result = np.array(arr, dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
- # check that we are still making copies when setting copy=False
- result = np.array(arr, dtype="i8", copy=False)
+ # check that we are still making copies when setting copy=copy_false
+ result = np.array(arr, dtype="i8", copy=copy_false)
assert result.base is not expected.base
assert result.base is None
@@ -950,13 +954,14 @@ def test_int_properties(self, timedelta_index, propname):
def test_array_interface(self, timedelta_index):
arr = timedelta_index._data
+ copy_false = None if np_version_gt2 else False
# default asarray gives the same underlying data
result = np.asarray(arr)
expected = arr._ndarray
assert result is expected
tm.assert_numpy_array_equal(result, expected)
- result = np.array(arr, copy=False)
+ result = np.array(arr, copy=copy_false)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
@@ -965,7 +970,7 @@ def test_array_interface(self, timedelta_index):
expected = arr._ndarray
assert result is expected
tm.assert_numpy_array_equal(result, expected)
- result = np.array(arr, dtype="timedelta64[ns]", copy=False)
+ result = np.array(arr, dtype="timedelta64[ns]", copy=copy_false)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="timedelta64[ns]")
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 49eb06c299886..0567be737c681 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -112,8 +112,8 @@ def it_outer():
def __len__(self) -> int:
return len(self._values)
- def __array__(self, t=None):
- return np.asarray(self._values, dtype=t)
+ def __array__(self, dtype=None, copy=None):
+ return np.asarray(self._values, dtype=dtype)
@property
def ndim(self):
diff --git a/pandas/tests/extension/array_with_attr/array.py b/pandas/tests/extension/array_with_attr/array.py
index d0249d9af8098..2789d51ec2ce3 100644
--- a/pandas/tests/extension/array_with_attr/array.py
+++ b/pandas/tests/extension/array_with_attr/array.py
@@ -49,7 +49,10 @@ def __init__(self, values, attr=None) -> None:
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy=False):
- data = np.array(scalars, dtype="float64", copy=copy)
+ if not copy:
+ data = np.asarray(scalars, dtype="float64")
+ else:
+ data = np.array(scalars, dtype="float64", copy=copy)
return cls(data)
def __getitem__(self, item):
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 31f44f886add7..e43b50322bb92 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -146,7 +146,7 @@ def __eq__(self, other):
def __ne__(self, other):
return NotImplemented
- def __array__(self, dtype=None):
+ def __array__(self, dtype=None, copy=None):
if dtype is None:
dtype = object
if dtype == object:
@@ -210,8 +210,10 @@ def astype(self, dtype, copy=True):
value = self.astype(str) # numpy doesn't like nested dicts
arr_cls = dtype.construct_array_type()
return arr_cls._from_sequence(value, dtype=dtype, copy=False)
-
- return np.array([dict(x) for x in self], dtype=dtype, copy=copy)
+ elif not copy:
+ return np.asarray([dict(x) for x in self], dtype=dtype)
+ else:
+ return np.array([dict(x) for x in self], dtype=dtype, copy=copy)
def unique(self):
# Parent method doesn't work since np.array will try to infer
diff --git a/pandas/tests/extension/list/array.py b/pandas/tests/extension/list/array.py
index f07585c0aec10..b3bb35c9396f4 100644
--- a/pandas/tests/extension/list/array.py
+++ b/pandas/tests/extension/list/array.py
@@ -115,7 +115,10 @@ def astype(self, dtype, copy=True):
elif is_string_dtype(dtype) and not is_object_dtype(dtype):
# numpy has problems with astype(str) for nested elements
return np.array([str(x) for x in self.data], dtype=dtype)
- return np.array(self.data, dtype=dtype, copy=copy)
+ elif not copy:
+ return np.asarray(self.data, dtype=dtype)
+ else:
+ return np.array(self.data, dtype=dtype, copy=copy)
@classmethod
def _concat_same_type(cls, to_concat):
diff --git a/pandas/tests/extension/test_common.py b/pandas/tests/extension/test_common.py
index 3d8523f344d46..5eda0f00f54ca 100644
--- a/pandas/tests/extension/test_common.py
+++ b/pandas/tests/extension/test_common.py
@@ -17,7 +17,7 @@ class DummyArray(ExtensionArray):
def __init__(self, data) -> None:
self.data = data
- def __array__(self, dtype):
+ def __array__(self, dtype=None, copy=None):
return self.data
@property
@@ -30,8 +30,10 @@ def astype(self, dtype, copy=True):
if copy:
return type(self)(self.data)
return self
-
- return np.array(self, dtype=dtype, copy=copy)
+ elif not copy:
+ return np.asarray(self, dtype=dtype)
+ else:
+ return np.array(self, dtype=dtype, copy=copy)
class TestExtensionArrayDtype:
diff --git a/pandas/tests/frame/methods/test_select_dtypes.py b/pandas/tests/frame/methods/test_select_dtypes.py
index 47c479faed1ef..d1bee6a3de613 100644
--- a/pandas/tests/frame/methods/test_select_dtypes.py
+++ b/pandas/tests/frame/methods/test_select_dtypes.py
@@ -32,7 +32,7 @@ def __init__(self, data, dtype) -> None:
self.data = data
self._dtype = dtype
- def __array__(self, dtype):
+ def __array__(self, dtype=None, copy=None):
return self.data
@property
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 42ce658701355..0593de7556406 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -59,7 +59,7 @@ def __init__(self, value, dtype) -> None:
self.value = value
self.dtype = np.dtype(dtype)
- def __array__(self):
+ def __array__(self, dtype=None, copy=None):
return np.array(self.value, dtype=self.dtype)
def __str__(self) -> str:
diff --git a/pandas/tests/indexes/test_index_new.py b/pandas/tests/indexes/test_index_new.py
index 72641077c90fe..6042e5b9cc679 100644
--- a/pandas/tests/indexes/test_index_new.py
+++ b/pandas/tests/indexes/test_index_new.py
@@ -413,7 +413,7 @@ class ArrayLike:
def __init__(self, array) -> None:
self.array = array
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype=None, copy=None) -> np.ndarray:
return self.array
expected = Index(array)
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/57740 | 2024-03-05T22:00:43Z | 2024-03-06T00:08:18Z | 2024-03-06T00:08:17Z | 2024-03-06T00:08:21Z |
CI/TST: Use worksteal over loadfile for pytest-xdist | diff --git a/ci/deps/actions-311-numpydev.yaml b/ci/deps/actions-311-numpydev.yaml
index b62e8630f2059..61a0eabbf133c 100644
--- a/ci/deps/actions-311-numpydev.yaml
+++ b/ci/deps/actions-311-numpydev.yaml
@@ -13,10 +13,7 @@ dependencies:
# test dependencies
- pytest>=7.3.2
- pytest-cov
- # Once pytest-cov > 4 comes out, unpin this
- # Right now, a DeprecationWarning related to rsyncdir
- # causes an InternalError within pytest
- - pytest-xdist>=2.2.0, <3
+ - pytest-xdist>=2.2.0
- hypothesis>=6.46.1
# pandas dependencies
diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index 39ab0890a32d1..d2c2f58427a23 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -10,7 +10,7 @@ echo PYTHONHASHSEED=$PYTHONHASHSEED
COVERAGE="-s --cov=pandas --cov-report=xml --cov-append --cov-config=pyproject.toml"
-PYTEST_CMD="MESONPY_EDITABLE_VERBOSE=1 PYTHONDEVMODE=1 PYTHONWARNDEFAULTENCODING=1 pytest -r fE -n $PYTEST_WORKERS --dist=loadfile $TEST_ARGS $COVERAGE $PYTEST_TARGET"
+PYTEST_CMD="MESONPY_EDITABLE_VERBOSE=1 PYTHONDEVMODE=1 PYTHONWARNDEFAULTENCODING=1 pytest -r fE -n $PYTEST_WORKERS --dist=worksteal $TEST_ARGS $COVERAGE $PYTEST_TARGET"
if [[ "$PATTERN" ]]; then
PYTEST_CMD="$PYTEST_CMD -m \"$PATTERN\""
| In theory, this should help alleviate a worker that is running tests from a larger file. Let's see if it saves any time on the CI. | https://api.github.com/repos/pandas-dev/pandas/pulls/57737 | 2024-03-05T18:12:48Z | 2024-03-26T20:35:01Z | 2024-03-26T20:35:00Z | 2024-03-26T20:35:03Z |
Backport PR #57721 on branch 2.2.x (update from 2022 to 2024 image) | diff --git a/.circleci/config.yml b/.circleci/config.yml
index 90afb1ce29684..ea93575ac9430 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -3,7 +3,7 @@ version: 2.1
jobs:
test-arm:
machine:
- image: ubuntu-2004:2022.04.1
+ image: default
resource_class: arm.large
environment:
ENV_FILE: ci/deps/circle-310-arm64.yaml
@@ -46,7 +46,7 @@ jobs:
cibw-build:
type: string
machine:
- image: ubuntu-2004:2022.04.1
+ image: default
resource_class: arm.large
environment:
TRIGGER_SOURCE: << pipeline.trigger_source >>
| Backport PR #57721: update from 2022 to 2024 image | https://api.github.com/repos/pandas-dev/pandas/pulls/57729 | 2024-03-04T21:45:00Z | 2024-03-04T22:46:47Z | 2024-03-04T22:46:47Z | 2024-03-04T22:46:47Z |
Backport PR #57726: TST/CI: Fix test_repr on musl for dateutil 2.9 | diff --git a/pandas/tests/io/pytables/test_timezones.py b/pandas/tests/io/pytables/test_timezones.py
index 8c61830ebe038..c5613daf62207 100644
--- a/pandas/tests/io/pytables/test_timezones.py
+++ b/pandas/tests/io/pytables/test_timezones.py
@@ -104,7 +104,7 @@ def test_append_with_timezones(setup_path, gettz):
msg = (
r"invalid info for \[values_block_1\] for \[tz\], "
- r"existing_value \[(dateutil/.*)?US/Eastern\] "
+ r"existing_value \[(dateutil/.*)?(US/Eastern|America/New_York)\] "
r"conflicts with new value \[(dateutil/.*)?EET\]"
)
with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/scalar/timestamp/test_formats.py b/pandas/tests/scalar/timestamp/test_formats.py
index d7160597ea6d6..e7ebcccef1c86 100644
--- a/pandas/tests/scalar/timestamp/test_formats.py
+++ b/pandas/tests/scalar/timestamp/test_formats.py
@@ -88,7 +88,7 @@ def test_isoformat(ts, timespec, expected_iso):
class TestTimestampRendering:
- timezones = ["UTC", "Asia/Tokyo", "US/Eastern", "dateutil/US/Pacific"]
+ timezones = ["UTC", "Asia/Tokyo", "US/Eastern", "dateutil/America/Los_Angeles"]
@pytest.mark.parametrize("tz", timezones)
@pytest.mark.parametrize("freq", ["D", "M", "S", "N"])
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/57728 | 2024-03-04T21:06:44Z | 2024-03-04T22:45:38Z | 2024-03-04T22:45:38Z | 2024-03-04T22:45:42Z |
TST/CI: Fix test_repr on musl for dateutil 2.9 | diff --git a/pandas/tests/io/pytables/test_timezones.py b/pandas/tests/io/pytables/test_timezones.py
index b455235669636..9192804e49bd1 100644
--- a/pandas/tests/io/pytables/test_timezones.py
+++ b/pandas/tests/io/pytables/test_timezones.py
@@ -104,7 +104,7 @@ def test_append_with_timezones(setup_path, gettz):
msg = (
r"invalid info for \[values_block_1\] for \[tz\], "
- r"existing_value \[(dateutil/.*)?US/Eastern\] "
+ r"existing_value \[(dateutil/.*)?(US/Eastern|America/New_York)\] "
r"conflicts with new value \[(dateutil/.*)?EET\]"
)
with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/scalar/timestamp/test_formats.py b/pandas/tests/scalar/timestamp/test_formats.py
index 6a578b0a9eb09..b4493088acb31 100644
--- a/pandas/tests/scalar/timestamp/test_formats.py
+++ b/pandas/tests/scalar/timestamp/test_formats.py
@@ -89,7 +89,7 @@ def test_isoformat(ts, timespec, expected_iso):
class TestTimestampRendering:
@pytest.mark.parametrize(
- "tz", ["UTC", "Asia/Tokyo", "US/Eastern", "dateutil/US/Pacific"]
+ "tz", ["UTC", "Asia/Tokyo", "US/Eastern", "dateutil/America/Los_Angeles"]
)
@pytest.mark.parametrize("freq", ["D", "M", "S", "N"])
@pytest.mark.parametrize(
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/57726 | 2024-03-04T19:29:02Z | 2024-03-04T21:03:44Z | 2024-03-04T21:03:44Z | 2024-03-04T21:13:37Z |
ENH: vendor typing_extensions | diff --git a/pandas/_vendored/__init__.py b/pandas/_vendored/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/_vendored/typing_extensions.py b/pandas/_vendored/typing_extensions.py
new file mode 100644
index 0000000000000..53df8da175a56
--- /dev/null
+++ b/pandas/_vendored/typing_extensions.py
@@ -0,0 +1,2466 @@
+"""
+vendored copy of typing_extensions, copied from
+https://raw.githubusercontent.com/python/typing/master/typing_extensions/src_py3/typing_extensions.py
+
+on 2020-08-30.
+
+typing_extensions is distributed under the Python Software Foundation License.
+
+This is not a direct copy/paste of the original file. Changes are:
+ - this docstring
+ - ran `black`
+ - ran `isort`
+ - edited strings split by black to adhere to pandas style conventions
+ - AsyncContextManager is defined without `exec`
+ - python2-style super usages are updated
+ - replace foo[dot]__class__ with type(foo)
+ - Change a comment-syntax annotation in a docstring to newer syntax
+"""
+
+# These are used by Protocol implementation
+# We use internal typing helpers here, but this significantly reduces
+# code duplication. (Also this is only until Protocol is in typing.)
+import abc
+import collections
+import collections.abc as collections_abc
+import contextlib
+import operator
+import sys
+import typing
+from typing import Callable, Generic, Tuple, TypeVar
+
+# After PEP 560, internal typing API was substantially reworked.
+# This is especially important for Protocol class which uses internal APIs
+# quite extensivelly.
+PEP_560 = sys.version_info[:3] >= (3, 7, 0)
+
+if PEP_560:
+ GenericMeta = TypingMeta = type
+else:
+ from typing import GenericMeta, TypingMeta
+OLD_GENERICS = False
+try:
+ from typing import _next_in_mro, _type_check, _type_vars
+except ImportError:
+ OLD_GENERICS = True
+try:
+ from typing import _subs_tree # noqa
+
+ SUBS_TREE = True
+except ImportError:
+ SUBS_TREE = False
+try:
+ from typing import _tp_cache
+except ImportError:
+
+ def _tp_cache(x):
+ return x
+
+
+try:
+ from typing import _TypingEllipsis, _TypingEmpty
+except ImportError:
+
+ class _TypingEllipsis:
+ pass
+
+ class _TypingEmpty:
+ pass
+
+
+# The two functions below are copies of typing internal helpers.
+# They are needed by _ProtocolMeta
+
+
+def _no_slots_copy(dct):
+ dict_copy = dict(dct)
+ if "__slots__" in dict_copy:
+ for slot in dict_copy["__slots__"]:
+ dict_copy.pop(slot, None)
+ return dict_copy
+
+
+def _check_generic(cls, parameters):
+ if not cls.__parameters__:
+ raise TypeError("%s is not a generic class" % repr(cls))
+ alen = len(parameters)
+ elen = len(cls.__parameters__)
+ if alen != elen:
+ raise TypeError(
+ "Too %s parameters for %s; actual %s, expected %s"
+ % ("many" if alen > elen else "few", repr(cls), alen, elen)
+ )
+
+
+if hasattr(typing, "_generic_new"):
+ _generic_new = typing._generic_new
+else:
+ # Note: The '_generic_new(...)' function is used as a part of the
+ # process of creating a generic type and was added to the typing module
+ # as of Python 3.5.3.
+ #
+ # We've defined '_generic_new(...)' below to exactly match the behavior
+ # implemented in older versions of 'typing' bundled with Python 3.5.0 to
+ # 3.5.2. This helps eliminate redundancy when defining collection types
+ # like 'Deque' later.
+ #
+ # See https://github.com/python/typing/pull/308 for more details -- in
+ # particular, compare and contrast the definition of types like
+ # 'typing.List' before and after the merge.
+
+ def _generic_new(base_cls, cls, *args, **kwargs):
+ return base_cls.__new__(cls, *args, **kwargs)
+
+
+# See https://github.com/python/typing/pull/439
+if hasattr(typing, "_geqv"):
+ from typing import _geqv
+
+ _geqv_defined = True
+else:
+ _geqv = None
+ _geqv_defined = False
+
+if sys.version_info[:2] >= (3, 6):
+ import _collections_abc
+
+ _check_methods_in_mro = _collections_abc._check_methods
+else:
+
+ def _check_methods_in_mro(C, *methods):
+ mro = C.__mro__
+ for method in methods:
+ for B in mro:
+ if method in B.__dict__:
+ if B.__dict__[method] is None:
+ return NotImplemented
+ break
+ else:
+ return NotImplemented
+ return True
+
+
+# Please keep __all__ alphabetized within each category.
+__all__ = [
+ # Super-special typing primitives.
+ "ClassVar",
+ "Final",
+ "Type",
+ # ABCs (from collections.abc).
+ # The following are added depending on presence
+ # of their non-generic counterparts in stdlib:
+ # 'Awaitable',
+ # 'AsyncIterator',
+ # 'AsyncIterable',
+ # 'Coroutine',
+ # 'AsyncGenerator',
+ # 'AsyncContextManager',
+ # 'ChainMap',
+ # Concrete collection types.
+ "ContextManager",
+ "Counter",
+ "Deque",
+ "DefaultDict",
+ "TypedDict",
+ # Structural checks, a.k.a. protocols.
+ "SupportsIndex",
+ # One-off things.
+ "final",
+ "IntVar",
+ "Literal",
+ "NewType",
+ "overload",
+ "Text",
+ "TYPE_CHECKING",
+]
+
+# Annotated relies on substitution trees of pep 560. It will not work for
+# versions of typing older than 3.5.3
+HAVE_ANNOTATED = PEP_560 or SUBS_TREE
+
+if PEP_560:
+ __all__.extend(["get_args", "get_origin", "get_type_hints"])
+
+if HAVE_ANNOTATED:
+ __all__.append("Annotated")
+
+# Protocols are hard to backport to the original version of typing 3.5.0
+HAVE_PROTOCOLS = sys.version_info[:3] != (3, 5, 0)
+
+if HAVE_PROTOCOLS:
+ __all__.extend(["Protocol", "runtime", "runtime_checkable"])
+
+
+# TODO
+if hasattr(typing, "NoReturn"):
+ NoReturn = typing.NoReturn
+elif hasattr(typing, "_FinalTypingBase"):
+
+ class _NoReturn(typing._FinalTypingBase, _root=True):
+ """Special type indicating functions that never return.
+ Example::
+
+ from typing import NoReturn
+
+ def stop() -> NoReturn:
+ raise Exception('no way')
+
+ This type is invalid in other positions, e.g., ``List[NoReturn]``
+ will fail in static type checkers.
+ """
+
+ __slots__ = ()
+
+ def __instancecheck__(self, obj):
+ raise TypeError("NoReturn cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("NoReturn cannot be used with issubclass().")
+
+ NoReturn = _NoReturn(_root=True)
+else:
+
+ class _NoReturnMeta(typing.TypingMeta):
+ """Metaclass for NoReturn"""
+
+ def __new__(cls, name, bases, namespace, _root=False):
+ return super().__new__(cls, name, bases, namespace, _root=_root)
+
+ def __instancecheck__(self, obj):
+ raise TypeError("NoReturn cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("NoReturn cannot be used with issubclass().")
+
+ class NoReturn(typing.Final, metaclass=_NoReturnMeta, _root=True):
+ """Special type indicating functions that never return.
+ Example::
+
+ from typing import NoReturn
+
+ def stop() -> NoReturn:
+ raise Exception('no way')
+
+ This type is invalid in other positions, e.g., ``List[NoReturn]``
+ will fail in static type checkers.
+ """
+
+ __slots__ = ()
+
+
+# Some unconstrained type variables. These are used by the container types.
+# (These are not for export.)
+T = typing.TypeVar("T") # Any type.
+KT = typing.TypeVar("KT") # Key type.
+VT = typing.TypeVar("VT") # Value type.
+T_co = typing.TypeVar("T_co", covariant=True) # Any type covariant containers.
+V_co = typing.TypeVar("V_co", covariant=True) # Any type covariant containers.
+VT_co = typing.TypeVar("VT_co", covariant=True) # Value type covariant containers.
+T_contra = typing.TypeVar("T_contra", contravariant=True) # Ditto contravariant.
+
+
+if hasattr(typing, "ClassVar"):
+ ClassVar = typing.ClassVar
+elif hasattr(typing, "_FinalTypingBase"):
+
+ class _ClassVar(typing._FinalTypingBase, _root=True):
+ """Special type construct to mark class variables.
+
+ An annotation wrapped in ClassVar indicates that a given
+ attribute is intended to be used as a class variable and
+ should not be set on instances of that class. Usage::
+
+ class Starship:
+ stats: ClassVar[Dict[str, int]] = {} # class variable
+ damage: int = 10 # instance variable
+
+ ClassVar accepts only types and cannot be further subscribed.
+
+ Note that ClassVar is not a class itself, and should not
+ be used with isinstance() or issubclass().
+ """
+
+ __slots__ = ("__type__",)
+
+ def __init__(self, tp=None, **kwds):
+ self.__type__ = tp
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__type__ is None:
+ return cls(
+ typing._type_check(
+ item, "{} accepts only single type.".format(cls.__name__[1:])
+ ),
+ _root=True,
+ )
+ raise TypeError("{} cannot be further subscripted".format(cls.__name__[1:]))
+
+ def _eval_type(self, globalns, localns):
+ new_tp = typing._eval_type(self.__type__, globalns, localns)
+ if new_tp == self.__type__:
+ return self
+ return type(self)(new_tp, _root=True)
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__type__ is not None:
+ r += "[{}]".format(typing._type_repr(self.__type__))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__type__))
+
+ def __eq__(self, other):
+ if not isinstance(other, _ClassVar):
+ return NotImplemented
+ if self.__type__ is not None:
+ return self.__type__ == other.__type__
+ return self is other
+
+ ClassVar = _ClassVar(_root=True)
+else:
+
+ class _ClassVarMeta(typing.TypingMeta):
+ """Metaclass for ClassVar"""
+
+ def __new__(cls, name, bases, namespace, tp=None, _root=False):
+ self = super().__new__(cls, name, bases, namespace, _root=_root)
+ if tp is not None:
+ self.__type__ = tp
+ return self
+
+ def __instancecheck__(self, obj):
+ raise TypeError("ClassVar cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("ClassVar cannot be used with issubclass().")
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__type__ is not None:
+ raise TypeError(
+ "{} cannot be further subscripted".format(cls.__name__[1:])
+ )
+
+ param = typing._type_check(
+ item, "{} accepts only single type.".format(cls.__name__[1:])
+ )
+ return cls(
+ self.__name__, self.__bases__, dict(self.__dict__), tp=param, _root=True
+ )
+
+ def _eval_type(self, globalns, localns):
+ new_tp = typing._eval_type(self.__type__, globalns, localns)
+ if new_tp == self.__type__:
+ return self
+ return type(self)(
+ self.__name__,
+ self.__bases__,
+ dict(self.__dict__),
+ tp=self.__type__,
+ _root=True,
+ )
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__type__ is not None:
+ r += "[{}]".format(typing._type_repr(self.__type__))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__type__))
+
+ def __eq__(self, other):
+ if not isinstance(other, ClassVar):
+ return NotImplemented
+ if self.__type__ is not None:
+ return self.__type__ == other.__type__
+ return self is other
+
+ class ClassVar(typing.Final, metaclass=_ClassVarMeta, _root=True):
+ """Special type construct to mark class variables.
+
+ An annotation wrapped in ClassVar indicates that a given
+ attribute is intended to be used as a class variable and
+ should not be set on instances of that class. Usage::
+
+ class Starship:
+ stats: ClassVar[Dict[str, int]] = {} # class variable
+ damage: int = 10 # instance variable
+
+ ClassVar accepts only types and cannot be further subscribed.
+
+ Note that ClassVar is not a class itself, and should not
+ be used with isinstance() or issubclass().
+ """
+
+ __type__ = None
+
+
+# On older versions of typing there is an internal class named "Final".
+if hasattr(typing, "Final") and sys.version_info[:2] >= (3, 7):
+ Final = typing.Final
+elif sys.version_info[:2] >= (3, 7):
+
+ class _FinalForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return "typing_extensions." + self._name
+
+ def __getitem__(self, parameters):
+ item = typing._type_check(
+ parameters, "{} accepts only single type".format(self._name)
+ )
+ return _GenericAlias(self, (item,))
+
+ Final = _FinalForm(
+ "Final",
+ doc="""A special typing construct to indicate that a name
+ cannot be re-assigned or overridden in a subclass.
+ For example:
+
+ MAX_SIZE: Final = 9000
+ MAX_SIZE += 1 # Error reported by type checker
+
+ class Connection:
+ TIMEOUT: Final[int] = 10
+ class FastConnector(Connection):
+ TIMEOUT = 1 # Error reported by type checker
+
+ There is no runtime checking of these properties.""",
+ )
+elif hasattr(typing, "_FinalTypingBase"):
+
+ class _Final(typing._FinalTypingBase, _root=True):
+ """A special typing construct to indicate that a name
+ cannot be re-assigned or overridden in a subclass.
+ For example:
+
+ MAX_SIZE: Final = 9000
+ MAX_SIZE += 1 # Error reported by type checker
+
+ class Connection:
+ TIMEOUT: Final[int] = 10
+ class FastConnector(Connection):
+ TIMEOUT = 1 # Error reported by type checker
+
+ There is no runtime checking of these properties.
+ """
+
+ __slots__ = ("__type__",)
+
+ def __init__(self, tp=None, **kwds):
+ self.__type__ = tp
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__type__ is None:
+ return cls(
+ typing._type_check(
+ item, "{} accepts only single type.".format(cls.__name__[1:])
+ ),
+ _root=True,
+ )
+ raise TypeError("{} cannot be further subscripted".format(cls.__name__[1:]))
+
+ def _eval_type(self, globalns, localns):
+ new_tp = typing._eval_type(self.__type__, globalns, localns)
+ if new_tp == self.__type__:
+ return self
+ return type(self)(new_tp, _root=True)
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__type__ is not None:
+ r += "[{}]".format(typing._type_repr(self.__type__))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__type__))
+
+ def __eq__(self, other):
+ if not isinstance(other, _Final):
+ return NotImplemented
+ if self.__type__ is not None:
+ return self.__type__ == other.__type__
+ return self is other
+
+ Final = _Final(_root=True)
+else:
+
+ class _FinalMeta(typing.TypingMeta):
+ """Metaclass for Final"""
+
+ def __new__(cls, name, bases, namespace, tp=None, _root=False):
+ self = super().__new__(cls, name, bases, namespace, _root=_root)
+ if tp is not None:
+ self.__type__ = tp
+ return self
+
+ def __instancecheck__(self, obj):
+ raise TypeError("Final cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("Final cannot be used with issubclass().")
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__type__ is not None:
+ raise TypeError(
+ "{} cannot be further subscripted".format(cls.__name__[1:])
+ )
+
+ param = typing._type_check(
+ item, "{} accepts only single type.".format(cls.__name__[1:])
+ )
+ return cls(
+ self.__name__, self.__bases__, dict(self.__dict__), tp=param, _root=True
+ )
+
+ def _eval_type(self, globalns, localns):
+ new_tp = typing._eval_type(self.__type__, globalns, localns)
+ if new_tp == self.__type__:
+ return self
+ return type(self)(
+ self.__name__,
+ self.__bases__,
+ dict(self.__dict__),
+ tp=self.__type__,
+ _root=True,
+ )
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__type__ is not None:
+ r += "[{}]".format(typing._type_repr(self.__type__))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__type__))
+
+ def __eq__(self, other):
+ if not isinstance(other, Final):
+ return NotImplemented
+ if self.__type__ is not None:
+ return self.__type__ == other.__type__
+ return self is other
+
+ class Final(typing.Final, metaclass=_FinalMeta, _root=True):
+ """A special typing construct to indicate that a name
+ cannot be re-assigned or overridden in a subclass.
+ For example:
+
+ MAX_SIZE: Final = 9000
+ MAX_SIZE += 1 # Error reported by type checker
+
+ class Connection:
+ TIMEOUT: Final[int] = 10
+ class FastConnector(Connection):
+ TIMEOUT = 1 # Error reported by type checker
+
+ There is no runtime checking of these properties.
+ """
+
+ __type__ = None
+
+
+if hasattr(typing, "final"):
+ final = typing.final
+else:
+
+ def final(f):
+ """This decorator can be used to indicate to type checkers that
+ the decorated method cannot be overridden, and decorated class
+ cannot be subclassed. For example:
+
+ class Base:
+ @final
+ def done(self) -> None:
+ ...
+ class Sub(Base):
+ def done(self) -> None: # Error reported by type checker
+ ...
+ @final
+ class Leaf:
+ ...
+ class Other(Leaf): # Error reported by type checker
+ ...
+
+ There is no runtime checking of these properties.
+ """
+ return f
+
+
+def IntVar(name):
+ return TypeVar(name)
+
+
+if hasattr(typing, "Literal"):
+ Literal = typing.Literal
+elif sys.version_info[:2] >= (3, 7):
+
+ class _LiteralForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return "typing_extensions." + self._name
+
+ def __getitem__(self, parameters):
+ return _GenericAlias(self, parameters)
+
+ Literal = _LiteralForm(
+ "Literal",
+ doc="""A type that can be used to indicate to type checkers
+ that the corresponding value has a value literally equivalent
+ to the provided parameter. For example:
+
+ var: Literal[4] = 4
+
+ The type checker understands that 'var' is literally equal to
+ the value 4 and no other value.
+
+ Literal[...] cannot be subclassed. There is no runtime
+ checking verifying that the parameter is actually a value
+ instead of a type.""",
+ )
+elif hasattr(typing, "_FinalTypingBase"):
+
+ class _Literal(typing._FinalTypingBase, _root=True):
+ """A type that can be used to indicate to type checkers that the
+ corresponding value has a value literally equivalent to the
+ provided parameter. For example:
+
+ var: Literal[4] = 4
+
+ The type checker understands that 'var' is literally equal to the
+ value 4 and no other value.
+
+ Literal[...] cannot be subclassed. There is no runtime checking
+ verifying that the parameter is actually a value instead of a type.
+ """
+
+ __slots__ = ("__values__",)
+
+ def __init__(self, values=None, **kwds):
+ self.__values__ = values
+
+ def __getitem__(self, values):
+ cls = type(self)
+ if self.__values__ is None:
+ if not isinstance(values, tuple):
+ values = (values,)
+ return cls(values, _root=True)
+ raise TypeError("{} cannot be further subscripted".format(cls.__name__[1:]))
+
+ def _eval_type(self, globalns, localns):
+ return self
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__values__ is not None:
+ r += "[{}]".format(", ".join(map(typing._type_repr, self.__values__)))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__values__))
+
+ def __eq__(self, other):
+ if not isinstance(other, _Literal):
+ return NotImplemented
+ if self.__values__ is not None:
+ return self.__values__ == other.__values__
+ return self is other
+
+ Literal = _Literal(_root=True)
+else:
+
+ class _LiteralMeta(typing.TypingMeta):
+ """Metaclass for Literal"""
+
+ def __new__(cls, name, bases, namespace, values=None, _root=False):
+ self = super().__new__(cls, name, bases, namespace, _root=_root)
+ if values is not None:
+ self.__values__ = values
+ return self
+
+ def __instancecheck__(self, obj):
+ raise TypeError("Literal cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("Literal cannot be used with issubclass().")
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__values__ is not None:
+ raise TypeError(
+ "{} cannot be further subscripted".format(cls.__name__[1:])
+ )
+
+ if not isinstance(item, tuple):
+ item = (item,)
+ return cls(
+ self.__name__,
+ self.__bases__,
+ dict(self.__dict__),
+ values=item,
+ _root=True,
+ )
+
+ def _eval_type(self, globalns, localns):
+ return self
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__values__ is not None:
+ r += "[{}]".format(", ".join(map(typing._type_repr, self.__values__)))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__values__))
+
+ def __eq__(self, other):
+ if not isinstance(other, Literal):
+ return NotImplemented
+ if self.__values__ is not None:
+ return self.__values__ == other.__values__
+ return self is other
+
+ class Literal(typing.Final, metaclass=_LiteralMeta, _root=True):
+ """A type that can be used to indicate to type checkers that the
+ corresponding value has a value literally equivalent to the
+ provided parameter. For example:
+
+ var: Literal[4] = 4
+
+ The type checker understands that 'var' is literally equal to the
+ value 4 and no other value.
+
+ Literal[...] cannot be subclassed. There is no runtime checking
+ verifying that the parameter is actually a value instead of a type.
+ """
+
+ __values__ = None
+
+
+def _overload_dummy(*args, **kwds):
+ """Helper for @overload to raise when called."""
+ raise NotImplementedError(
+ "You should not call an overloaded function. "
+ "A series of @overload-decorated functions "
+ "outside a stub module should always be followed "
+ "by an implementation that is not @overload-ed."
+ )
+
+
+def overload(func):
+ """Decorator for overloaded functions/methods.
+
+ In a stub file, place two or more stub definitions for the same
+ function in a row, each decorated with @overload. For example:
+
+ @overload
+ def utf8(value: None) -> None: ...
+ @overload
+ def utf8(value: bytes) -> bytes: ...
+ @overload
+ def utf8(value: str) -> bytes: ...
+
+ In a non-stub file (i.e. a regular .py file), do the same but
+ follow it with an implementation. The implementation should *not*
+ be decorated with @overload. For example:
+
+ @overload
+ def utf8(value: None) -> None: ...
+ @overload
+ def utf8(value: bytes) -> bytes: ...
+ @overload
+ def utf8(value: str) -> bytes: ...
+ def utf8(value):
+ # implementation goes here
+ """
+ return _overload_dummy
+
+
+# This is not a real generic class. Don't use outside annotations.
+if hasattr(typing, "Type"):
+ Type = typing.Type
+else:
+ # Internal type variable used for Type[].
+ CT_co = typing.TypeVar("CT_co", covariant=True, bound=type)
+
+ class Type(typing.Generic[CT_co], extra=type):
+ """A special construct usable to annotate class objects.
+
+ For example, suppose we have the following classes::
+
+ class User: ... # Abstract base for User classes
+ class BasicUser(User): ...
+ class ProUser(User): ...
+ class TeamUser(User): ...
+
+ And a function that takes a class argument that's a subclass of
+ User and returns an instance of the corresponding class::
+
+ U = TypeVar('U', bound=User)
+ def new_user(user_class: Type[U]) -> U:
+ user = user_class()
+ # (Here we could write the user object to a database)
+ return user
+ joe = new_user(BasicUser)
+
+ At this point the type checker knows that joe has type BasicUser.
+ """
+
+ __slots__ = ()
+
+
+# Various ABCs mimicking those in collections.abc.
+# A few are simply re-exported for completeness.
+
+
+def _define_guard(type_name):
+ """
+ Returns True if the given type isn't defined in typing but
+ is defined in collections_abc.
+
+ Adds the type to __all__ if the collection is found in either
+ typing or collection_abc.
+ """
+ if hasattr(typing, type_name):
+ __all__.append(type_name)
+ globals()[type_name] = getattr(typing, type_name)
+ return False
+ elif hasattr(collections_abc, type_name):
+ __all__.append(type_name)
+ return True
+ else:
+ return False
+
+
+class _ExtensionsGenericMeta(GenericMeta):
+ def __subclasscheck__(self, subclass):
+ """This mimics a more modern GenericMeta.__subclasscheck__() logic
+ (that does not have problems with recursion) to work around interactions
+ between collections, typing, and typing_extensions on older
+ versions of Python, see https://github.com/python/typing/issues/501.
+ """
+ if sys.version_info[:3] >= (3, 5, 3) or sys.version_info[:3] < (3, 5, 0):
+ if self.__origin__ is not None:
+ if sys._getframe(1).f_globals["__name__"] not in ["abc", "functools"]:
+ raise TypeError(
+ "Parameterized generics cannot be used with class "
+ "or instance checks"
+ )
+ return False
+ if not self.__extra__:
+ return super().__subclasscheck__(subclass)
+ res = self.__extra__.__subclasshook__(subclass)
+ if res is not NotImplemented:
+ return res
+ if self.__extra__ in subclass.__mro__:
+ return True
+ for scls in self.__extra__.__subclasses__():
+ if isinstance(scls, GenericMeta):
+ continue
+ if issubclass(subclass, scls):
+ return True
+ return False
+
+
+if _define_guard("Awaitable"):
+
+ class Awaitable(
+ typing.Generic[T_co],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections_abc.Awaitable,
+ ):
+ __slots__ = ()
+
+
+if _define_guard("Coroutine"):
+
+ class Coroutine(
+ Awaitable[V_co],
+ typing.Generic[T_co, T_contra, V_co],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections_abc.Coroutine,
+ ):
+ __slots__ = ()
+
+
+if _define_guard("AsyncIterable"):
+
+ class AsyncIterable(
+ typing.Generic[T_co],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections_abc.AsyncIterable,
+ ):
+ __slots__ = ()
+
+
+if _define_guard("AsyncIterator"):
+
+ class AsyncIterator(
+ AsyncIterable[T_co],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections_abc.AsyncIterator,
+ ):
+ __slots__ = ()
+
+
+if hasattr(typing, "Deque"):
+ Deque = typing.Deque
+elif _geqv_defined:
+
+ class Deque(
+ collections.deque,
+ typing.MutableSequence[T],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.deque,
+ ):
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if _geqv(cls, Deque):
+ return collections.deque(*args, **kwds)
+ return _generic_new(collections.deque, cls, *args, **kwds)
+
+
+else:
+
+ class Deque(
+ collections.deque,
+ typing.MutableSequence[T],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.deque,
+ ):
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is Deque:
+ return collections.deque(*args, **kwds)
+ return _generic_new(collections.deque, cls, *args, **kwds)
+
+
+if hasattr(typing, "ContextManager"):
+ ContextManager = typing.ContextManager
+elif hasattr(contextlib, "AbstractContextManager"):
+
+ class ContextManager(
+ typing.Generic[T_co],
+ metaclass=_ExtensionsGenericMeta,
+ extra=contextlib.AbstractContextManager,
+ ):
+ __slots__ = ()
+
+
+else:
+
+ class ContextManager(typing.Generic[T_co]):
+ __slots__ = ()
+
+ def __enter__(self):
+ return self
+
+ @abc.abstractmethod
+ def __exit__(self, exc_type, exc_value, traceback):
+ return None
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is ContextManager:
+ # In Python 3.6+, it is possible to set a method to None to
+ # explicitly indicate that the class does not implement an ABC
+ # (https://bugs.python.org/issue25958), but we do not support
+ # that pattern here because this fallback class is only used
+ # in Python 3.5 and earlier.
+ if any("__enter__" in B.__dict__ for B in C.__mro__) and any(
+ "__exit__" in B.__dict__ for B in C.__mro__
+ ):
+ return True
+ return NotImplemented
+
+
+if hasattr(typing, "AsyncContextManager"):
+ AsyncContextManager = typing.AsyncContextManager
+ __all__.append("AsyncContextManager")
+elif hasattr(contextlib, "AbstractAsyncContextManager"):
+
+ class AsyncContextManager(
+ typing.Generic[T_co],
+ metaclass=_ExtensionsGenericMeta,
+ extra=contextlib.AbstractAsyncContextManager,
+ ):
+ __slots__ = ()
+
+ __all__.append("AsyncContextManager")
+
+else:
+
+ class AsyncContextManager(typing.Generic[T_co]):
+ __slots__ = ()
+
+ async def __aenter__(self):
+ return self
+
+ @abc.abstractmethod
+ async def __aexit__(self, exc_type, exc_value, traceback):
+ return None
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is AsyncContextManager:
+ return _check_methods_in_mro(C, "__aenter__", "__aexit__")
+ return NotImplemented
+
+ __all__.append("AsyncContextManager")
+
+
+if hasattr(typing, "DefaultDict"):
+ DefaultDict = typing.DefaultDict
+elif _geqv_defined:
+
+ class DefaultDict(
+ collections.defaultdict,
+ typing.MutableMapping[KT, VT],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.defaultdict,
+ ):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if _geqv(cls, DefaultDict):
+ return collections.defaultdict(*args, **kwds)
+ return _generic_new(collections.defaultdict, cls, *args, **kwds)
+
+
+else:
+
+ class DefaultDict(
+ collections.defaultdict,
+ typing.MutableMapping[KT, VT],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.defaultdict,
+ ):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is DefaultDict:
+ return collections.defaultdict(*args, **kwds)
+ return _generic_new(collections.defaultdict, cls, *args, **kwds)
+
+
+if hasattr(typing, "Counter"):
+ Counter = typing.Counter
+elif (3, 5, 0) <= sys.version_info[:3] <= (3, 5, 1):
+ assert _geqv_defined
+ _TInt = typing.TypeVar("_TInt")
+
+ class _CounterMeta(typing.GenericMeta):
+ """Metaclass for Counter"""
+
+ def __getitem__(self, item):
+ return super().__getitem__((item, int))
+
+ class Counter(
+ collections.Counter,
+ typing.Dict[T, int],
+ metaclass=_CounterMeta,
+ extra=collections.Counter,
+ ):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if _geqv(cls, Counter):
+ return collections.Counter(*args, **kwds)
+ return _generic_new(collections.Counter, cls, *args, **kwds)
+
+
+elif _geqv_defined:
+
+ class Counter(
+ collections.Counter,
+ typing.Dict[T, int],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.Counter,
+ ):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if _geqv(cls, Counter):
+ return collections.Counter(*args, **kwds)
+ return _generic_new(collections.Counter, cls, *args, **kwds)
+
+
+else:
+
+ class Counter(
+ collections.Counter,
+ typing.Dict[T, int],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.Counter,
+ ):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is Counter:
+ return collections.Counter(*args, **kwds)
+ return _generic_new(collections.Counter, cls, *args, **kwds)
+
+
+if hasattr(typing, "ChainMap"):
+ ChainMap = typing.ChainMap
+ __all__.append("ChainMap")
+elif hasattr(collections, "ChainMap"):
+ # ChainMap only exists in 3.3+
+ if _geqv_defined:
+
+ class ChainMap(
+ collections.ChainMap,
+ typing.MutableMapping[KT, VT],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.ChainMap,
+ ):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if _geqv(cls, ChainMap):
+ return collections.ChainMap(*args, **kwds)
+ return _generic_new(collections.ChainMap, cls, *args, **kwds)
+
+ else:
+
+ class ChainMap(
+ collections.ChainMap,
+ typing.MutableMapping[KT, VT],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.ChainMap,
+ ):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is ChainMap:
+ return collections.ChainMap(*args, **kwds)
+ return _generic_new(collections.ChainMap, cls, *args, **kwds)
+
+ __all__.append("ChainMap")
+
+
+if _define_guard("AsyncGenerator"):
+
+ class AsyncGenerator(
+ AsyncIterator[T_co],
+ typing.Generic[T_co, T_contra],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections_abc.AsyncGenerator,
+ ):
+ __slots__ = ()
+
+
+if hasattr(typing, "NewType"):
+ NewType = typing.NewType
+else:
+
+ def NewType(name, tp):
+ """NewType creates simple unique types with almost zero
+ runtime overhead. NewType(name, tp) is considered a subtype of tp
+ by static type checkers. At runtime, NewType(name, tp) returns
+ a dummy function that simply returns its argument. Usage::
+
+ UserId = NewType('UserId', int)
+
+ def name_by_id(user_id: UserId) -> str:
+ ...
+
+ UserId('user') # Fails type check
+
+ name_by_id(42) # Fails type check
+ name_by_id(UserId(42)) # OK
+
+ num: int = UserId(5) + 1
+ """
+
+ def new_type(x):
+ return x
+
+ new_type.__name__ = name
+ new_type.__supertype__ = tp
+ return new_type
+
+
+if hasattr(typing, "Text"):
+ Text = typing.Text
+else:
+ Text = str
+
+
+if hasattr(typing, "TYPE_CHECKING"):
+ TYPE_CHECKING = typing.TYPE_CHECKING
+else:
+ # Constant that's True when type checking, but False here.
+ TYPE_CHECKING = False
+
+
+def _gorg(cls):
+ """This function exists for compatibility with old typing versions."""
+ assert isinstance(cls, GenericMeta)
+ if hasattr(cls, "_gorg"):
+ return cls._gorg
+ while cls.__origin__ is not None:
+ cls = cls.__origin__
+ return cls
+
+
+if OLD_GENERICS:
+
+ def _next_in_mro(cls): # noqa
+ """This function exists for compatibility with old typing versions."""
+ next_in_mro = object
+ for i, c in enumerate(cls.__mro__[:-1]):
+ if isinstance(c, GenericMeta) and _gorg(c) is Generic:
+ next_in_mro = cls.__mro__[i + 1]
+ return next_in_mro
+
+
+_PROTO_WHITELIST = [
+ "Callable",
+ "Awaitable",
+ "Iterable",
+ "Iterator",
+ "AsyncIterable",
+ "AsyncIterator",
+ "Hashable",
+ "Sized",
+ "Container",
+ "Collection",
+ "Reversible",
+ "ContextManager",
+ "AsyncContextManager",
+]
+
+
+def _get_protocol_attrs(cls):
+ attrs = set()
+ for base in cls.__mro__[:-1]: # without object
+ if base.__name__ in ("Protocol", "Generic"):
+ continue
+ annotations = getattr(base, "__annotations__", {})
+ for attr in list(base.__dict__.keys()) + list(annotations.keys()):
+ if not attr.startswith("_abc_") and attr not in (
+ "__abstractmethods__",
+ "__annotations__",
+ "__weakref__",
+ "_is_protocol",
+ "_is_runtime_protocol",
+ "__dict__",
+ "__args__",
+ "__slots__",
+ "__next_in_mro__",
+ "__parameters__",
+ "__origin__",
+ "__orig_bases__",
+ "__extra__",
+ "__tree_hash__",
+ "__doc__",
+ "__subclasshook__",
+ "__init__",
+ "__new__",
+ "__module__",
+ "_MutableMapping__marker",
+ "_gorg",
+ ):
+ attrs.add(attr)
+ return attrs
+
+
+def _is_callable_members_only(cls):
+ return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
+
+
+if hasattr(typing, "Protocol"):
+ Protocol = typing.Protocol
+elif HAVE_PROTOCOLS and not PEP_560:
+
+ class _ProtocolMeta(GenericMeta):
+ """Internal metaclass for Protocol.
+
+ This exists so Protocol classes can be generic without deriving
+ from Generic.
+ """
+
+ if not OLD_GENERICS:
+
+ def __new__(
+ cls,
+ name,
+ bases,
+ namespace,
+ tvars=None,
+ args=None,
+ origin=None,
+ extra=None,
+ orig_bases=None,
+ ):
+ # This is just a version copied from GenericMeta.__new__ that
+ # includes "Protocol" special treatment. (Comments removed for brevity.)
+ assert extra is None # Protocols should not have extra
+ if tvars is not None:
+ assert origin is not None
+ assert all(isinstance(t, TypeVar) for t in tvars), tvars
+ else:
+ tvars = _type_vars(bases)
+ gvars = None
+ for base in bases:
+ if base is Generic:
+ raise TypeError("Cannot inherit from plain Generic")
+ if isinstance(base, GenericMeta) and base.__origin__ in (
+ Generic,
+ Protocol,
+ ):
+ if gvars is not None:
+ raise TypeError(
+ "Cannot inherit from Generic[...] or "
+ "Protocol[...] multiple times."
+ )
+ gvars = base.__parameters__
+ if gvars is None:
+ gvars = tvars
+ else:
+ tvarset = set(tvars)
+ gvarset = set(gvars)
+ if not tvarset <= gvarset:
+ raise TypeError(
+ "Some type variables (%s) "
+ "are not listed in %s[%s]"
+ % (
+ ", ".join(
+ str(t) for t in tvars if t not in gvarset
+ ),
+ "Generic"
+ if any(b.__origin__ is Generic for b in bases)
+ else "Protocol",
+ ", ".join(str(g) for g in gvars),
+ )
+ )
+ tvars = gvars
+
+ initial_bases = bases
+ if (
+ extra is not None
+ and type(extra) is abc.ABCMeta
+ and extra not in bases
+ ):
+ bases = (extra,) + bases
+ bases = tuple(
+ _gorg(b) if isinstance(b, GenericMeta) else b for b in bases
+ )
+ if any(isinstance(b, GenericMeta) and b is not Generic for b in bases):
+ bases = tuple(b for b in bases if b is not Generic)
+ namespace.update({"__origin__": origin, "__extra__": extra})
+ self = super().__new__(cls, name, bases, namespace, _root=True)
+ super().__setattr__("_gorg", self if not origin else _gorg(origin))
+ self.__parameters__ = tvars
+ self.__args__ = (
+ tuple(
+ ... if a is _TypingEllipsis else () if a is _TypingEmpty else a
+ for a in args
+ )
+ if args
+ else None
+ )
+ self.__next_in_mro__ = _next_in_mro(self)
+ if orig_bases is None:
+ self.__orig_bases__ = initial_bases
+ elif origin is not None:
+ self._abc_registry = origin._abc_registry
+ self._abc_cache = origin._abc_cache
+ if hasattr(self, "_subs_tree"):
+ self.__tree_hash__ = (
+ hash(self._subs_tree()) if origin else super().__hash__()
+ )
+ return self
+
+ def __init__(cls, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ if not cls.__dict__.get("_is_protocol", None):
+ cls._is_protocol = any(
+ b is Protocol
+ or isinstance(b, _ProtocolMeta)
+ and b.__origin__ is Protocol
+ for b in cls.__bases__
+ )
+ if cls._is_protocol:
+ for base in cls.__mro__[1:]:
+ if not (
+ base in (object, Generic)
+ or base.__module__ == "collections.abc"
+ and base.__name__ in _PROTO_WHITELIST
+ or isinstance(base, TypingMeta)
+ and base._is_protocol
+ or isinstance(base, GenericMeta)
+ and base.__origin__ is Generic
+ ):
+ raise TypeError(
+ "Protocols can only inherit from other "
+ "protocols, got %r" % base
+ )
+
+ def _no_init(self, *args, **kwargs):
+ if type(self)._is_protocol:
+ raise TypeError("Protocols cannot be instantiated")
+
+ cls.__init__ = _no_init
+
+ def _proto_hook(other):
+ if not cls.__dict__.get("_is_protocol", None):
+ return NotImplemented
+ if not isinstance(other, type):
+ # Same error as for issubclass(1, int)
+ raise TypeError("issubclass() arg 1 must be a class")
+ for attr in _get_protocol_attrs(cls):
+ for base in other.__mro__:
+ if attr in base.__dict__:
+ if base.__dict__[attr] is None:
+ return NotImplemented
+ break
+ annotations = getattr(base, "__annotations__", {})
+ if (
+ isinstance(annotations, typing.Mapping)
+ and attr in annotations
+ and isinstance(other, _ProtocolMeta)
+ and other._is_protocol
+ ):
+ break
+ else:
+ return NotImplemented
+ return True
+
+ if "__subclasshook__" not in cls.__dict__:
+ cls.__subclasshook__ = _proto_hook
+
+ def __instancecheck__(self, instance):
+ # We need this method for situations where attributes are
+ # assigned in __init__.
+ if (
+ not getattr(self, "_is_protocol", False)
+ or _is_callable_members_only(self)
+ ) and issubclass(type(instance), self):
+ return True
+ if self._is_protocol:
+ if all(
+ hasattr(instance, attr)
+ and (
+ not callable(getattr(self, attr, None))
+ or getattr(instance, attr) is not None
+ )
+ for attr in _get_protocol_attrs(self)
+ ):
+ return True
+ return super().__instancecheck__(instance)
+
+ def __subclasscheck__(self, cls):
+ if self.__origin__ is not None:
+ if sys._getframe(1).f_globals["__name__"] not in ["abc", "functools"]:
+ raise TypeError(
+ "Parameterized generics cannot be used with class "
+ "or instance checks"
+ )
+ return False
+ if self.__dict__.get("_is_protocol", None) and not self.__dict__.get(
+ "_is_runtime_protocol", None
+ ):
+ if sys._getframe(1).f_globals["__name__"] in [
+ "abc",
+ "functools",
+ "typing",
+ ]:
+ return False
+ raise TypeError(
+ "Instance and class checks can only be used with "
+ "@runtime protocols"
+ )
+ if self.__dict__.get(
+ "_is_runtime_protocol", None
+ ) and not _is_callable_members_only(self):
+ if sys._getframe(1).f_globals["__name__"] in [
+ "abc",
+ "functools",
+ "typing",
+ ]:
+ return super().__subclasscheck__(cls)
+ raise TypeError(
+ "Protocols with non-method members don't support issubclass()"
+ )
+ return super().__subclasscheck__(cls)
+
+ if not OLD_GENERICS:
+
+ @_tp_cache
+ def __getitem__(self, params):
+ # We also need to copy this from GenericMeta.__getitem__ to get
+ # special treatment of "Protocol". (Comments removed for brevity.)
+ if not isinstance(params, tuple):
+ params = (params,)
+ if not params and _gorg(self) is not Tuple:
+ raise TypeError(
+ "Parameter list to %s[...] cannot be empty" % self.__qualname__
+ )
+ msg = "Parameters to generic types must be types."
+ params = tuple(_type_check(p, msg) for p in params)
+ if self in (Generic, Protocol):
+ if not all(isinstance(p, TypeVar) for p in params):
+ raise TypeError(
+ "Parameters to %r[...] must all be type variables" % self
+ )
+ if len(set(params)) != len(params):
+ raise TypeError(
+ "Parameters to %r[...] must all be unique" % self
+ )
+ tvars = params
+ args = params
+ elif self in (Tuple, Callable):
+ tvars = _type_vars(params)
+ args = params
+ elif self.__origin__ in (Generic, Protocol):
+ raise TypeError(
+ "Cannot subscript already-subscripted %s" % repr(self)
+ )
+ else:
+ _check_generic(self, params)
+ tvars = _type_vars(params)
+ args = params
+
+ prepend = (self,) if self.__origin__ is None else ()
+ return type(self)(
+ self.__name__,
+ prepend + self.__bases__,
+ _no_slots_copy(self.__dict__),
+ tvars=tvars,
+ args=args,
+ origin=self,
+ extra=self.__extra__,
+ orig_bases=self.__orig_bases__,
+ )
+
+ class Protocol(metaclass=_ProtocolMeta):
+ """Base class for protocol classes. Protocol classes are defined as::
+
+ class Proto(Protocol):
+ def meth(self) -> int:
+ ...
+
+ Such classes are primarily used with static type checkers that recognize
+ structural subtyping (static duck-typing), for example::
+
+ class C:
+ def meth(self) -> int:
+ return 0
+
+ def func(x: Proto) -> int:
+ return x.meth()
+
+ func(C()) # Passes static type check
+
+ See PEP 544 for details. Protocol classes decorated with
+ @typing_extensions.runtime act as simple-minded runtime protocol that checks
+ only the presence of given attributes, ignoring their type signatures.
+
+ Protocol classes can be generic, they are defined as::
+
+ class GenProto({bases}):
+ def meth(self) -> T:
+ ...
+ """
+
+ __slots__ = ()
+ _is_protocol = True
+
+ def __new__(cls, *args, **kwds):
+ if _gorg(cls) is Protocol:
+ raise TypeError(
+ "Type Protocol cannot be instantiated; "
+ "it can be used only as a base class"
+ )
+ if OLD_GENERICS:
+ return _generic_new(_next_in_mro(cls), cls, *args, **kwds)
+ return _generic_new(cls.__next_in_mro__, cls, *args, **kwds)
+
+ if Protocol.__doc__ is not None:
+ Protocol.__doc__ = Protocol.__doc__.format(
+ bases="Protocol, Generic[T]" if OLD_GENERICS else "Protocol[T]"
+ )
+
+
+elif PEP_560:
+ from typing import _collect_type_vars, _GenericAlias, _type_check # noqa
+
+ class _ProtocolMeta(abc.ABCMeta):
+ # This metaclass is a bit unfortunate and exists only because of the lack
+ # of __instancehook__.
+ def __instancecheck__(cls, instance):
+ # We need this method for situations where attributes are
+ # assigned in __init__.
+ if (
+ not getattr(cls, "_is_protocol", False)
+ or _is_callable_members_only(cls)
+ ) and issubclass(type(instance), cls):
+ return True
+ if cls._is_protocol:
+ if all(
+ hasattr(instance, attr)
+ and (
+ not callable(getattr(cls, attr, None))
+ or getattr(instance, attr) is not None
+ )
+ for attr in _get_protocol_attrs(cls)
+ ):
+ return True
+ return super().__instancecheck__(instance)
+
+ class Protocol(metaclass=_ProtocolMeta):
+ # There is quite a lot of overlapping code with typing.Generic.
+ # Unfortunately it is hard to avoid this while these live in two different
+ # modules. The duplicated code will be removed when Protocol is moved to typing.
+ """Base class for protocol classes. Protocol classes are defined as::
+
+ class Proto(Protocol):
+ def meth(self) -> int:
+ ...
+
+ Such classes are primarily used with static type checkers that recognize
+ structural subtyping (static duck-typing), for example::
+
+ class C:
+ def meth(self) -> int:
+ return 0
+
+ def func(x: Proto) -> int:
+ return x.meth()
+
+ func(C()) # Passes static type check
+
+ See PEP 544 for details. Protocol classes decorated with
+ @typing_extensions.runtime act as simple-minded runtime protocol that checks
+ only the presence of given attributes, ignoring their type signatures.
+
+ Protocol classes can be generic, they are defined as::
+
+ class GenProto(Protocol[T]):
+ def meth(self) -> T:
+ ...
+ """
+ __slots__ = ()
+ _is_protocol = True
+
+ def __new__(cls, *args, **kwds):
+ if cls is Protocol:
+ raise TypeError(
+ "Type Protocol cannot be instantiated; "
+ "it can only be used as a base class"
+ )
+ return super().__new__(cls)
+
+ @_tp_cache
+ def __class_getitem__(cls, params):
+ if not isinstance(params, tuple):
+ params = (params,)
+ if not params and cls is not Tuple:
+ raise TypeError(
+ "Parameter list to {}[...] cannot be empty".format(cls.__qualname__)
+ )
+ msg = "Parameters to generic types must be types."
+ params = tuple(_type_check(p, msg) for p in params)
+ if cls is Protocol:
+ # Generic can only be subscripted with unique type variables.
+ if not all(isinstance(p, TypeVar) for p in params):
+ i = 0
+ while isinstance(params[i], TypeVar):
+ i += 1
+ raise TypeError(
+ "Parameters to Protocol[...] must all be type variables. "
+ "Parameter {} is {}".format(i + 1, params[i])
+ )
+ if len(set(params)) != len(params):
+ raise TypeError("Parameters to Protocol[...] must all be unique")
+ else:
+ # Subscripting a regular Generic subclass.
+ _check_generic(cls, params)
+ return _GenericAlias(cls, params)
+
+ def __init_subclass__(cls, *args, **kwargs):
+ tvars = []
+ if "__orig_bases__" in cls.__dict__:
+ error = Generic in cls.__orig_bases__
+ else:
+ error = Generic in cls.__bases__
+ if error:
+ raise TypeError("Cannot inherit from plain Generic")
+ if "__orig_bases__" in cls.__dict__:
+ tvars = _collect_type_vars(cls.__orig_bases__)
+ # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
+ # If found, tvars must be a subset of it.
+ # If not found, tvars is it.
+ # Also check for and reject plain Generic,
+ # and reject multiple Generic[...] and/or Protocol[...].
+ gvars = None
+ for base in cls.__orig_bases__:
+ if isinstance(base, _GenericAlias) and base.__origin__ in (
+ Generic,
+ Protocol,
+ ):
+ # for error messages
+ the_base = (
+ "Generic" if base.__origin__ is Generic else "Protocol"
+ )
+ if gvars is not None:
+ raise TypeError(
+ "Cannot inherit from Generic[...] "
+ "and/or Protocol[...] multiple types."
+ )
+ gvars = base.__parameters__
+ if gvars is None:
+ gvars = tvars
+ else:
+ tvarset = set(tvars)
+ gvarset = set(gvars)
+ if not tvarset <= gvarset:
+ s_vars = ", ".join(str(t) for t in tvars if t not in gvarset)
+ s_args = ", ".join(str(g) for g in gvars)
+ raise TypeError(
+ "Some type variables ({}) are "
+ "not listed in {}[{}]".format(s_vars, the_base, s_args)
+ )
+ tvars = gvars
+ cls.__parameters__ = tuple(tvars)
+
+ # Determine if this is a protocol or a concrete subclass.
+ if not cls.__dict__.get("_is_protocol", None):
+ cls._is_protocol = any(b is Protocol for b in cls.__bases__)
+
+ # Set (or override) the protocol subclass hook.
+ def _proto_hook(other):
+ if not cls.__dict__.get("_is_protocol", None):
+ return NotImplemented
+ if not getattr(cls, "_is_runtime_protocol", False):
+ if sys._getframe(2).f_globals["__name__"] in ["abc", "functools"]:
+ return NotImplemented
+ raise TypeError(
+ "Instance and class checks can only be used with "
+ "@runtime protocols"
+ )
+ if not _is_callable_members_only(cls):
+ if sys._getframe(2).f_globals["__name__"] in ["abc", "functools"]:
+ return NotImplemented
+ raise TypeError(
+ "Protocols with non-method members "
+ "don't support issubclass()"
+ )
+ if not isinstance(other, type):
+ # Same error as for issubclass(1, int)
+ raise TypeError("issubclass() arg 1 must be a class")
+ for attr in _get_protocol_attrs(cls):
+ for base in other.__mro__:
+ if attr in base.__dict__:
+ if base.__dict__[attr] is None:
+ return NotImplemented
+ break
+ annotations = getattr(base, "__annotations__", {})
+ if (
+ isinstance(annotations, typing.Mapping)
+ and attr in annotations
+ and isinstance(other, _ProtocolMeta)
+ and other._is_protocol
+ ):
+ break
+ else:
+ return NotImplemented
+ return True
+
+ if "__subclasshook__" not in cls.__dict__:
+ cls.__subclasshook__ = _proto_hook
+
+ # We have nothing more to do for non-protocols.
+ if not cls._is_protocol:
+ return
+
+ # Check consistency of bases.
+ for base in cls.__bases__:
+ if not (
+ base in (object, Generic)
+ or base.__module__ == "collections.abc"
+ and base.__name__ in _PROTO_WHITELIST
+ or isinstance(base, _ProtocolMeta)
+ and base._is_protocol
+ ):
+ raise TypeError(
+ "Protocols can only inherit from other "
+ "protocols, got %r" % base
+ )
+
+ def _no_init(self, *args, **kwargs):
+ if type(self)._is_protocol:
+ raise TypeError("Protocols cannot be instantiated")
+
+ cls.__init__ = _no_init
+
+
+if hasattr(typing, "runtime_checkable"):
+ runtime_checkable = typing.runtime_checkable
+elif HAVE_PROTOCOLS:
+
+ def runtime_checkable(cls):
+ """Mark a protocol class as a runtime protocol, so that it
+ can be used with isinstance() and issubclass(). Raise TypeError
+ if applied to a non-protocol class.
+
+ This allows a simple-minded structural check very similar to the
+ one-offs in collections.abc such as Hashable.
+ """
+ if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
+ raise TypeError(
+ "@runtime_checkable can be only applied to protocol classes, "
+ "got %r" % cls
+ )
+ cls._is_runtime_protocol = True
+ return cls
+
+
+if HAVE_PROTOCOLS:
+ # Exists for backwards compatibility.
+ runtime = runtime_checkable
+
+
+if hasattr(typing, "SupportsIndex"):
+ SupportsIndex = typing.SupportsIndex
+elif HAVE_PROTOCOLS:
+
+ @runtime_checkable
+ class SupportsIndex(Protocol):
+ __slots__ = ()
+
+ @abc.abstractmethod
+ def __index__(self) -> int:
+ pass
+
+
+if sys.version_info[:2] >= (3, 9):
+ # The standard library TypedDict in Python 3.8 does not store runtime information
+ # about which (if any) keys are optional. See https://bugs.python.org/issue38834
+ TypedDict = typing.TypedDict
+else:
+
+ def _check_fails(cls, other):
+ try:
+ if sys._getframe(1).f_globals["__name__"] not in [
+ "abc",
+ "functools",
+ "typing",
+ ]:
+ # Typed dicts are only for static structural subtyping.
+ raise TypeError("TypedDict does not support instance and class checks")
+ except (AttributeError, ValueError):
+ pass
+ return False
+
+ def _dict_new(*args, **kwargs):
+ if not args:
+ raise TypeError("TypedDict.__new__(): not enough arguments")
+ _, args = args[0], args[1:] # allow the "cls" keyword be passed
+ return dict(*args, **kwargs)
+
+ _dict_new.__text_signature__ = "($cls, _typename, _fields=None, /, **kwargs)"
+
+ def _typeddict_new(*args, total=True, **kwargs):
+ if not args:
+ raise TypeError("TypedDict.__new__(): not enough arguments")
+ _, args = args[0], args[1:] # allow the "cls" keyword be passed
+ if args:
+ typename, args = (
+ args[0],
+ args[1:],
+ ) # allow the "_typename" keyword be passed
+ elif "_typename" in kwargs:
+ typename = kwargs.pop("_typename")
+ import warnings
+
+ warnings.warn(
+ "Passing '_typename' as keyword argument is deprecated",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ else:
+ raise TypeError(
+ "TypedDict.__new__() missing 1 required positional "
+ "argument: '_typename'"
+ )
+ if args:
+ try:
+ (fields,) = args # allow the "_fields" keyword be passed
+ except ValueError:
+ raise TypeError(
+ "TypedDict.__new__() takes from 2 to 3 "
+ "positional arguments but {} "
+ "were given".format(len(args) + 2)
+ )
+ elif "_fields" in kwargs and len(kwargs) == 1:
+ fields = kwargs.pop("_fields")
+ import warnings
+
+ warnings.warn(
+ "Passing '_fields' as keyword argument is deprecated",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ else:
+ fields = None
+
+ if fields is None:
+ fields = kwargs
+ elif kwargs:
+ raise TypeError(
+ "TypedDict takes either a dict or keyword arguments, but not both"
+ )
+
+ ns = {"__annotations__": dict(fields), "__total__": total}
+ try:
+ # Setting correct module is necessary to make typed dict classes pickleable.
+ ns["__module__"] = sys._getframe(1).f_globals.get("__name__", "__main__")
+ except (AttributeError, ValueError):
+ pass
+
+ return _TypedDictMeta(typename, (), ns)
+
+ _typeddict_new.__text_signature__ = (
+ "($cls, _typename, _fields=None, /, *, total=True, **kwargs)"
+ )
+
+ class _TypedDictMeta(type):
+ def __new__(cls, name, bases, ns, total=True):
+ # Create new typed dict class object.
+ # This method is called directly when TypedDict is subclassed,
+ # or via _typeddict_new when TypedDict is instantiated. This way
+ # TypedDict supports all three syntaxes described in its docstring.
+ # Subclasses and instances of TypedDict return actual dictionaries
+ # via _dict_new.
+ ns["__new__"] = _typeddict_new if name == "TypedDict" else _dict_new
+ tp_dict = super().__new__(cls, name, (dict,), ns)
+
+ annotations = {}
+ own_annotations = ns.get("__annotations__", {})
+ own_annotation_keys = set(own_annotations.keys())
+ msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
+ own_annotations = {
+ n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
+ }
+ required_keys = set()
+ optional_keys = set()
+
+ for base in bases:
+ annotations.update(base.__dict__.get("__annotations__", {}))
+ required_keys.update(base.__dict__.get("__required_keys__", ()))
+ optional_keys.update(base.__dict__.get("__optional_keys__", ()))
+
+ annotations.update(own_annotations)
+ if total:
+ required_keys.update(own_annotation_keys)
+ else:
+ optional_keys.update(own_annotation_keys)
+
+ tp_dict.__annotations__ = annotations
+ tp_dict.__required_keys__ = frozenset(required_keys)
+ tp_dict.__optional_keys__ = frozenset(optional_keys)
+ if not hasattr(tp_dict, "__total__"):
+ tp_dict.__total__ = total
+ return tp_dict
+
+ __instancecheck__ = __subclasscheck__ = _check_fails
+
+ TypedDict = _TypedDictMeta("TypedDict", (dict,), {})
+ TypedDict.__module__ = __name__
+ TypedDict.__doc__ = """A simple typed name space. At runtime it is equivalent to a plain dict.
+
+ TypedDict creates a dictionary type that expects all of its
+ instances to have a certain set of keys, with each key
+ associated with a value of a consistent type. This expectation
+ is not checked at runtime but is only enforced by type checkers.
+ Usage::
+
+ class Point2D(TypedDict):
+ x: int
+ y: int
+ label: str
+
+ a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
+ b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
+
+ assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
+
+ The type info can be accessed via the Point2D.__annotations__ dict, and
+ the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
+ TypedDict supports two additional equivalent forms::
+
+ Point2D = TypedDict('Point2D', x=int, y=int, label=str)
+ Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
+
+ The class syntax is only supported in Python 3.6+, while two other
+ syntax forms work for Python 2.7 and 3.2+
+ """
+
+
+# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints)
+if hasattr(typing, "Annotated"):
+ Annotated = typing.Annotated
+ get_type_hints = typing.get_type_hints
+ # Not exported and not a public API, but needed for get_origin() and get_args()
+ # to work.
+ _AnnotatedAlias = typing._AnnotatedAlias
+elif PEP_560:
+
+ class _AnnotatedAlias(typing._GenericAlias, _root=True):
+ """Runtime representation of an annotated type.
+
+ At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
+ with extra annotations. The alias behaves like a normal typing alias,
+ instantiating is the same as instantiating the underlying type, binding
+ it to types is also the same.
+ """
+
+ def __init__(self, origin, metadata):
+ if isinstance(origin, _AnnotatedAlias):
+ metadata = origin.__metadata__ + metadata
+ origin = origin.__origin__
+ super().__init__(origin, origin)
+ self.__metadata__ = metadata
+
+ def copy_with(self, params):
+ assert len(params) == 1
+ new_type = params[0]
+ return _AnnotatedAlias(new_type, self.__metadata__)
+
+ def __repr__(self):
+ return "typing_extensions.Annotated[{}, {}]".format(
+ typing._type_repr(self.__origin__),
+ ", ".join(repr(a) for a in self.__metadata__),
+ )
+
+ def __reduce__(self):
+ return operator.getitem, (Annotated, (self.__origin__,) + self.__metadata__)
+
+ def __eq__(self, other):
+ if not isinstance(other, _AnnotatedAlias):
+ return NotImplemented
+ if self.__origin__ != other.__origin__:
+ return False
+ return self.__metadata__ == other.__metadata__
+
+ def __hash__(self):
+ return hash((self.__origin__, self.__metadata__))
+
+ class Annotated:
+ """Add context specific metadata to a type.
+
+ Example: Annotated[int, runtime_check.Unsigned] indicates to the
+ hypothetical runtime_check module that this type is an unsigned int.
+ Every other consumer of this type can ignore this metadata and treat
+ this type as int.
+
+ The first argument to Annotated must be a valid type (and will be in
+ the __origin__ field), the remaining arguments are kept as a tuple in
+ the __extra__ field.
+
+ Details:
+
+ - It's an error to call `Annotated` with less than two arguments.
+ - Nested Annotated are flattened::
+
+ Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
+
+ - Instantiating an annotated type is equivalent to instantiating the
+ underlying type::
+
+ Annotated[C, Ann1](5) == C(5)
+
+ - Annotated can be used as a generic type alias::
+
+ Optimized = Annotated[T, runtime.Optimize()]
+ Optimized[int] == Annotated[int, runtime.Optimize()]
+
+ OptimizedList = Annotated[List[T], runtime.Optimize()]
+ OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise TypeError("Type Annotated cannot be instantiated.")
+
+ @_tp_cache
+ def __class_getitem__(cls, params):
+ if not isinstance(params, tuple) or len(params) < 2:
+ raise TypeError(
+ "Annotated[...] should be used "
+ "with at least two arguments (a type and an "
+ "annotation)."
+ )
+ msg = "Annotated[t, ...]: t must be a type."
+ origin = typing._type_check(params[0], msg)
+ metadata = tuple(params[1:])
+ return _AnnotatedAlias(origin, metadata)
+
+ def __init_subclass__(cls, *args, **kwargs):
+ raise TypeError("Cannot subclass {}.Annotated".format(cls.__module__))
+
+ def _strip_annotations(t):
+ """Strips the annotations from a given type.
+ """
+ if isinstance(t, _AnnotatedAlias):
+ return _strip_annotations(t.__origin__)
+ if isinstance(t, typing._GenericAlias):
+ stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
+ if stripped_args == t.__args__:
+ return t
+ res = t.copy_with(stripped_args)
+ res._special = t._special
+ return res
+ return t
+
+ def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
+ """Return type hints for an object.
+
+ This is often the same as obj.__annotations__, but it handles
+ forward references encoded as string literals, adds Optional[t] if a
+ default value equal to None is set and recursively replaces all
+ 'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
+
+ The argument may be a module, class, method, or function. The annotations
+ are returned as a dictionary. For classes, annotations include also
+ inherited members.
+
+ TypeError is raised if the argument is not of a type that can contain
+ annotations, and an empty dictionary is returned if no annotations are
+ present.
+
+ BEWARE -- the behavior of globalns and localns is counterintuitive
+ (unless you are familiar with how eval and exec work). The
+ search order is locals first, then globals.
+
+ - If no dict arguments are passed, an attempt is made to use the
+ globals from obj (or the respective module's globals for classes),
+ and these are also used as the locals. If the object does not appear
+ to have globals, an empty dictionary is used.
+
+ - If one dict argument is passed, it is used for both globals and
+ locals.
+
+ - If two dict arguments are passed, they specify globals and
+ locals, respectively.
+ """
+ hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
+ if include_extras:
+ return hint
+ return {k: _strip_annotations(t) for k, t in hint.items()}
+
+
+elif HAVE_ANNOTATED:
+
+ def _is_dunder(name):
+ """Returns True if name is a __dunder_variable_name__."""
+ return len(name) > 4 and name.startswith("__") and name.endswith("__")
+
+ # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality
+ # checks, argument expansion etc. are done on the _subs_tre. As a result we
+ # can't provide a get_type_hints function that strips out annotations.
+
+ class AnnotatedMeta(typing.GenericMeta):
+ """Metaclass for Annotated"""
+
+ def __new__(cls, name, bases, namespace, **kwargs):
+ if any(b is not object for b in bases):
+ raise TypeError("Cannot subclass " + str(Annotated))
+ return super().__new__(cls, name, bases, namespace, **kwargs)
+
+ @property
+ def __metadata__(self):
+ return self._subs_tree()[2]
+
+ def _tree_repr(self, tree):
+ cls, origin, metadata = tree
+ if not isinstance(origin, tuple):
+ tp_repr = typing._type_repr(origin)
+ else:
+ tp_repr = origin[0]._tree_repr(origin)
+ metadata_reprs = ", ".join(repr(arg) for arg in metadata)
+ return "%s[%s, %s]" % (cls, tp_repr, metadata_reprs)
+
+ def _subs_tree(self, tvars=None, args=None): # noqa
+ if self is Annotated:
+ return Annotated
+ res = super()._subs_tree(tvars=tvars, args=args)
+ # Flatten nested Annotated
+ if isinstance(res[1], tuple) and res[1][0] is Annotated:
+ sub_tp = res[1][1]
+ sub_annot = res[1][2]
+ return (Annotated, sub_tp, sub_annot + res[2])
+ return res
+
+ def _get_cons(self):
+ """Return the class used to create instance of this type."""
+ if self.__origin__ is None:
+ raise TypeError(
+ "Cannot get the underlying type of a "
+ "non-specialized Annotated type."
+ )
+ tree = self._subs_tree()
+ while isinstance(tree, tuple) and tree[0] is Annotated:
+ tree = tree[1]
+ if isinstance(tree, tuple):
+ return tree[0]
+ else:
+ return tree
+
+ @_tp_cache
+ def __getitem__(self, params):
+ if not isinstance(params, tuple):
+ params = (params,)
+ if self.__origin__ is not None: # specializing an instantiated type
+ return super().__getitem__(params)
+ elif not isinstance(params, tuple) or len(params) < 2:
+ raise TypeError(
+ "Annotated[...] should be instantiated "
+ "with at least two arguments (a type and an "
+ "annotation)."
+ )
+ else:
+ msg = "Annotated[t, ...]: t must be a type."
+ tp = typing._type_check(params[0], msg)
+ metadata = tuple(params[1:])
+ return type(self)(
+ self.__name__,
+ self.__bases__,
+ _no_slots_copy(self.__dict__),
+ tvars=_type_vars((tp,)),
+ # Metadata is a tuple so it won't be touched by _replace_args et al.
+ args=(tp, metadata),
+ origin=self,
+ )
+
+ def __call__(self, *args, **kwargs):
+ cons = self._get_cons()
+ result = cons(*args, **kwargs)
+ try:
+ result.__orig_class__ = self
+ except AttributeError:
+ pass
+ return result
+
+ def __getattr__(self, attr):
+ # For simplicity we just don't relay all dunder names
+ if self.__origin__ is not None and not _is_dunder(attr):
+ return getattr(self._get_cons(), attr)
+ raise AttributeError(attr)
+
+ def __setattr__(self, attr, value):
+ if _is_dunder(attr) or attr.startswith("_abc_"):
+ super().__setattr__(attr, value)
+ elif self.__origin__ is None:
+ raise AttributeError(attr)
+ else:
+ setattr(self._get_cons(), attr, value)
+
+ def __instancecheck__(self, obj):
+ raise TypeError("Annotated cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("Annotated cannot be used with issubclass().")
+
+ class Annotated(metaclass=AnnotatedMeta):
+ """Add context specific metadata to a type.
+
+ Example: Annotated[int, runtime_check.Unsigned] indicates to the
+ hypothetical runtime_check module that this type is an unsigned int.
+ Every other consumer of this type can ignore this metadata and treat
+ this type as int.
+
+ The first argument to Annotated must be a valid type, the remaining
+ arguments are kept as a tuple in the __metadata__ field.
+
+ Details:
+
+ - It's an error to call `Annotated` with less than two arguments.
+ - Nested Annotated are flattened::
+
+ Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
+
+ - Instantiating an annotated type is equivalent to instantiating the
+ underlying type::
+
+ Annotated[C, Ann1](5) == C(5)
+
+ - Annotated can be used as a generic type alias::
+
+ Optimized = Annotated[T, runtime.Optimize()]
+ Optimized[int] == Annotated[int, runtime.Optimize()]
+
+ OptimizedList = Annotated[List[T], runtime.Optimize()]
+ OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
+ """
+
+
+# Python 3.8 has get_origin() and get_args() but those implementations aren't
+# Annotated-aware, so we can't use those, only Python 3.9 versions will do.
+if sys.version_info[:2] >= (3, 9):
+ get_origin = typing.get_origin
+ get_args = typing.get_args
+elif PEP_560:
+ from typing import _GenericAlias # noqa
+
+ def get_origin(tp):
+ """Get the unsubscripted version of a type.
+
+ This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
+ and Annotated. Return None for unsupported types. Examples::
+
+ get_origin(Literal[42]) is Literal
+ get_origin(int) is None
+ get_origin(ClassVar[int]) is ClassVar
+ get_origin(Generic) is Generic
+ get_origin(Generic[T]) is Generic
+ get_origin(Union[T, int]) is Union
+ get_origin(List[Tuple[T, T]][int]) == list
+ """
+ if isinstance(tp, _AnnotatedAlias):
+ return Annotated
+ if isinstance(tp, _GenericAlias):
+ return tp.__origin__
+ if tp is Generic:
+ return Generic
+ return None
+
+ def get_args(tp):
+ """Get type arguments with all substitutions performed.
+
+ For unions, basic simplifications used by Union constructor are performed.
+ Examples::
+ get_args(Dict[str, int]) == (str, int)
+ get_args(int) == ()
+ get_args(Union[int, Union[T, int], str][int]) == (int, str)
+ get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
+ get_args(Callable[[], T][int]) == ([], int)
+ """
+ if isinstance(tp, _AnnotatedAlias):
+ return (tp.__origin__,) + tp.__metadata__
+ if isinstance(tp, _GenericAlias):
+ res = tp.__args__
+ if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
+ res = (list(res[:-1]), res[-1])
+ return res
+ return ()
+
+
+if hasattr(typing, "TypeAlias"):
+ TypeAlias = typing.TypeAlias
+elif sys.version_info[:2] >= (3, 9):
+
+ class _TypeAliasForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return "typing_extensions." + self._name
+
+ @_TypeAliasForm
+ def TypeAlias(self, parameters):
+ """Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example above.
+ """
+ raise TypeError("{} is not subscriptable".format(self))
+
+
+elif sys.version_info[:2] >= (3, 7):
+
+ class _TypeAliasForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return "typing_extensions." + self._name
+
+ TypeAlias = _TypeAliasForm(
+ "TypeAlias",
+ doc="""Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example
+ above.""",
+ )
+
+elif hasattr(typing, "_FinalTypingBase"):
+
+ class _TypeAliasMeta(typing.TypingMeta):
+ """Metaclass for TypeAlias"""
+
+ def __repr__(self):
+ return "typing_extensions.TypeAlias"
+
+ class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True):
+ """Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example above.
+ """
+
+ __slots__ = ()
+
+ def __instancecheck__(self, obj):
+ raise TypeError("TypeAlias cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("TypeAlias cannot be used with issubclass().")
+
+ def __repr__(self):
+ return "typing_extensions.TypeAlias"
+
+ TypeAlias = _TypeAliasBase(_root=True)
+else:
+
+ class _TypeAliasMeta(typing.TypingMeta):
+ """Metaclass for TypeAlias"""
+
+ def __instancecheck__(self, obj):
+ raise TypeError("TypeAlias cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("TypeAlias cannot be used with issubclass().")
+
+ def __call__(self, *args, **kwargs):
+ raise TypeError("Cannot instantiate TypeAlias")
+
+ class TypeAlias(metaclass=_TypeAliasMeta, _root=True):
+ """Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example above.
+ """
+
+ __slots__ = ()
diff --git a/setup.cfg b/setup.cfg
index 2447a91f88f4e..29ae85f7985f7 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -68,6 +68,7 @@ omit =
*/tests/*
pandas/_typing.py
pandas/_version.py
+ pandas/_vendored/typing_extensions.py
plugins = Cython.Coverage
[coverage:report]
@@ -99,7 +100,7 @@ directory = coverage_html_report
# To be kept consistent with "Import Formatting" section in contributing.rst
[isort]
-known_pre_libs = pandas._config
+known_pre_libs = pandas._config,pandas._vendored
known_pre_core = pandas._libs,pandas._typing,pandas.util._*,pandas.compat,pandas.errors
known_dtypes = pandas.core.dtypes
known_post_core = pandas.tseries,pandas.io,pandas.plotting
@@ -113,7 +114,7 @@ combine_as_imports = True
line_length = 88
force_sort_within_sections = True
skip_glob = env,
-skip = pandas/__init__.py
+skip = pandas/__init__.py,pandas/_vendored/typing_extensions.py
[mypy]
ignore_missing_imports=True
@@ -124,6 +125,10 @@ warn_redundant_casts = True
warn_unused_ignores = True
show_error_codes = True
+[mypy-pandas._vendored.*]
+check_untyped_defs=False
+ignore_errors=True
+
[mypy-pandas.tests.*]
check_untyped_defs=False
| - [x] closes #34869
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
haven't figured out how to make isort and black ignore this file | https://api.github.com/repos/pandas-dev/pandas/pulls/36000 | 2020-08-31T02:47:39Z | 2020-09-01T23:37:09Z | 2020-09-01T23:37:09Z | 2020-09-01T23:47:58Z |
BUG: None in Float64Index raising TypeError, should return False | diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index 9747a8ef3e71f..b907b8ac33516 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -29,7 +29,7 @@ Bug fixes
- Bug in :class:`Series` constructor raising a ``TypeError`` when constructing sparse datetime64 dtypes (:issue:`35762`)
- Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`)
- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`)
--
+- Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index d6659cc1895b1..569562f5b5037 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -80,7 +80,11 @@ cdef class IndexEngine:
values = self._get_index_values()
self._check_type(val)
- loc = _bin_search(values, val) # .searchsorted(val, side='left')
+ try:
+ loc = _bin_search(values, val) # .searchsorted(val, side='left')
+ except TypeError:
+ # GH#35788 e.g. val=None with float64 values
+ raise KeyError(val)
if loc >= len(values):
raise KeyError(val)
if values[loc] != val:
diff --git a/pandas/tests/indexes/numeric/test_indexing.py b/pandas/tests/indexes/numeric/test_indexing.py
index 473e370c76f8b..508bd2f566507 100644
--- a/pandas/tests/indexes/numeric/test_indexing.py
+++ b/pandas/tests/indexes/numeric/test_indexing.py
@@ -228,6 +228,12 @@ def test_take_fill_value_ints(self, klass):
class TestContains:
+ @pytest.mark.parametrize("klass", [Float64Index, Int64Index, UInt64Index])
+ def test_contains_none(self, klass):
+ # GH#35788 should return False, not raise TypeError
+ index = klass([0, 1, 2, 3, 4])
+ assert None not in index
+
def test_contains_float64_nans(self):
index = Float64Index([1.0, 2.0, np.nan])
assert np.nan in index
| - [x] closes #35788
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35999 | 2020-08-30T23:24:59Z | 2020-09-01T01:20:41Z | 2020-09-01T01:20:41Z | 2020-09-01T15:03:03Z |
Issue35925 Remove trailing commas | diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 724558bd49ea2..274860b3fdb5c 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1846,7 +1846,7 @@ def test_multilevel_index_loc_order(self, dim, keys, expected):
# GH 22797
# Try to respect order of keys given for MultiIndex.loc
kwargs = {dim: [["c", "a", "a", "b", "b"], [1, 1, 2, 1, 2]]}
- df = pd.DataFrame(np.arange(25).reshape(5, 5), **kwargs,)
+ df = pd.DataFrame(np.arange(25).reshape(5, 5), **kwargs)
exp_index = MultiIndex.from_arrays(expected)
if dim == "index":
res = df.loc[keys, :]
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 0d60e6e8a978f..c45e4508c6153 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -285,7 +285,7 @@ def test_nansum(self, skipna):
def test_nanmean(self, skipna):
self.check_funs(
- nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False,
+ nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False
)
def test_nanmean_overflow(self):
diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py
index a3de8aa69f840..158b994cf03ae 100644
--- a/pandas/tests/window/moments/test_moments_consistency_rolling.py
+++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py
@@ -95,7 +95,7 @@ def test_rolling_apply_consistency(
with warnings.catch_warnings():
warnings.filterwarnings(
- "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning,
+ "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning
)
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
@@ -107,7 +107,7 @@ def test_rolling_apply_consistency(
functions = no_nan_functions + base_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
- x.rolling(window=window, center=center, min_periods=min_periods), name,
+ x.rolling(window=window, center=center, min_periods=min_periods), name
)
if (
@@ -492,7 +492,7 @@ def test_moment_functions_zero_length_pairwise():
df2["a"] = df2["a"].astype("float64")
df1_expected = DataFrame(
- index=pd.MultiIndex.from_product([df1.index, df1.columns]), columns=Index([]),
+ index=pd.MultiIndex.from_product([df1.index, df1.columns]), columns=Index([])
)
df2_expected = DataFrame(
index=pd.MultiIndex.from_product(
@@ -635,7 +635,7 @@ def test_rolling_consistency(consistency_data, window, min_periods, center):
# with empty/0-length Series/DataFrames
with warnings.catch_warnings():
warnings.filterwarnings(
- "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning,
+ "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning
)
# test consistency between different rolling_* moments
diff --git a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py
index 89d46a8bb6cb5..a83bfabc4a048 100644
--- a/pandas/tests/window/moments/test_moments_ewm.py
+++ b/pandas/tests/window/moments/test_moments_ewm.py
@@ -73,7 +73,7 @@ def simple_wma(s, w):
(s1, True, True, [(1.0 - alpha), np.nan, 1.0]),
(s1, False, False, [(1.0 - alpha) ** 2, np.nan, alpha]),
(s1, False, True, [(1.0 - alpha), np.nan, alpha]),
- (s2, True, False, [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan],),
+ (s2, True, False, [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan]),
(s2, True, True, [np.nan, (1.0 - alpha), np.nan, np.nan, 1.0, np.nan]),
(
s2,
@@ -95,7 +95,7 @@ def simple_wma(s, w):
alpha * ((1.0 - alpha) ** 2 + alpha),
],
),
- (s3, False, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha],),
+ (s3, False, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha]),
]:
expected = simple_wma(s, Series(w))
result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean()
diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py
index 81f020fe7de23..da256e80dff7e 100644
--- a/pandas/tests/window/moments/test_moments_rolling.py
+++ b/pandas/tests/window/moments/test_moments_rolling.py
@@ -150,14 +150,14 @@ def get_result(obj, window, min_periods=None, center=False):
series_xp = (
get_result(
- series.reindex(list(series.index) + s), window=25, min_periods=minp,
+ series.reindex(list(series.index) + s), window=25, min_periods=minp
)
.shift(-12)
.reindex(series.index)
)
frame_xp = (
get_result(
- frame.reindex(list(frame.index) + s), window=25, min_periods=minp,
+ frame.reindex(list(frame.index) + s), window=25, min_periods=minp
)
.shift(-12)
.reindex(frame.index)
@@ -169,14 +169,14 @@ def get_result(obj, window, min_periods=None, center=False):
else:
series_xp = (
get_result(
- series.reindex(list(series.index) + s), window=25, min_periods=0,
+ series.reindex(list(series.index) + s), window=25, min_periods=0
)
.shift(-12)
.reindex(series.index)
)
frame_xp = (
get_result(
- frame.reindex(list(frame.index) + s), window=25, min_periods=0,
+ frame.reindex(list(frame.index) + s), window=25, min_periods=0
)
.shift(-12)
.reindex(frame.index)
diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
index 2300d8dd5529b..ab73e075eed04 100644
--- a/pandas/tests/window/test_base_indexer.py
+++ b/pandas/tests/window/test_base_indexer.py
@@ -88,8 +88,8 @@ def get_window_bounds(self, num_values, min_periods, center, closed):
@pytest.mark.parametrize(
"func,np_func,expected,np_kwargs",
[
- ("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {},),
- ("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {},),
+ ("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {}),
+ ("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {}),
(
"max",
np.max,
@@ -204,7 +204,7 @@ def test_rolling_forward_skewness(constructor):
@pytest.mark.parametrize(
"func,expected",
[
- ("cov", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan],),
+ ("cov", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan]),
(
"corr",
[
diff --git a/pandas/tests/window/test_pairwise.py b/pandas/tests/window/test_pairwise.py
index e82d4b8cbf770..7425cc5df4c2f 100644
--- a/pandas/tests/window/test_pairwise.py
+++ b/pandas/tests/window/test_pairwise.py
@@ -195,7 +195,7 @@ def test_cov_mulittindex(self):
columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")])
index = range(3)
- df = DataFrame(np.arange(24).reshape(3, 8), index=index, columns=columns,)
+ df = DataFrame(np.arange(24).reshape(3, 8), index=index, columns=columns)
result = df.ewm(alpha=0.1).cov()
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 8d72e2cb92ca9..67b20fd2d6daa 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -73,7 +73,7 @@ def test_constructor_with_timedelta_window(window):
# GH 15440
n = 10
df = DataFrame(
- {"value": np.arange(n)}, index=pd.date_range("2015-12-24", periods=n, freq="D"),
+ {"value": np.arange(n)}, index=pd.date_range("2015-12-24", periods=n, freq="D")
)
expected_data = np.append([0.0, 1.0], np.arange(3.0, 27.0, 3))
@@ -92,7 +92,7 @@ def test_constructor_timedelta_window_and_minperiods(window, raw):
# GH 15305
n = 10
df = DataFrame(
- {"value": np.arange(n)}, index=pd.date_range("2017-08-08", periods=n, freq="D"),
+ {"value": np.arange(n)}, index=pd.date_range("2017-08-08", periods=n, freq="D")
)
expected = DataFrame(
{"value": np.append([np.NaN, 1.0], np.arange(3.0, 27.0, 3))},
@@ -153,7 +153,7 @@ def test_closed_one_entry(func):
def test_closed_one_entry_groupby(func):
# GH24718
ser = pd.DataFrame(
- data={"A": [1, 1, 2], "B": [3, 2, 1]}, index=pd.date_range("2000", periods=3),
+ data={"A": [1, 1, 2], "B": [3, 2, 1]}, index=pd.date_range("2000", periods=3)
)
result = getattr(
ser.groupby("A", sort=False)["B"].rolling("10D", closed="left"), func
@@ -182,7 +182,7 @@ def test_closed_one_entry_groupby(func):
def test_closed_min_max_datetime(input_dtype, func, closed, expected):
# see gh-21704
ser = pd.Series(
- data=np.arange(10).astype(input_dtype), index=pd.date_range("2000", periods=10),
+ data=np.arange(10).astype(input_dtype), index=pd.date_range("2000", periods=10)
)
result = getattr(ser.rolling("3D", closed=closed), func)()
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index f80ff1a53cd69..8ef6dac2862db 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -548,7 +548,7 @@ def is_superperiod(source, target) -> bool:
def _maybe_coerce_freq(code) -> str:
- """ we might need to coerce a code to a rule_code
+ """we might need to coerce a code to a rule_code
and uppercase it
Parameters
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index 0dad8c7397e37..ca7b99492bbf7 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -186,10 +186,10 @@ def skip_if_no(package: str, min_version: Optional[str] = None):
is_platform_windows(), reason="not used on win32"
)
skip_if_has_locale = pytest.mark.skipif(
- _skip_if_has_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}",
+ _skip_if_has_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}"
)
skip_if_not_us_locale = pytest.mark.skipif(
- _skip_if_not_us_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}",
+ _skip_if_not_us_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}"
)
skip_if_no_scipy = pytest.mark.skipif(
_skip_if_no_scipy(), reason="Missing SciPy requirement"
| #35925
Files edited:
- pandas/tests/test_multilevel.py
- pandas/tests/test_nanops.py
- pandas/tests/window/moments/test_moments_consistency_rolling.py
- pandas/tests/window/moments/test_moments_ewm.py
- pandas/tests/window/moments/test_moments_rolling.py
- pandas/tests/window/test_base_indexer.py
- pandas/tests/window/test_pairwise.py
- pandas/tests/window/test_rolling.py
- pandas/tseries/frequencies.py
- pandas/util/_test_decorators.py | https://api.github.com/repos/pandas-dev/pandas/pulls/35996 | 2020-08-30T17:56:06Z | 2020-08-31T09:59:18Z | 2020-08-31T09:59:18Z | 2020-08-31T09:59:26Z |
TYP: typing errors in _xlsxwriter.py #35994 | diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 3cd0d721bbdc6..ead36c95556b1 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -653,7 +653,6 @@ def __new__(cls, path, engine=None, **kwargs):
return object.__new__(cls)
# declare external properties you can count on
- book = None
curr_sheet = None
path = None
diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py
index 72f3d81b1c662..f39391ae1fe7f 100644
--- a/pandas/io/excel/_odswriter.py
+++ b/pandas/io/excel/_odswriter.py
@@ -25,7 +25,7 @@ def __init__(
super().__init__(path, mode=mode, **engine_kwargs)
- self.book: OpenDocumentSpreadsheet = OpenDocumentSpreadsheet()
+ self.book = OpenDocumentSpreadsheet()
self._style_dict: Dict[str, str] = {}
def save(self) -> None:
diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py
index 85a1bb031f457..bdbb006ae93dc 100644
--- a/pandas/io/excel/_xlsxwriter.py
+++ b/pandas/io/excel/_xlsxwriter.py
@@ -1,3 +1,5 @@
+from typing import Dict, List, Tuple
+
import pandas._libs.json as json
from pandas.io.excel._base import ExcelWriter
@@ -8,7 +10,7 @@ class _XlsxStyler:
# Map from openpyxl-oriented styles to flatter xlsxwriter representation
# Ordering necessary for both determinism and because some are keyed by
# prefixes of others.
- STYLE_MAPPING = {
+ STYLE_MAPPING: Dict[str, List[Tuple[Tuple[str, ...], str]]] = {
"font": [
(("name",), "font_name"),
(("sz",), "font_size"),
@@ -170,7 +172,7 @@ def __init__(
**engine_kwargs,
):
# Use the xlsxwriter module as the Excel writer.
- import xlsxwriter
+ from xlsxwriter import Workbook
if mode == "a":
raise ValueError("Append mode is not supported with xlsxwriter!")
@@ -184,7 +186,7 @@ def __init__(
**engine_kwargs,
)
- self.book = xlsxwriter.Workbook(path, **engine_kwargs)
+ self.book = Workbook(path, **engine_kwargs)
def save(self):
"""
| - [x] closes #35994
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/35995 | 2020-08-30T17:18:37Z | 2020-08-31T18:22:35Z | 2020-08-31T18:22:35Z | 2020-08-31T20:03:42Z |
TYP: check_untyped_defs core.dtypes.cast | diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index f71fd0d406c54..e66f513e347a9 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -75,7 +75,7 @@ def _new_DatetimeIndex(cls, d):
+ [
method
for method in DatetimeArray._datetimelike_methods
- if method not in ("tz_localize",)
+ if method not in ("tz_localize", "tz_convert")
],
DatetimeArray,
wrap=True,
@@ -228,6 +228,11 @@ class DatetimeIndex(DatetimeTimedeltaMixin):
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in DatetimeIndex
+ @doc(DatetimeArray.tz_convert)
+ def tz_convert(self, tz) -> "DatetimeIndex":
+ arr = self._data.tz_convert(tz)
+ return type(self)._simple_new(arr, name=self.name)
+
@doc(DatetimeArray.tz_localize)
def tz_localize(
self, tz, ambiguous="raise", nonexistent="raise"
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 3c1fe6bacefcf..8fcc5f74ea897 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -307,9 +307,7 @@ def _convert_listlike_datetimes(
if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
return DatetimeIndex(arg, tz=tz, name=name)
if tz == "utc":
- # error: Item "DatetimeIndex" of "Union[DatetimeArray, DatetimeIndex]" has
- # no attribute "tz_convert"
- arg = arg.tz_convert(None).tz_localize(tz) # type: ignore[union-attr]
+ arg = arg.tz_convert(None).tz_localize(tz)
return arg
elif is_datetime64_ns_dtype(arg_dtype):
diff --git a/setup.cfg b/setup.cfg
index aa1535a171f0a..2ba22e5aad3c7 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -157,9 +157,6 @@ check_untyped_defs=False
[mypy-pandas.core.computation.scope]
check_untyped_defs=False
-[mypy-pandas.core.dtypes.cast]
-check_untyped_defs=False
-
[mypy-pandas.core.frame]
check_untyped_defs=False
| https://api.github.com/repos/pandas-dev/pandas/pulls/35992 | 2020-08-30T15:35:14Z | 2020-08-31T01:31:23Z | 2020-08-31T01:31:22Z | 2020-08-31T08:07:07Z | |
TYP: misc typing in core\indexes\base.py | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 95bd757f1994e..27f9b577203ac 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1772,13 +1772,13 @@ def from_records(
arrays = [data[k] for k in columns]
else:
arrays = []
- arr_columns = []
+ arr_columns_list = []
for k, v in data.items():
if k in columns:
- arr_columns.append(k)
+ arr_columns_list.append(k)
arrays.append(v)
- arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns)
+ arrays, arr_columns = reorder_arrays(arrays, arr_columns_list, columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index a07c3328def54..48b02fc525cc1 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -10,6 +10,8 @@
Hashable,
List,
Optional,
+ Sequence,
+ TypeVar,
Union,
)
import warnings
@@ -22,7 +24,7 @@
from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas._libs.tslibs.timezones import tz_compare
-from pandas._typing import DtypeObj, Label
+from pandas._typing import AnyArrayLike, Dtype, DtypeObj, Label
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import InvalidIndexError
@@ -98,7 +100,7 @@
)
if TYPE_CHECKING:
- from pandas import Series
+ from pandas import RangeIndex, Series
__all__ = ["Index"]
@@ -188,6 +190,9 @@ def _new_Index(cls, d):
return cls.__new__(cls, **d)
+_IndexT = TypeVar("_IndexT", bound="Index")
+
+
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
@@ -787,7 +792,13 @@ def repeat(self, repeats, axis=None):
# --------------------------------------------------------------------
# Copying Methods
- def copy(self, name=None, deep=False, dtype=None, names=None):
+ def copy(
+ self: _IndexT,
+ name: Optional[Label] = None,
+ deep: bool = False,
+ dtype: Optional[Dtype] = None,
+ names: Optional[Sequence[Label]] = None,
+ ) -> _IndexT:
"""
Make a copy of this object.
@@ -949,10 +960,9 @@ def _format_with_header(
# could have nans
mask = isna(values)
if mask.any():
- result = np.array(result)
- result[mask] = na_rep
- # error: "List[str]" has no attribute "tolist"
- result = result.tolist() # type: ignore[attr-defined]
+ result_arr = np.array(result)
+ result_arr[mask] = na_rep
+ result = result_arr.tolist()
else:
result = trim_front(format_array(values, None, justify="left"))
return header + result
@@ -4913,7 +4923,13 @@ def _get_string_slice(self, key: str_t, use_lhs: bool = True, use_rhs: bool = Tr
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
- def slice_indexer(self, start=None, end=None, step=None, kind=None):
+ def slice_indexer(
+ self,
+ start: Optional[Label] = None,
+ end: Optional[Label] = None,
+ step: Optional[int] = None,
+ kind: Optional[str_t] = None,
+ ) -> slice:
"""
Compute the slice indexer for input labels and step.
@@ -5513,7 +5529,9 @@ def ensure_index_from_sequences(sequences, names=None):
return MultiIndex.from_arrays(sequences, names=names)
-def ensure_index(index_like, copy: bool = False):
+def ensure_index(
+ index_like: Union[AnyArrayLike, Sequence], copy: bool = False
+) -> Index:
"""
Ensure that we have an index from some index-like object.
@@ -5549,7 +5567,18 @@ def ensure_index(index_like, copy: bool = False):
index_like = index_like.copy()
return index_like
if hasattr(index_like, "name"):
- return Index(index_like, name=index_like.name, copy=copy)
+ # https://github.com/python/mypy/issues/1424
+ # error: Item "ExtensionArray" of "Union[ExtensionArray,
+ # Sequence[Any]]" has no attribute "name" [union-attr]
+ # error: Item "Sequence[Any]" of "Union[ExtensionArray, Sequence[Any]]"
+ # has no attribute "name" [union-attr]
+ # error: "Sequence[Any]" has no attribute "name" [attr-defined]
+ # error: Item "Sequence[Any]" of "Union[Series, Sequence[Any]]" has no
+ # attribute "name" [union-attr]
+ # error: Item "Sequence[Any]" of "Union[Any, Sequence[Any]]" has no
+ # attribute "name" [union-attr]
+ name = index_like.name # type: ignore[union-attr, attr-defined]
+ return Index(index_like, name=name, copy=copy)
if is_iterator(index_like):
index_like = list(index_like)
@@ -5604,7 +5633,7 @@ def _validate_join_method(method: str):
raise ValueError(f"do not recognize join method {method}")
-def default_index(n):
+def default_index(n: int) -> "RangeIndex":
from pandas.core.indexes.range import RangeIndex
return RangeIndex(0, n, name=None)
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 5d309ef7cd515..08f9bd51de77b 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1,7 +1,7 @@
""" define the IntervalIndex """
from operator import le, lt
import textwrap
-from typing import Any, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union, cast
import numpy as np
@@ -56,6 +56,9 @@
from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range
from pandas.core.ops import get_op_result_name
+if TYPE_CHECKING:
+ from pandas import CategoricalIndex
+
_VALID_CLOSED = {"left", "right", "both", "neither"}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
@@ -786,6 +789,7 @@ def get_indexer(
right_indexer = self.right.get_indexer(target_as_index.right)
indexer = np.where(left_indexer == right_indexer, left_indexer, -1)
elif is_categorical_dtype(target_as_index.dtype):
+ target_as_index = cast("CategoricalIndex", target_as_index)
# get an indexer for unique categories then propagate to codes via take_1d
categories_indexer = self.get_indexer(target_as_index.categories)
indexer = take_1d(categories_indexer, target_as_index.codes, fill_value=-1)
| https://api.github.com/repos/pandas-dev/pandas/pulls/35991 | 2020-08-30T15:33:36Z | 2020-09-02T18:44:40Z | 2020-09-02T18:44:40Z | 2020-09-02T18:49:29Z | |
TYP: misc typing fixes for pandas\core\frame.py | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 95bd757f1994e..e14f757e02159 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1014,7 +1014,7 @@ def iterrows(self) -> Iterable[Tuple[Label, Series]]:
s = klass(v, index=columns, name=k)
yield k, s
- def itertuples(self, index=True, name="Pandas"):
+ def itertuples(self, index: bool = True, name: Optional[str] = "Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
@@ -1088,7 +1088,11 @@ def itertuples(self, index=True, name="Pandas"):
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
if name is not None:
- itertuple = collections.namedtuple(name, fields, rename=True)
+ # https://github.com/python/mypy/issues/9046
+ # error: namedtuple() expects a string literal as the first argument
+ itertuple = collections.namedtuple( # type: ignore[misc]
+ name, fields, rename=True
+ )
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
@@ -4591,7 +4595,7 @@ def set_index(
frame = self.copy()
arrays = []
- names = []
+ names: List[Label] = []
if append:
names = list(self.index.names)
if isinstance(self.index, MultiIndex):
| pandas\core\frame.py:1091: error: namedtuple() expects a string literal as the first argument [misc]
pandas\core\frame.py:4594: error: Need type annotation for 'names' (hint: "names: List[<type>] = ...") [var-annotated] | https://api.github.com/repos/pandas-dev/pandas/pulls/35990 | 2020-08-30T15:29:57Z | 2020-08-31T15:06:35Z | 2020-08-31T15:06:35Z | 2020-08-31T16:23:51Z |
TST: Verify operators with IntegerArray and list-likes (22606) | diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py
index 6286711ac6113..31e9a4c4bc44e 100644
--- a/pandas/tests/arithmetic/conftest.py
+++ b/pandas/tests/arithmetic/conftest.py
@@ -2,6 +2,7 @@
import pytest
import pandas as pd
+import pandas._testing as tm
# ------------------------------------------------------------------
# Helper Functions
@@ -239,5 +240,14 @@ def box_with_array(request):
return request.param
+@pytest.fixture(params=[pd.Index, pd.Series, tm.to_array, np.array, list], ids=id_func)
+def box_1d_array(request):
+ """
+ Fixture to test behavior for Index, Series, tm.to_array, numpy Array and list
+ classes
+ """
+ return request.param
+
+
# alias so we can use the same fixture for multiple parameters in a test
box_with_array2 = box_with_array
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index df98b43e11f4a..4472088fc6d14 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -11,11 +11,19 @@
import pytest
import pandas as pd
-from pandas import Index, Series, Timedelta, TimedeltaIndex
+from pandas import Index, Int64Index, Series, Timedelta, TimedeltaIndex, array
import pandas._testing as tm
from pandas.core import ops
+@pytest.fixture(params=[Index, Series, tm.to_array])
+def box_pandas_1d_array(request):
+ """
+ Fixture to test behavior for Index, Series and tm.to_array classes
+ """
+ return request.param
+
+
def adjust_negative_zero(zero, expected):
"""
Helper to adjust the expected result if we are dividing by -0.0
@@ -1340,3 +1348,33 @@ def test_dataframe_div_silenced():
)
with tm.assert_produces_warning(None):
pdf1.div(pdf2, fill_value=0)
+
+
+@pytest.mark.parametrize(
+ "data, expected_data",
+ [([0, 1, 2], [0, 2, 4])],
+)
+def test_integer_array_add_list_like(
+ box_pandas_1d_array, box_1d_array, data, expected_data
+):
+ # GH22606 Verify operators with IntegerArray and list-likes
+ arr = array(data, dtype="Int64")
+ container = box_pandas_1d_array(arr)
+ left = container + box_1d_array(data)
+ right = box_1d_array(data) + container
+
+ if Series == box_pandas_1d_array:
+ assert_function = tm.assert_series_equal
+ expected = Series(expected_data, dtype="Int64")
+ elif Series == box_1d_array:
+ assert_function = tm.assert_series_equal
+ expected = Series(expected_data, dtype="object")
+ elif Index in (box_pandas_1d_array, box_1d_array):
+ assert_function = tm.assert_index_equal
+ expected = Int64Index(expected_data)
+ else:
+ assert_function = tm.assert_numpy_array_equal
+ expected = np.array(expected_data, dtype="object")
+
+ assert_function(left, expected)
+ assert_function(right, expected)
| - [x] closes #22606
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35987 | 2020-08-30T12:48:40Z | 2020-10-07T03:15:05Z | 2020-10-07T03:15:05Z | 2020-10-07T03:15:09Z |
DOC: Add Notes about difference to numpy behaviour for ddof in std() GH35985 | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 486bea7cd1b47..f488195dcaa86 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10706,7 +10706,12 @@ def _doc_parms(cls):
Returns
-------
-%(name1)s or %(name2)s (if level specified)\n"""
+%(name1)s or %(name2)s (if level specified)
+
+Notes
+-----
+To have the same behaviour as `numpy.std`, use `ddof=0` (instead of the
+default `ddof=1`)\n"""
_bool_doc = """
%(desc)s
| - [x] closes #35985
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35986 | 2020-08-30T11:51:51Z | 2020-09-03T16:58:37Z | 2020-09-03T16:58:36Z | 2020-09-03T16:58:49Z |
CLN: window/rolling.py | diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 39fcfcbe2bff6..05cc996178051 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -377,23 +377,13 @@ def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
return values
- def _wrap_result(self, result, block=None, obj=None):
+ def _wrap_result(self, result: np.ndarray) -> "Series":
"""
- Wrap a single result.
+ Wrap a single 1D result.
"""
- if obj is None:
- obj = self._selected_obj
- index = obj.index
+ obj = self._selected_obj
- if isinstance(result, np.ndarray):
-
- if result.ndim == 1:
- from pandas import Series
-
- return Series(result, index, name=obj.name)
-
- return type(obj)(result, index=index, columns=block.columns)
- return result
+ return obj._constructor(result, obj.index, name=obj.name)
def _wrap_results(self, results, obj, skipped: List[int]) -> FrameOrSeriesUnion:
"""
@@ -454,7 +444,7 @@ def _insert_on_column(self, result: "DataFrame", obj: "DataFrame"):
# insert at the end
result[name] = extra_col
- def _center_window(self, result, window) -> np.ndarray:
+ def _center_window(self, result: np.ndarray, window) -> np.ndarray:
"""
Center the result in the window.
"""
@@ -513,7 +503,6 @@ def _apply_series(self, homogeneous_func: Callable[..., ArrayLike]) -> "Series":
Series version of _apply_blockwise
"""
_, obj = self._create_blocks(self._selected_obj)
- values = obj.values
try:
values = self._prep_values(obj.values)
@@ -535,7 +524,7 @@ def _apply_blockwise(
# This isn't quite blockwise, since `blocks` is actually a collection
# of homogenenous DataFrames.
- blocks, obj = self._create_blocks(self._selected_obj)
+ _, obj = self._create_blocks(self._selected_obj)
mgr = obj._mgr
def hfunc(bvalues: ArrayLike) -> ArrayLike:
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/35982 | 2020-08-30T03:24:03Z | 2020-08-31T20:42:06Z | 2020-08-31T20:42:06Z | 2020-08-31T22:11:38Z |
DOC clean up doc/source/getting_started/overview.rst | diff --git a/doc/source/getting_started/overview.rst b/doc/source/getting_started/overview.rst
index d8a40c5406dee..032ba73a7293d 100644
--- a/doc/source/getting_started/overview.rst
+++ b/doc/source/getting_started/overview.rst
@@ -9,9 +9,9 @@ Package overview
**pandas** is a `Python <https://www.python.org>`__ package providing fast,
flexible, and expressive data structures designed to make working with
"relational" or "labeled" data both easy and intuitive. It aims to be the
-fundamental high-level building block for doing practical, **real world** data
+fundamental high-level building block for doing practical, **real-world** data
analysis in Python. Additionally, it has the broader goal of becoming **the
-most powerful and flexible open source data analysis / manipulation tool
+most powerful and flexible open source data analysis/manipulation tool
available in any language**. It is already well on its way toward this goal.
pandas is well suited for many different kinds of data:
@@ -21,7 +21,7 @@ pandas is well suited for many different kinds of data:
- Ordered and unordered (not necessarily fixed-frequency) time series data.
- Arbitrary matrix data (homogeneously typed or heterogeneous) with row and
column labels
- - Any other form of observational / statistical data sets. The data actually
+ - Any other form of observational / statistical data sets. The data
need not be labeled at all to be placed into a pandas data structure
The two primary data structures of pandas, :class:`Series` (1-dimensional)
@@ -57,7 +57,7 @@ Here are just a few of the things that pandas does well:
Excel files, databases, and saving / loading data from the ultrafast **HDF5
format**
- **Time series**-specific functionality: date range generation and frequency
- conversion, moving window statistics, date shifting and lagging.
+ conversion, moving window statistics, date shifting, and lagging.
Many of these principles are here to address the shortcomings frequently
experienced using other languages / scientific research environments. For data
@@ -101,12 +101,12 @@ fashion.
Also, we would like sensible default behaviors for the common API functions
which take into account the typical orientation of time series and
-cross-sectional data sets. When using ndarrays to store 2- and 3-dimensional
+cross-sectional data sets. When using the N-dimensional array (ndarrays) to store 2- and 3-dimensional
data, a burden is placed on the user to consider the orientation of the data
set when writing functions; axes are considered more or less equivalent (except
when C- or Fortran-contiguousness matters for performance). In pandas, the axes
are intended to lend more semantic meaning to the data; i.e., for a particular
-data set there is likely to be a "right" way to orient the data. The goal,
+data set, there is likely to be a "right" way to orient the data. The goal,
then, is to reduce the amount of mental effort required to code up data
transformations in downstream functions.
@@ -148,8 +148,8 @@ pandas possible. Thanks to `all of our contributors <https://github.com/pandas-d
If you're interested in contributing, please visit the :ref:`contributing guide <contributing>`.
pandas is a `NumFOCUS <https://www.numfocus.org/open-source-projects/>`__ sponsored project.
-This will help ensure the success of development of pandas as a world-class open-source
-project, and makes it possible to `donate <https://pandas.pydata.org/donate.html>`__ to the project.
+This will help ensure the success of the development of pandas as a world-class open-source
+project and makes it possible to `donate <https://pandas.pydata.org/donate.html>`__ to the project.
Project governance
------------------
| - [x] closes #35980
| https://api.github.com/repos/pandas-dev/pandas/pulls/35981 | 2020-08-29T22:26:24Z | 2020-08-31T18:24:05Z | 2020-08-31T18:24:05Z | 2020-08-31T18:24:15Z |
BUG: Respect errors="ignore" during extension astype | diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index b8f6d0e52d058..944f6f268e867 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -33,6 +33,7 @@ Bug fixes
- Bug in :meth:`DataFrame.eval` with ``object`` dtype column binary operations (:issue:`35794`)
- Bug in :class:`Series` constructor raising a ``TypeError`` when constructing sparse datetime64 dtypes (:issue:`35762`)
- Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`)
+- Bug in :meth:`Series.astype` and :meth:`DataFrame.astype` not respecting the ``errors`` argument when set to ``"ignore"`` for extension dtypes (:issue:`35471`)
- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should be ``""`` (:issue:`35712`)
- Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`)
- Bug in :class:`Series` constructor incorrectly raising a ``TypeError`` when passed an ordered set (:issue:`36044`)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 9f4e535dc787d..263c7c2b6940a 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -581,8 +581,13 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"):
# force the copy here
if self.is_extension:
- # TODO: Should we try/except this astype?
- values = self.values.astype(dtype)
+ try:
+ values = self.values.astype(dtype)
+ except (ValueError, TypeError):
+ if errors == "ignore":
+ values = self.values
+ else:
+ raise
else:
if issubclass(dtype.type, str):
diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py
index b0fd0496ea81e..d3f256259b15f 100644
--- a/pandas/tests/frame/methods/test_astype.py
+++ b/pandas/tests/frame/methods/test_astype.py
@@ -8,6 +8,7 @@
CategoricalDtype,
DataFrame,
DatetimeTZDtype,
+ Interval,
IntervalDtype,
NaT,
Series,
@@ -565,3 +566,24 @@ def test_astype_empty_dtype_dict(self):
result = df.astype(dict())
tm.assert_frame_equal(result, df)
assert result is not df
+
+ @pytest.mark.parametrize(
+ "df",
+ [
+ DataFrame(Series(["x", "y", "z"], dtype="string")),
+ DataFrame(Series(["x", "y", "z"], dtype="category")),
+ DataFrame(Series(3 * [Timestamp("2020-01-01", tz="UTC")])),
+ DataFrame(Series(3 * [Interval(0, 1)])),
+ ],
+ )
+ @pytest.mark.parametrize("errors", ["raise", "ignore"])
+ def test_astype_ignores_errors_for_extension_dtypes(self, df, errors):
+ # https://github.com/pandas-dev/pandas/issues/35471
+ if errors == "ignore":
+ expected = df
+ result = df.astype(float, errors=errors)
+ tm.assert_frame_equal(result, expected)
+ else:
+ msg = "(Cannot cast)|(could not convert)"
+ with pytest.raises((ValueError, TypeError), match=msg):
+ df.astype(float, errors=errors)
diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py
index 9fdc4179de2e1..b9d90a9fc63dd 100644
--- a/pandas/tests/series/methods/test_astype.py
+++ b/pandas/tests/series/methods/test_astype.py
@@ -1,4 +1,6 @@
-from pandas import Series, date_range
+import pytest
+
+from pandas import Interval, Series, Timestamp, date_range
import pandas._testing as tm
@@ -23,3 +25,24 @@ def test_astype_dt64tz_to_str(self):
dtype=object,
)
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "values",
+ [
+ Series(["x", "y", "z"], dtype="string"),
+ Series(["x", "y", "z"], dtype="category"),
+ Series(3 * [Timestamp("2020-01-01", tz="UTC")]),
+ Series(3 * [Interval(0, 1)]),
+ ],
+ )
+ @pytest.mark.parametrize("errors", ["raise", "ignore"])
+ def test_astype_ignores_errors_for_extension_dtypes(self, values, errors):
+ # https://github.com/pandas-dev/pandas/issues/35471
+ if errors == "ignore":
+ expected = values
+ result = values.astype(float, errors="ignore")
+ tm.assert_series_equal(result, expected)
+ else:
+ msg = "(Cannot cast)|(could not convert)"
+ with pytest.raises((ValueError, TypeError), match=msg):
+ values.astype(float, errors=errors)
| - [x] closes https://github.com/pandas-dev/pandas/issues/35471
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35979 | 2020-08-29T21:24:13Z | 2020-09-06T16:59:44Z | 2020-09-06T16:59:44Z | 2020-09-27T03:27:54Z |
TYP: annotate plotting._matplotlib.converter | diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 214a67690d695..3db7c38eced65 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -2,7 +2,7 @@
import datetime as pydt
from datetime import datetime, timedelta, tzinfo
import functools
-from typing import Optional, Tuple
+from typing import Any, List, Optional, Tuple
from dateutil.relativedelta import relativedelta
import matplotlib.dates as dates
@@ -144,7 +144,7 @@ def convert(value, unit, axis):
return value
@staticmethod
- def axisinfo(unit, axis):
+ def axisinfo(unit, axis) -> Optional[units.AxisInfo]:
if unit != "time":
return None
@@ -294,7 +294,7 @@ def try_parse(values):
return values
@staticmethod
- def axisinfo(unit, axis):
+ def axisinfo(unit: Optional[tzinfo], axis) -> units.AxisInfo:
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
@@ -473,7 +473,7 @@ def _get_default_annual_spacing(nyears) -> Tuple[int, int]:
return (min_spacing, maj_spacing)
-def period_break(dates, period):
+def period_break(dates: PeriodIndex, period: str) -> np.ndarray:
"""
Returns the indices where the given period changes.
@@ -489,7 +489,7 @@ def period_break(dates, period):
return np.nonzero(current - previous)[0]
-def has_level_label(label_flags, vmin):
+def has_level_label(label_flags: np.ndarray, vmin: float) -> bool:
"""
Returns true if the ``label_flags`` indicate there is at least one label
for this level.
@@ -984,18 +984,24 @@ class TimeSeries_DateFormatter(Formatter):
----------
freq : {int, string}
Valid frequency specifier.
- minor_locator : {False, True}
+ minor_locator : bool, default False
Whether the current formatter should apply to minor ticks (True) or
major ticks (False).
- dynamic_mode : {True, False}
+ dynamic_mode : bool, default True
Whether the formatter works in dynamic mode or not.
"""
- def __init__(self, freq, minor_locator=False, dynamic_mode=True, plot_obj=None):
+ def __init__(
+ self,
+ freq,
+ minor_locator: bool = False,
+ dynamic_mode: bool = True,
+ plot_obj=None,
+ ):
freq = to_offset(freq)
self.format = None
self.freq = freq
- self.locs = []
+ self.locs: List[Any] = [] # unused, for matplotlib compat
self.formatdict = None
self.isminor = minor_locator
self.isdynamic = dynamic_mode
| https://api.github.com/repos/pandas-dev/pandas/pulls/35978 | 2020-08-29T18:01:24Z | 2020-08-30T13:23:45Z | 2020-08-30T13:23:45Z | 2020-08-30T15:06:17Z | |
ENH: Optimize nrows in read_excel | diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py
index 80af2cff41769..1eaccb9f2d897 100644
--- a/asv_bench/benchmarks/io/excel.py
+++ b/asv_bench/benchmarks/io/excel.py
@@ -11,7 +11,7 @@
def _generate_dataframe():
- N = 2000
+ N = 20000
C = 5
df = DataFrame(
np.random.randn(N, C),
@@ -69,5 +69,9 @@ def time_read_excel(self, engine):
fname = self.fname_odf if engine == "odf" else self.fname_excel
read_excel(fname, engine=engine)
+ def time_read_excel_nrows(self, engine):
+ fname = self.fname_odf if engine == "odf" else self.fname_excel
+ read_excel(fname, engine=engine, nrows=1)
+
from ..pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index dbc88d0b371e8..e28ecc16fcb7b 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -207,6 +207,7 @@ Performance improvements
- Performance improvements when creating Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`)
- Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`)
+- Performance improvement in `read_excel` for when ``nrows`` is much smaller than the length of the file (:issue:`33281`).
- Performance improvements when creating :meth:`pd.Series.map` from a huge dictionary (:issue:`34717`)
- Performance improvement in :meth:`GroupBy.transform` with the ``numba`` engine (:issue:`36240`)
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 65e95fd321772..e80072fad8896 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -3,12 +3,12 @@
from io import BufferedIOBase, BytesIO, RawIOBase
import os
from textwrap import fill
-from typing import Any, Mapping, Union
+from typing import Any, List, Mapping, Optional, Union
from pandas._config import config
from pandas._libs.parsers import STR_NA_VALUES
-from pandas._typing import StorageOptions
+from pandas._typing import Scalar, StorageOptions
from pandas.errors import EmptyDataError
from pandas.util._decorators import Appender, deprecate_nonkeyword_arguments
@@ -394,7 +394,14 @@ def get_sheet_by_index(self, index):
pass
@abc.abstractmethod
- def get_sheet_data(self, sheet, convert_float):
+ def get_sheet_data(
+ self,
+ sheet,
+ convert_float: bool,
+ header_nrows: int,
+ skiprows_nrows: int,
+ nrows: Optional[int],
+ ) -> List[List[Scalar]]:
pass
def parse(
@@ -450,7 +457,22 @@ def parse(
else: # assume an integer if not a string
sheet = self.get_sheet_by_index(asheetname)
- data = self.get_sheet_data(sheet, convert_float)
+ if isinstance(header, int):
+ header_nrows = header
+ elif header is None:
+ header_nrows = 0
+ else:
+ header_nrows = max(header)
+ if isinstance(skiprows, int):
+ skiprows_nrows = skiprows
+ elif skiprows is None:
+ skiprows_nrows = 0
+ else:
+ skiprows_nrows = len(skiprows)
+
+ data = self.get_sheet_data(
+ sheet, convert_float, header_nrows, skiprows_nrows, nrows
+ )
usecols = maybe_convert_usecols(usecols)
if not data:
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index ffb599cdfaaf8..6b3bf4f1375ad 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -1,4 +1,4 @@
-from typing import List, cast
+from typing import List, Optional, cast
import numpy as np
@@ -71,7 +71,14 @@ def get_sheet_by_name(self, name: str):
raise ValueError(f"sheet {name} not found")
- def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:
+ def get_sheet_data(
+ self,
+ sheet,
+ convert_float: bool,
+ header_nrows: int,
+ skiprows_nrows: int,
+ nrows: Optional[int],
+ ) -> List[List[Scalar]]:
"""
Parse an ODF Table into a list of lists
"""
@@ -87,6 +94,8 @@ def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:
table: List[List[Scalar]] = []
+ if isinstance(nrows, int):
+ sheet_rows = sheet_rows[: header_nrows + skiprows_nrows + nrows + 1]
for i, sheet_row in enumerate(sheet_rows):
sheet_cells = [x for x in sheet_row.childNodes if x.qname in cell_names]
empty_cells = 0
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index a5cadf4d93389..bc7b168eeaaa2 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -508,7 +508,14 @@ def _convert_cell(self, cell, convert_float: bool) -> Scalar:
return cell.value
- def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:
+ def get_sheet_data(
+ self,
+ sheet,
+ convert_float: bool,
+ header_nrows: int,
+ skiprows_nrows: int,
+ nrows: Optional[int],
+ ) -> List[List[Scalar]]:
data: List[List[Scalar]] = []
for row in sheet.rows:
data.append([self._convert_cell(cell, convert_float) for cell in row])
diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py
index ac94f4dd3df74..cf3dcebdff6eb 100644
--- a/pandas/io/excel/_pyxlsb.py
+++ b/pandas/io/excel/_pyxlsb.py
@@ -1,4 +1,4 @@
-from typing import List
+from typing import List, Optional
from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions
from pandas.compat._optional import import_optional_dependency
@@ -68,7 +68,14 @@ def _convert_cell(self, cell, convert_float: bool) -> Scalar:
return cell.v
- def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:
+ def get_sheet_data(
+ self,
+ sheet,
+ convert_float: bool,
+ header_nrows: int,
+ skiprows_nrows: int,
+ nrows: Optional[int],
+ ) -> List[List[Scalar]]:
return [
[self._convert_cell(c, convert_float) for c in r]
for r in sheet.rows(sparse=False)
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index dfd5dde0329ae..e5d0d66f9570a 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -1,8 +1,9 @@
from datetime import time
+from typing import List, Optional
import numpy as np
-from pandas._typing import StorageOptions
+from pandas._typing import Scalar, StorageOptions
from pandas.compat._optional import import_optional_dependency
from pandas.io.excel._base import BaseExcelReader
@@ -49,7 +50,14 @@ def get_sheet_by_name(self, name):
def get_sheet_by_index(self, index):
return self.book.sheet_by_index(index)
- def get_sheet_data(self, sheet, convert_float):
+ def get_sheet_data(
+ self,
+ sheet,
+ convert_float: bool,
+ header_nrows: int,
+ skiprows_nrows: int,
+ nrows: Optional[int],
+ ) -> List[List[Scalar]]:
from xlrd import (
XL_CELL_BOOLEAN,
XL_CELL_DATE,
@@ -98,9 +106,14 @@ def _parse_cell(cell_contents, cell_typ):
cell_contents = val
return cell_contents
- data = []
+ data: List[List[Scalar]] = []
- for i in range(sheet.nrows):
+ sheet_nrows = sheet.nrows
+
+ if isinstance(nrows, int):
+ sheet_nrows = min(header_nrows + skiprows_nrows + nrows + 1, sheet_nrows)
+
+ for i in range(sheet_nrows):
row = [
_parse_cell(value, typ)
for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index 431a50477fccc..b312f67349658 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -1153,5 +1153,21 @@ def test_read_datetime_multiindex(self, engine, read_ext):
],
)
expected = pd.DataFrame([], columns=expected_column_index)
+ tm.assert_frame_equal(expected, actual)
+ @pytest.mark.parametrize(
+ "header, skiprows", [(1, 2), (0, 3), (1, [0, 1]), ([2], 1)]
+ )
+ @td.check_file_leaks
+ def test_header_skiprows_nrows(self, engine, read_ext, header, skiprows):
+ # GH 32727
+ data = pd.read_excel("test1" + read_ext, engine=engine)
+ expected = (
+ DataFrame(data.iloc[3:6])
+ .reset_index(drop=True)
+ .rename(columns=data.iloc[2].rename(None))
+ )
+ actual = pd.read_excel(
+ "test1" + read_ext, engine=engine, header=header, skiprows=skiprows, nrows=3
+ )
tm.assert_frame_equal(expected, actual)
| - [ ] closes #32727
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
based on #33281
----
output of asv benchmarks:
```
(pandas-dev) marco@marco-Predator-PH315-52:~/pandas-dev/asv_bench$ asv continuous -f 1.1 upstream/master optimise-nrows-excel -b excel.ReadExcel
· Creating environments..................................................................................................................................
· Discovering benchmarks
·· Uninstalling from conda-py3.8-Cython0.29.16-jinja2-matplotlib-numba-numexpr-numpy-odfpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
·· Building d0a8a687 <optimise-nrows-excel> for conda-py3.8-Cython0.29.16-jinja2-matplotlib-numba-numexpr-numpy-odfpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt....................................
·· Installing d0a8a687 <optimise-nrows-excel> into conda-py3.8-Cython0.29.16-jinja2-matplotlib-numba-numexpr-numpy-odfpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt..
· Running 4 total benchmarks (2 commits * 1 environments * 2 benchmarks)
[ 0.00%] · For pandas commit c413df6d <master> (round 1/2):
[ 0.00%] ·· Building for conda-py3.8-Cython0.29.16-jinja2-matplotlib-numba-numexpr-numpy-odfpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt....................................
[ 0.00%] ·· Benchmarking conda-py3.8-Cython0.29.16-jinja2-matplotlib-numba-numexpr-numpy-odfpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
[ 12.50%] ··· Setting up io.excel:62 ok
[ 12.50%] ··· Running (io.excel.ReadExcel.time_read_excel--)..
[ 25.00%] · For pandas commit d0a8a687 <optimise-nrows-excel> (round 1/2):
[ 25.00%] ·· Building for conda-py3.8-Cython0.29.16-jinja2-matplotlib-numba-numexpr-numpy-odfpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt..
[ 25.00%] ·· Benchmarking conda-py3.8-Cython0.29.16-jinja2-matplotlib-numba-numexpr-numpy-odfpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
[ 37.50%] ··· Setting up io.excel:62 ok
[ 37.50%] ··· Running (io.excel.ReadExcel.time_read_excel--)..
[ 50.00%] · For pandas commit d0a8a687 <optimise-nrows-excel> (round 2/2):
[ 50.00%] ·· Benchmarking conda-py3.8-Cython0.29.16-jinja2-matplotlib-numba-numexpr-numpy-odfpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
[ 62.50%] ··· Setting up io.excel:62 ok
[ 62.50%] ··· io.excel.ReadExcel.time_read_excel ok
[ 62.50%] ··· ========== ============
engine
---------- ------------
xlrd 953±6ms
openpyxl 1.66±0.03s
odf 6.02±0.02s
========== ============
[ 75.00%] ··· io.excel.ReadExcel.time_read_excel_nrows ok
[ 75.00%] ··· ========== ============
engine
---------- ------------
xlrd 878±20ms
openpyxl 1.67±0.02s
odf 4.58±0.04s
========== ============
[ 75.00%] · For pandas commit c413df6d <master> (round 2/2):
[ 75.00%] ·· Building for conda-py3.8-Cython0.29.16-jinja2-matplotlib-numba-numexpr-numpy-odfpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt..
[ 75.00%] ·· Benchmarking conda-py3.8-Cython0.29.16-jinja2-matplotlib-numba-numexpr-numpy-odfpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
[ 87.50%] ··· Setting up io.excel:62 ok
[ 87.50%] ··· io.excel.ReadExcel.time_read_excel ok
[ 87.50%] ··· ========== ============
engine
---------- ------------
xlrd 941±5ms
openpyxl 1.69±0.02s
odf 6.15±0.04s
========== ============
[100.00%] ··· io.excel.ReadExcel.time_read_excel_nrows ok
[100.00%] ··· ========== ============
engine
---------- ------------
xlrd 971±20ms
openpyxl 1.69±0.01s
odf 6.07±0.03s
========== ============
before after ratio
[c413df6d] [d0a8a687]
<master> <optimise-nrows-excel>
- 971±20ms 878±20ms 0.90 io.excel.ReadExcel.time_read_excel_nrows('xlrd')
SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.
PERFORMANCE INCREASED.
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/35974 | 2020-08-29T10:15:23Z | 2020-09-21T21:47:15Z | 2020-09-21T21:47:15Z | 2020-09-22T17:00:12Z |
ENH: implement timeszones support for read_json(orient='table') and astype() from 'object' | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 46675c336c6a3..4d7c1479bd744 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -217,6 +217,7 @@ Other enhancements
- ``Styler`` now allows direct CSS class name addition to individual data cells (:issue:`36159`)
- :meth:`Rolling.mean()` and :meth:`Rolling.sum()` use Kahan summation to calculate the mean to avoid numerical problems (:issue:`10319`, :issue:`11645`, :issue:`13254`, :issue:`32761`, :issue:`36031`)
- :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with datetimelike dtypes will now try to cast string arguments (listlike and scalar) to the matching datetimelike type (:issue:`36346`)
+-
- Added methods :meth:`IntegerArray.prod`, :meth:`IntegerArray.min`, and :meth:`IntegerArray.max` (:issue:`33790`)
- Where possible :meth:`RangeIndex.difference` and :meth:`RangeIndex.symmetric_difference` will return :class:`RangeIndex` instead of :class:`Int64Index` (:issue:`36564`)
- Added :meth:`Rolling.sem()` and :meth:`Expanding.sem()` to compute the standard error of mean (:issue:`26476`).
@@ -388,6 +389,8 @@ Datetimelike
- Bug in :class:`DatetimeIndex.shift` incorrectly raising when shifting empty indexes (:issue:`14811`)
- :class:`Timestamp` and :class:`DatetimeIndex` comparisons between timezone-aware and timezone-naive objects now follow the standard library ``datetime`` behavior, returning ``True``/``False`` for ``!=``/``==`` and raising for inequality comparisons (:issue:`28507`)
- Bug in :meth:`DatetimeIndex.equals` and :meth:`TimedeltaIndex.equals` incorrectly considering ``int64`` indexes as equal (:issue:`36744`)
+- :meth:`to_json` and :meth:`read_json` now implements timezones parsing when orient structure is 'table'.
+- :meth:`astype` now attempts to convert to 'datetime64[ns, tz]' directly from 'object' with inferred timezone from string (:issue:`35973`).
- Bug in :meth:`TimedeltaIndex.sum` and :meth:`Series.sum` with ``timedelta64`` dtype on an empty index or series returning ``NaT`` instead of ``Timedelta(0)`` (:issue:`31751`)
Timedelta
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index a1050f4271e05..2b3cd2b51884c 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -1970,7 +1970,13 @@ def sequence_to_dt64ns(
data, inferred_tz = objects_to_datetime64ns(
data, dayfirst=dayfirst, yearfirst=yearfirst
)
- tz = _maybe_infer_tz(tz, inferred_tz)
+ if tz and inferred_tz:
+ # two timezones: convert to intended from base UTC repr
+ data = tzconversion.tz_convert_from_utc(data.view("i8"), tz)
+ data = data.view(DT64NS_DTYPE)
+ elif inferred_tz:
+ tz = inferred_tz
+
data_dtype = data.dtype
# `data` may have originally been a Categorical[datetime64[ns, tz]],
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 288bc0adc5162..088e81b184192 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -262,7 +262,9 @@ def __init__(
# NotImplemented on a column MultiIndex
if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
- raise NotImplementedError("orient='table' is not supported for MultiIndex")
+ raise NotImplementedError(
+ "orient='table' is not supported for MultiIndex columns"
+ )
# TODO: Do this timedelta properly in objToJSON.c See GH #15137
if (
diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py
index 2b4c86b3c4406..0499a35296490 100644
--- a/pandas/io/json/_table_schema.py
+++ b/pandas/io/json/_table_schema.py
@@ -323,10 +323,6 @@ def parse_table_schema(json, precise_float):
for field in table["schema"]["fields"]
}
- # Cannot directly use as_type with timezone data on object; raise for now
- if any(str(x).startswith("datetime64[ns, ") for x in dtypes.values()):
- raise NotImplementedError('table="orient" can not yet read timezone data')
-
# No ISO constructor for Timedelta as of yet, so need to raise
if "timedelta64" in dtypes.values():
raise NotImplementedError(
diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py
index d3f256259b15f..f05c90f37ea8a 100644
--- a/pandas/tests/frame/methods/test_astype.py
+++ b/pandas/tests/frame/methods/test_astype.py
@@ -587,3 +587,27 @@ def test_astype_ignores_errors_for_extension_dtypes(self, df, errors):
msg = "(Cannot cast)|(could not convert)"
with pytest.raises((ValueError, TypeError), match=msg):
df.astype(float, errors=errors)
+
+ def test_astype_tz_conversion(self):
+ # GH 35973
+ val = {"tz": date_range("2020-08-30", freq="d", periods=2, tz="Europe/London")}
+ df = DataFrame(val)
+ result = df.astype({"tz": "datetime64[ns, Europe/Berlin]"})
+
+ expected = df
+ expected["tz"] = expected["tz"].dt.tz_convert("Europe/Berlin")
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("tz", ["UTC", "Europe/Berlin"])
+ def test_astype_tz_object_conversion(self, tz):
+ # GH 35973
+ val = {"tz": date_range("2020-08-30", freq="d", periods=2, tz="Europe/London")}
+ expected = DataFrame(val)
+
+ # convert expected to object dtype from other tz str (independently tested)
+ result = expected.astype({"tz": f"datetime64[ns, {tz}]"})
+ result = result.astype({"tz": "object"})
+
+ # do real test: object dtype to a specified tz, different from construction tz.
+ result = result.astype({"tz": "datetime64[ns, Europe/London]"})
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index 6e35b224ef4c3..dba4b9214e50c 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -676,6 +676,11 @@ class TestTableOrientReader:
{"floats": [1.0, 2.0, 3.0, 4.0]},
{"floats": [1.1, 2.2, 3.3, 4.4]},
{"bools": [True, False, False, True]},
+ {
+ "timezones": pd.date_range(
+ "2016-01-01", freq="d", periods=4, tz="US/Central"
+ ) # added in # GH 35973
+ },
],
)
@pytest.mark.skipif(sys.version_info[:3] == (3, 7, 0), reason="GH-35309")
@@ -686,22 +691,59 @@ def test_read_json_table_orient(self, index_nm, vals, recwarn):
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("index_nm", [None, "idx", "index"])
+ @pytest.mark.parametrize(
+ "vals",
+ [{"timedeltas": pd.timedelta_range("1H", periods=4, freq="T")}],
+ )
+ def test_read_json_table_orient_raises(self, index_nm, vals, recwarn):
+ df = DataFrame(vals, index=pd.Index(range(4), name=index_nm))
+ out = df.to_json(orient="table")
+ with pytest.raises(NotImplementedError, match="can not yet read "):
+ pd.read_json(out, orient="table")
+
+ @pytest.mark.parametrize(
+ "idx",
+ [
+ pd.Index(range(4)),
+ pd.Index(
+ pd.date_range(
+ "2020-08-30",
+ freq="d",
+ periods=4,
+ ),
+ freq=None,
+ ),
+ pd.Index(
+ pd.date_range("2020-08-30", freq="d", periods=4, tz="US/Central"),
+ freq=None,
+ ),
+ pd.MultiIndex.from_product(
+ [
+ pd.date_range("2020-08-30", freq="d", periods=2, tz="US/Central"),
+ ["x", "y"],
+ ],
+ ),
+ ],
+ )
@pytest.mark.parametrize(
"vals",
[
- {"timedeltas": pd.timedelta_range("1H", periods=4, freq="T")},
+ {"floats": [1.1, 2.2, 3.3, 4.4]},
+ {"dates": pd.date_range("2020-08-30", freq="d", periods=4)},
{
"timezones": pd.date_range(
- "2016-01-01", freq="d", periods=4, tz="US/Central"
+ "2020-08-30", freq="d", periods=4, tz="Europe/London"
)
},
],
)
- def test_read_json_table_orient_raises(self, index_nm, vals, recwarn):
- df = DataFrame(vals, index=pd.Index(range(4), name=index_nm))
+ @pytest.mark.skipif(sys.version_info[:3] == (3, 7, 0), reason="GH-35309")
+ def test_read_json_table_timezones_orient(self, idx, vals, recwarn):
+ # GH 35973
+ df = DataFrame(vals, index=idx)
out = df.to_json(orient="table")
- with pytest.raises(NotImplementedError, match="can not yet read "):
- pd.read_json(out, orient="table")
+ result = pd.read_json(out, orient="table")
+ tm.assert_frame_equal(df, result)
def test_comprehensive(self):
df = DataFrame(
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Currently timezones raise a `NotImplementedError` when using the `read_json(orient='table')` method.
This PR aims to fix what I believe is a fairly common request (numerous workarounds and questions exist on StackOverflow).
The PR aims to reconstitute DataFrames via json columns with timezones, Index with timezones or MultiIndex with timezones and/or combinations. | https://api.github.com/repos/pandas-dev/pandas/pulls/35973 | 2020-08-29T07:45:26Z | 2020-11-04T03:00:06Z | 2020-11-04T03:00:06Z | 2020-11-22T08:35:29Z |
TYP: annotate plotting._matplotlib.tools | diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index 26b25597ce1a6..4d643ffb734e4 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -1,6 +1,6 @@
# being a bit too dynamic
from math import ceil
-from typing import TYPE_CHECKING, Tuple
+from typing import TYPE_CHECKING, Iterable, List, Sequence, Tuple, Union
import warnings
import matplotlib.table
@@ -15,10 +15,13 @@
from pandas.plotting._matplotlib import compat
if TYPE_CHECKING:
+ from matplotlib.axes import Axes
+ from matplotlib.axis import Axis
+ from matplotlib.lines import Line2D # noqa:F401
from matplotlib.table import Table
-def format_date_labels(ax, rot):
+def format_date_labels(ax: "Axes", rot):
# mini version of autofmt_xdate
for label in ax.get_xticklabels():
label.set_ha("right")
@@ -278,7 +281,7 @@ def _subplots(
return fig, axes
-def _remove_labels_from_axis(axis):
+def _remove_labels_from_axis(axis: "Axis"):
for t in axis.get_majorticklabels():
t.set_visible(False)
@@ -294,7 +297,15 @@ def _remove_labels_from_axis(axis):
axis.get_label().set_visible(False)
-def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
+def _handle_shared_axes(
+ axarr: Iterable["Axes"],
+ nplots: int,
+ naxes: int,
+ nrows: int,
+ ncols: int,
+ sharex: bool,
+ sharey: bool,
+):
if nplots > 1:
if compat._mpl_ge_3_2_0():
row_num = lambda x: x.get_subplotspec().rowspan.start
@@ -340,7 +351,7 @@ def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
_remove_labels_from_axis(ax.yaxis)
-def _flatten(axes):
+def _flatten(axes: Union["Axes", Sequence["Axes"]]) -> Sequence["Axes"]:
if not is_list_like(axes):
return np.array([axes])
elif isinstance(axes, (np.ndarray, ABCIndexClass)):
@@ -348,7 +359,13 @@ def _flatten(axes):
return np.array(axes)
-def _set_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None):
+def _set_ticks_props(
+ axes: Union["Axes", Sequence["Axes"]],
+ xlabelsize=None,
+ xrot=None,
+ ylabelsize=None,
+ yrot=None,
+):
import matplotlib.pyplot as plt
for ax in _flatten(axes):
@@ -363,7 +380,7 @@ def _set_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=Non
return axes
-def _get_all_lines(ax):
+def _get_all_lines(ax: "Axes") -> List["Line2D"]:
lines = ax.get_lines()
if hasattr(ax, "right_ax"):
@@ -375,7 +392,7 @@ def _get_all_lines(ax):
return lines
-def _get_xlim(lines) -> Tuple[float, float]:
+def _get_xlim(lines: Iterable["Line2D"]) -> Tuple[float, float]:
left, right = np.inf, -np.inf
for l in lines:
x = l.get_xdata(orig=False)
| Same idea as #35960, focused on clarifying Axis vs Axes | https://api.github.com/repos/pandas-dev/pandas/pulls/35968 | 2020-08-28T22:30:05Z | 2020-08-31T10:15:05Z | 2020-08-31T10:15:05Z | 2020-08-31T14:45:45Z |
BUG: instantiation using a dict with a period scalar | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index ff9e803b4990a..7f1b0c88c83e1 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -333,7 +333,7 @@ Sparse
ExtensionArray
^^^^^^^^^^^^^^
--
+- Fixed Bug where :class:`DataFrame` column set to scalar extension type via a dict instantion was considered an object type rather than the extension type (:issue:`35965`)
-
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 3812c306b8eb4..0993328aef8de 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -472,7 +472,7 @@ def sanitize_array(
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
- dtype, value = infer_dtype_from_scalar(value)
+ dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 7c5aafcbbc7e9..e87e944672eea 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -709,7 +709,6 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj,
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
- val = val.ordinal
elif lib.is_interval(val):
subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0]
dtype = IntervalDtype(subtype=subtype)
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 419ff81a2a478..7aada1e6eda48 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -612,6 +612,8 @@ def _maybe_convert_i8(self, key):
if scalar:
# Timestamp/Timedelta
key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True)
+ if lib.is_period(key):
+ key_i8 = key.ordinal
else:
# DatetimeIndex/TimedeltaIndex
key_dtype, key_i8 = key.dtype, Index(key.asi8)
diff --git a/pandas/tests/dtypes/cast/test_infer_dtype.py b/pandas/tests/dtypes/cast/test_infer_dtype.py
index 70d38aad951cc..157adacbdfdf7 100644
--- a/pandas/tests/dtypes/cast/test_infer_dtype.py
+++ b/pandas/tests/dtypes/cast/test_infer_dtype.py
@@ -84,13 +84,11 @@ def test_infer_dtype_from_period(freq, pandas_dtype):
if pandas_dtype:
exp_dtype = f"period[{freq}]"
- exp_val = p.ordinal
else:
exp_dtype = np.object_
- exp_val = p
assert dtype == exp_dtype
- assert val == exp_val
+ assert val == p
@pytest.mark.parametrize(
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 0d1004809f7f1..eb334e811c5a4 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -717,6 +717,24 @@ def test_constructor_period_dict(self):
assert df["a"].dtype == a.dtype
assert df["b"].dtype == b.dtype
+ @pytest.mark.parametrize(
+ "data,dtype",
+ [
+ (pd.Period("2012-01", freq="M"), "period[M]"),
+ (pd.Period("2012-02-01", freq="D"), "period[D]"),
+ (Interval(left=0, right=5), IntervalDtype("int64")),
+ (Interval(left=0.1, right=0.5), IntervalDtype("float64")),
+ ],
+ )
+ def test_constructor_period_dict_scalar(self, data, dtype):
+ # scalar periods
+ df = DataFrame({"a": data}, index=[0])
+ assert df["a"].dtype == dtype
+
+ expected = DataFrame(index=[0], columns=["a"], data=data)
+
+ tm.assert_frame_equal(df, expected)
+
@pytest.mark.parametrize(
"data,dtype",
[
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index ce078059479b4..0fb8c5955a2e7 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -8,16 +8,23 @@
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
-from pandas.core.dtypes.dtypes import CategoricalDtype
+from pandas.core.dtypes.dtypes import (
+ CategoricalDtype,
+ DatetimeTZDtype,
+ IntervalDtype,
+ PeriodDtype,
+)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
+ Interval,
IntervalIndex,
MultiIndex,
NaT,
+ Period,
Series,
Timestamp,
date_range,
@@ -1075,6 +1082,26 @@ def test_constructor_dict_order(self):
expected = Series([1, 0, 2], index=list("bac"))
tm.assert_series_equal(result, expected)
+ @pytest.mark.parametrize(
+ "data,dtype",
+ [
+ (Period("2020-01"), PeriodDtype("M")),
+ (Interval(left=0, right=5), IntervalDtype("int64")),
+ (
+ Timestamp("2011-01-01", tz="US/Eastern"),
+ DatetimeTZDtype(tz="US/Eastern"),
+ ),
+ ],
+ )
+ def test_constructor_dict_extension(self, data, dtype):
+ d = {"a": data}
+ result = Series(d, index=["a"])
+ expected = Series(data, index=["a"], dtype=dtype)
+
+ assert result.dtype == dtype
+
+ tm.assert_series_equal(result, expected)
+
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
# GH 18480
| - [x] closes #35965
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Fixing bug discussed in [issue 35965](https://github.com/pandas-dev/pandas/issues/35965) where `pd.DataFrame({'a': pd.Period('2020-01')})` created `a` as an object column instead of a `period[m]` column.
Changing the functionality of `infer_dtype_from_scalar` isn't necessarily required here, but the fact that `infer_dtype_from_scalar` would return the `period.ordinal` value seems inconsistent with the behavior for other dtypes in this function. Additionally, that functionality was only used in a single place within the code (`interval.py`), which I fixed accordingly. | https://api.github.com/repos/pandas-dev/pandas/pulls/35966 | 2020-08-28T21:15:57Z | 2020-09-11T13:03:04Z | 2020-09-11T13:03:03Z | 2020-10-10T15:07:02Z |
BUG/CLN: Decouple Series/DataFrame.transform | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index bce6a735b7b07..8864469eaf858 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -342,6 +342,7 @@ Other
^^^^^
- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`)
- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`)
+- Bug in :meth:`Series.transform` would give incorrect results or raise when the argument ``func`` was dictionary (:issue:`35811`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py
index 7ca68d8289bd5..8b74fe01d0dc0 100644
--- a/pandas/core/aggregation.py
+++ b/pandas/core/aggregation.py
@@ -18,9 +18,10 @@
Union,
)
-from pandas._typing import AggFuncType, FrameOrSeries, Label
+from pandas._typing import AggFuncType, Axis, FrameOrSeries, Label
from pandas.core.dtypes.common import is_dict_like, is_list_like
+from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.base import SpecificationError
import pandas.core.common as com
@@ -384,3 +385,98 @@ def validate_func_kwargs(
if not columns:
raise TypeError(no_arg_message)
return columns, func
+
+
+def transform(
+ obj: FrameOrSeries, func: AggFuncType, axis: Axis, *args, **kwargs,
+) -> FrameOrSeries:
+ """
+ Transform a DataFrame or Series
+
+ Parameters
+ ----------
+ obj : DataFrame or Series
+ Object to compute the transform on.
+ func : string, function, list, or dictionary
+ Function(s) to compute the transform with.
+ axis : {0 or 'index', 1 or 'columns'}
+ Axis along which the function is applied:
+
+ * 0 or 'index': apply function to each column.
+ * 1 or 'columns': apply function to each row.
+
+ Returns
+ -------
+ DataFrame or Series
+ Result of applying ``func`` along the given axis of the
+ Series or DataFrame.
+
+ Raises
+ ------
+ ValueError
+ If the transform function fails or does not transform.
+ """
+ from pandas.core.reshape.concat import concat
+
+ is_series = obj.ndim == 1
+
+ if obj._get_axis_number(axis) == 1:
+ assert not is_series
+ return transform(obj.T, func, 0, *args, **kwargs).T
+
+ if isinstance(func, list):
+ if is_series:
+ func = {com.get_callable_name(v) or v: v for v in func}
+ else:
+ func = {col: func for col in obj}
+
+ if isinstance(func, dict):
+ if not is_series:
+ cols = sorted(set(func.keys()) - set(obj.columns))
+ if len(cols) > 0:
+ raise SpecificationError(f"Column(s) {cols} do not exist")
+
+ if any(isinstance(v, dict) for v in func.values()):
+ # GH 15931 - deprecation of renaming keys
+ raise SpecificationError("nested renamer is not supported")
+
+ results = {}
+ for name, how in func.items():
+ colg = obj._gotitem(name, ndim=1)
+ try:
+ results[name] = transform(colg, how, 0, *args, **kwargs)
+ except Exception as e:
+ if str(e) == "Function did not transform":
+ raise e
+
+ # combine results
+ if len(results) == 0:
+ raise ValueError("Transform function failed")
+ return concat(results, axis=1)
+
+ # func is either str or callable
+ try:
+ if isinstance(func, str):
+ result = obj._try_aggregate_string_function(func, *args, **kwargs)
+ else:
+ f = obj._get_cython_func(func)
+ if f and not args and not kwargs:
+ result = getattr(obj, f)()
+ else:
+ try:
+ result = obj.apply(func, args=args, **kwargs)
+ except Exception:
+ result = func(obj, *args, **kwargs)
+ except Exception:
+ raise ValueError("Transform function failed")
+
+ # Functions that transform may return empty Series/DataFrame
+ # when the dtype is not appropriate
+ if isinstance(result, (ABCSeries, ABCDataFrame)) and result.empty:
+ raise ValueError("Transform function failed")
+ if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
+ obj.index
+ ):
+ raise ValueError("Function did not transform")
+
+ return result
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 1926803d8f04b..a688302b99724 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -4,7 +4,7 @@
import builtins
import textwrap
-from typing import Any, Dict, FrozenSet, List, Optional, Union
+from typing import Any, Callable, Dict, FrozenSet, List, Optional, Union
import numpy as np
@@ -560,7 +560,7 @@ def _aggregate_multiple_funcs(self, arg, _axis):
) from err
return result
- def _get_cython_func(self, arg: str) -> Optional[str]:
+ def _get_cython_func(self, arg: Callable) -> Optional[str]:
"""
if we define an internal function for this argument, return it
"""
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b03593ad8afe1..1e5360f39a75e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -45,6 +45,7 @@
from pandas._libs import algos as libalgos, lib, properties
from pandas._libs.lib import no_default
from pandas._typing import (
+ AggFuncType,
ArrayLike,
Axes,
Axis,
@@ -116,7 +117,7 @@
from pandas.core import algorithms, common as com, nanops, ops
from pandas.core.accessor import CachedAccessor
-from pandas.core.aggregation import reconstruct_func, relabel_result
+from pandas.core.aggregation import reconstruct_func, relabel_result, transform
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray
from pandas.core.arrays.sparse import SparseFrameAccessor
@@ -7461,15 +7462,16 @@ def _aggregate(self, arg, axis=0, *args, **kwargs):
agg = aggregate
@doc(
- NDFrame.transform,
+ _shared_docs["transform"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
)
- def transform(self, func, axis=0, *args, **kwargs) -> DataFrame:
- axis = self._get_axis_number(axis)
- if axis == 1:
- return self.T.transform(func, *args, **kwargs).T
- return super().transform(func, *args, **kwargs)
+ def transform(
+ self, func: AggFuncType, axis: Axis = 0, *args, **kwargs
+ ) -> DataFrame:
+ result = transform(self, func, axis, *args, **kwargs)
+ assert isinstance(result, DataFrame)
+ return result
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
"""
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index fffd2e068ebcf..9ed9db801d0a8 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10648,80 +10648,6 @@ def ewm(
times=times,
)
- @doc(klass=_shared_doc_kwargs["klass"], axis="")
- def transform(self, func, *args, **kwargs):
- """
- Call ``func`` on self producing a {klass} with transformed values.
-
- Produced {klass} will have same axis length as self.
-
- Parameters
- ----------
- func : function, str, list or dict
- Function to use for transforming the data. If a function, must either
- work when passed a {klass} or when passed to {klass}.apply.
-
- Accepted combinations are:
-
- - function
- - string function name
- - list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- - dict of axis labels -> functions, function names or list of such.
- {axis}
- *args
- Positional arguments to pass to `func`.
- **kwargs
- Keyword arguments to pass to `func`.
-
- Returns
- -------
- {klass}
- A {klass} that must have the same length as self.
-
- Raises
- ------
- ValueError : If the returned {klass} has a different length than self.
-
- See Also
- --------
- {klass}.agg : Only perform aggregating type operations.
- {klass}.apply : Invoke function on a {klass}.
-
- Examples
- --------
- >>> df = pd.DataFrame({{'A': range(3), 'B': range(1, 4)}})
- >>> df
- A B
- 0 0 1
- 1 1 2
- 2 2 3
- >>> df.transform(lambda x: x + 1)
- A B
- 0 1 2
- 1 2 3
- 2 3 4
-
- Even though the resulting {klass} must have the same length as the
- input {klass}, it is possible to provide several input functions:
-
- >>> s = pd.Series(range(3))
- >>> s
- 0 0
- 1 1
- 2 2
- dtype: int64
- >>> s.transform([np.sqrt, np.exp])
- sqrt exp
- 0 0.000000 1.000000
- 1 1.000000 2.718282
- 2 1.414214 7.389056
- """
- result = self.agg(func, *args, **kwargs)
- if is_scalar(result) or len(result) != len(self):
- raise ValueError("transforms cannot produce aggregated results")
-
- return result
-
# ----------------------------------------------------------------------
# Misc methods
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 6cbd93135a2ca..632b93cdcf24b 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -25,6 +25,7 @@
from pandas._libs import lib, properties, reshape, tslibs
from pandas._libs.lib import no_default
from pandas._typing import (
+ AggFuncType,
ArrayLike,
Axis,
DtypeObj,
@@ -89,6 +90,7 @@
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexing import check_bool_indexer
from pandas.core.internals import SingleBlockManager
+from pandas.core.shared_docs import _shared_docs
from pandas.core.sorting import ensure_key_mapped
from pandas.core.strings import StringMethods
from pandas.core.tools.datetimes import to_datetime
@@ -4081,14 +4083,16 @@ def aggregate(self, func=None, axis=0, *args, **kwargs):
agg = aggregate
@doc(
- NDFrame.transform,
+ _shared_docs["transform"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
)
- def transform(self, func, axis=0, *args, **kwargs):
- # Validate the axis parameter
- self._get_axis_number(axis)
- return super().transform(func, *args, **kwargs)
+ def transform(
+ self, func: AggFuncType, axis: Axis = 0, *args, **kwargs
+ ) -> FrameOrSeriesUnion:
+ from pandas.core.aggregation import transform
+
+ return transform(self, func, axis, *args, **kwargs)
def apply(self, func, convert_dtype=True, args=(), **kwds):
"""
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index 0aaccb47efc44..244ee3aa298db 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -257,3 +257,72 @@
1 b B E 3
2 c B E 5
"""
+
+_shared_docs[
+ "transform"
+] = """\
+Call ``func`` on self producing a {klass} with transformed values.
+
+Produced {klass} will have same axis length as self.
+
+Parameters
+----------
+func : function, str, list or dict
+ Function to use for transforming the data. If a function, must either
+ work when passed a {klass} or when passed to {klass}.apply.
+
+ Accepted combinations are:
+
+ - function
+ - string function name
+ - list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
+ - dict of axis labels -> functions, function names or list of such.
+{axis}
+*args
+ Positional arguments to pass to `func`.
+**kwargs
+ Keyword arguments to pass to `func`.
+
+Returns
+-------
+{klass}
+ A {klass} that must have the same length as self.
+
+Raises
+------
+ValueError : If the returned {klass} has a different length than self.
+
+See Also
+--------
+{klass}.agg : Only perform aggregating type operations.
+{klass}.apply : Invoke function on a {klass}.
+
+Examples
+--------
+>>> df = pd.DataFrame({{'A': range(3), 'B': range(1, 4)}})
+>>> df
+ A B
+0 0 1
+1 1 2
+2 2 3
+>>> df.transform(lambda x: x + 1)
+ A B
+0 1 2
+1 2 3
+2 3 4
+
+Even though the resulting {klass} must have the same length as the
+input {klass}, it is possible to provide several input functions:
+
+>>> s = pd.Series(range(3))
+>>> s
+0 0
+1 1
+2 2
+dtype: int64
+>>> s.transform([np.sqrt, np.exp])
+ sqrt exp
+0 0.000000 1.000000
+1 1.000000 2.718282
+2 1.414214 7.389056
+"""
diff --git a/pandas/tests/frame/apply/test_frame_transform.py b/pandas/tests/frame/apply/test_frame_transform.py
index 3a345215482ed..346e60954fc13 100644
--- a/pandas/tests/frame/apply/test_frame_transform.py
+++ b/pandas/tests/frame/apply/test_frame_transform.py
@@ -1,72 +1,203 @@
import operator
+import re
import numpy as np
import pytest
-import pandas as pd
+from pandas import DataFrame, MultiIndex
import pandas._testing as tm
+from pandas.core.base import SpecificationError
+from pandas.core.groupby.base import transformation_kernels
from pandas.tests.frame.common import zip_frames
-def test_agg_transform(axis, float_frame):
- other_axis = 1 if axis in {0, "index"} else 0
+def test_transform_ufunc(axis, float_frame):
+ # GH 35964
+ with np.errstate(all="ignore"):
+ f_sqrt = np.sqrt(float_frame)
+ result = float_frame.transform(np.sqrt, axis=axis)
+ expected = f_sqrt
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("op", transformation_kernels)
+def test_transform_groupby_kernel(axis, float_frame, op):
+ # GH 35964
+ if op == "cumcount":
+ pytest.xfail("DataFrame.cumcount does not exist")
+ if op == "tshift":
+ pytest.xfail("Only works on time index and is deprecated")
+ if axis == 1 or axis == "columns":
+ pytest.xfail("GH 36308: groupby.transform with axis=1 is broken")
+
+ args = [0.0] if op == "fillna" else []
+ if axis == 0 or axis == "index":
+ ones = np.ones(float_frame.shape[0])
+ else:
+ ones = np.ones(float_frame.shape[1])
+ expected = float_frame.groupby(ones, axis=axis).transform(op, *args)
+ result = float_frame.transform(op, axis, *args)
+ tm.assert_frame_equal(result, expected)
+
+@pytest.mark.parametrize(
+ "ops, names", [([np.sqrt], ["sqrt"]), ([np.abs, np.sqrt], ["absolute", "sqrt"])]
+)
+def test_transform_list(axis, float_frame, ops, names):
+ # GH 35964
+ other_axis = 1 if axis in {0, "index"} else 0
with np.errstate(all="ignore"):
+ expected = zip_frames([op(float_frame) for op in ops], axis=other_axis)
+ if axis in {0, "index"}:
+ expected.columns = MultiIndex.from_product([float_frame.columns, names])
+ else:
+ expected.index = MultiIndex.from_product([float_frame.index, names])
+ result = float_frame.transform(ops, axis=axis)
+ tm.assert_frame_equal(result, expected)
- f_abs = np.abs(float_frame)
- f_sqrt = np.sqrt(float_frame)
- # ufunc
- result = float_frame.transform(np.sqrt, axis=axis)
- expected = f_sqrt.copy()
- tm.assert_frame_equal(result, expected)
-
- result = float_frame.transform(np.sqrt, axis=axis)
- tm.assert_frame_equal(result, expected)
-
- # list-like
- expected = f_sqrt.copy()
- if axis in {0, "index"}:
- expected.columns = pd.MultiIndex.from_product(
- [float_frame.columns, ["sqrt"]]
- )
- else:
- expected.index = pd.MultiIndex.from_product([float_frame.index, ["sqrt"]])
- result = float_frame.transform([np.sqrt], axis=axis)
- tm.assert_frame_equal(result, expected)
-
- # multiple items in list
- # these are in the order as if we are applying both
- # functions per series and then concatting
- expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
- if axis in {0, "index"}:
- expected.columns = pd.MultiIndex.from_product(
- [float_frame.columns, ["absolute", "sqrt"]]
- )
- else:
- expected.index = pd.MultiIndex.from_product(
- [float_frame.index, ["absolute", "sqrt"]]
- )
- result = float_frame.transform([np.abs, "sqrt"], axis=axis)
- tm.assert_frame_equal(result, expected)
+def test_transform_dict(axis, float_frame):
+ # GH 35964
+ if axis == 0 or axis == "index":
+ e = float_frame.columns[0]
+ expected = float_frame[[e]].transform(np.abs)
+ else:
+ e = float_frame.index[0]
+ expected = float_frame.iloc[[0]].transform(np.abs)
+ result = float_frame.transform({e: np.abs}, axis=axis)
+ tm.assert_frame_equal(result, expected)
-def test_transform_and_agg_err(axis, float_frame):
- # cannot both transform and agg
- msg = "transforms cannot produce aggregated results"
- with pytest.raises(ValueError, match=msg):
- float_frame.transform(["max", "min"], axis=axis)
+@pytest.mark.parametrize("use_apply", [True, False])
+def test_transform_udf(axis, float_frame, use_apply):
+ # GH 35964
+ # transform uses UDF either via apply or passing the entire DataFrame
+ def func(x):
+ # transform is using apply iff x is not a DataFrame
+ if use_apply == isinstance(x, DataFrame):
+ # Force transform to fallback
+ raise ValueError
+ return x + 1
- msg = "cannot combine transform and aggregation operations"
- with pytest.raises(ValueError, match=msg):
- with np.errstate(all="ignore"):
- float_frame.transform(["max", "sqrt"], axis=axis)
+ result = float_frame.transform(func, axis=axis)
+ expected = float_frame + 1
+ tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"])
def test_transform_method_name(method):
# GH 19760
- df = pd.DataFrame({"A": [-1, 2]})
+ df = DataFrame({"A": [-1, 2]})
result = df.transform(method)
expected = operator.methodcaller(method)(df)
tm.assert_frame_equal(result, expected)
+
+
+def test_transform_and_agg_err(axis, float_frame):
+ # GH 35964
+ # cannot both transform and agg
+ msg = "Function did not transform"
+ with pytest.raises(ValueError, match=msg):
+ float_frame.transform(["max", "min"], axis=axis)
+
+ msg = "Function did not transform"
+ with pytest.raises(ValueError, match=msg):
+ float_frame.transform(["max", "sqrt"], axis=axis)
+
+
+def test_agg_dict_nested_renaming_depr():
+ df = DataFrame({"A": range(5), "B": 5})
+
+ # nested renaming
+ msg = r"nested renamer is not supported"
+ with pytest.raises(SpecificationError, match=msg):
+ # mypy identifies the argument as an invalid type
+ df.transform({"A": {"foo": "min"}, "B": {"bar": "max"}})
+
+
+def test_transform_reducer_raises(all_reductions):
+ # GH 35964
+ op = all_reductions
+ df = DataFrame({"A": [1, 2, 3]})
+ msg = "Function did not transform"
+ with pytest.raises(ValueError, match=msg):
+ df.transform(op)
+ with pytest.raises(ValueError, match=msg):
+ df.transform([op])
+ with pytest.raises(ValueError, match=msg):
+ df.transform({"A": op})
+ with pytest.raises(ValueError, match=msg):
+ df.transform({"A": [op]})
+
+
+# mypy doesn't allow adding lists of different types
+# https://github.com/python/mypy/issues/5492
+@pytest.mark.parametrize("op", [*transformation_kernels, lambda x: x + 1])
+def test_transform_bad_dtype(op):
+ # GH 35964
+ df = DataFrame({"A": 3 * [object]}) # DataFrame that will fail on most transforms
+ if op in ("backfill", "shift", "pad", "bfill", "ffill"):
+ pytest.xfail("Transform function works on any datatype")
+ msg = "Transform function failed"
+ with pytest.raises(ValueError, match=msg):
+ df.transform(op)
+ with pytest.raises(ValueError, match=msg):
+ df.transform([op])
+ with pytest.raises(ValueError, match=msg):
+ df.transform({"A": op})
+ with pytest.raises(ValueError, match=msg):
+ df.transform({"A": [op]})
+
+
+@pytest.mark.parametrize("op", transformation_kernels)
+def test_transform_partial_failure(op):
+ # GH 35964
+ wont_fail = ["ffill", "bfill", "fillna", "pad", "backfill", "shift"]
+ if op in wont_fail:
+ pytest.xfail("Transform kernel is successful on all dtypes")
+ if op == "cumcount":
+ pytest.xfail("transform('cumcount') not implemented")
+ if op == "tshift":
+ pytest.xfail("Only works on time index; deprecated")
+
+ # Using object makes most transform kernels fail
+ df = DataFrame({"A": 3 * [object], "B": [1, 2, 3]})
+
+ expected = df[["B"]].transform([op])
+ result = df.transform([op])
+ tm.assert_equal(result, expected)
+
+ expected = df[["B"]].transform({"B": op})
+ result = df.transform({"B": op})
+ tm.assert_equal(result, expected)
+
+ expected = df[["B"]].transform({"B": [op]})
+ result = df.transform({"B": [op]})
+ tm.assert_equal(result, expected)
+
+
+@pytest.mark.parametrize("use_apply", [True, False])
+def test_transform_passes_args(use_apply):
+ # GH 35964
+ # transform uses UDF either via apply or passing the entire DataFrame
+ expected_args = [1, 2]
+ expected_kwargs = {"c": 3}
+
+ def f(x, a, b, c):
+ # transform is using apply iff x is not a DataFrame
+ if use_apply == isinstance(x, DataFrame):
+ # Force transform to fallback
+ raise ValueError
+ assert [a, b] == expected_args
+ assert c == expected_kwargs["c"]
+ return x
+
+ DataFrame([1]).transform(f, 0, *expected_args, **expected_kwargs)
+
+
+def test_transform_missing_columns(axis):
+ # GH 35964
+ df = DataFrame({"A": [1, 2], "B": [3, 4]})
+ match = re.escape("Column(s) ['C'] do not exist")
+ with pytest.raises(SpecificationError, match=match):
+ df.transform({"C": "cumsum"})
diff --git a/pandas/tests/series/apply/test_series_apply.py b/pandas/tests/series/apply/test_series_apply.py
index b948317f32062..827f466e23106 100644
--- a/pandas/tests/series/apply/test_series_apply.py
+++ b/pandas/tests/series/apply/test_series_apply.py
@@ -209,8 +209,8 @@ def test_transform(self, string_series):
f_abs = np.abs(string_series)
# ufunc
- expected = f_sqrt.copy()
result = string_series.apply(np.sqrt)
+ expected = f_sqrt.copy()
tm.assert_series_equal(result, expected)
# list-like
@@ -219,6 +219,9 @@ def test_transform(self, string_series):
expected.columns = ["sqrt"]
tm.assert_frame_equal(result, expected)
+ result = string_series.apply(["sqrt"])
+ tm.assert_frame_equal(result, expected)
+
# multiple items in list
# these are in the order as if we are applying both functions per
# series and then concatting
diff --git a/pandas/tests/series/apply/test_series_transform.py b/pandas/tests/series/apply/test_series_transform.py
index 8bc3d2dc4d0db..0842674da2a7d 100644
--- a/pandas/tests/series/apply/test_series_transform.py
+++ b/pandas/tests/series/apply/test_series_transform.py
@@ -1,50 +1,90 @@
import numpy as np
import pytest
-import pandas as pd
+from pandas import DataFrame, Series, concat
import pandas._testing as tm
+from pandas.core.base import SpecificationError
+from pandas.core.groupby.base import transformation_kernels
-def test_transform(string_series):
- # transforming functions
-
+def test_transform_ufunc(string_series):
+ # GH 35964
with np.errstate(all="ignore"):
f_sqrt = np.sqrt(string_series)
- f_abs = np.abs(string_series)
- # ufunc
- result = string_series.transform(np.sqrt)
- expected = f_sqrt.copy()
- tm.assert_series_equal(result, expected)
+ # ufunc
+ result = string_series.transform(np.sqrt)
+ expected = f_sqrt.copy()
+ tm.assert_series_equal(result, expected)
- # list-like
- result = string_series.transform([np.sqrt])
- expected = f_sqrt.to_frame().copy()
- expected.columns = ["sqrt"]
- tm.assert_frame_equal(result, expected)
- result = string_series.transform([np.sqrt])
- tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize("op", transformation_kernels)
+def test_transform_groupby_kernel(string_series, op):
+ # GH 35964
+ if op == "cumcount":
+ pytest.xfail("Series.cumcount does not exist")
+ if op == "tshift":
+ pytest.xfail("Only works on time index and is deprecated")
+
+ args = [0.0] if op == "fillna" else []
+ ones = np.ones(string_series.shape[0])
+ expected = string_series.groupby(ones).transform(op, *args)
+ result = string_series.transform(op, 0, *args)
+ tm.assert_series_equal(result, expected)
- result = string_series.transform(["sqrt"])
- tm.assert_frame_equal(result, expected)
- # multiple items in list
- # these are in the order as if we are applying both functions per
- # series and then concatting
- expected = pd.concat([f_sqrt, f_abs], axis=1)
- result = string_series.transform(["sqrt", "abs"])
- expected.columns = ["sqrt", "abs"]
+@pytest.mark.parametrize(
+ "ops, names", [([np.sqrt], ["sqrt"]), ([np.abs, np.sqrt], ["absolute", "sqrt"])]
+)
+def test_transform_list(string_series, ops, names):
+ # GH 35964
+ with np.errstate(all="ignore"):
+ expected = concat([op(string_series) for op in ops], axis=1)
+ expected.columns = names
+ result = string_series.transform(ops)
tm.assert_frame_equal(result, expected)
-def test_transform_and_agg_error(string_series):
+def test_transform_dict(string_series):
+ # GH 35964
+ with np.errstate(all="ignore"):
+ expected = concat([np.sqrt(string_series), np.abs(string_series)], axis=1)
+ expected.columns = ["foo", "bar"]
+ result = string_series.transform({"foo": np.sqrt, "bar": np.abs})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_transform_udf(axis, string_series):
+ # GH 35964
+ # via apply
+ def func(x):
+ if isinstance(x, Series):
+ raise ValueError
+ return x + 1
+
+ result = string_series.transform(func)
+ expected = string_series + 1
+ tm.assert_series_equal(result, expected)
+
+ # via map Series -> Series
+ def func(x):
+ if not isinstance(x, Series):
+ raise ValueError
+ return x + 1
+
+ result = string_series.transform(func)
+ expected = string_series + 1
+ tm.assert_series_equal(result, expected)
+
+
+def test_transform_wont_agg(string_series):
+ # GH 35964
# we are trying to transform with an aggregator
- msg = "transforms cannot produce aggregated results"
+ msg = "Function did not transform"
with pytest.raises(ValueError, match=msg):
string_series.transform(["min", "max"])
- msg = "cannot combine transform and aggregation operations"
+ msg = "Function did not transform"
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
string_series.transform(["sqrt", "max"])
@@ -52,8 +92,74 @@ def test_transform_and_agg_error(string_series):
def test_transform_none_to_type():
# GH34377
- df = pd.DataFrame({"a": [None]})
-
- msg = "DataFrame constructor called with incompatible data and dtype"
- with pytest.raises(TypeError, match=msg):
+ df = DataFrame({"a": [None]})
+ msg = "Transform function failed"
+ with pytest.raises(ValueError, match=msg):
df.transform({"a": int})
+
+
+def test_transform_reducer_raises(all_reductions):
+ # GH 35964
+ op = all_reductions
+ s = Series([1, 2, 3])
+ msg = "Function did not transform"
+ with pytest.raises(ValueError, match=msg):
+ s.transform(op)
+ with pytest.raises(ValueError, match=msg):
+ s.transform([op])
+ with pytest.raises(ValueError, match=msg):
+ s.transform({"A": op})
+ with pytest.raises(ValueError, match=msg):
+ s.transform({"A": [op]})
+
+
+# mypy doesn't allow adding lists of different types
+# https://github.com/python/mypy/issues/5492
+@pytest.mark.parametrize("op", [*transformation_kernels, lambda x: x + 1])
+def test_transform_bad_dtype(op):
+ # GH 35964
+ s = Series(3 * [object]) # Series that will fail on most transforms
+ if op in ("backfill", "shift", "pad", "bfill", "ffill"):
+ pytest.xfail("Transform function works on any datatype")
+ msg = "Transform function failed"
+ with pytest.raises(ValueError, match=msg):
+ s.transform(op)
+ with pytest.raises(ValueError, match=msg):
+ s.transform([op])
+ with pytest.raises(ValueError, match=msg):
+ s.transform({"A": op})
+ with pytest.raises(ValueError, match=msg):
+ s.transform({"A": [op]})
+
+
+@pytest.mark.parametrize("use_apply", [True, False])
+def test_transform_passes_args(use_apply):
+ # GH 35964
+ # transform uses UDF either via apply or passing the entire Series
+ expected_args = [1, 2]
+ expected_kwargs = {"c": 3}
+
+ def f(x, a, b, c):
+ # transform is using apply iff x is not a Series
+ if use_apply == isinstance(x, Series):
+ # Force transform to fallback
+ raise ValueError
+ assert [a, b] == expected_args
+ assert c == expected_kwargs["c"]
+ return x
+
+ Series([1]).transform(f, 0, *expected_args, **expected_kwargs)
+
+
+def test_transform_axis_1_raises():
+ # GH 35964
+ msg = "No axis named 1 for object type Series"
+ with pytest.raises(ValueError, match=msg):
+ Series([1]).transform("sum", axis=1)
+
+
+def test_transform_nested_renamer():
+ # GH 35964
+ match = "nested renamer is not supported"
+ with pytest.raises(SpecificationError, match=match):
+ Series([1]).transform({"A": {"B": ["sum"]}})
| - [x] closes #35811
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
First step toward #35725. Currently `transform` just calls `aggregate`, and so if we are to forbid `aggregate` from transforming, these need to be decoupled. Other than the bugfix (#35811), the only other behavioral change is in the error messages.
Assuming the bugfix #35811 is the correct behavior, docs/whatsnew also needs to be updated.
I wasn't sure if tests should be marked with #35725 or perhaps this PR #. Any guidance here? | https://api.github.com/repos/pandas-dev/pandas/pulls/35964 | 2020-08-28T20:33:13Z | 2020-09-12T21:36:52Z | 2020-09-12T21:36:52Z | 2020-12-03T21:44:40Z |
TYP: misc cleanup in core\generic.py | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index fea3efedb6abb..dd7b02d98ad42 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -387,7 +387,7 @@ def _get_block_manager_axis(cls, axis: Axis) -> int:
return m - axis
return axis
- def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:
+ def _get_axis_resolvers(self, axis: str) -> Dict[str, Union["Series", MultiIndex]]:
# index or columns
axis_index = getattr(self, axis)
d = dict()
@@ -417,10 +417,10 @@ def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:
d[axis] = dindex
return d
- def _get_index_resolvers(self) -> Dict[str, ABCSeries]:
+ def _get_index_resolvers(self) -> Dict[str, Union["Series", MultiIndex]]:
from pandas.core.computation.parsing import clean_column_name
- d: Dict[str, ABCSeries] = {}
+ d: Dict[str, Union["Series", MultiIndex]] = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
@@ -4703,14 +4703,15 @@ def filter(
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
- def f(x):
+ def f(x) -> bool:
+ assert like is not None # needed for mypy
return like in ensure_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
- def f(x):
+ def f(x) -> bool:
return matcher.search(ensure_str(x)) is not None
matcher = re.compile(regex)
@@ -6556,7 +6557,10 @@ def replace(
regex = True
items = list(to_replace.items())
- keys, values = zip(*items) if items else ([], [])
+ if items:
+ keys, values = zip(*items)
+ else:
+ keys, values = ([], [])
are_mappings = [is_dict_like(v) for v in values]
| pandas\core\generic.py:4707: error: Unsupported operand types for in ("Optional[str]" and "str") [operator]
pandas\core\generic.py:6559: error: 'builtins.object' object is not iterable [misc]
| https://api.github.com/repos/pandas-dev/pandas/pulls/35963 | 2020-08-28T19:43:38Z | 2020-08-29T23:57:01Z | 2020-08-29T23:57:01Z | 2020-08-30T11:17:04Z |
TYP: annotate plotting based on _get_axe_freq | diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index b490e07e43753..4d23a5e5fc249 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -1,5 +1,5 @@
import re
-from typing import List, Optional
+from typing import TYPE_CHECKING, List, Optional
import warnings
from matplotlib.artist import Artist
@@ -43,6 +43,9 @@
table,
)
+if TYPE_CHECKING:
+ from matplotlib.axes import Axes
+
class MPLPlot:
"""
@@ -1147,7 +1150,7 @@ def _plot(cls, ax, x, y, style=None, column_num=None, stacking_id=None, **kwds):
return lines
@classmethod
- def _ts_plot(cls, ax, x, data, style=None, **kwds):
+ def _ts_plot(cls, ax: "Axes", x, data, style=None, **kwds):
from pandas.plotting._matplotlib.timeseries import (
_decorate_axes,
_maybe_resample,
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py
index 193602e1baf4a..fd89a093d25a4 100644
--- a/pandas/plotting/_matplotlib/timeseries.py
+++ b/pandas/plotting/_matplotlib/timeseries.py
@@ -24,14 +24,15 @@
from pandas.tseries.frequencies import get_period_alias, is_subperiod, is_superperiod
if TYPE_CHECKING:
- from pandas import Index, Series # noqa:F401
+ from matplotlib.axes import Axes
+ from pandas import Index, Series # noqa:F401
# ---------------------------------------------------------------------
# Plotting functions and monkey patches
-def _maybe_resample(series: "Series", ax, kwargs):
+def _maybe_resample(series: "Series", ax: "Axes", kwargs):
# resample against axes freq if necessary
freq, ax_freq = _get_freq(ax, series)
@@ -74,7 +75,7 @@ def _is_sup(f1: str, f2: str) -> bool:
)
-def _upsample_others(ax, freq, kwargs):
+def _upsample_others(ax: "Axes", freq, kwargs):
legend = ax.get_legend()
lines, labels = _replot_ax(ax, freq, kwargs)
_replot_ax(ax, freq, kwargs)
@@ -97,7 +98,7 @@ def _upsample_others(ax, freq, kwargs):
ax.legend(lines, labels, loc="best", title=title)
-def _replot_ax(ax, freq, kwargs):
+def _replot_ax(ax: "Axes", freq, kwargs):
data = getattr(ax, "_plot_data", None)
# clear current axes and data
@@ -127,7 +128,7 @@ def _replot_ax(ax, freq, kwargs):
return lines, labels
-def _decorate_axes(ax, freq, kwargs):
+def _decorate_axes(ax: "Axes", freq, kwargs):
"""Initialize axes for time-series plotting"""
if not hasattr(ax, "_plot_data"):
ax._plot_data = []
@@ -143,7 +144,7 @@ def _decorate_axes(ax, freq, kwargs):
ax.date_axis_info = None
-def _get_ax_freq(ax):
+def _get_ax_freq(ax: "Axes"):
"""
Get the freq attribute of the ax object if set.
Also checks shared axes (eg when using secondary yaxis, sharex=True
@@ -174,7 +175,7 @@ def _get_period_alias(freq) -> Optional[str]:
return freq
-def _get_freq(ax, series: "Series"):
+def _get_freq(ax: "Axes", series: "Series"):
# get frequency from data
freq = getattr(series.index, "freq", None)
if freq is None:
@@ -192,7 +193,7 @@ def _get_freq(ax, series: "Series"):
return freq, ax_freq
-def _use_dynamic_x(ax, data: "FrameOrSeriesUnion") -> bool:
+def _use_dynamic_x(ax: "Axes", data: FrameOrSeriesUnion) -> bool:
freq = _get_index_freq(data.index)
ax_freq = _get_ax_freq(ax)
@@ -234,7 +235,7 @@ def _get_index_freq(index: "Index") -> Optional[BaseOffset]:
return freq
-def _maybe_convert_index(ax, data):
+def _maybe_convert_index(ax: "Axes", data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, (ABCDatetimeIndex, ABCPeriodIndex)):
@@ -264,7 +265,7 @@ def _maybe_convert_index(ax, data):
# Do we need the rest for convenience?
-def _format_coord(freq, t, y):
+def _format_coord(freq, t, y) -> str:
time_period = Period(ordinal=int(t), freq=freq)
return f"t = {time_period} y = {y:8f}"
| In some places in plotting `ax` is an Axes object and in other its an Axis object. Current goal is to pin these down.
in timeseries._get_ax_freq we call `ax.get_shared_x_axes()`, which is an Axes method that does not exist on Axis. This annotates that usage and annotates all the other places where we can infer Axes from that. | https://api.github.com/repos/pandas-dev/pandas/pulls/35960 | 2020-08-28T18:20:48Z | 2020-08-30T11:59:01Z | 2020-08-30T11:59:01Z | 2020-08-30T15:06:52Z |
Issue35925 remove more trailing commas | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 555024ad75f5e..dbc105be3c62b 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -962,12 +962,12 @@ def _get_values_tuple(self, key):
# If key is contained, would have returned by now
indexer, new_index = self.index.get_loc_level(key)
return self._constructor(self._values[indexer], index=new_index).__finalize__(
- self,
+ self
)
def _get_values(self, indexer):
try:
- return self._constructor(self._mgr.get_slice(indexer)).__finalize__(self,)
+ return self._constructor(self._mgr.get_slice(indexer)).__finalize__(self)
except ValueError:
# mpl compat if we look up e.g. ser[:, np.newaxis];
# see tests.series.timeseries.test_mpl_compat_hack
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index c57c434dd3040..1913b51a68c15 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -362,7 +362,7 @@ def var(self, bias: bool = False, *args, **kwargs):
def f(arg):
return window_aggregations.ewmcov(
- arg, arg, self.com, self.adjust, self.ignore_na, self.min_periods, bias,
+ arg, arg, self.com, self.adjust, self.ignore_na, self.min_periods, bias
)
return self._apply(f)
@@ -458,7 +458,7 @@ def _get_corr(X, Y):
def _cov(x, y):
return window_aggregations.ewmcov(
- x, y, self.com, self.adjust, self.ignore_na, self.min_periods, 1,
+ x, y, self.com, self.adjust, self.ignore_na, self.min_periods, 1
)
x_values = X._prep_values()
diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py
index 7c76a8e2a0b22..a21521f4ce8bb 100644
--- a/pandas/core/window/indexers.py
+++ b/pandas/core/window/indexers.py
@@ -40,7 +40,7 @@ class BaseIndexer:
"""Base class for window bounds calculations."""
def __init__(
- self, index_array: Optional[np.ndarray] = None, window_size: int = 0, **kwargs,
+ self, index_array: Optional[np.ndarray] = None, window_size: int = 0, **kwargs
):
"""
Parameters
@@ -105,7 +105,7 @@ def get_window_bounds(
) -> Tuple[np.ndarray, np.ndarray]:
return calculate_variable_window_bounds(
- num_values, self.window_size, min_periods, center, closed, self.index_array,
+ num_values, self.window_size, min_periods, center, closed, self.index_array
)
@@ -316,7 +316,7 @@ def get_window_bounds(
# Cannot use groupby_indicies as they might not be monotonic with the object
# we're rolling over
window_indicies = np.arange(
- window_indicies_start, window_indicies_start + len(indices),
+ window_indicies_start, window_indicies_start + len(indices)
)
window_indicies_start += len(indices)
# Extend as we'll be slicing window like [start, end)
diff --git a/pandas/core/window/numba_.py b/pandas/core/window/numba_.py
index 5d35ec7457ab0..aec294c3c84c2 100644
--- a/pandas/core/window/numba_.py
+++ b/pandas/core/window/numba_.py
@@ -57,7 +57,7 @@ def generate_numba_apply_func(
@numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
def roll_apply(
- values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int,
+ values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int
) -> np.ndarray:
result = np.empty(len(begin))
for i in loop_range(len(result)):
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index baabdf0fca29a..39fcfcbe2bff6 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -2117,7 +2117,7 @@ def count(self):
@Substitution(name="rolling")
@Appender(_shared_docs["apply"])
def apply(
- self, func, raw=False, engine=None, engine_kwargs=None, args=None, kwargs=None,
+ self, func, raw=False, engine=None, engine_kwargs=None, args=None, kwargs=None
):
return super().apply(
func,
diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py
index b40d2a57b8106..4d6f03489725f 100644
--- a/pandas/io/formats/css.py
+++ b/pandas/io/formats/css.py
@@ -20,9 +20,7 @@ def expand(self, prop, value: str):
try:
mapping = self.SIDE_SHORTHANDS[len(tokens)]
except KeyError:
- warnings.warn(
- f'Could not expand "{prop}: {value}"', CSSWarning,
- )
+ warnings.warn(f'Could not expand "{prop}: {value}"', CSSWarning)
return
for key, idx in zip(self.SIDES, mapping):
yield prop_fmt.format(key), tokens[idx]
@@ -117,10 +115,7 @@ def __call__(self, declarations_str, inherited=None):
props[prop] = self.size_to_pt(
props[prop], em_pt=font_size, conversions=self.BORDER_WIDTH_RATIOS
)
- for prop in [
- f"margin-{side}",
- f"padding-{side}",
- ]:
+ for prop in [f"margin-{side}", f"padding-{side}"]:
if prop in props:
# TODO: support %
props[prop] = self.size_to_pt(
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 81990b3d505e1..461ef6823918e 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -80,7 +80,7 @@
FloatFormatType = Union[str, Callable, "EngFormatter"]
ColspaceType = Mapping[Label, Union[str, int]]
ColspaceArgType = Union[
- str, int, Sequence[Union[str, int]], Mapping[Label, Union[str, int]],
+ str, int, Sequence[Union[str, int]], Mapping[Label, Union[str, int]]
]
common_docstring = """
@@ -741,7 +741,7 @@ def _to_str_columns(self) -> List[List[str]]:
for i, c in enumerate(frame):
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(
- fmt_values, self.justify, minimum=col_space.get(c, 0), adj=self.adj,
+ fmt_values, self.justify, minimum=col_space.get(c, 0), adj=self.adj
)
stringified.append(fmt_values)
else:
@@ -1069,7 +1069,7 @@ def _get_formatted_index(self, frame: "DataFrame") -> List[str]:
fmt_index = [
tuple(
_make_fixed_width(
- list(x), justify="left", minimum=col_space.get("", 0), adj=self.adj,
+ list(x), justify="left", minimum=col_space.get("", 0), adj=self.adj
)
)
for x in fmt_index
diff --git a/pandas/io/orc.py b/pandas/io/orc.py
index ea79efd0579e5..b556732e4d116 100644
--- a/pandas/io/orc.py
+++ b/pandas/io/orc.py
@@ -12,7 +12,7 @@
def read_orc(
- path: FilePathOrBuffer, columns: Optional[List[str]] = None, **kwargs,
+ path: FilePathOrBuffer, columns: Optional[List[str]] = None, **kwargs
) -> "DataFrame":
"""
Load an ORC object from the file path, returning a DataFrame.
| xref #35925
| https://api.github.com/repos/pandas-dev/pandas/pulls/35959 | 2020-08-28T17:55:58Z | 2020-08-28T18:39:49Z | 2020-08-28T18:39:49Z | 2020-08-28T18:40:00Z |
TYP: Remove NDFrame._add_series_or_dataframe_operations | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 606bd4cc3b52d..95bd757f1994e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9306,7 +9306,6 @@ def _AXIS_NAMES(self) -> Dict[int, str]:
DataFrame._add_numeric_operations()
-DataFrame._add_series_or_dataframe_operations()
ops.add_flex_arithmetic_methods(DataFrame)
ops.add_special_arithmetic_methods(DataFrame)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index fea3efedb6abb..8bdf0861175b2 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6,7 +6,6 @@
import operator
import pickle
import re
-from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
@@ -101,17 +100,22 @@
from pandas.core.missing import find_valid_index
from pandas.core.ops import _align_method_FRAME
from pandas.core.shared_docs import _shared_docs
+from pandas.core.window import Expanding, ExponentialMovingWindow, Rolling, Window
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
if TYPE_CHECKING:
+ from pandas._libs.tslibs import BaseOffset
+
from pandas.core.resample import Resampler
from pandas.core.series import Series # noqa: F401
+ from pandas.core.window.indexers import BaseIndexer
# goal is to be able to define the docs close to function, while still being
# able to share
+_shared_docs = {**_shared_docs}
_shared_doc_kwargs = dict(
axes="keywords for axes",
klass="Series/DataFrame",
@@ -5127,51 +5131,6 @@ def pipe(self, func, *args, **kwargs):
"""
return com.pipe(self, func, *args, **kwargs)
- _shared_docs["aggregate"] = dedent(
- """
- Aggregate using one or more operations over the specified axis.
- {versionadded}
- Parameters
- ----------
- func : function, str, list or dict
- Function to use for aggregating the data. If a function, must either
- work when passed a {klass} or when passed to {klass}.apply.
-
- Accepted combinations are:
-
- - function
- - string function name
- - list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- - dict of axis labels -> functions, function names or list of such.
- {axis}
- *args
- Positional arguments to pass to `func`.
- **kwargs
- Keyword arguments to pass to `func`.
-
- Returns
- -------
- scalar, Series or DataFrame
-
- The return can be:
-
- * scalar : when Series.agg is called with single function
- * Series : when DataFrame.agg is called with a single function
- * DataFrame : when DataFrame.agg is called with several functions
-
- Return scalar, Series or DataFrame.
- {see_also}
- Notes
- -----
- `agg` is an alias for `aggregate`. Use the alias.
-
- In pandas, agg, as most operations just ignores the missing values,
- and returns the operation only considering the values that are present.
-
- A passed user-defined-function will be passed a Series for evaluation.
- {examples}"""
- )
-
# ----------------------------------------------------------------------
# Attribute access
@@ -7448,77 +7407,6 @@ def clip(
return result
- _shared_docs[
- "groupby"
- ] = """
- Group %(klass)s using a mapper or by a Series of columns.
-
- A groupby operation involves some combination of splitting the
- object, applying a function, and combining the results. This can be
- used to group large amounts of data and compute operations on these
- groups.
-
- Parameters
- ----------
- by : mapping, function, label, or list of labels
- Used to determine the groups for the groupby.
- If ``by`` is a function, it's called on each value of the object's
- index. If a dict or Series is passed, the Series or dict VALUES
- will be used to determine the groups (the Series' values are first
- aligned; see ``.align()`` method). If an ndarray is passed, the
- values are used as-is determine the groups. A label or list of
- labels may be passed to group by the columns in ``self``. Notice
- that a tuple is interpreted as a (single) key.
- axis : {0 or 'index', 1 or 'columns'}, default 0
- Split along rows (0) or columns (1).
- level : int, level name, or sequence of such, default None
- If the axis is a MultiIndex (hierarchical), group by a particular
- level or levels.
- as_index : bool, default True
- For aggregated output, return object with group labels as the
- index. Only relevant for DataFrame input. as_index=False is
- effectively "SQL-style" grouped output.
- sort : bool, default True
- Sort group keys. Get better performance by turning this off.
- Note this does not influence the order of observations within each
- group. Groupby preserves the order of rows within each group.
- group_keys : bool, default True
- When calling apply, add group keys to index to identify pieces.
- squeeze : bool, default False
- Reduce the dimensionality of the return type if possible,
- otherwise return a consistent type.
-
- .. deprecated:: 1.1.0
-
- observed : bool, default False
- This only applies if any of the groupers are Categoricals.
- If True: only show observed values for categorical groupers.
- If False: show all values for categorical groupers.
-
- .. versionadded:: 0.23.0
- dropna : bool, default True
- If True, and if group keys contain NA values, NA values together
- with row/column will be dropped.
- If False, NA values will also be treated as the key in groups
-
- .. versionadded:: 1.1.0
-
- Returns
- -------
- %(klass)sGroupBy
- Returns a groupby object that contains information about the groups.
-
- See Also
- --------
- resample : Convenience method for frequency conversion and resampling
- of time series.
-
- Notes
- -----
- See the `user guide
- <https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
- """
-
def asfreq(
self: FrameOrSeries,
freq,
@@ -8427,35 +8315,6 @@ def ranker(data):
return ranker(data)
- _shared_docs[
- "compare"
- ] = """
- Compare to another %(klass)s and show the differences.
-
- .. versionadded:: 1.1.0
-
- Parameters
- ----------
- other : %(klass)s
- Object to compare with.
-
- align_axis : {0 or 'index', 1 or 'columns'}, default 1
- Determine which axis to align the comparison on.
-
- * 0, or 'index' : Resulting differences are stacked vertically
- with rows drawn alternately from self and other.
- * 1, or 'columns' : Resulting differences are aligned horizontally
- with columns drawn alternately from self and other.
-
- keep_shape : bool, default False
- If true, all rows and columns are kept.
- Otherwise, only the ones with different values are kept.
-
- keep_equal : bool, default False
- If true, the result keeps values that are equal.
- Otherwise, equal values are shown as NaNs.
- """
-
@Appender(_shared_docs["compare"] % _shared_doc_kwargs)
def compare(
self,
@@ -10585,45 +10444,21 @@ def mad(self, axis=None, skipna=None, level=None):
examples=_min_examples,
)
- @classmethod
- def _add_series_or_dataframe_operations(cls):
- """
- Add the series or dataframe only operations to the cls; evaluate
- the doc strings again.
- """
- from pandas.core.window import (
- Expanding,
- ExponentialMovingWindow,
- Rolling,
- Window,
- )
-
- @doc(Rolling)
- def rolling(
- self,
- window,
- min_periods=None,
- center=False,
- win_type=None,
- on=None,
- axis=0,
- closed=None,
- ):
- axis = self._get_axis_number(axis)
-
- if win_type is not None:
- return Window(
- self,
- window=window,
- min_periods=min_periods,
- center=center,
- win_type=win_type,
- on=on,
- axis=axis,
- closed=closed,
- )
+ @doc(Rolling)
+ def rolling(
+ self,
+ window: "Union[int, timedelta, BaseOffset, BaseIndexer]",
+ min_periods: Optional[int] = None,
+ center: bool_t = False,
+ win_type: Optional[str] = None,
+ on: Optional[str] = None,
+ axis: Axis = 0,
+ closed: Optional[str] = None,
+ ):
+ axis = self._get_axis_number(axis)
- return Rolling(
+ if win_type is not None:
+ return Window(
self,
window=window,
min_periods=min_periods,
@@ -10634,53 +10469,59 @@ def rolling(
closed=closed,
)
- cls.rolling = rolling
-
- @doc(Expanding)
- def expanding(self, min_periods=1, center=None, axis=0):
- axis = self._get_axis_number(axis)
- if center is not None:
- warnings.warn(
- "The `center` argument on `expanding` "
- "will be removed in the future",
- FutureWarning,
- stacklevel=2,
- )
- else:
- center = False
+ return Rolling(
+ self,
+ window=window,
+ min_periods=min_periods,
+ center=center,
+ win_type=win_type,
+ on=on,
+ axis=axis,
+ closed=closed,
+ )
- return Expanding(self, min_periods=min_periods, center=center, axis=axis)
+ @doc(Expanding)
+ def expanding(
+ self, min_periods: int = 1, center: Optional[bool_t] = None, axis: Axis = 0
+ ) -> Expanding:
+ axis = self._get_axis_number(axis)
+ if center is not None:
+ warnings.warn(
+ "The `center` argument on `expanding` will be removed in the future",
+ FutureWarning,
+ stacklevel=2,
+ )
+ else:
+ center = False
- cls.expanding = expanding
+ return Expanding(self, min_periods=min_periods, center=center, axis=axis)
- @doc(ExponentialMovingWindow)
- def ewm(
+ @doc(ExponentialMovingWindow)
+ def ewm(
+ self,
+ com: Optional[float] = None,
+ span: Optional[float] = None,
+ halflife: Optional[Union[float, TimedeltaConvertibleTypes]] = None,
+ alpha: Optional[float] = None,
+ min_periods: int = 0,
+ adjust: bool_t = True,
+ ignore_na: bool_t = False,
+ axis: Axis = 0,
+ times: Optional[Union[str, np.ndarray, FrameOrSeries]] = None,
+ ) -> ExponentialMovingWindow:
+ axis = self._get_axis_number(axis)
+ return ExponentialMovingWindow(
self,
- com=None,
- span=None,
- halflife=None,
- alpha=None,
- min_periods=0,
- adjust=True,
- ignore_na=False,
- axis=0,
- times=None,
- ):
- axis = self._get_axis_number(axis)
- return ExponentialMovingWindow(
- self,
- com=com,
- span=span,
- halflife=halflife,
- alpha=alpha,
- min_periods=min_periods,
- adjust=adjust,
- ignore_na=ignore_na,
- axis=axis,
- times=times,
- )
-
- cls.ewm = ewm
+ com=com,
+ span=span,
+ halflife=halflife,
+ alpha=alpha,
+ min_periods=min_periods,
+ adjust=adjust,
+ ignore_na=ignore_na,
+ axis=axis,
+ times=times,
+ )
@doc(klass=_shared_doc_kwargs["klass"], axis="")
def transform(self, func, *args, **kwargs):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 555024ad75f5e..a852529e9b517 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -5000,7 +5000,6 @@ def to_period(self, freq=None, copy=True) -> "Series":
Series._add_numeric_operations()
-Series._add_series_or_dataframe_operations()
# Add arithmetic!
ops.add_flex_arithmetic_methods(Series)
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index b81942f062b19..0aaccb47efc44 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -2,117 +2,258 @@
_shared_docs: Dict[str, str] = dict()
+_shared_docs[
+ "aggregate"
+] = """\
+Aggregate using one or more operations over the specified axis.
+{versionadded}
+Parameters
+----------
+func : function, str, list or dict
+ Function to use for aggregating the data. If a function, must either
+ work when passed a {klass} or when passed to {klass}.apply.
+
+ Accepted combinations are:
+
+ - function
+ - string function name
+ - list of functions and/or function names, e.g. ``[np.sum, 'mean']``
+ - dict of axis labels -> functions, function names or list of such.
+{axis}
+*args
+ Positional arguments to pass to `func`.
+**kwargs
+ Keyword arguments to pass to `func`.
+
+Returns
+-------
+scalar, Series or DataFrame
+
+ The return can be:
+
+ * scalar : when Series.agg is called with single function
+ * Series : when DataFrame.agg is called with a single function
+ * DataFrame : when DataFrame.agg is called with several functions
+
+ Return scalar, Series or DataFrame.
+{see_also}
+Notes
+-----
+`agg` is an alias for `aggregate`. Use the alias.
+
+A passed user-defined-function will be passed a Series for evaluation.
+{examples}"""
+
+_shared_docs[
+ "compare"
+] = """\
+Compare to another %(klass)s and show the differences.
+
+.. versionadded:: 1.1.0
+
+Parameters
+----------
+other : %(klass)s
+ Object to compare with.
+
+align_axis : {0 or 'index', 1 or 'columns'}, default 1
+ Determine which axis to align the comparison on.
+
+ * 0, or 'index' : Resulting differences are stacked vertically
+ with rows drawn alternately from self and other.
+ * 1, or 'columns' : Resulting differences are aligned horizontally
+ with columns drawn alternately from self and other.
+
+keep_shape : bool, default False
+ If true, all rows and columns are kept.
+ Otherwise, only the ones with different values are kept.
+
+keep_equal : bool, default False
+ If true, the result keeps values that are equal.
+ Otherwise, equal values are shown as NaNs.
+"""
+
+_shared_docs[
+ "groupby"
+] = """\
+Group %(klass)s using a mapper or by a Series of columns.
+
+A groupby operation involves some combination of splitting the
+object, applying a function, and combining the results. This can be
+used to group large amounts of data and compute operations on these
+groups.
+
+Parameters
+----------
+by : mapping, function, label, or list of labels
+ Used to determine the groups for the groupby.
+ If ``by`` is a function, it's called on each value of the object's
+ index. If a dict or Series is passed, the Series or dict VALUES
+ will be used to determine the groups (the Series' values are first
+ aligned; see ``.align()`` method). If an ndarray is passed, the
+ values are used as-is determine the groups. A label or list of
+ labels may be passed to group by the columns in ``self``. Notice
+ that a tuple is interpreted as a (single) key.
+axis : {0 or 'index', 1 or 'columns'}, default 0
+ Split along rows (0) or columns (1).
+level : int, level name, or sequence of such, default None
+ If the axis is a MultiIndex (hierarchical), group by a particular
+ level or levels.
+as_index : bool, default True
+ For aggregated output, return object with group labels as the
+ index. Only relevant for DataFrame input. as_index=False is
+ effectively "SQL-style" grouped output.
+sort : bool, default True
+ Sort group keys. Get better performance by turning this off.
+ Note this does not influence the order of observations within each
+ group. Groupby preserves the order of rows within each group.
+group_keys : bool, default True
+ When calling apply, add group keys to index to identify pieces.
+squeeze : bool, default False
+ Reduce the dimensionality of the return type if possible,
+ otherwise return a consistent type.
+
+ .. deprecated:: 1.1.0
+
+observed : bool, default False
+ This only applies if any of the groupers are Categoricals.
+ If True: only show observed values for categorical groupers.
+ If False: show all values for categorical groupers.
+
+ .. versionadded:: 0.23.0
+dropna : bool, default True
+ If True, and if group keys contain NA values, NA values together
+ with row/column will be dropped.
+ If False, NA values will also be treated as the key in groups
+
+ .. versionadded:: 1.1.0
+
+Returns
+-------
+%(klass)sGroupBy
+ Returns a groupby object that contains information about the groups.
+
+See Also
+--------
+resample : Convenience method for frequency conversion and resampling
+ of time series.
+
+Notes
+-----
+See the `user guide
+<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
+"""
_shared_docs[
"melt"
-] = """
- Unpivot a DataFrame from wide to long format, optionally leaving identifiers set.
-
- This function is useful to massage a DataFrame into a format where one
- or more columns are identifier variables (`id_vars`), while all other
- columns, considered measured variables (`value_vars`), are "unpivoted" to
- the row axis, leaving just two non-identifier columns, 'variable' and
- 'value'.
- %(versionadded)s
- Parameters
- ----------
- id_vars : tuple, list, or ndarray, optional
- Column(s) to use as identifier variables.
- value_vars : tuple, list, or ndarray, optional
- Column(s) to unpivot. If not specified, uses all columns that
- are not set as `id_vars`.
- var_name : scalar
- Name to use for the 'variable' column. If None it uses
- ``frame.columns.name`` or 'variable'.
- value_name : scalar, default 'value'
- Name to use for the 'value' column.
- col_level : int or str, optional
- If columns are a MultiIndex then use this level to melt.
- ignore_index : bool, default True
- If True, original index is ignored. If False, the original index is retained.
- Index labels will be repeated as necessary.
-
- .. versionadded:: 1.1.0
-
- Returns
- -------
- DataFrame
- Unpivoted DataFrame.
-
- See Also
- --------
- %(other)s : Identical method.
- pivot_table : Create a spreadsheet-style pivot table as a DataFrame.
- DataFrame.pivot : Return reshaped DataFrame organized
- by given index / column values.
- DataFrame.explode : Explode a DataFrame from list-like
- columns to long format.
-
- Examples
- --------
- >>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
- ... 'B': {0: 1, 1: 3, 2: 5},
- ... 'C': {0: 2, 1: 4, 2: 6}})
- >>> df
- A B C
- 0 a 1 2
- 1 b 3 4
- 2 c 5 6
-
- >>> %(caller)sid_vars=['A'], value_vars=['B'])
- A variable value
- 0 a B 1
- 1 b B 3
- 2 c B 5
-
- >>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
- A variable value
- 0 a B 1
- 1 b B 3
- 2 c B 5
- 3 a C 2
- 4 b C 4
- 5 c C 6
-
- The names of 'variable' and 'value' columns can be customized:
-
- >>> %(caller)sid_vars=['A'], value_vars=['B'],
- ... var_name='myVarname', value_name='myValname')
- A myVarname myValname
- 0 a B 1
- 1 b B 3
- 2 c B 5
-
- Original index values can be kept around:
-
- >>> %(caller)sid_vars=['A'], value_vars=['B', 'C'], ignore_index=False)
- A variable value
- 0 a B 1
- 1 b B 3
- 2 c B 5
- 0 a C 2
- 1 b C 4
- 2 c C 6
-
- If you have multi-index columns:
-
- >>> df.columns = [list('ABC'), list('DEF')]
- >>> df
- A B C
- D E F
- 0 a 1 2
- 1 b 3 4
- 2 c 5 6
-
- >>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
- A variable value
- 0 a B 1
- 1 b B 3
- 2 c B 5
-
- >>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
- (A, D) variable_0 variable_1 value
- 0 a B E 1
- 1 b B E 3
- 2 c B E 5
- """
+] = """\
+Unpivot a DataFrame from wide to long format, optionally leaving identifiers set.
+
+This function is useful to massage a DataFrame into a format where one
+or more columns are identifier variables (`id_vars`), while all other
+columns, considered measured variables (`value_vars`), are "unpivoted" to
+the row axis, leaving just two non-identifier columns, 'variable' and
+'value'.
+%(versionadded)s
+Parameters
+----------
+id_vars : tuple, list, or ndarray, optional
+ Column(s) to use as identifier variables.
+value_vars : tuple, list, or ndarray, optional
+ Column(s) to unpivot. If not specified, uses all columns that
+ are not set as `id_vars`.
+var_name : scalar
+ Name to use for the 'variable' column. If None it uses
+ ``frame.columns.name`` or 'variable'.
+value_name : scalar, default 'value'
+ Name to use for the 'value' column.
+col_level : int or str, optional
+ If columns are a MultiIndex then use this level to melt.
+ignore_index : bool, default True
+ If True, original index is ignored. If False, the original index is retained.
+ Index labels will be repeated as necessary.
+
+ .. versionadded:: 1.1.0
+
+Returns
+-------
+DataFrame
+ Unpivoted DataFrame.
+
+See Also
+--------
+%(other)s : Identical method.
+pivot_table : Create a spreadsheet-style pivot table as a DataFrame.
+DataFrame.pivot : Return reshaped DataFrame organized
+ by given index / column values.
+DataFrame.explode : Explode a DataFrame from list-like
+ columns to long format.
+
+Examples
+--------
+>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
+... 'B': {0: 1, 1: 3, 2: 5},
+... 'C': {0: 2, 1: 4, 2: 6}})
+>>> df
+ A B C
+0 a 1 2
+1 b 3 4
+2 c 5 6
+
+>>> %(caller)sid_vars=['A'], value_vars=['B'])
+ A variable value
+0 a B 1
+1 b B 3
+2 c B 5
+
+>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
+ A variable value
+0 a B 1
+1 b B 3
+2 c B 5
+3 a C 2
+4 b C 4
+5 c C 6
+
+The names of 'variable' and 'value' columns can be customized:
+
+>>> %(caller)sid_vars=['A'], value_vars=['B'],
+... var_name='myVarname', value_name='myValname')
+ A myVarname myValname
+0 a B 1
+1 b B 3
+2 c B 5
+
+Original index values can be kept around:
+
+>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'], ignore_index=False)
+ A variable value
+0 a B 1
+1 b B 3
+2 c B 5
+0 a C 2
+1 b C 4
+2 c C 6
+
+If you have multi-index columns:
+
+>>> df.columns = [list('ABC'), list('DEF')]
+>>> df
+ A B C
+ D E F
+0 a 1 2
+1 b 3 4
+2 c 5 6
+
+>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
+ A variable value
+0 a B 1
+1 b B 3
+2 c B 5
+
+>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
+ (A, D) variable_0 variable_1 value
+0 a B E 1
+1 b B E 3
+2 c B E 5
+"""
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index 51a067427e867..2f3058db4493b 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -7,9 +7,9 @@
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
-from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
from pandas.core.indexes.api import MultiIndex
+from pandas.core.shared_docs import _shared_docs
_shared_docs = dict(**_shared_docs)
_doc_template = """
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index baabdf0fca29a..f5e3587ed02d5 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -22,7 +22,7 @@
from pandas._libs.tslibs import BaseOffset, to_offset
import pandas._libs.window.aggregations as window_aggregations
-from pandas._typing import ArrayLike, Axis, FrameOrSeriesUnion, Label
+from pandas._typing import ArrayLike, Axis, FrameOrSeries, FrameOrSeriesUnion, Label
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
@@ -159,7 +159,7 @@ class _Window(PandasObject, ShallowMixin, SelectionMixin):
def __init__(
self,
- obj: FrameOrSeriesUnion,
+ obj: FrameOrSeries,
window=None,
min_periods: Optional[int] = None,
center: bool = False,
| Refactoring ``NDFrame._add series or dataframe`` class method helps with typing. | https://api.github.com/repos/pandas-dev/pandas/pulls/35957 | 2020-08-28T17:04:32Z | 2020-08-30T12:00:25Z | 2020-08-30T12:00:25Z | 2020-08-30T12:54:09Z |
Issue35925 remove trailing commas | diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 2c0d4931a7bf2..99a586f056b12 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -29,7 +29,7 @@
def concatenate_block_managers(
- mgrs_indexers, axes, concat_axis: int, copy: bool,
+ mgrs_indexers, axes, concat_axis: int, copy: bool
) -> BlockManager:
"""
Concatenate block managers into one.
@@ -76,7 +76,7 @@ def concatenate_block_managers(
b = make_block(values, placement=placement, ndim=blk.ndim)
else:
b = make_block(
- _concatenate_join_units(join_units, concat_axis, copy=copy,),
+ _concatenate_join_units(join_units, concat_axis, copy=copy),
placement=placement,
)
blocks.append(b)
@@ -339,7 +339,7 @@ def _concatenate_join_units(join_units, concat_axis, copy):
# 2D to put it a non-EA Block
concat_values = np.atleast_2d(concat_values)
else:
- concat_values = concat_compat(to_concat, axis=concat_axis,)
+ concat_values = concat_compat(to_concat, axis=concat_axis)
return concat_values
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index a5372b14d210f..67ff3b9456ccf 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -491,7 +491,7 @@ def get_axe(block, qs, axes):
values = values.take(indexer)
return SingleBlockManager(
- make_block(values, ndim=1, placement=np.arange(len(values))), axes[0],
+ make_block(values, ndim=1, placement=np.arange(len(values))), axes[0]
)
def isna(self, func) -> "BlockManager":
@@ -519,9 +519,7 @@ def where(
def setitem(self, indexer, value) -> "BlockManager":
return self.apply("setitem", indexer=indexer, value=value)
- def putmask(
- self, mask, new, align: bool = True, axis: int = 0,
- ):
+ def putmask(self, mask, new, align: bool = True, axis: int = 0):
transpose = self.ndim == 2
if align:
@@ -1923,7 +1921,7 @@ def _compare_or_regex_search(
"""
def _check_comparison_types(
- result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern],
+ result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern]
):
"""
Raises an error if the two arrays (a,b) cannot be compared.
diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py
index ae4892c720d5b..05f5f9a00ae1b 100644
--- a/pandas/core/internals/ops.py
+++ b/pandas/core/internals/ops.py
@@ -11,7 +11,7 @@
BlockPairInfo = namedtuple(
- "BlockPairInfo", ["lvals", "rvals", "locs", "left_ea", "right_ea", "rblk"],
+ "BlockPairInfo", ["lvals", "rvals", "locs", "left_ea", "right_ea", "rblk"]
)
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index e7e28798d84a2..e3f16a3ef4f90 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -1329,7 +1329,7 @@ def _zero_out_fperr(arg):
@disallow("M8", "m8")
def nancorr(
- a: np.ndarray, b: np.ndarray, method="pearson", min_periods: Optional[int] = None,
+ a: np.ndarray, b: np.ndarray, method="pearson", min_periods: Optional[int] = None
):
"""
a, b: ndarrays
diff --git a/pandas/core/ops/docstrings.py b/pandas/core/ops/docstrings.py
index 4ace873f029ae..99c2fefc97ae7 100644
--- a/pandas/core/ops/docstrings.py
+++ b/pandas/core/ops/docstrings.py
@@ -31,7 +31,7 @@ def _make_flex_doc(op_name, typ):
base_doc = _flex_doc_SERIES
if op_desc["reverse"]:
base_doc += _see_also_reverse_SERIES.format(
- reverse=op_desc["reverse"], see_also_desc=op_desc["see_also_desc"],
+ reverse=op_desc["reverse"], see_also_desc=op_desc["see_also_desc"]
)
doc_no_examples = base_doc.format(
desc=op_desc["desc"],
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 9e8fb643791f2..299b68c6e71e0 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -500,7 +500,7 @@ def get_result(self):
mgrs_indexers.append((obj._mgr, indexers))
new_data = concatenate_block_managers(
- mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy,
+ mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy
)
if not self.copy:
new_data._consolidate_inplace()
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 64a9e2dbf6d99..969ac56e41860 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -239,7 +239,7 @@ def _add_margins(
elif values:
marginal_result_set = _generate_marginal_results(
- table, data, values, rows, cols, aggfunc, observed, margins_name,
+ table, data, values, rows, cols, aggfunc, observed, margins_name
)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
@@ -308,7 +308,7 @@ def _compute_grand_margin(data, values, aggfunc, margins_name: str = "All"):
def _generate_marginal_results(
- table, data, values, rows, cols, aggfunc, observed, margins_name: str = "All",
+ table, data, values, rows, cols, aggfunc, observed, margins_name: str = "All"
):
if len(cols) > 0:
# need to "interleave" the margins
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 391313fbb5283..e81dd8f0c735c 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -81,9 +81,7 @@ class _Unstacker:
unstacked : DataFrame
"""
- def __init__(
- self, index: MultiIndex, level=-1, constructor=None,
- ):
+ def __init__(self, index: MultiIndex, level=-1, constructor=None):
if constructor is None:
constructor = DataFrame
@@ -422,7 +420,7 @@ def unstack(obj, level, fill_value=None):
if is_extension_array_dtype(obj.dtype):
return _unstack_extension_series(obj, level, fill_value)
unstacker = _Unstacker(
- obj.index, level=level, constructor=obj._constructor_expanddim,
+ obj.index, level=level, constructor=obj._constructor_expanddim
)
return unstacker.get_result(
obj.values, value_columns=None, fill_value=fill_value
@@ -436,7 +434,7 @@ def _unstack_frame(obj, level, fill_value=None):
return obj._constructor(mgr)
else:
return _Unstacker(
- obj.index, level=level, constructor=obj._constructor,
+ obj.index, level=level, constructor=obj._constructor
).get_result(obj._values, value_columns=obj.columns, fill_value=fill_value)
| xref #35925 | https://api.github.com/repos/pandas-dev/pandas/pulls/35956 | 2020-08-28T16:42:01Z | 2020-08-28T18:06:09Z | 2020-08-28T18:06:09Z | 2020-08-28T18:06:28Z |
TYP: misc cleanup in core\groupby\generic.py | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 82e629d184b19..3172fb4e0e853 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -9,7 +9,6 @@
import copy
from functools import partial
from textwrap import dedent
-import typing
from typing import (
TYPE_CHECKING,
Any,
@@ -22,6 +21,7 @@
Optional,
Sequence,
Type,
+ TypeVar,
Union,
)
import warnings
@@ -92,7 +92,7 @@
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
-ScalarResult = typing.TypeVar("ScalarResult")
+ScalarResult = TypeVar("ScalarResult")
def generate_property(name: str, klass: Type[FrameOrSeries]):
@@ -606,8 +606,8 @@ def filter(self, func, dropna=True, *args, **kwargs):
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
- def true_and_notna(x, *args, **kwargs) -> bool:
- b = wrapper(x, *args, **kwargs)
+ def true_and_notna(x) -> bool:
+ b = wrapper(x)
return b and notna(b)
try:
@@ -1210,7 +1210,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
# TODO: Remove when default dtype of empty Series is object
kwargs = first_not_none._construct_axes_dict()
backup = create_series_with_explicit_dtype(
- **kwargs, dtype_if_empty=object
+ dtype_if_empty=object, **kwargs
)
values = [x if (x is not None) else backup for x in values]
| pandas\core\groupby\generic.py:610: error: Too many arguments [call-arg]
pandas\core\groupby\generic.py:1212: error: "create_series_with_explicit_dtype" gets multiple values for keyword argument "dtype_if_empty" [misc] | https://api.github.com/repos/pandas-dev/pandas/pulls/35955 | 2020-08-28T15:40:41Z | 2020-08-28T17:00:58Z | 2020-08-28T17:00:58Z | 2020-08-28T17:58:18Z |
TYP: misc typing cleanups for #32911 | diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py
index 0131240f99cf6..72f3d81b1c662 100644
--- a/pandas/io/excel/_odswriter.py
+++ b/pandas/io/excel/_odswriter.py
@@ -42,7 +42,7 @@ def write_cells(
sheet_name: Optional[str] = None,
startrow: int = 0,
startcol: int = 0,
- freeze_panes: Optional[List] = None,
+ freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write the frame cells using odf
@@ -215,14 +215,17 @@ def _process_style(self, style: Dict[str, Any]) -> str:
self.book.styles.addElement(odf_style)
return name
- def _create_freeze_panes(self, sheet_name: str, freeze_panes: List[int]) -> None:
- """Create freeze panes in the sheet
+ def _create_freeze_panes(
+ self, sheet_name: str, freeze_panes: Tuple[int, int]
+ ) -> None:
+ """
+ Create freeze panes in the sheet.
Parameters
----------
sheet_name : str
Name of the spreadsheet
- freeze_panes : list
+ freeze_panes : tuple of (int, int)
Freeze pane location x and y
"""
from odf.config import (
| pandas\io\excel\_odswriter.py:39:5: error: Argument 5 of "write_cells" is incompatible with supertype "ExcelWriter"; supertype defines the argument type as "Optional[Tuple[int, int]]" [override]
pandas\io\excel\_odswriter.py:62:35: error: Argument 1 to "_validate_freeze_panes" has incompatible type "Optional[List[Any]]"; expected "Optional[Tuple[int, int]]" [arg-type] | https://api.github.com/repos/pandas-dev/pandas/pulls/35954 | 2020-08-28T15:26:28Z | 2020-08-29T11:39:36Z | 2020-08-29T11:39:35Z | 2020-08-29T18:23:23Z |
TYP: misc typing cleanups for #29116 | diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py
index e2374b81ca13b..7ca68d8289bd5 100644
--- a/pandas/core/aggregation.py
+++ b/pandas/core/aggregation.py
@@ -10,6 +10,7 @@
Callable,
DefaultDict,
Dict,
+ Iterable,
List,
Optional,
Sequence,
@@ -17,14 +18,14 @@
Union,
)
-from pandas._typing import AggFuncType, Label
+from pandas._typing import AggFuncType, FrameOrSeries, Label
from pandas.core.dtypes.common import is_dict_like, is_list_like
from pandas.core.base import SpecificationError
import pandas.core.common as com
from pandas.core.indexes.api import Index
-from pandas.core.series import FrameOrSeriesUnion, Series
+from pandas.core.series import Series
def reconstruct_func(
@@ -276,12 +277,13 @@ def maybe_mangle_lambdas(agg_spec: Any) -> Any:
def relabel_result(
- result: FrameOrSeriesUnion,
+ result: FrameOrSeries,
func: Dict[str, List[Union[Callable, str]]],
- columns: Tuple,
- order: List[int],
+ columns: Iterable[Label],
+ order: Iterable[int],
) -> Dict[Label, Series]:
- """Internal function to reorder result if relabelling is True for
+ """
+ Internal function to reorder result if relabelling is True for
dataframe.agg, and return the reordered result in dict.
Parameters:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 606bd4cc3b52d..fe6fb97012fac 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7415,6 +7415,12 @@ def aggregate(self, func=None, axis=0, *args, **kwargs):
if relabeling:
# This is to keep the order to columns occurrence unchanged, and also
# keep the order of new columns occurrence unchanged
+
+ # For the return values of reconstruct_func, if relabeling is
+ # False, columns and order will be None.
+ assert columns is not None
+ assert order is not None
+
result_in_dict = relabel_result(result, func, columns, order)
result = DataFrame(result_in_dict, index=columns)
| pandas\core\frame.py:7429:59: error: Argument 3 to "relabel_result" has incompatible type "Optional[List[str]]"; expected "Tuple[Any, ...]" [arg-type]
pandas\core\frame.py:7429:68: error: Argument 4 to "relabel_result" has incompatible type "Optional[List[int]]"; expected "List[int]" [arg-type] | https://api.github.com/repos/pandas-dev/pandas/pulls/35953 | 2020-08-28T15:20:55Z | 2020-08-30T17:50:38Z | 2020-08-30T17:50:38Z | 2020-08-30T18:41:14Z |
BUG: Fix DataFrame.groupby().apply() for NaN groups with dropna=False | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index e65daa439a225..aa3255e673797 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -214,7 +214,8 @@ Performance improvements
Bug fixes
~~~~~~~~~
-
+- Bug in :meth:`DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`)
+-
Categorical
^^^^^^^^^^^
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 299b68c6e71e0..9b94dae8556f6 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -11,6 +11,7 @@
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
+from pandas.core.dtypes.missing import isna
from pandas.core.arrays.categorical import (
factorize_from_iterable,
@@ -624,10 +625,11 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde
for hlevel, level in zip(zipped, levels):
to_concat = []
for key, index in zip(hlevel, indexes):
- mask = level == key
+ # Find matching codes, include matching nan values as equal.
+ mask = (isna(level) & isna(key)) | (level == key)
if not mask.any():
raise ValueError(f"Key {key} not in level {level}")
- i = np.nonzero(level == key)[0][0]
+ i = np.nonzero(mask)[0][0]
to_concat.append(np.repeat(i, len(index)))
codes_list.append(np.concatenate(to_concat))
diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py
index d1501111cb22b..66db06eeebdfb 100644
--- a/pandas/tests/groupby/test_groupby_dropna.py
+++ b/pandas/tests/groupby/test_groupby_dropna.py
@@ -274,3 +274,56 @@ def test_groupby_dropna_datetime_like_data(
expected = pd.DataFrame({"values": values}, index=pd.Index(indexes, name="dt"))
tm.assert_frame_equal(grouped, expected)
+
+
+@pytest.mark.parametrize(
+ "dropna, data, selected_data, levels",
+ [
+ pytest.param(
+ False,
+ {"groups": ["a", "a", "b", np.nan], "values": [10, 10, 20, 30]},
+ {"values": [0, 1, 0, 0]},
+ ["a", "b", np.nan],
+ id="dropna_false_has_nan",
+ ),
+ pytest.param(
+ True,
+ {"groups": ["a", "a", "b", np.nan], "values": [10, 10, 20, 30]},
+ {"values": [0, 1, 0]},
+ None,
+ id="dropna_true_has_nan",
+ ),
+ pytest.param(
+ # no nan in "groups"; dropna=True|False should be same.
+ False,
+ {"groups": ["a", "a", "b", "c"], "values": [10, 10, 20, 30]},
+ {"values": [0, 1, 0, 0]},
+ None,
+ id="dropna_false_no_nan",
+ ),
+ pytest.param(
+ # no nan in "groups"; dropna=True|False should be same.
+ True,
+ {"groups": ["a", "a", "b", "c"], "values": [10, 10, 20, 30]},
+ {"values": [0, 1, 0, 0]},
+ None,
+ id="dropna_true_no_nan",
+ ),
+ ],
+)
+def test_groupby_apply_with_dropna_for_multi_index(dropna, data, selected_data, levels):
+ # GH 35889
+
+ df = pd.DataFrame(data)
+ gb = df.groupby("groups", dropna=dropna)
+ result = gb.apply(lambda grp: pd.DataFrame({"values": range(len(grp))}))
+
+ mi_tuples = tuple(zip(data["groups"], selected_data["values"]))
+ mi = pd.MultiIndex.from_tuples(mi_tuples, names=["groups", None])
+ # Since right now, by default MI will drop NA from levels when we create MI
+ # via `from_*`, so we need to add NA for level manually afterwards.
+ if not dropna and levels:
+ mi = mi.set_levels(levels, level="groups")
+
+ expected = pd.DataFrame(selected_data, index=mi)
+ tm.assert_frame_equal(result, expected)
| - [X] closes #35889
- [X] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry in `v1.1.2.rst`
| https://api.github.com/repos/pandas-dev/pandas/pulls/35951 | 2020-08-28T09:33:10Z | 2020-09-05T03:15:05Z | 2020-09-05T03:15:04Z | 2020-09-05T03:15:11Z |
CLN remove unnecessary trailing commas to get ready for new version of black: generic -> blocks | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 2afa56b50c3c7..82e629d184b19 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -221,9 +221,7 @@ def _selection_name(self):
def apply(self, func, *args, **kwargs):
return super().apply(func, *args, **kwargs)
- @doc(
- _agg_template, examples=_agg_examples_doc, klass="Series",
- )
+ @doc(_agg_template, examples=_agg_examples_doc, klass="Series")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
@@ -935,9 +933,7 @@ class DataFrameGroupBy(GroupBy[DataFrame]):
See :ref:`groupby.aggregate.named` for more."""
)
- @doc(
- _agg_template, examples=_agg_examples_doc, klass="DataFrame",
- )
+ @doc(_agg_template, examples=_agg_examples_doc, klass="DataFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index f96b488fb8d0d..a91366af61d0d 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1077,7 +1077,7 @@ def _aggregate_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs)
tuple(args), kwargs, func, engine_kwargs
)
result = numba_agg_func(
- sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns),
+ sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns)
)
if cache_key not in NUMBA_FUNC_CACHE:
NUMBA_FUNC_CACHE[cache_key] = numba_agg_func
@@ -1595,8 +1595,7 @@ def max(self, numeric_only: bool = False, min_count: int = -1):
def first(self, numeric_only: bool = False, min_count: int = -1):
def first_compat(obj: FrameOrSeries, axis: int = 0):
def first(x: Series):
- """Helper function for first item that isn't NA.
- """
+ """Helper function for first item that isn't NA."""
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
@@ -1620,8 +1619,7 @@ def first(x: Series):
def last(self, numeric_only: bool = False, min_count: int = -1):
def last_compat(obj: FrameOrSeries, axis: int = 0):
def last(x: Series):
- """Helper function for last item that isn't NA.
- """
+ """Helper function for last item that isn't NA."""
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index c6171a55359fe..290680f380f5f 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -583,7 +583,7 @@ def transform(self, values, how: str, axis: int = 0, **kwargs):
return self._cython_operation("transform", values, how, axis, **kwargs)
def _aggregate(
- self, result, counts, values, comp_ids, agg_func, min_count: int = -1,
+ self, result, counts, values, comp_ids, agg_func, min_count: int = -1
):
if agg_func is libgroupby.group_nth:
# different signature from the others
@@ -603,9 +603,7 @@ def _transform(
return result
- def agg_series(
- self, obj: Series, func: F, *args, **kwargs,
- ):
+ def agg_series(self, obj: Series, func: F, *args, **kwargs):
# Caller is responsible for checking ngroups != 0
assert self.ngroups != 0
@@ -653,9 +651,7 @@ def _aggregate_series_fast(self, obj: Series, func: F):
result, counts = grouper.get_result()
return result, counts
- def _aggregate_series_pure_python(
- self, obj: Series, func: F, *args, **kwargs,
- ):
+ def _aggregate_series_pure_python(self, obj: Series, func: F, *args, **kwargs):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
@@ -841,9 +837,7 @@ def groupings(self) -> "List[grouper.Grouping]":
for lvl, name in zip(self.levels, self.names)
]
- def agg_series(
- self, obj: Series, func: F, *args, **kwargs,
- ):
+ def agg_series(self, obj: Series, func: F, *args, **kwargs):
# Caller is responsible for checking ngroups != 0
assert self.ngroups != 0
assert len(self.bins) > 0 # otherwise we'd get IndexError in get_result
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 0e8d7c1b866b8..efe1a853a9a76 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -81,9 +81,7 @@ def wrapper(left, right):
DatetimeLikeArrayMixin,
cache=True,
)
-@inherit_names(
- ["mean", "asi8", "freq", "freqstr", "_box_func"], DatetimeLikeArrayMixin,
-)
+@inherit_names(["mean", "asi8", "freq", "freqstr", "_box_func"], DatetimeLikeArrayMixin)
class DatetimeIndexOpsMixin(ExtensionIndex):
"""
Common ops mixin to support a unified interface datetimelike Index.
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 9281f8017761d..5d309ef7cd515 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -182,10 +182,10 @@ def func(intvidx_self, other, sort=False):
)
@inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True)
@inherit_names(
- ["__array__", "overlaps", "contains", "left", "right", "length"], IntervalArray,
+ ["__array__", "overlaps", "contains", "left", "right", "length"], IntervalArray
)
@inherit_names(
- ["is_non_overlapping_monotonic", "mid", "closed"], IntervalArray, cache=True,
+ ["is_non_overlapping_monotonic", "mid", "closed"], IntervalArray, cache=True
)
class IntervalIndex(IntervalMixin, ExtensionIndex):
_typ = "intervalindex"
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 731907993d08f..80bb9f10fadd9 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -436,7 +436,7 @@ def isin(self, values, level=None):
def _is_compatible_with_other(self, other) -> bool:
return super()._is_compatible_with_other(other) or all(
isinstance(
- obj, (ABCInt64Index, ABCFloat64Index, ABCUInt64Index, ABCRangeIndex),
+ obj, (ABCInt64Index, ABCFloat64Index, ABCUInt64Index, ABCRangeIndex)
)
for obj in [self, other]
)
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index b85e2d3947cb1..f1457a9aac62b 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -82,7 +82,7 @@ class RangeIndex(Int64Index):
# Constructors
def __new__(
- cls, start=None, stop=None, step=None, dtype=None, copy=False, name=None,
+ cls, start=None, stop=None, step=None, dtype=None, copy=False, name=None
):
cls._validate_dtype(dtype)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index c62be4f767f00..a38b47a4c2a25 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -724,7 +724,7 @@ def replace(
# _can_hold_element checks have reduced this back to the
# scalar case and we can avoid a costly object cast
return self.replace(
- to_replace[0], value, inplace=inplace, regex=regex, convert=convert,
+ to_replace[0], value, inplace=inplace, regex=regex, convert=convert
)
# GH 22083, TypeError or ValueError occurred within error handling
@@ -905,7 +905,7 @@ def setitem(self, indexer, value):
return block
def putmask(
- self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False,
+ self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False
) -> List["Block"]:
"""
putmask the data to the block; it is possible that we may create a
@@ -1292,7 +1292,7 @@ def shift(self, periods: int, axis: int = 0, fill_value=None):
return [self.make_block(new_values)]
def where(
- self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0,
+ self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0
) -> List["Block"]:
"""
evaluate the block; return result block(s) from the result
@@ -1366,7 +1366,7 @@ def where_func(cond, values, other):
# we are explicitly ignoring errors
block = self.coerce_to_target_dtype(other)
blocks = block.where(
- orig_other, cond, errors=errors, try_cast=try_cast, axis=axis,
+ orig_other, cond, errors=errors, try_cast=try_cast, axis=axis
)
return self._maybe_downcast(blocks, "infer")
@@ -1605,7 +1605,7 @@ def set(self, locs, values):
self.values = values
def putmask(
- self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False,
+ self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False
) -> List["Block"]:
"""
See Block.putmask.__doc__
@@ -1816,7 +1816,7 @@ def diff(self, n: int, axis: int = 1) -> List["Block"]:
return super().diff(n, axis)
def shift(
- self, periods: int, axis: int = 0, fill_value: Any = None,
+ self, periods: int, axis: int = 0, fill_value: Any = None
) -> List["ExtensionBlock"]:
"""
Shift the block by `periods`.
@@ -1833,7 +1833,7 @@ def shift(
]
def where(
- self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0,
+ self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0
) -> List["Block"]:
cond = _extract_bool_array(cond)
@@ -1945,7 +1945,7 @@ def _can_hold_element(self, element: Any) -> bool:
)
def to_native_types(
- self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs,
+ self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs
):
""" convert to our native types format """
values = self.values
@@ -2369,7 +2369,7 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True):
if not np.can_cast(to_replace_values, bool):
return self
return super().replace(
- to_replace, value, inplace=inplace, regex=regex, convert=convert,
+ to_replace, value, inplace=inplace, regex=regex, convert=convert
)
@@ -2453,18 +2453,18 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True):
if not either_list and is_re(to_replace):
return self._replace_single(
- to_replace, value, inplace=inplace, regex=True, convert=convert,
+ to_replace, value, inplace=inplace, regex=True, convert=convert
)
elif not (either_list or regex):
return super().replace(
- to_replace, value, inplace=inplace, regex=regex, convert=convert,
+ to_replace, value, inplace=inplace, regex=regex, convert=convert
)
elif both_lists:
for to_rep, v in zip(to_replace, value):
result_blocks = []
for b in blocks:
result = b._replace_single(
- to_rep, v, inplace=inplace, regex=regex, convert=convert,
+ to_rep, v, inplace=inplace, regex=regex, convert=convert
)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
@@ -2475,18 +2475,18 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True):
result_blocks = []
for b in blocks:
result = b._replace_single(
- to_rep, value, inplace=inplace, regex=regex, convert=convert,
+ to_rep, value, inplace=inplace, regex=regex, convert=convert
)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
return self._replace_single(
- to_replace, value, inplace=inplace, convert=convert, regex=regex,
+ to_replace, value, inplace=inplace, convert=convert, regex=regex
)
def _replace_single(
- self, to_replace, value, inplace=False, regex=False, convert=True, mask=None,
+ self, to_replace, value, inplace=False, regex=False, convert=True, mask=None
):
"""
Replace elements by the given value.
| xref #35925 | https://api.github.com/repos/pandas-dev/pandas/pulls/35950 | 2020-08-28T08:00:48Z | 2020-08-28T09:27:17Z | 2020-08-28T09:27:17Z | 2020-08-28T16:50:43Z |
CLN remove unnecessary trailing commas to get ready for new version of black: _testing -> generic | diff --git a/pandas/_testing.py b/pandas/_testing.py
index ef6232fa6d575..b402b040d9268 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -939,7 +939,7 @@ def assert_categorical_equal(
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
- left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes",
+ left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
@@ -948,9 +948,7 @@ def assert_categorical_equal(
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
- assert_index_equal(
- lc, rc, obj=f"{obj}.categories",
- )
+ assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
@@ -1092,7 +1090,7 @@ def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
- obj, f"{obj} shapes are different", left.shape, right.shape,
+ obj, f"{obj} shapes are different", left.shape, right.shape
)
diff = 0
@@ -1559,7 +1557,7 @@ def assert_frame_equal(
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
- obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}",
+ obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
)
if check_like:
@@ -2884,7 +2882,7 @@ def convert_rows_list_to_csv_str(rows_list: List[str]):
return expected
-def external_error_raised(expected_exception: Type[Exception],) -> ContextManager:
+def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index befde7c355818..2a6e983eff3ee 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -462,7 +462,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
def _factorize_array(
- values, na_sentinel: int = -1, size_hint=None, na_value=None, mask=None,
+ values, na_sentinel: int = -1, size_hint=None, na_value=None, mask=None
) -> Tuple[np.ndarray, np.ndarray]:
"""
Factorize an array-like to codes and uniques.
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 832d09b062265..2976747d66dfa 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -40,7 +40,7 @@ def take(
fill_value = self._validate_fill_value(fill_value)
new_data = take(
- self._ndarray, indices, allow_fill=allow_fill, fill_value=fill_value,
+ self._ndarray, indices, allow_fill=allow_fill, fill_value=fill_value
)
return self._from_backing_data(new_data)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index a28b341669918..27b1afdb438cb 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1505,7 +1505,7 @@ def argsort(self, ascending=True, kind="quicksort", **kwargs):
return super().argsort(ascending=ascending, kind=kind, **kwargs)
def sort_values(
- self, inplace: bool = False, ascending: bool = True, na_position: str = "last",
+ self, inplace: bool = False, ascending: bool = True, na_position: str = "last"
):
"""
Sort the Categorical by category value returning a new
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 57df067c7b16e..d83ff91a1315f 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -138,7 +138,7 @@ def __from_arrow__(
return IntegerArray._concat_same_type(results)
-def integer_array(values, dtype=None, copy: bool = False,) -> "IntegerArray":
+def integer_array(values, dtype=None, copy: bool = False) -> "IntegerArray":
"""
Infer and return an integer array of the values.
@@ -182,7 +182,7 @@ def safe_cast(values, dtype, copy: bool):
def coerce_to_array(
- values, dtype, mask=None, copy: bool = False,
+ values, dtype, mask=None, copy: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 235840d6d201e..1237dea5c1a64 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -126,7 +126,7 @@ def __invert__(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
return type(self)(~self._data, self._mask)
def to_numpy(
- self, dtype=None, copy: bool = False, na_value: Scalar = lib.no_default,
+ self, dtype=None, copy: bool = False, na_value: Scalar = lib.no_default
) -> np.ndarray:
"""
Convert to a NumPy Array.
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 05f901518d82f..23a4a70734c81 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -280,7 +280,7 @@ def isna(self) -> np.ndarray:
return isna(self._ndarray)
def fillna(
- self, value=None, method: Optional[str] = None, limit: Optional[int] = None,
+ self, value=None, method: Optional[str] = None, limit: Optional[int] = None
) -> "PandasArray":
# TODO(_values_for_fillna): remove this
value, method = validate_fillna_kwargs(value, method)
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index ddaf6d39f1837..cc39ffb5d1203 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -634,7 +634,7 @@ def _sub_period_array(self, other):
return new_values
def _addsub_int_array(
- self, other: np.ndarray, op: Callable[[Any, Any], Any],
+ self, other: np.ndarray, op: Callable[[Any, Any], Any]
) -> "PeriodArray":
"""
Add or subtract array of integers; equivalent to applying
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index e8c9f28e50084..f145e76046bee 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -514,9 +514,7 @@ def sanitize_array(
return subarr
-def _try_cast(
- arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool,
-):
+def _try_cast(arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool):
"""
Convert input to numpy ndarray and optionally cast to a given dtype.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 286da6e1de9d5..fea3efedb6abb 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -315,17 +315,13 @@ def _data(self):
@property
def _AXIS_NUMBERS(self) -> Dict[str, int]:
""".. deprecated:: 1.1.0"""
- warnings.warn(
- "_AXIS_NUMBERS has been deprecated.", FutureWarning, stacklevel=3,
- )
+ warnings.warn("_AXIS_NUMBERS has been deprecated.", FutureWarning, stacklevel=3)
return {"index": 0}
@property
def _AXIS_NAMES(self) -> Dict[int, str]:
""".. deprecated:: 1.1.0"""
- warnings.warn(
- "_AXIS_NAMES has been deprecated.", FutureWarning, stacklevel=3,
- )
+ warnings.warn("_AXIS_NAMES has been deprecated.", FutureWarning, stacklevel=3)
return {0: "index"}
def _construct_axes_dict(self, axes=None, **kwargs):
@@ -5128,7 +5124,7 @@ def pipe(self, func, *args, **kwargs):
... .pipe(g, arg1=a)
... .pipe((func, 'arg2'), arg1=a, arg3=c)
... ) # doctest: +SKIP
- """
+ """
return com.pipe(self, func, *args, **kwargs)
_shared_docs["aggregate"] = dedent(
@@ -5630,7 +5626,7 @@ def astype(
else:
# else, only a single dtype is given
- new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors,)
+ new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors)
return self._constructor(new_data).__finalize__(self, method="astype")
# GH 33113: handle empty frame or series
@@ -6520,7 +6516,7 @@ def replace(
3 b
4 b
dtype: object
- """
+ """
if not (
is_scalar(to_replace)
or is_re_compilable(to_replace)
@@ -7772,7 +7768,7 @@ def between_time(
raise TypeError("Index must be DatetimeIndex")
indexer = index.indexer_between_time(
- start_time, end_time, include_start=include_start, include_end=include_end,
+ start_time, end_time, include_start=include_start, include_end=include_end
)
return self._take_with_is_copy(indexer, axis=axis)
@@ -8939,7 +8935,7 @@ def _where(
self._check_inplace_setting(other)
new_data = self._mgr.putmask(
- mask=cond, new=other, align=align, axis=block_axis,
+ mask=cond, new=other, align=align, axis=block_axis
)
result = self._constructor(new_data)
return self._update_inplace(result)
| xref #35925
| https://api.github.com/repos/pandas-dev/pandas/pulls/35949 | 2020-08-28T06:59:47Z | 2020-08-28T09:26:44Z | 2020-08-28T09:26:44Z | 2020-08-28T16:50:38Z |
CLN: resolve UserWarning in `pandas/plotting/_matplotlib/core.py` #35945 | diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 2d64e1b051444..2d519f56738b1 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -1226,8 +1226,8 @@ def get_label(i):
if self._need_to_set_index:
xticks = ax.get_xticks()
xticklabels = [get_label(x) for x in xticks]
- ax.set_xticklabels(xticklabels)
ax.xaxis.set_major_locator(FixedLocator(xticks))
+ ax.set_xticklabels(xticklabels)
condition = (
not self._use_dynamic_x()
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index ee43e5d7072fe..9ab697cb57690 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -2796,10 +2796,12 @@ def test_table(self):
_check_plot_works(df.plot, table=True)
_check_plot_works(df.plot, table=df)
- ax = df.plot()
- assert len(ax.tables) == 0
- plotting.table(ax, df.T)
- assert len(ax.tables) == 1
+ # GH 35945 UserWarning
+ with tm.assert_produces_warning(None):
+ ax = df.plot()
+ assert len(ax.tables) == 0
+ plotting.table(ax, df.T)
+ assert len(ax.tables) == 1
def test_errorbar_scatter(self):
df = DataFrame(np.random.randn(5, 2), index=range(5), columns=["x", "y"])
| - [x] closes #35945
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/35946 | 2020-08-28T05:40:08Z | 2020-09-05T03:33:50Z | 2020-09-05T03:33:50Z | 2020-10-02T11:56:55Z |
DOC: complement the documentation for pandas.DataFrame.agg #35912 | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 606bd4cc3b52d..b1e7c8a51f52c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7376,6 +7376,15 @@ def _gotitem(
min 1.0 2.0
sum 12.0 NaN
+ Aggregate different functions over the columns and rename the index of the resulting
+ DataFrame.
+
+ >>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))
+ A B C
+ x 7.0 NaN NaN
+ y NaN 2.0 NaN
+ z NaN NaN 6.0
+
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
| - [x] closes #35912
- [x] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
<img width="676" alt="Screenshot 2020-08-28 at 11 59 23" src="https://user-images.githubusercontent.com/21543236/91519967-a71dd280-e926-11ea-87f5-e647fe650168.png">
| https://api.github.com/repos/pandas-dev/pandas/pulls/35941 | 2020-08-28T04:06:05Z | 2020-08-31T18:40:13Z | 2020-08-31T18:40:12Z | 2020-08-31T18:40:16Z |
TYP: annotations in core.groupby | diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py
index db734bb2f0c07..4d5acf527a867 100644
--- a/pandas/core/groupby/categorical.py
+++ b/pandas/core/groupby/categorical.py
@@ -1,3 +1,5 @@
+from typing import Optional, Tuple
+
import numpy as np
from pandas.core.algorithms import unique1d
@@ -6,9 +8,12 @@
CategoricalDtype,
recode_for_categories,
)
+from pandas.core.indexes.api import CategoricalIndex
-def recode_for_groupby(c: Categorical, sort: bool, observed: bool):
+def recode_for_groupby(
+ c: Categorical, sort: bool, observed: bool
+) -> Tuple[Categorical, Optional[Categorical]]:
"""
Code the categories to ensure we can groupby for categoricals.
@@ -73,7 +78,9 @@ def recode_for_groupby(c: Categorical, sort: bool, observed: bool):
return c.reorder_categories(cat.categories), None
-def recode_from_groupby(c: Categorical, sort: bool, ci):
+def recode_from_groupby(
+ c: Categorical, sort: bool, ci: CategoricalIndex
+) -> CategoricalIndex:
"""
Reverse the codes_to_groupby to account for sort / observed.
@@ -91,7 +98,8 @@ def recode_from_groupby(c: Categorical, sort: bool, ci):
"""
# we re-order to the original category orderings
if sort:
- return ci.set_categories(c.categories)
+ return ci.set_categories(c.categories) # type: ignore [attr-defined]
# we are not sorting, so add unobserved to the end
- return ci.add_categories(c.categories[~c.categories.isin(ci.categories)])
+ new_cats = c.categories[~c.categories.isin(ci.categories)]
+ return ci.add_categories(new_cats) # type: ignore [attr-defined]
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 3172fb4e0e853..e39464628ccaa 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -23,6 +23,7 @@
Type,
TypeVar,
Union,
+ cast,
)
import warnings
@@ -83,7 +84,7 @@
from pandas.plotting import boxplot_frame_groupby
if TYPE_CHECKING:
- from pandas.core.internals import Block
+ from pandas.core.internals import Block # noqa:F401
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
@@ -1591,7 +1592,7 @@ def _gotitem(self, key, ndim: int, subset=None):
Parameters
----------
key : string / list of selections
- ndim : 1,2
+ ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
@@ -1617,7 +1618,7 @@ def _gotitem(self, key, ndim: int, subset=None):
raise AssertionError("invalid ndim for _gotitem")
- def _wrap_frame_output(self, result, obj) -> DataFrame:
+ def _wrap_frame_output(self, result, obj: DataFrame) -> DataFrame:
result_index = self.grouper.levels[0]
if self.axis == 0:
@@ -1634,20 +1635,14 @@ def _get_data_to_aggregate(self) -> BlockManager:
else:
return obj._mgr
- def _insert_inaxis_grouper_inplace(self, result):
+ def _insert_inaxis_grouper_inplace(self, result: DataFrame) -> None:
# zip in reverse so we can always insert at loc 0
- izip = zip(
- *map(
- reversed,
- (
- self.grouper.names,
- self.grouper.get_group_levels(),
- [grp.in_axis for grp in self.grouper.groupings],
- ),
- )
- )
columns = result.columns
- for name, lev, in_axis in izip:
+ for name, lev, in_axis in zip(
+ reversed(self.grouper.names),
+ reversed(self.grouper.get_group_levels()),
+ reversed([grp.in_axis for grp in self.grouper.groupings]),
+ ):
# GH #28549
# When using .apply(-), name will be in columns already
if in_axis and name not in columns:
@@ -1712,7 +1707,7 @@ def _wrap_transformed_output(
return result
- def _wrap_agged_blocks(self, blocks: "Sequence[Block]", items: Index) -> DataFrame:
+ def _wrap_agged_blocks(self, blocks: Sequence["Block"], items: Index) -> DataFrame:
if not self.as_index:
index = np.arange(blocks[0].values.shape[-1])
mgr = BlockManager(blocks, axes=[items, index])
@@ -1739,7 +1734,7 @@ def _iterate_column_groupbys(self):
exclusions=self.exclusions,
)
- def _apply_to_column_groupbys(self, func):
+ def _apply_to_column_groupbys(self, func) -> DataFrame:
from pandas.core.reshape.concat import concat
return concat(
@@ -1748,7 +1743,7 @@ def _apply_to_column_groupbys(self, func):
axis=1,
)
- def count(self):
+ def count(self) -> DataFrame:
"""
Compute count of group, excluding missing values.
@@ -1778,7 +1773,7 @@ def count(self):
return self._reindex_output(result, fill_value=0)
- def nunique(self, dropna: bool = True):
+ def nunique(self, dropna: bool = True) -> DataFrame:
"""
Return DataFrame with counts of unique elements in each position.
@@ -1844,6 +1839,7 @@ def nunique(self, dropna: bool = True):
],
axis=1,
)
+ results = cast(DataFrame, results)
if axis_number == 1:
results = results.T
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index a91366af61d0d..651af2d314251 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -459,7 +459,7 @@ def f(self):
@contextmanager
-def _group_selection_context(groupby):
+def _group_selection_context(groupby: "_GroupBy"):
"""
Set / reset the _group_selection_context.
"""
@@ -489,7 +489,7 @@ def __init__(
keys: Optional[_KeysArgType] = None,
axis: int = 0,
level=None,
- grouper: "Optional[ops.BaseGrouper]" = None,
+ grouper: Optional["ops.BaseGrouper"] = None,
exclusions=None,
selection=None,
as_index: bool = True,
@@ -734,7 +734,7 @@ def pipe(self, func, *args, **kwargs):
plot = property(GroupByPlot)
- def _make_wrapper(self, name):
+ def _make_wrapper(self, name: str) -> Callable:
assert name in self._apply_allowlist
with _group_selection_context(self):
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 8239a792c65dd..18970ea0544e4 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -568,7 +568,9 @@ def codes(self) -> np.ndarray:
@cache_readonly
def result_index(self) -> Index:
if self.all_grouper is not None:
- return recode_from_groupby(self.all_grouper, self.sort, self.group_index)
+ group_idx = self.group_index
+ assert isinstance(group_idx, CategoricalIndex) # set in __init__
+ return recode_from_groupby(self.all_grouper, self.sort, group_idx)
return self.group_index
@property
@@ -607,7 +609,7 @@ def get_grouper(
mutated: bool = False,
validate: bool = True,
dropna: bool = True,
-) -> "Tuple[ops.BaseGrouper, List[Hashable], FrameOrSeries]":
+) -> Tuple["ops.BaseGrouper", List[Hashable], FrameOrSeries]:
"""
Create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 290680f380f5f..4dd5b7f30e7f0 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -82,7 +82,7 @@ class BaseGrouper:
def __init__(
self,
axis: Index,
- groupings: "Sequence[grouper.Grouping]",
+ groupings: Sequence["grouper.Grouping"],
sort: bool = True,
group_keys: bool = True,
mutated: bool = False,
| I'm still seeing a couple of mypy complaints, suggestions @simonjayhawkins ? | https://api.github.com/repos/pandas-dev/pandas/pulls/35939 | 2020-08-28T01:34:17Z | 2020-08-31T10:16:16Z | 2020-08-31T10:16:16Z | 2020-08-31T14:44:24Z |
REGR: Fix comparison broadcasting over array of Intervals | diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index b4c196f548147..c6917d1b50619 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -17,6 +17,7 @@ Fixed regressions
- Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`)
- Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`)
- Performance regression for :meth:`RangeIndex.format` (:issue:`35712`)
+- Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`)
-
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 6867e8aba7411..40bd5ad8f5a1f 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -358,6 +358,11 @@ cdef class Interval(IntervalMixin):
self_tuple = (self.left, self.right, self.closed)
other_tuple = (other.left, other.right, other.closed)
return PyObject_RichCompare(self_tuple, other_tuple, op)
+ elif util.is_array(other):
+ return np.array(
+ [PyObject_RichCompare(self, x, op) for x in other],
+ dtype=bool,
+ )
return NotImplemented
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index 8603bff0587b6..83dfd42ae2a6e 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1581,3 +1581,10 @@ def test_replace_with_compiled_regex(self):
result = df.replace({regex: "z"}, regex=True)
expected = pd.DataFrame(["z", "b", "c"])
tm.assert_frame_equal(result, expected)
+
+ def test_replace_intervals(self):
+ # https://github.com/pandas-dev/pandas/issues/35931
+ df = pd.DataFrame({"a": [pd.Interval(0, 1), pd.Interval(0, 1)]})
+ result = df.replace({"a": {pd.Interval(0, 1): "x"}})
+ expected = pd.DataFrame({"a": ["x", "x"]})
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/scalar/interval/test_arithmetic.py b/pandas/tests/scalar/interval/test_arithmetic.py
index 5252f1a4d5a24..b4c2b448e252a 100644
--- a/pandas/tests/scalar/interval/test_arithmetic.py
+++ b/pandas/tests/scalar/interval/test_arithmetic.py
@@ -45,3 +45,15 @@ def test_numeric_interval_add_timedelta_raises(interval, delta):
with pytest.raises((TypeError, ValueError), match=msg):
delta + interval
+
+
+@pytest.mark.parametrize("klass", [timedelta, np.timedelta64, Timedelta])
+def test_timdelta_add_timestamp_interval(klass):
+ delta = klass(0)
+ expected = Interval(Timestamp("2020-01-01"), Timestamp("2020-02-01"))
+
+ result = delta + expected
+ assert result == expected
+
+ result = expected + delta
+ assert result == expected
diff --git a/pandas/tests/scalar/interval/test_interval.py b/pandas/tests/scalar/interval/test_interval.py
index a0151bb9ac7bf..8ad9a2c7a9c70 100644
--- a/pandas/tests/scalar/interval/test_interval.py
+++ b/pandas/tests/scalar/interval/test_interval.py
@@ -2,6 +2,7 @@
import pytest
from pandas import Interval, Period, Timedelta, Timestamp
+import pandas._testing as tm
import pandas.core.common as com
@@ -267,3 +268,11 @@ def test_constructor_errors_tz(self, tz_left, tz_right):
msg = "left and right must have the same time zone"
with pytest.raises(error, match=msg):
Interval(left, right)
+
+ def test_equality_comparison_broadcasts_over_array(self):
+ # https://github.com/pandas-dev/pandas/issues/35931
+ interval = Interval(0, 1)
+ arr = np.array([interval, interval])
+ result = interval == arr
+ expected = np.array([True, True])
+ tm.assert_numpy_array_equal(result, expected)
| - [x] closes https://github.com/pandas-dev/pandas/issues/35931
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35938 | 2020-08-28T01:21:44Z | 2020-08-31T22:32:34Z | 2020-08-31T22:32:34Z | 2020-09-01T14:55:45Z |
BUG: BlockSlider not clearing index._cache | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 407e8ba029ada..fca7e7d209031 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -256,6 +256,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrameGroupBy.apply` where a non-nuisance grouping column would be dropped from the output columns if another groupby method was called before ``.apply()`` (:issue:`34656`)
- Bug in :meth:`DataFrameGroupby.apply` would drop a :class:`CategoricalIndex` when grouped on. (:issue:`35792`)
- Bug when subsetting columns on a :class:`~pandas.core.groupby.DataFrameGroupBy` (e.g. ``df.groupby('a')[['b']])``) would reset the attributes ``axis``, ``dropna``, ``group_keys``, ``level``, ``mutated``, ``sort``, and ``squeeze`` to their default values. (:issue:`9959`)
+- Bug in :meth:`DataFrameGroupby.tshift` failing to raise ``ValueError`` when a frequency cannot be inferred for the index of a group (:issue:`35937`)
-
Reshaping
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 7b36bc8baf891..8161b5c5c2b11 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -53,6 +53,7 @@ cdef class _BaseGrouper:
# to a 1-d ndarray like datetime / timedelta / period.
object.__setattr__(cached_ityp, '_index_data', islider.buf)
cached_ityp._engine.clear_mapping()
+ cached_ityp._cache.clear() # e.g. inferred_freq must go
object.__setattr__(cached_typ._mgr._block, 'values', vslider.buf)
object.__setattr__(cached_typ._mgr._block, 'mgr_locs',
slice(len(vslider.buf)))
@@ -71,6 +72,7 @@ cdef class _BaseGrouper:
object res
cached_ityp._engine.clear_mapping()
+ cached_ityp._cache.clear() # e.g. inferred_freq must go
res = self.f(cached_typ)
res = _extract_result(res)
if not initialized:
@@ -455,6 +457,7 @@ cdef class BlockSlider:
object.__setattr__(self.index, '_index_data', self.idx_slider.buf)
self.index._engine.clear_mapping()
+ self.index._cache.clear() # e.g. inferred_freq must go
cdef reset(self):
cdef:
diff --git a/pandas/tests/groupby/test_allowlist.py b/pandas/tests/groupby/test_allowlist.py
index 0fd66cc047017..4a735fc7bb686 100644
--- a/pandas/tests/groupby/test_allowlist.py
+++ b/pandas/tests/groupby/test_allowlist.py
@@ -369,7 +369,6 @@ def test_groupby_selection_with_methods(df):
"ffill",
"bfill",
"pct_change",
- "tshift",
]
for m in methods:
@@ -379,6 +378,11 @@ def test_groupby_selection_with_methods(df):
# should always be frames!
tm.assert_frame_equal(res, exp)
+ # check that the index cache is cleared
+ with pytest.raises(ValueError, match="Freq was not set in the index"):
+ # GH#35937
+ g.tshift()
+
# methods which aren't just .foo()
tm.assert_frame_equal(g.fillna(0), g_exp.fillna(0))
tm.assert_frame_equal(g.dtypes, g_exp.dtypes)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35937 | 2020-08-27T23:33:20Z | 2020-09-02T03:18:17Z | 2020-09-02T03:18:17Z | 2020-09-02T17:00:22Z |
REGR: Fix inplace updates on column to set correct values | diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index 9747a8ef3e71f..b4c196f548147 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`)
+- Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`)
- Performance regression for :meth:`RangeIndex.format` (:issue:`35712`)
-
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index a5372b14d210f..31f753eb9d75b 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1027,6 +1027,7 @@ def iset(self, loc: Union[int, slice, np.ndarray], value):
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
"""
+ value = extract_array(value, extract_numpy=True)
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
if self._blklocs is None and self.ndim > 1:
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index b9219f9f833de..bbfaacae1b444 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -348,6 +348,12 @@ def test_fillna_frame(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_frame(data_missing)
+ @pytest.mark.skip("Invalid test")
+ def test_fillna_fill_other(self, data):
+ # inplace update doesn't work correctly with patched extension arrays
+ # extract_array returns PandasArray, while dtype is a numpy dtype
+ super().test_fillna_fill_other(data_missing)
+
class TestReshaping(BaseNumPyTests, base.BaseReshapingTests):
@pytest.mark.skip("Incorrect parent test")
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index 8ecd9066ceff0..00cfa6265934f 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -644,3 +644,17 @@ def test_to_dict_of_blocks_item_cache():
assert df.loc[0, "b"] == "foo"
assert df["b"] is ser
+
+
+def test_update_inplace_sets_valid_block_values():
+ # https://github.com/pandas-dev/pandas/issues/33457
+ df = pd.DataFrame({"a": pd.Series([1, 2, None], dtype="category")})
+
+ # inplace update of a single column
+ df["a"].fillna(1, inplace=True)
+
+ # check we havent put a Series into any block.values
+ assert isinstance(df._mgr.blocks[0].values, pd.Categorical)
+
+ # smoketest for OP bug from GH#35731
+ assert df.isnull().sum().sum() == 0
| Closes #35731 | https://api.github.com/repos/pandas-dev/pandas/pulls/35936 | 2020-08-27T20:11:50Z | 2020-08-31T12:36:13Z | 2020-08-31T12:36:12Z | 2020-08-31T13:09:24Z |
TYP: annotations in pandas.plotting | diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 8f2080658e63e..214a67690d695 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -1,7 +1,8 @@
import contextlib
import datetime as pydt
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, tzinfo
import functools
+from typing import Optional, Tuple
from dateutil.relativedelta import relativedelta
import matplotlib.dates as dates
@@ -152,7 +153,7 @@ def axisinfo(unit, axis):
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label="time")
@staticmethod
- def default_units(x, axis):
+ def default_units(x, axis) -> str:
return "time"
@@ -421,7 +422,7 @@ def autoscale(self):
return self.nonsingular(vmin, vmax)
-def _from_ordinal(x, tz=None):
+def _from_ordinal(x, tz: Optional[tzinfo] = None) -> datetime:
ix = int(x)
dt = datetime.fromordinal(ix)
remainder = float(x) - ix
@@ -450,7 +451,7 @@ def _from_ordinal(x, tz=None):
# -------------------------------------------------------------------------
-def _get_default_annual_spacing(nyears):
+def _get_default_annual_spacing(nyears) -> Tuple[int, int]:
"""
Returns a default spacing between consecutive ticks for annual data.
"""
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py
index eef4276f0ed09..193602e1baf4a 100644
--- a/pandas/plotting/_matplotlib/timeseries.py
+++ b/pandas/plotting/_matplotlib/timeseries.py
@@ -62,13 +62,13 @@ def _maybe_resample(series: "Series", ax, kwargs):
return freq, series
-def _is_sub(f1, f2):
+def _is_sub(f1: str, f2: str) -> bool:
return (f1.startswith("W") and is_subperiod("D", f2)) or (
f2.startswith("W") and is_subperiod(f1, "D")
)
-def _is_sup(f1, f2):
+def _is_sup(f1: str, f2: str) -> bool:
return (f1.startswith("W") and is_superperiod("D", f2)) or (
f2.startswith("W") and is_superperiod(f1, "D")
)
diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index caf2f27de9276..26b25597ce1a6 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -1,16 +1,22 @@
# being a bit too dynamic
from math import ceil
+from typing import TYPE_CHECKING, Tuple
import warnings
import matplotlib.table
import matplotlib.ticker as ticker
import numpy as np
+from pandas._typing import FrameOrSeries
+
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.plotting._matplotlib import compat
+if TYPE_CHECKING:
+ from matplotlib.table import Table
+
def format_date_labels(ax, rot):
# mini version of autofmt_xdate
@@ -21,7 +27,7 @@ def format_date_labels(ax, rot):
fig.subplots_adjust(bottom=0.2)
-def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
+def table(ax, data: FrameOrSeries, rowLabels=None, colLabels=None, **kwargs) -> "Table":
if isinstance(data, ABCSeries):
data = data.to_frame()
elif isinstance(data, ABCDataFrame):
@@ -43,7 +49,7 @@ def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
return table
-def _get_layout(nplots, layout=None, layout_type="box"):
+def _get_layout(nplots: int, layout=None, layout_type: str = "box") -> Tuple[int, int]:
if layout is not None:
if not isinstance(layout, (tuple, list)) or len(layout) != 2:
raise ValueError("Layout must be a tuple of (rows, columns)")
@@ -92,14 +98,14 @@ def _get_layout(nplots, layout=None, layout_type="box"):
def _subplots(
- naxes=None,
- sharex=False,
- sharey=False,
- squeeze=True,
+ naxes: int,
+ sharex: bool = False,
+ sharey: bool = False,
+ squeeze: bool = True,
subplot_kw=None,
ax=None,
layout=None,
- layout_type="box",
+ layout_type: str = "box",
**fig_kw,
):
"""
@@ -369,7 +375,7 @@ def _get_all_lines(ax):
return lines
-def _get_xlim(lines):
+def _get_xlim(lines) -> Tuple[float, float]:
left, right = np.inf, -np.inf
for l in lines:
x = l.get_xdata(orig=False)
| https://api.github.com/repos/pandas-dev/pandas/pulls/35935 | 2020-08-27T18:06:46Z | 2020-08-28T09:07:06Z | 2020-08-28T09:07:06Z | 2020-08-28T17:09:57Z | |
TYP: Annotations | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index befde7c355818..8501726c7d76d 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -10,7 +10,7 @@
import numpy as np
from pandas._libs import Timestamp, algos, hashtable as htable, iNaT, lib
-from pandas._typing import AnyArrayLike, ArrayLike, DtypeObj
+from pandas._typing import AnyArrayLike, ArrayLike, DtypeObj, FrameOrSeriesUnion
from pandas.util._decorators import doc
from pandas.core.dtypes.cast import (
@@ -58,7 +58,7 @@
from pandas.core.indexers import validate_indices
if TYPE_CHECKING:
- from pandas import Series
+ from pandas import DataFrame, Series
_shared_docs: Dict[str, str] = {}
@@ -1101,6 +1101,9 @@ def __init__(self, obj, n: int, keep: str):
if self.keep not in ("first", "last", "all"):
raise ValueError('keep must be either "first", "last" or "all"')
+ def compute(self, method: str) -> FrameOrSeriesUnion:
+ raise NotImplementedError
+
def nlargest(self):
return self.compute("nlargest")
@@ -1133,7 +1136,7 @@ class SelectNSeries(SelectN):
nordered : Series
"""
- def compute(self, method):
+ def compute(self, method: str) -> "Series":
n = self.n
dtype = self.obj.dtype
@@ -1207,7 +1210,7 @@ def __init__(self, obj, n: int, keep: str, columns):
columns = list(columns)
self.columns = columns
- def compute(self, method):
+ def compute(self, method: str) -> "DataFrame":
from pandas import Int64Index
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index d85647edc3b81..8193d65b3b30c 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -1167,6 +1167,10 @@ class ExtensionOpsMixin:
with NumPy arrays.
"""
+ @classmethod
+ def _create_arithmetic_method(cls, op):
+ raise AbstractMethodError(cls)
+
@classmethod
def _add_arithmetic_ops(cls):
cls.__add__ = cls._create_arithmetic_method(operator.add)
@@ -1186,6 +1190,10 @@ def _add_arithmetic_ops(cls):
cls.__divmod__ = cls._create_arithmetic_method(divmod)
cls.__rdivmod__ = cls._create_arithmetic_method(ops.rdivmod)
+ @classmethod
+ def _create_comparison_method(cls, op):
+ raise AbstractMethodError(cls)
+
@classmethod
def _add_comparison_ops(cls):
cls.__eq__ = cls._create_comparison_method(operator.eq)
@@ -1195,6 +1203,10 @@ def _add_comparison_ops(cls):
cls.__le__ = cls._create_comparison_method(operator.le)
cls.__ge__ = cls._create_comparison_method(operator.ge)
+ @classmethod
+ def _create_logical_method(cls, op):
+ raise AbstractMethodError(cls)
+
@classmethod
def _add_logical_ops(cls):
cls.__and__ = cls._create_logical_method(operator.and_)
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index e71b2f94c8014..999873e7b81e4 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -4,17 +4,22 @@
SeriesGroupBy and the DataFrameGroupBy objects.
"""
import collections
+from typing import List
from pandas.core.dtypes.common import is_list_like, is_scalar
+from pandas.core.base import PandasObject
+
OutputKey = collections.namedtuple("OutputKey", ["label", "position"])
-class GroupByMixin:
+class GroupByMixin(PandasObject):
"""
Provide the groupby facilities to the mixed object.
"""
+ _attributes: List[str]
+
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
@@ -22,7 +27,7 @@ def _gotitem(self, key, ndim, subset=None):
Parameters
----------
key : string / list of selections
- ndim : 1,2
+ ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index b1e5d5627e3f6..a07c3328def54 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3541,10 +3541,7 @@ def _join_multi(self, other, how, return_indexers=True):
if not overlap:
raise ValueError("cannot join with no overlapping index names")
- self_is_mi = isinstance(self, ABCMultiIndex)
- other_is_mi = isinstance(other, ABCMultiIndex)
-
- if self_is_mi and other_is_mi:
+ if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
# Drop the non-matching levels from left and right respectively
ldrop_names = list(self_names - overlap)
@@ -3590,7 +3587,7 @@ def _join_multi(self, other, how, return_indexers=True):
# Case where only one index is multi
# make the indices into mi's that match
flip_order = False
- if self_is_mi:
+ if isinstance(self, MultiIndex):
self, other = other, self
flip_order = True
# flip if join method is right or left
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 0e8d7c1b866b8..c5f9b0783d91b 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -1,7 +1,7 @@
"""
Base and utility classes for tseries type pandas objects.
"""
-from datetime import datetime
+from datetime import datetime, tzinfo
from typing import Any, List, Optional, TypeVar, Union, cast
import numpy as np
@@ -632,6 +632,8 @@ class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, Int64Index):
but not PeriodIndex
"""
+ tz: Optional[tzinfo]
+
# Compat for frequency inference, see GH#23789
_is_monotonic_increasing = Index.is_monotonic_increasing
_is_monotonic_decreasing = Index.is_monotonic_decreasing
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 731907993d08f..c3eb0496a1bc5 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -45,6 +45,8 @@ class NumericIndex(Index):
This is an abstract class.
"""
+ _default_dtype: np.dtype
+
_is_numeric_dtype = True
def __new__(cls, data=None, dtype=None, copy=False, name=None):
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index c62be4f767f00..94b62300e0af5 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1382,7 +1382,7 @@ def where_func(cond, values, other):
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool)
- result_blocks = []
+ result_blocks: List["Block"] = []
for m in [mask, ~mask]:
if m.any():
taken = result.take(m.nonzero()[0], axis=axis)
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index a5372b14d210f..ad79317aee1ef 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -334,7 +334,7 @@ def reduce(self: T, func) -> T:
# If 2D, we assume that we're operating column-wise
assert self.ndim == 2
- res_blocks = []
+ res_blocks: List[Block] = []
for blk in self.blocks:
nbs = blk.reduce(func)
res_blocks.extend(nbs)
@@ -730,7 +730,7 @@ def _combine(self, blocks: List[Block], copy: bool = True) -> "BlockManager":
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
- new_blocks = []
+ new_blocks: List[Block] = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = inv_indexer[b.mgr_locs.indexer]
| https://api.github.com/repos/pandas-dev/pandas/pulls/35933 | 2020-08-27T17:34:52Z | 2020-08-30T12:01:51Z | 2020-08-30T12:01:51Z | 2020-08-30T15:07:35Z | |
CLN remove unnecessary trailing commas from aggregation | diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py
index 891048ae82dfd..e2374b81ca13b 100644
--- a/pandas/core/aggregation.py
+++ b/pandas/core/aggregation.py
@@ -28,10 +28,8 @@
def reconstruct_func(
- func: Optional[AggFuncType], **kwargs,
-) -> Tuple[
- bool, Optional[AggFuncType], Optional[List[str]], Optional[List[int]],
-]:
+ func: Optional[AggFuncType], **kwargs
+) -> Tuple[bool, Optional[AggFuncType], Optional[List[str]], Optional[List[int]]]:
"""
This is the internal function to reconstruct func given if there is relabeling
or not and also normalize the keyword to get new order of columns.
| xref #35925
| https://api.github.com/repos/pandas-dev/pandas/pulls/35930 | 2020-08-27T16:22:26Z | 2020-08-28T09:21:59Z | 2020-08-28T09:21:59Z | 2020-08-28T16:50:33Z |
REF: use BlockManager.apply for DataFrameGroupBy.count | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 2afa56b50c3c7..039f52e6f5b8d 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -76,7 +76,7 @@
from pandas.core.groupby.numba_ import generate_numba_func, split_for_numba
from pandas.core.indexes.api import Index, MultiIndex, all_indexes_same
import pandas.core.indexes.base as ibase
-from pandas.core.internals import BlockManager, make_block
+from pandas.core.internals import BlockManager
from pandas.core.series import Series
from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, maybe_use_numba
@@ -1765,20 +1765,24 @@ def count(self):
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
- # TODO(2DEA): reshape would not be necessary with 2D EAs
- vals = ((mask & ~isna(blk.values).reshape(blk.shape)) for blk in data.blocks)
- locs = (blk.mgr_locs for blk in data.blocks)
+ def hfunc(bvalues: ArrayLike) -> ArrayLike:
+ # TODO(2DEA): reshape would not be necessary with 2D EAs
+ if bvalues.ndim == 1:
+ # EA
+ masked = mask & ~isna(bvalues).reshape(1, -1)
+ else:
+ masked = mask & ~isna(bvalues)
- counted = (
- lib.count_level_2d(x, labels=ids, max_bin=ngroups, axis=1) for x in vals
- )
- blocks = [make_block(val, placement=loc) for val, loc in zip(counted, locs)]
+ counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups, axis=1)
+ return counted
+
+ new_mgr = data.apply(hfunc)
# If we are grouping on categoricals we want unobserved categories to
# return zero, rather than the default of NaN which the reindexing in
# _wrap_agged_blocks() returns. GH 35028
with com.temp_setattr(self, "observed", True):
- result = self._wrap_agged_blocks(blocks, items=data.items)
+ result = self._wrap_agged_blocks(new_mgr.blocks, items=data.items)
return self._reindex_output(result, fill_value=0)
| https://api.github.com/repos/pandas-dev/pandas/pulls/35924 | 2020-08-27T14:43:01Z | 2020-09-04T20:47:13Z | 2020-09-04T20:47:13Z | 2020-09-04T20:50:50Z | |
Backport PR #35794: BUG: issubclass check with dtype instead of type,… | diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index 5c4e770c7b33c..a87e06678faad 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -24,7 +24,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
-
+- Bug in :meth:`DataFrame.eval` with ``object`` dtype column binary operations (:issue:`35794`)
- Bug in :class:`Series` constructor raising a ``TypeError`` when constructing sparse datetime64 dtypes (:issue:`35762`)
- Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`)
- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`)
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index bc9ff7c44b689..e55df1e1d8155 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -481,13 +481,21 @@ def stringify(value):
self.lhs.update(v)
def _disallow_scalar_only_bool_ops(self):
+ rhs = self.rhs
+ lhs = self.lhs
+
+ # GH#24883 unwrap dtype if necessary to ensure we have a type object
+ rhs_rt = rhs.return_type
+ rhs_rt = getattr(rhs_rt, "type", rhs_rt)
+ lhs_rt = lhs.return_type
+ lhs_rt = getattr(lhs_rt, "type", lhs_rt)
if (
- (self.lhs.is_scalar or self.rhs.is_scalar)
+ (lhs.is_scalar or rhs.is_scalar)
and self.op in _bool_ops_dict
and (
not (
- issubclass(self.rhs.return_type, (bool, np.bool_))
- and issubclass(self.lhs.return_type, (bool, np.bool_))
+ issubclass(rhs_rt, (bool, np.bool_))
+ and issubclass(lhs_rt, (bool, np.bool_))
)
)
):
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 628b955a1de92..56d178daee7fd 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -160,6 +160,13 @@ def test_eval_resolvers_as_list(self):
assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
assert pd.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
+ def test_eval_object_dtype_binop(self):
+ # GH#24883
+ df = pd.DataFrame({"a1": ["Y", "N"]})
+ res = df.eval("c = ((a1 == 'Y') & True)")
+ expected = pd.DataFrame({"a1": ["Y", "N"], "c": [True, False]})
+ tm.assert_frame_equal(res, expected)
+
class TestDataFrameQueryWithMultiIndex:
def test_query_with_named_multiindex(self, parser, engine):
| xref #35794 | https://api.github.com/repos/pandas-dev/pandas/pulls/35919 | 2020-08-27T09:18:35Z | 2020-08-27T10:37:07Z | 2020-08-27T10:37:07Z | 2020-08-27T10:37:16Z |
REF: window/test_dtypes.py with pytest idioms | diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py
index eb8252d5731be..7f03fa2a5ea0d 100644
--- a/pandas/tests/window/conftest.py
+++ b/pandas/tests/window/conftest.py
@@ -308,3 +308,34 @@ def which(request):
def halflife_with_times(request):
"""Halflife argument for EWM when times is specified."""
return request.param
+
+
+@pytest.fixture(
+ params=[
+ "object",
+ "category",
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "float16",
+ "float32",
+ "float64",
+ "m8[ns]",
+ "M8[ns]",
+ pytest.param(
+ "datetime64[ns, UTC]",
+ marks=pytest.mark.skip(
+ "direct creation of extension dtype datetime64[ns, UTC] "
+ "is not supported ATM"
+ ),
+ ),
+ ]
+)
+def dtypes(request):
+ """Dtypes for window tests"""
+ return request.param
diff --git a/pandas/tests/window/test_dtypes.py b/pandas/tests/window/test_dtypes.py
index 0aa5bf019ff5e..245b48b351684 100644
--- a/pandas/tests/window/test_dtypes.py
+++ b/pandas/tests/window/test_dtypes.py
@@ -1,5 +1,3 @@
-from itertools import product
-
import numpy as np
import pytest
@@ -10,234 +8,95 @@
# gh-12373 : rolling functions error on float32 data
# make sure rolling functions works for different dtypes
#
-# NOTE that these are yielded tests and so _create_data
-# is explicitly called.
-#
# further note that we are only checking rolling for fully dtype
# compliance (though both expanding and ewm inherit)
-class Dtype:
- window = 2
-
- funcs = {
- "count": lambda v: v.count(),
- "max": lambda v: v.max(),
- "min": lambda v: v.min(),
- "sum": lambda v: v.sum(),
- "mean": lambda v: v.mean(),
- "std": lambda v: v.std(),
- "var": lambda v: v.var(),
- "median": lambda v: v.median(),
- }
-
- def get_expects(self):
- expects = {
- "sr1": {
- "count": Series([1, 2, 2, 2, 2], dtype="float64"),
- "max": Series([np.nan, 1, 2, 3, 4], dtype="float64"),
- "min": Series([np.nan, 0, 1, 2, 3], dtype="float64"),
- "sum": Series([np.nan, 1, 3, 5, 7], dtype="float64"),
- "mean": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"),
- "std": Series([np.nan] + [np.sqrt(0.5)] * 4, dtype="float64"),
- "var": Series([np.nan, 0.5, 0.5, 0.5, 0.5], dtype="float64"),
- "median": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"),
+def get_dtype(dtype, coerce_int=None):
+ if coerce_int is False and "int" in dtype:
+ return None
+ if dtype != "category":
+ return np.dtype(dtype)
+ return dtype
+
+
+@pytest.mark.parametrize(
+ "method, data, expected_data, coerce_int",
+ [
+ ("count", np.arange(5), [1, 2, 2, 2, 2], True),
+ ("count", np.arange(10, 0, -2), [1, 2, 2, 2, 2], True),
+ ("count", [0, 1, 2, np.nan, 4], [1, 2, 2, 1, 1], False),
+ ("max", np.arange(5), [np.nan, 1, 2, 3, 4], True),
+ ("max", np.arange(10, 0, -2), [np.nan, 10, 8, 6, 4], True),
+ ("max", [0, 1, 2, np.nan, 4], [np.nan, 1, 2, np.nan, np.nan], False),
+ ("min", np.arange(5), [np.nan, 0, 1, 2, 3], True),
+ ("min", np.arange(10, 0, -2), [np.nan, 8, 6, 4, 2], True),
+ ("min", [0, 1, 2, np.nan, 4], [np.nan, 0, 1, np.nan, np.nan], False),
+ ("sum", np.arange(5), [np.nan, 1, 3, 5, 7], True),
+ ("sum", np.arange(10, 0, -2), [np.nan, 18, 14, 10, 6], True),
+ ("sum", [0, 1, 2, np.nan, 4], [np.nan, 1, 3, np.nan, np.nan], False),
+ ("mean", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True),
+ ("mean", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True),
+ ("mean", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 1.5, np.nan, np.nan], False),
+ ("std", np.arange(5), [np.nan] + [np.sqrt(0.5)] * 4, True),
+ ("std", np.arange(10, 0, -2), [np.nan] + [np.sqrt(2)] * 4, True),
+ (
+ "std",
+ [0, 1, 2, np.nan, 4],
+ [np.nan] + [np.sqrt(0.5)] * 2 + [np.nan] * 2,
+ False,
+ ),
+ ("var", np.arange(5), [np.nan, 0.5, 0.5, 0.5, 0.5], True),
+ ("var", np.arange(10, 0, -2), [np.nan, 2, 2, 2, 2], True),
+ ("var", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 0.5, np.nan, np.nan], False),
+ ("median", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True),
+ ("median", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True),
+ ("median", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 1.5, np.nan, np.nan], False),
+ ],
+)
+def test_series_dtypes(method, data, expected_data, coerce_int, dtypes):
+ s = Series(data, dtype=get_dtype(dtypes, coerce_int=coerce_int))
+ if dtypes in ("m8[ns]", "M8[ns]") and method != "count":
+ msg = "No numeric types to aggregate"
+ with pytest.raises(DataError, match=msg):
+ getattr(s.rolling(2), method)()
+ else:
+ result = getattr(s.rolling(2), method)()
+ expected = Series(expected_data, dtype="float64")
+ tm.assert_almost_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "method, expected_data",
+ [
+ ("count", {0: Series([1, 2, 2, 2, 2]), 1: Series([1, 2, 2, 2, 2])}),
+ ("max", {0: Series([np.nan, 2, 4, 6, 8]), 1: Series([np.nan, 3, 5, 7, 9])}),
+ ("min", {0: Series([np.nan, 0, 2, 4, 6]), 1: Series([np.nan, 1, 3, 5, 7])}),
+ (
+ "sum",
+ {0: Series([np.nan, 2, 6, 10, 14]), 1: Series([np.nan, 4, 8, 12, 16])},
+ ),
+ ("mean", {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])}),
+ (
+ "std",
+ {
+ 0: Series([np.nan] + [np.sqrt(2)] * 4),
+ 1: Series([np.nan] + [np.sqrt(2)] * 4),
},
- "sr2": {
- "count": Series([1, 2, 2, 2, 2], dtype="float64"),
- "max": Series([np.nan, 10, 8, 6, 4], dtype="float64"),
- "min": Series([np.nan, 8, 6, 4, 2], dtype="float64"),
- "sum": Series([np.nan, 18, 14, 10, 6], dtype="float64"),
- "mean": Series([np.nan, 9, 7, 5, 3], dtype="float64"),
- "std": Series([np.nan] + [np.sqrt(2)] * 4, dtype="float64"),
- "var": Series([np.nan, 2, 2, 2, 2], dtype="float64"),
- "median": Series([np.nan, 9, 7, 5, 3], dtype="float64"),
- },
- "sr3": {
- "count": Series([1, 2, 2, 1, 1], dtype="float64"),
- "max": Series([np.nan, 1, 2, np.nan, np.nan], dtype="float64"),
- "min": Series([np.nan, 0, 1, np.nan, np.nan], dtype="float64"),
- "sum": Series([np.nan, 1, 3, np.nan, np.nan], dtype="float64"),
- "mean": Series([np.nan, 0.5, 1.5, np.nan, np.nan], dtype="float64"),
- "std": Series(
- [np.nan] + [np.sqrt(0.5)] * 2 + [np.nan] * 2, dtype="float64"
- ),
- "var": Series([np.nan, 0.5, 0.5, np.nan, np.nan], dtype="float64"),
- "median": Series([np.nan, 0.5, 1.5, np.nan, np.nan], dtype="float64"),
- },
- "df": {
- "count": DataFrame(
- {0: Series([1, 2, 2, 2, 2]), 1: Series([1, 2, 2, 2, 2])},
- dtype="float64",
- ),
- "max": DataFrame(
- {0: Series([np.nan, 2, 4, 6, 8]), 1: Series([np.nan, 3, 5, 7, 9])},
- dtype="float64",
- ),
- "min": DataFrame(
- {0: Series([np.nan, 0, 2, 4, 6]), 1: Series([np.nan, 1, 3, 5, 7])},
- dtype="float64",
- ),
- "sum": DataFrame(
- {
- 0: Series([np.nan, 2, 6, 10, 14]),
- 1: Series([np.nan, 4, 8, 12, 16]),
- },
- dtype="float64",
- ),
- "mean": DataFrame(
- {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])},
- dtype="float64",
- ),
- "std": DataFrame(
- {
- 0: Series([np.nan] + [np.sqrt(2)] * 4),
- 1: Series([np.nan] + [np.sqrt(2)] * 4),
- },
- dtype="float64",
- ),
- "var": DataFrame(
- {0: Series([np.nan, 2, 2, 2, 2]), 1: Series([np.nan, 2, 2, 2, 2])},
- dtype="float64",
- ),
- "median": DataFrame(
- {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])},
- dtype="float64",
- ),
- },
- }
- return expects
-
- def _create_dtype_data(self, dtype):
- sr1 = Series(np.arange(5), dtype=dtype)
- sr2 = Series(np.arange(10, 0, -2), dtype=dtype)
- sr3 = sr1.copy()
- sr3[3] = np.NaN
- df = DataFrame(np.arange(10).reshape((5, 2)), dtype=dtype)
-
- data = {"sr1": sr1, "sr2": sr2, "sr3": sr3, "df": df}
-
- return data
-
- def _create_data(self):
- self.data = self._create_dtype_data(self.dtype)
- self.expects = self.get_expects()
-
- def test_dtypes(self):
- self._create_data()
- for f_name, d_name in product(self.funcs.keys(), self.data.keys()):
-
- f = self.funcs[f_name]
- d = self.data[d_name]
- exp = self.expects[d_name][f_name]
- self.check_dtypes(f, f_name, d, d_name, exp)
-
- def check_dtypes(self, f, f_name, d, d_name, exp):
- roll = d.rolling(window=self.window)
- result = f(roll)
- tm.assert_almost_equal(result, exp)
-
-
-class TestDtype_object(Dtype):
- dtype = object
-
-
-class Dtype_integer(Dtype):
- pass
-
-
-class TestDtype_int8(Dtype_integer):
- dtype = np.int8
-
-
-class TestDtype_int16(Dtype_integer):
- dtype = np.int16
-
-
-class TestDtype_int32(Dtype_integer):
- dtype = np.int32
-
-
-class TestDtype_int64(Dtype_integer):
- dtype = np.int64
-
-
-class Dtype_uinteger(Dtype):
- pass
-
-
-class TestDtype_uint8(Dtype_uinteger):
- dtype = np.uint8
-
-
-class TestDtype_uint16(Dtype_uinteger):
- dtype = np.uint16
-
-
-class TestDtype_uint32(Dtype_uinteger):
- dtype = np.uint32
-
-
-class TestDtype_uint64(Dtype_uinteger):
- dtype = np.uint64
-
-
-class Dtype_float(Dtype):
- pass
-
-
-class TestDtype_float16(Dtype_float):
- dtype = np.float16
-
-
-class TestDtype_float32(Dtype_float):
- dtype = np.float32
-
-
-class TestDtype_float64(Dtype_float):
- dtype = np.float64
-
-
-class TestDtype_category(Dtype):
- dtype = "category"
- include_df = False
-
- def _create_dtype_data(self, dtype):
- sr1 = Series(range(5), dtype=dtype)
- sr2 = Series(range(10, 0, -2), dtype=dtype)
-
- data = {"sr1": sr1, "sr2": sr2}
-
- return data
-
-
-class DatetimeLike(Dtype):
- def check_dtypes(self, f, f_name, d, d_name, exp):
-
- roll = d.rolling(window=self.window)
- if f_name == "count":
- result = f(roll)
- tm.assert_almost_equal(result, exp)
-
- else:
- msg = "No numeric types to aggregate"
- with pytest.raises(DataError, match=msg):
- f(roll)
-
-
-class TestDtype_timedelta(DatetimeLike):
- dtype = np.dtype("m8[ns]")
-
-
-class TestDtype_datetime(DatetimeLike):
- dtype = np.dtype("M8[ns]")
-
-
-class TestDtype_datetime64UTC(DatetimeLike):
- dtype = "datetime64[ns, UTC]"
-
- def _create_data(self):
- pytest.skip(
- "direct creation of extension dtype "
- "datetime64[ns, UTC] is not supported ATM"
- )
+ ),
+ ("var", {0: Series([np.nan, 2, 2, 2, 2]), 1: Series([np.nan, 2, 2, 2, 2])}),
+ ("median", {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])}),
+ ],
+)
+def test_dataframe_dtypes(method, expected_data, dtypes):
+ if dtypes == "category":
+ pytest.skip("Category dataframe testing not implemented.")
+ df = DataFrame(np.arange(10).reshape((5, 2)), dtype=get_dtype(dtypes))
+ if dtypes in ("m8[ns]", "M8[ns]") and method != "count":
+ msg = "No numeric types to aggregate"
+ with pytest.raises(DataError, match=msg):
+ getattr(df.rolling(2), method)()
+ else:
+ result = getattr(df.rolling(2), method)()
+ expected = DataFrame(expected_data, dtype="float64")
+ tm.assert_frame_equal(result, expected)
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/35918 | 2020-08-27T07:19:08Z | 2020-09-05T19:40:07Z | 2020-09-05T19:40:07Z | 2020-09-06T04:05:06Z |
"Backport PR #35838 on branch 1.1.x" | diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index d60119f28c053..5c4e770c7b33c 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -24,6 +24,8 @@ Fixed regressions
Bug fixes
~~~~~~~~~
+
+- Bug in :class:`Series` constructor raising a ``TypeError`` when constructing sparse datetime64 dtypes (:issue:`35762`)
- Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`)
- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`)
-
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 47f10f1f65f4a..e8c9f28e50084 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -35,6 +35,7 @@
is_iterator,
is_list_like,
is_object_dtype,
+ is_sparse,
is_timedelta64_ns_dtype,
)
from pandas.core.dtypes.generic import (
@@ -535,9 +536,10 @@ def _try_cast(
if maybe_castable(arr) and not copy and dtype is None:
return arr
- if isinstance(dtype, ExtensionDtype) and dtype.kind != "M":
+ if isinstance(dtype, ExtensionDtype) and (dtype.kind != "M" or is_sparse(dtype)):
# create an extension array from its dtype
- # DatetimeTZ case needs to go through maybe_cast_to_datetime
+ # DatetimeTZ case needs to go through maybe_cast_to_datetime but
+ # SparseDtype does not
array_type = dtype.construct_array_type()._from_sequence
subarr = array_type(arr, dtype=dtype, copy=copy)
return subarr
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 2697f42eb05a4..e6b4cb598989b 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -50,6 +50,7 @@
is_numeric_dtype,
is_object_dtype,
is_scalar,
+ is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
@@ -1323,7 +1324,9 @@ def maybe_cast_to_datetime(value, dtype, errors: str = "raise"):
f"Please pass in '{dtype.name}[ns]' instead."
)
- if is_datetime64 and not is_dtype_equal(dtype, DT64NS_DTYPE):
+ if is_datetime64 and not is_dtype_equal(
+ getattr(dtype, "subtype", dtype), DT64NS_DTYPE
+ ):
# pandas supports dtype whose granularity is less than [ns]
# e.g., [ps], [fs], [as]
@@ -1355,7 +1358,7 @@ def maybe_cast_to_datetime(value, dtype, errors: str = "raise"):
if is_scalar(value):
if value == iNaT or isna(value):
value = iNaT
- else:
+ elif not is_sparse(value):
value = np.array(value, copy=False)
# have a scalar array-like (e.g. NaT)
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 1dd410ad02ee0..bcf7039ec9039 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1449,3 +1449,18 @@ def test_constructor_datetimelike_scalar_to_string_dtype(self):
result = Series("M", index=[1, 2, 3], dtype="string")
expected = pd.Series(["M", "M", "M"], index=[1, 2, 3], dtype="string")
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "values",
+ [
+ [np.datetime64("2012-01-01"), np.datetime64("2013-01-01")],
+ ["2012-01-01", "2013-01-01"],
+ ],
+ )
+ def test_constructor_sparse_datetime64(self, values):
+ # https://github.com/pandas-dev/pandas/issues/35762
+ dtype = pd.SparseDtype("datetime64[ns]")
+ result = pd.Series(values, dtype=dtype)
+ arr = pd.arrays.SparseArray(values, dtype=dtype)
+ expected = pd.Series(arr)
+ tm.assert_series_equal(result, expected)
| xref #35838 | https://api.github.com/repos/pandas-dev/pandas/pulls/35915 | 2020-08-27T02:36:25Z | 2020-08-27T09:22:20Z | 2020-08-27T09:22:20Z | 2020-08-27T17:11:02Z |
Make MultiIndex.get_loc raise for unhashable type | diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index d1a66256454ca..0fa5dd30f8cd9 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -17,6 +17,7 @@ Fixed regressions
- Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`)
- Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`)
- Performance regression for :meth:`RangeIndex.format` (:issue:`35712`)
+- Regression where :meth:`MultiIndex.get_loc` would return a slice spanning the full index when passed an empty list (:issue:`35878`)
- Fix regression in invalid cache after an indexing operation; this can manifest when setting which does not update the data (:issue:`35521`)
- Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`)
- Fix regression in pickle roundtrip of the ``closed`` attribute of :class:`IntervalIndex` (:issue:`35658`)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index f66b009e6d505..080ece8547479 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2725,6 +2725,8 @@ def get_loc(self, key, method=None):
"currently supported for MultiIndex"
)
+ hash(key)
+
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != "int64":
@@ -2739,8 +2741,7 @@ def _maybe_to_slice(loc):
mask[loc] = True
return mask
- if not isinstance(key, (tuple, list)):
- # not including list here breaks some indexing, xref #30892
+ if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index d27487dfb8aaa..e4549dfb3e68d 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -2111,7 +2111,7 @@ def test_type_error_multiindex(self):
)
dg = df.pivot_table(index="i", columns="c", values=["x", "y"])
- with pytest.raises(TypeError, match="is an invalid key"):
+ with pytest.raises(TypeError, match="unhashable type"):
dg[:, 0]
index = Index(range(2), name="i")
diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py
index 5e5fcd3db88d8..4565d79c632de 100644
--- a/pandas/tests/indexing/multiindex/test_multiindex.py
+++ b/pandas/tests/indexing/multiindex/test_multiindex.py
@@ -1,4 +1,5 @@
import numpy as np
+import pytest
import pandas._libs.index as _index
from pandas.errors import PerformanceWarning
@@ -83,3 +84,10 @@ def test_nested_tuples_duplicates(self):
df3 = df.copy(deep=True)
df3.loc[[(dti[0], "a")], "c2"] = 1.0
tm.assert_frame_equal(df3, expected)
+
+ def test_multiindex_get_loc_list_raises(self):
+ # https://github.com/pandas-dev/pandas/issues/35878
+ idx = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)])
+ msg = "unhashable type"
+ with pytest.raises(TypeError, match=msg):
+ idx.get_loc([])
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index 3463de25ad91b..593d1c78a19e2 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -1,6 +1,7 @@
import numpy as np
-from pandas import NaT, Series, date_range
+from pandas import MultiIndex, NaT, Series, date_range
+import pandas.testing as tm
class TestSetitemDT64Values:
@@ -17,3 +18,11 @@ def test_setitem_none_nan(self):
series[5:7] = np.nan
assert series[6] is NaT
+
+ def test_setitem_multiindex_empty_slice(self):
+ # https://github.com/pandas-dev/pandas/issues/35878
+ idx = MultiIndex.from_tuples([("a", 1), ("b", 2)])
+ result = Series([1, 2], index=idx)
+ expected = result.copy()
+ result.loc[[]] = 0
+ tm.assert_series_equal(result, expected)
| - [x] closes #35878
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35914 | 2020-08-27T02:25:38Z | 2020-09-05T21:18:52Z | 2020-09-05T21:18:51Z | 2020-09-06T13:01:22Z |
TYP: annotate tseries.holiday | diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index 8ab37f787bd10..d8a3040919e7b 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -12,7 +12,7 @@
from pandas.tseries.offsets import Day, Easter
-def next_monday(dt):
+def next_monday(dt: datetime) -> datetime:
"""
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday, use Monday instead
@@ -24,7 +24,7 @@ def next_monday(dt):
return dt
-def next_monday_or_tuesday(dt):
+def next_monday_or_tuesday(dt: datetime) -> datetime:
"""
For second holiday of two adjacent ones!
If holiday falls on Saturday, use following Monday instead;
@@ -39,7 +39,7 @@ def next_monday_or_tuesday(dt):
return dt
-def previous_friday(dt):
+def previous_friday(dt: datetime) -> datetime:
"""
If holiday falls on Saturday or Sunday, use previous Friday instead.
"""
@@ -50,7 +50,7 @@ def previous_friday(dt):
return dt
-def sunday_to_monday(dt):
+def sunday_to_monday(dt: datetime) -> datetime:
"""
If holiday falls on Sunday, use day thereafter (Monday) instead.
"""
@@ -59,7 +59,7 @@ def sunday_to_monday(dt):
return dt
-def weekend_to_monday(dt):
+def weekend_to_monday(dt: datetime) -> datetime:
"""
If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
@@ -72,7 +72,7 @@ def weekend_to_monday(dt):
return dt
-def nearest_workday(dt):
+def nearest_workday(dt: datetime) -> datetime:
"""
If holiday falls on Saturday, use day before (Friday) instead;
if holiday falls on Sunday, use day thereafter (Monday) instead.
@@ -84,7 +84,7 @@ def nearest_workday(dt):
return dt
-def next_workday(dt):
+def next_workday(dt: datetime) -> datetime:
"""
returns next weekday used for observances
"""
@@ -95,7 +95,7 @@ def next_workday(dt):
return dt
-def previous_workday(dt):
+def previous_workday(dt: datetime) -> datetime:
"""
returns previous weekday used for observances
"""
@@ -106,14 +106,14 @@ def previous_workday(dt):
return dt
-def before_nearest_workday(dt):
+def before_nearest_workday(dt: datetime) -> datetime:
"""
returns previous workday after nearest workday
"""
return previous_workday(nearest_workday(dt))
-def after_nearest_workday(dt):
+def after_nearest_workday(dt: datetime) -> datetime:
"""
returns next workday after nearest workday
needed for Boxing day or multiple holidays in a series
@@ -428,9 +428,11 @@ def holidays(self, start=None, end=None, return_name=False):
# If we don't have a cache or the dates are outside the prior cache, we
# get them again
if self._cache is None or start < self._cache[0] or end > self._cache[1]:
- holidays = [rule.dates(start, end, return_name=True) for rule in self.rules]
- if holidays:
- holidays = concat(holidays)
+ pre_holidays = [
+ rule.dates(start, end, return_name=True) for rule in self.rules
+ ]
+ if pre_holidays:
+ holidays = concat(pre_holidays)
else:
holidays = Series(index=DatetimeIndex([]), dtype=object)
diff --git a/setup.cfg b/setup.cfg
index e4c0b3dcf37ef..aa1535a171f0a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -276,6 +276,3 @@ check_untyped_defs=False
[mypy-pandas.plotting._matplotlib.misc]
check_untyped_defs=False
-
-[mypy-pandas.tseries.holiday]
-check_untyped_defs=False
| cc @simonjayhawkins | https://api.github.com/repos/pandas-dev/pandas/pulls/35913 | 2020-08-27T02:08:26Z | 2020-08-27T18:25:03Z | 2020-08-27T18:25:03Z | 2020-08-27T18:50:01Z |
CI: Attempt to unpin pytest-xdist | diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml
index 4894129915722..1d15ca41c0f8e 100644
--- a/ci/deps/azure-windows-37.yaml
+++ b/ci/deps/azure-windows-37.yaml
@@ -8,7 +8,7 @@ dependencies:
# tools
- cython>=0.29.16
- pytest>=5.0.1
- - pytest-xdist>=1.21,<2.0.0 # GH 35737
+ - pytest-xdist>=1.21
- hypothesis>=3.58.0
- pytest-azurepipelines
diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml
index 2853e12b28e35..23bede5eb26f1 100644
--- a/ci/deps/azure-windows-38.yaml
+++ b/ci/deps/azure-windows-38.yaml
@@ -8,7 +8,7 @@ dependencies:
# tools
- cython>=0.29.16
- pytest>=5.0.1
- - pytest-xdist>=1.21,<2.0.0 # GH 35737
+ - pytest-xdist>=1.21
- hypothesis>=3.58.0
- pytest-azurepipelines
| - [x] closes #35756
2.1.0 was released yesterday: https://pypi.org/project/pytest-xdist/#history | https://api.github.com/repos/pandas-dev/pandas/pulls/35910 | 2020-08-26T19:48:33Z | 2020-08-27T16:06:26Z | 2020-08-27T16:06:26Z | 2020-08-27T16:06:38Z |
Backport PR #35777: BUG: DataFrame.apply with result_type=reduce incorrect index | diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index 97bd4dccdcd84..748937deb5a9b 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -24,7 +24,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
-
+- Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`)
-
-
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 6d44cf917a07a..99a9e1377563c 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -340,7 +340,10 @@ def wrap_results_for_axis(
if self.result_type == "reduce":
# e.g. test_apply_dict GH#8735
- return self.obj._constructor_sliced(results)
+ res = self.obj._constructor_sliced(results)
+ res.index = res_index
+ return res
+
elif self.result_type is None and all(
isinstance(x, dict) for x in results.values()
):
diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py
index 538978358c8e7..5a1e448beb40f 100644
--- a/pandas/tests/frame/apply/test_frame_apply.py
+++ b/pandas/tests/frame/apply/test_frame_apply.py
@@ -1541,3 +1541,12 @@ def func(row):
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, result)
+
+
+def test_apply_empty_list_reduce():
+ # GH#35683 get columns correct
+ df = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], columns=["a", "b"])
+
+ result = df.apply(lambda x: [], result_type="reduce")
+ expected = pd.Series({"a": [], "b": []}, dtype=object)
+ tm.assert_series_equal(result, expected)
| #35777 | https://api.github.com/repos/pandas-dev/pandas/pulls/35905 | 2020-08-26T11:39:27Z | 2020-08-26T13:32:27Z | 2020-08-26T13:32:27Z | 2020-08-26T13:32:36Z |
Backport PR #35712: PERF: RangeIndex.format performance | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 3cd920158f774..0f0f009307c75 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -540,7 +540,7 @@ with :attr:`numpy.nan` in the case of an empty :class:`DataFrame` (:issue:`26397
.. ipython:: python
- df.describe()
+ df.describe()
``__str__`` methods now call ``__repr__`` rather than vice versa
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index 748937deb5a9b..d60119f28c053 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -15,7 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`)
--
+- Performance regression for :meth:`RangeIndex.format` (:issue:`35712`)
-
.. ---------------------------------------------------------------------------
@@ -25,7 +25,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
- Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`)
--
+- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 1be381e38b157..32bbdf425acab 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -924,7 +924,9 @@ def format(
return self._format_with_header(header, na_rep=na_rep)
- def _format_with_header(self, header, na_rep="NaN") -> List[str_t]:
+ def _format_with_header(
+ self, header: List[str_t], na_rep: str_t = "NaN"
+ ) -> List[str_t]:
from pandas.io.formats.format import format_array
values = self._values
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 74b235655e345..8af6ee555306a 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -347,7 +347,7 @@ def _format_attrs(self):
attrs.append(("length", len(self)))
return attrs
- def _format_with_header(self, header, na_rep="NaN") -> List[str]:
+ def _format_with_header(self, header: List[str], na_rep: str = "NaN") -> List[str]:
from pandas.io.formats.printing import pprint_thing
result = [
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index ab0b3a394446d..9b57a25f1b0e9 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -350,15 +350,20 @@ def format(
"""
header = []
if name:
- fmt_name = ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
- header.append(fmt_name)
+ header.append(
+ ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
+ if self.name is not None
+ else ""
+ )
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, na_rep=na_rep, date_format=date_format)
- def _format_with_header(self, header, na_rep="NaT", date_format=None) -> List[str]:
+ def _format_with_header(
+ self, header: List[str], na_rep: str = "NaT", date_format: Optional[str] = None
+ ) -> List[str]:
return header + list(
self._format_native_types(na_rep=na_rep, date_format=date_format)
)
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 9548ebbd9c3b2..446e57d58a779 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -948,7 +948,7 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
# Rendering Methods
# __repr__ associated methods are based on MultiIndex
- def _format_with_header(self, header, na_rep="NaN") -> List[str]:
+ def _format_with_header(self, header: List[str], na_rep: str = "NaN") -> List[str]:
return header + list(self._format_native_types(na_rep=na_rep))
def _format_native_types(self, na_rep="NaN", quoting=None, **kwargs):
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index eee610681087d..dcc0bdd86a98b 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -1,7 +1,7 @@
from datetime import timedelta
import operator
from sys import getsizeof
-from typing import Any, Optional
+from typing import Any, List, Optional
import warnings
import numpy as np
@@ -195,6 +195,15 @@ def _format_data(self, name=None):
# we are formatting thru the attributes
return None
+ def _format_with_header(self, header: List[str], na_rep: str = "NaN") -> List[str]:
+ if not len(self._range):
+ return header
+ first_val_str = str(self._range[0])
+ last_val_str = str(self._range[-1])
+ max_length = max(len(first_val_str), len(last_val_str))
+
+ return header + [f"{x:<{max_length}}" for x in self._range]
+
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 3b41c4bfacf73..5f82203d92dc3 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -1,5 +1,5 @@
import gc
-from typing import Optional, Type
+from typing import Type
import numpy as np
import pytest
@@ -33,7 +33,7 @@
class Base:
""" base class for index sub-class tests """
- _holder: Optional[Type[Index]] = None
+ _holder: Type[Index]
_compat_props = ["shape", "ndim", "size", "nbytes"]
def create_index(self) -> Index:
@@ -648,6 +648,12 @@ def test_format(self):
expected = [str(x) for x in idx]
assert idx.format() == expected
+ def test_format_empty(self):
+ # GH35712
+ empty_idx = self._holder([])
+ assert empty_idx.format() == []
+ assert empty_idx.format(name=True) == [""]
+
def test_hasnans_isnans(self, index):
# GH 11343, added tests for hasnans / isnans
if isinstance(index, MultiIndex):
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 15a88ab3819ce..085d41aaa5b76 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -536,6 +536,12 @@ def test_contains_raise_error_if_period_index_is_in_multi_index(self, msg, key):
with pytest.raises(KeyError, match=msg):
df.loc[key]
+ def test_format_empty(self):
+ # GH35712
+ empty_idx = self._holder([], freq="A")
+ assert empty_idx.format() == []
+ assert empty_idx.format(name=True) == [""]
+
def test_maybe_convert_timedelta():
pi = PeriodIndex(["2000", "2001"], freq="D")
diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py
index 5b6f9cb358b7d..3bd3f6cc09db7 100644
--- a/pandas/tests/indexes/ranges/test_range.py
+++ b/pandas/tests/indexes/ranges/test_range.py
@@ -166,8 +166,14 @@ def test_cached_data(self):
idx.any()
assert idx._cached_data is None
+ idx.format()
+ assert idx._cache == {}
+
df = pd.DataFrame({"a": range(10)}, index=idx)
+ str(df)
+ assert idx._cache == {}
+
df.loc[50]
assert idx._cached_data is None
@@ -506,3 +512,9 @@ def test_engineless_lookup(self):
idx.get_loc("a")
assert "_engine" not in idx._cache
+
+ def test_format_empty(self):
+ # GH35712
+ empty_idx = self._holder(0)
+ assert empty_idx.format() == []
+ assert empty_idx.format(name=True) == [""]
| #35712 | https://api.github.com/repos/pandas-dev/pandas/pulls/35904 | 2020-08-26T11:32:06Z | 2020-08-26T14:40:22Z | 2020-08-26T14:40:22Z | 2020-08-26T14:40:27Z |
REF: use BlockManager.apply for cython_agg_blocks, apply_blockwise | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index a92e3af0764a7..537feace59fcb 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1035,8 +1035,6 @@ def _cython_agg_blocks(
if numeric_only:
data = data.get_numeric_data(copy=False)
- agg_blocks: List["Block"] = []
-
no_result = object()
def cast_agg_result(result, values: ArrayLike, how: str) -> ArrayLike:
@@ -1118,23 +1116,14 @@ def blk_func(bvalues: ArrayLike) -> ArrayLike:
res_values = cast_agg_result(result, bvalues, how)
return res_values
- for i, block in enumerate(data.blocks):
- try:
- nbs = block.apply(blk_func)
- except (NotImplementedError, TypeError):
- # TypeError -> we may have an exception in trying to aggregate
- # continue and exclude the block
- # NotImplementedError -> "ohlc" with wrong dtype
- pass
- else:
- agg_blocks.extend(nbs)
+ # TypeError -> we may have an exception in trying to aggregate
+ # continue and exclude the block
+ # NotImplementedError -> "ohlc" with wrong dtype
+ new_mgr = data.apply(blk_func, ignore_failures=True)
- if not agg_blocks:
+ if not len(new_mgr):
raise DataError("No numeric types to aggregate")
- # reset the locs in the blocks to correspond to our
- # current ordering
- new_mgr = data._combine(agg_blocks)
return new_mgr
def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 389252e7ef0f2..2e3098d94afcb 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -350,7 +350,13 @@ def operate_blockwise(self, other: "BlockManager", array_op) -> "BlockManager":
"""
return operate_blockwise(self, other, array_op)
- def apply(self: T, f, align_keys=None, **kwargs) -> T:
+ def apply(
+ self: T,
+ f,
+ align_keys: Optional[List[str]] = None,
+ ignore_failures: bool = False,
+ **kwargs,
+ ) -> T:
"""
Iterate over the blocks, collect and create a new BlockManager.
@@ -358,6 +364,10 @@ def apply(self: T, f, align_keys=None, **kwargs) -> T:
----------
f : str or callable
Name of the Block method to apply.
+ align_keys: List[str] or None, default None
+ ignore_failures: bool, default False
+ **kwargs
+ Keywords to pass to `f`
Returns
-------
@@ -387,12 +397,20 @@ def apply(self: T, f, align_keys=None, **kwargs) -> T:
# otherwise we have an ndarray
kwargs[k] = obj[b.mgr_locs.indexer]
- if callable(f):
- applied = b.apply(f, **kwargs)
- else:
- applied = getattr(b, f)(**kwargs)
+ try:
+ if callable(f):
+ applied = b.apply(f, **kwargs)
+ else:
+ applied = getattr(b, f)(**kwargs)
+ except (TypeError, NotImplementedError):
+ if not ignore_failures:
+ raise
+ continue
result_blocks = _extend_blocks(applied, result_blocks)
+ if ignore_failures:
+ return self._combine(result_blocks)
+
if len(result_blocks) == 0:
return self.make_empty(self.axes)
@@ -704,7 +722,7 @@ def get_numeric_data(self, copy: bool = False) -> "BlockManager":
self._consolidate_inplace()
return self._combine([b for b in self.blocks if b.is_numeric], copy)
- def _combine(self, blocks: List[Block], copy: bool = True) -> "BlockManager":
+ def _combine(self: T, blocks: List[Block], copy: bool = True) -> T:
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index a3f60c0bc5098..558c0eeb0ea65 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -489,8 +489,6 @@ def _apply_blockwise(
if self._selected_obj.ndim == 1:
return self._apply_series(homogeneous_func)
- # This isn't quite blockwise, since `blocks` is actually a collection
- # of homogenenous DataFrames.
_, obj = self._create_blocks(self._selected_obj)
mgr = obj._mgr
@@ -500,25 +498,14 @@ def hfunc(bvalues: ArrayLike) -> ArrayLike:
res_values = homogeneous_func(values)
return getattr(res_values, "T", res_values)
- skipped: List[int] = []
- res_blocks: List["Block"] = []
- for i, blk in enumerate(mgr.blocks):
- try:
- nbs = blk.apply(hfunc)
-
- except (TypeError, NotImplementedError):
- skipped.append(i)
- continue
-
- res_blocks.extend(nbs)
+ new_mgr = mgr.apply(hfunc, ignore_failures=True)
+ out = obj._constructor(new_mgr)
- if not len(res_blocks) and skipped:
+ if out.shape[1] == 0 and obj.shape[1] > 0:
raise DataError("No numeric types to aggregate")
- elif not len(res_blocks):
+ elif out.shape[1] == 0:
return obj.astype("float64")
- new_mgr = mgr._combine(res_blocks)
- out = obj._constructor(new_mgr)
self._insert_on_column(out, obj)
return out
| - [x] closes #34714
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35900 | 2020-08-26T00:30:11Z | 2020-09-03T02:56:34Z | 2020-09-03T02:56:33Z | 2020-09-03T02:57:42Z |
REF: handle axis=None case inside DataFrame.any/all to simplify _reduce | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 606bd4cc3b52d..31611f441ceea 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8598,14 +8598,11 @@ def _reduce(
cols = self.columns[~dtype_is_dt]
self = self[cols]
- if axis is None and filter_type == "bool":
- labels = None
- constructor = None
- else:
- # TODO: Make other agg func handle axis=None properly
- axis = self._get_axis_number(axis)
- labels = self._get_agg_axis(axis)
- constructor = self._constructor
+ # TODO: Make other agg func handle axis=None properly
+ axis = self._get_axis_number(axis)
+ labels = self._get_agg_axis(axis)
+ constructor = self._constructor
+ assert axis in [0, 1]
def func(values):
if is_extension_array_dtype(values.dtype):
@@ -8613,7 +8610,7 @@ def func(values):
else:
return op(values, axis=axis, skipna=skipna, **kwds)
- def _get_data(axis_matters):
+ def _get_data(axis_matters: bool) -> "DataFrame":
if filter_type is None:
data = self._get_numeric_data()
elif filter_type == "bool":
@@ -8630,7 +8627,7 @@ def _get_data(axis_matters):
raise NotImplementedError(msg)
return data
- if numeric_only is not None and axis in [0, 1]:
+ if numeric_only is not None:
df = self
if numeric_only is True:
df = _get_data(axis_matters=True)
@@ -8656,6 +8653,8 @@ def blk_func(values):
out[:] = coerce_to_dtypes(out.values, df.dtypes)
return out
+ assert numeric_only is None
+
if not self._is_homogeneous_type or self._mgr.any_extension_types:
# try to avoid self.values call
@@ -8683,40 +8682,24 @@ def blk_func(values):
result = result.iloc[0].rename(None)
return result
- if numeric_only is None:
- data = self
- values = data.values
-
- try:
- result = func(values)
-
- except TypeError:
- # e.g. in nanops trying to convert strs to float
+ data = self
+ values = data.values
- # TODO: why doesnt axis matter here?
- data = _get_data(axis_matters=False)
- labels = data._get_agg_axis(axis)
+ try:
+ result = func(values)
- values = data.values
- with np.errstate(all="ignore"):
- result = func(values)
+ except TypeError:
+ # e.g. in nanops trying to convert strs to float
- else:
- if numeric_only:
- data = _get_data(axis_matters=True)
- labels = data._get_agg_axis(axis)
+ # TODO: why doesnt axis matter here?
+ data = _get_data(axis_matters=False)
+ labels = data._get_agg_axis(axis)
- values = data.values
- else:
- data = self
- values = data.values
- result = func(values)
+ values = data.values
+ with np.errstate(all="ignore"):
+ result = func(values)
- if filter_type == "bool" and is_object_dtype(values) and axis is None:
- # work around https://github.com/numpy/numpy/issues/10489
- # TODO: can we de-duplicate parts of this with the next blocK?
- result = np.bool_(result)
- elif hasattr(result, "dtype") and is_object_dtype(result.dtype):
+ if is_object_dtype(result.dtype):
try:
if filter_type is None:
result = result.astype(np.float64)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 286da6e1de9d5..e55d5dcb001b4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -11658,6 +11658,14 @@ def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs
"Option bool_only is not implemented with option level."
)
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
+
+ if self.ndim > 1 and axis is None:
+ # Reduce along one dimension then the other, to simplify DataFrame._reduce
+ res = logical_func(
+ self, axis=0, bool_only=bool_only, skipna=skipna, **kwargs
+ )
+ return logical_func(res, skipna=skipna, **kwargs)
+
return self._reduce(
func,
name=name,
| Between this, #35881, and the PR coming after 35881, we'll be able to simplify _reduce quite a bit. | https://api.github.com/repos/pandas-dev/pandas/pulls/35899 | 2020-08-25T23:27:39Z | 2020-09-02T01:56:40Z | 2020-09-02T01:56:40Z | 2020-09-02T02:30:58Z |
CI: docker 32-bit linux build #32709 | diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 113ad3e338952..b1091ea7f60e4 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -26,3 +26,28 @@ jobs:
parameters:
name: Windows
vmImage: vs2017-win2016
+
+- job: py37_32bit
+ pool:
+ vmImage: ubuntu-18.04
+
+ steps:
+ - script: |
+ docker pull quay.io/pypa/manylinux2014_i686
+ docker run -v $(pwd):/pandas quay.io/pypa/manylinux2014_i686 \
+ /bin/bash -xc "cd pandas && \
+ /opt/python/cp37-cp37m/bin/python -m venv ~/virtualenvs/pandas-dev && \
+ . ~/virtualenvs/pandas-dev/bin/activate && \
+ python -m pip install --no-deps -U pip wheel setuptools && \
+ pip install cython numpy python-dateutil pytz pytest pytest-xdist hypothesis pytest-azurepipelines && \
+ python setup.py build_ext -q -i -j2 && \
+ python -m pip install --no-build-isolation -e . && \
+ pytest -m 'not slow and not network and not clipboard' pandas --junitxml=test-data.xml"
+ displayName: 'Run 32-bit manylinux2014 Docker Build / Tests'
+
+ - task: PublishTestResults@2
+ condition: succeededOrFailed()
+ inputs:
+ testResultsFiles: '**/test-*.xml'
+ failTaskOnFailedTests: true
+ testRunTitle: 'Publish test results for Python 3.7-32 bit full Linux'
diff --git a/pandas/tests/arrays/floating/test_function.py b/pandas/tests/arrays/floating/test_function.py
index 2767d93741d4c..baf60a363ad29 100644
--- a/pandas/tests/arrays/floating/test_function.py
+++ b/pandas/tests/arrays/floating/test_function.py
@@ -1,6 +1,8 @@
import numpy as np
import pytest
+from pandas.compat import IS64
+
import pandas as pd
import pandas._testing as tm
@@ -71,6 +73,7 @@ def test_ufunc_reduce_raises(values):
np.add.reduce(a)
+@pytest.mark.skipif(not IS64, reason="GH 36579: fail on 32-bit system")
@pytest.mark.parametrize(
"pandasmethname, kwargs",
[
diff --git a/pandas/tests/base/test_misc.py b/pandas/tests/base/test_misc.py
index f7952c81cfd61..6a9d58021a4d9 100644
--- a/pandas/tests/base/test_misc.py
+++ b/pandas/tests/base/test_misc.py
@@ -3,7 +3,7 @@
import numpy as np
import pytest
-from pandas.compat import PYPY
+from pandas.compat import IS64, PYPY
from pandas.core.dtypes.common import (
is_categorical_dtype,
@@ -128,7 +128,10 @@ def test_memory_usage(index_or_series_obj):
)
if len(obj) == 0:
- expected = 0 if isinstance(obj, Index) else 80
+ if isinstance(obj, Index):
+ expected = 0
+ else:
+ expected = 80 if IS64 else 48
assert res_deep == res == expected
elif is_object or is_categorical:
# only deep will pick them up
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 2e51fca71e139..b57fa2540add9 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1268,8 +1268,8 @@ def test_groupby_nat_exclude():
assert grouped.ngroups == 2
expected = {
- Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.int64),
- Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.int64),
+ Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.intp),
+ Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.intp),
}
for k in grouped.indices:
diff --git a/pandas/tests/io/formats/test_info.py b/pandas/tests/io/formats/test_info.py
index 418d05a6b8752..8c2155aec7248 100644
--- a/pandas/tests/io/formats/test_info.py
+++ b/pandas/tests/io/formats/test_info.py
@@ -7,7 +7,7 @@
import numpy as np
import pytest
-from pandas.compat import PYPY
+from pandas.compat import IS64, PYPY
from pandas import (
CategoricalIndex,
@@ -475,6 +475,7 @@ def test_info_categorical():
df.info(buf=buf)
+@pytest.mark.xfail(not IS64, reason="GH 36579: fail on 32-bit system")
def test_info_int_columns():
# GH#37245
df = DataFrame({1: [1, 2], 2: [2, 3]}, index=["A", "B"])
diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py
index ae63b6af3a8b6..eee111dd4579c 100644
--- a/pandas/tests/io/parser/test_c_parser_only.py
+++ b/pandas/tests/io/parser/test_c_parser_only.py
@@ -13,6 +13,7 @@
import numpy as np
import pytest
+from pandas.compat import IS64
from pandas.errors import ParserError
import pandas.util._test_decorators as td
@@ -717,7 +718,10 @@ def test_float_precision_options(c_parser_only):
df3 = parser.read_csv(StringIO(s), float_precision="legacy")
- assert not df.iloc[0, 0] == df3.iloc[0, 0]
+ if IS64:
+ assert not df.iloc[0, 0] == df3.iloc[0, 0]
+ else:
+ assert df.iloc[0, 0] == df3.iloc[0, 0]
msg = "Unrecognized float_precision option: junk"
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 92128def4540a..642e6a691463e 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -4,6 +4,8 @@
import numpy as np
import pytest
+from pandas.compat import IS64
+
import pandas as pd
from pandas import (
Categorical,
@@ -2104,6 +2106,7 @@ def test_pivot_duplicates(self):
with pytest.raises(ValueError, match="duplicate entries"):
data.pivot("a", "b", "c")
+ @pytest.mark.xfail(not IS64, reason="GH 36579: fail on 32-bit system")
def test_pivot_empty(self):
df = DataFrame(columns=["a", "b", "c"])
result = df.pivot("a", "b", "c")
| - [x] closes #32709
| https://api.github.com/repos/pandas-dev/pandas/pulls/35898 | 2020-08-25T22:09:26Z | 2020-10-28T03:15:11Z | 2020-10-28T03:15:11Z | 2022-11-18T02:21:03Z |
CI: Mark s3 tests parallel safe | diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py
index 518f31d73efa9..193baa8c3ed74 100644
--- a/pandas/tests/io/conftest.py
+++ b/pandas/tests/io/conftest.py
@@ -34,12 +34,13 @@ def feather_file(datapath):
@pytest.fixture
-def s3so():
- return dict(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"})
+def s3so(worker_id):
+ worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw")
+ return dict(client_kwargs={"endpoint_url": f"http://127.0.0.1:555{worker_id}/"})
-@pytest.fixture(scope="module")
-def s3_base():
+@pytest.fixture(scope="session")
+def s3_base(worker_id):
"""
Fixture for mocking S3 interaction.
@@ -61,11 +62,13 @@ def s3_base():
# Launching moto in server mode, i.e., as a separate process
# with an S3 endpoint on localhost
- endpoint_uri = "http://127.0.0.1:5555/"
+ worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw")
+ endpoint_port = f"555{worker_id}"
+ endpoint_uri = f"http://127.0.0.1:{endpoint_port}/"
# pipe to null to avoid logging in terminal
proc = subprocess.Popen(
- shlex.split("moto_server s3 -p 5555"), stdout=subprocess.DEVNULL
+ shlex.split(f"moto_server s3 -p {endpoint_port}"), stdout=subprocess.DEVNULL
)
timeout = 5
@@ -79,7 +82,7 @@ def s3_base():
pass
timeout -= 0.1
time.sleep(0.1)
- yield
+ yield endpoint_uri
proc.terminate()
proc.wait()
@@ -119,9 +122,8 @@ def add_tips_files(bucket_name):
cli.put_object(Bucket=bucket_name, Key=s3_key, Body=f)
bucket = "pandas-test"
- endpoint_uri = "http://127.0.0.1:5555/"
- conn = boto3.resource("s3", endpoint_url=endpoint_uri)
- cli = boto3.client("s3", endpoint_url=endpoint_uri)
+ conn = boto3.resource("s3", endpoint_url=s3_base)
+ cli = boto3.client("s3", endpoint_url=s3_base)
try:
cli.create_bucket(Bucket=bucket)
@@ -143,7 +145,7 @@ def add_tips_files(bucket_name):
s3fs.S3FileSystem.clear_instance_cache()
yield conn
- s3 = s3fs.S3FileSystem(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"})
+ s3 = s3fs.S3FileSystem(client_kwargs={"endpoint_url": s3_base})
try:
s3.rm(bucket, recursive=True)
diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py
index 5bb205842269e..c0e3220454bf1 100644
--- a/pandas/tests/io/json/test_compression.py
+++ b/pandas/tests/io/json/test_compression.py
@@ -34,7 +34,7 @@ def test_read_zipped_json(datapath):
@td.skip_if_not_us_locale
-def test_with_s3_url(compression, s3_resource):
+def test_with_s3_url(compression, s3_resource, s3so):
# Bucket "pandas-test" created in tests/io/conftest.py
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
@@ -45,9 +45,7 @@ def test_with_s3_url(compression, s3_resource):
s3_resource.Bucket("pandas-test").put_object(Key="test-1", Body=f)
roundtripped_df = pd.read_json(
- "s3://pandas-test/test-1",
- compression=compression,
- storage_options=dict(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}),
+ "s3://pandas-test/test-1", compression=compression, storage_options=s3so,
)
tm.assert_frame_equal(df, roundtripped_df)
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 64a666079876f..2022abbaee323 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1702,17 +1702,14 @@ def test_json_multiindex(self, dataframe, expected):
result = series.to_json(orient="index")
assert result == expected
- def test_to_s3(self, s3_resource):
+ def test_to_s3(self, s3_resource, s3so):
import time
# GH 28375
mock_bucket_name, target_file = "pandas-test", "test.json"
df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})
df.to_json(
- f"s3://{mock_bucket_name}/{target_file}",
- storage_options=dict(
- client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}
- ),
+ f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so,
)
timeout = 5
while True:
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 4e0c16c71a6a8..15f9837176315 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -158,10 +158,6 @@ def check_round_trip(
"""
write_kwargs = write_kwargs or {"compression": None}
read_kwargs = read_kwargs or {}
- if isinstance(path, str) and "s3://" in path:
- s3so = dict(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"})
- read_kwargs["storage_options"] = s3so
- write_kwargs["storage_options"] = s3so
if expected is None:
expected = df
@@ -555,15 +551,24 @@ def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa, s3so):
write_kwargs=kw,
)
- def test_s3_roundtrip(self, df_compat, s3_resource, pa):
+ def test_s3_roundtrip(self, df_compat, s3_resource, pa, s3so):
if LooseVersion(pyarrow.__version__) <= LooseVersion("0.17.0"):
pytest.skip()
# GH #19134
- check_round_trip(df_compat, pa, path="s3://pandas-test/pyarrow.parquet")
+ s3so = dict(storage_options=s3so)
+ check_round_trip(
+ df_compat,
+ pa,
+ path="s3://pandas-test/pyarrow.parquet",
+ read_kwargs=s3so,
+ write_kwargs=s3so,
+ )
@td.skip_if_no("s3fs")
@pytest.mark.parametrize("partition_col", [["A"], []])
- def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col):
+ def test_s3_roundtrip_for_dir(
+ self, df_compat, s3_resource, pa, partition_col, s3so
+ ):
# GH #26388
expected_df = df_compat.copy()
@@ -587,7 +592,10 @@ def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col):
pa,
expected=expected_df,
path="s3://pandas-test/parquet_dir",
- write_kwargs={"partition_cols": partition_col, "compression": None},
+ read_kwargs=dict(storage_options=s3so),
+ write_kwargs=dict(
+ partition_cols=partition_col, compression=None, storage_options=s3so
+ ),
check_like=True,
repeat=1,
)
@@ -761,9 +769,15 @@ def test_filter_row_groups(self, fp):
result = read_parquet(path, fp, filters=[("a", "==", 0)])
assert len(result) == 1
- def test_s3_roundtrip(self, df_compat, s3_resource, fp):
+ def test_s3_roundtrip(self, df_compat, s3_resource, fp, s3so):
# GH #19134
- check_round_trip(df_compat, fp, path="s3://pandas-test/fastparquet.parquet")
+ check_round_trip(
+ df_compat,
+ fp,
+ path="s3://pandas-test/fastparquet.parquet",
+ read_kwargs=dict(storage_options=s3so),
+ write_kwargs=dict(compression=None, storage_options=s3so),
+ )
def test_partition_cols_supported(self, fp, df_full):
# GH #23283
| Closes https://github.com/pandas-dev/pandas/issues/35856
I think we need to update the pytest pattern though, so this should
fail. | https://api.github.com/repos/pandas-dev/pandas/pulls/35895 | 2020-08-25T14:49:51Z | 2020-08-26T02:21:41Z | 2020-08-26T02:21:41Z | 2020-09-10T16:30:56Z |
DOC: avoid StorageOptions type alias in docstrings | diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index aaef71910c9ab..3cd0d721bbdc6 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -200,13 +200,13 @@
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
-storage_options : StorageOptions
+storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
- docs for the set of allowed keys and values
+ docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index a6cd8f524503b..6cbca59aed97e 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -18,7 +18,7 @@ class _ODFReader(_BaseExcelReader):
----------
filepath_or_buffer : string, path to be parsed or
an open readable stream.
- storage_options : StorageOptions
+ storage_options : dict, optional
passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``)
"""
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index 73239190604db..c2730536af8a3 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -479,7 +479,7 @@ def __init__(
----------
filepath_or_buffer : string, path object or Workbook
Object to be parsed.
- storage_options : StorageOptions
+ storage_options : dict, optional
passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``)
"""
import_optional_dependency("openpyxl")
diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py
index c0e281ff6c2da..c15a52abe4d53 100644
--- a/pandas/io/excel/_pyxlsb.py
+++ b/pandas/io/excel/_pyxlsb.py
@@ -19,7 +19,7 @@ def __init__(
----------
filepath_or_buffer : str, path object, or Workbook
Object to be parsed.
- storage_options : StorageOptions
+ storage_options : dict, optional
passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``)
"""
import_optional_dependency("pyxlsb")
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index ff1b3c8bdb964..a7fb519af61c6 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -17,7 +17,7 @@ def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None):
----------
filepath_or_buffer : string, path object or Workbook
Object to be parsed.
- storage_options : StorageOptions
+ storage_options : dict, optional
passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``)
"""
err_msg = "Install xlrd >= 1.0.0 for Excel support"
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index 2d86fa44f22a4..fb606b5ec8aef 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -16,14 +16,13 @@ def to_feather(df: DataFrame, path, storage_options: StorageOptions = None, **kw
----------
df : DataFrame
path : string file path, or file-like object
-
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
- docs for the set of allowed keys and values
+ docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
@@ -106,6 +105,15 @@ def read_feather(
Whether to parallelize reading using multiple threads.
.. versionadded:: 0.24.0
+ storage_options : dict, optional
+ Extra options that make sense for a particular storage connection, e.g.
+ host, port, username, password, etc., if using a URL that will
+ be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
+ will be raised if providing this argument with a local path or
+ a file-like buffer. See the fsspec and backend storage implementation
+ docs for the set of allowed keys and values.
+
+ .. versionadded:: 1.2.0
Returns
-------
| Small follow-up on https://github.com/pandas-dev/pandas/pull/35655, replacing the "StorageOptions" with a plain "dict" in the docstrings ("StorageOptions" is not something known to users, in the type annotations it will expand but not in the docstrings)
(cc @martindurant) | https://api.github.com/repos/pandas-dev/pandas/pulls/35894 | 2020-08-25T14:42:06Z | 2020-08-25T20:12:32Z | 2020-08-25T20:12:32Z | 2020-08-25T20:12:35Z |
Backport PR #35814: TST: Fix test_parquet failures for pyarrow 1.0 | diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 82157f3d722a9..306b2a7849586 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -557,13 +557,23 @@ def test_s3_roundtrip(self, df_compat, s3_resource, pa):
@pytest.mark.parametrize("partition_col", [["A"], []])
def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col):
# GH #26388
- # https://github.com/apache/arrow/blob/master/python/pyarrow/tests/test_parquet.py#L2716
- # As per pyarrow partitioned columns become 'categorical' dtypes
- # and are added to back of dataframe on read
-
expected_df = df_compat.copy()
- if partition_col:
- expected_df[partition_col] = expected_df[partition_col].astype("category")
+
+ # GH #35791
+ # read_table uses the new Arrow Datasets API since pyarrow 1.0.0
+ # Previous behaviour was pyarrow partitioned columns become 'category' dtypes
+ # These are added to back of dataframe on read. In new API category dtype is
+ # only used if partition field is string.
+ legacy_read_table = LooseVersion(pyarrow.__version__) < LooseVersion("1.0.0")
+ if partition_col and legacy_read_table:
+ partition_col_type = "category"
+ else:
+ partition_col_type = "int32"
+
+ expected_df[partition_col] = expected_df[partition_col].astype(
+ partition_col_type
+ )
+
check_round_trip(
df_compat,
pa,
| Backport https://github.com/pandas-dev/pandas/pull/35814 | https://api.github.com/repos/pandas-dev/pandas/pulls/35887 | 2020-08-25T07:07:52Z | 2020-08-25T10:04:02Z | 2020-08-25T10:04:02Z | 2020-08-25T15:18:11Z |
DOC: Fix documentation for pandas.Series.transform #35870 | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 9f36405bf6428..286da6e1de9d5 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10703,7 +10703,7 @@ def transform(self, func, *args, **kwargs):
- function
- string function name
- - list of functions and/or function names, e.g. ``[np.exp. 'sqrt']``
+ - list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- dict of axis labels -> functions, function names or list of such.
{axis}
*args
| - [x] closes #35870
- [x] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35885 | 2020-08-25T03:46:11Z | 2020-08-25T05:37:04Z | 2020-08-25T05:37:04Z | 2020-08-27T13:41:27Z |
REF: reuse _combine instead of reset_dropped_locs | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 1198baab12ac1..70a8379de64e9 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -21,7 +21,6 @@
Mapping,
Optional,
Sequence,
- Tuple,
Type,
Union,
)
@@ -1025,16 +1024,14 @@ def _iterate_slices(self) -> Iterable[Series]:
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
) -> DataFrame:
- agg_blocks, agg_items = self._cython_agg_blocks(
+ agg_mgr = self._cython_agg_blocks(
how, alt=alt, numeric_only=numeric_only, min_count=min_count
)
- return self._wrap_agged_blocks(agg_blocks, items=agg_items)
+ return self._wrap_agged_blocks(agg_mgr.blocks, items=agg_mgr.items)
def _cython_agg_blocks(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
- ) -> "Tuple[List[Block], Index]":
- # TODO: the actual managing of mgr_locs is a PITA
- # here, it should happen via BlockManager.combine
+ ) -> BlockManager:
data: BlockManager = self._get_data_to_aggregate()
@@ -1124,7 +1121,6 @@ def blk_func(bvalues: ArrayLike) -> ArrayLike:
res_values = cast_agg_result(result, bvalues, how)
return res_values
- skipped: List[int] = []
for i, block in enumerate(data.blocks):
try:
nbs = block.apply(blk_func)
@@ -1132,7 +1128,7 @@ def blk_func(bvalues: ArrayLike) -> ArrayLike:
# TypeError -> we may have an exception in trying to aggregate
# continue and exclude the block
# NotImplementedError -> "ohlc" with wrong dtype
- skipped.append(i)
+ pass
else:
agg_blocks.extend(nbs)
@@ -1141,9 +1137,8 @@ def blk_func(bvalues: ArrayLike) -> ArrayLike:
# reset the locs in the blocks to correspond to our
# current ordering
- agg_items = data.reset_dropped_locs(agg_blocks, skipped)
-
- return agg_blocks, agg_items
+ new_mgr = data._combine(agg_blocks)
+ return new_mgr
def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
if self.grouper.nkeys != 1:
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 297ad3077ef1d..6f16254c56ec4 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1491,38 +1491,6 @@ def unstack(self, unstacker, fill_value) -> "BlockManager":
bm = BlockManager(new_blocks, [new_columns, new_index])
return bm
- def reset_dropped_locs(self, blocks: List[Block], skipped: List[int]) -> Index:
- """
- Decrement the mgr_locs of the given blocks with `skipped` removed.
-
- Notes
- -----
- Alters each block's mgr_locs inplace.
- """
- ncols = len(self)
-
- new_locs = [blk.mgr_locs.as_array for blk in blocks]
- indexer = np.concatenate(new_locs)
-
- new_items = self.items.take(np.sort(indexer))
-
- if skipped:
- # we need to adjust the indexer to account for the
- # items we have removed
- deleted_items = [self.blocks[i].mgr_locs.as_array for i in skipped]
- deleted = np.concatenate(deleted_items)
- ai = np.arange(ncols)
- mask = np.zeros(ncols)
- mask[deleted] = 1
- indexer = (ai - mask.cumsum())[indexer]
-
- offset = 0
- for blk in blocks:
- loc = len(blk.mgr_locs)
- blk.mgr_locs = indexer[offset : (offset + loc)]
- offset += loc
- return new_items
-
class SingleBlockManager(BlockManager):
""" manage a single block with """
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index a70247d9f7f9c..baabdf0fca29a 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -561,8 +561,7 @@ def hfunc(bvalues: ArrayLike) -> ArrayLike:
elif not len(res_blocks):
return obj.astype("float64")
- new_cols = mgr.reset_dropped_locs(res_blocks, skipped)
- new_mgr = type(mgr).from_blocks(res_blocks, [new_cols, obj.index])
+ new_mgr = mgr._combine(res_blocks)
out = obj._constructor(new_mgr)
self._insert_on_column(out, obj)
return out
| https://api.github.com/repos/pandas-dev/pandas/pulls/35884 | 2020-08-25T03:40:01Z | 2020-08-25T12:59:42Z | 2020-08-25T12:59:42Z | 2020-08-25T15:14:10Z | |
REF: use BlockManager.apply for Rolling.count | diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 04509a40b98df..246bf8e6f71b7 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -22,7 +22,7 @@
from pandas._libs.tslibs import BaseOffset, to_offset
import pandas._libs.window.aggregations as window_aggregations
-from pandas._typing import ArrayLike, Axis, FrameOrSeries, FrameOrSeriesUnion, Label
+from pandas._typing import ArrayLike, Axis, FrameOrSeries, FrameOrSeriesUnion
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
@@ -44,6 +44,7 @@
ABCSeries,
ABCTimedeltaIndex,
)
+from pandas.core.dtypes.missing import notna
from pandas.core.base import DataError, PandasObject, SelectionMixin, ShallowMixin
import pandas.core.common as com
@@ -395,40 +396,6 @@ def _wrap_result(self, result, block=None, obj=None):
return type(obj)(result, index=index, columns=block.columns)
return result
- def _wrap_results(self, results, obj, skipped: List[int]) -> FrameOrSeriesUnion:
- """
- Wrap the results.
-
- Parameters
- ----------
- results : list of ndarrays
- obj : conformed data (may be resampled)
- skipped: List[int]
- Indices of blocks that are skipped.
- """
- from pandas import Series, concat
-
- if obj.ndim == 1:
- if not results:
- raise DataError("No numeric types to aggregate")
- assert len(results) == 1
- return Series(results[0], index=obj.index, name=obj.name)
-
- exclude: List[Label] = []
- orig_blocks = list(obj._to_dict_of_blocks(copy=False).values())
- for i in skipped:
- exclude.extend(orig_blocks[i].columns)
-
- columns = [c for c in self._selected_obj.columns if c not in exclude]
- if not columns and not len(results) and exclude:
- raise DataError("No numeric types to aggregate")
- elif not len(results):
- return obj.astype("float64")
-
- df = concat(results, axis=1).reindex(columns=columns, copy=False)
- self._insert_on_column(df, obj)
- return df
-
def _insert_on_column(self, result: "DataFrame", obj: "DataFrame"):
# if we have an 'on' column we want to put it back into
# the results in the same location
@@ -1325,21 +1292,29 @@ def count(self):
# implementations shouldn't end up here
assert not isinstance(self.window, BaseIndexer)
- blocks, obj = self._create_blocks(self._selected_obj)
- results = []
- for b in blocks:
- result = b.notna().astype(int)
+ _, obj = self._create_blocks(self._selected_obj)
+
+ def hfunc(values: np.ndarray) -> np.ndarray:
+ result = notna(values)
+ result = result.astype(int)
+ frame = type(obj)(result.T)
result = self._constructor(
- result,
+ frame,
window=self._get_window(),
min_periods=self.min_periods or 0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
- results.append(result)
+ return result.values.T
- return self._wrap_results(results, obj, skipped=[])
+ new_mgr = obj._mgr.apply(hfunc)
+ out = obj._constructor(new_mgr)
+ if obj.ndim == 1:
+ out.name = obj.name
+ else:
+ self._insert_on_column(out, obj)
+ return out
_shared_docs["apply"] = dedent(
r"""
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35883 | 2020-08-25T02:12:19Z | 2020-08-31T18:28:25Z | 2020-08-31T18:28:25Z | 2020-08-31T19:21:05Z |
BUG: item_cache invalidation in get_numeric_data | diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index ac9fe9d2fca26..8cf79500c0384 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -33,6 +33,7 @@ Bug fixes
- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should be ``""`` (:issue:`35712`)
- Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`)
- Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`)
+- Bug in :meth:`DataFrame.corr` causing subsequent indexing lookups to be incorrect (:issue:`35882`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 2e3098d94afcb..f4dba46cb965c 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -719,7 +719,6 @@ def get_numeric_data(self, copy: bool = False) -> "BlockManager":
copy : bool, default False
Whether to copy the blocks
"""
- self._consolidate_inplace()
return self._combine([b for b in self.blocks if b.is_numeric], copy)
def _combine(self: T, blocks: List[Block], copy: bool = True) -> T:
diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py
index d3548b639572d..f307acd8c2178 100644
--- a/pandas/tests/frame/methods/test_cov_corr.py
+++ b/pandas/tests/frame/methods/test_cov_corr.py
@@ -191,6 +191,23 @@ def test_corr_nullable_integer(self, nullable_column, other_column, method):
expected = pd.DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"])
tm.assert_frame_equal(result, expected)
+ def test_corr_item_cache(self):
+ # Check that corr does not lead to incorrect entries in item_cache
+
+ df = pd.DataFrame({"A": range(10)})
+ df["B"] = range(10)[::-1]
+
+ ser = df["A"] # populate item_cache
+ assert len(df._mgr.blocks) == 2
+
+ _ = df.corr()
+
+ # Check that the corr didnt break link between ser and df
+ ser.values[0] = 99
+ assert df.loc[0, "A"] == 99
+ assert df["A"] is ser
+ assert df.values[0, 0] == 99
+
class TestDataFrameCorrWith:
def test_corrwith(self, datetime_frame):
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35882 | 2020-08-25T01:13:32Z | 2020-09-05T19:55:37Z | 2020-09-05T19:55:37Z | 2020-09-07T11:33:18Z |
REF: ignore_failures in BlockManager.reduce | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1f9987d9d3f5b..8efe2fc090fc5 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8595,6 +8595,7 @@ def _reduce(
cols = self.columns[~dtype_is_dt]
self = self[cols]
+ any_object = self.dtypes.apply(is_object_dtype).any()
# TODO: Make other agg func handle axis=None properly GH#21597
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
@@ -8621,7 +8622,17 @@ def _get_data() -> DataFrame:
data = self._get_bool_data()
return data
- if numeric_only is not None:
+ if numeric_only is not None or (
+ numeric_only is None
+ and axis == 0
+ and not any_object
+ and not self._mgr.any_extension_types
+ ):
+ # For numeric_only non-None and axis non-None, we know
+ # which blocks to use and no try/except is needed.
+ # For numeric_only=None only the case with axis==0 and no object
+ # dtypes are unambiguous can be handled with BlockManager.reduce
+ # Case with EAs see GH#35881
df = self
if numeric_only is True:
df = _get_data()
@@ -8629,14 +8640,18 @@ def _get_data() -> DataFrame:
df = df.T
axis = 0
+ ignore_failures = numeric_only is None
+
# After possibly _get_data and transposing, we are now in the
# simple case where we can use BlockManager.reduce
- res = df._mgr.reduce(blk_func)
- out = df._constructor(res).iloc[0].rename(None)
+ res, indexer = df._mgr.reduce(blk_func, ignore_failures=ignore_failures)
+ out = df._constructor(res).iloc[0]
if out_dtype is not None:
out = out.astype(out_dtype)
if axis == 0 and is_object_dtype(out.dtype):
- out[:] = coerce_to_dtypes(out.values, df.dtypes)
+ # GH#35865 careful to cast explicitly to object
+ nvs = coerce_to_dtypes(out.values, df.dtypes.iloc[np.sort(indexer)])
+ out[:] = np.array(nvs, dtype=object)
return out
assert numeric_only is None
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 09f276be7d64a..9b6c4b664285e 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -348,12 +348,18 @@ def apply(self, func, **kwargs) -> List["Block"]:
return self._split_op_result(result)
- def reduce(self, func) -> List["Block"]:
+ def reduce(self, func, ignore_failures: bool = False) -> List["Block"]:
# We will apply the function and reshape the result into a single-row
# Block with the same mgr_locs; squeezing will be done at a higher level
assert self.ndim == 2
- result = func(self.values)
+ try:
+ result = func(self.values)
+ except (TypeError, NotImplementedError):
+ if ignore_failures:
+ return []
+ raise
+
if np.ndim(result) == 0:
# TODO(EA2D): special case not needed with 2D EAs
res_values = np.array([[result]])
@@ -2454,6 +2460,34 @@ def is_bool(self):
"""
return lib.is_bool_array(self.values.ravel("K"))
+ def reduce(self, func, ignore_failures: bool = False) -> List[Block]:
+ """
+ For object-dtype, we operate column-wise.
+ """
+ assert self.ndim == 2
+
+ values = self.values
+ if len(values) > 1:
+ # split_and_operate expects func with signature (mask, values, inplace)
+ def mask_func(mask, values, inplace):
+ if values.ndim == 1:
+ values = values.reshape(1, -1)
+ return func(values)
+
+ return self.split_and_operate(None, mask_func, False)
+
+ try:
+ res = func(values)
+ except TypeError:
+ if not ignore_failures:
+ raise
+ return []
+
+ assert isinstance(res, np.ndarray)
+ assert res.ndim == 1
+ res = res.reshape(1, -1)
+ return [self.make_block_same_class(res)]
+
def convert(
self,
copy: bool = True,
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index f2480adce89b4..7f5e99c3348b7 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -2,6 +2,7 @@
import itertools
from typing import (
Any,
+ Callable,
DefaultDict,
Dict,
List,
@@ -324,18 +325,44 @@ def _verify_integrity(self) -> None:
f"tot_items: {tot_items}"
)
- def reduce(self: T, func) -> T:
+ def reduce(
+ self: T, func: Callable, ignore_failures: bool = False
+ ) -> Tuple[T, np.ndarray]:
+ """
+ Apply reduction function blockwise, returning a single-row BlockManager.
+
+ Parameters
+ ----------
+ func : reduction function
+ ignore_failures : bool, default False
+ Whether to drop blocks where func raises TypeError.
+
+ Returns
+ -------
+ BlockManager
+ np.ndarray
+ Indexer of mgr_locs that are retained.
+ """
# If 2D, we assume that we're operating column-wise
assert self.ndim == 2
res_blocks: List[Block] = []
for blk in self.blocks:
- nbs = blk.reduce(func)
+ nbs = blk.reduce(func, ignore_failures)
res_blocks.extend(nbs)
- index = Index([0]) # placeholder
- new_mgr = BlockManager.from_blocks(res_blocks, [self.items, index])
- return new_mgr
+ index = Index([None]) # placeholder
+ if ignore_failures:
+ if res_blocks:
+ indexer = np.concatenate([blk.mgr_locs.as_array for blk in res_blocks])
+ new_mgr = self._combine(res_blocks, copy=False, index=index)
+ else:
+ indexer = []
+ new_mgr = type(self).from_blocks([], [Index([]), index])
+ else:
+ indexer = np.arange(self.shape[0])
+ new_mgr = type(self).from_blocks(res_blocks, [self.items, index])
+ return new_mgr, indexer
def operate_blockwise(self, other: "BlockManager", array_op) -> "BlockManager":
"""
@@ -700,7 +727,9 @@ def get_numeric_data(self, copy: bool = False) -> "BlockManager":
"""
return self._combine([b for b in self.blocks if b.is_numeric], copy)
- def _combine(self: T, blocks: List[Block], copy: bool = True) -> T:
+ def _combine(
+ self: T, blocks: List[Block], copy: bool = True, index: Optional[Index] = None
+ ) -> T:
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
@@ -716,6 +745,8 @@ def _combine(self: T, blocks: List[Block], copy: bool = True) -> T:
new_blocks.append(b)
axes = list(self.axes)
+ if index is not None:
+ axes[-1] = index
axes[0] = self.items.take(indexer)
return type(self).from_blocks(new_blocks, axes)
| Moving towards collecting all of the ignore_failures code in one place.
The case where we have object dtypes is kept separate in this PR, will be handled in the next pass. | https://api.github.com/repos/pandas-dev/pandas/pulls/35881 | 2020-08-25T00:54:54Z | 2020-10-10T18:36:03Z | 2020-10-10T18:36:03Z | 2020-10-14T13:04:24Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.