title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
STY: De-privatize imported names | diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx
index ffaf6d6505955..5a0cddb0af197 100644
--- a/pandas/_libs/hashtable.pyx
+++ b/pandas/_libs/hashtable.pyx
@@ -56,7 +56,7 @@ from pandas._libs.missing cimport checknull
cdef int64_t NPY_NAT = util.get_nat()
-_SIZE_HINT_LIMIT = (1 << 20) + 7
+SIZE_HINT_LIMIT = (1 << 20) + 7
cdef Py_ssize_t _INIT_VEC_CAP = 128
@@ -176,7 +176,7 @@ def unique_label_indices(const int64_t[:] labels):
ndarray[int64_t, ndim=1] arr
Int64VectorData *ud = idx.data
- kh_resize_int64(table, min(n, _SIZE_HINT_LIMIT))
+ kh_resize_int64(table, min(n, SIZE_HINT_LIMIT))
with nogil:
for i in range(n):
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index e0e026fe7cb5e..5e4da96d57e42 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -268,7 +268,7 @@ cdef class {{name}}HashTable(HashTable):
def __cinit__(self, int64_t size_hint=1):
self.table = kh_init_{{dtype}}()
if size_hint is not None:
- size_hint = min(size_hint, _SIZE_HINT_LIMIT)
+ size_hint = min(size_hint, SIZE_HINT_LIMIT)
kh_resize_{{dtype}}(self.table, size_hint)
def __len__(self) -> int:
@@ -603,7 +603,7 @@ cdef class StringHashTable(HashTable):
def __init__(self, int64_t size_hint=1):
self.table = kh_init_str()
if size_hint is not None:
- size_hint = min(size_hint, _SIZE_HINT_LIMIT)
+ size_hint = min(size_hint, SIZE_HINT_LIMIT)
kh_resize_str(self.table, size_hint)
def __dealloc__(self):
@@ -916,7 +916,7 @@ cdef class PyObjectHashTable(HashTable):
def __init__(self, int64_t size_hint=1):
self.table = kh_init_pymap()
if size_hint is not None:
- size_hint = min(size_hint, _SIZE_HINT_LIMIT)
+ size_hint = min(size_hint, SIZE_HINT_LIMIT)
kh_resize_pymap(self.table, size_hint)
def __dealloc__(self):
diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in
index 0cc0a6b192df5..fcd081f563f92 100644
--- a/pandas/_libs/hashtable_func_helper.pxi.in
+++ b/pandas/_libs/hashtable_func_helper.pxi.in
@@ -138,7 +138,7 @@ def duplicated_{{dtype}}(const {{c_type}}[:] values, object keep='first'):
kh_{{ttype}}_t *table = kh_init_{{ttype}}()
ndarray[uint8_t, ndim=1, cast=True] out = np.empty(n, dtype='bool')
- kh_resize_{{ttype}}(table, min(n, _SIZE_HINT_LIMIT))
+ kh_resize_{{ttype}}(table, min(n, SIZE_HINT_LIMIT))
if keep not in ('last', 'first', False):
raise ValueError('keep must be either "first", "last" or False')
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index fa77af6bd5a25..811e28b830921 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -67,7 +67,7 @@ from pandas._libs.khash cimport (
khiter_t,
)
-from pandas.compat import _get_lzma_file, _import_lzma
+from pandas.compat import get_lzma_file, import_lzma
from pandas.errors import DtypeWarning, EmptyDataError, ParserError, ParserWarning
from pandas.core.dtypes.common import (
@@ -82,7 +82,7 @@ from pandas.core.dtypes.common import (
)
from pandas.core.dtypes.concat import union_categoricals
-lzma = _import_lzma()
+lzma = import_lzma()
cdef:
float64_t INF = <float64_t>np.inf
@@ -638,9 +638,9 @@ cdef class TextReader:
f'zip file {zip_names}')
elif self.compression == 'xz':
if isinstance(source, str):
- source = _get_lzma_file(lzma)(source, 'rb')
+ source = get_lzma_file(lzma)(source, 'rb')
else:
- source = _get_lzma_file(lzma)(filename=source)
+ source = get_lzma_file(lzma)(filename=source)
else:
raise ValueError(f'Unrecognized compression type: '
f'{self.compression}')
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 04d36749a3d8c..7dba578951deb 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -25,7 +25,7 @@
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
-from pandas.compat import _get_lzma_file, _import_lzma
+from pandas.compat import get_lzma_file, import_lzma
from pandas.core.dtypes.common import (
is_bool,
@@ -70,7 +70,7 @@
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
-lzma = _import_lzma()
+lzma = import_lzma()
_N = 30
_K = 4
@@ -243,7 +243,7 @@ def decompress_file(path, compression):
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
- f = _get_lzma_file(lzma)(path, "rb")
+ f = get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
@@ -288,7 +288,7 @@ def write_to_compressed(compression, path, data, dest="test"):
elif compression == "bz2":
compress_method = bz2.BZ2File
elif compression == "xz":
- compress_method = _get_lzma_file(lzma)
+ compress_method = get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index f2018a5c01711..57e378758cc78 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -77,7 +77,7 @@ def is_platform_mac() -> bool:
return sys.platform == "darwin"
-def _import_lzma():
+def import_lzma():
"""
Importing the `lzma` module.
@@ -97,7 +97,7 @@ def _import_lzma():
warnings.warn(msg)
-def _get_lzma_file(lzma):
+def get_lzma_file(lzma):
"""
Importing the `LZMAFile` class from the `lzma` module.
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 50ec3714f454b..57e63daff29e4 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -462,7 +462,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
return f(comps, values)
-def _factorize_array(
+def factorize_array(
values, na_sentinel: int = -1, size_hint=None, na_value=None, mask=None
) -> Tuple[np.ndarray, np.ndarray]:
"""
@@ -671,7 +671,7 @@ def factorize(
else:
na_value = None
- codes, uniques = _factorize_array(
+ codes, uniques = factorize_array(
values, na_sentinel=na_sentinel, size_hint=size_hint, na_value=na_value
)
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 8193d65b3b30c..0c8efda5fc588 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -31,7 +31,7 @@
from pandas.core.dtypes.missing import isna
from pandas.core import ops
-from pandas.core.algorithms import _factorize_array, unique
+from pandas.core.algorithms import factorize_array, unique
from pandas.core.missing import backfill_1d, pad_1d
from pandas.core.sorting import nargminmax, nargsort
@@ -845,7 +845,7 @@ def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, "ExtensionArray"
# Complete control over factorization.
arr, na_value = self._values_for_factorize()
- codes, uniques = _factorize_array(
+ codes, uniques = factorize_array(
arr, na_sentinel=na_sentinel, na_value=na_value
)
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 1237dea5c1a64..31274232e2525 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -17,7 +17,7 @@
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops
-from pandas.core.algorithms import _factorize_array, take
+from pandas.core.algorithms import factorize_array, take
from pandas.core.array_algos import masked_reductions
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.indexers import check_array_indexer
@@ -287,7 +287,7 @@ def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, ExtensionArray]:
arr = self._data
mask = self._mask
- codes, uniques = _factorize_array(arr, na_sentinel=na_sentinel, mask=mask)
+ codes, uniques = factorize_array(arr, na_sentinel=na_sentinel, mask=mask)
# the hashtables don't handle all different types of bits
uniques = uniques.astype(self.dtype.numpy_dtype, copy=False)
diff --git a/pandas/core/computation/check.py b/pandas/core/computation/check.py
index 4d205909b9e2e..6c7261b3b33c9 100644
--- a/pandas/core/computation/check.py
+++ b/pandas/core/computation/check.py
@@ -1,10 +1,10 @@
from pandas.compat._optional import import_optional_dependency
ne = import_optional_dependency("numexpr", raise_on_missing=False, on_version="warn")
-_NUMEXPR_INSTALLED = ne is not None
-if _NUMEXPR_INSTALLED:
- _NUMEXPR_VERSION = ne.__version__
+NUMEXPR_INSTALLED = ne is not None
+if NUMEXPR_INSTALLED:
+ NUMEXPR_VERSION = ne.__version__
else:
- _NUMEXPR_VERSION = None
+ NUMEXPR_VERSION = None
-__all__ = ["_NUMEXPR_INSTALLED", "_NUMEXPR_VERSION"]
+__all__ = ["NUMEXPR_INSTALLED", "NUMEXPR_VERSION"]
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index b74f99fca21c7..f6a7935142a32 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -38,10 +38,10 @@ def _check_engine(engine: Optional[str]) -> str:
str
Engine name.
"""
- from pandas.core.computation.check import _NUMEXPR_INSTALLED
+ from pandas.core.computation.check import NUMEXPR_INSTALLED
if engine is None:
- engine = "numexpr" if _NUMEXPR_INSTALLED else "python"
+ engine = "numexpr" if NUMEXPR_INSTALLED else "python"
if engine not in _engines:
valid_engines = list(_engines.keys())
@@ -53,7 +53,7 @@ def _check_engine(engine: Optional[str]) -> str:
# that won't necessarily be import-able)
# Could potentially be done on engine instantiation
if engine == "numexpr":
- if not _NUMEXPR_INSTALLED:
+ if not NUMEXPR_INSTALLED:
raise ImportError(
"'numexpr' is not installed or an unsupported version. Cannot use "
"engine='numexpr' for query/eval if 'numexpr' is not installed"
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index a9c0cb0571446..d2c08c343ab4b 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -15,15 +15,15 @@
from pandas.core.dtypes.generic import ABCDataFrame
-from pandas.core.computation.check import _NUMEXPR_INSTALLED
+from pandas.core.computation.check import NUMEXPR_INSTALLED
from pandas.core.ops import roperator
-if _NUMEXPR_INSTALLED:
+if NUMEXPR_INSTALLED:
import numexpr as ne
_TEST_MODE = None
_TEST_RESULT: List[bool] = list()
-_USE_NUMEXPR = _NUMEXPR_INSTALLED
+_USE_NUMEXPR = NUMEXPR_INSTALLED
_evaluate = None
_where = None
@@ -40,7 +40,7 @@
def set_use_numexpr(v=True):
# set/unset to use numexpr
global _USE_NUMEXPR
- if _NUMEXPR_INSTALLED:
+ if NUMEXPR_INSTALLED:
_USE_NUMEXPR = v
# choose what we are going to do
@@ -53,7 +53,7 @@ def set_use_numexpr(v=True):
def set_numexpr_threads(n=None):
# if we are using numexpr, set the threads to n
# otherwise reset
- if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
+ if NUMEXPR_INSTALLED and _USE_NUMEXPR:
if n is None:
n = ne.detect_number_of_cores()
ne.set_num_threads(n)
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index b2144c45c6323..1fb3910b8577d 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -600,11 +600,11 @@ def __repr__(self) -> str:
class FuncNode:
def __init__(self, name: str):
- from pandas.core.computation.check import _NUMEXPR_INSTALLED, _NUMEXPR_VERSION
+ from pandas.core.computation.check import NUMEXPR_INSTALLED, NUMEXPR_VERSION
if name not in _mathops or (
- _NUMEXPR_INSTALLED
- and _NUMEXPR_VERSION < LooseVersion("2.6.9")
+ NUMEXPR_INSTALLED
+ and NUMEXPR_VERSION < LooseVersion("2.6.9")
and name in ("floor", "ceil")
):
raise ValueError(f'"{name}" is not a supported function')
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 150d6e24dbb86..e1a889bf79d95 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5257,7 +5257,7 @@ def duplicated(
4 True
dtype: bool
"""
- from pandas._libs.hashtable import _SIZE_HINT_LIMIT, duplicated_int64
+ from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64
from pandas.core.sorting import get_group_index
@@ -5266,7 +5266,7 @@ def duplicated(
def f(vals):
labels, shape = algorithms.factorize(
- vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)
+ vals, size_hint=min(len(self), SIZE_HINT_LIMIT)
)
return labels.astype("i8", copy=False), len(shape)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 080ece8547479..e49a23935efbd 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1342,9 +1342,9 @@ def format(
)
if adjoin:
- from pandas.io.formats.format import _get_adjustment
+ from pandas.io.formats.format import get_adjustment
- adj = _get_adjustment()
+ adj = get_adjustment()
return adj.adjoin(space, *result_levels).split("\n")
else:
return result_levels
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py
index e12e0d7760ea7..fbccac1c2af67 100644
--- a/pandas/core/internals/__init__.py
+++ b/pandas/core/internals/__init__.py
@@ -10,8 +10,8 @@
IntBlock,
ObjectBlock,
TimeDeltaBlock,
- _safe_reshape,
make_block,
+ safe_reshape,
)
from pandas.core.internals.concat import concatenate_block_managers
from pandas.core.internals.managers import (
@@ -33,7 +33,7 @@
"IntBlock",
"ObjectBlock",
"TimeDeltaBlock",
- "_safe_reshape",
+ "safe_reshape",
"make_block",
"BlockManager",
"SingleBlockManager",
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 9f4e535dc787d..0271304442e45 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1673,7 +1673,7 @@ def putmask(
if isinstance(new, (np.ndarray, ExtensionArray)) and len(new) == len(mask):
new = new[mask]
- mask = _safe_reshape(mask, new_values.shape)
+ mask = safe_reshape(mask, new_values.shape)
new_values[mask] = new
return [self.make_block(values=new_values)]
@@ -2815,7 +2815,7 @@ def _block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike:
return values
-def _safe_reshape(arr, new_shape):
+def safe_reshape(arr, new_shape):
"""
If possible, reshape `arr` to have shape `new_shape`,
with a couple of exceptions (see gh-13012):
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 13bc6a2e82195..3f446874ffd0e 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -47,10 +47,10 @@
DatetimeTZBlock,
ExtensionBlock,
ObjectValuesExtensionBlock,
- _safe_reshape,
extend_blocks,
get_block_type,
make_block,
+ safe_reshape,
)
from pandas.core.internals.ops import blockwise_all, operate_blockwise
@@ -1015,7 +1015,7 @@ def value_getitem(placement):
else:
if value.ndim == self.ndim - 1:
- value = _safe_reshape(value, (1,) + value.shape)
+ value = safe_reshape(value, (1,) + value.shape)
def value_getitem(placement):
return value
@@ -1138,7 +1138,7 @@ def insert(self, loc: int, item: Label, value, allow_duplicates: bool = False):
if value.ndim == self.ndim - 1 and not is_extension_array_dtype(value.dtype):
# TODO(EA2D): special case not needed with 2D EAs
- value = _safe_reshape(value, (1,) + value.shape)
+ value = safe_reshape(value, (1,) + value.shape)
block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 8bdd466ae6f33..d03b2f29521b7 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -520,7 +520,7 @@ def compress_group_index(group_index, sort: bool = True):
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
"""
- size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT)
+ size_hint = min(len(group_index), hashtable.SIZE_HINT_LIMIT)
table = hashtable.Int64HashTable(size_hint)
group_index = ensure_int64(group_index)
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index 2f3058db4493b..df60d2dcf5e84 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -92,7 +92,7 @@ def f(x, name=name, *args):
return self._groupby.apply(f)
-def _flex_binary_moment(arg1, arg2, f, pairwise=False):
+def flex_binary_moment(arg1, arg2, f, pairwise=False):
if not (
isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame))
@@ -222,7 +222,7 @@ def dataframe_from_int_dict(data, frame_template):
return dataframe_from_int_dict(results, arg1)
else:
- return _flex_binary_moment(arg2, arg1, f)
+ return flex_binary_moment(arg2, arg1, f)
def zsqrt(x):
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index 1913b51a68c15..2bd36d8bff155 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -15,7 +15,7 @@
import pandas.core.common as common
from pandas.core.window.common import _doc_template, _shared_docs, zsqrt
-from pandas.core.window.rolling import _flex_binary_moment, _Rolling
+from pandas.core.window.rolling import _Rolling, flex_binary_moment
_bias_template = """
Parameters
@@ -416,7 +416,7 @@ def _get_cov(X, Y):
)
return X._wrap_result(cov)
- return _flex_binary_moment(
+ return flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
@@ -470,6 +470,6 @@ def _cov(x, y):
corr = cov / zsqrt(x_var * y_var)
return X._wrap_result(corr)
- return _flex_binary_moment(
+ return flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 558c0eeb0ea65..4c4ec4d700b7f 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -54,8 +54,8 @@
from pandas.core.window.common import (
WindowGroupByMixin,
_doc_template,
- _flex_binary_moment,
_shared_docs,
+ flex_binary_moment,
zsqrt,
)
from pandas.core.window.indexers import (
@@ -1774,7 +1774,7 @@ def _get_cov(X, Y):
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
- return _flex_binary_moment(
+ return flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
@@ -1913,7 +1913,7 @@ def _get_corr(a, b):
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
- return _flex_binary_moment(
+ return flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
diff --git a/pandas/io/common.py b/pandas/io/common.py
index a80b89569f429..3f130401558dd 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -40,12 +40,12 @@
ModeVar,
StorageOptions,
)
-from pandas.compat import _get_lzma_file, _import_lzma
+from pandas.compat import get_lzma_file, import_lzma
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.common import is_file_like
-lzma = _import_lzma()
+lzma = import_lzma()
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
@@ -562,7 +562,7 @@ def get_handle(
# XZ Compression
elif compression == "xz":
- f = _get_lzma_file(lzma)(path_or_buf, mode)
+ f = get_lzma_file(lzma)(path_or_buf, mode)
# Unrecognized Compression
else:
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 74eb65521f5b2..87343c22ad4e9 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -346,7 +346,7 @@ def read_excel(
)
-class _BaseExcelReader(metaclass=abc.ABCMeta):
+class BaseExcelReader(metaclass=abc.ABCMeta):
def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None):
# If filepath_or_buffer is a url, load the data into a BytesIO
if is_url(filepath_or_buffer):
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 6cbca59aed97e..02575ab878f6e 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -7,10 +7,10 @@
import pandas as pd
-from pandas.io.excel._base import _BaseExcelReader
+from pandas.io.excel._base import BaseExcelReader
-class _ODFReader(_BaseExcelReader):
+class _ODFReader(BaseExcelReader):
"""
Read tables out of OpenDocument formatted files.
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index 89b581da6ed31..f395127902101 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -5,7 +5,7 @@
from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions
from pandas.compat._optional import import_optional_dependency
-from pandas.io.excel._base import ExcelWriter, _BaseExcelReader
+from pandas.io.excel._base import BaseExcelReader, ExcelWriter
from pandas.io.excel._util import validate_freeze_panes
if TYPE_CHECKING:
@@ -438,7 +438,7 @@ def write_cells(
setattr(xcell, k, v)
-class _OpenpyxlReader(_BaseExcelReader):
+class _OpenpyxlReader(BaseExcelReader):
def __init__(
self,
filepath_or_buffer: FilePathOrBuffer,
diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py
index c15a52abe4d53..069c3a2eaa643 100644
--- a/pandas/io/excel/_pyxlsb.py
+++ b/pandas/io/excel/_pyxlsb.py
@@ -3,10 +3,10 @@
from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions
from pandas.compat._optional import import_optional_dependency
-from pandas.io.excel._base import _BaseExcelReader
+from pandas.io.excel._base import BaseExcelReader
-class _PyxlsbReader(_BaseExcelReader):
+class _PyxlsbReader(BaseExcelReader):
def __init__(
self,
filepath_or_buffer: FilePathOrBuffer,
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index a7fb519af61c6..9057106fb08e5 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -5,10 +5,10 @@
from pandas._typing import StorageOptions
from pandas.compat._optional import import_optional_dependency
-from pandas.io.excel._base import _BaseExcelReader
+from pandas.io.excel._base import BaseExcelReader
-class _XlrdReader(_BaseExcelReader):
+class _XlrdReader(BaseExcelReader):
def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None):
"""
Reader using xlrd engine.
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 3dc4290953360..53b2b533215f0 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -256,7 +256,7 @@ def __init__(
float_format = get_option("display.float_format")
self.float_format = float_format
self.dtype = dtype
- self.adj = _get_adjustment()
+ self.adj = get_adjustment()
self._chk_truncate()
@@ -439,7 +439,7 @@ def _get_pad(t):
return [x.rjust(_get_pad(x)) for x in texts]
-def _get_adjustment() -> TextAdjustment:
+def get_adjustment() -> TextAdjustment:
use_east_asian_width = get_option("display.unicode.east_asian_width")
if use_east_asian_width:
return EastAsianTextAdjustment()
@@ -628,7 +628,7 @@ def __init__(
self.columns = frame.columns
self._chk_truncate()
- self.adj = _get_adjustment()
+ self.adj = get_adjustment()
def _chk_truncate(self) -> None:
"""
@@ -1733,7 +1733,7 @@ def _make_fixed_width(
return strings
if adj is None:
- adj = _get_adjustment()
+ adj = get_adjustment()
max_len = max(adj.len(x) for x in strings)
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index 23daab725ec65..edc6fbfff61d7 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -321,7 +321,7 @@ def format_object_summary(
summary string
"""
from pandas.io.formats.console import get_console_size
- from pandas.io.formats.format import _get_adjustment
+ from pandas.io.formats.format import get_adjustment
display_width, _ = get_console_size()
if display_width is None:
@@ -350,7 +350,7 @@ def format_object_summary(
is_truncated = n > max_seq_items
# adj can optionally handle unicode eastern asian width
- adj = _get_adjustment()
+ adj = get_adjustment()
def _extend_line(
s: str, line: str, value: str, display_width: int, next_line_prefix: str
diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py
index b3fbd8c17d8bf..ead102f532a20 100644
--- a/pandas/tests/computation/test_compat.py
+++ b/pandas/tests/computation/test_compat.py
@@ -12,16 +12,16 @@
def test_compat():
# test we have compat with our version of nu
- from pandas.core.computation.check import _NUMEXPR_INSTALLED
+ from pandas.core.computation.check import NUMEXPR_INSTALLED
try:
import numexpr as ne
ver = ne.__version__
if LooseVersion(ver) < LooseVersion(VERSIONS["numexpr"]):
- assert not _NUMEXPR_INSTALLED
+ assert not NUMEXPR_INSTALLED
else:
- assert _NUMEXPR_INSTALLED
+ assert NUMEXPR_INSTALLED
except ImportError:
pytest.skip("not testing numexpr version compat")
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 853ab00853d1b..49066428eb16c 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -18,7 +18,7 @@
from pandas import DataFrame, Series, compat, date_range
import pandas._testing as tm
from pandas.core.computation import pytables
-from pandas.core.computation.check import _NUMEXPR_VERSION
+from pandas.core.computation.check import NUMEXPR_VERSION
from pandas.core.computation.engines import NumExprClobberingError, _engines
import pandas.core.computation.expr as expr
from pandas.core.computation.expr import (
@@ -26,7 +26,7 @@
PandasExprVisitor,
PythonExprVisitor,
)
-from pandas.core.computation.expressions import _NUMEXPR_INSTALLED, _USE_NUMEXPR
+from pandas.core.computation.expressions import _USE_NUMEXPR, NUMEXPR_INSTALLED
from pandas.core.computation.ops import (
_arith_ops_syms,
_binary_math_ops,
@@ -43,7 +43,7 @@
marks=pytest.mark.skipif(
engine == "numexpr" and not _USE_NUMEXPR,
reason=f"numexpr enabled->{_USE_NUMEXPR}, "
- f"installed->{_NUMEXPR_INSTALLED}",
+ f"installed->{NUMEXPR_INSTALLED}",
),
)
for engine in _engines
@@ -60,15 +60,15 @@ def parser(request):
@pytest.fixture
def ne_lt_2_6_9():
- if _NUMEXPR_INSTALLED and _NUMEXPR_VERSION >= LooseVersion("2.6.9"):
+ if NUMEXPR_INSTALLED and NUMEXPR_VERSION >= LooseVersion("2.6.9"):
pytest.skip("numexpr is >= 2.6.9")
return "numexpr"
@pytest.fixture
def unary_fns_for_ne():
- if _NUMEXPR_INSTALLED:
- if _NUMEXPR_VERSION >= LooseVersion("2.6.9"):
+ if NUMEXPR_INSTALLED:
+ if NUMEXPR_VERSION >= LooseVersion("2.6.9"):
return _unary_math_ops
else:
return tuple(x for x in _unary_math_ops if x not in ("floor", "ceil"))
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 447a6108fc3c7..e3cdeb9c1951f 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -189,7 +189,7 @@ def _concat_same_type(cls, to_concat):
def _values_for_factorize(self):
frozen = self._values_for_argsort()
if len(frozen) == 0:
- # _factorize_array expects 1-d array, this is a len-0 2-d array.
+ # factorize_array expects 1-d array, this is a len-0 2-d array.
frozen = frozen.ravel()
return frozen, ()
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index e17357e9845b5..70d0b4e9e835c 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -11,7 +11,7 @@
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
-from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
+from pandas.core.computation.expressions import _MIN_ELEMENTS, NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
@@ -375,7 +375,7 @@ def test_floordiv_axis0(self):
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
- @pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
+ @pytest.mark.skipif(not NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 56d178daee7fd..2994482fa5139 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -9,7 +9,7 @@
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, date_range
import pandas._testing as tm
-from pandas.core.computation.check import _NUMEXPR_INSTALLED
+from pandas.core.computation.check import NUMEXPR_INSTALLED
PARSERS = "python", "pandas"
ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne)
@@ -39,7 +39,7 @@ def setup_method(self, method):
def test_query_default(self):
# GH 12749
- # this should always work, whether _NUMEXPR_INSTALLED or not
+ # this should always work, whether NUMEXPR_INSTALLED or not
df = self.df
result = df.query("A>0")
tm.assert_frame_equal(result, self.expected1)
@@ -65,7 +65,7 @@ def test_query_python(self):
def test_query_numexpr(self):
df = self.df
- if _NUMEXPR_INSTALLED:
+ if NUMEXPR_INSTALLED:
result = df.query("A>0", engine="numexpr")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="numexpr")
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 22942ed75d0f3..1fb957505987f 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -226,7 +226,7 @@ def test_repr_truncation(self):
r = repr(df)
r = r[r.find("\n") + 1 :]
- adj = fmt._get_adjustment()
+ adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index d1c6705dd7a6f..2241fe7013568 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -24,7 +24,7 @@
import pytest
-from pandas.compat import _get_lzma_file, _import_lzma, is_platform_little_endian
+from pandas.compat import get_lzma_file, import_lzma, is_platform_little_endian
import pandas.util._test_decorators as td
import pandas as pd
@@ -33,7 +33,7 @@
from pandas.tseries.offsets import Day, MonthEnd
-lzma = _import_lzma()
+lzma = import_lzma()
@pytest.fixture(scope="module")
@@ -268,7 +268,7 @@ def compress_file(self, src_path, dest_path, compression):
with zipfile.ZipFile(dest_path, "w", compression=zipfile.ZIP_DEFLATED) as f:
f.write(src_path, os.path.basename(src_path))
elif compression == "xz":
- f = _get_lzma_file(lzma)(dest_path, "w")
+ f = get_lzma_file(lzma)(dest_path, "w")
else:
msg = f"Unrecognized compression type: {compression}"
raise ValueError(msg)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 72a679d980641..ec7413514d430 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -303,7 +303,7 @@ def test_parametrized_factorize_na_value_default(self, data):
],
)
def test_parametrized_factorize_na_value(self, data, na_value):
- codes, uniques = algos._factorize_array(data, na_value=na_value)
+ codes, uniques = algos.factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_codes = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(codes, expected_codes)
diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py
index 158b994cf03ae..dfcbdde466d44 100644
--- a/pandas/tests/window/moments/test_moments_consistency_rolling.py
+++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py
@@ -10,7 +10,7 @@
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series
import pandas._testing as tm
-from pandas.core.window.common import _flex_binary_moment
+from pandas.core.window.common import flex_binary_moment
from pandas.tests.window.common import (
check_pairwise_moment,
moments_consistency_cov_data,
@@ -150,7 +150,7 @@ def test_flex_binary_moment():
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
- _flex_binary_moment(5, 6, None)
+ flex_binary_moment(5, 6, None)
def test_corr_sanity():
diff --git a/pandas/tests/window/test_pairwise.py b/pandas/tests/window/test_pairwise.py
index 7425cc5df4c2f..7f4e85b385b2d 100644
--- a/pandas/tests/window/test_pairwise.py
+++ b/pandas/tests/window/test_pairwise.py
@@ -41,7 +41,7 @@ def compare(self, result, expected):
@pytest.mark.parametrize("f", [lambda x: x.cov(), lambda x: x.corr()])
def test_no_flex(self, f):
- # DataFrame methods (which do not call _flex_binary_moment())
+ # DataFrame methods (which do not call flex_binary_moment())
results = [f(df) for df in self.df1s]
for (df, result) in zip(self.df1s, results):
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index 78facd6694635..94c252eca1671 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -35,7 +35,7 @@ def test_foo():
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import _np_version
-from pandas.core.computation.expressions import _NUMEXPR_INSTALLED, _USE_NUMEXPR
+from pandas.core.computation.expressions import _USE_NUMEXPR, NUMEXPR_INSTALLED
def safe_import(mod_name: str, min_version: Optional[str] = None):
@@ -196,7 +196,7 @@ def skip_if_no(package: str, min_version: Optional[str] = None):
)
skip_if_no_ne = pytest.mark.skipif(
not _USE_NUMEXPR,
- reason=f"numexpr enabled->{_USE_NUMEXPR}, installed->{_NUMEXPR_INSTALLED}",
+ reason=f"numexpr enabled->{_USE_NUMEXPR}, installed->{NUMEXPR_INSTALLED}",
)
| getting close to being able to enable a code_check for these | https://api.github.com/repos/pandas-dev/pandas/pulls/36156 | 2020-09-06T03:06:33Z | 2020-09-06T17:05:36Z | 2020-09-06T17:05:36Z | 2020-09-06T19:04:13Z |
CLN: backport mpl warning fix to 1.1.2 | diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index d1a66256454ca..95701d00b3c29 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -44,6 +44,7 @@ Bug fixes
Other
~~~~~
- :meth:`factorize` now supports ``na_sentinel=None`` to include NaN in the uniques of the values and remove ``dropna`` keyword which was unintentionally exposed to public facing API in 1.1 version from :meth:`factorize` (:issue:`35667`)
+- :meth:`DataFrame.plot` and meth:`Series.plot` raise ``UserWarning`` about usage of FixedFormatter and FixedLocator (:issue:`35684` and :issue:`35945`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index b490e07e43753..646ed09331278 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -1232,8 +1232,8 @@ def get_label(i):
if self._need_to_set_index:
xticks = ax.get_xticks()
xticklabels = [get_label(x) for x in xticks]
- ax.set_xticklabels(xticklabels)
ax.xaxis.set_major_locator(FixedLocator(xticks))
+ ax.set_xticklabels(xticklabels)
condition = (
not self._use_dynamic_x()
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index ee43e5d7072fe..9ab697cb57690 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -2796,10 +2796,12 @@ def test_table(self):
_check_plot_works(df.plot, table=True)
_check_plot_works(df.plot, table=df)
- ax = df.plot()
- assert len(ax.tables) == 0
- plotting.table(ax, df.T)
- assert len(ax.tables) == 1
+ # GH 35945 UserWarning
+ with tm.assert_produces_warning(None):
+ ax = df.plot()
+ assert len(ax.tables) == 0
+ plotting.table(ax, df.T)
+ assert len(ax.tables) == 1
def test_errorbar_scatter(self):
df = DataFrame(np.random.randn(5, 2), index=range(5), columns=["x", "y"])
| backport #35946 and #36145 | https://api.github.com/repos/pandas-dev/pandas/pulls/36155 | 2020-09-06T02:44:40Z | 2020-09-06T17:17:54Z | 2020-09-06T17:17:54Z | 2020-09-06T17:18:15Z |
REF: share more EA methods | diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 2976747d66dfa..8b79f8ce66756 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -4,9 +4,10 @@
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
-from pandas.util._decorators import cache_readonly
+from pandas.util._decorators import cache_readonly, doc
-from pandas.core.algorithms import take, unique
+from pandas.core.algorithms import searchsorted, take, unique
+from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray
_T = TypeVar("_T", bound="NDArrayBackedExtensionArray")
@@ -120,3 +121,31 @@ def repeat(self: _T, repeats, axis=None) -> _T:
def unique(self: _T) -> _T:
new_data = unique(self._ndarray)
return self._from_backing_data(new_data)
+
+ @classmethod
+ @doc(ExtensionArray._concat_same_type)
+ def _concat_same_type(cls, to_concat, axis: int = 0):
+ dtypes = {str(x.dtype) for x in to_concat}
+ if len(dtypes) != 1:
+ raise ValueError("to_concat must have the same dtype (tz)", dtypes)
+
+ new_values = [x._ndarray for x in to_concat]
+ new_values = np.concatenate(new_values, axis=axis)
+ return to_concat[0]._from_backing_data(new_values)
+
+ @doc(ExtensionArray.searchsorted)
+ def searchsorted(self, value, side="left", sorter=None):
+ return searchsorted(self._ndarray, value, side=side, sorter=sorter)
+
+ @doc(ExtensionArray.shift)
+ def shift(self, periods=1, fill_value=None, axis=0):
+
+ fill_value = self._validate_shift_value(fill_value)
+ new_values = shift(self._ndarray, periods, axis, fill_value)
+
+ return self._from_backing_data(new_values)
+
+ def _validate_shift_value(self, fill_value):
+ # TODO: after deprecation in datetimelikearraymixin is enforced,
+ # we can remove this and ust validate_fill_value directly
+ return self._validate_fill_value(fill_value)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index c3c9009dda659..02305479bef67 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -44,8 +44,7 @@
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import _get_data_algo, factorize, take_1d, unique1d
-from pandas.core.array_algos.transforms import shift
-from pandas.core.arrays._mixins import _T, NDArrayBackedExtensionArray
+from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.base import (
ExtensionArray,
NoNewAttributesMixin,
@@ -1193,35 +1192,6 @@ def map(self, mapper):
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)
- def shift(self, periods, fill_value=None):
- """
- Shift Categorical by desired number of periods.
-
- Parameters
- ----------
- periods : int
- Number of periods to move, can be positive or negative
- fill_value : object, optional
- The scalar value to use for newly introduced missing values.
-
- .. versionadded:: 0.24.0
-
- Returns
- -------
- shifted : Categorical
- """
- # since categoricals always have ndim == 1, an axis parameter
- # doesn't make any sense here.
- codes = self.codes
- if codes.ndim > 1:
- raise NotImplementedError("Categorical with ndim > 1.")
-
- fill_value = self._validate_fill_value(fill_value)
-
- codes = shift(codes, periods, axis=0, fill_value=fill_value)
-
- return self._constructor(codes, dtype=self.dtype, fastpath=True)
-
def _validate_fill_value(self, fill_value):
"""
Convert a user-facing fill_value to a representation to use with our
@@ -1383,20 +1353,6 @@ def notna(self):
notnull = notna
- def dropna(self):
- """
- Return the Categorical without null values.
-
- Missing values (-1 in .codes) are detected.
-
- Returns
- -------
- valid : Categorical
- """
- result = self[self.notna()]
-
- return result
-
def value_counts(self, dropna=True):
"""
Return a Series containing counts of each category.
@@ -1749,81 +1705,6 @@ def fillna(self, value=None, method=None, limit=None):
return self._constructor(codes, dtype=self.dtype, fastpath=True)
- def take(self: _T, indexer, allow_fill: bool = False, fill_value=None) -> _T:
- """
- Take elements from the Categorical.
-
- Parameters
- ----------
- indexer : sequence of int
- The indices in `self` to take. The meaning of negative values in
- `indexer` depends on the value of `allow_fill`.
- allow_fill : bool, default False
- How to handle negative values in `indexer`.
-
- * False: negative values in `indices` indicate positional indices
- from the right. This is similar to
- :func:`numpy.take`.
-
- * True: negative values in `indices` indicate missing values
- (the default). These values are set to `fill_value`. Any other
- other negative values raise a ``ValueError``.
-
- .. versionchanged:: 1.0.0
-
- Default value changed from ``True`` to ``False``.
-
- fill_value : object
- The value to use for `indices` that are missing (-1), when
- ``allow_fill=True``. This should be the category, i.e. a value
- in ``self.categories``, not a code.
-
- Returns
- -------
- Categorical
- This Categorical will have the same categories and ordered as
- `self`.
-
- See Also
- --------
- Series.take : Similar method for Series.
- numpy.ndarray.take : Similar method for NumPy arrays.
-
- Examples
- --------
- >>> cat = pd.Categorical(['a', 'a', 'b'])
- >>> cat
- ['a', 'a', 'b']
- Categories (2, object): ['a', 'b']
-
- Specify ``allow_fill==False`` to have negative indices mean indexing
- from the right.
-
- >>> cat.take([0, -1, -2], allow_fill=False)
- ['a', 'b', 'a']
- Categories (2, object): ['a', 'b']
-
- With ``allow_fill=True``, indices equal to ``-1`` mean "missing"
- values that should be filled with the `fill_value`, which is
- ``np.nan`` by default.
-
- >>> cat.take([0, -1, -1], allow_fill=True)
- ['a', NaN, NaN]
- Categories (2, object): ['a', 'b']
-
- The fill value can be specified.
-
- >>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')
- ['a', 'a', 'a']
- Categories (2, object): ['a', 'b']
-
- Specifying a fill value that's not in ``self.categories``
- will raise a ``ValueError``.
- """
- return NDArrayBackedExtensionArray.take(
- self, indexer, allow_fill=allow_fill, fill_value=fill_value
- )
-
# ------------------------------------------------------------------
# NDArrayBackedExtensionArray compat
@@ -1861,6 +1742,9 @@ def __contains__(self, key) -> bool:
return contains(self, key, container=self._codes)
+ # ------------------------------------------------------------------
+ # Rendering Methods
+
def _tidy_repr(self, max_vals=10, footer=True) -> str:
"""
a short repr displaying only max_vals and an optional (but default
@@ -1959,6 +1843,8 @@ def __repr__(self) -> str:
return result
+ # ------------------------------------------------------------------
+
def _maybe_coerce_indexer(self, indexer):
"""
return an indexer coerced to the codes dtype
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 5a44f87400b79..a5b8032974fa4 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -54,9 +54,8 @@
from pandas.core import missing, nanops, ops
from pandas.core.algorithms import checked_add_with_arr, unique1d, value_counts
-from pandas.core.array_algos.transforms import shift
from pandas.core.arrays._mixins import _T, NDArrayBackedExtensionArray
-from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
+from pandas.core.arrays.base import ExtensionOpsMixin
import pandas.core.common as com
from pandas.core.construction import array, extract_array
from pandas.core.indexers import check_array_indexer
@@ -672,18 +671,11 @@ def view(self, dtype=None):
@classmethod
def _concat_same_type(cls, to_concat, axis: int = 0):
-
- # do not pass tz to set because tzlocal cannot be hashed
- dtypes = {str(x.dtype) for x in to_concat}
- if len(dtypes) != 1:
- raise ValueError("to_concat must have the same dtype (tz)", dtypes)
+ new_obj = super()._concat_same_type(to_concat, axis)
obj = to_concat[0]
dtype = obj.dtype
- i8values = [x.asi8 for x in to_concat]
- values = np.concatenate(i8values, axis=axis)
-
new_freq = None
if is_period_dtype(dtype):
new_freq = obj.freq
@@ -697,11 +689,13 @@ def _concat_same_type(cls, to_concat, axis: int = 0):
if all(pair[0][-1] + obj.freq == pair[1][0] for pair in pairs):
new_freq = obj.freq
- return cls._simple_new(values, dtype=dtype, freq=new_freq)
+ new_obj._freq = new_freq
+ return new_obj
def copy(self: DatetimeLikeArrayT) -> DatetimeLikeArrayT:
- values = self.asi8.copy()
- return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq)
+ new_obj = super().copy()
+ new_obj._freq = self.freq
+ return new_obj
def _values_for_factorize(self):
return self.asi8, iNaT
@@ -713,14 +707,6 @@ def _from_factorized(cls, values, original):
def _values_for_argsort(self):
return self._data
- @Appender(ExtensionArray.shift.__doc__)
- def shift(self, periods=1, fill_value=None, axis=0):
-
- fill_value = self._validate_shift_value(fill_value)
- new_values = shift(self._data, periods, axis, fill_value)
-
- return type(self)._simple_new(new_values, dtype=self.dtype)
-
# ------------------------------------------------------------------
# Validation Methods
# TODO: try to de-duplicate these, ensure identical behavior
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 23a4a70734c81..588d68514649a 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -7,7 +7,6 @@
from pandas._libs import lib
from pandas._typing import Scalar
from pandas.compat.numpy import function as nv
-from pandas.util._decorators import doc
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.dtypes import ExtensionDtype
@@ -16,10 +15,9 @@
from pandas import compat
from pandas.core import nanops, ops
-from pandas.core.algorithms import searchsorted
from pandas.core.array_algos import masked_reductions
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
-from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
+from pandas.core.arrays.base import ExtensionOpsMixin
from pandas.core.construction import extract_array
from pandas.core.indexers import check_array_indexer
from pandas.core.missing import backfill_1d, pad_1d
@@ -189,10 +187,6 @@ def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "PandasArray
def _from_factorized(cls, values, original) -> "PandasArray":
return cls(values)
- @classmethod
- def _concat_same_type(cls, to_concat) -> "PandasArray":
- return cls(np.concatenate(to_concat))
-
def _from_backing_data(self, arr: np.ndarray) -> "PandasArray":
return type(self)(arr)
@@ -423,10 +417,6 @@ def to_numpy(
return result
- @doc(ExtensionArray.searchsorted)
- def searchsorted(self, value, side="left", sorter=None):
- return searchsorted(self.to_numpy(), value, side=side, sorter=sorter)
-
# ------------------------------------------------------------------------
# Ops
| https://api.github.com/repos/pandas-dev/pandas/pulls/36154 | 2020-09-06T00:05:22Z | 2020-09-06T17:08:26Z | 2020-09-06T17:08:26Z | 2020-09-06T19:29:40Z | |
Fix compressed multiindex for output of groupby.rolling | diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index d1a66256454ca..6a21f36284996 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -21,6 +21,7 @@ Fixed regressions
- Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`)
- Fix regression in pickle roundtrip of the ``closed`` attribute of :class:`IntervalIndex` (:issue:`35658`)
- Fixed regression in :meth:`DataFrameGroupBy.agg` where a ``ValueError: buffer source array is read-only`` would be raised when the underlying array is read-only (:issue:`36014`)
+- Fixed regression in :meth:`Series.groupby.rolling` number of levels of :class:`MultiIndex` in input was compressed to one (:issue:`36018`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 558c0eeb0ea65..875de28439ed4 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -2211,17 +2211,17 @@ def _apply(
# Compose MultiIndex result from grouping levels then rolling level
# Aggregate the MultiIndex data as tuples then the level names
grouped_object_index = self.obj.index
- grouped_index_name = [grouped_object_index.name]
+ grouped_index_name = [*grouped_object_index.names]
groupby_keys = [grouping.name for grouping in self._groupby.grouper._groupings]
result_index_names = groupby_keys + grouped_index_name
result_index_data = []
for key, values in self._groupby.grouper.indices.items():
for value in values:
- if not is_list_like(key):
- data = [key, grouped_object_index[value]]
- else:
- data = [*key, grouped_object_index[value]]
+ data = [
+ *com.maybe_make_list(key),
+ *com.maybe_make_list(grouped_object_index[value]),
+ ]
result_index_data.append(tuple(data))
result_index = MultiIndex.from_tuples(
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index 170bf100b3891..cb85ad7584da7 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -372,3 +372,24 @@ def test_groupby_subset_rolling_subset_with_closed(self):
name="column1",
)
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("func", ["max", "min"])
+ def test_groupby_rolling_index_changed(self, func):
+ # GH: #36018 nlevels of MultiIndex changed
+ ds = Series(
+ [1, 2, 2],
+ index=pd.MultiIndex.from_tuples(
+ [("a", "x"), ("a", "y"), ("c", "z")], names=["1", "2"]
+ ),
+ name="a",
+ )
+
+ result = getattr(ds.groupby(ds).rolling(2), func)()
+ expected = Series(
+ [np.nan, np.nan, 2.0],
+ index=pd.MultiIndex.from_tuples(
+ [(1, "a", "x"), (2, "a", "y"), (2, "c", "z")], names=["a", "1", "2"]
+ ),
+ name="a",
+ )
+ tm.assert_series_equal(result, expected)
| - [x] closes #36018
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
The number of leves for MultiIndex in input was compressed to one, because the tuples representing the Index rows were not unpacked. Also name was used instead of names -> resulted in missing name for every MultiIndex. | https://api.github.com/repos/pandas-dev/pandas/pulls/36152 | 2020-09-05T23:16:51Z | 2020-09-07T21:06:30Z | 2020-09-07T21:06:29Z | 2020-09-07T21:14:34Z |
REF: use BlockManager.apply in csv code | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index c8da04fbbf987..eb5b887c8b0cb 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -593,7 +593,7 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
- values = self.to_native_types()
+ values = self.to_native_types().values
# astype formatting
else:
@@ -684,7 +684,7 @@ def to_native_types(self, na_rep="nan", quoting=None, **kwargs):
values = np.array(values, dtype="object")
values[mask] = na_rep
- return values
+ return self.make_block(values)
# block actions #
def copy(self, deep: bool = True):
@@ -1774,7 +1774,7 @@ def to_native_types(self, na_rep="nan", quoting=None, **kwargs):
# TODO(EA2D): reshape not needed with 2D EAs
# we are expected to return a 2-d ndarray
- return values.reshape(1, len(values))
+ return self.make_block(values)
def take_nd(
self, indexer, axis: int = 0, new_mgr_locs=None, fill_value=lib.no_default
@@ -2021,7 +2021,7 @@ def to_native_types(
values = np.array(values, dtype="object")
values[mask] = na_rep
- return values
+ return self.make_block(values)
from pandas.io.formats.format import FloatArrayFormatter
@@ -2033,7 +2033,8 @@ def to_native_types(
quoting=quoting,
fixed_width=False,
)
- return formatter.get_result_as_array()
+ res = formatter.get_result_as_array()
+ return self.make_block(res)
class ComplexBlock(FloatOrComplexBlock):
@@ -2192,7 +2193,7 @@ def to_native_types(self, na_rep="NaT", date_format=None, **kwargs):
result = dta._format_native_types(
na_rep=na_rep, date_format=date_format, **kwargs
)
- return np.atleast_2d(result)
+ return self.make_block(result)
def set(self, locs, values):
"""
@@ -2408,7 +2409,8 @@ def fillna(self, value, **kwargs):
def to_native_types(self, na_rep="NaT", **kwargs):
""" convert to our native types format """
tda = self.array_values()
- return tda._format_native_types(na_rep, **kwargs)
+ res = tda._format_native_types(na_rep, **kwargs)
+ return self.make_block(res)
class BoolBlock(NumericBlock):
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 90ab6f61f4d74..1bda16d126905 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -341,12 +341,11 @@ def _save_chunk(self, start_i: int, end_i: int) -> None:
slicer = slice(start_i, end_i)
df = self.obj.iloc[slicer]
+ mgr = df._mgr
- for block in df._mgr.blocks:
- d = block.to_native_types(**self._number_format)
-
- for col_loc, col in zip(block.mgr_locs, d):
- data[col_loc] = col
+ res = mgr.apply("to_native_types", **self._number_format)
+ for i in range(len(res.items)):
+ data[i] = res.iget_values(i)
ix = self.data_index.to_native_types(slicer=slicer, **self._number_format)
libwriters.write_csv_rows(data, ix, self.nlevels, self.cols, self.writer)
| This doesn't get rid of internals usage entirely, but makes that access 1 layer less deep | https://api.github.com/repos/pandas-dev/pandas/pulls/36150 | 2020-09-05T22:56:02Z | 2020-09-12T21:28:01Z | 2020-09-12T21:28:01Z | 2020-09-12T21:30:15Z |
Make to_numeric default to correct precision | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 2afa1f1a6199e..2aac2596c18cb 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -245,7 +245,7 @@ Timezones
Numeric
^^^^^^^
--
+- Bug in :func:`to_numeric` where float precision was incorrect (:issue:`31364`)
-
Conversion
diff --git a/pandas/_libs/src/parse_helper.h b/pandas/_libs/src/parse_helper.h
index 2ada0a4bd173d..d161c4e29fe15 100644
--- a/pandas/_libs/src/parse_helper.h
+++ b/pandas/_libs/src/parse_helper.h
@@ -18,7 +18,9 @@ int to_double(char *item, double *p_value, char sci, char decimal,
char *p_end = NULL;
int error = 0;
- *p_value = xstrtod(item, &p_end, decimal, sci, '\0', 1, &error, maybe_int);
+ /* Switch to precise xstrtod GH 31364 */
+ *p_value = precise_xstrtod(item, &p_end, decimal, sci, '\0', 1,
+ &error, maybe_int);
return (error == 0) && (!*p_end);
}
diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py
index 263887a8ea36e..450076f2824ad 100644
--- a/pandas/tests/tools/test_to_numeric.py
+++ b/pandas/tests/tools/test_to_numeric.py
@@ -649,3 +649,61 @@ def test_failure_to_convert_uint64_string_to_NaN():
ser = Series([32, 64, np.nan])
result = to_numeric(pd.Series(["32", "64", "uint64"]), errors="coerce")
tm.assert_series_equal(result, ser)
+
+
+@pytest.mark.parametrize(
+ "strrep",
+ [
+ "243.164",
+ "245.968",
+ "249.585",
+ "259.745",
+ "265.742",
+ "272.567",
+ "279.196",
+ "280.366",
+ "275.034",
+ "271.351",
+ "272.889",
+ "270.627",
+ "280.828",
+ "290.383",
+ "308.153",
+ "319.945",
+ "336.0",
+ "344.09",
+ "351.385",
+ "356.178",
+ "359.82",
+ "361.03",
+ "367.701",
+ "380.812",
+ "387.98",
+ "391.749",
+ "391.171",
+ "385.97",
+ "385.345",
+ "386.121",
+ "390.996",
+ "399.734",
+ "413.073",
+ "421.532",
+ "430.221",
+ "437.092",
+ "439.746",
+ "446.01",
+ "451.191",
+ "460.463",
+ "469.779",
+ "472.025",
+ "479.49",
+ "474.864",
+ "467.54",
+ "471.978",
+ ],
+)
+def test_precision_float_conversion(strrep):
+ # GH 31364
+ result = to_numeric(strrep)
+
+ assert result == float(strrep)
| - [x] closes #31364
- [x] tests added / passed
- tools/test_to_numeric.py:test_precision
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This relates to a very old issue #8002 where the default precision for CSV files could create wrong answers in the last bit. @jreback was involved in the PR review for that, which created the default to not be the high precision parser.
For `to_numeric()`, I switched it to use `precise_xstrtod` instead of `xstrtod` by default.
Unknown whether there are performance implications for `to_numeric()`, although this comment https://github.com/pandas-dev/pandas/issues/17154#issuecomment-319917647 from @jorisvandenbossche indicates that maybe we should consider switching to the higher precision parser by default in `read_csv()` anyway.
Open question as to whether performance analysis of *this* PR is needed beyond what is shown below, which seems to indicate that using `precise_xstrtod` is *faster* than `xstrtod`
The other possibility is to use a keyword argument in `to_numeric` that would allow the higher precision parser. | https://api.github.com/repos/pandas-dev/pandas/pulls/36149 | 2020-09-05T20:59:20Z | 2020-09-08T15:30:37Z | 2020-09-08T15:30:36Z | 2020-09-18T11:34:47Z |
DEPR: disallow tznaive datetimes when indexing tzaware datetimeindex | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 5be9155b3ff0b..afc0046ec6822 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -289,6 +289,7 @@ Deprecations
- Deprecated :meth:`Index.is_all_dates` (:issue:`27744`)
- Deprecated automatic alignment on comparison operations between :class:`DataFrame` and :class:`Series`, do ``frame, ser = frame.align(ser, axis=1, copy=False)`` before e.g. ``frame == ser`` (:issue:`28759`)
- :meth:`Rolling.count` with ``min_periods=None`` will default to the size of the window in a future version (:issue:`31302`)
+- Deprecated slice-indexing on timezone-aware :class:`DatetimeIndex` with naive ``datetime`` objects, to match scalar indexing behavior (:issue:`36148`)
- :meth:`Index.ravel` returning a ``np.ndarray`` is deprecated, in the future this will return a view on the same index (:issue:`19956`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 67b71ce63a6e3..62a3688216247 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -604,6 +604,28 @@ def _validate_partial_date_slice(self, reso: Resolution):
# _parsed_string_to_bounds allows it.
raise KeyError
+ def _deprecate_mismatched_indexing(self, key):
+ # GH#36148
+ # we get here with isinstance(key, self._data._recognized_scalars)
+ try:
+ self._data._assert_tzawareness_compat(key)
+ except TypeError:
+ if self.tz is None:
+ msg = (
+ "Indexing a timezone-naive DatetimeIndex with a "
+ "timezone-aware datetime is deprecated and will "
+ "raise KeyError in a future version. "
+ "Use a timezone-naive object instead."
+ )
+ else:
+ msg = (
+ "Indexing a timezone-aware DatetimeIndex with a "
+ "timezone-naive datetime is deprecated and will "
+ "raise KeyError in a future version. "
+ "Use a timezone-aware object instead."
+ )
+ warnings.warn(msg, FutureWarning, stacklevel=5)
+
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
@@ -621,6 +643,7 @@ def get_loc(self, key, method=None, tolerance=None):
if isinstance(key, self._data._recognized_scalars):
# needed to localize naive datetimes
+ self._deprecate_mismatched_indexing(key)
key = self._maybe_cast_for_get_loc(key)
elif isinstance(key, str):
@@ -702,6 +725,8 @@ def _maybe_cast_slice_bound(self, label, side: str, kind):
if self._is_strictly_monotonic_decreasing and len(self) > 1:
return upper if side == "left" else lower
return lower if side == "left" else upper
+ elif isinstance(label, (self._data._recognized_scalars, date)):
+ self._deprecate_mismatched_indexing(label)
return self._maybe_cast_for_get_loc(label)
def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 539d9cb8f06a7..cb12365f38605 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -675,10 +675,14 @@ def test_get_slice_bounds_datetime_within(
self, box, kind, side, expected, tz_aware_fixture
):
# GH 35690
- index = bdate_range("2000-01-03", "2000-02-11").tz_localize(tz_aware_fixture)
- result = index.get_slice_bound(
- box(year=2000, month=1, day=7), kind=kind, side=side
- )
+ tz = tz_aware_fixture
+ index = bdate_range("2000-01-03", "2000-02-11").tz_localize(tz)
+ key = box(year=2000, month=1, day=7)
+
+ warn = None if tz is None else FutureWarning
+ with tm.assert_produces_warning(warn, check_stacklevel=False):
+ # GH#36148 will require tzawareness-compat
+ result = index.get_slice_bound(key, kind=kind, side=side)
assert result == expected
@pytest.mark.parametrize("box", [date, datetime, Timestamp])
@@ -689,19 +693,27 @@ def test_get_slice_bounds_datetime_outside(
self, box, kind, side, year, expected, tz_aware_fixture
):
# GH 35690
- index = bdate_range("2000-01-03", "2000-02-11").tz_localize(tz_aware_fixture)
- result = index.get_slice_bound(
- box(year=year, month=1, day=7), kind=kind, side=side
- )
+ tz = tz_aware_fixture
+ index = bdate_range("2000-01-03", "2000-02-11").tz_localize(tz)
+ key = box(year=year, month=1, day=7)
+
+ warn = None if tz is None else FutureWarning
+ with tm.assert_produces_warning(warn, check_stacklevel=False):
+ # GH#36148 will require tzawareness-compat
+ result = index.get_slice_bound(key, kind=kind, side=side)
assert result == expected
@pytest.mark.parametrize("box", [date, datetime, Timestamp])
@pytest.mark.parametrize("kind", ["getitem", "loc", None])
def test_slice_datetime_locs(self, box, kind, tz_aware_fixture):
# GH 34077
- index = DatetimeIndex(["2010-01-01", "2010-01-03"]).tz_localize(
- tz_aware_fixture
- )
- result = index.slice_locs(box(2010, 1, 1), box(2010, 1, 2))
+ tz = tz_aware_fixture
+ index = DatetimeIndex(["2010-01-01", "2010-01-03"]).tz_localize(tz)
+ key = box(2010, 1, 1)
+
+ warn = None if tz is None else FutureWarning
+ with tm.assert_produces_warning(warn, check_stacklevel=False):
+ # GH#36148 will require tzawareness-compat
+ result = index.slice_locs(key, box(2010, 1, 2))
expected = (0, 1)
assert result == expected
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index 0389099a195d0..1801d13e75565 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -237,23 +237,38 @@ def test_getitem_setitem_datetimeindex():
expected = ts[4:8]
tm.assert_series_equal(result, expected)
- # repeat all the above with naive datetimes
- result = ts[datetime(1990, 1, 1, 4)]
+ # But we do not give datetimes a pass on tzawareness compat
+ # TODO: do the same with Timestamps and dt64
+ msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
+ naive = datetime(1990, 1, 1, 4)
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#36148 will require tzawareness compat
+ result = ts[naive]
expected = ts[4]
assert result == expected
result = ts.copy()
- result[datetime(1990, 1, 1, 4)] = 0
- result[datetime(1990, 1, 1, 4)] = ts[4]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ # GH#36148 will require tzawareness compat
+ result[datetime(1990, 1, 1, 4)] = 0
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ # GH#36148 will require tzawareness compat
+ result[datetime(1990, 1, 1, 4)] = ts[4]
tm.assert_series_equal(result, ts)
- result = ts[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ # GH#36148 will require tzawareness compat
+ result = ts[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
- result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = 0
- result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = ts[4:8]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ # GH#36148 will require tzawareness compat
+ result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = 0
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ # GH#36148 will require tzawareness compat
+ result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = ts[4:8]
tm.assert_series_equal(result, ts)
lb = datetime(1990, 1, 1, 4)
diff --git a/pandas/tests/series/methods/test_truncate.py b/pandas/tests/series/methods/test_truncate.py
index 45592f8d99b93..858b1d6b4df8c 100644
--- a/pandas/tests/series/methods/test_truncate.py
+++ b/pandas/tests/series/methods/test_truncate.py
@@ -101,7 +101,13 @@ def test_truncate_datetimeindex_tz(self):
# GH 9243
idx = date_range("4/1/2005", "4/30/2005", freq="D", tz="US/Pacific")
s = Series(range(len(idx)), index=idx)
- result = s.truncate(datetime(2005, 4, 2), datetime(2005, 4, 4))
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ # GH#36148 in the future will require tzawareness compat
+ s.truncate(datetime(2005, 4, 2), datetime(2005, 4, 4))
+
+ lb = idx[1]
+ ub = idx[3]
+ result = s.truncate(lb.to_pydatetime(), ub.to_pydatetime())
expected = Series([1, 2, 3], index=idx[1:4])
tm.assert_series_equal(result, expected)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
cc @mroeschke | https://api.github.com/repos/pandas-dev/pandas/pulls/36148 | 2020-09-05T20:05:51Z | 2020-10-07T20:29:37Z | 2020-10-07T20:29:37Z | 2021-11-23T02:54:52Z |
REGR: Series access with Index of tuples/frozenset | diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index e3161012da5d1..5aca33869bca1 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -14,6 +14,8 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
+- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a tuple (:issue:`35534`)
+- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 6cbd93135a2ca..162604c0544a5 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -887,21 +887,19 @@ def __getitem__(self, key):
elif key_is_scalar:
return self._get_value(key)
- if (
- isinstance(key, tuple)
- and is_hashable(key)
- and isinstance(self.index, MultiIndex)
- ):
+ if is_hashable(key):
# Otherwise index.get_value will raise InvalidIndexError
try:
+ # For labels that don't resolve as scalars like tuples and frozensets
result = self._get_value(key)
return result
except KeyError:
- # We still have the corner case where this tuple is a key
- # in the first level of our MultiIndex
- return self._get_values_tuple(key)
+ if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
+ # We still have the corner case where a tuple is a key
+ # in the first level of our MultiIndex
+ return self._get_values_tuple(key)
if is_iterator(key):
key = list(key)
@@ -961,7 +959,7 @@ def _get_values_tuple(self, key):
return result
if not isinstance(self.index, MultiIndex):
- raise ValueError("Can only tuple-index with a MultiIndex")
+ raise ValueError("key of type tuple not found and not a MultiIndex")
# If key is contained, would have returned by now
indexer, new_index = self.index.get_loc_level(key)
@@ -1015,9 +1013,11 @@ def __setitem__(self, key, value):
# GH#12862 adding an new key to the Series
self.loc[key] = value
- except TypeError as e:
+ except TypeError as err:
if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):
- raise ValueError("Can only tuple-index with a MultiIndex") from e
+ raise ValueError(
+ "key of type tuple not found and not a MultiIndex"
+ ) from err
if com.is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 3ed25b8bca566..1fafdf00393e1 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -383,7 +383,7 @@ def test_2d_to_1d_assignment_raises():
@pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning")
def test_basic_getitem_setitem_corner(datetime_series):
# invalid tuples, e.g. td.ts[:, None] vs. td.ts[:, 2]
- msg = "Can only tuple-index with a MultiIndex"
+ msg = "key of type tuple not found and not a MultiIndex"
with pytest.raises(ValueError, match=msg):
datetime_series[:, 2]
with pytest.raises(ValueError, match=msg):
@@ -942,3 +942,22 @@ def assert_slices_equivalent(l_slc, i_slc):
for key2 in [keystr2, box(keystr2)]:
assert_slices_equivalent(SLC[key2:key:-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[key:key2:-1], SLC[0:0:-1])
+
+
+def test_tuple_index():
+ # GH 35534 - Selecting values when a Series has an Index of tuples
+ s = pd.Series([1, 2], index=[("a",), ("b",)])
+ assert s[("a",)] == 1
+ assert s[("b",)] == 2
+ s[("b",)] = 3
+ assert s[("b",)] == 3
+
+
+def test_frozenset_index():
+ # GH35747 - Selecting values when a Series has an Index of frozenset
+ idx0, idx1 = frozenset("a"), frozenset("b")
+ s = pd.Series([1, 2], index=[idx0, idx1])
+ assert s[idx0] == 1
+ assert s[idx1] == 2
+ s[idx1] = 3
+ assert s[idx1] == 3
| - [x] closes #35534
- [x] closes #35747
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36147 | 2020-09-05T20:02:05Z | 2020-09-12T21:39:40Z | 2020-09-12T21:39:40Z | 2020-09-13T12:29:54Z |
DOC: Update pct_change documentation | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e4f7bb43b23dc..3dde957f75ea2 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10797,7 +10797,7 @@ def pct_change(
----------
periods : int, default 1
Periods to shift for forming percent change.
- fill_method : str, default 'pad'
+ fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad'
How to handle NAs **before** computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
| Related to SO question: [How to avoid bfill or ffill when calculating pct_change with NaNs](https://stackoverflow.com/questions/75395345/how-to-avoid-bfill-or-ffill-when-calculating-pct-change-with-nans)
| https://api.github.com/repos/pandas-dev/pandas/pulls/51262 | 2023-02-09T10:14:51Z | 2023-02-09T17:17:08Z | 2023-02-09T17:17:08Z | 2023-02-09T17:20:32Z |
CLN: simplify groupby wrapping | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 7745de87633eb..d4d0a94f5fa50 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -219,16 +219,9 @@ def apply(self, func, *args, **kwargs) -> Series:
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
- data = self._obj_with_exclusions
- result = self._aggregate_with_numba(
- data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
+ return self._aggregate_with_numba(
+ func, *args, engine_kwargs=engine_kwargs, **kwargs
)
- index = self.grouper.result_index
- result = self.obj._constructor(result.ravel(), index=index, name=data.name)
- if not self.as_index:
- result = self._insert_inaxis_grouper(result)
- result.index = default_index(len(result))
- return result
relabeling = func is None
columns = None
@@ -1261,16 +1254,9 @@ class DataFrameGroupBy(GroupBy[DataFrame]):
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
- data = self._obj_with_exclusions
- result = self._aggregate_with_numba(
- data, func, *args, engine_kwargs=engine_kwargs, **kwargs
+ return self._aggregate_with_numba(
+ func, *args, engine_kwargs=engine_kwargs, **kwargs
)
- index = self.grouper.result_index
- result = self.obj._constructor(result, index=index, columns=data.columns)
- if not self.as_index:
- result = self._insert_inaxis_grouper(result)
- result.index = default_index(len(result))
- return result
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
@@ -1283,7 +1269,12 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
# this should be the only (non-raising) case with relabeling
# used reordered index of columns
result = result.iloc[:, order]
- result.columns = columns
+ result = cast(DataFrame, result)
+ # error: Incompatible types in assignment (expression has type
+ # "Optional[List[str]]", variable has type
+ # "Union[Union[Union[ExtensionArray, ndarray[Any, Any]],
+ # Index, Series], Sequence[Any]]")
+ result.columns = columns # type: ignore[assignment]
if result is None:
@@ -1312,11 +1303,18 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
except ValueError as err:
if "No objects to concatenate" not in str(err):
raise
+ # _aggregate_frame can fail with e.g. func=Series.mode,
+ # where it expects 1D values but would be getting 2D values
+ # In other tests, using aggregate_frame instead of GroupByApply
+ # would give correct values but incorrect dtypes
+ # object vs float64 in test_cython_agg_empty_buckets
+ # float64 vs int64 in test_category_order_apply
result = self._aggregate_frame(func)
else:
# GH#32040, GH#35246
# e.g. test_groupby_as_index_select_column_sum_empty_df
+ result = cast(DataFrame, result)
result.columns = self._obj_with_exclusions.columns.copy()
if not self.as_index:
@@ -1502,8 +1500,7 @@ def arr_func(bvalues: ArrayLike) -> ArrayLike:
res_mgr.set_axis(1, mgr.axes[1])
res_df = self.obj._constructor(res_mgr)
- if self.axis == 1:
- res_df = res_df.T
+ res_df = self._maybe_transpose_result(res_df)
return res_df
def _transform_general(self, func, *args, **kwargs):
@@ -1830,7 +1827,7 @@ def _iterate_column_groupbys(self, obj: DataFrame | Series):
observed=self.observed,
)
- def _apply_to_column_groupbys(self, func, obj: DataFrame | Series) -> DataFrame:
+ def _apply_to_column_groupbys(self, func, obj: DataFrame) -> DataFrame:
from pandas.core.reshape.concat import concat
columns = obj.columns
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index a70084493ba6f..ed063a2987188 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1008,17 +1008,8 @@ def _concat_objects(
):
from pandas.core.reshape.concat import concat
- def reset_identity(values):
- # reset the identities of the components
- # of the values to prevent aliasing
- for v in com.not_none(*values):
- ax = v._get_axis(self.axis)
- ax._reset_identity()
- return values
-
if self.group_keys and not is_transform:
- values = reset_identity(values)
if self.as_index:
# possible MI return case
@@ -1063,7 +1054,6 @@ def reset_identity(values):
result = result.reindex(ax, axis=self.axis, copy=False)
else:
- values = reset_identity(values)
result = concat(values, axis=self.axis)
name = self.obj.name if self.obj.ndim == 1 else self._selection
@@ -1123,6 +1113,17 @@ def _indexed_output_to_ndframe(
) -> Series | DataFrame:
raise AbstractMethodError(self)
+ @final
+ def _maybe_transpose_result(self, result: NDFrameT) -> NDFrameT:
+ if self.axis == 1:
+ # Only relevant for DataFrameGroupBy, no-op for SeriesGroupBy
+ result = result.T
+ if result.index.equals(self.obj.index):
+ # Retain e.g. DatetimeIndex/TimedeltaIndex freq
+ # e.g. test_groupby_crash_on_nunique
+ result.index = self.obj.index.copy()
+ return result
+
@final
def _wrap_aggregated_output(
self,
@@ -1160,15 +1161,10 @@ def _wrap_aggregated_output(
result.index = index
- if self.axis == 1:
- # Only relevant for DataFrameGroupBy, no-op for SeriesGroupBy
- result = result.T
- if result.index.equals(self.obj.index):
- # Retain e.g. DatetimeIndex/TimedeltaIndex freq
- result.index = self.obj.index.copy()
- # TODO: Do this more systematically
-
- return self._reindex_output(result, qs=qs)
+ # error: Argument 1 to "_maybe_transpose_result" of "GroupBy" has
+ # incompatible type "Union[Series, DataFrame]"; expected "NDFrameT"
+ res = self._maybe_transpose_result(result) # type: ignore[arg-type]
+ return self._reindex_output(res, qs=qs)
def _wrap_applied_output(
self,
@@ -1242,9 +1238,7 @@ def _numba_agg_general(
return data._constructor(result, index=index, **result_kwargs)
@final
- def _transform_with_numba(
- self, data: DataFrame, func, *args, engine_kwargs=None, **kwargs
- ):
+ def _transform_with_numba(self, func, *args, engine_kwargs=None, **kwargs):
"""
Perform groupby transform routine with the numba engine.
@@ -1252,7 +1246,10 @@ def _transform_with_numba(
to generate the indices of each group in the sorted data and then passes the
data and indices into a Numba jitted function.
"""
- starts, ends, sorted_index, sorted_data = self._numba_prep(data)
+ data = self._obj_with_exclusions
+ df = data if data.ndim == 2 else data.to_frame()
+
+ starts, ends, sorted_index, sorted_data = self._numba_prep(df)
numba_.validate_udf(func)
numba_transform_func = numba_.generate_numba_transform_func(
func, **get_jit_arguments(engine_kwargs, kwargs)
@@ -1262,17 +1259,22 @@ def _transform_with_numba(
sorted_index,
starts,
ends,
- len(data.columns),
+ len(df.columns),
*args,
)
# result values needs to be resorted to their original positions since we
# evaluated the data sorted by group
- return result.take(np.argsort(sorted_index), axis=0)
+ result = result.take(np.argsort(sorted_index), axis=0)
+ index = data.index
+ if data.ndim == 1:
+ result_kwargs = {"name": data.name}
+ result = result.ravel()
+ else:
+ result_kwargs = {"columns": data.columns}
+ return data._constructor(result, index=index, **result_kwargs)
@final
- def _aggregate_with_numba(
- self, data: DataFrame, func, *args, engine_kwargs=None, **kwargs
- ):
+ def _aggregate_with_numba(self, func, *args, engine_kwargs=None, **kwargs):
"""
Perform groupby aggregation routine with the numba engine.
@@ -1280,7 +1282,10 @@ def _aggregate_with_numba(
to generate the indices of each group in the sorted data and then passes the
data and indices into a Numba jitted function.
"""
- starts, ends, sorted_index, sorted_data = self._numba_prep(data)
+ data = self._obj_with_exclusions
+ df = data if data.ndim == 2 else data.to_frame()
+
+ starts, ends, sorted_index, sorted_data = self._numba_prep(df)
numba_.validate_udf(func)
numba_agg_func = numba_.generate_numba_agg_func(
func, **get_jit_arguments(engine_kwargs, kwargs)
@@ -1290,10 +1295,20 @@ def _aggregate_with_numba(
sorted_index,
starts,
ends,
- len(data.columns),
+ len(df.columns),
*args,
)
- return result
+ index = self.grouper.result_index
+ if data.ndim == 1:
+ result_kwargs = {"name": data.name}
+ result = result.ravel()
+ else:
+ result_kwargs = {"columns": data.columns}
+ res = data._constructor(result, index=index, **result_kwargs)
+ if not self.as_index:
+ res = self._insert_inaxis_grouper(res)
+ res.index = default_index(len(res))
+ return res
# -----------------------------------------------------------------
# apply/agg/transform
@@ -1536,19 +1551,9 @@ def _cython_transform(
def _transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
- data = self._obj_with_exclusions
- df = data if data.ndim == 2 else data.to_frame()
- result = self._transform_with_numba(
- df, func, *args, engine_kwargs=engine_kwargs, **kwargs
+ return self._transform_with_numba(
+ func, *args, engine_kwargs=engine_kwargs, **kwargs
)
- if self.obj.ndim == 2:
- return cast(DataFrame, self.obj)._constructor(
- result, index=data.index, columns=data.columns
- )
- else:
- return cast(Series, self.obj)._constructor(
- result.ravel(), index=data.index, name=data.name
- )
# optimized transforms
func = com.get_cython_func(func) or func
| This is the last of the groupby cleanup I have lined up. @rhshadrach im hoping more progress can be made in simplifying the wrapping and apply/agg try/excepts but don't see an immediate way forward. | https://api.github.com/repos/pandas-dev/pandas/pulls/51259 | 2023-02-09T03:01:30Z | 2023-02-09T21:51:14Z | 2023-02-09T21:51:14Z | 2023-02-09T22:15:16Z |
BUG: IntervalArray.shift with invalid NA fill_value | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 7d4477cd4bd8d..6c5f74f728934 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1195,6 +1195,7 @@ Interval
^^^^^^^^
- Bug in :meth:`IntervalIndex.is_overlapping` incorrect output if interval has duplicate left boundaries (:issue:`49581`)
- Bug in :meth:`Series.infer_objects` failing to infer :class:`IntervalDtype` for an object series of :class:`Interval` objects (:issue:`50090`)
+- Bug in :meth:`Series.shift` with :class:`IntervalDtype` and invalid null ``fill_value`` failing to raise ``TypeError`` (:issue:`51258`)
-
Indexing
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 338cfa18fbe66..6805d32049d34 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1051,8 +1051,7 @@ def shift(self, periods: int = 1, fill_value: object = None) -> IntervalArray:
if not len(self) or periods == 0:
return self.copy()
- if isna(fill_value):
- fill_value = self.dtype.na_value
+ self._validate_scalar(fill_value)
# ExtensionArray.shift doesn't work for two reasons
# 1. IntervalArray.dtype.na_value may not be correct for the dtype.
diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
index b2476a7a076fc..b97eb32a60838 100644
--- a/pandas/tests/arrays/interval/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -95,6 +95,10 @@ def test_shift(self):
expected = IntervalArray.from_tuples([(np.nan, np.nan), (1.0, 2.0)])
tm.assert_interval_array_equal(result, expected)
+ msg = "can only insert Interval objects and NA into an IntervalArray"
+ with pytest.raises(TypeError, match=msg):
+ a.shift(1, fill_value=pd.NaT)
+
def test_shift_datetime(self):
# GH#31502, GH#31504
a = IntervalArray.from_breaks(date_range("2000", periods=4))
@@ -106,6 +110,10 @@ def test_shift_datetime(self):
expected = a.take([1, 2, -1], allow_fill=True)
tm.assert_interval_array_equal(result, expected)
+ msg = "can only insert Interval objects and NA into an IntervalArray"
+ with pytest.raises(TypeError, match=msg):
+ a.shift(1, fill_value=np.timedelta64("NaT", "ns"))
+
class TestSetitem:
def test_set_na(self, left_right_dtypes):
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51258 | 2023-02-09T02:59:51Z | 2023-02-09T17:21:45Z | 2023-02-09T17:21:45Z | 2023-02-09T17:48:58Z |
TST: clean up some groupby xfails | diff --git a/pandas/tests/groupby/test_api_consistency.py b/pandas/tests/groupby/test_api_consistency.py
index bd29f29719494..d62ee3593cd44 100644
--- a/pandas/tests/groupby/test_api_consistency.py
+++ b/pandas/tests/groupby/test_api_consistency.py
@@ -21,17 +21,22 @@ def test_frame_consistency(request, groupby_func):
if groupby_func in ("first", "last"):
msg = "first and last are entirely different between frame and groupby"
request.node.add_marker(pytest.mark.xfail(reason=msg))
- if groupby_func in ("nth", "cumcount", "ngroup"):
+ if groupby_func in ("cumcount",):
msg = "DataFrame has no such method"
request.node.add_marker(pytest.mark.xfail(reason=msg))
- if groupby_func in ("size",):
- msg = "Method is a property"
- request.node.add_marker(pytest.mark.xfail(reason=msg))
+
+ if groupby_func == "ngroup":
+ assert not hasattr(DataFrame, groupby_func)
+ return
frame_method = getattr(DataFrame, groupby_func)
gb_method = getattr(DataFrameGroupBy, groupby_func)
result = set(inspect.signature(gb_method).parameters)
- expected = set(inspect.signature(frame_method).parameters)
+ if groupby_func == "size":
+ # "size" is a method on GroupBy but property on DataFrame:
+ expected = {"self"}
+ else:
+ expected = set(inspect.signature(frame_method).parameters)
# Exclude certain arguments from result and expected depending on the operation
# Some of these may be purposeful inconsistencies between the APIs
@@ -79,17 +84,22 @@ def test_series_consistency(request, groupby_func):
if groupby_func in ("first", "last"):
msg = "first and last are entirely different between Series and groupby"
request.node.add_marker(pytest.mark.xfail(reason=msg))
- if groupby_func in ("nth", "cumcount", "ngroup", "corrwith"):
+ if groupby_func in ("cumcount", "corrwith"):
msg = "Series has no such method"
request.node.add_marker(pytest.mark.xfail(reason=msg))
- if groupby_func in ("size",):
- msg = "Method is a property"
- request.node.add_marker(pytest.mark.xfail(reason=msg))
+
+ if groupby_func == "ngroup":
+ assert not hasattr(Series, groupby_func)
+ return
series_method = getattr(Series, groupby_func)
gb_method = getattr(SeriesGroupBy, groupby_func)
result = set(inspect.signature(gb_method).parameters)
- expected = set(inspect.signature(series_method).parameters)
+ if groupby_func == "size":
+ # "size" is a method on GroupBy but property on Series
+ expected = {"self"}
+ else:
+ expected = set(inspect.signature(series_method).parameters)
# Exclude certain arguments from result and expected depending on the operation
# Some of these may be purposeful inconsistencies between the APIs
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51257 | 2023-02-09T01:41:03Z | 2023-02-09T17:21:33Z | 2023-02-09T17:21:33Z | 2023-02-09T17:52:44Z |
BUG: Groupby.filter with pd.NA | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 7745de87633eb..bd0f1bc677fba 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -562,7 +562,7 @@ def filter(self, func, dropna: bool = True, *args, **kwargs):
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
- return b and notna(b)
+ return notna(b) and b
try:
indices = [
@@ -1717,7 +1717,7 @@ def filter(self, func, dropna: bool = True, *args, **kwargs):
# interpret the result of the filter
if is_bool(res) or (is_scalar(res) and isna(res)):
- if res and notna(res):
+ if notna(res) and res:
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
diff --git a/pandas/tests/groupby/test_filters.py b/pandas/tests/groupby/test_filters.py
index c121689232bb2..bc9795ca21e58 100644
--- a/pandas/tests/groupby/test_filters.py
+++ b/pandas/tests/groupby/test_filters.py
@@ -172,6 +172,20 @@ def test_filter_nan_is_false():
tm.assert_series_equal(g_s.filter(f), s[[]])
+def test_filter_pdna_is_false():
+ # in particular, dont raise in filter trying to call bool(pd.NA)
+ df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
+ ser = df["B"]
+ g_df = df.groupby(df["B"])
+ g_s = ser.groupby(ser)
+
+ func = lambda x: pd.NA
+ res = g_df.filter(func)
+ tm.assert_frame_equal(res, df.loc[[]])
+ res = g_s.filter(func)
+ tm.assert_series_equal(res, ser[[]])
+
+
def test_filter_against_workaround():
np.random.seed(0)
# Series of ints
| @Dr-Irv this is an example of the kind of bug that I have in mind when I said in the Categorical issue that getting pd.NA is a PITA. We've had 3 years to track them down and we're still finding places where having it unexpectedly causes bugs. Users will have this long tail until the end of time. | https://api.github.com/repos/pandas-dev/pandas/pulls/51255 | 2023-02-09T00:06:05Z | 2023-02-10T21:19:51Z | 2023-02-10T21:19:51Z | 2023-02-10T21:21:13Z |
BLD: Try stripping wheels | diff --git a/pyproject.toml b/pyproject.toml
index e5d6f420915ef..f4998fbe82722 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -143,6 +143,7 @@ parentdir_prefix = "pandas-"
[tool.cibuildwheel]
skip = "cp36-* cp37-* pp37-* *-manylinux_i686 *_ppc64le *_s390x *-musllinux*"
build-verbosity = "3"
+environment = { LDFLAGS="-Wl,--strip-debug" }
test-requires = "hypothesis>=6.34.2 pytest>=7.0.0 pytest-xdist>=2.2.0 pytest-asyncio>=0.17"
test-command = "python {project}/ci/test_wheels.py"
@@ -160,6 +161,13 @@ select = "*-win*"
# missing from the wheel
test-command = ""
+[[tool.cibuildwheel.overrides]]
+# Don't strip wheels on macOS.
+# macOS doesn't support stripping wheels with linker
+# https://github.com/MacPython/numpy-wheels/pull/87#issuecomment-624878264
+select = "*-macosx*"
+environment = ""
+
[[tool.cibuildwheel.overrides]]
select = "*-win32"
environment = { IS_32_BIT="true" }
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51251 | 2023-02-08T23:14:16Z | 2023-02-09T16:17:36Z | 2023-02-09T16:17:36Z | 2023-02-13T12:32:26Z |
ENH: Add CoW optimization to interpolate | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index bea7dd0a1ed6d..6bc0cb2969734 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -223,6 +223,9 @@ Copy-on-Write improvements
- :meth:`DataFrame.to_period` / :meth:`Series.to_period`
- :meth:`DataFrame.truncate`
- :meth:`DataFrame.tz_convert` / :meth:`Series.tz_localize`
+ - :meth:`DataFrame.interpolate` / :meth:`Series.interpolate`
+ - :meth:`DataFrame.ffill` / :meth:`Series.ffill`
+ - :meth:`DataFrame.bfill` / :meth:`Series.bfill`
- :meth:`DataFrame.infer_objects` / :meth:`Series.infer_objects`
- :meth:`DataFrame.astype` / :meth:`Series.astype`
- :func:`concat`
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index e66011acb978b..5ccc0aeb03500 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -228,7 +228,10 @@ def make_block(
@final
def make_block_same_class(
- self, values, placement: BlockPlacement | None = None
+ self,
+ values,
+ placement: BlockPlacement | None = None,
+ refs: BlockValuesRefs | None = None,
) -> Block:
"""Wrap given values in a block of same type as self."""
# Pre-2.0 we called ensure_wrapped_if_datetimelike because fastparquet
@@ -237,7 +240,7 @@ def make_block_same_class(
placement = self._mgr_locs
# We assume maybe_coerce_values has already been called
- return type(self)(values, placement=placement, ndim=self.ndim)
+ return type(self)(values, placement=placement, ndim=self.ndim, refs=refs)
@final
def __repr__(self) -> str:
@@ -421,7 +424,9 @@ def coerce_to_target_dtype(self, other) -> Block:
return self.astype(new_dtype, copy=False)
@final
- def _maybe_downcast(self, blocks: list[Block], downcast=None) -> list[Block]:
+ def _maybe_downcast(
+ self, blocks: list[Block], downcast=None, using_cow: bool = False
+ ) -> list[Block]:
if downcast is False:
return blocks
@@ -431,23 +436,26 @@ def _maybe_downcast(self, blocks: list[Block], downcast=None) -> list[Block]:
# but ATM it breaks too much existing code.
# split and convert the blocks
- return extend_blocks([blk.convert() for blk in blocks])
+ return extend_blocks(
+ [blk.convert(using_cow=using_cow, copy=not using_cow) for blk in blocks]
+ )
if downcast is None:
return blocks
- return extend_blocks([b._downcast_2d(downcast) for b in blocks])
+ return extend_blocks([b._downcast_2d(downcast, using_cow) for b in blocks])
@final
@maybe_split
- def _downcast_2d(self, dtype) -> list[Block]:
+ def _downcast_2d(self, dtype, using_cow: bool = False) -> list[Block]:
"""
downcast specialized to 2D case post-validation.
Refactored to allow use of maybe_split.
"""
new_values = maybe_downcast_to_dtype(self.values, dtype=dtype)
- return [self.make_block(new_values)]
+ refs = self.refs if using_cow and new_values is self.values else None
+ return [self.make_block(new_values, refs=refs)]
def convert(
self,
@@ -1209,6 +1217,7 @@ def interpolate(
limit_area: str | None = None,
fill_value: Any | None = None,
downcast: str | None = None,
+ using_cow: bool = False,
**kwargs,
) -> list[Block]:
@@ -1216,6 +1225,8 @@ def interpolate(
if not self._can_hold_na:
# If there are no NAs, then interpolate is a no-op
+ if using_cow:
+ return [self.copy(deep=False)]
return [self] if inplace else [self.copy()]
try:
@@ -1224,8 +1235,10 @@ def interpolate(
m = None
if m is None and self.dtype.kind != "f":
# only deal with floats
- # bc we already checked that can_hold_na, we dont have int dtype here
+ # bc we already checked that can_hold_na, we don't have int dtype here
# test_interp_basic checks that we make a copy here
+ if using_cow:
+ return [self.copy(deep=False)]
return [self] if inplace else [self.copy()]
if self.is_object and self.ndim == 2 and self.shape[0] != 1 and axis == 0:
@@ -1244,7 +1257,15 @@ def interpolate(
**kwargs,
)
- data = self.values if inplace else self.values.copy()
+ refs = None
+ if inplace:
+ if using_cow and self.refs.has_reference():
+ data = self.values.copy()
+ else:
+ data = self.values
+ refs = self.refs
+ else:
+ data = self.values.copy()
data = cast(np.ndarray, data) # bc overridden by ExtensionBlock
missing.interpolate_array_2d(
@@ -1259,8 +1280,8 @@ def interpolate(
**kwargs,
)
- nb = self.make_block_same_class(data)
- return nb._maybe_downcast([nb], downcast)
+ nb = self.make_block_same_class(data, refs=refs)
+ return nb._maybe_downcast([nb], downcast, using_cow)
def diff(self, n: int, axis: AxisInt = 1) -> list[Block]:
"""return block for the diff of the values"""
@@ -1632,6 +1653,7 @@ def interpolate(
inplace: bool = False,
limit: int | None = None,
fill_value=None,
+ using_cow: bool = False,
**kwargs,
):
values = self.values
@@ -2011,6 +2033,7 @@ def interpolate(
inplace: bool = False,
limit: int | None = None,
fill_value=None,
+ using_cow: bool = False,
**kwargs,
):
values = self.values
@@ -2020,12 +2043,20 @@ def interpolate(
# "Literal['linear']") [comparison-overlap]
if method == "linear": # type: ignore[comparison-overlap]
# TODO: GH#50950 implement for arbitrary EAs
- data_out = values._ndarray if inplace else values._ndarray.copy()
+ refs = None
+ if using_cow:
+ if inplace and not self.refs.has_reference():
+ data_out = values._ndarray
+ refs = self.refs
+ else:
+ data_out = values._ndarray.copy()
+ else:
+ data_out = values._ndarray if inplace else values._ndarray.copy()
missing.interpolate_array_2d(
data_out, method=method, limit=limit, index=index, axis=axis
)
new_values = type(values)._simple_new(data_out, dtype=values.dtype)
- return self.make_block_same_class(new_values)
+ return self.make_block_same_class(new_values, refs=refs)
elif values.ndim == 2 and axis == 0:
# NDArrayBackedExtensionArray.fillna assumes axis=1
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 517e6d7e48275..3601337b4cb9c 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -389,14 +389,9 @@ def diff(self: T, n: int, axis: AxisInt) -> T:
return self.apply("diff", n=n, axis=axis)
def interpolate(self: T, inplace: bool, **kwargs) -> T:
- if inplace:
- # TODO(CoW) can be optimized to only copy those blocks that have refs
- if using_copy_on_write() and any(
- not self._has_no_reference_block(i) for i in range(len(self.blocks))
- ):
- self = self.copy()
-
- return self.apply("interpolate", inplace=inplace, **kwargs)
+ return self.apply(
+ "interpolate", inplace=inplace, **kwargs, using_cow=using_copy_on_write()
+ )
def shift(self: T, periods: int, axis: AxisInt, fill_value) -> T:
axis = self._normalize_axis(axis)
diff --git a/pandas/tests/copy_view/test_interp_fillna.py b/pandas/tests/copy_view/test_interp_fillna.py
new file mode 100644
index 0000000000000..1bfcf7d180e54
--- /dev/null
+++ b/pandas/tests/copy_view/test_interp_fillna.py
@@ -0,0 +1,164 @@
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ NaT,
+ Series,
+ Timestamp,
+)
+import pandas._testing as tm
+from pandas.tests.copy_view.util import get_array
+
+
+@pytest.mark.parametrize("method", ["pad", "nearest", "linear"])
+def test_interpolate_no_op(using_copy_on_write, method):
+ df = DataFrame({"a": [1, 2]})
+ df_orig = df.copy()
+
+ result = df.interpolate(method=method)
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+ else:
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+
+ result.iloc[0, 0] = 100
+
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize("func", ["ffill", "bfill"])
+def test_interp_fill_functions(using_copy_on_write, func):
+ # Check that these takes the same code paths as interpolate
+ df = DataFrame({"a": [1, 2]})
+ df_orig = df.copy()
+
+ result = getattr(df, func)()
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+ else:
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+
+ result.iloc[0, 0] = 100
+
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize("func", ["ffill", "bfill"])
+@pytest.mark.parametrize(
+ "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]
+)
+def test_interpolate_triggers_copy(using_copy_on_write, vals, func):
+ df = DataFrame({"a": vals})
+ result = getattr(df, func)()
+
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+ if using_copy_on_write:
+ # Check that we don't have references when triggering a copy
+ assert result._mgr._has_no_reference(0)
+
+
+@pytest.mark.parametrize(
+ "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]
+)
+def test_interpolate_inplace_no_reference_no_copy(using_copy_on_write, vals):
+ df = DataFrame({"a": vals})
+ arr = get_array(df, "a")
+ df.interpolate(method="linear", inplace=True)
+
+ assert np.shares_memory(arr, get_array(df, "a"))
+ if using_copy_on_write:
+ # Check that we don't have references when triggering a copy
+ assert df._mgr._has_no_reference(0)
+
+
+@pytest.mark.parametrize(
+ "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]
+)
+def test_interpolate_inplace_with_refs(using_copy_on_write, vals):
+ df = DataFrame({"a": [1, np.nan, 2]})
+ df_orig = df.copy()
+ arr = get_array(df, "a")
+ view = df[:]
+ df.interpolate(method="linear", inplace=True)
+
+ if using_copy_on_write:
+ # Check that copy was triggered in interpolate and that we don't
+ # have any references left
+ assert not np.shares_memory(arr, get_array(df, "a"))
+ tm.assert_frame_equal(df_orig, view)
+ assert df._mgr._has_no_reference(0)
+ assert view._mgr._has_no_reference(0)
+ else:
+ assert np.shares_memory(arr, get_array(df, "a"))
+
+
+def test_interpolate_cleaned_fill_method(using_copy_on_write):
+ # Check that "method is set to None" case works correctly
+ df = DataFrame({"a": ["a", np.nan, "c"], "b": 1})
+ df_orig = df.copy()
+
+ result = df.interpolate(method="asfreq")
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+ else:
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+
+ result.iloc[0, 0] = Timestamp("2021-12-31")
+
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_interpolate_object_convert_no_op(using_copy_on_write):
+ df = DataFrame({"a": ["a", "b", "c"], "b": 1})
+ arr_a = get_array(df, "a")
+ df.interpolate(method="pad", inplace=True)
+
+ # Now CoW makes a copy, it should not!
+ if using_copy_on_write:
+ assert df._mgr._has_no_reference(0)
+ assert np.shares_memory(arr_a, get_array(df, "a"))
+
+
+def test_interpolate_object_convert_copies(using_copy_on_write):
+ df = DataFrame({"a": Series([1, 2], dtype=object), "b": 1})
+ arr_a = get_array(df, "a")
+ df.interpolate(method="pad", inplace=True)
+
+ if using_copy_on_write:
+ assert df._mgr._has_no_reference(0)
+ assert not np.shares_memory(arr_a, get_array(df, "a"))
+
+
+def test_interpolate_downcast(using_copy_on_write):
+ df = DataFrame({"a": [1, np.nan, 2.5], "b": 1})
+ arr_a = get_array(df, "a")
+ df.interpolate(method="pad", inplace=True, downcast="infer")
+
+ if using_copy_on_write:
+ assert df._mgr._has_no_reference(0)
+ assert np.shares_memory(arr_a, get_array(df, "a"))
+
+
+def test_interpolate_downcast_reference_triggers_copy(using_copy_on_write):
+ df = DataFrame({"a": [1, np.nan, 2.5], "b": 1})
+ df_orig = df.copy()
+ arr_a = get_array(df, "a")
+ view = df[:]
+ df.interpolate(method="pad", inplace=True, downcast="infer")
+
+ if using_copy_on_write:
+ assert df._mgr._has_no_reference(0)
+ assert not np.shares_memory(arr_a, get_array(df, "a"))
+ tm.assert_frame_equal(df_orig, view)
+ else:
+ tm.assert_frame_equal(df, view)
diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py
index 20e4571e2ba05..d6da967106fe6 100644
--- a/pandas/tests/frame/methods/test_interpolate.py
+++ b/pandas/tests/frame/methods/test_interpolate.py
@@ -52,7 +52,7 @@ def test_interpolate_inplace(self, frame_or_series, using_array_manager, request
assert np.shares_memory(orig, obj.values)
assert orig.squeeze()[1] == 1.5
- def test_interp_basic(self):
+ def test_interp_basic(self, using_copy_on_write):
df = DataFrame(
{
"A": [1, 2, np.nan, 4],
@@ -75,8 +75,12 @@ def test_interp_basic(self):
# check we didn't operate inplace GH#45791
cvalues = df["C"]._values
dvalues = df["D"].values
- assert not np.shares_memory(cvalues, result["C"]._values)
- assert not np.shares_memory(dvalues, result["D"]._values)
+ if using_copy_on_write:
+ assert np.shares_memory(cvalues, result["C"]._values)
+ assert np.shares_memory(dvalues, result["D"]._values)
+ else:
+ assert not np.shares_memory(cvalues, result["C"]._values)
+ assert not np.shares_memory(dvalues, result["D"]._values)
res = df.interpolate(inplace=True)
assert res is None
@@ -95,14 +99,6 @@ def test_interp_basic_with_non_range_index(self):
"D": list("abcd"),
}
)
- expected = DataFrame(
- {
- "A": [1.0, 2.0, 3.0, 4.0],
- "B": [1.0, 4.0, 9.0, 9.0],
- "C": [1, 2, 3, 5],
- "D": list("abcd"),
- }
- )
result = df.set_index("C").interpolate()
expected = df.set_index("C")
@@ -327,20 +323,24 @@ def test_interp_raise_on_all_object_dtype(self):
df.interpolate()
def test_interp_inplace(self, using_copy_on_write):
- # TODO(CoW) inplace keyword (it is still mutating the parent)
- if using_copy_on_write:
- pytest.skip("CoW: inplace keyword not yet handled")
df = DataFrame({"a": [1.0, 2.0, np.nan, 4.0]})
expected = DataFrame({"a": [1.0, 2.0, 3.0, 4.0]})
+ expected_cow = df.copy()
result = df.copy()
return_value = result["a"].interpolate(inplace=True)
assert return_value is None
- tm.assert_frame_equal(result, expected)
+ if using_copy_on_write:
+ tm.assert_frame_equal(result, expected_cow)
+ else:
+ tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result["a"].interpolate(inplace=True, downcast="infer")
assert return_value is None
- tm.assert_frame_equal(result, expected.astype("int64"))
+ if using_copy_on_write:
+ tm.assert_frame_equal(result, expected_cow)
+ else:
+ tm.assert_frame_equal(result, expected.astype("int64"))
def test_interp_inplace_row(self):
# GH 10395
| - [ ] xref #49473 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
This covers everything except the fillna branches, I intend to do fillna in a separate pr | https://api.github.com/repos/pandas-dev/pandas/pulls/51249 | 2023-02-08T21:58:41Z | 2023-02-10T10:27:59Z | 2023-02-10T10:27:58Z | 2023-02-10T10:28:07Z |
PERF: Construction of a DatetimeIndex from a list of Timestamp with timezone | diff --git a/.github/workflows/package-checks.yml b/.github/workflows/package-checks.yml
index d5854b3fcad18..1ec26e7ecdeef 100644
--- a/.github/workflows/package-checks.yml
+++ b/.github/workflows/package-checks.yml
@@ -22,7 +22,7 @@ jobs:
runs-on: ubuntu-22.04
strategy:
matrix:
- extra: ["test", "performance", "timezone", "computation", "fss", "aws", "gcp", "excel", "parquet", "feather", "hdf5", "spss", "postgresql", "mysql", "sql-other", "html", "xml", "plot", "output_formatting", "clipboard", "compression", "all"]
+ extra: ["test", "performance", "computation", "fss", "aws", "gcp", "excel", "parquet", "feather", "hdf5", "spss", "postgresql", "mysql", "sql-other", "html", "xml", "plot", "output_formatting", "clipboard", "compression", "all"]
fail-fast: false
name: Install Extras - ${{ matrix.extra }}
concurrency:
diff --git a/ci/deps/actions-310-numpydev.yaml b/ci/deps/actions-310-numpydev.yaml
index 1a461319685d2..a500ad4a6f852 100644
--- a/ci/deps/actions-310-numpydev.yaml
+++ b/ci/deps/actions-310-numpydev.yaml
@@ -18,9 +18,11 @@ dependencies:
- python-dateutil
- pytz
- pip
+
- pip:
- "cython"
- "--extra-index-url https://pypi.anaconda.org/scipy-wheels-nightly/simple"
- "--pre"
- "numpy"
- "scipy"
+ - "tzdata>=2022.1"
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml
index 53a10ed10e4ff..f40b555593f6b 100644
--- a/ci/deps/actions-310.yaml
+++ b/ci/deps/actions-310.yaml
@@ -49,8 +49,10 @@ dependencies:
- scipy>=1.7.1
- sqlalchemy>=1.4.16
- tabulate>=0.8.9
- - tzdata>=2022a
- xarray>=0.21.0
- xlrd>=2.0.1
- xlsxwriter>=1.4.3
- zstandard>=0.15.2
+
+ - pip:
+ - tzdata>=2022.1
diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml
index 9b4652db0446c..fa08bdf438dff 100644
--- a/ci/deps/actions-311.yaml
+++ b/ci/deps/actions-311.yaml
@@ -49,8 +49,10 @@ dependencies:
- scipy>=1.7.1
- sqlalchemy>=1.4.16
- tabulate>=0.8.9
- - tzdata>=2022a
- xarray>=0.21.0
- xlrd>=2.0.1
- xlsxwriter>=1.4.3
- zstandard>=0.15.2
+
+ - pip:
+ - tzdata>=2022.1
diff --git a/ci/deps/actions-38-downstream_compat.yaml b/ci/deps/actions-38-downstream_compat.yaml
index 85bc05c4bf65c..a9265bd84ee87 100644
--- a/ci/deps/actions-38-downstream_compat.yaml
+++ b/ci/deps/actions-38-downstream_compat.yaml
@@ -68,3 +68,6 @@ dependencies:
- pandas-gbq>=0.15.0
- pyyaml
- py
+
+ - pip:
+ - tzdata>=2022.1
diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml
index e5c32dad04c01..2eb2ade8a2934 100644
--- a/ci/deps/actions-38-minimum_versions.yaml
+++ b/ci/deps/actions-38-minimum_versions.yaml
@@ -52,7 +52,6 @@ dependencies:
- scipy=1.7.1
- sqlalchemy=1.4.16
- tabulate=0.8.9
- - tzdata=2022a
- xarray=0.21.0
- xlrd=2.0.1
- xlsxwriter=1.4.3
@@ -60,3 +59,4 @@ dependencies:
- pip:
- pyqt5==5.15.1
+ - tzdata==2022.1
diff --git a/ci/deps/actions-38.yaml b/ci/deps/actions-38.yaml
index c65f194830417..27872514447a5 100644
--- a/ci/deps/actions-38.yaml
+++ b/ci/deps/actions-38.yaml
@@ -53,3 +53,6 @@ dependencies:
- xlrd>=2.0.1
- xlsxwriter>=1.4.3
- zstandard>=0.15.2
+
+ - pip:
+ - tzdata>=2022.1
diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml
index 0a5039126e49f..4b0575d8a3afd 100644
--- a/ci/deps/actions-39.yaml
+++ b/ci/deps/actions-39.yaml
@@ -49,8 +49,10 @@ dependencies:
- scipy>=1.7.1
- sqlalchemy>=1.4.16
- tabulate>=0.8.9
- - tzdata>=2022a
- xarray>=0.21.0
- xlrd>=2.0.1
- xlsxwriter>=1.4.3
- zstandard>=0.15.2
+
+ - pip:
+ - tzdata>=2022.1
diff --git a/ci/deps/actions-pypy-38.yaml b/ci/deps/actions-pypy-38.yaml
index 36e1df6773f4c..87b8344a25eb1 100644
--- a/ci/deps/actions-pypy-38.yaml
+++ b/ci/deps/actions-pypy-38.yaml
@@ -22,3 +22,6 @@ dependencies:
- numpy
- python-dateutil
- pytz
+
+ - pip:
+ - tzdata>=2022.1
diff --git a/ci/test_wheels_windows.bat b/ci/test_wheels_windows.bat
index 33de4fc5ad62d..6364169e53924 100644
--- a/ci/test_wheels_windows.bat
+++ b/ci/test_wheels_windows.bat
@@ -3,7 +3,7 @@ pd.test(extra_args=['-m not clipboard and not single_cpu and not slow and not ne
pd.test(extra_args=['-m not clipboard and single_cpu and not slow and not network and not db'])
python --version
-pip install pytz six numpy python-dateutil
+pip install pytz six numpy python-dateutil tzdata>=2022.1
pip install hypothesis>=6.34.2 pytest>=7.0.0 pytest-xdist>=2.2.0 pytest-asyncio>=0.17
pip install --find-links=pandas/dist --no-index pandas
python -c "%test_command%"
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index 28a57720a89a5..f34676edd26dc 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -308,25 +308,6 @@ Dependency Minimum Version pip ext
`numba <https://github.com/numba/numba>`__ 0.53.1 performance Alternative execution engine for operations that accept ``engine="numba"`` using a JIT compiler that translates Python functions to optimized machine code using the LLVM compiler.
===================================================== ================== ================== ===================================================================================================================================================================================
-Timezones
-^^^^^^^^^
-
-Installable with ``pip install "pandas[timezone]"``
-
-========================= ========================= =============== =============================================================
-Dependency Minimum Version pip extra Notes
-========================= ========================= =============== =============================================================
-tzdata 2022.1(pypi)/ timezone Allows the use of ``zoneinfo`` timezones with pandas.
- 2022a(for system tzdata) **Note**: You only need to install the pypi package if your
- system does not already provide the IANA tz database.
- However, the minimum tzdata version still applies, even if it
- is not enforced through an error.
-
- If you would like to keep your system tzdata version updated,
- it is recommended to use the ``tzdata`` package from
- conda-forge.
-========================= ========================= =============== =============================================================
-
Visualization
^^^^^^^^^^^^^
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index ea7bc06746689..05a9ea4390277 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -25,7 +25,7 @@ When installing pandas using pip, sets of optional dependencies can also be inst
pip install "pandas[performance, aws]>=2.0.0"
The available extras, found in the :ref:`installation guide<install.dependencies>`, are
-``[all, performance, computation, timezone, fss, aws, gcp, excel, parquet, feather, hdf5, spss, postgresql, mysql,
+``[all, performance, computation, fss, aws, gcp, excel, parquet, feather, hdf5, spss, postgresql, mysql,
sql-other, html, xml, plot, output_formatting, clipboard, compression, test]`` (:issue:`39164`).
.. _whatsnew_200.enhancements.index_can_hold_numpy_numeric_dtypes:
@@ -647,6 +647,8 @@ If installed, we now require:
+-------------------+-----------------+----------+---------+
| python-dateutil | 2.8.2 | X | X |
+-------------------+-----------------+----------+---------+
+| tzdata | 2022.1 | X | X |
++-------------------+-----------------+----------+---------+
For `optional libraries <https://pandas.pydata.org/docs/getting_started/install.html>`_ the general recommendation is to use the latest version.
The following table lists the lowest version per library that is currently being tested throughout the development of pandas.
diff --git a/environment.yml b/environment.yml
index d74084f408260..f29ade1dc5173 100644
--- a/environment.yml
+++ b/environment.yml
@@ -52,7 +52,6 @@ dependencies:
- scipy>=1.7.1
- sqlalchemy>=1.4.16
- tabulate>=0.8.9
- - tzdata>=2022a
- xarray>=0.21.0
- xlrd>=2.0.1
- xlsxwriter>=1.4.3
@@ -119,3 +118,4 @@ dependencies:
- pip:
- sphinx-toggleprompt
- typing_extensions; python_version<"3.11"
+ - tzdata>=2022.1
diff --git a/pyproject.toml b/pyproject.toml
index 16143706196ef..ced96f974e4ba 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -27,7 +27,8 @@ dependencies = [
"numpy>=1.21.0; python_version>='3.10'",
"numpy>=1.23.2; python_version>='3.11'",
"python-dateutil>=2.8.2",
- "pytz>=2020.1"
+ "pytz>=2020.1",
+ "tzdata>=2022.1"
]
classifiers = [
'Development Status :: 5 - Production/Stable',
@@ -57,7 +58,6 @@ matplotlib = "pandas:plotting._matplotlib"
[project.optional-dependencies]
test = ['hypothesis>=6.34.2', 'pytest>=7.0.0', 'pytest-xdist>=2.2.0', 'pytest-asyncio>=0.17.0']
performance = ['bottleneck>=1.3.2', 'numba>=0.53.1', 'numexpr>=2.7.1']
-timezone = ['tzdata>=2022.1']
computation = ['scipy>=1.7.1', 'xarray>=0.21.0']
fss = ['fsspec>=2021.07.0']
aws = ['s3fs>=2021.08.0']
@@ -112,7 +112,6 @@ all = ['beautifulsoup4>=4.9.3',
'SQLAlchemy>=1.4.16',
'tables>=3.6.1',
'tabulate>=0.8.9',
- 'tzdata>=2022.1',
'xarray>=0.21.0',
'xlrd>=2.0.1',
'xlsxwriter>=1.4.3',
diff --git a/requirements-dev.txt b/requirements-dev.txt
index f9807a7926c1c..9c0bdc64d6e07 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -41,7 +41,6 @@ s3fs>=2021.08.0
scipy>=1.7.1
SQLAlchemy>=1.4.16
tabulate>=0.8.9
-tzdata>=2022.1
xarray>=0.21.0
xlrd>=2.0.1
xlsxwriter>=1.4.3
@@ -88,4 +87,5 @@ requests
pygments
sphinx-toggleprompt
typing_extensions; python_version<"3.11"
+tzdata>=2022.1
setuptools>=61.0.0
diff --git a/scripts/validate_min_versions_in_sync.py b/scripts/validate_min_versions_in_sync.py
index 3c12f17fe72cf..9edce3a00a502 100755
--- a/scripts/validate_min_versions_in_sync.py
+++ b/scripts/validate_min_versions_in_sync.py
@@ -39,7 +39,6 @@
EXCLUDE_DEPS = {"tzdata", "blosc"}
EXCLUSION_LIST = {
"python=3.8[build=*_pypy]": None,
- "tzdata": None,
"pyarrow": None,
}
# pandas package is not available
@@ -228,10 +227,9 @@ def get_versions_from_ci(content: list[str]) -> tuple[dict[str, str], dict[str,
continue
elif seen_required and line.strip():
if "==" in line:
- package, version = line.strip().split("==")
-
+ package, version = line.strip().split("==", maxsplit=1)
else:
- package, version = line.strip().split("=")
+ package, version = line.strip().split("=", maxsplit=1)
package = package[2:]
if package in EXCLUDE_DEPS:
continue
| - [ ] closes #49048 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
trying to use https://peps.python.org/pep-0508/#environment-markers, let's see if this works | https://api.github.com/repos/pandas-dev/pandas/pulls/51247 | 2023-02-08T21:21:11Z | 2023-03-15T15:27:13Z | 2023-03-15T15:27:13Z | 2023-03-17T13:43:29Z |
BUG validate_docstrings.py flake8 PermissionError | diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index 5d0ef6e460486..a0eb02d069c58 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -207,7 +207,9 @@ def validate_pep8(self):
)
error_messages = []
- with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8") as file:
+
+ file = tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=False)
+ try:
file.write(content)
file.flush()
cmd = ["python", "-m", "flake8", "--quiet", "--statistics", file.name]
@@ -217,6 +219,9 @@ def validate_pep8(self):
messages = stdout.strip("\n")
if messages:
error_messages.append(messages)
+ finally:
+ file.close()
+ os.unlink(file.name)
for error_message in error_messages:
error_count, error_code, message = error_message.split(maxsplit=2)
| - [x] Part of #51236
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
When running `scripts/validate_docstrings.py` on Windows11, `flake8` cannot access the temporary file created by `tempfile`, a PermissionError is raised.
Setting delete=False in NamedTemporaryFile solve the issue.
Related issue : [stackoverflow](https://stackoverflow.com/a/54768241/17386911) | https://api.github.com/repos/pandas-dev/pandas/pulls/51244 | 2023-02-08T20:12:01Z | 2023-02-09T10:31:18Z | 2023-02-09T10:31:18Z | 2023-02-09T10:31:28Z |
DOC fix EX02 algorithms.factorize docstring | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 08fbe3be9b092..dcbf1cc0ee522 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -578,9 +578,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
MSG='Partially validate docstrings (EX02)' ; echo $MSG
$BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX02 --ignore_functions \
pandas.DataFrame.plot.line \
- pandas.Index.factorize \
pandas.Period.strftime \
- pandas.Series.factorize \
pandas.Series.floordiv \
pandas.Series.plot.line \
pandas.Series.rfloordiv \
@@ -611,7 +609,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.api.types.is_unsigned_integer_dtype \
pandas.core.groupby.DataFrameGroupBy.take \
pandas.core.groupby.SeriesGroupBy.take \
- pandas.factorize \
pandas.io.formats.style.Styler.concat \
pandas.io.formats.style.Styler.export \
pandas.io.formats.style.Styler.set_td_classes \
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 0037cd20e8c1e..636273724b57c 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -688,7 +688,7 @@ def factorize(
>>> codes, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'])
>>> codes
- array([0, 0, 1, 2, 0]...)
+ array([0, 0, 1, 2, 0])
>>> uniques
array(['b', 'a', 'c'], dtype=object)
@@ -697,7 +697,7 @@ def factorize(
>>> codes, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'], sort=True)
>>> codes
- array([1, 1, 0, 2, 1]...)
+ array([1, 1, 0, 2, 1])
>>> uniques
array(['a', 'b', 'c'], dtype=object)
@@ -707,7 +707,7 @@ def factorize(
>>> codes, uniques = pd.factorize(['b', None, 'a', 'c', 'b'])
>>> codes
- array([ 0, -1, 1, 2, 0]...)
+ array([ 0, -1, 1, 2, 0])
>>> uniques
array(['b', 'a', 'c'], dtype=object)
@@ -718,7 +718,7 @@ def factorize(
>>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
>>> codes, uniques = pd.factorize(cat)
>>> codes
- array([0, 0, 1]...)
+ array([0, 0, 1])
>>> uniques
['a', 'c']
Categories (3, object): ['a', 'b', 'c']
@@ -732,7 +732,7 @@ def factorize(
>>> cat = pd.Series(['a', 'a', 'c'])
>>> codes, uniques = pd.factorize(cat)
>>> codes
- array([0, 0, 1]...)
+ array([0, 0, 1])
>>> uniques
Index(['a', 'c'], dtype='object')
| - [x] Part of #51236
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Towards #51236.
Fix EX02 docstring errors for :
`pandas.factorize`
`pandas.Index.factorize`
`pandas.Series.factorize`
| https://api.github.com/repos/pandas-dev/pandas/pulls/51243 | 2023-02-08T18:41:39Z | 2023-02-09T17:45:28Z | 2023-02-09T17:45:28Z | 2023-02-09T17:45:29Z |
BUG: bug in Index._should_fallback_to_positional | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 29f360e050548..c991b3b6a6004 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1250,6 +1250,8 @@ Indexing
- Bug in :meth:`Series.loc` raising error for out of bounds end of slice indexer (:issue:`50161`)
- Bug in :meth:`DataFrame.loc` raising ``ValueError`` with ``bool`` indexer and :class:`MultiIndex` (:issue:`47687`)
- Bug in :meth:`DataFrame.loc` raising ``IndexError`` when setting values for a pyarrow-backed column with a non-scalar indexer (:issue:`50085`)
+- Bug in :meth:`DataFrame.__getitem__`, :meth:`Series.__getitem__`, :meth:`DataFrame.__setitem__` and :meth:`Series.__setitem__`
+ when indexing on indexes with extension float dtypes (:class:`Float64` & :class:`Float64`) or complex dtypes using integers (:issue:`51053`)
- Bug in :meth:`DataFrame.loc` modifying object when setting incompatible value with an empty indexer (:issue:`45981`)
- Bug in :meth:`DataFrame.__setitem__` raising ``ValueError`` when right hand side is :class:`DataFrame` with :class:`MultiIndex` columns (:issue:`49121`)
- Bug in :meth:`DataFrame.reindex` casting dtype to ``object`` when :class:`DataFrame` has single extension array column when re-indexing ``columns`` and ``index`` (:issue:`48190`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 363bfe76d40fb..9d44c375c0b80 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5680,9 +5680,12 @@ def _should_fallback_to_positional(self) -> bool:
"""
Should an integer key be treated as positional?
"""
- if isinstance(self.dtype, np.dtype) and self.dtype.kind in ["i", "u", "f"]:
- return False
- return not self._holds_integer()
+ return self.inferred_type not in {
+ "integer",
+ "mixed-integer",
+ "floating",
+ "complex",
+ }
_index_shared_docs[
"get_indexer_non_unique"
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 2dfc31ccc1638..54e30a3355943 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -57,6 +57,15 @@ def test_getitem(self, float_frame):
with pytest.raises(KeyError, match="random"):
float_frame["random"]
+ def test_getitem_numeric_should_not_fallback_to_positional(self, any_numeric_dtype):
+ # GH51053
+ dtype = any_numeric_dtype
+ idx = Index([1, 0, 1], dtype=dtype)
+ df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=idx)
+ result = df[1]
+ expected = DataFrame([[1, 3], [4, 6]], columns=Index([1, 1], dtype=dtype))
+ tm.assert_frame_equal(result, expected, check_exact=True)
+
def test_getitem2(self, float_frame):
df = float_frame.copy()
@@ -71,6 +80,15 @@ def test_getitem2(self, float_frame):
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
+ def test_setitem_numeric_should_not_fallback_to_positional(self, any_numeric_dtype):
+ # GH51053
+ dtype = any_numeric_dtype
+ idx = Index([1, 0, 1], dtype=dtype)
+ df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=idx)
+ df[1] = 10
+ expected = DataFrame([[10, 2, 10], [10, 5, 10]], columns=idx)
+ tm.assert_frame_equal(df, expected, check_exact=True)
+
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index f214ade0a31aa..a8290f472cd7c 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -45,6 +45,26 @@ def test_basic_indexing():
s[5] = 0
+def test_getitem_numeric_should_not_fallback_to_positional(any_numeric_dtype):
+ # GH51053
+ dtype = any_numeric_dtype
+ idx = Index([1, 0, 1], dtype=dtype)
+ ser = Series(range(3), index=idx)
+ result = ser[1]
+ expected = Series([0, 2], index=Index([1, 1], dtype=dtype))
+ tm.assert_series_equal(result, expected, check_exact=True)
+
+
+def test_setitem_numeric_should_not_fallback_to_positional(any_numeric_dtype):
+ # GH51053
+ dtype = any_numeric_dtype
+ idx = Index([1, 0, 1], dtype=dtype)
+ ser = Series(range(3), index=idx)
+ ser[1] = 10
+ expected = Series([10, 1, 10], index=idx)
+ tm.assert_series_equal(ser, expected, check_exact=True)
+
+
def test_basic_getitem_with_labels(datetime_series):
indices = datetime_series.index[[5, 10, 15]]
| - [x] closes #51053
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Fixes #51053. | https://api.github.com/repos/pandas-dev/pandas/pulls/51241 | 2023-02-08T16:38:09Z | 2023-02-13T19:15:32Z | 2023-02-13T19:15:32Z | 2023-02-13T23:27:08Z |
API / CoW: constructing DataFrame from DataFrame/BlockManager creates lazy copy | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 9bf170d6eb9e4..224604a8ff8d0 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -246,6 +246,10 @@ Copy-on-Write improvements
a modification to the data happens) when constructing a Series from an existing
Series with the default of ``copy=False`` (:issue:`50471`)
+- The :class:`DataFrame` constructor will now create a lazy copy (deferring the copy until
+ a modification to the data happens) when constructing from an existing
+ :class:`DataFrame` with the default of ``copy=False`` (:issue:`51239`)
+
- The :class:`DataFrame` constructor, when constructing a DataFrame from a dictionary
of Series objects and specifying ``copy=False``, will now use a lazy copy
of those Series objects for the columns of the DataFrame (:issue:`50777`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 49416cc2d53c0..d80c80fa5d0ab 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -656,6 +656,8 @@ def __init__(
data = data.copy(deep=False)
if isinstance(data, (BlockManager, ArrayManager)):
+ if using_copy_on_write():
+ data = data.copy(deep=False)
# first check if a Manager is passed without any other arguments
# -> use fastpath (without checking Manager type)
if index is None and columns is None and dtype is None and not copy:
diff --git a/pandas/tests/copy_view/test_constructors.py b/pandas/tests/copy_view/test_constructors.py
index 2cacf0d6f6f91..6cf45c194707e 100644
--- a/pandas/tests/copy_view/test_constructors.py
+++ b/pandas/tests/copy_view/test_constructors.py
@@ -82,6 +82,25 @@ def test_series_from_series_with_reindex(using_copy_on_write):
assert not result._mgr.blocks[0].refs.has_reference()
+@pytest.mark.parametrize("func", [lambda x: x, lambda x: x._mgr])
+@pytest.mark.parametrize("columns", [None, ["a"]])
+def test_dataframe_constructor_mgr_or_df(using_copy_on_write, columns, func):
+ df = DataFrame({"a": [1, 2, 3]})
+ df_orig = df.copy()
+
+ new_df = DataFrame(func(df))
+
+ assert np.shares_memory(get_array(df, "a"), get_array(new_df, "a"))
+ new_df.iloc[0] = 100
+
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df, "a"), get_array(new_df, "a"))
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ assert np.shares_memory(get_array(df, "a"), get_array(new_df, "a"))
+ tm.assert_frame_equal(df, new_df)
+
+
@pytest.mark.parametrize("dtype", [None, "int64", "Int64"])
@pytest.mark.parametrize("index", [None, [0, 1, 2]])
@pytest.mark.parametrize("columns", [None, ["a", "b"], ["a", "b", "c"]])
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Checking if everything passes | https://api.github.com/repos/pandas-dev/pandas/pulls/51239 | 2023-02-08T15:57:33Z | 2023-02-26T17:43:12Z | 2023-02-26T17:43:12Z | 2023-03-01T09:22:32Z |
DOC: remove reference of NumericIndex in Int64Index docs | diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index fe11a02eccb3c..2b3c27db7984e 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -313,7 +313,7 @@ def _format_native_types(
Immutable sequence used for indexing and alignment.
.. deprecated:: 1.4.0
- In pandas v2.0 %(klass)s will be removed and :class:`NumericIndex` used instead.
+ In pandas v2.0 %(klass)s will be removed and :class:`Index` used instead.
%(klass)s will remain fully functional for the duration of pandas 1.x.
The basic object storing axis labels for all pandas objects.
| Small fix for https://github.com/pandas-dev/pandas/issues/51020#issuecomment-1422820547, in case we end up doing another 1.5.x release to have correct docs. | https://api.github.com/repos/pandas-dev/pandas/pulls/51238 | 2023-02-08T15:45:11Z | 2023-02-08T18:03:05Z | 2023-02-08T18:03:05Z | 2023-02-08T19:06:21Z |
TYP: more precise types + add type collections | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index dac949f69bfaf..4742348b209d9 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -133,8 +133,10 @@
ALL_INT_EA_DTYPES = UNSIGNED_INT_EA_DTYPES + SIGNED_INT_EA_DTYPES
ALL_INT_DTYPES: list[Dtype] = [*ALL_INT_NUMPY_DTYPES, *ALL_INT_EA_DTYPES]
-FLOAT_NUMPY_DTYPES: list[Dtype] = [float, "float32", "float64"]
+FLOAT_NUMPY_DTYPES: list[NpDtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
+ALL_FLOAT_DTYPES: list[Dtype] = [*FLOAT_NUMPY_DTYPES, *FLOAT_EA_DTYPES]
+
COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: list[Dtype] = [str, "str", "U"]
@@ -146,6 +148,10 @@
OBJECT_DTYPES: list[Dtype] = [object, "object"]
ALL_REAL_NUMPY_DTYPES = FLOAT_NUMPY_DTYPES + ALL_INT_NUMPY_DTYPES
+ALL_REAL_EXTENSION_DTYPES = FLOAT_EA_DTYPES + ALL_INT_EA_DTYPES
+ALL_REAL_DTYPES: list[Dtype] = [*ALL_REAL_NUMPY_DTYPES, *ALL_REAL_EXTENSION_DTYPES]
+ALL_NUMERIC_DTYPES: list[Dtype] = [*ALL_REAL_DTYPES, *COMPLEX_DTYPES]
+
ALL_NUMPY_DTYPES = (
ALL_REAL_NUMPY_DTYPES
+ COMPLEX_DTYPES
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 4d0354a2aab04..2c410bb98b506 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1409,7 +1409,7 @@ def float_ea_dtype(request):
return request.param
-@pytest.fixture(params=tm.FLOAT_NUMPY_DTYPES + tm.FLOAT_EA_DTYPES)
+@pytest.fixture(params=tm.ALL_FLOAT_DTYPES)
def any_float_dtype(request):
"""
Parameterized fixture for float dtypes.
@@ -1614,9 +1614,7 @@ def any_real_numpy_dtype(request):
return request.param
-@pytest.fixture(
- params=tm.ALL_REAL_NUMPY_DTYPES + tm.ALL_INT_EA_DTYPES + tm.FLOAT_EA_DTYPES
-)
+@pytest.fixture(params=tm.ALL_REAL_DTYPES)
def any_real_numeric_dtype(request):
"""
Parameterized fixture for any (purely) real numeric dtype.
@@ -1676,12 +1674,7 @@ def any_numpy_dtype(request):
return request.param
-@pytest.fixture(
- params=tm.ALL_REAL_NUMPY_DTYPES
- + tm.COMPLEX_DTYPES
- + tm.ALL_INT_EA_DTYPES
- + tm.FLOAT_EA_DTYPES
-)
+@pytest.fixture(params=tm.ALL_NUMERIC_DTYPES)
def any_numeric_dtype(request):
"""
Parameterized fixture for all numeric dtypes.
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index e009ba45514a2..c621e9bae78f8 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2262,10 +2262,7 @@ def test_constructor_categorical_series(self):
@pytest.mark.parametrize(
"dtype",
- tm.ALL_INT_NUMPY_DTYPES
- + tm.ALL_INT_EA_DTYPES
- + tm.FLOAT_NUMPY_DTYPES
- + tm.COMPLEX_DTYPES
+ tm.ALL_NUMERIC_DTYPES
+ tm.DATETIME64_DTYPES
+ tm.TIMEDELTA64_DTYPES
+ tm.BOOL_DTYPES,
| `float_numpy_dtypes` has a too wide dtype + I think it would be beneficial for testing purposes to add some more numeric type collections. | https://api.github.com/repos/pandas-dev/pandas/pulls/51234 | 2023-02-08T14:44:45Z | 2023-02-08T19:14:53Z | 2023-02-08T19:14:53Z | 2023-02-08T19:36:10Z |
TST: categorical alignment | diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index a7f73f2e22fca..08fdad5ff1edd 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -287,6 +287,20 @@ def test_alignment_doesnt_change_tz(self):
assert ser.index is dti
assert ser_utc.index is dti_utc
+ def test_alignment_categorical(self):
+ # GH13365
+ cat = Categorical(["3z53", "3z53", "LoJG", "LoJG", "LoJG", "N503"])
+ ser1 = Series(2, index=cat)
+ ser2 = Series(2, index=cat[:-1])
+ result = ser1 * ser2
+
+ exp_index = ["3z53"] * 4 + ["LoJG"] * 9 + ["N503"]
+ exp_index = pd.CategoricalIndex(exp_index, categories=cat.categories)
+ exp_values = [4.0] * 13 + [np.nan]
+ expected = Series(exp_values, exp_index)
+
+ tm.assert_series_equal(result, expected)
+
def test_arithmetic_with_duplicate_index(self):
# GH#8363
| - [x] closes #13365
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
Already fixed on main, just adding a test | https://api.github.com/repos/pandas-dev/pandas/pulls/51232 | 2023-02-08T12:40:28Z | 2023-02-08T17:01:30Z | 2023-02-08T17:01:30Z | 2023-02-23T01:38:49Z |
DOC Correcting EX02 errors | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 3ab63b9bcb860..18f394b8e549b 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -601,12 +601,9 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.api.types.is_int64_dtype \
pandas.api.types.is_integer_dtype \
pandas.api.types.is_interval_dtype \
- pandas.api.types.is_named_tuple \
pandas.api.types.is_numeric_dtype \
pandas.api.types.is_object_dtype \
pandas.api.types.is_period_dtype \
- pandas.api.types.is_re \
- pandas.api.types.is_re_compilable \
pandas.api.types.is_signed_integer_dtype \
pandas.api.types.is_sparse \
pandas.api.types.is_string_dtype \
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index 401b2c80b2c18..28e034de869f4 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -147,6 +147,8 @@ def is_re(obj) -> bool:
Examples
--------
+ >>> from pandas.api.types import is_re
+ >>> import re
>>> is_re(re.compile(".*"))
True
>>> is_re("foo")
@@ -170,6 +172,7 @@ def is_re_compilable(obj) -> bool:
Examples
--------
+ >>> from pandas.api.types import is_re_compilable
>>> is_re_compilable(".*")
True
>>> is_re_compilable(1)
@@ -310,6 +313,7 @@ def is_named_tuple(obj) -> bool:
Examples
--------
>>> from collections import namedtuple
+ >>> from pandas.api.types import is_named_tuple
>>> Point = namedtuple("Point", ["x", "y"])
>>> p = Point(1, 2)
>>>
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Towards https://github.com/pandas-dev/pandas/issues/37875 and https://github.com/pandas-dev/pandas/issues/27977 | https://api.github.com/repos/pandas-dev/pandas/pulls/51231 | 2023-02-08T12:27:04Z | 2023-02-08T14:03:29Z | 2023-02-08T14:03:29Z | 2023-02-08T14:30:38Z |
DOC: add/fix Timedeltas docstrings | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 18f394b8e549b..08fbe3be9b092 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -204,7 +204,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.Timestamp.utctimetuple \
pandas.Timestamp.weekday \
pandas.arrays.DatetimeArray \
- pandas.Timedelta.components \
pandas.Timedelta.view \
pandas.Timedelta.as_unit \
pandas.Timedelta.ceil \
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index d7352f80132a2..9a00adf2d4de7 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1025,6 +1025,23 @@ cdef class _Timedelta(timedelta):
@property
def days(self) -> int: # TODO(cython3): make cdef property
+ """
+ Returns the days of the timedelta.
+
+ Returns
+ -------
+ int
+
+ Examples
+ --------
+ >>> td = pd.Timedelta(1, "d")
+ >>> td.days
+ 1
+
+ >>> td = pd.Timedelta('4 min 3 us 42 ns')
+ >>> td.days
+ 0
+ """
# NB: using the python C-API PyDateTime_DELTA_GET_DAYS will fail
# (or be incorrect)
self._ensure_components()
@@ -1051,11 +1068,13 @@ cdef class _Timedelta(timedelta):
Examples
--------
**Using string input**
+
>>> td = pd.Timedelta('1 days 2 min 3 us 42 ns')
>>> td.seconds
120
**Using integer input**
+
>>> td = pd.Timedelta(42, unit='s')
>>> td.seconds
42
@@ -1273,6 +1292,13 @@ cdef class _Timedelta(timedelta):
def components(self):
"""
Return a components namedtuple-like.
+
+ Examples
+ --------
+ >>> td = pd.Timedelta('2 day 4 min 3 us 42 ns')
+ >>> td.components
+ Components(days=2, hours=0, minutes=4, seconds=0, milliseconds=0,
+ microseconds=3, nanoseconds=42)
"""
self._ensure_components()
# return the named tuple
| - [X] xref #27977
- ~[Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature~
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- ~ Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.~
- ~ Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.~
Add docstrings:
- Timedelta.components
- Timedelta.days
Fix example:
- Timedeltas.seconds | https://api.github.com/repos/pandas-dev/pandas/pulls/51229 | 2023-02-08T08:31:54Z | 2023-02-09T10:34:49Z | 2023-02-09T10:34:49Z | 2023-02-09T10:36:03Z |
BUG: groupby.agg with numba and as_index=False | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index f907e89880d25..3717e9b011f1c 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1297,6 +1297,7 @@ Groupby/resample/rolling
- Bug in :meth:`.DataFrameGroupBy.resample` raises ``KeyError`` when getting the result from a key list when resampling on time index (:issue:`50840`)
- Bug in :meth:`.DataFrameGroupBy.transform` and :meth:`.SeriesGroupBy.transform` would raise incorrectly when grouper had ``axis=1`` for ``"ngroup"`` argument (:issue:`45986`)
- Bug in :meth:`.DataFrameGroupBy.describe` produced incorrect results when data had duplicate columns (:issue:`50806`)
+- Bug in :meth:`.DataFrameGroupBy.agg` with ``engine="numba"`` failing to respect ``as_index=False`` (:issue:`51228`)
-
Reshaping
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 613e23fab0497..86ca486e49a32 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1269,7 +1269,11 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
- return self.obj._constructor(result, index=index, columns=data.columns)
+ result = self.obj._constructor(result, index=index, columns=data.columns)
+ if not self.as_index:
+ result = self._insert_inaxis_grouper(result)
+ result.index = default_index(len(result))
+ return result
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
diff --git a/pandas/tests/groupby/aggregate/test_numba.py b/pandas/tests/groupby/aggregate/test_numba.py
index 0b2fb56a02006..9dd3d1d45abf0 100644
--- a/pandas/tests/groupby/aggregate/test_numba.py
+++ b/pandas/tests/groupby/aggregate/test_numba.py
@@ -51,7 +51,8 @@ def incorrect_function(values, index):
# Filter warnings when parallel=True and the function can't be parallelized by Numba
@pytest.mark.parametrize("jit", [True, False])
@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
-def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython):
+@pytest.mark.parametrize("as_index", [True, False])
+def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):
def func_numba(values, index):
return np.mean(values) * 2.7
@@ -65,7 +66,7 @@ def func_numba(values, index):
{0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
- grouped = data.groupby(0)
+ grouped = data.groupby(0, as_index=as_index)
if pandas_obj == "Series":
grouped = grouped[1]
diff --git a/pandas/tests/groupby/transform/test_numba.py b/pandas/tests/groupby/transform/test_numba.py
index 2b70d7325a209..0264d2a09778f 100644
--- a/pandas/tests/groupby/transform/test_numba.py
+++ b/pandas/tests/groupby/transform/test_numba.py
@@ -48,7 +48,8 @@ def incorrect_function(values, index):
# Filter warnings when parallel=True and the function can't be parallelized by Numba
@pytest.mark.parametrize("jit", [True, False])
@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"])
-def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython):
+@pytest.mark.parametrize("as_index", [True, False])
+def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index):
def func(values, index):
return values + 1
@@ -62,7 +63,7 @@ def func(values, index):
{0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1]
)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
- grouped = data.groupby(0)
+ grouped = data.groupby(0, as_index=as_index)
if pandas_obj == "Series":
grouped = grouped[1]
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51228 | 2023-02-08T03:08:27Z | 2023-02-08T22:13:08Z | 2023-02-08T22:13:08Z | 2023-02-08T22:13:34Z |
PERF: ArrowExtensionArray.to_numpy(dtype=object) | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 7d028935ad175..d7787e50d8179 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1061,7 +1061,7 @@ Performance improvements
- Performance improvement in :meth:`~arrays.ArrowExtensionArray.factorize` (:issue:`49177`)
- Performance improvement in :meth:`~arrays.ArrowExtensionArray.__setitem__` (:issue:`50248`, :issue:`50632`)
- Performance improvement in :class:`~arrays.ArrowExtensionArray` comparison methods when array contains NA (:issue:`50524`)
-- Performance improvement in :meth:`~arrays.ArrowExtensionArray.to_numpy` (:issue:`49973`)
+- Performance improvement in :meth:`~arrays.ArrowExtensionArray.to_numpy` (:issue:`49973`, :issue:`51227`)
- Performance improvement when parsing strings to :class:`BooleanDtype` (:issue:`50613`)
- Performance improvement in :meth:`DataFrame.join` when joining on a subset of a :class:`MultiIndex` (:issue:`48611`)
- Performance improvement for :meth:`MultiIndex.intersection` (:issue:`48604`)
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index a4cde823c6713..82df2f5eac2fa 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -838,12 +838,12 @@ def to_numpy(
na_value = self.dtype.na_value
pa_type = self._data.type
- if (
- is_object_dtype(dtype)
- or pa.types.is_timestamp(pa_type)
- or pa.types.is_duration(pa_type)
- ):
+ if pa.types.is_timestamp(pa_type) or pa.types.is_duration(pa_type):
result = np.array(list(self), dtype=dtype)
+ elif is_object_dtype(dtype) and self._hasna:
+ result = np.empty(len(self), dtype=object)
+ mask = ~self.isna()
+ result[mask] = np.asarray(self[mask]._data)
else:
result = np.asarray(self._data, dtype=dtype)
if copy or self._hasna:
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 522a0d59e4161..4a6705e13032b 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -1525,6 +1525,16 @@ def test_to_numpy_with_defaults(data):
tm.assert_numpy_array_equal(result, expected)
+def test_to_numpy_int_with_na():
+ # GH51227: ensure to_numpy does not convert int to float
+ data = [1, None]
+ arr = pd.array(data, dtype="int64[pyarrow]")
+ result = arr.to_numpy()
+ expected = np.array([1, pd.NA], dtype=object)
+ assert isinstance(result[0], int)
+ tm.assert_numpy_array_equal(result, expected)
+
+
def test_setitem_null_slice(data):
# GH50248
orig = data.copy()
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v2.0.0.rst` file if fixing a bug or adding a new feature.
Perf improvement for `ArrowExtensionArray.to_numpy(dtype=object)`.
(`dtype=object` is not uncommon, see discussion in #22791)
Some examples where it has an impact:
```
import pandas as pd
import numpy as np
data = np.random.randn(1_000_000, 10)
df = pd.DataFrame(data, dtype="float64[pyarrow]")
%timeit df.sum(axis=1)
# 4.04 s ± 53.4 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) <- main
# 796 ms ± 22.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) <- PR
%timeit df.clip(0.0, 0.1)
# 4.82 s ± 159 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) <- main
# 1.51 s ± 10.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) <- PR
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/51227 | 2023-02-08T02:12:22Z | 2023-02-09T17:33:43Z | 2023-02-09T17:33:43Z | 2023-02-09T18:37:38Z |
REF: simplify aggregate_frame, column-name-pinning | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index fd07b472fc3da..3850f6bc12efd 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -269,11 +269,8 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
result = self._aggregate_named(func, *args, **kwargs)
# result is a dict whose keys are the elements of result_index
- index = self.grouper.result_index
- result = Series(result, index=index)
- if not self.as_index:
- result = self._insert_inaxis_grouper(result)
- result.index = default_index(len(result))
+ result = Series(result, index=self.grouper.result_index)
+ result = self._wrap_aggregated_output(result)
return result
agg = aggregate
@@ -1314,19 +1311,9 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
result = self._aggregate_frame(func)
else:
- sobj = self._selected_obj
-
- if isinstance(sobj, Series):
- # GH#35246 test_groupby_as_index_select_column_sum_empty_df
- result.columns = self._obj_with_exclusions.columns.copy()
- else:
- # Retain our column names
- result.columns._set_names(
- sobj.columns.names, level=list(range(sobj.columns.nlevels))
- )
- # select everything except for the last level, which is the one
- # containing the name of the function(s), see GH#32040
- result.columns = result.columns.droplevel(-1)
+ # GH#32040, GH#35246
+ # e.g. test_groupby_as_index_select_column_sum_empty_df
+ result.columns = self._obj_with_exclusions.columns.copy()
if not self.as_index:
result = self._insert_inaxis_grouper(result)
@@ -1358,17 +1345,9 @@ def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
obj = self._obj_with_exclusions
result: dict[Hashable, NDFrame | np.ndarray] = {}
- if self.axis == 0:
- # test_pass_args_kwargs_duplicate_columns gets here with non-unique columns
- for name, data in self.grouper.get_iterator(obj, self.axis):
- fres = func(data, *args, **kwargs)
- result[name] = fres
- else:
- # we get here in a number of test_multilevel tests
- for name in self.indices:
- grp_df = self.get_group(name, obj=obj)
- fres = func(grp_df, *args, **kwargs)
- result[name] = fres
+ for name, grp_df in self.grouper.get_iterator(obj, self.axis):
+ fres = func(grp_df, *args, **kwargs)
+ result[name] = fres
result_index = self.grouper.result_index
other_ax = obj.axes[1 - self.axis]
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51226 | 2023-02-08T01:47:49Z | 2023-02-08T17:15:19Z | 2023-02-08T17:15:19Z | 2023-02-08T17:16:54Z |
REF: remove _transform_item_by_item | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index fd07b472fc3da..d0fdb7d5cca0f 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1558,9 +1558,8 @@ def _transform_general(self, func, *args, **kwargs):
object.__setattr__(group, "name", name)
try:
path, res = self._choose_path(fast_path, slow_path, group)
- except TypeError:
- return self._transform_item_by_item(obj, fast_path)
except ValueError as err:
+ # e.g. test_transform_with_non_scalar_group
msg = "transform must return a scalar value for each group"
raise ValueError(msg) from err
if group.size > 0:
@@ -1693,17 +1692,6 @@ def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFram
return path, res
- def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame:
- # iterate through columns, see test_transform_exclude_nuisance
- # gets here with non-unique columns
- output = {}
- for i, (colname, sgb) in enumerate(self._iterate_column_groupbys(obj)):
- output[i] = sgb.transform(wrapper)
-
- result = self.obj._constructor(output, index=obj.index)
- result.columns = obj.columns
- return result
-
def filter(self, func, dropna: bool = True, *args, **kwargs):
"""
Filter elements from groups that don't satisfy a criterion.
| AFAICT this is leftover from when we dropped nuisance columns. | https://api.github.com/repos/pandas-dev/pandas/pulls/51224 | 2023-02-08T00:27:47Z | 2023-02-08T16:56:41Z | 2023-02-08T16:56:41Z | 2023-02-08T17:09:08Z |
ENH: add unit, as_unit to dt accessor | diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index 659385c611ff0..5a43e5796d1d9 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -326,6 +326,7 @@ Datetime properties
Series.dt.days_in_month
Series.dt.tz
Series.dt.freq
+ Series.dt.unit
Datetime methods
^^^^^^^^^^^^^^^^
@@ -346,6 +347,7 @@ Datetime methods
Series.dt.ceil
Series.dt.month_name
Series.dt.day_name
+ Series.dt.as_unit
Period properties
^^^^^^^^^^^^^^^^^
@@ -370,6 +372,7 @@ Timedelta properties
Series.dt.microseconds
Series.dt.nanoseconds
Series.dt.components
+ Series.dt.unit
Timedelta methods
^^^^^^^^^^^^^^^^^
@@ -380,6 +383,7 @@ Timedelta methods
Series.dt.to_pytimedelta
Series.dt.total_seconds
+ Series.dt.as_unit
.. _api.series.str:
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index f907e89880d25..c9113182f862e 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -291,6 +291,7 @@ Other enhancements
- Improved error message in :func:`to_datetime` for non-ISO8601 formats, informing users about the position of the first error (:issue:`50361`)
- Improved error message when trying to align :class:`DataFrame` objects (for example, in :func:`DataFrame.compare`) to clarify that "identically labelled" refers to both index and columns (:issue:`50083`)
- Added :meth:`DatetimeIndex.as_unit` and :meth:`TimedeltaIndex.as_unit` to convert to different resolutions; supported resolutions are "s", "ms", "us", and "ns" (:issue:`50616`)
+- Added :meth:`Series.dt.unit` and :meth:`Series.dt.as_unit` to convert to different resolutions; supported resolutions are "s", "ms", "us", and "ns" (:issue:`51223`)
- Added new argument ``dtype`` to :func:`read_sql` to be consistent with :func:`read_sql_query` (:issue:`50797`)
-
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 1bf43f61a67a7..d2154265d5cb0 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -228,7 +228,9 @@ def _scalar_type(self) -> type[Timestamp]:
"nanosecond",
]
_other_ops: list[str] = ["date", "time", "timetz"]
- _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _other_ops
+ _datetimelike_ops: list[str] = (
+ _field_ops + _object_ops + _bool_ops + _other_ops + ["unit"]
+ )
_datetimelike_methods: list[str] = [
"to_period",
"tz_localize",
@@ -240,6 +242,7 @@ def _scalar_type(self) -> type[Timestamp]:
"ceil",
"month_name",
"day_name",
+ "as_unit",
]
# ndim is inherited from ExtensionArray, must exist to ensure
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 861c9712cd2ae..65eaf83c48bb8 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -137,13 +137,14 @@ def _scalar_type(self) -> type[Timedelta]:
_bool_ops: list[str] = []
_object_ops: list[str] = ["freq"]
_field_ops: list[str] = ["days", "seconds", "microseconds", "nanoseconds"]
- _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops
+ _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + ["unit"]
_datetimelike_methods: list[str] = [
"to_pytimedelta",
"total_seconds",
"round",
"floor",
"ceil",
+ "as_unit",
]
# Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray)
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index 7525e8131fabf..788448f2c7be6 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -227,10 +227,14 @@ def isocalendar(self):
@delegate_names(
- delegate=DatetimeArray, accessors=DatetimeArray._datetimelike_ops, typ="property"
+ delegate=DatetimeArray,
+ accessors=DatetimeArray._datetimelike_ops + ["unit"],
+ typ="property",
)
@delegate_names(
- delegate=DatetimeArray, accessors=DatetimeArray._datetimelike_methods, typ="method"
+ delegate=DatetimeArray,
+ accessors=DatetimeArray._datetimelike_methods + ["as_unit"],
+ typ="method",
)
class DatetimeProperties(Properties):
"""
diff --git a/pandas/tests/series/accessors/test_cat_accessor.py b/pandas/tests/series/accessors/test_cat_accessor.py
index 9266afc89fa19..c999a3efee31f 100644
--- a/pandas/tests/series/accessors/test_cat_accessor.py
+++ b/pandas/tests/series/accessors/test_cat_accessor.py
@@ -167,6 +167,7 @@ def test_dt_accessor_api_for_categorical(self, idx):
("floor", ("D",), {}),
("ceil", ("D",), {}),
("asfreq", ("D",), {}),
+ ("as_unit", ("s"), {}),
]
if idx.dtype == "M8[ns]":
# exclude dt64tz since that is already localized and would raise
diff --git a/pandas/tests/series/accessors/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py
index 1914bdae07e4b..a7e3cd43d1a6e 100644
--- a/pandas/tests/series/accessors/test_dt_accessor.py
+++ b/pandas/tests/series/accessors/test_dt_accessor.py
@@ -55,6 +55,7 @@
"day_name",
"month_name",
"isocalendar",
+ "as_unit",
]
ok_for_td = TimedeltaArray._datetimelike_ops
ok_for_td_methods = [
@@ -64,6 +65,7 @@
"round",
"floor",
"ceil",
+ "as_unit",
]
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51223 | 2023-02-07T23:53:33Z | 2023-02-09T16:16:25Z | 2023-02-09T16:16:25Z | 2023-02-09T16:19:09Z |
REF: remove aggregate_item_by_item | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index fd07b472fc3da..471ff1697a34a 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1378,22 +1378,6 @@ def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
return out
- def _aggregate_item_by_item(self, func, *args, **kwargs) -> DataFrame:
- # only for axis==0
- # tests that get here with non-unique cols:
- # test_resample_with_timedelta_yields_no_empty_groups,
- # test_resample_apply_product
-
- obj = self._obj_with_exclusions
- result: dict[int, NDFrame] = {}
-
- for i, (item, sgb) in enumerate(self._iterate_column_groupbys(obj)):
- result[i] = sgb.aggregate(func, *args, **kwargs)
-
- res_df = self.obj._constructor(result)
- res_df.columns = obj.columns
- return res_df
-
def _wrap_applied_output(
self,
data: DataFrame,
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 613f841a9a340..ccc32043b4dd2 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -40,10 +40,7 @@
npt,
)
from pandas.compat.numpy import function as nv
-from pandas.errors import (
- AbstractMethodError,
- DataError,
-)
+from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
@@ -423,15 +420,13 @@ def _groupby_and_aggregate(self, how, *args, **kwargs):
)
try:
- if isinstance(obj, ABCDataFrame) and callable(how):
- # Check if the function is reducing or not.
- # e.g. test_resample_apply_with_additional_args
- result = grouped._aggregate_item_by_item(how, *args, **kwargs)
+ if callable(how):
+ # TODO: test_resample_apply_with_additional_args fails if we go
+ # through the non-lambda path, not clear that it should.
+ func = lambda x: how(x, *args, **kwargs)
+ result = grouped.aggregate(func)
else:
result = grouped.aggregate(how, *args, **kwargs)
- except DataError:
- # got TypeErrors on aggregation
- result = grouped.apply(how, *args, **kwargs)
except (AttributeError, KeyError):
# we have a non-reducing function; try to evaluate
# alternatively we want to evaluate only a column of the input
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51222 | 2023-02-07T23:33:01Z | 2023-02-08T16:55:35Z | 2023-02-08T16:55:35Z | 2023-02-08T17:10:02Z |
DEP: Deprecated pad/backfill for Series/DataFrame | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 7d028935ad175..c504afa195192 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -784,6 +784,10 @@ Deprecations
- Deprecated calling ``float`` or ``int`` on a single element :class:`Series` to return a ``float`` or ``int`` respectively. Extract the element before calling ``float`` or ``int`` instead (:issue:`51101`)
- Deprecated :meth:`Grouper.groups`, use :meth:`Groupby.groups` instead (:issue:`51182`)
- Deprecated :meth:`Grouper.grouper`, use :meth:`Groupby.grouper` instead (:issue:`51182`)
+- Deprecated :meth:`Series.pad` in favor of :meth:`Series.ffill` (:issue:`33396`)
+- Deprecated :meth:`Series.backfill` in favor of :meth:`Series.bfill` (:issue:`33396`)
+- Deprecated :meth:`DataFrame.pad` in favor of :meth:`DataFrame.ffill` (:issue:`33396`)
+- Deprecated :meth:`DataFrame.backfill` in favor of :meth:`DataFrame.bfill` (:issue:`33396`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6008e6b6cb566..d8809b64bd98d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6999,7 +6999,34 @@ def ffill(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
- pad = ffill
+ @doc(klass=_shared_doc_kwargs["klass"])
+ def pad(
+ self: NDFrameT,
+ *,
+ axis: None | Axis = None,
+ inplace: bool_t = False,
+ limit: None | int = None,
+ downcast: dict | None = None,
+ ) -> NDFrameT | None:
+ """
+ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
+
+ .. deprecated:: 2.0
+
+ {klass}.pad is deprecated. Use {klass}.ffill instead.
+
+ Returns
+ -------
+ {klass} or None
+ Object with missing values filled or None if ``inplace=True``.
+ """
+ warnings.warn(
+ "DataFrame.pad/Series.pad is deprecated. Use "
+ "DataFrame.ffill/Series.ffill instead",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self.ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast)
@overload
def bfill(
@@ -7055,7 +7082,34 @@ def bfill(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
- backfill = bfill
+ @doc(klass=_shared_doc_kwargs["klass"])
+ def backfill(
+ self: NDFrameT,
+ *,
+ axis: None | Axis = None,
+ inplace: bool_t = False,
+ limit: None | int = None,
+ downcast: dict | None = None,
+ ) -> NDFrameT | None:
+ """
+ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
+
+ .. deprecated:: 2.0
+
+ {klass}.backfill is deprecated. Use {klass}.backfill instead.
+
+ Returns
+ -------
+ {klass} or None
+ Object with missing values filled or None if ``inplace=True``.
+ """
+ warnings.warn(
+ "DataFrame.backfill/Series.backfill is deprecated. Use "
+ "DataFrame.bfill/Series.bfill instead",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self.bfill(axis=axis, inplace=inplace, limit=limit, downcast=downcast)
@overload
def replace(
diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py
index 0645afd861029..b3f63db05dd28 100644
--- a/pandas/tests/frame/methods/test_fillna.py
+++ b/pandas/tests/frame/methods/test_fillna.py
@@ -769,3 +769,11 @@ def test_fillna_nones_inplace():
expected = DataFrame([[1, 2], [1, 2]], columns=["A", "B"])
tm.assert_frame_equal(df, expected)
+
+
+@pytest.mark.parametrize("func", ["pad", "backfill"])
+def test_pad_backfill_deprecated(func):
+ # GH#33396
+ df = DataFrame({"a": [1, 2, 3]})
+ with tm.assert_produces_warning(FutureWarning):
+ getattr(df, func)()
diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py
index 9fded26c37caf..b30f2ca4b4acd 100644
--- a/pandas/tests/series/methods/test_fillna.py
+++ b/pandas/tests/series/methods/test_fillna.py
@@ -970,3 +970,10 @@ def test_fillna_parr(self):
filled = ser.fillna(method="pad")
tm.assert_series_equal(filled, expected)
+
+ @pytest.mark.parametrize("func", ["pad", "backfill"])
+ def test_pad_backfill_deprecated(self, func):
+ # GH#33396
+ ser = Series([1, 2, 3])
+ with tm.assert_produces_warning(FutureWarning):
+ getattr(ser, func)()
| - [x] xref #33396 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
We should make this consistent again with GroupBy etc. | https://api.github.com/repos/pandas-dev/pandas/pulls/51221 | 2023-02-07T23:25:22Z | 2023-02-08T17:46:44Z | 2023-02-08T17:46:44Z | 2023-02-08T18:07:24Z |
REF: de-duplicate BusinessDay apply, apply_array | diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 79332f8ede936..132d1033016ae 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -1582,25 +1582,7 @@ cdef class BusinessDay(BusinessMixin):
# avoid slowness below by operating on weeks first
weeks = n // 5
- if n <= 0 and wday > 4:
- # roll forward
- n += 1
-
- n -= 5 * weeks
-
- # n is always >= 0 at this point
- if n == 0 and wday > 4:
- # roll back
- days = 4 - wday
- elif wday > 4:
- # roll forward
- days = (7 - wday) + (n - 1)
- elif wday + n <= 4:
- # shift by n days without leaving the current week
- days = n
- else:
- # shift by n days plus 2 to get past the weekend
- days = n + 2
+ days = self._adjust_ndays(wday, weeks)
result = other + timedelta(days=7 * weeks + days)
if self.offset:
@@ -1617,11 +1599,90 @@ cdef class BusinessDay(BusinessMixin):
"Only know how to combine business day with datetime or timedelta."
)
+ @cython.wraparound(False)
+ @cython.boundscheck(False)
+ cdef ndarray _shift_bdays(
+ self,
+ ndarray i8other,
+ NPY_DATETIMEUNIT reso=NPY_DATETIMEUNIT.NPY_FR_ns,
+ ):
+ """
+ Implementation of BusinessDay.apply_offset.
+
+ Parameters
+ ----------
+ i8other : const int64_t[:]
+ reso : NPY_DATETIMEUNIT, default NPY_FR_ns
+
+ Returns
+ -------
+ ndarray[int64_t]
+ """
+ cdef:
+ int periods = self.n
+ Py_ssize_t i, n = i8other.size
+ ndarray result = cnp.PyArray_EMPTY(
+ i8other.ndim, i8other.shape, cnp.NPY_INT64, 0
+ )
+ int64_t val, res_val
+ int wday, days
+ npy_datetimestruct dts
+ int64_t DAY_PERIODS = periods_per_day(reso)
+ cnp.broadcast mi = cnp.PyArray_MultiIterNew2(result, i8other)
+
+ for i in range(n):
+ # Analogous to: val = i8other[i]
+ val = (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 1))[0]
+
+ if val == NPY_NAT:
+ res_val = NPY_NAT
+ else:
+ # The rest of this is effectively a copy of BusinessDay.apply
+ weeks = periods // 5
+ pandas_datetime_to_datetimestruct(val, reso, &dts)
+ wday = dayofweek(dts.year, dts.month, dts.day)
+
+ days = self._adjust_ndays(wday, weeks)
+ res_val = val + (7 * weeks + days) * DAY_PERIODS
+
+ # Analogous to: out[i] = res_val
+ (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = res_val
+
+ cnp.PyArray_MultiIter_NEXT(mi)
+
+ return result
+
+ cdef int _adjust_ndays(self, int wday, int weeks):
+ cdef:
+ int n = self.n
+ int days
+
+ if n <= 0 and wday > 4:
+ # roll forward
+ n += 1
+
+ n -= 5 * weeks
+
+ # n is always >= 0 at this point
+ if n == 0 and wday > 4:
+ # roll back
+ days = 4 - wday
+ elif wday > 4:
+ # roll forward
+ days = (7 - wday) + (n - 1)
+ elif wday + n <= 4:
+ # shift by n days without leaving the current week
+ days = n
+ else:
+ # shift by n days plus 2 to get past the weekend
+ days = n + 2
+ return days
+
@apply_array_wraps
def _apply_array(self, dtarr):
i8other = dtarr.view("i8")
reso = get_unit_from_dtype(dtarr.dtype)
- res = _shift_bdays(i8other, self.n, reso=reso)
+ res = self._shift_bdays(i8other, reso=reso)
if self.offset:
res = res.view(dtarr.dtype) + Timedelta(self.offset)
res = res.view("i8")
@@ -4328,80 +4389,6 @@ def shift_months(
return out
-@cython.wraparound(False)
-@cython.boundscheck(False)
-cdef ndarray _shift_bdays(
- ndarray i8other,
- int periods,
- NPY_DATETIMEUNIT reso=NPY_DATETIMEUNIT.NPY_FR_ns,
-):
- """
- Implementation of BusinessDay.apply_offset.
-
- Parameters
- ----------
- i8other : const int64_t[:]
- periods : int
- reso : NPY_DATETIMEUNIT, default NPY_FR_ns
-
- Returns
- -------
- ndarray[int64_t]
- """
- cdef:
- Py_ssize_t i, n = i8other.size
- ndarray result = cnp.PyArray_EMPTY(
- i8other.ndim, i8other.shape, cnp.NPY_INT64, 0
- )
- int64_t val, res_val
- int wday, nadj, days
- npy_datetimestruct dts
- int64_t DAY_PERIODS = periods_per_day(reso)
- cnp.broadcast mi = cnp.PyArray_MultiIterNew2(result, i8other)
-
- for i in range(n):
- # Analogous to: val = i8other[i]
- val = (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 1))[0]
-
- if val == NPY_NAT:
- res_val = NPY_NAT
- else:
- # The rest of this is effectively a copy of BusinessDay.apply
- nadj = periods
- weeks = nadj // 5
- pandas_datetime_to_datetimestruct(val, reso, &dts)
- wday = dayofweek(dts.year, dts.month, dts.day)
-
- if nadj <= 0 and wday > 4:
- # roll forward
- nadj += 1
-
- nadj -= 5 * weeks
-
- # nadj is always >= 0 at this point
- if nadj == 0 and wday > 4:
- # roll back
- days = 4 - wday
- elif wday > 4:
- # roll forward
- days = (7 - wday) + (nadj - 1)
- elif wday + nadj <= 4:
- # shift by n days without leaving the current week
- days = nadj
- else:
- # shift by nadj days plus 2 to get past the weekend
- days = nadj + 2
-
- res_val = val + (7 * weeks + days) * DAY_PERIODS
-
- # Analogous to: out[i] = res_val
- (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = res_val
-
- cnp.PyArray_MultiIter_NEXT(mi)
-
- return result
-
-
def shift_month(stamp: datetime, months: int, day_opt: object = None) -> datetime:
"""
Given a datetime (or Timestamp) `stamp`, an integer `months` and an
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51220 | 2023-02-07T21:54:53Z | 2023-02-08T17:50:26Z | 2023-02-08T17:50:26Z | 2023-02-08T17:53:41Z |
CI/TST: Mark test_basic_series_frame_alignment for python engine as flaky | diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 86be104b16367..8bd26a7b0b4c7 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -862,7 +862,7 @@ def test_basic_series_frame_alignment(
):
if (
engine == "numexpr"
- and parser == "pandas"
+ and parser in ("pandas", "python")
and index_name == "index"
and r_idx_type == "i"
and c_idx_type == "s"
| Seems to also be flaky with the python engine: https://github.com/pandas-dev/pandas/actions/runs/4109864025/jobs/7092127908 | https://api.github.com/repos/pandas-dev/pandas/pulls/51219 | 2023-02-07T20:53:35Z | 2023-02-10T12:18:18Z | 2023-02-10T12:18:18Z | 2023-02-10T17:43:11Z |
DOC Correcting EX02 errors | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 2fd772b3015b8..3ab63b9bcb860 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -590,7 +590,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.Series.sparse.sp_values \
pandas.Timestamp.fromtimestamp \
pandas.api.types.infer_dtype \
- pandas.api.types.is_any_real_numeric_dtype \
pandas.api.types.is_bool_dtype \
pandas.api.types.is_categorical_dtype \
pandas.api.types.is_complex_dtype \
@@ -602,8 +601,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.api.types.is_int64_dtype \
pandas.api.types.is_integer_dtype \
pandas.api.types.is_interval_dtype \
- pandas.api.types.is_iterator \
- pandas.api.types.is_list_like \
pandas.api.types.is_named_tuple \
pandas.api.types.is_numeric_dtype \
pandas.api.types.is_object_dtype \
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 2a7c793c0096c..c14437be31f63 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -276,6 +276,7 @@ def is_iterator(obj: object) -> bool:
Examples
--------
>>> import datetime
+ >>> from pandas.api.types import is_iterator
>>> is_iterator((x for x in []))
True
>>> is_iterator([1, 2, 3])
@@ -1126,6 +1127,7 @@ def is_list_like(obj: object, allow_sets: bool = True) -> bool:
Examples
--------
>>> import datetime
+ >>> from pandas.api.types import is_list_like
>>> is_list_like([1, 2, 3])
True
>>> is_list_like({1, 2, 3})
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index eaa033e3d469a..2b21cee06a632 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1177,6 +1177,7 @@ def is_any_real_numeric_dtype(arr_or_dtype) -> bool:
Examples
--------
+ >>> from pandas.api.types import is_any_real_numeric_dtype
>>> is_any_real_numeric_dtype(int)
True
>>> is_any_real_numeric_dtype(float)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Towards https://github.com/pandas-dev/pandas/issues/37875 and https://github.com/pandas-dev/pandas/issues/27977 | https://api.github.com/repos/pandas-dev/pandas/pulls/51217 | 2023-02-07T18:37:50Z | 2023-02-07T19:42:05Z | 2023-02-07T19:42:05Z | 2023-02-08T08:10:43Z |
REF: unused group_keys, indexer from BaseGrouper | diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index dc109f6b30d5c..28e104ec1bc0e 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -677,11 +677,6 @@ class BaseGrouper:
for example for grouper list to groupby, need to pass the list
sort : bool, default True
whether this grouper will give sorted result or not
- group_keys : bool, default True
- indexer : np.ndarray[np.intp], optional
- the indexer created by Grouper
- some groupers (TimeGrouper) will sort its axis and its
- group_info is also sorted, so need the indexer to reorder
"""
@@ -692,8 +687,6 @@ def __init__(
axis: Index,
groupings: Sequence[grouper.Grouping],
sort: bool = True,
- group_keys: bool = True,
- indexer: npt.NDArray[np.intp] | None = None,
dropna: bool = True,
) -> None:
assert isinstance(axis, Index), axis
@@ -701,8 +694,6 @@ def __init__(
self.axis = axis
self._groupings: list[grouper.Grouping] = list(groupings)
self._sort = sort
- self.group_keys = group_keys
- self.indexer = indexer
self.dropna = dropna
@property
@@ -886,14 +877,10 @@ def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
return comp_ids, obs_group_ids, ngroups
- @final
@cache_readonly
def codes_info(self) -> npt.NDArray[np.intp]:
# return the codes of items in original grouped axis
ids, _, _ = self.group_info
- if self.indexer is not None:
- sorter = np.lexsort((ids, self.indexer))
- ids = ids[sorter]
return ids
@final
@@ -1047,7 +1034,10 @@ class BinGrouper(BaseGrouper):
----------
bins : the split index of binlabels to group the item of axis
binlabels : the label list
- indexer : np.ndarray[np.intp]
+ indexer : np.ndarray[np.intp], optional
+ the indexer created by Grouper
+ some groupers (TimeGrouper) will sort its axis and its
+ group_info is also sorted, so need the indexer to reorder
Examples
--------
@@ -1101,6 +1091,15 @@ def nkeys(self) -> int:
# still matches len(self.groupings), but we can hard-code
return 1
+ @cache_readonly
+ def codes_info(self) -> npt.NDArray[np.intp]:
+ # return the codes of items in original grouped axis
+ ids, _, _ = self.group_info
+ if self.indexer is not None:
+ sorter = np.lexsort((ids, self.indexer))
+ ids = ids[sorter]
+ return ids
+
def get_iterator(self, data: NDFrame, axis: AxisInt = 0):
"""
Groupby iterator
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51214 | 2023-02-07T17:36:08Z | 2023-02-07T20:36:24Z | 2023-02-07T20:36:24Z | 2023-02-07T20:37:23Z |
REF: remove copy keyword from ensure_foo | diff --git a/pandas/_libs/algos.pyi b/pandas/_libs/algos.pyi
index 5a2005722c85c..20a805533e8cc 100644
--- a/pandas/_libs/algos.pyi
+++ b/pandas/_libs/algos.pyi
@@ -127,12 +127,12 @@ def diff_2d(
) -> None: ...
def ensure_platform_int(arr: object) -> npt.NDArray[np.intp]: ...
def ensure_object(arr: object) -> npt.NDArray[np.object_]: ...
-def ensure_float64(arr: object, copy=...) -> npt.NDArray[np.float64]: ...
-def ensure_int8(arr: object, copy=...) -> npt.NDArray[np.int8]: ...
-def ensure_int16(arr: object, copy=...) -> npt.NDArray[np.int16]: ...
-def ensure_int32(arr: object, copy=...) -> npt.NDArray[np.int32]: ...
-def ensure_int64(arr: object, copy=...) -> npt.NDArray[np.int64]: ...
-def ensure_uint64(arr: object, copy=...) -> npt.NDArray[np.uint64]: ...
+def ensure_float64(arr: object) -> npt.NDArray[np.float64]: ...
+def ensure_int8(arr: object) -> npt.NDArray[np.int8]: ...
+def ensure_int16(arr: object) -> npt.NDArray[np.int16]: ...
+def ensure_int32(arr: object) -> npt.NDArray[np.int32]: ...
+def ensure_int64(arr: object) -> npt.NDArray[np.int64]: ...
+def ensure_uint64(arr: object) -> npt.NDArray[np.uint64]: ...
def take_1d_int8_int8(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
diff --git a/pandas/_libs/algos_common_helper.pxi.in b/pandas/_libs/algos_common_helper.pxi.in
index ce2e1ffbb5870..ee815b8bbf7a6 100644
--- a/pandas/_libs/algos_common_helper.pxi.in
+++ b/pandas/_libs/algos_common_helper.pxi.in
@@ -60,14 +60,14 @@ def get_dispatch(dtypes):
{{for name, c_type, dtype in get_dispatch(dtypes)}}
-def ensure_{{name}}(object arr, copy=True):
+def ensure_{{name}}(object arr):
if util.is_array(arr):
if (<ndarray>arr).descr.type_num == NPY_{{c_type}}:
return arr
else:
- # equiv: arr.astype(np.{{dtype}}, copy=copy)
+ # equiv: arr.astype(np.{{dtype}})
return cnp.PyArray_Cast(<ndarray>arr, cnp.NPY_{{c_type}})
else:
- return np.array(arr, dtype=np.{{dtype}})
+ return np.asarray(arr, dtype=np.{{dtype}})
{{endfor}}
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 8c4576ee554ec..bb8e6e120f692 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -2149,10 +2149,14 @@ def injection(obj):
# upcast 'by' parameter because HashTable is limited
by_type = _get_cython_type_upcast(lbv.dtype)
by_type_caster = _type_casters[by_type]
- # error: Cannot call function of unknown type
- left_by_values = by_type_caster(lbv) # type: ignore[operator]
- # error: Cannot call function of unknown type
- right_by_values = by_type_caster(rbv) # type: ignore[operator]
+ # error: Incompatible types in assignment (expression has type
+ # "ndarray[Any, dtype[generic]]", variable has type
+ # "List[Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series]]")
+ left_by_values = by_type_caster(lbv) # type: ignore[assignment]
+ # error: Incompatible types in assignment (expression has type
+ # "ndarray[Any, dtype[generic]]", variable has type
+ # "List[Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series]]")
+ right_by_values = by_type_caster(rbv) # type: ignore[assignment]
# choose appropriate function by type
func = _asof_by_function(self.direction)
| It wasn't respected anyway. | https://api.github.com/repos/pandas-dev/pandas/pulls/51213 | 2023-02-07T17:27:10Z | 2023-02-08T17:57:43Z | 2023-02-08T17:57:43Z | 2023-02-08T17:59:41Z |
TST: suppress deprecation messages in doc tests for Index.is_foo | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index ed2e3a7499728..de3e1028f5f00 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2268,15 +2268,15 @@ def is_boolean(self) -> bool:
Examples
--------
>>> idx = pd.Index([True, False, True])
- >>> idx.is_boolean()
+ >>> idx.is_boolean() # doctest: +SKIP
True
>>> idx = pd.Index(["True", "False", "True"])
- >>> idx.is_boolean()
+ >>> idx.is_boolean() # doctest: +SKIP
False
>>> idx = pd.Index([True, False, "True"])
- >>> idx.is_boolean()
+ >>> idx.is_boolean() # doctest: +SKIP
False
"""
warnings.warn(
@@ -2312,15 +2312,15 @@ def is_integer(self) -> bool:
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
- >>> idx.is_integer()
+ >>> idx.is_integer() # doctest: +SKIP
True
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
- >>> idx.is_integer()
+ >>> idx.is_integer() # doctest: +SKIP
False
>>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
- >>> idx.is_integer()
+ >>> idx.is_integer() # doctest: +SKIP
False
"""
warnings.warn(
@@ -2360,19 +2360,19 @@ def is_floating(self) -> bool:
Examples
--------
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
- >>> idx.is_floating()
+ >>> idx.is_floating() # doctest: +SKIP
True
>>> idx = pd.Index([1.0, 2.0, np.nan, 4.0])
- >>> idx.is_floating()
+ >>> idx.is_floating() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4, np.nan])
- >>> idx.is_floating()
+ >>> idx.is_floating() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4])
- >>> idx.is_floating()
+ >>> idx.is_floating() # doctest: +SKIP
False
"""
warnings.warn(
@@ -2408,23 +2408,23 @@ def is_numeric(self) -> bool:
Examples
--------
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
- >>> idx.is_numeric()
+ >>> idx.is_numeric() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4.0])
- >>> idx.is_numeric()
+ >>> idx.is_numeric() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4])
- >>> idx.is_numeric()
+ >>> idx.is_numeric() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4.0, np.nan])
- >>> idx.is_numeric()
+ >>> idx.is_numeric() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"])
- >>> idx.is_numeric()
+ >>> idx.is_numeric() # doctest: +SKIP
False
"""
warnings.warn(
@@ -2460,20 +2460,20 @@ def is_object(self) -> bool:
Examples
--------
>>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
- >>> idx.is_object()
+ >>> idx.is_object() # doctest: +SKIP
True
>>> idx = pd.Index(["Apple", "Mango", 2.0])
- >>> idx.is_object()
+ >>> idx.is_object() # doctest: +SKIP
True
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
- >>> idx.is_object()
+ >>> idx.is_object() # doctest: +SKIP
False
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
- >>> idx.is_object()
+ >>> idx.is_object() # doctest: +SKIP
False
"""
warnings.warn(
@@ -2511,11 +2511,11 @@ def is_categorical(self) -> bool:
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
- >>> idx.is_categorical()
+ >>> idx.is_categorical() # doctest: +SKIP
True
>>> idx = pd.Index([1, 3, 5, 7])
- >>> idx.is_categorical()
+ >>> idx.is_categorical() # doctest: +SKIP
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
@@ -2525,7 +2525,7 @@ def is_categorical(self) -> bool:
2 Elisabeth
3 Mar
dtype: object
- >>> s.index.is_categorical()
+ >>> s.index.is_categorical() # doctest: +SKIP
False
"""
warnings.warn(
@@ -2564,11 +2564,11 @@ def is_interval(self) -> bool:
--------
>>> idx = pd.Index([pd.Interval(left=0, right=5),
... pd.Interval(left=5, right=10)])
- >>> idx.is_interval()
+ >>> idx.is_interval() # doctest: +SKIP
True
>>> idx = pd.Index([1, 3, 5, 7])
- >>> idx.is_interval()
+ >>> idx.is_interval() # doctest: +SKIP
False
"""
warnings.warn(
| There were a lot of deprecation messages shown when running `ci/code_checks.sh docstrings`.
This suppresses those messages.
Example of messages that are now suppressed:
```
<doctest pandas.Index.is_boolean[1]>:1: FutureWarning: Index.is_boolean is deprecated. Use pandas.api.types.is_bool_type instead.
idx.is_boolean()
<doctest pandas.Index.is_categorical[1]>:1: FutureWarning: CategoricalIndex.is_categorical is deprecated.Use pandas.api.types.is_categorical_dtype instead
idx.is_categorical()
<doctest pandas.Index.is_categorical[3]>:1: FutureWarning: Index.is_categorical is deprecated.Use pandas.api.types.is_categorical_dtype instead
idx.is_categorical()
<doctest pandas.Index.is_categorical[6]>:1: FutureWarning: RangeIndex.is_categorical is deprecated.Use pandas.api.types.is_categorical_dtype instead
s.index.is_categorical()
```
xref #50042. | https://api.github.com/repos/pandas-dev/pandas/pulls/51212 | 2023-02-07T16:07:48Z | 2023-02-08T16:43:12Z | 2023-02-08T16:43:12Z | 2023-02-08T17:03:00Z |
TST: parametrize CoW indexing tests for extension (nullable) dtypes | diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 892205cebf6a8..79c42d2577ade 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -827,8 +827,9 @@ def _slice_take_blocks_ax0(
# A non-consolidatable block, it's easy, because there's
# only one item and each mgr loc is a copy of that single
# item.
+ deep = not (only_slice or using_copy_on_write())
for mgr_loc in mgr_locs:
- newblk = blk.copy(deep=not only_slice)
+ newblk = blk.copy(deep=deep)
newblk.mgr_locs = BlockPlacement(slice(mgr_loc, mgr_loc + 1))
blocks.append(newblk)
diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py
index a673d8b37a008..7567ca27b8a97 100644
--- a/pandas/tests/copy_view/test_indexing.py
+++ b/pandas/tests/copy_view/test_indexing.py
@@ -3,6 +3,8 @@
from pandas.errors import SettingWithCopyWarning
+from pandas.core.dtypes.common import is_float_dtype
+
import pandas as pd
from pandas import (
DataFrame,
@@ -11,13 +13,47 @@
import pandas._testing as tm
from pandas.tests.copy_view.util import get_array
+
+@pytest.fixture(params=["numpy", "nullable"])
+def backend(request):
+ if request.param == "numpy":
+
+ def make_dataframe(*args, **kwargs):
+ return DataFrame(*args, **kwargs)
+
+ def make_series(*args, **kwargs):
+ return Series(*args, **kwargs)
+
+ elif request.param == "nullable":
+
+ def make_dataframe(*args, **kwargs):
+ df = DataFrame(*args, **kwargs)
+ df_nullable = df.convert_dtypes()
+ # convert_dtypes will try to cast float to int if there is no loss in
+ # precision -> undo that change
+ for col in df.columns:
+ if is_float_dtype(df[col].dtype) and not is_float_dtype(
+ df_nullable[col].dtype
+ ):
+ df_nullable[col] = df_nullable[col].astype("Float64")
+ # copy final result to ensure we start with a fully self-owning DataFrame
+ return df_nullable.copy()
+
+ def make_series(*args, **kwargs):
+ ser = Series(*args, **kwargs)
+ return ser.convert_dtypes().copy()
+
+ return request.param, make_dataframe, make_series
+
+
# -----------------------------------------------------------------------------
# Indexing operations taking subset + modifying the subset/parent
-def test_subset_column_selection(using_copy_on_write):
+def test_subset_column_selection(backend, using_copy_on_write):
# Case: taking a subset of the columns of a DataFrame
# + afterwards modifying the subset
+ _, DataFrame, _ = backend
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
df_orig = df.copy()
@@ -42,12 +78,14 @@ def test_subset_column_selection(using_copy_on_write):
tm.assert_frame_equal(df, df_orig)
-def test_subset_column_selection_modify_parent(using_copy_on_write):
+def test_subset_column_selection_modify_parent(backend, using_copy_on_write):
# Case: taking a subset of the columns of a DataFrame
# + afterwards modifying the parent
+ _, DataFrame, _ = backend
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
subset = df[["a", "c"]]
+
if using_copy_on_write:
# the subset shares memory ...
assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
@@ -63,9 +101,10 @@ def test_subset_column_selection_modify_parent(using_copy_on_write):
tm.assert_frame_equal(subset, expected)
-def test_subset_row_slice(using_copy_on_write):
+def test_subset_row_slice(backend, using_copy_on_write):
# Case: taking a subset of the rows of a DataFrame using a slice
# + afterwards modifying the subset
+ _, DataFrame, _ = backend
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
df_orig = df.copy()
@@ -100,10 +139,13 @@ def test_subset_row_slice(using_copy_on_write):
@pytest.mark.parametrize(
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
)
-def test_subset_column_slice(using_copy_on_write, using_array_manager, dtype):
+def test_subset_column_slice(backend, using_copy_on_write, using_array_manager, dtype):
# Case: taking a subset of the columns of a DataFrame using a slice
# + afterwards modifying the subset
- single_block = (dtype == "int64") and not using_array_manager
+ dtype_backend, DataFrame, _ = backend
+ single_block = (
+ dtype == "int64" and dtype_backend == "numpy"
+ ) and not using_array_manager
df = DataFrame(
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
)
@@ -150,7 +192,12 @@ def test_subset_column_slice(using_copy_on_write, using_array_manager, dtype):
ids=["slice", "mask", "array"],
)
def test_subset_loc_rows_columns(
- dtype, row_indexer, column_indexer, using_array_manager, using_copy_on_write
+ backend,
+ dtype,
+ row_indexer,
+ column_indexer,
+ using_array_manager,
+ using_copy_on_write,
):
# Case: taking a subset of the rows+columns of a DataFrame using .loc
# + afterwards modifying the subset
@@ -158,6 +205,7 @@ def test_subset_loc_rows_columns(
# of those could actually return a view / need CoW (so this test is not
# checking memory sharing, only ensuring subsequent mutation doesn't
# affect the parent dataframe)
+ dtype_backend, DataFrame, _ = backend
df = DataFrame(
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
)
@@ -177,7 +225,14 @@ def test_subset_loc_rows_columns(
if (
isinstance(row_indexer, slice)
and isinstance(column_indexer, slice)
- and (using_array_manager or (dtype == "int64" and not using_copy_on_write))
+ and (
+ using_array_manager
+ or (
+ dtype == "int64"
+ and dtype_backend == "numpy"
+ and not using_copy_on_write
+ )
+ )
):
df_orig.iloc[1, 1] = 0
tm.assert_frame_equal(df, df_orig)
@@ -197,7 +252,12 @@ def test_subset_loc_rows_columns(
ids=["slice", "mask", "array"],
)
def test_subset_iloc_rows_columns(
- dtype, row_indexer, column_indexer, using_array_manager, using_copy_on_write
+ backend,
+ dtype,
+ row_indexer,
+ column_indexer,
+ using_array_manager,
+ using_copy_on_write,
):
# Case: taking a subset of the rows+columns of a DataFrame using .iloc
# + afterwards modifying the subset
@@ -205,6 +265,7 @@ def test_subset_iloc_rows_columns(
# of those could actually return a view / need CoW (so this test is not
# checking memory sharing, only ensuring subsequent mutation doesn't
# affect the parent dataframe)
+ dtype_backend, DataFrame, _ = backend
df = DataFrame(
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
)
@@ -224,7 +285,14 @@ def test_subset_iloc_rows_columns(
if (
isinstance(row_indexer, slice)
and isinstance(column_indexer, slice)
- and (using_array_manager or (dtype == "int64" and not using_copy_on_write))
+ and (
+ using_array_manager
+ or (
+ dtype == "int64"
+ and dtype_backend == "numpy"
+ and not using_copy_on_write
+ )
+ )
):
df_orig.iloc[1, 1] = 0
tm.assert_frame_equal(df, df_orig)
@@ -235,9 +303,10 @@ def test_subset_iloc_rows_columns(
[slice(0, 2), np.array([True, True, False]), np.array([0, 1])],
ids=["slice", "mask", "array"],
)
-def test_subset_set_with_row_indexer(indexer_si, indexer, using_copy_on_write):
+def test_subset_set_with_row_indexer(backend, indexer_si, indexer, using_copy_on_write):
# Case: setting values with a row indexer on a viewing subset
# subset[indexer] = value and subset.iloc[indexer] = value
+ _, DataFrame, _ = backend
df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]})
df_orig = df.copy()
subset = df[1:4]
@@ -271,8 +340,9 @@ def test_subset_set_with_row_indexer(indexer_si, indexer, using_copy_on_write):
tm.assert_frame_equal(df, df_orig)
-def test_subset_set_with_mask(using_copy_on_write):
+def test_subset_set_with_mask(backend, using_copy_on_write):
# Case: setting values with a mask on a viewing subset: subset[mask] = value
+ _, DataFrame, _ = backend
df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]})
df_orig = df.copy()
subset = df[1:4]
@@ -300,18 +370,24 @@ def test_subset_set_with_mask(using_copy_on_write):
tm.assert_frame_equal(df, df_orig)
-def test_subset_set_column(using_copy_on_write):
+def test_subset_set_column(backend, using_copy_on_write):
# Case: setting a single column on a viewing subset -> subset[col] = value
+ dtype_backend, DataFrame, _ = backend
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
df_orig = df.copy()
subset = df[1:3]
+ if dtype_backend == "numpy":
+ arr = np.array([10, 11], dtype="int64")
+ else:
+ arr = pd.array([10, 11], dtype="Int64")
+
if using_copy_on_write:
- subset["a"] = np.array([10, 11], dtype="int64")
+ subset["a"] = arr
else:
with pd.option_context("chained_assignment", "warn"):
with tm.assert_produces_warning(SettingWithCopyWarning):
- subset["a"] = np.array([10, 11], dtype="int64")
+ subset["a"] = arr
subset._mgr._verify_integrity()
expected = DataFrame(
@@ -324,9 +400,12 @@ def test_subset_set_column(using_copy_on_write):
@pytest.mark.parametrize(
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
)
-def test_subset_set_column_with_loc(using_copy_on_write, using_array_manager, dtype):
+def test_subset_set_column_with_loc(
+ backend, using_copy_on_write, using_array_manager, dtype
+):
# Case: setting a single column with loc on a viewing subset
# -> subset.loc[:, col] = value
+ _, DataFrame, _ = backend
df = DataFrame(
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
)
@@ -358,11 +437,12 @@ def test_subset_set_column_with_loc(using_copy_on_write, using_array_manager, dt
tm.assert_frame_equal(df, df_orig)
-def test_subset_set_column_with_loc2(using_copy_on_write, using_array_manager):
+def test_subset_set_column_with_loc2(backend, using_copy_on_write, using_array_manager):
# Case: setting a single column with loc on a viewing subset
# -> subset.loc[:, col] = value
# separate test for case of DataFrame of a single column -> takes a separate
# code path
+ _, DataFrame, _ = backend
df = DataFrame({"a": [1, 2, 3]})
df_orig = df.copy()
subset = df[1:3]
@@ -392,9 +472,10 @@ def test_subset_set_column_with_loc2(using_copy_on_write, using_array_manager):
@pytest.mark.parametrize(
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
)
-def test_subset_set_columns(using_copy_on_write, dtype):
+def test_subset_set_columns(backend, using_copy_on_write, dtype):
# Case: setting multiple columns on a viewing subset
# -> subset[[col1, col2]] = value
+ dtype_backend, DataFrame, _ = backend
df = DataFrame(
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
)
@@ -413,6 +494,12 @@ def test_subset_set_columns(using_copy_on_write, dtype):
# first and third column should certainly have no references anymore
assert all(subset._mgr._has_no_reference(i) for i in [0, 2])
expected = DataFrame({"a": [0, 0], "b": [5, 6], "c": [0, 0]}, index=range(1, 3))
+ if dtype_backend == "nullable":
+ # there is not yet a global option, so overriding a column by setting a scalar
+ # defaults to numpy dtype even if original column was nullable
+ expected["a"] = expected["a"].astype("int64")
+ expected["c"] = expected["c"].astype("int64")
+
tm.assert_frame_equal(subset, expected)
tm.assert_frame_equal(df, df_orig)
@@ -422,11 +509,10 @@ def test_subset_set_columns(using_copy_on_write, dtype):
[slice("a", "b"), np.array([True, True, False]), ["a", "b"]],
ids=["slice", "mask", "array"],
)
-def test_subset_set_with_column_indexer(
- indexer, using_copy_on_write, using_array_manager
-):
+def test_subset_set_with_column_indexer(backend, indexer, using_copy_on_write):
# Case: setting multiple columns with a column indexer on a viewing subset
# -> subset.loc[:, [col1, col2]] = value
+ _, DataFrame, _ = backend
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})
df_orig = df.copy()
subset = df[1:3]
@@ -475,10 +561,11 @@ def test_subset_set_with_column_indexer(
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
)
def test_subset_chained_getitem(
- request, method, dtype, using_copy_on_write, using_array_manager
+ request, backend, method, dtype, using_copy_on_write, using_array_manager
):
# Case: creating a subset using multiple, chained getitem calls using views
# still needs to guarantee proper CoW behaviour
+ _, DataFrame, _ = backend
df = DataFrame(
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
)
@@ -486,14 +573,18 @@ def test_subset_chained_getitem(
# when not using CoW, it depends on whether we have a single block or not
# and whether we are slicing the columns -> in that case we have a view
- subset_is_view = request.node.callspec.id in (
- "single-block-column-iloc-slice",
- "single-block-column-loc-slice",
- ) or (
- request.node.callspec.id
- in ("mixed-block-column-iloc-slice", "mixed-block-column-loc-slice")
- and using_array_manager
- )
+ test_callspec = request.node.callspec.id
+ if not using_array_manager:
+ subset_is_view = test_callspec in (
+ "numpy-single-block-column-iloc-slice",
+ "numpy-single-block-column-loc-slice",
+ )
+ else:
+ # with ArrayManager, it doesn't matter whether we have
+ # single vs mixed block or numpy vs nullable dtypes
+ subset_is_view = test_callspec.endswith(
+ "column-iloc-slice"
+ ) or test_callspec.endswith("column-loc-slice")
# modify subset -> don't modify parent
subset = method(df)
@@ -516,9 +607,10 @@ def test_subset_chained_getitem(
@pytest.mark.parametrize(
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
)
-def test_subset_chained_getitem_column(dtype, using_copy_on_write):
+def test_subset_chained_getitem_column(backend, dtype, using_copy_on_write):
# Case: creating a subset using multiple, chained getitem calls using views
# still needs to guarantee proper CoW behaviour
+ _, DataFrame, Series = backend
df = DataFrame(
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
)
@@ -558,9 +650,10 @@ def test_subset_chained_getitem_column(dtype, using_copy_on_write):
],
ids=["getitem", "iloc", "loc", "long-chain"],
)
-def test_subset_chained_getitem_series(method, using_copy_on_write):
+def test_subset_chained_getitem_series(backend, method, using_copy_on_write):
# Case: creating a subset using multiple, chained getitem calls using views
# still needs to guarantee proper CoW behaviour
+ _, _, Series = backend
s = Series([1, 2, 3], index=["a", "b", "c"])
s_orig = s.copy()
@@ -583,6 +676,7 @@ def test_subset_chained_getitem_series(method, using_copy_on_write):
def test_subset_chained_single_block_row(using_copy_on_write, using_array_manager):
+ # not parametrizing this for dtype backend, since this explicitly tests single block
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
df_orig = df.copy()
@@ -615,9 +709,10 @@ def test_subset_chained_single_block_row(using_copy_on_write, using_array_manage
],
ids=["getitem", "loc", "loc-rows", "iloc", "iloc-rows"],
)
-def test_null_slice(request, method, using_copy_on_write):
+def test_null_slice(backend, method, using_copy_on_write):
# Case: also all variants of indexing with a null slice (:) should return
# new objects to ensure we correctly use CoW for the results
+ _, DataFrame, _ = backend
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
df_orig = df.copy()
@@ -643,7 +738,8 @@ def test_null_slice(request, method, using_copy_on_write):
],
ids=["getitem", "loc", "iloc"],
)
-def test_null_slice_series(request, method, using_copy_on_write):
+def test_null_slice_series(backend, method, using_copy_on_write):
+ _, _, Series = backend
s = Series([1, 2, 3], index=["a", "b", "c"])
s_orig = s.copy()
@@ -667,18 +763,19 @@ def test_null_slice_series(request, method, using_copy_on_write):
# Series -- Indexing operations taking subset + modifying the subset/parent
-def test_series_getitem_slice(using_copy_on_write):
+def test_series_getitem_slice(backend, using_copy_on_write):
# Case: taking a slice of a Series + afterwards modifying the subset
+ _, _, Series = backend
s = Series([1, 2, 3], index=["a", "b", "c"])
s_orig = s.copy()
subset = s[:]
- assert np.shares_memory(subset.values, s.values)
+ assert np.shares_memory(get_array(subset), get_array(s))
subset.iloc[0] = 0
if using_copy_on_write:
- assert not np.shares_memory(subset.values, s.values)
+ assert not np.shares_memory(get_array(subset), get_array(s))
expected = Series([0, 2, 3], index=["a", "b", "c"])
tm.assert_series_equal(subset, expected)
@@ -696,8 +793,11 @@ def test_series_getitem_slice(using_copy_on_write):
[slice(0, 2), np.array([True, True, False]), np.array([0, 1])],
ids=["slice", "mask", "array"],
)
-def test_series_subset_set_with_indexer(indexer_si, indexer, using_copy_on_write):
+def test_series_subset_set_with_indexer(
+ backend, indexer_si, indexer, using_copy_on_write
+):
# Case: setting values in a viewing Series with an indexer
+ _, _, Series = backend
s = Series([1, 2, 3], index=["a", "b", "c"])
s_orig = s.copy()
subset = s[:]
@@ -716,9 +816,10 @@ def test_series_subset_set_with_indexer(indexer_si, indexer, using_copy_on_write
# del operator
-def test_del_frame(using_copy_on_write):
+def test_del_frame(backend, using_copy_on_write):
# Case: deleting a column with `del` on a viewing child dataframe should
# not modify parent + update the references
+ _, DataFrame, _ = backend
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
df_orig = df.copy()
df2 = df[:]
@@ -743,16 +844,17 @@ def test_del_frame(using_copy_on_write):
assert df.loc[0, "a"] == 100
-def test_del_series():
+def test_del_series(backend):
+ _, _, Series = backend
s = Series([1, 2, 3], index=["a", "b", "c"])
s_orig = s.copy()
s2 = s[:]
- assert np.shares_memory(s.values, s2.values)
+ assert np.shares_memory(get_array(s), get_array(s2))
del s2["a"]
- assert not np.shares_memory(s.values, s2.values)
+ assert not np.shares_memory(get_array(s), get_array(s2))
tm.assert_series_equal(s, s_orig)
tm.assert_series_equal(s2, s_orig[["b", "c"]])
@@ -766,20 +868,22 @@ def test_del_series():
# Accessing column as Series
-def test_column_as_series(using_copy_on_write, using_array_manager):
+def test_column_as_series(backend, using_copy_on_write, using_array_manager):
# Case: selecting a single column now also uses Copy-on-Write
+ dtype_backend, DataFrame, Series = backend
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
df_orig = df.copy()
s = df["a"]
- assert np.shares_memory(s.values, get_array(df, "a"))
+ assert np.shares_memory(get_array(s, "a"), get_array(df, "a"))
if using_copy_on_write or using_array_manager:
s[0] = 0
else:
+ warn = SettingWithCopyWarning if dtype_backend == "numpy" else None
with pd.option_context("chained_assignment", "warn"):
- with tm.assert_produces_warning(SettingWithCopyWarning):
+ with tm.assert_produces_warning(warn):
s[0] = 0
expected = Series([0, 2, 3], name="a")
@@ -794,22 +898,30 @@ def test_column_as_series(using_copy_on_write, using_array_manager):
tm.assert_frame_equal(df, df_orig)
-def test_column_as_series_set_with_upcast(using_copy_on_write, using_array_manager):
+def test_column_as_series_set_with_upcast(
+ backend, using_copy_on_write, using_array_manager
+):
# Case: selecting a single column now also uses Copy-on-Write -> when
# setting a value causes an upcast, we don't need to update the parent
# DataFrame through the cache mechanism
+ dtype_backend, DataFrame, Series = backend
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
df_orig = df.copy()
s = df["a"]
- if using_copy_on_write or using_array_manager:
+ if dtype_backend == "nullable":
+ with pytest.raises(TypeError, match="Invalid value"):
+ s[0] = "foo"
+ expected = Series([1, 2, 3], name="a")
+ elif using_copy_on_write or using_array_manager:
s[0] = "foo"
+ expected = Series(["foo", 2, 3], dtype=object, name="a")
else:
with pd.option_context("chained_assignment", "warn"):
with tm.assert_produces_warning(SettingWithCopyWarning):
s[0] = "foo"
+ expected = Series(["foo", 2, 3], dtype=object, name="a")
- expected = Series(["foo", 2, 3], dtype=object, name="a")
tm.assert_series_equal(s, expected)
if using_copy_on_write:
tm.assert_frame_equal(df, df_orig)
@@ -830,17 +942,18 @@ def test_column_as_series_set_with_upcast(using_copy_on_write, using_array_manag
ids=["getitem", "loc", "iloc"],
)
def test_column_as_series_no_item_cache(
- request, method, using_copy_on_write, using_array_manager
+ request, backend, method, using_copy_on_write, using_array_manager
):
# Case: selecting a single column (which now also uses Copy-on-Write to protect
# the view) should always give a new object (i.e. not make use of a cache)
+ dtype_backend, DataFrame, _ = backend
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
df_orig = df.copy()
s1 = method(df)
s2 = method(df)
- is_iloc = request.node.callspec.id == "iloc"
+ is_iloc = "iloc" in request.node.name
if using_copy_on_write or is_iloc:
assert s1 is not s2
else:
@@ -849,8 +962,9 @@ def test_column_as_series_no_item_cache(
if using_copy_on_write or using_array_manager:
s1.iloc[0] = 0
else:
+ warn = SettingWithCopyWarning if dtype_backend == "numpy" else None
with pd.option_context("chained_assignment", "warn"):
- with tm.assert_produces_warning(SettingWithCopyWarning):
+ with tm.assert_produces_warning(warn):
s1.iloc[0] = 0
if using_copy_on_write:
@@ -863,11 +977,12 @@ def test_column_as_series_no_item_cache(
# TODO add tests for other indexing methods on the Series
-def test_dataframe_add_column_from_series():
+def test_dataframe_add_column_from_series(backend):
# Case: adding a new column to a DataFrame from an existing column/series
# -> always already takes a copy on assignment
# (no change in behaviour here)
# TODO can we achieve the same behaviour with Copy-on-Write?
+ _, DataFrame, Series = backend
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
s = Series([10, 11, 12])
diff --git a/pandas/tests/copy_view/util.py b/pandas/tests/copy_view/util.py
index 1c6b5b51fa265..f15560f91ae01 100644
--- a/pandas/tests/copy_view/util.py
+++ b/pandas/tests/copy_view/util.py
@@ -12,13 +12,11 @@ def get_array(obj, col=None):
"""
if isinstance(obj, Series) and (col is None or obj.name == col):
arr = obj._values
- if isinstance(arr, BaseMaskedArray):
- return arr._data
- return arr
- assert col is not None
- icol = obj.columns.get_loc(col)
- assert isinstance(icol, int)
- arr = obj._get_column_array(icol)
+ else:
+ assert col is not None
+ icol = obj.columns.get_loc(col)
+ assert isinstance(icol, int)
+ arr = obj._get_column_array(icol)
if isinstance(arr, BaseMaskedArray):
return arr._data
return arr
diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py
index d58c7528aeda7..ceea53e3dd8bf 100644
--- a/pandas/tests/frame/methods/test_reindex.py
+++ b/pandas/tests/frame/methods/test_reindex.py
@@ -133,7 +133,7 @@ def test_reindex_copies(self):
result2 = df.reindex(columns=cols, index=df.index, copy=True)
assert not np.shares_memory(result2[0]._values, df[0]._values)
- def test_reindex_copies_ea(self):
+ def test_reindex_copies_ea(self, using_copy_on_write):
# https://github.com/pandas-dev/pandas/pull/51197
# also ensure to honor copy keyword for ExtensionDtypes
N = 10
@@ -142,7 +142,10 @@ def test_reindex_copies_ea(self):
np.random.shuffle(cols)
result = df.reindex(columns=cols, copy=True)
- assert not np.shares_memory(result[0].array._data, df[0].array._data)
+ if using_copy_on_write:
+ assert np.shares_memory(result[0].array._data, df[0].array._data)
+ else:
+ assert not np.shares_memory(result[0].array._data, df[0].array._data)
# pass both columns and index
result2 = df.reindex(columns=cols, index=df.index, copy=True)
| Experimenting with one possible approach to have the Copy-on-Write tests cover more block types / dtypes (cfr https://github.com/pandas-dev/pandas/pull/51144#issuecomment-1416394873).
In this PR I parametrized with numpy vs nullable dtype, but that should cover ExtensionDtype in general (for internal coverage, we should probably also have tests for datetime-like data, but that would be a much bigger change to include in the current tests).
Doing this PR unveiled that we did have different copy/view rules for selecting multiple columns if they are extension dtypes. The fix for that is currently included here (otherwise a lot of tests would either fail or need to be adapted), but I have a separate PR focusing on that actual change: https://github.com/pandas-dev/pandas/pull/51197 | https://api.github.com/repos/pandas-dev/pandas/pulls/51208 | 2023-02-07T10:00:38Z | 2023-02-20T08:11:08Z | 2023-02-20T08:11:08Z | 2023-02-20T08:11:15Z
ENH: Allow ArrowDtype(pa.string()) to be compatible with str accessor | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 7fc856be374e9..85993304b4407 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -284,6 +284,7 @@ Alternatively, copy on write can be enabled locally through:
Other enhancements
^^^^^^^^^^^^^^^^^^
+- Added support for ``str`` accessor methods when using :class:`ArrowDtype` with a ``pyarrow.string`` type (:issue:`50325`)
- Added support for ``dt`` accessor methods when using :class:`ArrowDtype` with a ``pyarrow.timestamp`` type (:issue:`50954`)
- :func:`read_sas` now supports using ``encoding='infer'`` to correctly read and use the encoding specified by the sas file. (:issue:`48048`)
- :meth:`.DataFrameGroupBy.quantile`, :meth:`.SeriesGroupBy.quantile` and :meth:`.DataFrameGroupBy.std` now preserve nullable dtypes instead of casting to numpy dtypes (:issue:`37493`)
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 16cfb6d7c396b..df8d6172dd6f6 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -1,10 +1,13 @@
from __future__ import annotations
from copy import deepcopy
+import re
from typing import (
TYPE_CHECKING,
Any,
+ Callable,
Literal,
+ Sequence,
TypeVar,
cast,
)
@@ -55,6 +58,7 @@
unpack_tuple_and_ellipses,
validate_indices,
)
+from pandas.core.strings.base import BaseStringArrayMethods
from pandas.tseries.frequencies import to_offset
@@ -165,7 +169,7 @@ def to_pyarrow_type(
return None
-class ArrowExtensionArray(OpsMixin, ExtensionArray):
+class ArrowExtensionArray(OpsMixin, ExtensionArray, BaseStringArrayMethods):
"""
Pandas ExtensionArray backed by a PyArrow ChunkedArray.
@@ -1463,6 +1467,317 @@ def _replace_with_mask(
result[mask] = replacements
return pa.array(result, type=values.type, from_pandas=True)
+ def _str_count(self, pat: str, flags: int = 0):
+ if flags:
+ raise NotImplementedError(f"count not implemented with {flags=}")
+ return type(self)(pc.count_substring_regex(self._data, pat))
+
+ def _str_pad(
+ self,
+ width: int,
+ side: Literal["left", "right", "both"] = "left",
+ fillchar: str = " ",
+ ):
+ if side == "left":
+ pa_pad = pc.utf8_lpad
+ elif side == "right":
+ pa_pad = pc.utf8_rpad
+ elif side == "both":
+ pa_pad = pc.utf8_center
+ else:
+ raise ValueError(
+ f"Invalid side: {side}. Side must be one of 'left', 'right', 'both'"
+ )
+ return type(self)(pa_pad(self._data, width=width, padding=fillchar))
+
+ def _str_contains(
+ self, pat, case: bool = True, flags: int = 0, na=None, regex: bool = True
+ ):
+ if flags:
+ raise NotImplementedError(f"contains not implemented with {flags=}")
+
+ if regex:
+ pa_contains = pc.match_substring_regex
+ else:
+ pa_contains = pc.match_substring
+ result = pa_contains(self._data, pat, ignore_case=not case)
+ if not isna(na):
+ result = result.fill_null(na)
+ return type(self)(result)
+
+ def _str_startswith(self, pat: str, na=None):
+ result = pc.starts_with(self._data, pattern=pat)
+ if not isna(na):
+ result = result.fill_null(na)
+ return type(self)(result)
+
+ def _str_endswith(self, pat: str, na=None):
+ result = pc.ends_with(self._data, pattern=pat)
+ if not isna(na):
+ result = result.fill_null(na)
+ return type(self)(result)
+
+ def _str_replace(
+ self,
+ pat: str | re.Pattern,
+ repl: str | Callable,
+ n: int = -1,
+ case: bool = True,
+ flags: int = 0,
+ regex: bool = True,
+ ):
+ if isinstance(pat, re.Pattern) or callable(repl) or not case or flags:
+ raise NotImplementedError(
+ "replace is not supported with a re.Pattern, callable repl, "
+ "case=False, or flags!=0"
+ )
+
+ func = pc.replace_substring_regex if regex else pc.replace_substring
+ result = func(self._data, pattern=pat, replacement=repl, max_replacements=n)
+ return type(self)(result)
+
+ def _str_repeat(self, repeats: int | Sequence[int]):
+ if not isinstance(repeats, int):
+ raise NotImplementedError(
+ f"repeat is not implemented when repeats is {type(repeats).__name__}"
+ )
+ elif pa_version_under7p0:
+ raise NotImplementedError("repeat is not implemented for pyarrow < 7")
+ else:
+ return type(self)(pc.binary_repeat(self._data, repeats))
+
+ def _str_match(
+ self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None
+ ):
+ if not pat.startswith("^"):
+ pat = f"^{pat}"
+ return self._str_contains(pat, case, flags, na, regex=True)
+
+ def _str_fullmatch(
+ self, pat, case: bool = True, flags: int = 0, na: Scalar | None = None
+ ):
+ if not pat.endswith("$") or pat.endswith("//$"):
+ pat = f"{pat}$"
+ return self._str_match(pat, case, flags, na)
+
+ def _str_find(self, sub: str, start: int = 0, end: int | None = None):
+ if start != 0 and end is not None:
+ slices = pc.utf8_slice_codeunits(self._data, start, stop=end)
+ result = pc.find_substring(slices, sub)
+ not_found = pc.equal(result, -1)
+ offset_result = pc.add(result, end - start)
+ result = pc.if_else(not_found, result, offset_result)
+ elif start == 0 and end is None:
+ slices = self._data
+ result = pc.find_substring(slices, sub)
+ else:
+ raise NotImplementedError(
+ f"find not implemented with {sub=}, {start=}, {end=}"
+ )
+ return type(self)(result)
+
+ def _str_get(self, i: int):
+ lengths = pc.utf8_length(self._data)
+ if i >= 0:
+ out_of_bounds = pc.greater_equal(i, lengths)
+ start = i
+ stop = i + 1
+ step = 1
+ else:
+ out_of_bounds = pc.greater(-i, lengths)
+ start = i
+ stop = i - 1
+ step = -1
+ not_out_of_bounds = pc.invert(out_of_bounds.fill_null(True))
+ selected = pc.utf8_slice_codeunits(
+ self._data, start=start, stop=stop, step=step
+ )
+ result = pa.array([None] * self._data.length(), type=self._data.type)
+ result = pc.if_else(not_out_of_bounds, selected, result)
+ return type(self)(result)
+
+ def _str_join(self, sep: str):
+ return type(self)(pc.binary_join(self._data, sep))
+
+ def _str_partition(self, sep: str, expand: bool):
+ raise NotImplementedError(
+ "str.partition not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_rpartition(self, sep: str, expand: bool):
+ raise NotImplementedError(
+ "str.rpartition not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_slice(
+ self, start: int | None = None, stop: int | None = None, step: int | None = None
+ ):
+ if start is None:
+ start = 0
+ if step is None:
+ step = 1
+ return type(self)(
+ pc.utf8_slice_codeunits(self._data, start=start, stop=stop, step=step)
+ )
+
+ def _str_slice_replace(
+ self, start: int | None = None, stop: int | None = None, repl: str | None = None
+ ):
+ if repl is None:
+ repl = ""
+ if start is None:
+ start = 0
+ return type(self)(pc.utf8_replace_slice(self._data, start, stop, repl))
+
+ def _str_isalnum(self):
+ return type(self)(pc.utf8_is_alnum(self._data))
+
+ def _str_isalpha(self):
+ return type(self)(pc.utf8_is_alpha(self._data))
+
+ def _str_isdecimal(self):
+ return type(self)(pc.utf8_is_decimal(self._data))
+
+ def _str_isdigit(self):
+ return type(self)(pc.utf8_is_digit(self._data))
+
+ def _str_islower(self):
+ return type(self)(pc.utf8_is_lower(self._data))
+
+ def _str_isnumeric(self):
+ return type(self)(pc.utf8_is_numeric(self._data))
+
+ def _str_isspace(self):
+ return type(self)(pc.utf8_is_space(self._data))
+
+ def _str_istitle(self):
+ return type(self)(pc.utf8_is_title(self._data))
+
+ def _str_capitalize(self):
+ return type(self)(pc.utf8_capitalize(self._data))
+
+ def _str_title(self):
+ return type(self)(pc.utf8_title(self._data))
+
+ def _str_isupper(self):
+ return type(self)(pc.utf8_is_upper(self._data))
+
+ def _str_swapcase(self):
+ return type(self)(pc.utf8_swapcase(self._data))
+
+ def _str_len(self):
+ return type(self)(pc.utf8_length(self._data))
+
+ def _str_lower(self):
+ return type(self)(pc.utf8_lower(self._data))
+
+ def _str_upper(self):
+ return type(self)(pc.utf8_upper(self._data))
+
+ def _str_strip(self, to_strip=None):
+ if to_strip is None:
+ result = pc.utf8_trim_whitespace(self._data)
+ else:
+ result = pc.utf8_trim(self._data, characters=to_strip)
+ return type(self)(result)
+
+ def _str_lstrip(self, to_strip=None):
+ if to_strip is None:
+ result = pc.utf8_ltrim_whitespace(self._data)
+ else:
+ result = pc.utf8_ltrim(self._data, characters=to_strip)
+ return type(self)(result)
+
+ def _str_rstrip(self, to_strip=None):
+ if to_strip is None:
+ result = pc.utf8_rtrim_whitespace(self._data)
+ else:
+ result = pc.utf8_rtrim(self._data, characters=to_strip)
+ return type(self)(result)
+
+ def _str_removeprefix(self, prefix: str):
+ raise NotImplementedError(
+ "str.removeprefix not supported with pd.ArrowDtype(pa.string())."
+ )
+ # TODO: Should work once https://github.com/apache/arrow/issues/14991 is fixed
+ # starts_with = pc.starts_with(self._data, pattern=prefix)
+ # removed = pc.utf8_slice_codeunits(self._data, len(prefix))
+ # result = pc.if_else(starts_with, removed, self._data)
+ # return type(self)(result)
+
+ def _str_removesuffix(self, suffix: str):
+ ends_with = pc.ends_with(self._data, pattern=suffix)
+ removed = pc.utf8_slice_codeunits(self._data, 0, stop=-len(suffix))
+ result = pc.if_else(ends_with, removed, self._data)
+ return type(self)(result)
+
+ def _str_casefold(self):
+ raise NotImplementedError(
+ "str.casefold not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_encode(self, encoding, errors: str = "strict"):
+ raise NotImplementedError(
+ "str.encode not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_extract(self, pat: str, flags: int = 0, expand: bool = True):
+ raise NotImplementedError(
+ "str.extract not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_findall(self, pat, flags: int = 0):
+ raise NotImplementedError(
+ "str.findall not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_get_dummies(self, sep: str = "|"):
+ raise NotImplementedError(
+ "str.get_dummies not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_index(self, sub, start: int = 0, end=None):
+ raise NotImplementedError(
+ "str.index not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_rindex(self, sub, start: int = 0, end=None):
+ raise NotImplementedError(
+ "str.rindex not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_normalize(self, form):
+ raise NotImplementedError(
+ "str.normalize not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_rfind(self, sub, start: int = 0, end=None):
+ raise NotImplementedError(
+ "str.rfind not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_split(
+ self, pat=None, n=-1, expand: bool = False, regex: bool | None = None
+ ):
+ raise NotImplementedError(
+ "str.split not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_rsplit(self, pat=None, n=-1):
+ raise NotImplementedError(
+ "str.rsplit not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_translate(self, table):
+ raise NotImplementedError(
+ "str.translate not supported with pd.ArrowDtype(pa.string())."
+ )
+
+ def _str_wrap(self, width, **kwargs):
+ raise NotImplementedError(
+ "str.wrap not supported with pd.ArrowDtype(pa.string())."
+ )
+
@property
def _dt_day(self):
return type(self)(pc.day(self._data))
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 9ad92471b98b4..4d2b39ec61fca 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -60,7 +60,7 @@ def _chk_pyarrow_available() -> None:
# fallback for the ones that pyarrow doesn't yet support
-class ArrowStringArray(ArrowExtensionArray, BaseStringArray, ObjectStringArrayMixin):
+class ArrowStringArray(ObjectStringArrayMixin, ArrowExtensionArray, BaseStringArray):
"""
Extension array for string data in a ``pyarrow.ChunkedArray``.
@@ -117,6 +117,16 @@ def __init__(self, values) -> None:
"ArrowStringArray requires a PyArrow (chunked) array of string type"
)
+ def __len__(self) -> int:
+ """
+ Length of this array.
+
+ Returns
+ -------
+ length : int
+ """
+ return len(self._data)
+
@classmethod
def _from_sequence(cls, scalars, dtype: Dtype | None = None, copy: bool = False):
from pandas.core.arrays.masked import BaseMaskedArray
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 70572897c1459..dfbb493636998 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -169,7 +169,7 @@
get_group_index_sorter,
nargsort,
)
-from pandas.core.strings import StringMethods
+from pandas.core.strings.accessor import StringMethods
from pandas.io.formats.printing import (
PrettyDict,
diff --git a/pandas/core/series.py b/pandas/core/series.py
index e4c7c4d3b3d73..d69c057c85783 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -163,7 +163,7 @@
ensure_key_mapped,
nargsort,
)
-from pandas.core.strings import StringMethods
+from pandas.core.strings.accessor import StringMethods
from pandas.core.tools.datetimes import to_datetime
import pandas.io.formats.format as fmt
diff --git a/pandas/core/strings/__init__.py b/pandas/core/strings/__init__.py
index 28aba7c9ce0b3..eb650477c2b6b 100644
--- a/pandas/core/strings/__init__.py
+++ b/pandas/core/strings/__init__.py
@@ -26,8 +26,3 @@
# - PandasArray
# - Categorical
# - ArrowStringArray
-
-from pandas.core.strings.accessor import StringMethods
-from pandas.core.strings.base import BaseStringArrayMethods
-
-__all__ = ["StringMethods", "BaseStringArrayMethods"]
diff --git a/pandas/core/strings/base.py b/pandas/core/strings/base.py
index c96e5a1abcf86..f1e716b64644a 100644
--- a/pandas/core/strings/base.py
+++ b/pandas/core/strings/base.py
@@ -246,7 +246,9 @@ def _str_removesuffix(self, suffix: str) -> Series:
pass
@abc.abstractmethod
- def _str_split(self, pat=None, n=-1, expand: bool = False):
+ def _str_split(
+ self, pat=None, n=-1, expand: bool = False, regex: bool | None = None
+ ):
pass
@abc.abstractmethod
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 9406c7c2f59c6..50fb636c2beb8 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -21,6 +21,7 @@
StringIO,
)
import pickle
+import re
import numpy as np
import pytest
@@ -1594,6 +1595,342 @@ def test_searchsorted_with_na_raises(data_for_sorting, as_series):
arr.searchsorted(b)
+@pytest.mark.parametrize("pat", ["abc", "a[a-z]{2}"])
+def test_str_count(pat):
+ ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.count(pat)
+ expected = pd.Series([1, None], dtype=ArrowDtype(pa.int32()))
+ tm.assert_series_equal(result, expected)
+
+
+def test_str_count_flags_unsupported():
+ ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ with pytest.raises(NotImplementedError, match="count not"):
+ ser.str.count("abc", flags=1)
+
+
+@pytest.mark.parametrize(
+ "side, str_func", [["left", "rjust"], ["right", "ljust"], ["both", "center"]]
+)
+def test_str_pad(side, str_func):
+ ser = pd.Series(["a", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.pad(width=3, side=side, fillchar="x")
+ expected = pd.Series(
+ [getattr("a", str_func)(3, "x"), None], dtype=ArrowDtype(pa.string())
+ )
+ tm.assert_series_equal(result, expected)
+
+
+def test_str_pad_invalid_side():
+ ser = pd.Series(["a", None], dtype=ArrowDtype(pa.string()))
+ with pytest.raises(ValueError, match="Invalid side: foo"):
+ ser.str.pad(3, "foo", "x")
+
+
+@pytest.mark.parametrize(
+ "pat, case, na, regex, exp",
+ [
+ ["ab", False, None, False, [True, None]],
+ ["Ab", True, None, False, [False, None]],
+ ["ab", False, True, False, [True, True]],
+ ["a[a-z]{1}", False, None, True, [True, None]],
+ ["A[a-z]{1}", True, None, True, [False, None]],
+ ],
+)
+def test_str_contains(pat, case, na, regex, exp):
+ ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.contains(pat, case=case, na=na, regex=regex)
+ expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_()))
+ tm.assert_series_equal(result, expected)
+
+
+def test_str_contains_flags_unsupported():
+ ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ with pytest.raises(NotImplementedError, match="contains not"):
+ ser.str.contains("a", flags=1)
+
+
+@pytest.mark.parametrize(
+ "side, pat, na, exp",
+ [
+ ["startswith", "ab", None, [True, None]],
+ ["startswith", "b", False, [False, False]],
+ ["endswith", "b", True, [False, True]],
+ ["endswith", "bc", None, [True, None]],
+ ],
+)
+def test_str_start_ends_with(side, pat, na, exp):
+ ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ result = getattr(ser.str, side)(pat, na=na)
+ expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_()))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "arg_name, arg",
+ [["pat", re.compile("b")], ["repl", str], ["case", False], ["flags", 1]],
+)
+def test_str_replace_unsupported(arg_name, arg):
+ ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ kwargs = {"pat": "b", "repl": "x", "regex": True}
+ kwargs[arg_name] = arg
+ with pytest.raises(NotImplementedError, match="replace is not supported"):
+ ser.str.replace(**kwargs)
+
+
+@pytest.mark.parametrize(
+ "pat, repl, n, regex, exp",
+ [
+ ["a", "x", -1, False, ["xbxc", None]],
+ ["a", "x", 1, False, ["xbac", None]],
+ ["[a-b]", "x", -1, True, ["xxxc", None]],
+ ],
+)
+def test_str_replace(pat, repl, n, regex, exp):
+ ser = pd.Series(["abac", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.replace(pat, repl, n=n, regex=regex)
+ expected = pd.Series(exp, dtype=ArrowDtype(pa.string()))
+ tm.assert_series_equal(result, expected)
+
+
+def test_str_repeat_unsupported():
+ ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ with pytest.raises(NotImplementedError, match="repeat is not"):
+ ser.str.repeat([1, 2])
+
+
+@pytest.mark.xfail(
+ pa_version_under7p0,
+ reason="Unsupported for pyarrow < 7",
+ raises=NotImplementedError,
+)
+def test_str_repeat():
+ ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.repeat(2)
+ expected = pd.Series(["abcabc", None], dtype=ArrowDtype(pa.string()))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "pat, case, na, exp",
+ [
+ ["ab", False, None, [True, None]],
+ ["Ab", True, None, [False, None]],
+ ["bc", True, None, [False, None]],
+ ["ab", False, True, [True, True]],
+ ["a[a-z]{1}", False, None, [True, None]],
+ ["A[a-z]{1}", True, None, [False, None]],
+ ],
+)
+def test_str_match(pat, case, na, exp):
+ ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.match(pat, case=case, na=na)
+ expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_()))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "pat, case, na, exp",
+ [
+ ["abc", False, None, [True, None]],
+ ["Abc", True, None, [False, None]],
+ ["bc", True, None, [False, None]],
+ ["ab", False, True, [True, True]],
+ ["a[a-z]{2}", False, None, [True, None]],
+ ["A[a-z]{1}", True, None, [False, None]],
+ ],
+)
+def test_str_fullmatch(pat, case, na, exp):
+ ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.match(pat, case=case, na=na)
+ expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_()))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "sub, start, end, exp, exp_typ",
+ [["ab", 0, None, [0, None], pa.int32()], ["bc", 1, 3, [2, None], pa.int64()]],
+)
+def test_str_find(sub, start, end, exp, exp_typ):
+ ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.find(sub, start=start, end=end)
+ expected = pd.Series(exp, dtype=ArrowDtype(exp_typ))
+ tm.assert_series_equal(result, expected)
+
+
+def test_str_find_notimplemented():
+ ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ with pytest.raises(NotImplementedError, match="find not implemented"):
+ ser.str.find("ab", start=1)
+
+
+@pytest.mark.parametrize(
+ "i, exp",
+ [
+ [1, ["b", "e", None]],
+ [-1, ["c", "e", None]],
+ [2, ["c", None, None]],
+ [-3, ["a", None, None]],
+ [4, [None, None, None]],
+ ],
+)
+def test_str_get(i, exp):
+ ser = pd.Series(["abc", "de", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.get(i)
+ expected = pd.Series(exp, dtype=ArrowDtype(pa.string()))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.xfail(
+ reason="TODO: StringMethods._validate should support Arrow list types",
+ raises=AttributeError,
+)
+def test_str_join():
+ ser = pd.Series(ArrowExtensionArray(pa.array([list("abc"), list("123"), None])))
+ result = ser.str.join("=")
+ expected = pd.Series(["a=b=c", "1=2=3", None], dtype=ArrowDtype(pa.string()))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "start, stop, step, exp",
+ [
+ [None, 2, None, ["ab", None]],
+ [None, 2, 1, ["ab", None]],
+ [1, 3, 1, ["bc", None]],
+ ],
+)
+def test_str_slice(start, stop, step, exp):
+ ser = pd.Series(["abcd", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.slice(start, stop, step)
+ expected = pd.Series(exp, dtype=ArrowDtype(pa.string()))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "start, stop, repl, exp",
+ [
+ [1, 2, "x", ["axcd", None]],
+ [None, 2, "x", ["xcd", None]],
+ [None, 2, None, ["cd", None]],
+ ],
+)
+def test_str_slice_replace(start, stop, repl, exp):
+ ser = pd.Series(["abcd", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.slice_replace(start, stop, repl)
+ expected = pd.Series(exp, dtype=ArrowDtype(pa.string()))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "value, method, exp",
+ [
+ ["a1c", "isalnum", True],
+ ["!|,", "isalnum", False],
+ ["aaa", "isalpha", True],
+ ["!!!", "isalpha", False],
+ ["٠", "isdecimal", True],
+ ["~!", "isdecimal", False],
+ ["2", "isdigit", True],
+ ["~", "isdigit", False],
+ ["aaa", "islower", True],
+ ["aaA", "islower", False],
+ ["123", "isnumeric", True],
+ ["11I", "isnumeric", False],
+ [" ", "isspace", True],
+ ["", "isspace", False],
+ ["The That", "istitle", True],
+ ["the That", "istitle", False],
+ ["AAA", "isupper", True],
+ ["AAc", "isupper", False],
+ ],
+)
+def test_str_is_functions(value, method, exp):
+ ser = pd.Series([value, None], dtype=ArrowDtype(pa.string()))
+ result = getattr(ser.str, method)()
+ expected = pd.Series([exp, None], dtype=ArrowDtype(pa.bool_()))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "method, exp",
+ [
+ ["capitalize", "Abc def"],
+ ["title", "Abc Def"],
+ ["swapcase", "AbC Def"],
+ ["lower", "abc def"],
+ ["upper", "ABC DEF"],
+ ],
+)
+def test_str_transform_functions(method, exp):
+ ser = pd.Series(["aBc dEF", None], dtype=ArrowDtype(pa.string()))
+ result = getattr(ser.str, method)()
+ expected = pd.Series([exp, None], dtype=ArrowDtype(pa.string()))
+ tm.assert_series_equal(result, expected)
+
+
+def test_str_len():
+ ser = pd.Series(["abcd", None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.len()
+ expected = pd.Series([4, None], dtype=ArrowDtype(pa.int32()))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "method, to_strip, val",
+ [
+ ["strip", None, " abc "],
+ ["strip", "x", "xabcx"],
+ ["lstrip", None, " abc"],
+ ["lstrip", "x", "xabc"],
+ ["rstrip", None, "abc "],
+ ["rstrip", "x", "abcx"],
+ ],
+)
+def test_str_strip(method, to_strip, val):
+ ser = pd.Series([val, None], dtype=ArrowDtype(pa.string()))
+ result = getattr(ser.str, method)(to_strip=to_strip)
+ expected = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("val", ["abc123", "abc"])
+def test_str_removesuffix(val):
+ ser = pd.Series([val, None], dtype=ArrowDtype(pa.string()))
+ result = ser.str.removesuffix("123")
+ expected = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "method, args",
+ [
+ ["partition", ("abc", False)],
+ ["rpartition", ("abc", False)],
+ ["removeprefix", ("abc",)],
+ ["casefold", ()],
+ ["encode", ("abc",)],
+ ["extract", (r"[ab](\d)",)],
+ ["findall", ("abc",)],
+ ["get_dummies", ()],
+ ["index", ("abc",)],
+ ["rindex", ("abc",)],
+ ["normalize", ("abc",)],
+ ["rfind", ("abc",)],
+ ["split", ()],
+ ["rsplit", ()],
+ ["translate", ("abc",)],
+ ["wrap", ("abc",)],
+ ],
+)
+def test_str_unsupported_methods(method, args):
+ ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+ with pytest.raises(
+ NotImplementedError, match=f"str.{method} not supported with pd.ArrowDtype"
+ ):
+ getattr(ser.str, method)(*args)
+
+
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
def test_duration_from_strings_with_nat(unit):
# GH51175
diff --git a/pandas/tests/strings/conftest.py b/pandas/tests/strings/conftest.py
index cdc2b876194e6..3e1ee89e9a841 100644
--- a/pandas/tests/strings/conftest.py
+++ b/pandas/tests/strings/conftest.py
@@ -2,7 +2,7 @@
import pytest
from pandas import Series
-from pandas.core import strings
+from pandas.core.strings.accessor import StringMethods
_any_string_method = [
("cat", (), {"sep": ","}),
@@ -89,9 +89,7 @@
)
)
ids, _, _ = zip(*_any_string_method) # use method name as fixture-id
-missing_methods = {
- f for f in dir(strings.StringMethods) if not f.startswith("_")
-} - set(ids)
+missing_methods = {f for f in dir(StringMethods) if not f.startswith("_")} - set(ids)
# test that the above list captures all methods of StringMethods
assert not missing_methods
diff --git a/pandas/tests/strings/test_api.py b/pandas/tests/strings/test_api.py
index 088affcb0506f..88d928ceecc43 100644
--- a/pandas/tests/strings/test_api.py
+++ b/pandas/tests/strings/test_api.py
@@ -8,13 +8,13 @@
_testing as tm,
get_option,
)
-from pandas.core import strings
+from pandas.core.strings.accessor import StringMethods
def test_api(any_string_dtype):
# GH 6106, GH 9322
- assert Series.str is strings.StringMethods
- assert isinstance(Series([""], dtype=any_string_dtype).str, strings.StringMethods)
+ assert Series.str is StringMethods
+ assert isinstance(Series([""], dtype=any_string_dtype).str, StringMethods)
def test_api_mi_raises():
@@ -44,7 +44,7 @@ def test_api_per_dtype(index_or_series, dtype, any_skipna_inferred_dtype):
]
if inferred_dtype in types_passing_constructor:
# GH 6106
- assert isinstance(t.str, strings.StringMethods)
+ assert isinstance(t.str, StringMethods)
else:
# GH 9184, GH 23011, GH 23163
msg = "Can only use .str accessor with string values.*"
@@ -137,7 +137,7 @@ def test_api_for_categorical(any_string_method, any_string_dtype, request):
s = Series(list("aabb"), dtype=any_string_dtype)
s = s + " " + s
c = s.astype("category")
- assert isinstance(c.str, strings.StringMethods)
+ assert isinstance(c.str, StringMethods)
method_name, args, kwargs = any_string_method
diff --git a/pandas/tests/strings/test_strings.py b/pandas/tests/strings/test_strings.py
index 9340fea14f801..b863425a24183 100644
--- a/pandas/tests/strings/test_strings.py
+++ b/pandas/tests/strings/test_strings.py
@@ -13,6 +13,7 @@
Series,
)
import pandas._testing as tm
+from pandas.core.strings.accessor import StringMethods
@pytest.mark.parametrize("pattern", [0, True, Series(["foo", "bar"])])
@@ -598,8 +599,6 @@ def test_normalize_index():
],
)
def test_index_str_accessor_visibility(values, inferred_type, index_or_series):
- from pandas.core.strings import StringMethods
-
obj = index_or_series(values)
if index_or_series is Index:
assert obj.inferred_type == inferred_type
| Reboot of #50325
Notable change was that `pandas.core.strings.BaseStringArrayMethods` in the `__init__.py` was causing a circular import when used in `ArrowExtensionArray`, so needed to remove the import into `__init__.py.` This breaks one of our dask tests but it has been fixed downstream https://github.com/dask/dask/pull/9907 | https://api.github.com/repos/pandas-dev/pandas/pulls/51207 | 2023-02-07T02:26:55Z | 2023-02-16T02:07:06Z | 2023-02-16T02:07:06Z | 2023-02-16T11:05:50Z |
DEPR: Grouper.ax, Grouper.obj, Grouper.indexer | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 532881fee0892..d7888c6c75454 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -786,6 +786,9 @@ Deprecations
- Deprecated calling ``float`` or ``int`` on a single element :class:`Series` to return a ``float`` or ``int`` respectively. Extract the element before calling ``float`` or ``int`` instead (:issue:`51101`)
- Deprecated :meth:`Grouper.groups`, use :meth:`Groupby.groups` instead (:issue:`51182`)
- Deprecated :meth:`Grouper.grouper`, use :meth:`Groupby.grouper` instead (:issue:`51182`)
+- Deprecated :meth:`Grouper.obj`, use :meth:`Groupby.obj` instead (:issue:`51206`)
+- Deprecated :meth:`Grouper.indexer`, use :meth:`Resampler.indexer` instead (:issue:`51206`)
+- Deprecated :meth:`Grouper.ax`, use :meth:`Resampler.ax` instead (:issue:`51206`)
- Deprecated :meth:`Series.pad` in favor of :meth:`Series.ffill` (:issue:`33396`)
- Deprecated :meth:`Series.backfill` in favor of :meth:`Series.bfill` (:issue:`33396`)
- Deprecated :meth:`DataFrame.pad` in favor of :meth:`DataFrame.ffill` (:issue:`33396`)
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index ee0ff93e0b538..5f69d297f4426 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -270,21 +270,13 @@ def __init__(
self.dropna = dropna
self._grouper_deprecated = None
+ self._indexer_deprecated = None
+ self._obj_deprecated = None
self._gpr_index = None
- self.obj = None
- self.indexer = None
self.binner = None
self._grouper = None
self._indexer = None
- @final
- @property
- def ax(self) -> Index:
- index = self._gpr_index
- if index is None:
- raise ValueError("_set_grouper must be called before ax is accessed")
- return index
-
def _get_grouper(
self, obj: NDFrameT, validate: bool = True
) -> tuple[ops.BaseGrouper, NDFrameT]:
@@ -299,7 +291,7 @@ def _get_grouper(
-------
a tuple of grouper, obj (possibly sorted)
"""
- obj, _ = self._set_grouper(obj)
+ obj, _, _ = self._set_grouper(obj)
grouper, _, obj = get_grouper(
obj,
[self.key],
@@ -335,9 +327,12 @@ def _set_grouper(
-------
NDFrame
Index
+ np.ndarray[np.intp] | None
"""
assert obj is not None
+ indexer = None
+
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
@@ -345,7 +340,7 @@ def _set_grouper(
if self._grouper is None:
# TODO: What are we assuming about subsequent calls?
self._grouper = gpr_index
- self._indexer = self.indexer
+ self._indexer = self._indexer_deprecated
# the key must be a valid info item
if self.key is not None:
@@ -387,7 +382,7 @@ def _set_grouper(
if (self.sort or sort) and not ax.is_monotonic_increasing:
# use stable sort to support first, last, nth
# TODO: why does putting na_position="first" fix datetimelike cases?
- indexer = self.indexer = ax.array.argsort(
+ indexer = self._indexer_deprecated = ax.array.argsort(
kind="mergesort", na_position="first"
)
ax = ax.take(indexer)
@@ -395,9 +390,45 @@ def _set_grouper(
# error: Incompatible types in assignment (expression has type
# "NDFrameT", variable has type "None")
- self.obj = obj # type: ignore[assignment]
+ self._obj_deprecated = obj # type: ignore[assignment]
self._gpr_index = ax
- return obj, ax
+ return obj, ax, indexer
+
+ @final
+ @property
+ def ax(self) -> Index:
+ warnings.warn(
+ f"{type(self).__name__}.ax is deprecated and will be removed in a "
+ "future version. Use Resampler.ax instead",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ index = self._gpr_index
+ if index is None:
+ raise ValueError("_set_grouper must be called before ax is accessed")
+ return index
+
+ @final
+ @property
+ def indexer(self):
+ warnings.warn(
+ f"{type(self).__name__}.indexer is deprecated and will be removed "
+ "in a future version. Use Resampler.indexer instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self._indexer_deprecated
+
+ @final
+ @property
+ def obj(self):
+ warnings.warn(
+ f"{type(self).__name__}.obj is deprecated and will be removed "
+ "in a future version. Use GroupBy.indexer instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self._obj_deprecated
@final
@property
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index ccc32043b4dd2..8729c998db7d5 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -130,7 +130,7 @@ class Resampler(BaseGroupBy, PandasObject):
_timegrouper: TimeGrouper
binner: DatetimeIndex | TimedeltaIndex | PeriodIndex # depends on subclass
exclusions: frozenset[Hashable] = frozenset() # for SelectionMixin compat
- _internal_names_set = set({"obj", "ax"})
+ _internal_names_set = set({"obj", "ax", "_indexer"})
# to the groupby descriptor
_attributes = [
@@ -163,7 +163,7 @@ def __init__(
self.group_keys = group_keys
self.as_index = True
- self.obj, self.ax = self._timegrouper._set_grouper(
+ self.obj, self.ax, self._indexer = self._timegrouper._set_grouper(
self._convert_obj(obj), sort=True, gpr_index=gpr_index
)
self.binner, self.grouper = self._get_binner()
@@ -230,7 +230,7 @@ def _get_binner(self):
"""
binner, bins, binlabels = self._get_binner_for_time()
assert len(bins) == len(binlabels)
- bin_grouper = BinGrouper(bins, binlabels, indexer=self._timegrouper.indexer)
+ bin_grouper = BinGrouper(bins, binlabels, indexer=self._indexer)
return binner, bin_grouper
@Substitution(
@@ -1674,7 +1674,7 @@ def _get_resampler(self, obj: NDFrame, kind=None) -> Resampler:
TypeError if incompatible axis
"""
- _, ax = self._set_grouper(obj, gpr_index=None)
+ _, ax, indexer = self._set_grouper(obj, gpr_index=None)
if isinstance(ax, DatetimeIndex):
return DatetimeIndexResampler(
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index f7d4adc00260f..88ecac1ab24c9 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -1066,3 +1066,16 @@ def test_grouper_groups():
with tm.assert_produces_warning(FutureWarning, match=msg):
res = grper.grouper
assert res is gb.grouper
+
+ msg = "Grouper.obj is deprecated and will be removed"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ res = grper.obj
+ assert res is gb.obj
+
+ msg = "Use Resampler.ax instead"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ grper.ax
+
+ msg = "Grouper.indexer is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ grper.indexer
| - [x] closes #51134 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51206 | 2023-02-07T02:04:39Z | 2023-02-09T17:27:36Z | 2023-02-09T17:27:36Z | 2023-02-09T17:49:20Z |
API: dont infer dtype for object-dtype groupby reductions | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index b006d3820889f..3cc55f8682670 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -778,7 +778,9 @@ Other API changes
- The levels of the index of the :class:`Series` returned from ``Series.sparse.from_coo`` now always have dtype ``int32``. Previously they had dtype ``int64`` (:issue:`50926`)
- :func:`to_datetime` with ``unit`` of either "Y" or "M" will now raise if a sequence contains a non-round ``float`` value, matching the ``Timestamp`` behavior (:issue:`50301`)
- The methods :meth:`Series.round`, :meth:`DataFrame.__invert__`, :meth:`Series.__invert__`, :meth:`DataFrame.swapaxes`, :meth:`DataFrame.first`, :meth:`DataFrame.last`, :meth:`Series.first`, :meth:`Series.last` and :meth:`DataFrame.align` will now always return new objects (:issue:`51032`)
+- :class:`DataFrameGroupBy` aggregations (e.g. "sum") with object-dtype columns no longer infer non-object dtypes for their results, explicitly call ``result.infer_objects(copy=False)`` on the result to obtain the old behavior (:issue:`51205`)
- Added :func:`pandas.api.types.is_any_real_numeric_dtype` to check for real numeric dtypes (:issue:`51152`)
+-
.. ---------------------------------------------------------------------------
.. _whatsnew_200.deprecations:
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index ed063a2987188..e42566bfa11a0 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1495,6 +1495,9 @@ def _agg_py_fallback(
# TODO: if we ever get "rank" working, exclude it here.
res_values = type(values)._from_sequence(res_values, dtype=values.dtype)
+ elif ser.dtype == object:
+ res_values = res_values.astype(object, copy=False)
+
# If we are DataFrameGroupBy and went through a SeriesGroupByPath
# then we need to reshape
# GH#32223 includes case with IntegerArray values, ndarray res_values
@@ -1537,8 +1540,7 @@ def array_func(values: ArrayLike) -> ArrayLike:
new_mgr = data.grouped_reduce(array_func)
res = self._wrap_agged_manager(new_mgr)
out = self._wrap_aggregated_output(res)
- if data.ndim == 2:
- # TODO: don't special-case DataFrame vs Series
+ if self.axis == 1:
out = out.infer_objects(copy=False)
return out
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index e7be78be55620..210dba8bbc44c 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -258,6 +258,7 @@ def test_multiindex_groupby_mixed_cols_axis1(func, expected, dtype, result_dtype
expected = DataFrame([expected] * 3, columns=["i", "j", "k"]).astype(
result_dtype_dict
)
+
tm.assert_frame_equal(result, expected)
@@ -675,6 +676,7 @@ def test_agg_split_object_part_datetime():
"F": [1],
},
index=np.array([0]),
+ dtype=object,
)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py
index eb667016b1e62..aad1218190a84 100644
--- a/pandas/tests/groupby/aggregate/test_other.py
+++ b/pandas/tests/groupby/aggregate/test_other.py
@@ -517,6 +517,7 @@ def test_sum_uint64_overflow():
expected = DataFrame(
{1: [9223372036854775809, 9223372036854775811, 9223372036854775813]},
index=index,
+ dtype=object,
)
expected.index.name = 0
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index e93dd022f46ac..1f081daf41b75 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1509,6 +1509,12 @@ def test_deprecate_numeric_only_series(dtype, groupby_func, request):
"sum",
"diff",
"pct_change",
+ "var",
+ "mean",
+ "median",
+ "min",
+ "max",
+ "prod",
)
# Test default behavior; kernels that fail may be enabled in the future but kernels
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index a7bd89942ea79..e175f6dda980f 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2380,7 +2380,9 @@ def test_groupby_duplicate_columns():
).astype(object)
df.columns = ["A", "B", "B"]
result = df.groupby([0, 0, 0, 0]).min()
- expected = DataFrame([["e", "a", 1]], index=np.array([0]), columns=["A", "B", "B"])
+ expected = DataFrame(
+ [["e", "a", 1]], index=np.array([0]), columns=["A", "B", "B"], dtype=object
+ )
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_min_max.py b/pandas/tests/groupby/test_min_max.py
index 2a997b3c84216..11f62c5d03c49 100644
--- a/pandas/tests/groupby/test_min_max.py
+++ b/pandas/tests/groupby/test_min_max.py
@@ -148,9 +148,13 @@ def test_aggregate_numeric_object_dtype():
{"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": [np.nan] * 4},
).astype(object)
result = df.groupby("key").min()
- expected = DataFrame(
- {"key": ["A", "B"], "col1": ["a", "c"], "col2": [np.nan, np.nan]}
- ).set_index("key")
+ expected = (
+ DataFrame(
+ {"key": ["A", "B"], "col1": ["a", "c"], "col2": [np.nan, np.nan]},
+ )
+ .set_index("key")
+ .astype(object)
+ )
tm.assert_frame_equal(result, expected)
# same but with numbers
@@ -158,9 +162,11 @@ def test_aggregate_numeric_object_dtype():
{"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": range(4)},
).astype(object)
result = df.groupby("key").min()
- expected = DataFrame(
- {"key": ["A", "B"], "col1": ["a", "c"], "col2": [0, 2]}
- ).set_index("key")
+ expected = (
+ DataFrame({"key": ["A", "B"], "col1": ["a", "c"], "col2": [0, 2]})
+ .set_index("key")
+ .astype(object)
+ )
tm.assert_frame_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51205 | 2023-02-06T23:21:42Z | 2023-02-10T18:08:59Z | 2023-02-10T18:08:59Z | 2023-02-10T18:10:59Z |
TYP: Upgrade mypy to 1.0 | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index df30a31889a99..353ae4455caba 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -650,7 +650,7 @@ If installed, we now require:
+-------------------+-----------------+----------+---------+
| Package | Minimum Version | Required | Changed |
+===================+=================+==========+=========+
-| mypy (dev) | 0.991 | | X |
+| mypy (dev) | 1.0 | | X |
+-------------------+-----------------+----------+---------+
| pytest (dev) | 7.0.0 | | X |
+-------------------+-----------------+----------+---------+
diff --git a/environment.yml b/environment.yml
index 076e6fa727332..05251001d8e86 100644
--- a/environment.yml
+++ b/environment.yml
@@ -79,7 +79,7 @@ dependencies:
- cpplint
- flake8=6.0.0
- isort>=5.2.1 # check that imports are in the right order
- - mypy=0.991
+ - mypy=1.0
- pre-commit>=2.15.0
- pyupgrade
- ruff=0.0.215
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 9247d26fc846d..254f7ad28f324 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -330,10 +330,7 @@ def __getitem__(self, item: PositionalIndexer):
elif isinstance(item, tuple):
item = unpack_tuple_and_ellipses(item)
- # error: Non-overlapping identity check (left operand type:
- # "Union[Union[int, integer[Any]], Union[slice, List[int],
- # ndarray[Any, Any]]]", right operand type: "ellipsis")
- if item is Ellipsis: # type: ignore[comparison-overlap]
+ if item is Ellipsis:
# TODO: should be handled by pyarrow?
item = slice(None)
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index 5f8169829ab6b..543e39d25f030 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -288,8 +288,10 @@ class BooleanArray(BaseMaskedArray):
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = False
# Fill values used for any/all
- _truthy_value = True
- _falsey_value = False
+ # Incompatible types in assignment (expression has type "bool", base class
+ # "BaseMaskedArray" defined the type as "<typing special form>")
+ _truthy_value = True # type: ignore[assignment]
+ _falsey_value = False # type: ignore[assignment]
_TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"}
_FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"}
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 0766b1c6a5262..1bf43f61a67a7 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -2500,9 +2500,7 @@ def _generate_range(
# Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]";
# expected "Union[integer[Any], float, str, date, datetime64]"
start = Timestamp(start) # type: ignore[arg-type]
- # Non-overlapping identity check (left operand type: "Timestamp", right
- # operand type: "NaTType")
- if start is not NaT: # type: ignore[comparison-overlap]
+ if start is not NaT:
start = start.as_unit(unit)
else:
start = None
@@ -2510,9 +2508,7 @@ def _generate_range(
# Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]";
# expected "Union[integer[Any], float, str, date, datetime64]"
end = Timestamp(end) # type: ignore[arg-type]
- # Non-overlapping identity check (left operand type: "Timestamp", right
- # operand type: "NaTType")
- if end is not NaT: # type: ignore[comparison-overlap]
+ if end is not NaT:
end = end.as_unit(unit)
else:
end = None
diff --git a/pandas/core/arrays/floating.py b/pandas/core/arrays/floating.py
index ccf8431660acb..e08e99f7eab94 100644
--- a/pandas/core/arrays/floating.py
+++ b/pandas/core/arrays/floating.py
@@ -116,8 +116,10 @@ class FloatingArray(NumericArray):
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = np.nan
# Fill values used for any/all
- _truthy_value = 1.0
- _falsey_value = 0.0
+ # Incompatible types in assignment (expression has type "float", base class
+ # "BaseMaskedArray" defined the type as "<typing special form>")
+ _truthy_value = 1.0 # type: ignore[assignment]
+ _falsey_value = 0.0 # type: ignore[assignment]
_dtype_docstring = """
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 24e5fa1bef552..af0f80eb0c85d 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -132,8 +132,10 @@ class IntegerArray(NumericArray):
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = 1
# Fill values used for any/all
- _truthy_value = 1
- _falsey_value = 0
+ # Incompatible types in assignment (expression has type "int", base class
+ # "BaseMaskedArray" defined the type as "<typing special form>")
+ _truthy_value = 1 # type: ignore[assignment]
+ _falsey_value = 0 # type: ignore[assignment]
_dtype_docstring = """
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index c8c33d3f52102..38f97e6f12501 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -912,11 +912,7 @@ def __getitem__(
if isinstance(key, tuple):
key = unpack_tuple_and_ellipses(key)
- # Non-overlapping identity check (left operand type:
- # "Union[Union[Union[int, integer[Any]], Union[slice, List[int],
- # ndarray[Any, Any]]], Tuple[Union[int, ellipsis], ...]]",
- # right operand type: "ellipsis")
- if key is Ellipsis: # type: ignore[comparison-overlap]
+ if key is Ellipsis:
raise ValueError("Cannot slice with Ellipsis")
if is_integer(key):
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 3be89f6da2bd8..84e27d256e83d 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -787,16 +787,12 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> tuple[DtypeObj,
elif isinstance(val, (np.datetime64, dt.datetime)):
try:
val = Timestamp(val)
- # error: Non-overlapping identity check (left operand type:
- # "Timestamp", right operand type: "NaTType")
- if val is not NaT: # type: ignore[comparison-overlap]
+ if val is not NaT:
val = val.as_unit("ns")
except OutOfBoundsDatetime:
return _dtype_obj, val
- # error: Non-overlapping identity check (left operand type: "Timestamp",
- # right operand type: "NaTType")
- if val is NaT or val.tz is None: # type: ignore[comparison-overlap]
+ if val is NaT or val.tz is None:
val = val.to_datetime64()
dtype = val.dtype
# TODO: test with datetime(2920, 10, 1) based on test_replace_dtypes
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4f13ead4005e7..2361c254f5161 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2633,13 +2633,15 @@ def to_stata(
raise ValueError("strl is not supported in format 114")
from pandas.io.stata import StataWriter as statawriter
elif version == 117:
- # mypy: Name 'statawriter' already defined (possibly by an import)
- from pandas.io.stata import ( # type: ignore[no-redef]
+ # Incompatible import of "statawriter" (imported name has type
+ # "Type[StataWriter117]", local name has type "Type[StataWriter]")
+ from pandas.io.stata import ( # type: ignore[assignment]
StataWriter117 as statawriter,
)
else: # versions 118 and 119
- # mypy: Name 'statawriter' already defined (possibly by an import)
- from pandas.io.stata import ( # type: ignore[no-redef]
+ # Incompatible import of "statawriter" (imported name has type
+ # "Type[StataWriter117]", local name has type "Type[StataWriter]")
+ from pandas.io.stata import ( # type: ignore[assignment]
StataWriterUTF8 as statawriter,
)
@@ -5514,8 +5516,7 @@ def pop(self, item: Hashable) -> Series:
"""
return super().pop(item=item)
- # error: Signature of "replace" incompatible with supertype "NDFrame"
- @overload # type: ignore[override]
+ @overload
def replace(
self,
to_replace=...,
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 7d8d7a37ff7e7..8c4576ee554ec 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1325,9 +1325,7 @@ def _maybe_coerce_merge_keys(self) -> None:
mask = ~np.isnan(lk)
match = lk == casted
- # error: Item "ExtensionArray" of "Union[ExtensionArray,
- # ndarray[Any, Any], Any]" has no attribute "all"
- if not match[mask].all(): # type: ignore[union-attr]
+ if not match[mask].all():
warnings.warn(
"You are merging on int and float "
"columns where the float values "
@@ -1347,9 +1345,7 @@ def _maybe_coerce_merge_keys(self) -> None:
mask = ~np.isnan(rk)
match = rk == casted
- # error: Item "ExtensionArray" of "Union[ExtensionArray,
- # ndarray[Any, Any], Any]" has no attribute "all"
- if not match[mask].all(): # type: ignore[union-attr]
+ if not match[mask].all():
warnings.warn(
"You are merging on int and float "
"columns where the float values "
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 9a5412145366f..80dd0dd19f96f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3370,8 +3370,7 @@ def update(self, other: Series | Sequence | Mapping) -> None:
# ----------------------------------------------------------------------
# Reindexing, sorting
- # error: Signature of "sort_values" incompatible with supertype "NDFrame"
- @overload # type: ignore[override]
+ @overload
def sort_values(
self,
*,
@@ -5131,8 +5130,7 @@ def pop(self, item: Hashable) -> Any:
"""
return super().pop(item=item)
- # error: Signature of "replace" incompatible with supertype "NDFrame"
- @overload # type: ignore[override]
+ @overload
def replace(
self,
to_replace=...,
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 946c22f47596e..afe6a405afbb8 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1229,7 +1229,9 @@ def get_buffer(
raise ValueError("buf is not a file name and encoding is specified.")
if hasattr(buf, "write"):
- yield buf
+ # Incompatible types in "yield" (actual type "Union[str, WriteBuffer[str],
+ # StringIO]", expected type "Union[WriteBuffer[str], StringIO]")
+ yield buf # type: ignore[misc]
elif isinstance(buf, str):
check_parent_directory(str(buf))
with open(buf, "w", encoding=encoding, newline="") as f:
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 66991eab54671..338e831ed184f 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -1138,12 +1138,7 @@ def _try_convert_data(
return data, False
return data.fillna(np.nan), True
- # error: Non-overlapping identity check (left operand type:
- # "Union[ExtensionDtype, str, dtype[Any], Type[object],
- # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
- # Type[str], Type[float], Type[int], Type[complex], Type[bool],
- # Type[object]]]]", right operand type: "Literal[True]")
- elif self.dtype is True: # type: ignore[comparison-overlap]
+ elif self.dtype is True:
pass
else:
# dtype to force
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 04d8b176dffae..3783c7c2aeb5f 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -56,7 +56,7 @@ black==22.10.0
cpplint
flake8==6.0.0
isort>=5.2.1
-mypy==0.991
+mypy==1.0
pre-commit>=2.15.0
pyupgrade
ruff==0.0.215
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51204 | 2023-02-06T22:42:57Z | 2023-02-07T19:22:41Z | 2023-02-07T19:22:41Z | 2023-02-07T19:29:00Z |
DOC: Minor doc cleanups | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 18f394b8e549b..462873d4e88b0 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -267,6 +267,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.api.types.is_integer \
pandas.api.types.pandas_dtype \
pandas.read_clipboard \
+ pandas.ExcelFile \
pandas.ExcelFile.parse \
pandas.DataFrame.to_html \
pandas.io.formats.style.Styler.to_html \
diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst
index 8ae71452874f7..e9f986e37ca86 100644
--- a/doc/source/reference/indexing.rst
+++ b/doc/source/reference/indexing.rst
@@ -299,6 +299,9 @@ MultiIndex components
MultiIndex.reorder_levels
MultiIndex.remove_unused_levels
MultiIndex.drop
+ MultiIndex.copy
+ MultiIndex.append
+ MultiIndex.truncate
MultiIndex selecting
~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/reference/io.rst b/doc/source/reference/io.rst
index 425b5f81be966..fbd0f6bd200b9 100644
--- a/doc/source/reference/io.rst
+++ b/doc/source/reference/io.rst
@@ -40,6 +40,9 @@ Excel
read_excel
DataFrame.to_excel
+ ExcelFile
+ ExcelFile.book
+ ExcelFile.sheet_names
ExcelFile.parse
.. currentmodule:: pandas.io.formats.style
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index b167b7e811d98..badf3f0f68627 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -361,7 +361,6 @@ Other enhancements
- Added ``numeric_only`` argument to :meth:`DataFrame.corr`, :meth:`DataFrame.corrwith`, :meth:`DataFrame.cov`, :meth:`DataFrame.idxmin`, :meth:`DataFrame.idxmax`, :meth:`.DataFrameGroupBy.idxmin`, :meth:`.DataFrameGroupBy.idxmax`, :meth:`.DataFrameGroupBy.var`, :meth:`.SeriesGroupBy.var`, :meth:`.DataFrameGroupBy.std`, :meth:`.SeriesGroupBy.std`, :meth:`.DataFrameGroupBy.sem`, :meth:`.SeriesGroupBy.sem`, and :meth:`.DataFrameGroupBy.quantile` (:issue:`46560`)
- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`, :issue:`46725`)
- Added ``validate`` argument to :meth:`DataFrame.join` (:issue:`46622`)
-- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`)
- Added ``numeric_only`` argument to :meth:`.Resampler.sum`, :meth:`.Resampler.prod`, :meth:`.Resampler.min`, :meth:`.Resampler.max`, :meth:`.Resampler.first`, and :meth:`.Resampler.last` (:issue:`46442`)
- ``times`` argument in :class:`.ExponentialMovingWindow` now accepts ``np.timedelta64`` (:issue:`47003`)
- :class:`.DataError`, :class:`.SpecificationError`, :class:`.SettingWithCopyError`, :class:`.SettingWithCopyWarning`, :class:`.NumExprClobberingError`, :class:`.UndefinedVariableError`, :class:`.IndexingError`, :class:`.PyperclipException`, :class:`.PyperclipWindowsException`, :class:`.CSSWarning`, :class:`.PossibleDataLossError`, :class:`.ClosedFileError`, :class:`.IncompatibilityWarning`, :class:`.AttributeConflictWarning`, :class:`.DatabaseError`, :class:`.PossiblePrecisionLoss`, :class:`.ValueLabelTypeMismatch`, :class:`.InvalidColumnName`, and :class:`.CategoricalConversionWarning` are now exposed in ``pandas.errors`` (:issue:`27656`)
@@ -1014,7 +1013,6 @@ Performance improvements
- Performance improvement in :meth:`Series.to_sql` and :meth:`DataFrame.to_sql` (:class:`SQLiteTable`) when processing time arrays. (:issue:`44764`)
- Performance improvement to :func:`read_sas` (:issue:`47404`)
- Performance improvement in ``argmax`` and ``argmin`` for :class:`arrays.SparseArray` (:issue:`34197`)
--
.. ---------------------------------------------------------------------------
.. _whatsnew_150.bug_fixes:
@@ -1041,19 +1039,16 @@ Datetimelike
- Bug in :class:`.DatetimeArray` construction when passed another :class:`.DatetimeArray` and ``freq=None`` incorrectly inferring the freq from the given array (:issue:`47296`)
- Bug in :func:`to_datetime` where ``OutOfBoundsDatetime`` would be thrown even if ``errors=coerce`` if there were more than 50 rows (:issue:`45319`)
- Bug when adding a :class:`DateOffset` to a :class:`Series` would not add the ``nanoseconds`` field (:issue:`47856`)
--
Timedelta
^^^^^^^^^
- Bug in :func:`astype_nansafe` astype("timedelta64[ns]") fails when np.nan is included (:issue:`45798`)
- Bug in constructing a :class:`Timedelta` with a ``np.timedelta64`` object and a ``unit`` sometimes silently overflowing and returning incorrect results instead of raising ``OutOfBoundsTimedelta`` (:issue:`46827`)
- Bug in constructing a :class:`Timedelta` from a large integer or float with ``unit="W"`` silently overflowing and returning incorrect results instead of raising ``OutOfBoundsTimedelta`` (:issue:`47268`)
--
Time Zones
^^^^^^^^^^
- Bug in :class:`Timestamp` constructor raising when passed a ``ZoneInfo`` tzinfo object (:issue:`46425`)
--
Numeric
^^^^^^^
@@ -1078,13 +1073,11 @@ Conversion
- Bug in :meth:`DataFrame.apply` that returns a :class:`DataFrame` instead of a :class:`Series` when applied to an empty :class:`DataFrame` and ``axis=1`` (:issue:`39111`)
- Bug when inferring the dtype from an iterable that is *not* a NumPy ``ndarray`` consisting of all NumPy unsigned integer scalars did not result in an unsigned integer dtype (:issue:`47294`)
- Bug in :meth:`DataFrame.eval` when pandas objects (e.g. ``'Timestamp'``) were column names (:issue:`44603`)
--
Strings
^^^^^^^
- Bug in :meth:`str.startswith` and :meth:`str.endswith` when using other series as parameter _pat_. Now raises ``TypeError`` (:issue:`3485`)
- Bug in :meth:`Series.str.zfill` when strings contain leading signs, padding '0' before the sign character rather than after as ``str.zfill`` from standard library (:issue:`20868`)
--
Interval
^^^^^^^^
@@ -1192,7 +1185,6 @@ Period
- Bug in adding ``np.timedelta64("NaT", "ns")`` to a :class:`Period` with a timedelta-like freq incorrectly raising ``IncompatibleFrequency`` instead of returning ``NaT`` (:issue:`47196`)
- Bug in adding an array of integers to an array with :class:`PeriodDtype` giving incorrect results when ``dtype.freq.n > 1`` (:issue:`47209`)
- Bug in subtracting a :class:`Period` from an array with :class:`PeriodDtype` returning incorrect results instead of raising ``OverflowError`` when the operation overflows (:issue:`47538`)
--
Plotting
^^^^^^^^
@@ -1236,7 +1228,6 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrameGroupBy.resample` raises ``KeyError`` when getting the result from a key list which misses the resample key (:issue:`47362`)
- Bug in :meth:`DataFrame.groupby` would lose index columns when the DataFrame is empty for transforms, like fillna (:issue:`47787`)
- Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby` with ``dropna=False`` and ``sort=False`` would put any null groups at the end instead the order that they are encountered (:issue:`46584`)
--
Reshaping
^^^^^^^^^
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 70c60401f29fb..e14b30e2b71b4 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -46,7 +46,7 @@ Previously it was only possible to use ``int64``, ``uint64`` & ``float64`` dtype
In [3]: pd.Index([1, 2, 3], dtype=np.float32)
Out[3]: Float64Index([1.0, 2.0, 3.0], dtype="float64")
-:class:`Int64Index`, :class:`UInt64Index` & :class:`Float64Index` were depreciated in pandas
+:class:`Int64Index`, :class:`UInt64Index` & :class:`Float64Index` were deprecated in pandas
version 1.4 and have now been removed. Instead :class:`Index` should be used directly, and
can it now take all numpy numeric dtypes, i.e.
``int8``/ ``int16``/``int32``/``int64``/``uint8``/``uint16``/``uint32``/``uint64``/``float32``/``float64`` dtypes:
@@ -57,7 +57,7 @@ can it now take all numpy numeric dtypes, i.e.
pd.Index([1, 2, 3], dtype=np.uint16)
pd.Index([1, 2, 3], dtype=np.float32)
-The ability for ``Index`` to hold the numpy numeric dtypes has meant some changes in Pandas
+The ability for :class:`Index` to hold the numpy numeric dtypes has meant some changes in Pandas
functionality. In particular, operations that previously were forced to create 64-bit indexes,
can now create indexes with lower bit sizes, e.g. 32-bit indexes.
@@ -246,11 +246,15 @@ Copy-on-Write improvements
can never update the original Series or DataFrame. Therefore, an informative
error is raised to the user instead of silently doing nothing (:issue:`49467`)
-Copy-on-Write can be enabled through
+Copy-on-Write can be enabled through one of
.. code-block:: python
pd.set_option("mode.copy_on_write", True)
+
+
+.. code-block:: python
+
pd.options.mode.copy_on_write = True
Alternatively, copy on write can be enabled locally through:
@@ -281,7 +285,7 @@ Other enhancements
- Added ``name`` parameter to :meth:`IntervalIndex.from_breaks`, :meth:`IntervalIndex.from_arrays` and :meth:`IntervalIndex.from_tuples` (:issue:`48911`)
- Improve exception message when using :func:`.testing.assert_frame_equal` on a :class:`DataFrame` to include the column that is compared (:issue:`50323`)
- Improved error message for :func:`merge_asof` when join-columns were duplicated (:issue:`50102`)
-- Added support for extension array dtypes to :func:`get_dummies` (:func:`32430`)
+- Added support for extension array dtypes to :func:`get_dummies` (:issue:`32430`)
- Added :meth:`Index.infer_objects` analogous to :meth:`Series.infer_objects` (:issue:`50034`)
- Added ``copy`` parameter to :meth:`Series.infer_objects` and :meth:`DataFrame.infer_objects`, passing ``False`` will avoid making copies for series or columns that are already non-object or where no better dtype can be inferred (:issue:`50096`)
- :meth:`DataFrame.plot.hist` now recognizes ``xlabel`` and ``ylabel`` arguments (:issue:`49793`)
@@ -687,18 +691,18 @@ In the past, :func:`to_datetime` guessed the format for each element independent
*Old behavior*:
- .. code-block:: ipython
+.. code-block:: ipython
- In [1]: ser = pd.Series(['13-01-2000', '12-01-2000'])
- In [2]: pd.to_datetime(ser)
- Out[2]:
- 0 2000-01-13
- 1 2000-12-01
- dtype: datetime64[ns]
+ In [1]: ser = pd.Series(['13-01-2000', '12-01-2000'])
+ In [2]: pd.to_datetime(ser)
+ Out[2]:
+ 0 2000-01-13
+ 1 2000-12-01
+ dtype: datetime64[ns]
*New behavior*:
- .. ipython:: python
+.. ipython:: python
:okwarning:
ser = pd.Series(['13-01-2000', '12-01-2000'])
@@ -736,7 +740,7 @@ Other API changes
- :func:`pandas.testing.assert_index_equal` with parameter ``exact="equiv"`` now considers two indexes equal when both are either a :class:`RangeIndex` or :class:`Index` with an ``int64`` dtype. Previously it meant either a :class:`RangeIndex` or a :class:`Int64Index` (:issue:`51098`)
- :meth:`Series.unique` with dtype "timedelta64[ns]" or "datetime64[ns]" now returns :class:`TimedeltaArray` or :class:`DatetimeArray` instead of ``numpy.ndarray`` (:issue:`49176`)
- :func:`to_datetime` and :class:`DatetimeIndex` now allow sequences containing both ``datetime`` objects and numeric entries, matching :class:`Series` behavior (:issue:`49037`, :issue:`50453`)
-- :func:`api.dtypes.is_string_dtype` now only returns ``True`` for array-likes with ``dtype=object`` when the elements are inferred to be strings (:issue:`15585`)
+- :func:`pandas.api.types.is_string_dtype` now only returns ``True`` for array-likes with ``dtype=object`` when the elements are inferred to be strings (:issue:`15585`)
- Passing a sequence containing ``datetime`` objects and ``date`` objects to :class:`Series` constructor will return with ``object`` dtype instead of ``datetime64[ns]`` dtype, consistent with :class:`Index` behavior (:issue:`49341`)
- Passing strings that cannot be parsed as datetimes to :class:`Series` or :class:`DataFrame` with ``dtype="datetime64[ns]"`` will raise instead of silently ignoring the keyword and returning ``object`` dtype (:issue:`24435`)
- Passing a sequence containing a type that cannot be converted to :class:`Timedelta` to :func:`to_timedelta` or to the :class:`Series` or :class:`DataFrame` constructor with ``dtype="timedelta64[ns]"`` or to :class:`TimedeltaIndex` now raises ``TypeError`` instead of ``ValueError`` (:issue:`49525`)
@@ -779,7 +783,7 @@ Deprecations
- :meth:`Index.is_numeric` has been deprecated. Use :func:`pandas.api.types.is_any_real_numeric_dtype` instead (:issue:`50042`,:issue:`51152`)
- :meth:`Index.is_categorical` has been deprecated. Use :func:`pandas.api.types.is_categorical_dtype` instead (:issue:`50042`)
- :meth:`Index.is_object` has been deprecated. Use :func:`pandas.api.types.is_object_dtype` instead (:issue:`50042`)
-- :meth:`Index.is_interval` has been deprecated. Use :func:`pandas.api.types.is_intterval_dtype` instead (:issue:`50042`)
+- :meth:`Index.is_interval` has been deprecated. Use :func:`pandas.api.types.is_interval_dtype` instead (:issue:`50042`)
- Deprecated ``all`` and ``any`` reductions with ``datetime64`` and :class:`DatetimeTZDtype` dtypes, use e.g. ``(obj != pd.Timestamp(0), tz=obj.tz).all()`` instead (:issue:`34479`)
- Deprecated unused arguments ``*args`` and ``**kwargs`` in :class:`Resampler` (:issue:`50977`)
- Deprecated calling ``float`` or ``int`` on a single element :class:`Series` to return a ``float`` or ``int`` respectively. Extract the element before calling ``float`` or ``int`` instead (:issue:`51101`)
@@ -1162,7 +1166,7 @@ Numeric
- Bug in DataFrame reduction methods (e.g. :meth:`DataFrame.sum`) with object dtype, ``axis=1`` and ``numeric_only=False`` would not be coerced to float (:issue:`49551`)
- Bug in :meth:`DataFrame.sem` and :meth:`Series.sem` where an erroneous ``TypeError`` would always raise when using data backed by an :class:`ArrowDtype` (:issue:`49759`)
- Bug in :meth:`Series.__add__` casting to object for list and masked :class:`Series` (:issue:`22962`)
-- Bug in :meth:`query` with ``engine="numexpr"`` and column names are ``min`` or ``max`` would raise a ``TypeError`` (:issue:`50937`)
+- Bug in :meth:`DataFrame.query` with ``engine="numexpr"`` and column names are ``min`` or ``max`` would raise a ``TypeError`` (:issue:`50937`)
Conversion
^^^^^^^^^^
@@ -1182,7 +1186,7 @@ Conversion
Strings
^^^^^^^
-- Bug in :func:`pandas.api.dtypes.is_string_dtype` that would not return ``True`` for :class:`StringDtype` or :class:`ArrowDtype` with ``pyarrow.string()`` (:issue:`15585`)
+- Bug in :func:`pandas.api.types.is_string_dtype` that would not return ``True`` for :class:`StringDtype` or :class:`ArrowDtype` with ``pyarrow.string()`` (:issue:`15585`)
- Bug in converting string dtypes to "datetime64[ns]" or "timedelta64[ns]" incorrectly raising ``TypeError`` (:issue:`36153`)
-
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 19c17fd0a4358..95f35eabb342e 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1148,8 +1148,9 @@ def copy( # type: ignore[override]
name=None,
):
"""
- Make a copy of this object. Names, dtype, levels and codes can be
- passed and will be set on new copy.
+ Make a copy of this object.
+
+ Names, dtype, levels and codes can be passed and will be set on new copy.
Parameters
----------
@@ -1167,6 +1168,16 @@ def copy( # type: ignore[override]
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
+
+ Examples
+ --------
+ >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
+ >>> mi
+ MultiIndex([('a', 'b', 'c')],
+ )
+ >>> mi.copy()
+ MultiIndex([('a', 'b', 'c')],
+ )
"""
names = self._validate_names(name=name, names=names, deep=deep)
keep_id = not deep
@@ -2085,7 +2096,7 @@ def take(
def append(self, other):
"""
- Append a collection of Index options together
+ Append a collection of Index options together.
Parameters
----------
@@ -2093,7 +2104,18 @@ def append(self, other):
Returns
-------
- appended : Index
+ Index
+ The combined index.
+
+ Examples
+ --------
+ >>> mi = pd.MultiIndex.from_arrays([['a'], ['b']])
+ >>> mi
+ MultiIndex([('a', 'b')],
+ )
+ >>> mi.append(mi)
+ MultiIndex([('a', 'b'), ('a', 'b')],
+ )
"""
if not isinstance(other, (list, tuple)):
other = [other]
@@ -3397,18 +3419,29 @@ def _reorder_indexer(
def truncate(self, before=None, after=None) -> MultiIndex:
"""
- Slice index between two labels / tuples, return new MultiIndex
+ Slice index between two labels / tuples, return new MultiIndex.
Parameters
----------
before : label or tuple, can be partial. Default None
- None defaults to start
+ None defaults to start.
after : label or tuple, can be partial. Default None
- None defaults to end
+ None defaults to end.
Returns
-------
- truncated : MultiIndex
+ MultiIndex
+ The truncated MultiIndex.
+
+ Examples
+ --------
+ >>> mi = pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['x', 'y', 'z']])
+ >>> mi
+ MultiIndex([('a', 'x'), ('b', 'y'), ('c', 'z')],
+ )
+ >>> mi.truncate(before='a', after='b')
+ MultiIndex([('a', 'x'), ('b', 'y')],
+ )
"""
if after and before and after < before:
raise ValueError("after < before")
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 2a0d3a01d3383..79d174db5c0a7 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -1413,14 +1413,14 @@ def inspect_excel_format(
class ExcelFile:
"""
- Class for parsing tabular excel sheets into DataFrame objects.
+ Class for parsing tabular Excel sheets into DataFrame objects.
See read_excel for more documentation.
Parameters
----------
path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath),
- a file-like object, xlrd workbook or openpyxl workbook.
+ A file-like object, xlrd workbook or openpyxl workbook.
If a string or path object, expected to be a path to a
.xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.
engine : str, default None
@@ -1448,6 +1448,7 @@ class ExcelFile:
`pyxlsb <https://pypi.org/project/pyxlsb/>`_ will be used.
.. versionadded:: 1.3.0
+
- Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is installed,
then ``openpyxl`` will be used.
- Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised.
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 615d03589047c..c3d7cb5df717f 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -27,21 +27,20 @@
@doc(storage_options=_shared_docs["storage_options"])
class ODFReader(BaseExcelReader):
- """
- Read tables out of OpenDocument formatted files.
-
- Parameters
- ----------
- filepath_or_buffer : str, path to be parsed or
- an open readable stream.
- {storage_options}
- """
-
def __init__(
self,
filepath_or_buffer: FilePath | ReadBuffer[bytes],
storage_options: StorageOptions = None,
) -> None:
+ """
+ Read tables out of OpenDocument formatted files.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path to be parsed or
+ an open readable stream.
+ {storage_options}
+ """
import_optional_dependency("odf")
super().__init__(filepath_or_buffer, storage_options=storage_options)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
The MultiIndex methods have dedicated docstrings, so we should render docs for them | https://api.github.com/repos/pandas-dev/pandas/pulls/51202 | 2023-02-06T20:01:55Z | 2023-02-09T17:36:31Z | 2023-02-09T17:36:31Z | 2023-02-09T17:38:06Z |
REF: de-duplicate wrap_agged_manager/wrap_aggregate_result | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index e6f2e300c5567..cdb5dddf03a64 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -163,15 +163,7 @@ def prop(self):
class SeriesGroupBy(GroupBy[Series]):
def _wrap_agged_manager(self, mgr: Manager) -> Series:
- if mgr.ndim == 1:
- mgr = cast(SingleManager, mgr)
- single = mgr
- else:
- mgr = cast(Manager2D, mgr)
- single = mgr.iget(0)
- ser = self.obj._constructor(single, name=self.obj.name)
- # NB: caller is responsible for setting ser.index
- return ser
+ return self.obj._constructor(mgr, name=self.obj.name)
def _get_data_to_aggregate(
self, *, numeric_only: bool = False, name: str | None = None
@@ -1902,25 +1894,7 @@ def _indexed_output_to_ndframe(
return result
def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame:
- if not self.as_index:
- # GH 41998 - empty mgr always gets index of length 0
- rows = mgr.shape[1] if mgr.shape[0] > 0 else 0
- index = Index(range(rows))
- mgr.set_axis(1, index)
- result = self.obj._constructor(mgr)
-
- result = self._insert_inaxis_grouper(result)
- result = result._consolidate()
- else:
- index = self.grouper.result_index
- mgr.set_axis(1, index)
- result = self.obj._constructor(mgr)
-
- if self.axis == 1:
- result = result.T
-
- # Note: we really only care about inferring numeric dtypes here
- return self._reindex_output(result).infer_objects(copy=False)
+ return self.obj._constructor(mgr)
def _iterate_column_groupbys(self, obj: DataFrame | Series):
for i, colname in enumerate(obj.columns):
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 26e1105ea879d..5f5bb1c8833da 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1501,7 +1501,6 @@ def _cython_agg_general(
# that goes through SeriesGroupBy
data = self._get_data_to_aggregate(numeric_only=numeric_only, name=how)
- is_ser = data.ndim == 1
def array_func(values: ArrayLike) -> ArrayLike:
try:
@@ -1523,16 +1522,12 @@ def array_func(values: ArrayLike) -> ArrayLike:
return result
new_mgr = data.grouped_reduce(array_func)
-
res = self._wrap_agged_manager(new_mgr)
- if is_ser:
- if self.as_index:
- res.index = self.grouper.result_index
- else:
- res = self._insert_inaxis_grouper(res)
- return self._reindex_output(res)
- else:
- return res
+ out = self._wrap_aggregated_output(res)
+ if data.ndim == 2:
+ # TODO: don't special-case DataFrame vs Series
+ out = out.infer_objects(copy=False)
+ return out
def _cython_transform(
self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs
@@ -1793,19 +1788,14 @@ def hfunc(bvalues: ArrayLike) -> ArrayLike:
return counted
new_mgr = data.grouped_reduce(hfunc)
+ new_obj = self._wrap_agged_manager(new_mgr)
# If we are grouping on categoricals we want unobserved categories to
# return zero, rather than the default of NaN which the reindexing in
- # _wrap_agged_manager() returns. GH 35028
+ # _wrap_aggregated_output() returns. GH 35028
# e.g. test_dataframe_groupby_on_2_categoricals_when_observed_is_false
with com.temp_setattr(self, "observed", True):
- result = self._wrap_agged_manager(new_mgr)
-
- if result.ndim == 1:
- if self.as_index:
- result.index = self.grouper.result_index
- else:
- result = self._insert_inaxis_grouper(result)
+ result = self._wrap_aggregated_output(new_obj)
return self._reindex_output(result, fill_value=0)
@@ -2790,9 +2780,7 @@ def blk_func(values: ArrayLike) -> ArrayLike:
mgr = obj._mgr
res_mgr = mgr.apply(blk_func)
- new_obj = obj._constructor(res_mgr)
- if isinstance(new_obj, Series):
- new_obj.name = obj.name
+ new_obj = self._wrap_agged_manager(res_mgr)
if self.axis == 1:
# Only relevant for DataFrameGroupBy
@@ -3197,15 +3185,10 @@ def blk_func(values: ArrayLike) -> ArrayLike:
out = out.reshape(ncols, ngroups * nqs)
return post_processor(out, inference, result_mask, orig_vals)
- obj = self._obj_with_exclusions
- is_ser = obj.ndim == 1
data = self._get_data_to_aggregate(numeric_only=numeric_only, name="quantile")
res_mgr = data.grouped_reduce(blk_func)
- if is_ser:
- res = self._wrap_agged_manager(res_mgr)
- else:
- res = obj._constructor(res_mgr)
+ res = self._wrap_agged_manager(res_mgr)
if orig_scalar:
# Avoid expensive MultiIndex construction
@@ -3652,19 +3635,12 @@ def blk_func(values: ArrayLike) -> ArrayLike:
return result.T
- obj = self._obj_with_exclusions
-
# Operate block-wise instead of column-by-column
- is_ser = obj.ndim == 1
mgr = self._get_data_to_aggregate(numeric_only=numeric_only, name=how)
res_mgr = mgr.grouped_reduce(blk_func)
- if is_ser:
- out = self._wrap_agged_manager(res_mgr)
- else:
- out = obj._constructor(res_mgr)
-
+ out = self._wrap_agged_manager(res_mgr)
return self._wrap_aggregated_output(out)
@final
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py
index 82316806d3d47..76da973e110bf 100644
--- a/pandas/core/internals/array_manager.py
+++ b/pandas/core/internals/array_manager.py
@@ -947,9 +947,10 @@ def grouped_reduce(self: T, func: Callable) -> T:
result_indices.append(i)
if len(result_arrays) == 0:
- index = Index([None]) # placeholder
+ nrows = 0
else:
- index = Index(range(result_arrays[0].shape[0]))
+ nrows = result_arrays[0].shape[0]
+ index = Index(range(nrows))
columns = self.items
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index ff80cccaa20d3..8a4fa4c10bf5f 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1538,9 +1538,10 @@ def grouped_reduce(self: T, func: Callable) -> T:
result_blocks = extend_blocks(applied, result_blocks)
if len(result_blocks) == 0:
- index = Index([None]) # placeholder
+ nrows = 0
else:
- index = Index(range(result_blocks[0].values.shape[-1]))
+ nrows = result_blocks[0].values.shape[-1]
+ index = Index(range(nrows))
return type(self).from_blocks(result_blocks, [self.axes[0], index])
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51201 | 2023-02-06T19:26:17Z | 2023-02-06T23:12:14Z | 2023-02-06T23:12:14Z | 2023-02-06T23:12:54Z |
DOC: fix-up numpy numeric Index docs | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 802e2e6a488d0..e3dd67175fdc2 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -65,15 +65,14 @@ Below is a possibly non-exhaustive list of changes:
1. Instantiating using a numpy numeric array now follows the dtype of the numpy array.
Previously, all indexes created from numpy numeric arrays were forced to 64-bit. Now,
- the index dtype follows the dtype of the numpy array. For example, it would for all
- signed integer arrays previously return an index with ``int64`` dtype, but will now
- reuse the dtype of the supplied numpy array. So ``Index(np.array([1, 2, 3]))`` will be ``int32`` on 32-bit systems.
+ for example, ``Index(np.array([1, 2, 3]))`` will be ``int32`` on 32-bit systems, where
+ it previously would have been ``int64``` even on 32-bit systems.
Instantiating :class:`Index` using a list of numbers will still return 64bit dtypes,
e.g. ``Index([1, 2, 3])`` will have a ``int64`` dtype, which is the same as previously.
-2. The various numeric datetime attributes of :class:`DateTimeIndex` (:attr:`~Date_TimeIndex.day`,
- :attr:`~DateTimeIndex.month`, :attr:`~DateTimeIndex.year` etc.) were previously in of
+2. The various numeric datetime attributes of :class:`DatetimeIndex` (:attr:`~DatetimeIndex.day`,
+ :attr:`~DatetimeIndex.month`, :attr:`~DatetimeIndex.year` etc.) were previously in of
dtype ``int64``, while they were ``int32`` for :class:`DatetimeArray`. They are now
- ``int32`` on ``DateTimeIndex`` also:
+ ``int32`` on ``DatetimeIndex`` also:
.. ipython:: python
@@ -92,7 +91,7 @@ Below is a possibly non-exhaustive list of changes:
([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
)
ser = pd.Series.sparse.from_coo(A)
- ser.index.dtype
+ ser.index.dtypes
4. :class:`Index` cannot be instantiated using a float16 dtype. Previously instantiating
an :class:`Index` using dtype ``float16`` resulted in a :class:`Float64Index` with a
| Minor doc corrections related to #42717. | https://api.github.com/repos/pandas-dev/pandas/pulls/51200 | 2023-02-06T18:32:27Z | 2023-02-06T19:49:32Z | 2023-02-06T19:49:32Z | 2023-02-06T21:00:11Z |
DOC Correcting EX02 errors | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index cb66a1f350e8f..62a5d81fc8f73 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -597,10 +597,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.api.types.is_datetime64_dtype \
pandas.api.types.is_datetime64_ns_dtype \
pandas.api.types.is_datetime64tz_dtype \
- pandas.api.types.is_dict_like \
- pandas.api.types.is_file_like \
pandas.api.types.is_float_dtype \
- pandas.api.types.is_hashable \
pandas.api.types.is_int64_dtype \
pandas.api.types.is_integer_dtype \
pandas.api.types.is_interval_dtype \
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index d607ea994543d..401b2c80b2c18 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -119,6 +119,7 @@ def is_file_like(obj) -> bool:
Examples
--------
>>> import io
+ >>> from pandas.api.types import is_file_like
>>> buffer = io.StringIO("data")
>>> is_file_like(buffer)
True
@@ -275,6 +276,7 @@ def is_dict_like(obj) -> bool:
Examples
--------
+ >>> from pandas.api.types import is_dict_like
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
@@ -336,6 +338,7 @@ def is_hashable(obj) -> bool:
Examples
--------
>>> import collections
+ >>> from pandas.api.types import is_hashable
>>> a = ([],)
>>> isinstance(a, collections.abc.Hashable)
True
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Towards https://github.com/pandas-dev/pandas/issues/37875 and https://github.com/pandas-dev/pandas/issues/27977 | https://api.github.com/repos/pandas-dev/pandas/pulls/51198 | 2023-02-06T17:52:26Z | 2023-02-06T19:40:00Z | 2023-02-06T19:40:00Z | 2023-02-07T11:01:11Z |
BUG: ensure reindex / getitem to select columns properly copies data for extension dtypes | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index a37503460901b..75aec514031b4 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1395,6 +1395,7 @@ ExtensionArray
- Bug in :meth:`api.types.is_integer_dtype`, :meth:`api.types.is_unsigned_integer_dtype`, :meth:`api.types.is_signed_integer_dtype`, :meth:`api.types.is_float_dtype` where a custom :class:`ExtensionDtype` would not return ``True`` if ``kind`` returned the corresponding NumPy type (:issue:`50667`)
- Bug in :class:`Series` constructor unnecessarily overflowing for nullable unsigned integer dtypes (:issue:`38798`, :issue:`25880`)
- Bug in setting non-string value into ``StringArray`` raising ``ValueError`` instead of ``TypeError`` (:issue:`49632`)
+- Bug in :meth:`DataFrame.reindex` not honoring the default ``copy=True`` keyword in case of columns with ExtensionDtype (and as a result also selecting multiple columns with getitem (``[]``) didn't correctly result in a copy) (:issue:`51197`)
Styler
^^^^^^
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index eb4c2d642862b..d86507a421ebc 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -830,7 +830,7 @@ def _slice_take_blocks_ax0(
# only one item and each mgr loc is a copy of that single
# item.
for mgr_loc in mgr_locs:
- newblk = blk.copy(deep=False)
+ newblk = blk.copy(deep=not only_slice)
newblk.mgr_locs = BlockPlacement(slice(mgr_loc, mgr_loc + 1))
blocks.append(newblk)
diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py
index f455213bd436b..44cbd9b7b38e8 100644
--- a/pandas/tests/frame/methods/test_reindex.py
+++ b/pandas/tests/frame/methods/test_reindex.py
@@ -134,6 +134,21 @@ def test_reindex_copies(self):
result2 = df.reindex(columns=cols, index=df.index, copy=True)
assert not np.shares_memory(result2[0]._values, df[0]._values)
+ def test_reindex_copies_ea(self):
+ # https://github.com/pandas-dev/pandas/pull/51197
+ # also ensure to honor copy keyword for ExtensionDtypes
+ N = 10
+ df = DataFrame(np.random.randn(N * 10, N), dtype="Float64")
+ cols = np.arange(N)
+ np.random.shuffle(cols)
+
+ result = df.reindex(columns=cols, copy=True)
+ assert not np.shares_memory(result[0].array._data, df[0].array._data)
+
+ # pass both columns and index
+ result2 = df.reindex(columns=cols, index=df.index, copy=True)
+ assert not np.shares_memory(result2[0].array._data, df[0].array._data)
+
@td.skip_array_manager_not_yet_implemented
def test_reindex_date_fill_value(self):
# passing date to dt64 is deprecated; enforced in 2.0 to cast to object
| I encountered this while writing more tests for Copy-on-Write. Currently, the _general_ rule is that selecting columns with a list-like indexer using getitem gives a copy:
```python
df = pd.DataFrame(np.random.randn(10, 4), columns=['a', 'b', 'c', 'd'])
subset = df[["a", "b"]]
# subset is a copy
subset.iloc[0, 0] = 0
assert df.iloc[0, 0] != 0
```
However, that doesn't seem to be the case when the columns we select are extension dtypes. When using `dtype="Float64"` in the above example, the original `df` gets updated because subset isn't a copy.
While I am not sure this is an explicitly documented rule (AFAIK this is de-facto behaviour, and only described as such in the discussions related to copy/view and CoW), I do think it would be expected that extension dtypes behave the same as numpy dtypes on this front.
(it also makes writing tests for copy/view behaviour harder if the behaviour doesn't only change for CoW or not, but also depending on numpy vs extension dtypes. This is where I encountered the issue)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51197 | 2023-02-06T15:16:55Z | 2023-02-15T10:38:12Z | 2023-02-15T10:38:12Z | 2023-02-15T10:38:16Z |
BUG: loc.setitem modifying values with empty indexer | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index ef6bb1340dc69..de3941682f539 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1200,6 +1200,7 @@ Indexing
- Bug in :meth:`Series.loc` raising error for out of bounds end of slice indexer (:issue:`50161`)
- Bug in :meth:`DataFrame.loc` raising ``ValueError`` with ``bool`` indexer and :class:`MultiIndex` (:issue:`47687`)
- Bug in :meth:`DataFrame.loc` raising ``IndexError`` when setting values for a pyarrow-backed column with a non-scalar indexer (:issue:`50085`)
+- Bug in :meth:`DataFrame.loc` modifying object when setting incompatible value with an empty indexer (:issue:`45981`)
- Bug in :meth:`DataFrame.__setitem__` raising ``ValueError`` when right hand side is :class:`DataFrame` with :class:`MultiIndex` columns (:issue:`49121`)
- Bug in :meth:`DataFrame.reindex` casting dtype to ``object`` when :class:`DataFrame` has single extension array column when re-indexing ``columns`` and ``index`` (:issue:`48190`)
- Bug in :meth:`DataFrame.iloc` raising ``IndexError`` when indexer is a :class:`Series` with numeric extension array dtype (:issue:`49521`)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index aaa5134ed1aaa..6713ccd417dd4 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -316,6 +316,18 @@ def is_null_slice(obj) -> bool:
)
+def is_empty_slice(obj) -> bool:
+ """
+ We have an empty slice, e.g. no values are selected.
+ """
+ return (
+ isinstance(obj, slice)
+ and obj.start is not None
+ and obj.stop is not None
+ and obj.start == obj.stop
+ )
+
+
def is_true_slices(line) -> list[bool]:
"""
Find non-trivial slices in "line": return a list of booleans with same length.
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 26b5a4077b0ff..8c3e56b686e79 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -2017,7 +2017,13 @@ def _setitem_single_column(self, loc: int, value, plane_indexer) -> None:
is_full_setter = com.is_null_slice(pi) or com.is_full_slice(pi, len(self.obj))
- if is_full_setter:
+ is_null_setter = com.is_empty_slice(pi) or is_array_like(pi) and len(pi) == 0
+
+ if is_null_setter:
+ # no-op, don't cast dtype later
+ return
+
+ elif is_full_setter:
try:
self.obj._mgr.column_setitem(
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 387272eb807e8..f214ade0a31aa 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -382,6 +382,16 @@ def test_getitem_bool_int_key():
ser.loc[0]
+@pytest.mark.parametrize("val", [{}, {"b": "x"}])
+@pytest.mark.parametrize("indexer", [[], [False, False], slice(0, -1), np.array([])])
+def test_setitem_empty_indexer(indexer, val):
+ # GH#45981
+ df = DataFrame({"a": [1, 2], **val})
+ expected = df.copy()
+ df.loc[indexer] = 1.5
+ tm.assert_frame_equal(df, expected)
+
+
class TestDeprecatedIndexers:
@pytest.mark.parametrize("key", [{1}, {1: 1}])
def test_getitem_dict_and_set_deprecated(self, key):
| - [x] closes #45981 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51193 | 2023-02-06T11:43:38Z | 2023-02-06T18:42:11Z | 2023-02-06T18:42:10Z | 2023-02-16T16:31:40Z |
BUG: setting non-string value into StringArray raises ValueError instead of TypeError | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index ef6bb1340dc69..64f21e027e048 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1324,6 +1324,7 @@ ExtensionArray
- Bug in :meth:`api.types.is_numeric_dtype` where a custom :class:`ExtensionDtype` would not return ``True`` if ``_is_numeric`` returned ``True`` (:issue:`50563`)
- Bug in :meth:`api.types.is_integer_dtype`, :meth:`api.types.is_unsigned_integer_dtype`, :meth:`api.types.is_signed_integer_dtype`, :meth:`api.types.is_float_dtype` where a custom :class:`ExtensionDtype` would not return ``True`` if ``kind`` returned the corresponding NumPy type (:issue:`50667`)
- Bug in :class:`Series` constructor unnecessarily overflowing for nullable unsigned integer dtypes (:issue:`38798`, :issue:`25880`)
+- Bug in setting non-string value into ``StringArray`` raising ``ValueError`` instead of ``TypeError`` (:issue:`49632`)
Styler
^^^^^^
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 9b26db07fc28f..952fd3a19daae 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -414,14 +414,14 @@ def __setitem__(self, key, value):
if isna(value):
value = libmissing.NA
elif not isinstance(value, str):
- raise ValueError(
+ raise TypeError(
f"Cannot set non-string value '{value}' into a StringArray."
)
else:
if not is_array_like(value):
value = np.asarray(value, dtype=object)
if len(value) and not lib.is_string_array(value, skipna=True):
- raise ValueError("Must provide strings.")
+ raise TypeError("Must provide strings.")
value[isna(value)] = libmissing.NA
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 4aebe61412866..92385b64f5c70 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -163,13 +163,13 @@ def _maybe_convert_setitem_value(self, value):
if isna(value):
value = None
elif not isinstance(value, str):
- raise ValueError("Scalar must be NA or str")
+ raise TypeError("Scalar must be NA or str")
else:
value = np.array(value, dtype=object, copy=True)
value[isna(value)] = None
for v in value:
if not (v is None or isinstance(v, str)):
- raise ValueError("Scalar must be NA or str")
+ raise TypeError("Scalar must be NA or str")
return super()._maybe_convert_setitem_value(value)
def isin(self, values) -> npt.NDArray[np.bool_]:
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index da5fc46c03d92..cced9cfe8cb20 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -53,14 +53,14 @@ def test_setitem_validates(cls):
msg = "Cannot set non-string value '10' into a StringArray."
else:
msg = "Scalar must be NA or str"
- with pytest.raises(ValueError, match=msg):
+ with pytest.raises(TypeError, match=msg):
arr[0] = 10
if cls is pd.arrays.StringArray:
msg = "Must provide strings."
else:
msg = "Scalar must be NA or str"
- with pytest.raises(ValueError, match=msg):
+ with pytest.raises(TypeError, match=msg):
arr[:] = np.array([1, 2])
@@ -403,12 +403,10 @@ def test_fillna_args(dtype, request):
tm.assert_extension_array_equal(res, expected)
if dtype.storage == "pyarrow":
- err = TypeError
msg = "Invalid value '1' for dtype string"
else:
- err = ValueError
msg = "Cannot set non-string value '1' into a StringArray."
- with pytest.raises(err, match=msg):
+ with pytest.raises(TypeError, match=msg):
arr.fillna(value=1)
@@ -574,7 +572,7 @@ def test_setitem_scalar_with_mask_validation(dtype):
msg = "Cannot set non-string value"
else:
msg = "Scalar must be NA or str"
- with pytest.raises(ValueError, match=msg):
+ with pytest.raises(TypeError, match=msg):
ser[mask] = 1
| - [x] closes #49632 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51191 | 2023-02-06T08:03:24Z | 2023-02-06T20:03:35Z | 2023-02-06T20:03:35Z | 2023-02-07T03:29:18Z |
CI, ENH: Check each minimum dependency is enforced in *.yaml and environment.yml | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f369fcabe3f01..b1028ea9f52c3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -313,7 +313,7 @@ repos:
entry: python scripts/generate_pip_deps_from_conda.py
files: ^(environment.yml|requirements-dev.txt)$
pass_filenames: false
- additional_dependencies: [pyyaml, toml]
+ additional_dependencies: [tomli, pyyaml]
- id: title-capitalization
name: Validate correct capitalization among titles in documentation
entry: python scripts/validate_rst_title_capitalization.py
@@ -391,10 +391,11 @@ repos:
types: [yaml]
- id: validate-min-versions-in-sync
name: Check minimum version of dependencies are aligned
- entry: python scripts/validate_min_versions_in_sync.py
+ entry: python -m scripts.validate_min_versions_in_sync
language: python
files: ^(ci/deps/actions-.*-minimum_versions\.yaml|pandas/compat/_optional\.py)$
- additional_dependencies: [tomli]
+ additional_dependencies: [tomli, pyyaml]
+ pass_filenames: false
- id: validate-errors-locations
name: Validate errors locations
description: Validate errors are in appropriate locations.
diff --git a/ci/deps/actions-310-numpydev.yaml b/ci/deps/actions-310-numpydev.yaml
index d1c4338f1806e..1a461319685d2 100644
--- a/ci/deps/actions-310-numpydev.yaml
+++ b/ci/deps/actions-310-numpydev.yaml
@@ -12,7 +12,7 @@ dependencies:
- pytest-cov
- pytest-xdist>=2.2.0
- hypothesis>=6.34.2
- - pytest-asyncio>=0.17
+ - pytest-asyncio>=0.17.0
# pandas dependencies
- python-dateutil
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml
index e2bfe6e57d216..64f9a3fd1ffbc 100644
--- a/ci/deps/actions-310.yaml
+++ b/ci/deps/actions-310.yaml
@@ -12,7 +12,7 @@ dependencies:
- pytest>=7.0.0
- pytest-cov
- pytest-xdist>=2.2.0
- - pytest-asyncio>=0.17
+ - pytest-asyncio>=0.17.0
- boto3
# required dependencies
@@ -21,36 +21,36 @@ dependencies:
- pytz
# optional dependencies
- - beautifulsoup4
+ - beautifulsoup4>=4.9.3
- blosc
- - bottleneck
- - brotlipy
- - fastparquet
- - fsspec
- - html5lib
- - hypothesis
- - gcsfs
- - jinja2
- - lxml
+ - bottleneck>=1.3.2
+ - brotlipy>=0.7.0
+ - fastparquet>=0.6.3
+ - fsspec>=2021.07.0
+ - html5lib>=1.1
+ - hypothesis>=6.34.2
+ - gcsfs>=2021.07.0
+ - jinja2>=3.0.0
+ - lxml>=4.6.3
- matplotlib>=3.6.1, <3.7.0
- - numba
- - numexpr
- - openpyxl<3.1.1
- - odfpy
- - pandas-gbq
- - psycopg2
- - pymysql
- - pytables
+ - numba>=0.53.1
+ - numexpr>=2.7.3
+ - openpyxl<3.1.1, >=3.0.7
+ - odfpy>=1.4.1
+ - pandas-gbq>=0.15.0
+ - psycopg2>=2.8.6
+ - pymysql>=1.0.2
+ - pytables>=3.6.1
- pyarrow
- - pyreadstat
- - python-snappy
- - pyxlsb
+ - pyreadstat>=1.1.2
+ - python-snappy>=0.6.0
+ - pyxlsb>=1.0.8
- s3fs>=2021.08.0
- - scipy
- - sqlalchemy
- - tabulate
+ - scipy>=1.7.1
+ - sqlalchemy>=1.4.16
+ - tabulate>=0.8.9
- tzdata>=2022a
- - xarray
- - xlrd
- - xlsxwriter
- - zstandard
+ - xarray>=0.21.0
+ - xlrd>=2.0.1
+ - xlsxwriter>=1.4.3
+ - zstandard>=0.15.2
diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml
index 237924b5c6f0b..d474df1e75655 100644
--- a/ci/deps/actions-311.yaml
+++ b/ci/deps/actions-311.yaml
@@ -9,10 +9,10 @@ dependencies:
- cython>=0.29.32
# test dependencies
- - pytest>=7.0
+ - pytest>=7.0.0
- pytest-cov
- pytest-xdist>=2.2.0
- - pytest-asyncio>=0.17
+ - pytest-asyncio>=0.17.0
- boto3
# required dependencies
@@ -21,36 +21,36 @@ dependencies:
- pytz
# optional dependencies
- - beautifulsoup4
+ - beautifulsoup4>=4.9.3
- blosc
- - bottleneck
- - brotlipy
- - fastparquet
- - fsspec
- - html5lib
- - hypothesis
- - gcsfs
- - jinja2
- - lxml
+ - bottleneck>=1.3.2
+ - brotlipy>=0.7.0
+ - fastparquet>=0.6.3
+ - fsspec>=2021.07.0
+ - html5lib>=1.1
+ - hypothesis>=6.34.2
+ - gcsfs>=2021.07.0
+ - jinja2>=3.0.0
+ - lxml>=4.6.3
- matplotlib>=3.6.1, <3.7.0
# - numba not compatible with 3.11
- - numexpr
- - openpyxl<3.1.1
- - odfpy
- - pandas-gbq
- - psycopg2
- - pymysql
+ - numexpr>=2.7.3
+ - openpyxl<3.1.1, >=3.0.7
+ - odfpy>=1.4.1
+ - pandas-gbq>=0.15.0
+ - psycopg2>=2.8.6
+ - pymysql>=1.0.2
# - pytables>=3.8.0 # first version that supports 3.11
- pyarrow
- - pyreadstat
- - python-snappy
- - pyxlsb
+ - pyreadstat>=1.1.2
+ - python-snappy>=0.6.0
+ - pyxlsb>=1.0.8
- s3fs>=2021.08.0
- - scipy
- - sqlalchemy
- - tabulate
+ - scipy>=1.7.1
+ - sqlalchemy>=1.4.16
+ - tabulate>=0.8.9
- tzdata>=2022a
- - xarray
- - xlrd
- - xlsxwriter
- - zstandard
+ - xarray>=0.21.0
+ - xlrd>=2.0.1
+ - xlsxwriter>=1.4.3
+ - zstandard>=0.15.2
diff --git a/ci/deps/actions-38-downstream_compat.yaml b/ci/deps/actions-38-downstream_compat.yaml
index 9b62b25a15740..a9cd4c93dd604 100644
--- a/ci/deps/actions-38-downstream_compat.yaml
+++ b/ci/deps/actions-38-downstream_compat.yaml
@@ -13,7 +13,7 @@ dependencies:
- pytest>=7.0.0
- pytest-cov
- pytest-xdist>=2.2.0
- - pytest-asyncio>=0.17
+ - pytest-asyncio>=0.17.0
- boto3
# required dependencies
@@ -22,37 +22,37 @@ dependencies:
- pytz
# optional dependencies
- - beautifulsoup4
+ - beautifulsoup4>=4.9.3
- blosc
- - brotlipy
- - bottleneck
- - fastparquet
- - fsspec
- - html5lib
- - hypothesis
- - gcsfs
- - jinja2
- - lxml
+ - brotlipy>=0.7.0
+ - bottleneck>=1.3.2
+ - fastparquet>=0.6.3
+ - fsspec>=2021.07.0
+ - html5lib>=1.1
+ - hypothesis>=6.34.2
+ - gcsfs>=2021.07.0
+ - jinja2>=3.0.0
+ - lxml>=4.6.3
- matplotlib>=3.6.1, <3.7.0
- - numba
- - numexpr
- - openpyxl<3.1.1
- - odfpy
- - psycopg2
+ - numba>=0.53.1
+ - numexpr>=2.7.3
+ - openpyxl<3.1.1, >=3.0.7
+ - odfpy>=1.4.1
+ - psycopg2>=2.8.6
- pyarrow
- - pymysql
- - pyreadstat
- - pytables
- - python-snappy
- - pyxlsb
+ - pymysql>=1.0.2
+ - pyreadstat>=1.1.2
+ - pytables>=3.6.1
+ - python-snappy>=0.6.0
+ - pyxlsb>=1.0.8
- s3fs>=2021.08.0
- - scipy
- - sqlalchemy
- - tabulate
- - xarray
- - xlrd
- - xlsxwriter
- - zstandard
+ - scipy>=1.7.1
+ - sqlalchemy>=1.4.16
+ - tabulate>=0.8.9
+ - xarray>=0.21.0
+ - xlrd>=2.0.1
+ - xlsxwriter>=1.4.3
+ - zstandard>=0.15.2
# downstream packages
- botocore
@@ -65,6 +65,6 @@ dependencies:
- statsmodels
- coverage
- pandas-datareader
- - pandas-gbq
+ - pandas-gbq>=0.15.0
- pyyaml
- py
diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml
index f3ff36a1b2ada..6877d7f14f66a 100644
--- a/ci/deps/actions-38-minimum_versions.yaml
+++ b/ci/deps/actions-38-minimum_versions.yaml
@@ -14,7 +14,7 @@ dependencies:
- pytest>=7.0.0
- pytest-cov
- pytest-xdist>=2.2.0
- - pytest-asyncio>=0.17
+ - pytest-asyncio>=0.17.0
- boto3
# required dependencies
diff --git a/ci/deps/actions-38.yaml b/ci/deps/actions-38.yaml
index 95bab9897ac63..ccde0f57f7bc4 100644
--- a/ci/deps/actions-38.yaml
+++ b/ci/deps/actions-38.yaml
@@ -12,7 +12,7 @@ dependencies:
- pytest>=7.0.0
- pytest-cov
- pytest-xdist>=2.2.0
- - pytest-asyncio>=0.17
+ - pytest-asyncio>=0.17.0
- boto3
# required dependencies
@@ -21,35 +21,35 @@ dependencies:
- pytz
# optional dependencies
- - beautifulsoup4
+ - beautifulsoup4>=4.9.3
- blosc
- - bottleneck
- - brotlipy
- - fastparquet
- - fsspec
- - html5lib
- - hypothesis
- - gcsfs
- - jinja2
- - lxml
+ - bottleneck>=1.3.2
+ - brotlipy>=0.7.0
+ - fastparquet>=0.6.3
+ - fsspec>=2021.07.0
+ - html5lib>=1.1
+ - hypothesis>=6.34.2
+ - gcsfs>=2021.07.0
+ - jinja2>=3.0.0
+ - lxml>=4.6.3
- matplotlib>=3.6.1, <3.7.0
- - numba
- - numexpr
- - openpyxl<3.1.1
- - odfpy
- - pandas-gbq
- - psycopg2
+ - numba>=0.53.1
+ - numexpr>=2.7.3
+ - openpyxl<3.1.1, >=3.0.7
+ - odfpy>=1.4.1
+ - pandas-gbq>=0.15.0
+ - psycopg2>=2.8.6
- pyarrow
- - pymysql
- - pyreadstat
- - pytables
- - python-snappy
- - pyxlsb
+ - pymysql>=1.0.2
+ - pyreadstat>=1.1.2
+ - pytables>=3.6.1
+ - python-snappy>=0.6.0
+ - pyxlsb>=1.0.8
- s3fs>=2021.08.0
- - scipy
- - sqlalchemy
- - tabulate
- - xarray
- - xlrd
- - xlsxwriter
- - zstandard
+ - scipy>=1.7.1
+ - sqlalchemy>=1.4.16
+ - tabulate>=0.8.9
+ - xarray>=0.21.0
+ - xlrd>=2.0.1
+ - xlsxwriter>=1.4.3
+ - zstandard>=0.15.2
diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml
index 9d95e28ae9fb6..aeb887d7ec1ab 100644
--- a/ci/deps/actions-39.yaml
+++ b/ci/deps/actions-39.yaml
@@ -12,7 +12,7 @@ dependencies:
- pytest>=7.0.0
- pytest-cov
- pytest-xdist>=2.2.0
- - pytest-asyncio>=0.17
+ - pytest-asyncio>=0.17.0
- boto3
# required dependencies
@@ -21,36 +21,36 @@ dependencies:
- pytz
# optional dependencies
- - beautifulsoup4
+ - beautifulsoup4>=4.9.3
- blosc
- - bottleneck
- - brotlipy
- - fastparquet
- - fsspec
- - html5lib
- - hypothesis
- - gcsfs
- - jinja2
- - lxml
+ - bottleneck>=1.3.2
+ - brotlipy>=0.7.0
+ - fastparquet>=0.6.3
+ - fsspec>=2021.07.0
+ - html5lib>=1.1
+ - hypothesis>=6.34.2
+ - gcsfs>=2021.07.0
+ - jinja2>=3.0.0
+ - lxml>=4.6.3
- matplotlib>=3.6.1, <3.7.0
- - numba
- - numexpr
- - openpyxl<3.1.1
- - odfpy
- - pandas-gbq
- - psycopg2
- - pymysql
+ - numba>=0.53.1
+ - numexpr>=2.7.3
+ - openpyxl<3.1.1, >=3.0.7
+ - odfpy>=1.4.1
+ - pandas-gbq>=0.15.0
+ - psycopg2>=2.8.6
+ - pymysql>=1.0.2
- pyarrow
- - pyreadstat
- - pytables
- - python-snappy
- - pyxlsb
+ - pyreadstat>=1.1.2
+ - pytables>=3.6.1
+ - python-snappy>=0.6.0
+ - pyxlsb>=1.0.8
- s3fs>=2021.08.0
- - scipy
- - sqlalchemy
- - tabulate
+ - scipy>=1.7.1
+ - sqlalchemy>=1.4.16
+ - tabulate>=0.8.9
- tzdata>=2022a
- - xarray
- - xlrd
- - xlsxwriter
- - zstandard
+ - xarray>=0.21.0
+ - xlrd>=2.0.1
+ - xlsxwriter>=1.4.3
+ - zstandard>=0.15.2
diff --git a/ci/deps/actions-pypy-38.yaml b/ci/deps/actions-pypy-38.yaml
index 3218ec13a9c40..1fde1e733be5a 100644
--- a/ci/deps/actions-pypy-38.yaml
+++ b/ci/deps/actions-pypy-38.yaml
@@ -14,7 +14,7 @@ dependencies:
# test dependencies
- pytest>=7.0.0
- pytest-cov
- - pytest-asyncio
+ - pytest-asyncio>=0.17.0
- pytest-xdist>=2.2.0
- hypothesis>=6.34.2
diff --git a/ci/deps/circle-38-arm64.yaml b/ci/deps/circle-38-arm64.yaml
index 1548eb3d4929d..0d1a5f765b5ce 100644
--- a/ci/deps/circle-38-arm64.yaml
+++ b/ci/deps/circle-38-arm64.yaml
@@ -12,7 +12,7 @@ dependencies:
- pytest>=7.0.0
- pytest-cov
- pytest-xdist>=2.2.0
- - pytest-asyncio>=0.17
+ - pytest-asyncio>=0.17.0
- boto3
# required dependencies
@@ -21,36 +21,36 @@ dependencies:
- pytz
# optional dependencies
- - beautifulsoup4
+ - beautifulsoup4>=4.9.3
- blosc
- - bottleneck
- - brotlipy
- - fastparquet
- - fsspec
- - html5lib
- - hypothesis
- - gcsfs
- - jinja2
- - lxml
+ - bottleneck>=1.3.2
+ - brotlipy>=0.7.0
+ - fastparquet>=0.6.3
+ - fsspec>=2021.07.0
+ - html5lib>=1.1
+ - hypothesis>=6.34.2
+ - gcsfs>=2021.07.0
+ - jinja2>=3.0.0
+ - lxml>=4.6.3
- matplotlib>=3.6.1, <3.7.0
- - numba
- - numexpr
- - openpyxl<3.1.1
- - odfpy
- - pandas-gbq
- - psycopg2
+ - numba>=0.53.1
+ - numexpr>=2.7.3
+ - openpyxl<3.1.1, >=3.0.7
+ - odfpy>=1.4.1
+ - pandas-gbq>=0.15.0
+ - psycopg2>=2.8.6
- pyarrow
- - pymysql
+ - pymysql>=1.0.2
# Not provided on ARM
#- pyreadstat
- - pytables
- - python-snappy
- - pyxlsb
+ - pytables>=3.6.1
+ - python-snappy>=0.6.0
+ - pyxlsb>=1.0.8
- s3fs>=2021.08.0
- - scipy
- - sqlalchemy
- - tabulate
- - xarray
- - xlrd
- - xlsxwriter
- - zstandard
+ - scipy>=1.7.1
+ - sqlalchemy>=1.4.16
+ - tabulate>=0.8.9
+ - xarray>=0.21.0
+ - xlrd>=2.0.1
+ - xlsxwriter>=1.4.3
+ - zstandard>=0.15.2
diff --git a/environment.yml b/environment.yml
index 41c93de50bff3..e748d20d6d6f0 100644
--- a/environment.yml
+++ b/environment.yml
@@ -14,7 +14,7 @@ dependencies:
- pytest>=7.0.0
- pytest-cov
- pytest-xdist>=2.2.0
- - pytest-asyncio>=0.17
+ - pytest-asyncio>=0.17.0
- coverage
# required dependencies
@@ -23,40 +23,40 @@ dependencies:
- pytz
# optional dependencies
- - beautifulsoup4
+ - beautifulsoup4>=4.9.3
- blosc
- - brotlipy
- - bottleneck
- - fastparquet
- - fsspec
- - html5lib
- - hypothesis
- - gcsfs
+ - brotlipy>=0.7.0
+ - bottleneck>=1.3.2
+ - fastparquet>=0.6.3
+ - fsspec>=2021.07.0
+ - html5lib>=1.1
+ - hypothesis>=6.34.2
+ - gcsfs>=2021.07.0
- ipython
- - jinja2
- - lxml
+ - jinja2>=3.0.0
+ - lxml>=4.6.3
- matplotlib>=3.6.1, <3.7.0
- numba>=0.53.1
- - numexpr>=2.8.0 # pin for "Run checks on imported code" job
- - openpyxl<3.1.1
- - odfpy
+ - numexpr>=2.7.3 # pin for "Run checks on imported code" job
+ - openpyxl<3.1.1, >=3.0.7
+ - odfpy>=1.4.1
- py
- - psycopg2
+ - psycopg2>=2.8.6
- pyarrow
- - pymysql
- - pyreadstat
- - pytables
- - python-snappy
- - pyxlsb
+ - pymysql>=1.0.2
+ - pyreadstat>=1.1.2
+ - pytables>=3.6.1
+ - python-snappy>=0.6.0
+ - pyxlsb>=1.0.8
- s3fs>=2021.08.0
- - scipy
- - sqlalchemy
- - tabulate
+ - scipy>=1.7.1
+ - sqlalchemy>=1.4.16
+ - tabulate>=0.8.9
- tzdata>=2022a
- - xarray
- - xlrd
- - xlsxwriter
- - zstandard
+ - xarray>=0.21.0
+ - xlrd>=2.0.1
+ - xlsxwriter>=1.4.3
+ - zstandard>=0.15.2
# downstream packages
- dask-core
@@ -96,6 +96,7 @@ dependencies:
- types-python-dateutil
- types-PyMySQL
- types-pytz
+ - types-PyYAML
- types-setuptools
# documentation (jupyter notebooks)
diff --git a/pyproject.toml b/pyproject.toml
index 8c3d27b6bb5d3..c3a7cb013ca6c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -289,6 +289,7 @@ disable = [
"broad-except",
"c-extension-no-member",
"comparison-with-itself",
+ "consider-using-enumerate",
"import-error",
"import-outside-toplevel",
"invalid-name",
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 1ad81472b1779..0329588de17fd 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -7,45 +7,45 @@ cython==0.29.32
pytest>=7.0.0
pytest-cov
pytest-xdist>=2.2.0
-pytest-asyncio>=0.17
+pytest-asyncio>=0.17.0
coverage
python-dateutil
numpy
pytz
-beautifulsoup4
+beautifulsoup4>=4.9.3
blosc
-brotlipy
-bottleneck
-fastparquet
-fsspec
-html5lib
-hypothesis
-gcsfs
+brotlipy>=0.7.0
+bottleneck>=1.3.2
+fastparquet>=0.6.3
+fsspec>=2021.07.0
+html5lib>=1.1
+hypothesis>=6.34.2
+gcsfs>=2021.07.0
ipython
-jinja2
-lxml
+jinja2>=3.0.0
+lxml>=4.6.3
matplotlib>=3.6.1, <3.7.0
numba>=0.53.1
-numexpr>=2.8.0
-openpyxl<3.1.1
-odfpy
+numexpr>=2.7.3
+openpyxl<3.1.1, >=3.0.7
+odfpy>=1.4.1
py
-psycopg2-binary
+psycopg2-binary>=2.8.6
pyarrow
-pymysql
-pyreadstat
-tables
-python-snappy
-pyxlsb
+pymysql>=1.0.2
+pyreadstat>=1.1.2
+tables>=3.6.1
+python-snappy>=0.6.0
+pyxlsb>=1.0.8
s3fs>=2021.08.0
-scipy
-sqlalchemy
-tabulate
+scipy>=1.7.1
+SQLAlchemy>=1.4.16
+tabulate>=0.8.9
tzdata>=2022.1
-xarray
-xlrd
-xlsxwriter
-zstandard
+xarray>=0.21.0
+xlrd>=2.0.1
+xlsxwriter>=1.4.3
+zstandard>=0.15.2
dask
seaborn
moto
@@ -71,6 +71,7 @@ sphinx-copybutton
types-python-dateutil
types-PyMySQL
types-pytz
+types-PyYAML
types-setuptools
nbconvert>=6.4.5
nbsphinx
diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index 8190104428724..2ca4455158db5 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -17,7 +17,10 @@
import re
import sys
-import toml
+if sys.version_info >= (3, 11):
+ import tomllib
+else:
+ import tomli as tomllib
import yaml
EXCLUDE = {"python", "c-compiler", "cxx-compiler"}
@@ -27,6 +30,7 @@
"psycopg2": "psycopg2-binary",
"dask-core": "dask",
"seaborn-base": "seaborn",
+ "sqlalchemy": "SQLAlchemy",
}
@@ -105,7 +109,8 @@ def generate_pip_from_conda(
pip_content = header + "\n".join(pip_deps) + "\n"
# add setuptools to requirements-dev.txt
- meta = toml.load(pathlib.Path(conda_path.parent, "pyproject.toml"))
+ with open(pathlib.Path(conda_path.parent, "pyproject.toml"), "rb") as fd:
+ meta = tomllib.load(fd)
for requirement in meta["build-system"]["requires"]:
if "setuptools" in requirement:
pip_content += requirement
diff --git a/scripts/tests/data/deps_expected_duplicate_package.yaml b/scripts/tests/data/deps_expected_duplicate_package.yaml
new file mode 100644
index 0000000000000..72721c2842707
--- /dev/null
+++ b/scripts/tests/data/deps_expected_duplicate_package.yaml
@@ -0,0 +1,4 @@
+# Test: duplicate package
+dependencies:
+ - jinja2>=3.0.0
+ - jinja2>=3.0.0
diff --git a/scripts/tests/data/deps_expected_no_version.yaml b/scripts/tests/data/deps_expected_no_version.yaml
new file mode 100644
index 0000000000000..843e48330a928
--- /dev/null
+++ b/scripts/tests/data/deps_expected_no_version.yaml
@@ -0,0 +1,5 @@
+# Test: empty version
+dependencies:
+ - jinja2>=3.0.0
+ - scipy>=1.7.1
+ - SQLAlchemy>=1.4.16
diff --git a/scripts/tests/data/deps_expected_random.yaml b/scripts/tests/data/deps_expected_random.yaml
new file mode 100644
index 0000000000000..be5e467b57e10
--- /dev/null
+++ b/scripts/tests/data/deps_expected_random.yaml
@@ -0,0 +1,57 @@
+# Test: random
+name: pandas-dev
+channels:
+ - conda-forge
+dependencies:
+ - python=3.8
+
+ # build dependencies
+ - versioneer[toml]
+ - cython>=0.29.32
+
+ # test dependencies
+ - pytest>=7.0.0
+ - pytest-cov
+ - pytest-xdist>=2.2.0
+ - psutil
+ - pytest-asyncio>=0.17.0
+ - boto3
+
+ # required dependencies
+ - python-dateutil
+ - numpy
+ - pytz
+
+ # optional dependencies
+ - beautifulsoup4>=5.9.3
+ - blosc
+ - bottleneck>=1.3.2
+ - brotlipy>=0.7.0
+ - fastparquet>=0.6.3
+ - fsspec>=2021.07.0
+ - html5lib>=1.1
+ - hypothesis>=6.34.2
+ - gcsfs>=2021.07.0
+ - jinja2>=3.0.0
+ - lxml>=4.6.3
+ - matplotlib>=3.6.1
+ - numba>=0.53.1
+ - numexpr>=2.7.3
+ - openpyxl>=3.0.7
+ - odfpy>=1.4.1
+ - pandas-gbq>=0.15.0
+ - psycopg2>=2.8.6
+ - pyarrow<11, >=7.0.0
+ - pymysql>=1.0.2
+ - pyreadstat>=1.1.2
+ - pytables>=3.6.1
+ - python-snappy>=0.6.0
+ - pyxlsb>=1.0.8
+ - s3fs>=2021.08.0
+ - scipy>=1.7.1
+ - sqlalchemy>=1.4.16
+ - tabulate>=0.8.9
+ - xarray>=0.21.0
+ - xlrd>=2.0.1
+ - xlsxwriter>=1.4.3
+ - zstandard>=0.15.2
diff --git a/scripts/tests/data/deps_expected_range.yaml b/scripts/tests/data/deps_expected_range.yaml
new file mode 100644
index 0000000000000..c8e25076ef3b0
--- /dev/null
+++ b/scripts/tests/data/deps_expected_range.yaml
@@ -0,0 +1,5 @@
+# Test: range
+dependencies:
+ - jinja2<8, >=3.0.0
+ - scipy<9, >=1.7.1
+ - SQLAlchemy<2.0, >=1.4.16
diff --git a/scripts/tests/data/deps_expected_same_version.yaml b/scripts/tests/data/deps_expected_same_version.yaml
new file mode 100644
index 0000000000000..e07b221ecd44f
--- /dev/null
+++ b/scripts/tests/data/deps_expected_same_version.yaml
@@ -0,0 +1,3 @@
+# Test: same version
+dependencies:
+ - jinja2>=3.0.0
diff --git a/scripts/tests/data/deps_minimum.toml b/scripts/tests/data/deps_minimum.toml
new file mode 100644
index 0000000000000..97a5ce1180bfb
--- /dev/null
+++ b/scripts/tests/data/deps_minimum.toml
@@ -0,0 +1,537 @@
+[build-system]
+# Minimum requirements for the build system to execute.
+# See https://github.com/scipy/scipy/pull/12940 for the AIX issue.
+requires = [
+ "setuptools>=61.0.0",
+ "wheel",
+ "Cython>=0.29.32,<3", # Note: sync with setup.py, environment.yml and asv.conf.json
+ "oldest-supported-numpy>=2022.8.16",
+ "versioneer[toml]"
+]
+# build-backend = "setuptools.build_meta"
+
+[project]
+name = 'pandas'
+dynamic = [
+ 'version'
+]
+description = 'Powerful data structures for data analysis, time series, and statistics'
+readme = 'README.md'
+authors = [
+ { name = 'The Pandas Development Team', email='pandas-dev@python.org' },
+]
+license = {file = 'LICENSE'}
+requires-python = '>=3.8'
+dependencies = [
+ "numpy>=1.20.3; python_version<'3.10'",
+ "numpy>=1.21.0; python_version>='3.10'",
+ "numpy>=1.23.2; python_version>='3.11'",
+ "python-dateutil>=2.8.2",
+ "pytz>=2020.1"
+]
+classifiers = [
+ 'Development Status :: 5 - Production/Stable',
+ 'Environment :: Console',
+ 'Intended Audience :: Science/Research',
+ 'License :: OSI Approved :: BSD License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Cython',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3 :: Only',
+ 'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
+ 'Programming Language :: Python :: 3.11',
+ 'Topic :: Scientific/Engineering'
+]
+
+[project.urls]
+homepage = 'https://pandas.pydata.org'
+documentation = 'https://pandas.pydata.org/docs/'
+repository = 'https://github.com/pandas-dev/pandas'
+
+[project.entry-points."pandas_plotting_backends"]
+matplotlib = "pandas:plotting._matplotlib"
+
+[project.optional-dependencies]
+test = ['hypothesis>=6.34.2', 'pytest>=7.0.0', 'pytest-xdist>=2.2.0', 'pytest-asyncio>=0.17.0']
+performance = ['bottleneck>=1.3.2', 'numba>=0.53.1', 'numexpr>=2.7.1']
+timezone = ['tzdata>=2022.1']
+computation = ['scipy>=1.7.1', 'xarray>=0.21.0']
+fss = ['fsspec>=2021.07.0']
+aws = ['s3fs>=2021.08.0']
+gcp = ['gcsfs>=2021.07.0', 'pandas-gbq>=0.15.0']
+excel = ['odfpy>=1.4.1', 'openpyxl>=3.0.7', 'pyxlsb>=1.0.8', 'xlrd>=2.0.1', 'xlsxwriter>=1.4.3']
+parquet = ['pyarrow>=7.0.0']
+feather = ['pyarrow>=7.0.0']
+hdf5 = [# blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297)
+ #'blosc>=1.20.1',
+ 'tables>=3.6.1']
+spss = ['pyreadstat>=1.1.2']
+postgresql = ['SQLAlchemy>=1.4.16', 'psycopg2>=2.8.6']
+mysql = ['SQLAlchemy>=1.4.16', 'pymysql>=1.0.2']
+sql-other = ['SQLAlchemy>=1.4.16']
+html = ['beautifulsoup4>=4.9.3', 'html5lib>=1.1', 'lxml>=4.6.3']
+xml = ['lxml>=4.6.3']
+plot = ['matplotlib>=3.6.1']
+output_formatting = ['jinja2>=3.0.0', 'tabulate>=0.8.9']
+clipboard = ['PyQt5>=5.15.1', 'qtpy>=2.2.0']
+compression = ['brotlipy>=0.7.0', 'python-snappy>=0.6.0', 'zstandard>=0.15.2']
+all = ['beautifulsoup4>=5.9.3',
+ # blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297)
+ #'blosc>=1.21.0',
+ 'bottleneck>=1.3.2',
+ 'brotlipy>=0.7.0',
+ 'fastparquet>=0.6.3',
+ 'fsspec>=2021.07.0',
+ 'gcsfs>=2021.07.0',
+ 'html5lib>=1.1',
+ 'hypothesis>=6.34.2',
+ 'jinja2>=3.0.0',
+ 'lxml>=4.6.3',
+ 'matplotlib>=3.6.1',
+ 'numba>=0.53.1',
+ 'numexpr>=2.7.3',
+ 'odfpy>=1.4.1',
+ 'openpyxl>=3.0.7',
+ 'pandas-gbq>=0.15.0',
+ 'psycopg2>=2.8.6',
+ 'pyarrow>=7.0.0',
+ 'pymysql>=1.0.2',
+ 'PyQt5>=5.15.1',
+ 'pyreadstat>=1.1.2',
+ 'pytest>=7.0.0',
+ 'pytest-xdist>=2.2.0',
+ 'pytest-asyncio>=0.17.0',
+ 'python-snappy>=0.6.0',
+ 'pyxlsb>=1.0.8',
+ 'qtpy>=2.2.0',
+ 'scipy>=1.7.1',
+ 's3fs>=2021.08.0',
+ 'SQLAlchemy>=1.4.16',
+ 'tables>=3.6.1',
+ 'tabulate>=0.8.9',
+ 'tzdata>=2022.1',
+ 'xarray>=0.21.0',
+ 'xlrd>=2.0.1',
+ 'xlsxwriter>=1.4.3',
+ 'zstandard>=0.15.2']
+
+# TODO: Remove after setuptools support is dropped.
+[tool.setuptools]
+include-package-data = true
+
+[tool.setuptools.packages.find]
+include = ["pandas", "pandas.*"]
+namespaces = false
+
+[tool.setuptools.exclude-package-data]
+"*" = ["*.c", "*.h"]
+
+# See the docstring in versioneer.py for instructions. Note that you must
+# re-run 'versioneer.py setup' after changing this section, and commit the
+# resulting files.
+[tool.versioneer]
+VCS = "git"
+style = "pep440"
+versionfile_source = "pandas/_version.py"
+versionfile_build = "pandas/_version.py"
+tag_prefix = "v"
+parentdir_prefix = "pandas-"
+
+[tool.cibuildwheel]
+skip = "cp36-* cp37-* pp37-* *-manylinux_i686 *_ppc64le *_s390x *-musllinux*"
+build-verbosity = "3"
+test-requires = "hypothesis>=6.34.2 pytest>=7.0.0 pytest-xdist>=2.2.0 pytest-asyncio>=0.17"
+test-command = "python {project}/ci/test_wheels.py"
+
+[tool.cibuildwheel.macos]
+archs = "x86_64 arm64"
+test-skip = "*_arm64"
+
+[tool.cibuildwheel.windows]
+repair-wheel-command = "python ci/fix_wheels.py {wheel} {dest_dir}"
+
+[[tool.cibuildwheel.overrides]]
+select = "*-win*"
+# We test separately for Windows, since we use
+# the base windows docker image to check if any dlls are
+# missing from the wheel
+test-command = ""
+
+[[tool.cibuildwheel.overrides]]
+select = "*-win32"
+environment = { IS_32_BIT="true" }
+
+[tool.black]
+target-version = ['py38', 'py39']
+exclude = '''
+(
+ asv_bench/env
+ | \.egg
+ | \.git
+ | \.hg
+ | \.mypy_cache
+ | \.nox
+ | \.tox
+ | \.venv
+ | _build
+ | buck-out
+ | build
+ | dist
+ | setup.py
+)
+'''
+
+[tool.ruff]
+line-length = 88
+update-check = false
+target-version = "py38"
+
+select = [
+ # pyflakes
+ "F",
+ # pycodestyle
+ "E",
+ "W",
+ # flake8-2020
+ "YTT",
+ # flake8-bugbear
+ "B",
+ # flake8-quotes
+ "Q",
+ # pylint
+ "PLE", "PLR", "PLW",
+]
+
+ignore = [
+ # space before : (needed for how black formats slicing)
+ # "E203", # not yet implemented
+ # module level import not at top of file
+ "E402",
+ # do not assign a lambda expression, use a def
+ "E731",
+ # line break before binary operator
+ # "W503", # not yet implemented
+ # line break after binary operator
+ # "W504", # not yet implemented
+ # controversial
+ "B006",
+ # controversial
+ "B007",
+ # controversial
+ "B008",
+ # setattr is used to side-step mypy
+ "B009",
+ # getattr is used to side-step mypy
+ "B010",
+ # tests use assert False
+ "B011",
+ # tests use comparisons but not their returned value
+ "B015",
+ # false positives
+ "B019",
+ # Loop control variable overrides iterable it iterates
+ "B020",
+ # Function definition does not bind loop variable
+ "B023",
+ # Functions defined inside a loop must not use variables redefined in the loop
+ # "B301", # not yet implemented
+
+ # Additional checks that don't pass yet
+ # Within an except clause, raise exceptions with ...
+ "B904",
+]
+
+exclude = [
+ "doc/sphinxext/*.py",
+ "doc/build/*.py",
+ "doc/temp/*.py",
+ ".eggs/*.py",
+ "versioneer.py",
+ # exclude asv benchmark environments from linting
+ "env",
+]
+
+[tool.pylint.messages_control]
+max-line-length = 88
+disable = [
+ # intentionally turned off
+ "broad-except",
+ "c-extension-no-member",
+ "comparison-with-itself",
+ "import-error",
+ "import-outside-toplevel",
+ "invalid-name",
+ "invalid-unary-operand-type",
+ "line-too-long",
+ "no-else-continue",
+ "no-else-raise",
+ "no-else-return",
+ "no-member",
+ "no-name-in-module",
+ "not-an-iterable",
+ "overridden-final-method",
+ "pointless-statement",
+ "redundant-keyword-arg",
+ "singleton-comparison",
+ "too-many-ancestors",
+ "too-many-arguments",
+ "too-many-boolean-expressions",
+ "too-many-branches",
+ "too-many-function-args",
+ "too-many-instance-attributes",
+ "too-many-locals",
+ "too-many-nested-blocks",
+ "too-many-public-methods",
+ "too-many-return-statements",
+ "too-many-statements",
+ "unexpected-keyword-arg",
+ "ungrouped-imports",
+ "unsubscriptable-object",
+ "unsupported-assignment-operation",
+ "unsupported-membership-test",
+ "unused-import",
+ "use-implicit-booleaness-not-comparison",
+ "use-implicit-booleaness-not-len",
+ "wrong-import-order",
+ "wrong-import-position",
+
+ # misc
+ "abstract-class-instantiated",
+ "no-value-for-parameter",
+ "undefined-variable",
+ "unpacking-non-sequence",
+
+ # pylint type "C": convention, for programming standard violation
+ "missing-class-docstring",
+ "missing-function-docstring",
+ "missing-module-docstring",
+ "too-many-lines",
+ "unidiomatic-typecheck",
+ "unnecessary-dunder-call",
+ "unnecessary-lambda-assignment",
+
+ # pylint type "R": refactor, for bad code smell
+ "consider-using-with",
+ "cyclic-import",
+ "duplicate-code",
+ "inconsistent-return-statements",
+ "redefined-argument-from-local",
+ "too-few-public-methods",
+
+ # pylint type "W": warning, for python specific problems
+ "abstract-method",
+ "arguments-differ",
+ "arguments-out-of-order",
+ "arguments-renamed",
+ "attribute-defined-outside-init",
+ "comparison-with-callable",
+ "dangerous-default-value",
+ "deprecated-module",
+ "eval-used",
+ "expression-not-assigned",
+ "fixme",
+ "global-statement",
+ "invalid-overridden-method",
+ "keyword-arg-before-vararg",
+ "possibly-unused-variable",
+ "protected-access",
+ "raise-missing-from",
+ "redefined-builtin",
+ "redefined-outer-name",
+ "self-cls-assignment",
+ "signature-differs",
+ "super-init-not-called",
+ "try-except-raise",
+ "unnecessary-lambda",
+ "unspecified-encoding",
+ "unused-argument",
+ "unused-variable",
+ "using-constant-test"
+]
+
+[tool.pytest.ini_options]
+# sync minversion with pyproject.toml & install.rst
+minversion = "7.0"
+addopts = "--strict-data-files --strict-markers --strict-config --capture=no --durations=30 --junitxml=test-data.xml"
+empty_parameter_set_mark = "fail_at_collect"
+xfail_strict = true
+testpaths = "pandas"
+doctest_optionflags = [
+ "NORMALIZE_WHITESPACE",
+ "IGNORE_EXCEPTION_DETAIL",
+ "ELLIPSIS",
+]
+filterwarnings = [
+ # Will be fixed in numba 0.56: https://github.com/numba/numba/issues/7758
+ "ignore:`np.MachAr` is deprecated:DeprecationWarning:numba",
+ "ignore:.*urllib3:DeprecationWarning:botocore",
+ "ignore:Setuptools is replacing distutils.:UserWarning:_distutils_hack",
+ # https://github.com/PyTables/PyTables/issues/822
+ "ignore:a closed node found in the registry:UserWarning:tables",
+ "ignore:`np.object` is a deprecated:DeprecationWarning:tables",
+ "ignore:tostring:DeprecationWarning:tables",
+ "ignore:distutils Version classes are deprecated:DeprecationWarning:numexpr",
+ "ignore:distutils Version classes are deprecated:DeprecationWarning:fastparquet",
+ "ignore:distutils Version classes are deprecated:DeprecationWarning:fsspec",
+]
+junit_family = "xunit2"
+markers = [
+ "single_cpu: tests that should run on a single cpu only",
+ "slow: mark a test as slow",
+ "network: mark a test as network",
+ "db: tests requiring a database (mysql or postgres)",
+ "clipboard: mark a pd.read_clipboard test",
+ "arm_slow: mark a test as slow for arm64 architecture",
+ "arraymanager: mark a test to run with ArrayManager enabled",
+]
+asyncio_mode = "strict"
+
+[tool.mypy]
+# Import discovery
+mypy_path = "typings"
+files = ["pandas", "typings"]
+namespace_packages = false
+explicit_package_bases = false
+ignore_missing_imports = true
+follow_imports = "normal"
+follow_imports_for_stubs = false
+no_site_packages = false
+no_silence_site_packages = false
+# Platform configuration
+python_version = "3.8"
+platform = "linux-64"
+# Disallow dynamic typing
+disallow_any_unimported = false # TODO
+disallow_any_expr = false # TODO
+disallow_any_decorated = false # TODO
+disallow_any_explicit = false # TODO
+disallow_any_generics = false # TODO
+disallow_subclassing_any = false # TODO
+# Untyped definitions and calls
+disallow_untyped_calls = false # TODO
+disallow_untyped_defs = false # TODO
+disallow_incomplete_defs = false # TODO
+check_untyped_defs = true
+disallow_untyped_decorators = true
+# None and Optional handling
+no_implicit_optional = true
+strict_optional = true
+# Configuring warnings
+warn_redundant_casts = true
+warn_unused_ignores = true
+warn_no_return = true
+warn_return_any = false # TODO
+warn_unreachable = false # GH#27396
+# Suppressing errors
+ignore_errors = false
+enable_error_code = "ignore-without-code"
+# Miscellaneous strictness flags
+allow_untyped_globals = false
+allow_redefinition = false
+local_partial_types = false
+implicit_reexport = true
+strict_equality = true
+# Configuring error messages
+show_error_context = false
+show_column_numbers = false
+show_error_codes = true
+
+[[tool.mypy.overrides]]
+module = [
+ "pandas.tests.*",
+ "pandas._version",
+ "pandas.io.clipboard",
+]
+check_untyped_defs = false
+
+[[tool.mypy.overrides]]
+module = [
+ "pandas.tests.apply.test_series_apply",
+ "pandas.tests.arithmetic.conftest",
+ "pandas.tests.arrays.sparse.test_combine_concat",
+ "pandas.tests.dtypes.test_common",
+ "pandas.tests.frame.methods.test_to_records",
+ "pandas.tests.groupby.test_rank",
+ "pandas.tests.groupby.transform.test_transform",
+ "pandas.tests.indexes.interval.test_interval",
+ "pandas.tests.indexing.test_categorical",
+ "pandas.tests.io.excel.test_writers",
+ "pandas.tests.reductions.test_reductions",
+ "pandas.tests.test_expressions",
+]
+ignore_errors = true
+
+# To be kept consistent with "Import Formatting" section in contributing.rst
+[tool.isort]
+known_pre_libs = "pandas._config"
+known_pre_core = ["pandas._libs", "pandas._typing", "pandas.util._*", "pandas.compat", "pandas.errors"]
+known_dtypes = "pandas.core.dtypes"
+known_post_core = ["pandas.tseries", "pandas.io", "pandas.plotting"]
+sections = ["FUTURE", "STDLIB", "THIRDPARTY" ,"PRE_LIBS" , "PRE_CORE", "DTYPES", "FIRSTPARTY", "POST_CORE", "LOCALFOLDER"]
+profile = "black"
+combine_as_imports = true
+force_grid_wrap = 2
+force_sort_within_sections = true
+skip_glob = "env"
+skip = "pandas/__init__.py"
+
+[tool.pyright]
+pythonVersion = "3.8"
+typeCheckingMode = "basic"
+include = ["pandas", "typings"]
+exclude = ["pandas/tests", "pandas/io/clipboard", "pandas/util/version"]
+# enable subset of "strict"
+reportDuplicateImport = true
+reportInvalidStubStatement = true
+reportOverlappingOverload = true
+reportPropertyTypeMismatch = true
+reportUntypedClassDecorator = true
+reportUntypedFunctionDecorator = true
+reportUntypedNamedTuple = true
+reportUnusedImport = true
+# disable subset of "basic"
+reportGeneralTypeIssues = false
+reportMissingModuleSource = false
+reportOptionalCall = false
+reportOptionalIterable = false
+reportOptionalMemberAccess = false
+reportOptionalOperand = false
+reportOptionalSubscript = false
+reportPrivateImportUsage = false
+reportUnboundVariable = false
+
+[tool.coverage.run]
+branch = true
+omit = ["pandas/_typing.py", "pandas/_version.py"]
+plugins = ["Cython.Coverage"]
+source = ["pandas"]
+
+[tool.coverage.report]
+ignore_errors = false
+show_missing = true
+omit = ["pandas/_version.py"]
+exclude_lines = [
+ # Have to re-enable the standard pragma
+ "pragma: no cover",
+ # Don't complain about missing debug-only code:s
+ "def __repr__",
+ "if self.debug",
+ # Don't complain if tests don't hit defensive assertion code:
+ "raise AssertionError",
+ "raise NotImplementedError",
+ "AbstractMethodError",
+ # Don't complain if non-runnable code isn't run:
+ "if 0:",
+ "if __name__ == .__main__.:",
+ "if TYPE_CHECKING:",
+]
+
+[tool.coverage.html]
+directory = "coverage_html_report"
+
+[tool.codespell]
+ignore-words-list = "blocs, coo, hist, nd, sav, ser, recuse"
+ignore-regex = 'https://([\w/\.])+'
diff --git a/scripts/tests/data/deps_unmodified_duplicate_package.yaml b/scripts/tests/data/deps_unmodified_duplicate_package.yaml
new file mode 100644
index 0000000000000..72721c2842707
--- /dev/null
+++ b/scripts/tests/data/deps_unmodified_duplicate_package.yaml
@@ -0,0 +1,4 @@
+# Test: duplicate package
+dependencies:
+ - jinja2>=3.0.0
+ - jinja2>=3.0.0
diff --git a/scripts/tests/data/deps_unmodified_no_version.yaml b/scripts/tests/data/deps_unmodified_no_version.yaml
new file mode 100644
index 0000000000000..c57b49a003efd
--- /dev/null
+++ b/scripts/tests/data/deps_unmodified_no_version.yaml
@@ -0,0 +1,5 @@
+# Test: empty version
+dependencies:
+ - jinja2
+ - scipy
+ - SQLAlchemy
diff --git a/scripts/tests/data/deps_unmodified_random.yaml b/scripts/tests/data/deps_unmodified_random.yaml
new file mode 100644
index 0000000000000..4ca758af1c8ad
--- /dev/null
+++ b/scripts/tests/data/deps_unmodified_random.yaml
@@ -0,0 +1,57 @@
+# Test: random
+name: pandas-dev
+channels:
+ - conda-forge
+dependencies:
+ - python=3.8
+
+ # build dependencies
+ - versioneer[toml]
+ - cython>=0.29.32
+
+ # test dependencies
+ - pytest>=7.0.0
+ - pytest-cov
+ - pytest-xdist>=2.2.0
+ - psutil
+ - pytest-asyncio>=0.17
+ - boto3
+
+ # required dependencies
+ - python-dateutil
+ - numpy
+ - pytz
+
+ # optional dependencies
+ - beautifulsoup4
+ - blosc
+ - bottleneck>=1.3.2
+ - brotlipy
+ - fastparquet>=0.6.3
+ - fsspec>=2021.07.0
+ - html5lib>=1.1
+ - hypothesis
+ - gcsfs>=2021.07.0
+ - jinja2
+ - lxml>=4.6.3
+ - matplotlib>=3.6.1
+ - numba
+ - numexpr>=2.7.3
+ - openpyxl>=3.0.7
+ - odfpy>=1.4.1
+ - pandas-gbq>=0.15.0
+ - psycopg2
+ - pyarrow<11, >=7.0.0
+ - pymysql>=1.0.2
+ - pyreadstat>=1.1.2
+ - pytables>=3.6.1
+ - python-snappy>=0.6.0
+ - pyxlsb>=1.0.8
+ - s3fs>=2021.08.0
+ - scipy>=1.7.1
+ - sqlalchemy>=1.4.16
+ - tabulate>=0.8.9
+ - xarray>=0.21.0
+ - xlrd>=2.0.1
+ - xlsxwriter>=1.4.3
+ - zstandard>=0.15.2
diff --git a/scripts/tests/data/deps_unmodified_range.yaml b/scripts/tests/data/deps_unmodified_range.yaml
new file mode 100644
index 0000000000000..22882af2cbc4b
--- /dev/null
+++ b/scripts/tests/data/deps_unmodified_range.yaml
@@ -0,0 +1,5 @@
+# Test: range
+dependencies:
+ - jinja2<8
+ - scipy<9
+ - SQLAlchemy<2.0
diff --git a/scripts/tests/data/deps_unmodified_same_version.yaml b/scripts/tests/data/deps_unmodified_same_version.yaml
new file mode 100644
index 0000000000000..e07b221ecd44f
--- /dev/null
+++ b/scripts/tests/data/deps_unmodified_same_version.yaml
@@ -0,0 +1,3 @@
+# Test: same version
+dependencies:
+ - jinja2>=3.0.0
diff --git a/scripts/tests/test_validate_min_versions_in_sync.py b/scripts/tests/test_validate_min_versions_in_sync.py
new file mode 100644
index 0000000000000..13e8965bb7591
--- /dev/null
+++ b/scripts/tests/test_validate_min_versions_in_sync.py
@@ -0,0 +1,61 @@
+import pathlib
+import sys
+
+import pytest
+import yaml
+
+if sys.version_info >= (3, 11):
+ import tomllib
+else:
+ import tomli as tomllib
+
+from scripts.validate_min_versions_in_sync import (
+ get_toml_map_from,
+ get_yaml_map_from,
+ pin_min_versions_to_yaml_file,
+)
+
+
+@pytest.mark.parametrize(
+ "src_toml, src_yaml, expected_yaml",
+ [
+ (
+ pathlib.Path("scripts/tests/data/deps_minimum.toml"),
+ pathlib.Path("scripts/tests/data/deps_unmodified_random.yaml"),
+ pathlib.Path("scripts/tests/data/deps_expected_random.yaml"),
+ ),
+ (
+ pathlib.Path("scripts/tests/data/deps_minimum.toml"),
+ pathlib.Path("scripts/tests/data/deps_unmodified_same_version.yaml"),
+ pathlib.Path("scripts/tests/data/deps_expected_same_version.yaml"),
+ ),
+ (
+ pathlib.Path("scripts/tests/data/deps_minimum.toml"),
+ pathlib.Path("scripts/tests/data/deps_unmodified_duplicate_package.yaml"),
+ pathlib.Path("scripts/tests/data/deps_expected_duplicate_package.yaml"),
+ ),
+ (
+ pathlib.Path("scripts/tests/data/deps_minimum.toml"),
+ pathlib.Path("scripts/tests/data/deps_unmodified_no_version.yaml"),
+ pathlib.Path("scripts/tests/data/deps_expected_no_version.yaml"),
+ ),
+ (
+ pathlib.Path("scripts/tests/data/deps_minimum.toml"),
+ pathlib.Path("scripts/tests/data/deps_unmodified_range.yaml"),
+ pathlib.Path("scripts/tests/data/deps_expected_range.yaml"),
+ ),
+ ],
+)
+def test_pin_min_versions_to_yaml_file(src_toml, src_yaml, expected_yaml):
+ with open(src_toml, "rb") as toml_f:
+ toml_map = tomllib.load(toml_f)
+ with open(src_yaml) as yaml_f:
+ yaml_file_data = yaml_f.read()
+ yaml_file = yaml.safe_load(yaml_file_data)
+ yaml_dependencies = yaml_file["dependencies"]
+ yaml_map = get_yaml_map_from(yaml_dependencies)
+ toml_map = get_toml_map_from(toml_map)
+ result_yaml_file = pin_min_versions_to_yaml_file(yaml_map, toml_map, yaml_file_data)
+ with open(expected_yaml) as yaml_f:
+ dummy_yaml_expected_file_1 = yaml_f.read()
+ assert result_yaml_file == dummy_yaml_expected_file_1
diff --git a/scripts/validate_min_versions_in_sync.py b/scripts/validate_min_versions_in_sync.py
index 7c102096c1690..3c12f17fe72cf 100755
--- a/scripts/validate_min_versions_in_sync.py
+++ b/scripts/validate_min_versions_in_sync.py
@@ -17,18 +17,31 @@
import pathlib
import sys
+import yaml
+
if sys.version_info >= (3, 11):
import tomllib
else:
import tomli as tomllib
+from typing import Any
+
+from scripts.generate_pip_deps_from_conda import RENAME
+
DOC_PATH = pathlib.Path("doc/source/getting_started/install.rst").resolve()
CI_PATH = next(
pathlib.Path("ci/deps").absolute().glob("actions-*-minimum_versions.yaml")
)
CODE_PATH = pathlib.Path("pandas/compat/_optional.py").resolve()
SETUP_PATH = pathlib.Path("pyproject.toml").resolve()
+YAML_PATH = pathlib.Path("ci/deps")
+ENV_PATH = pathlib.Path("environment.yml")
EXCLUDE_DEPS = {"tzdata", "blosc"}
+EXCLUSION_LIST = {
+ "python=3.8[build=*_pypy]": None,
+ "tzdata": None,
+ "pyarrow": None,
+}
# pandas package is not available
# in pre-commit environment
sys.path.append("pandas/compat")
@@ -41,6 +54,147 @@
import _optional
+def pin_min_versions_to_ci_deps() -> int:
+ """
+ Pin minimum versions to CI dependencies.
+
+ Pip dependencies are not pinned.
+ """
+ all_yaml_files = list(YAML_PATH.iterdir())
+ all_yaml_files.append(ENV_PATH)
+ toml_dependencies = {}
+ with open(SETUP_PATH, "rb") as toml_f:
+ toml_dependencies = tomllib.load(toml_f)
+ ret = 0
+ for curr_file in all_yaml_files:
+ with open(curr_file) as yaml_f:
+ yaml_start_data = yaml_f.read()
+ yaml_file = yaml.safe_load(yaml_start_data)
+ yaml_dependencies = yaml_file["dependencies"]
+ yaml_map = get_yaml_map_from(yaml_dependencies)
+ toml_map = get_toml_map_from(toml_dependencies)
+ yaml_result_data = pin_min_versions_to_yaml_file(
+ yaml_map, toml_map, yaml_start_data
+ )
+ if yaml_result_data != yaml_start_data:
+ with open(curr_file, "w") as f:
+ f.write(yaml_result_data)
+ ret |= 1
+ return ret
+
+
+def get_toml_map_from(toml_dic: dict[str, Any]) -> dict[str, str]:
+ toml_deps = {}
+ toml_dependencies = set(toml_dic["project"]["optional-dependencies"]["all"])
+ for dependency in toml_dependencies:
+ toml_package, toml_version = dependency.strip().split(">=")
+ toml_deps[toml_package] = toml_version
+ return toml_deps
+
+
+def get_operator_from(dependency: str) -> str | None:
+ if "<=" in dependency:
+ operator = "<="
+ elif ">=" in dependency:
+ operator = ">="
+ elif "=" in dependency:
+ operator = "="
+ elif ">" in dependency:
+ operator = ">"
+ elif "<" in dependency:
+ operator = "<"
+ else:
+ operator = None
+ return operator
+
+
+def get_yaml_map_from(
+ yaml_dic: list[str | dict[str, list[str]]]
+) -> dict[str, list[str] | None]:
+ yaml_map: dict[str, list[str] | None] = {}
+ for dependency in yaml_dic:
+ if (
+ isinstance(dependency, dict)
+ or dependency in EXCLUSION_LIST
+ or dependency in yaml_map
+ ):
+ continue
+ search_text = str(dependency)
+ operator = get_operator_from(search_text)
+ if "," in dependency:
+ yaml_dependency, yaml_version1 = search_text.split(",")
+ operator = get_operator_from(yaml_dependency)
+ assert operator is not None
+ yaml_package, yaml_version2 = yaml_dependency.split(operator)
+ yaml_version2 = operator + yaml_version2
+ yaml_map[yaml_package] = [yaml_version1, yaml_version2]
+ elif operator is not None:
+ yaml_package, yaml_version = search_text.split(operator)
+ yaml_version = operator + yaml_version
+ yaml_map[yaml_package] = [yaml_version]
+ else:
+ yaml_package, yaml_version = search_text.strip(), None
+ yaml_map[yaml_package] = yaml_version
+ return yaml_map
+
+
+def clean_version_list(
+ yaml_versions: list[str], toml_version: version.Version
+) -> list[str]:
+ for i in range(len(yaml_versions)):
+ yaml_version = yaml_versions[i]
+ operator = get_operator_from(yaml_version)
+ assert operator is not None
+ if "<=" in operator or ">=" in operator:
+ yaml_version = yaml_version[2:]
+ else:
+ yaml_version = yaml_version[1:]
+ yaml_version = version.parse(yaml_version)
+ if yaml_version < toml_version:
+ yaml_versions[i] = "-" + str(yaml_version)
+ elif yaml_version >= toml_version:
+ if ">" in operator:
+ yaml_versions[i] = "-" + str(yaml_version)
+ return yaml_versions
+
+
+def pin_min_versions_to_yaml_file(
+ yaml_map: dict[str, list[str] | None], toml_map: dict[str, str], yaml_file_data: str
+) -> str:
+ data = yaml_file_data
+ for yaml_package, yaml_versions in yaml_map.items():
+ if yaml_package in EXCLUSION_LIST:
+ continue
+ old_dep = yaml_package
+ if yaml_versions is not None:
+ for yaml_version in yaml_versions:
+ old_dep += yaml_version + ", "
+ old_dep = old_dep[:-2]
+ if RENAME.get(yaml_package, yaml_package) in toml_map:
+ min_dep = toml_map[RENAME.get(yaml_package, yaml_package)]
+ elif yaml_package in toml_map:
+ min_dep = toml_map[yaml_package]
+ else:
+ continue
+ if yaml_versions is None:
+ new_dep = old_dep + ">=" + min_dep
+ data = data.replace(old_dep, new_dep, 1)
+ continue
+ toml_version = version.parse(min_dep)
+ yaml_versions = clean_version_list(yaml_versions, toml_version)
+ cleaned_yaml_versions = [x for x in yaml_versions if "-" not in x]
+ new_dep = yaml_package
+ for yaml_version in cleaned_yaml_versions:
+ new_dep += yaml_version + ", "
+ operator = get_operator_from(new_dep)
+ if operator != "=":
+ new_dep += ">=" + min_dep
+ else:
+ new_dep = new_dep[:-2]
+ data = data.replace(old_dep, new_dep)
+ return data
+
+
def get_versions_from_code() -> dict[str, str]:
"""Min versions for checking within pandas code."""
install_map = _optional.INSTALL_MAPPING
@@ -92,7 +246,6 @@ def get_versions_from_toml() -> dict[str, str]:
"""Min versions in pyproject.toml for pip install pandas[extra]."""
install_map = _optional.INSTALL_MAPPING
optional_dependencies = {}
-
with open(SETUP_PATH, "rb") as pyproject_f:
pyproject_toml = tomllib.load(pyproject_f)
opt_deps = pyproject_toml["project"]["optional-dependencies"]
@@ -108,11 +261,12 @@ def get_versions_from_toml() -> dict[str, str]:
for item in EXCLUDE_DEPS:
optional_dependencies.pop(item, None)
-
return optional_dependencies
-def main():
+def main() -> int:
+ ret = 0
+ ret |= pin_min_versions_to_ci_deps()
with open(CI_PATH, encoding="utf-8") as f:
_, ci_optional = get_versions_from_ci(f.readlines())
code_optional = get_versions_from_code()
@@ -138,9 +292,9 @@ def main():
f"{CODE_PATH}: {code_optional.get(package, 'Not specified')}\n"
f"{SETUP_PATH}: {setup_optional.get(package, 'Not specified')}\n\n"
)
- sys.exit(1)
- sys.exit(0)
+ ret |= 1
+ return ret
if __name__ == "__main__":
- main()
+ sys.exit(main())
| - [x] closes #50207
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51189 | 2023-02-06T03:20:17Z | 2023-02-23T16:38:44Z | 2023-02-23T16:38:44Z | 2023-02-23T20:25:04Z |
REF: prune groupby paths | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 815f9936057f4..aec2037d044b8 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -380,10 +380,16 @@ def _wrap_applied_output(
"""
if len(values) == 0:
# GH #6265
+ if is_transform:
+ # GH#47787 see test_group_on_empty_multiindex
+ res_index = data.index
+ else:
+ res_index = self.grouper.result_index
+
return self.obj._constructor(
[],
name=self.obj.name,
- index=self.grouper.result_index,
+ index=res_index,
dtype=data.dtype,
)
assert values is not None
@@ -1136,14 +1142,12 @@ def cov(
@property
@doc(Series.is_monotonic_increasing.__doc__)
def is_monotonic_increasing(self) -> Series:
- result = self._op_via_apply("is_monotonic_increasing")
- return result
+ return self.apply(lambda ser: ser.is_monotonic_increasing)
@property
@doc(Series.is_monotonic_decreasing.__doc__)
def is_monotonic_decreasing(self) -> Series:
- result = self._op_via_apply("is_monotonic_decreasing")
- return result
+ return self.apply(lambda ser: ser.is_monotonic_decreasing)
@doc(Series.hist.__doc__)
def hist(
@@ -1181,8 +1185,7 @@ def hist(
@property
@doc(Series.dtype.__doc__)
def dtype(self) -> Series:
- result = self._op_via_apply("dtype")
- return result
+ return self.apply(lambda ser: ser.dtype)
@doc(Series.unique.__doc__)
def unique(self) -> Series:
@@ -1428,9 +1431,13 @@ def _wrap_applied_output(
):
if len(values) == 0:
- result = self.obj._constructor(
- index=self.grouper.result_index, columns=data.columns
- )
+ if is_transform:
+ # GH#47787 see test_group_on_empty_multiindex
+ res_index = data.index
+ else:
+ res_index = self.grouper.result_index
+
+ result = self.obj._constructor(index=res_index, columns=data.columns)
result = result.astype(data.dtypes, copy=False)
return result
@@ -1719,18 +1726,11 @@ def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame:
# iterate through columns, see test_transform_exclude_nuisance
# gets here with non-unique columns
output = {}
- inds = []
for i, (colname, sgb) in enumerate(self._iterate_column_groupbys(obj)):
output[i] = sgb.transform(wrapper)
- inds.append(i)
-
- if not output:
- raise TypeError("Transform function invalid for data types")
-
- columns = obj.columns.take(inds)
result = self.obj._constructor(output, index=obj.index)
- result.columns = columns
+ result.columns = obj.columns
return result
def filter(self, func, dropna: bool = True, *args, **kwargs):
@@ -2677,8 +2677,8 @@ def hist(
@property
@doc(DataFrame.dtypes.__doc__)
def dtypes(self) -> Series:
- result = self._op_via_apply("dtypes")
- return result
+ # error: Incompatible return value type (got "DataFrame", expected "Series")
+ return self.apply(lambda df: df.dtypes) # type: ignore[return-value]
@doc(DataFrame.corrwith.__doc__)
def corrwith(
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index fd9a06a06cfa7..be22b05cbe1ab 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -956,9 +956,6 @@ def __getattr__(self, attr: str):
def _op_via_apply(self, name: str, *args, **kwargs):
"""Compute the result of an operation by using GroupBy's apply."""
f = getattr(type(self._obj_with_exclusions), name)
- if not callable(f):
- return self.apply(lambda self: getattr(self, name))
-
sig = inspect.signature(f)
# a little trickery for aggregation functions that need an axis
@@ -980,9 +977,6 @@ def curried(x):
return self.apply(curried)
is_transform = name in base.transformation_kernels
- # Transform needs to keep the same schema, including when empty
- if is_transform and self._obj_with_exclusions.empty:
- return self._obj_with_exclusions
result = self._python_apply_general(
curried,
self._obj_with_exclusions,
@@ -1105,6 +1099,7 @@ def _set_result_index_ordered(
return result
+ @final
def _insert_inaxis_grouper(self, result: Series | DataFrame) -> DataFrame:
if isinstance(result, Series):
result = result.to_frame()
@@ -1131,7 +1126,7 @@ def _indexed_output_to_ndframe(
@final
def _wrap_aggregated_output(
self,
- output: Series | DataFrame | Mapping[base.OutputKey, ArrayLike],
+ result: Series | DataFrame,
qs: npt.NDArray[np.float64] | None = None,
):
"""
@@ -1139,22 +1134,14 @@ def _wrap_aggregated_output(
Parameters
----------
- output : Series, DataFrame, or Mapping[base.OutputKey, ArrayLike]
- Data to wrap.
+ result : Series, DataFrame
Returns
-------
Series or DataFrame
"""
-
- if isinstance(output, (Series, DataFrame)):
- # We get here (for DataFrameGroupBy) if we used Manager.grouped_reduce,
- # in which case our columns are already set correctly.
- # ATM we do not get here for SeriesGroupBy; when we do, we will
- # need to require that result.name already match self.obj.name
- result = output
- else:
- result = self._indexed_output_to_ndframe(output)
+ # ATM we do not get here for SeriesGroupBy; when we do, we will
+ # need to require that result.name already match self.obj.name
if not self.as_index:
# `not self.as_index` is only relevant for DataFrameGroupBy,
@@ -1183,36 +1170,6 @@ def _wrap_aggregated_output(
return self._reindex_output(result, qs=qs)
- @final
- def _wrap_transformed_output(
- self, output: Mapping[base.OutputKey, ArrayLike]
- ) -> Series | DataFrame:
- """
- Wraps the output of GroupBy transformations into the expected result.
-
- Parameters
- ----------
- output : Mapping[base.OutputKey, ArrayLike]
- Data to wrap.
-
- Returns
- -------
- Series or DataFrame
- Series for SeriesGroupBy, DataFrame for DataFrameGroupBy
- """
- if isinstance(output, (Series, DataFrame)):
- result = output
- else:
- result = self._indexed_output_to_ndframe(output)
-
- if self.axis == 1:
- # Only relevant for DataFrameGroupBy
- result = result.T
- result.columns = self.obj.columns
-
- result.index = self.obj.index
- return result
-
def _wrap_applied_output(
self,
data,
@@ -1456,7 +1413,8 @@ def _python_agg_general(self, func, *args, **kwargs):
output: dict[base.OutputKey, ArrayLike] = {}
if self.ngroups == 0:
- # agg_series below assumes ngroups > 0
+ # e.g. test_evaluate_with_empty_groups different path gets different
+ # result dtype in empty case.
return self._python_apply_general(f, self._selected_obj, is_agg=True)
for idx, obj in enumerate(self._iterate_slices()):
@@ -1466,9 +1424,11 @@ def _python_agg_general(self, func, *args, **kwargs):
output[key] = result
if not output:
+ # e.g. test_groupby_crash_on_nunique, test_margins_no_values_no_cols
return self._python_apply_general(f, self._selected_obj)
- return self._wrap_aggregated_output(output)
+ res = self._indexed_output_to_ndframe(output)
+ return self._wrap_aggregated_output(res)
@final
def _agg_general(
@@ -1850,6 +1810,7 @@ def hfunc(bvalues: ArrayLike) -> ArrayLike:
# If we are grouping on categoricals we want unobserved categories to
# return zero, rather than the default of NaN which the reindexing in
# _wrap_agged_manager() returns. GH 35028
+ # e.g. test_dataframe_groupby_on_2_categoricals_when_observed_is_false
with com.temp_setattr(self, "observed", True):
result = self._wrap_agged_manager(new_mgr)
@@ -2577,6 +2538,7 @@ def ohlc(self) -> DataFrame:
)
return self._reindex_output(result)
+ # TODO: 2023-02-05 all tests that get here have self.as_index
return self._apply_to_column_groupbys(
lambda x: x.ohlc(), self._obj_with_exclusions
)
@@ -2854,7 +2816,13 @@ def blk_func(values: ArrayLike) -> ArrayLike:
if isinstance(new_obj, Series):
new_obj.name = obj.name
- return self._wrap_transformed_output(new_obj)
+ if self.axis == 1:
+ # Only relevant for DataFrameGroupBy
+ new_obj = new_obj.T
+ new_obj.columns = self.obj.columns
+
+ new_obj.index = self.obj.index
+ return new_obj
@final
@Substitution(name="groupby")
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index bff61ec135d74..14ca9066dae77 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -1028,7 +1028,6 @@ def _aggregate_series_pure_python(
) -> npt.NDArray[np.object_]:
ids, _, ngroups = self.group_info
- counts = np.zeros(ngroups, dtype=int)
result = np.empty(ngroups, dtype="O")
initialized = False
@@ -1044,7 +1043,6 @@ def _aggregate_series_pure_python(
libreduction.check_result_array(res, group.dtype)
initialized = True
- counts[i] = group.shape[0]
result[i] = res
return result
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 0cb2765b439bc..01f0ddd1627c7 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -435,6 +435,7 @@ def _groupby_and_aggregate(self, how, *args, **kwargs):
try:
if isinstance(obj, ABCDataFrame) and callable(how):
# Check if the function is reducing or not.
+ # e.g. test_resample_apply_with_additional_args
result = grouped._aggregate_item_by_item(how, *args, **kwargs)
else:
result = grouped.aggregate(how, *args, **kwargs)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51187 | 2023-02-06T00:42:36Z | 2023-02-06T18:45:47Z | 2023-02-06T18:45:47Z | 2023-02-06T19:37:41Z |
REF: consolidate numeric_only checks in GroupBy | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 815f9936057f4..77bc30d5512a6 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -58,6 +58,7 @@
is_dict_like,
is_integer_dtype,
is_interval_dtype,
+ is_numeric_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import (
@@ -172,9 +173,18 @@ def _wrap_agged_manager(self, mgr: Manager) -> Series:
# NB: caller is responsible for setting ser.index
return ser
- def _get_data_to_aggregate(self) -> SingleManager:
+ def _get_data_to_aggregate(
+ self, *, numeric_only: bool = False, name: str | None = None
+ ) -> SingleManager:
ser = self._selected_obj
single = ser._mgr
+ if numeric_only and not is_numeric_dtype(ser.dtype):
+ # GH#41291 match Series behavior
+ kwd_name = "numeric_only"
+ raise TypeError(
+ f"Cannot use {kwd_name}=True with "
+ f"{type(self).__name__}.{name} and non-numeric dtypes."
+ )
return single
def _iterate_slices(self) -> Iterable[Series]:
@@ -1542,9 +1552,9 @@ def _cython_transform(
# test_transform_numeric_ret
# With self.axis == 1, _get_data_to_aggregate does a transpose
# so we always have a single block.
- mgr: Manager2D = self._get_data_to_aggregate()
- if numeric_only:
- mgr = mgr.get_numeric_data(copy=False)
+ mgr: Manager2D = self._get_data_to_aggregate(
+ numeric_only=numeric_only, name=how
+ )
def arr_func(bvalues: ArrayLike) -> ArrayLike:
return self.grouper._cython_operation(
@@ -1864,12 +1874,18 @@ def _gotitem(self, key, ndim: int, subset=None):
raise AssertionError("invalid ndim for _gotitem")
- def _get_data_to_aggregate(self) -> Manager2D:
+ def _get_data_to_aggregate(
+ self, *, numeric_only: bool = False, name: str | None = None
+ ) -> Manager2D:
obj = self._obj_with_exclusions
if self.axis == 1:
- return obj.T._mgr
+ mgr = obj.T._mgr
else:
- return obj._mgr
+ mgr = obj._mgr
+
+ if numeric_only:
+ mgr = mgr.get_numeric_data(copy=False)
+ return mgr
def _indexed_output_to_ndframe(
self, output: Mapping[base.OutputKey, ArrayLike]
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index fd9a06a06cfa7..3106ae1ec701b 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1540,22 +1540,9 @@ def _cython_agg_general(
# Note: we never get here with how="ohlc" for DataFrameGroupBy;
# that goes through SeriesGroupBy
- data = self._get_data_to_aggregate()
+ data = self._get_data_to_aggregate(numeric_only=numeric_only, name=how)
is_ser = data.ndim == 1
- if numeric_only:
- if is_ser and not is_numeric_dtype(self._selected_obj.dtype):
- # GH#41291 match Series behavior
- kwd_name = "numeric_only"
- if how in ["any", "all"]:
- kwd_name = "bool_only"
- raise TypeError(
- f"Cannot use {kwd_name}={numeric_only} with "
- f"{type(self).__name__}.{how} and non-numeric types."
- )
- if not is_ser:
- data = data.get_numeric_data(copy=False)
-
def array_func(values: ArrayLike) -> ArrayLike:
try:
result = self.grouper._cython_operation(
@@ -2034,15 +2021,6 @@ def std(
return np.sqrt(self._numba_agg_general(sliding_var, engine_kwargs, ddof))
else:
- if (
- numeric_only
- and self.obj.ndim == 1
- and not is_numeric_dtype(self.obj.dtype)
- ):
- raise TypeError(
- f"{type(self).__name__}.std called with "
- f"numeric_only={numeric_only} and dtype {self.obj.dtype}"
- )
def _preprocessing(values):
if isinstance(values, BaseMaskedArray):
@@ -3114,11 +3092,6 @@ def quantile(
a 2.0
b 3.0
"""
- if numeric_only and self.obj.ndim == 1 and not is_numeric_dtype(self.obj.dtype):
- raise TypeError(
- f"{type(self).__name__}.quantile called with "
- f"numeric_only={numeric_only} and dtype {self.obj.dtype}"
- )
def pre_processor(vals: ArrayLike) -> tuple[np.ndarray, DtypeObj | None]:
if is_object_dtype(vals):
@@ -3258,8 +3231,7 @@ def blk_func(values: ArrayLike) -> ArrayLike:
obj = self._obj_with_exclusions
is_ser = obj.ndim == 1
- mgr = self._get_data_to_aggregate()
- data = mgr.get_numeric_data() if numeric_only else mgr
+ data = self._get_data_to_aggregate(numeric_only=numeric_only, name="quantile")
res_mgr = data.grouped_reduce(blk_func)
if is_ser:
@@ -3716,10 +3688,7 @@ def blk_func(values: ArrayLike) -> ArrayLike:
# Operate block-wise instead of column-by-column
is_ser = obj.ndim == 1
- mgr = self._get_data_to_aggregate()
-
- if numeric_only:
- mgr = mgr.get_numeric_data()
+ mgr = self._get_data_to_aggregate(numeric_only=numeric_only, name=how)
res_mgr = mgr.grouped_reduce(blk_func)
diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py
index 7e7d3d682f20f..d3f9dd31e9fa1 100644
--- a/pandas/tests/groupby/aggregate/test_cython.py
+++ b/pandas/tests/groupby/aggregate/test_cython.py
@@ -92,7 +92,8 @@ def test_cython_agg_boolean():
def test_cython_agg_nothing_to_agg():
frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25})
- with pytest.raises(TypeError, match="Cannot use numeric_only=True"):
+ msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes"
+ with pytest.raises(TypeError, match=msg):
frame.groupby("a")["b"].mean(numeric_only=True)
with pytest.raises(TypeError, match="Could not convert (foo|bar)*"):
@@ -117,7 +118,8 @@ def test_cython_agg_nothing_to_agg_with_dates():
"dates": pd.date_range("now", periods=50, freq="T"),
}
)
- with pytest.raises(TypeError, match="Cannot use numeric_only=True"):
+ msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes"
+ with pytest.raises(TypeError, match=msg):
frame.groupby("b").dates.mean(numeric_only=True)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 1fcbc7c305a06..1fd61e6eb268e 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1555,11 +1555,10 @@ def test_deprecate_numeric_only_series(dtype, groupby_func, request):
elif dtype is object:
msg = "|".join(
[
- "Cannot use numeric_only=True",
- "called with numeric_only=True and dtype object",
+ "SeriesGroupBy.sem called with numeric_only=True and dtype object",
"Series.skew does not allow numeric_only=True with non-numeric",
- "got an unexpected keyword argument 'numeric_only'",
- "is not supported for object dtype",
+ "cum(sum|prod|min|max) is not supported for object dtype",
+ r"Cannot use numeric_only=True with SeriesGroupBy\..* and non-numeric",
]
)
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 0f18c5c5774b7..e6e924793389d 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -908,7 +908,8 @@ def test_series_downsample_method(method, numeric_only, expected_data):
func = getattr(resampled, method)
if numeric_only and numeric_only is not lib.no_default:
- with pytest.raises(TypeError, match="Cannot use numeric_only=True"):
+ msg = rf"Cannot use numeric_only=True with SeriesGroupBy\.{method}"
+ with pytest.raises(TypeError, match=msg):
func(**kwargs)
elif method == "prod":
with pytest.raises(TypeError, match="can't multiply sequence by non-int"):
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51185 | 2023-02-06T00:17:32Z | 2023-02-06T12:33:22Z | 2023-02-06T12:33:22Z | 2023-02-06T16:01:51Z |
REF: dont rely on Grouper.ax | diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 366be9e79004c..f9817d3e72f73 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -8,7 +8,6 @@
TYPE_CHECKING,
Hashable,
Iterator,
- cast,
final,
)
import warnings
@@ -300,9 +299,9 @@ def _get_grouper(
-------
a tuple of grouper, obj (possibly sorted)
"""
- self._set_grouper(obj)
+ obj, _ = self._set_grouper(obj)
grouper, _, obj = get_grouper(
- cast(NDFrameT, self.obj),
+ obj,
[self.key],
axis=self.axis,
level=self.level,
@@ -318,7 +317,9 @@ def _get_grouper(
return grouper, obj
@final
- def _set_grouper(self, obj: NDFrame, sort: bool = False) -> None:
+ def _set_grouper(
+ self, obj: NDFrame, sort: bool = False, *, gpr_index: Index | None = None
+ ):
"""
given an object and the specifications, setup the internal grouper
for this particular specification
@@ -328,6 +329,12 @@ def _set_grouper(self, obj: NDFrame, sort: bool = False) -> None:
obj : Series or DataFrame
sort : bool, default False
whether the resulting grouper should be sorted
+ gpr_index : Index or None, default None
+
+ Returns
+ -------
+ NDFrame
+ Index
"""
assert obj is not None
@@ -337,16 +344,14 @@ def _set_grouper(self, obj: NDFrame, sort: bool = False) -> None:
# Keep self._grouper value before overriding
if self._grouper is None:
# TODO: What are we assuming about subsequent calls?
- self._grouper = self._gpr_index
+ self._grouper = gpr_index
self._indexer = self.indexer
# the key must be a valid info item
if self.key is not None:
key = self.key
# The 'on' is already defined
- if getattr(self._gpr_index, "name", None) == key and isinstance(
- obj, Series
- ):
+ if getattr(gpr_index, "name", None) == key and isinstance(obj, Series):
# Sometimes self._grouper will have been resorted while
# obj has not. In this case there is a mismatch when we
# call self._grouper.take(obj.index) so we need to undo the sorting
@@ -392,6 +397,7 @@ def _set_grouper(self, obj: NDFrame, sort: bool = False) -> None:
# "NDFrameT", variable has type "None")
self.obj = obj # type: ignore[assignment]
self._gpr_index = ax
+ return obj, ax
@final
@property
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 01d5591d3d590..31d1be6c9ccbe 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -133,6 +133,7 @@ class Resampler(BaseGroupBy, PandasObject):
_timegrouper: TimeGrouper
binner: DatetimeIndex | TimedeltaIndex | PeriodIndex # depends on subclass
exclusions: frozenset[Hashable] = frozenset() # for SelectionMixin compat
+ _internal_names_set = set({"obj", "ax"})
# to the groupby descriptor
_attributes = [
@@ -153,6 +154,7 @@ def __init__(
axis: Axis = 0,
kind=None,
*,
+ gpr_index: Index,
group_keys: bool | lib.NoDefault = lib.no_default,
selection=None,
) -> None:
@@ -166,7 +168,9 @@ def __init__(
self.group_keys = group_keys
self.as_index = True
- self._timegrouper._set_grouper(self._convert_obj(obj), sort=True)
+ self.obj, self.ax = self._timegrouper._set_grouper(
+ self._convert_obj(obj), sort=True, gpr_index=gpr_index
+ )
self.binner, self.grouper = self._get_binner()
self._selection = selection
if self._timegrouper.key is not None:
@@ -195,19 +199,6 @@ def __getattr__(self, attr: str):
return object.__getattribute__(self, attr)
- # error: Signature of "obj" incompatible with supertype "BaseGroupBy"
- @property
- def obj(self) -> NDFrame: # type: ignore[override]
- # error: Incompatible return value type (got "Optional[Any]",
- # expected "NDFrameT")
- return self._timegrouper.obj # type: ignore[return-value]
-
- @property
- def ax(self):
- # we can infer that this is a PeriodIndex/DatetimeIndex/TimedeltaIndex,
- # but skipping annotating bc the overrides overwhelming
- return self._timegrouper.ax
-
@property
def _from_selection(self) -> bool:
"""
@@ -1189,6 +1180,9 @@ def __init__(
self._groupby = groupby
self._timegrouper = copy.copy(parent._timegrouper)
+ self.ax = parent.ax
+ self.obj = parent.obj
+
@no_type_check
def _apply(self, f, *args, **kwargs):
"""
@@ -1197,7 +1191,7 @@ def _apply(self, f, *args, **kwargs):
"""
def func(x):
- x = self._resampler_cls(x, timegrouper=self._timegrouper)
+ x = self._resampler_cls(x, timegrouper=self._timegrouper, gpr_index=self.ax)
if isinstance(f, str):
return getattr(x, f)(**kwargs)
@@ -1226,8 +1220,7 @@ def _gotitem(self, key, ndim, subset=None):
"""
# create a new object to prevent aliasing
if subset is None:
- # error: "GotItemMixin" has no attribute "obj"
- subset = self.obj # type: ignore[attr-defined]
+ subset = self.obj
# Try to select from a DataFrame, falling back to a Series
try:
@@ -1688,9 +1681,8 @@ def _get_resampler(self, obj: NDFrame, kind=None) -> Resampler:
TypeError if incompatible axis
"""
- self._set_grouper(obj)
+ _, ax = self._set_grouper(obj, gpr_index=None)
- ax = self.ax
if isinstance(ax, DatetimeIndex):
return DatetimeIndexResampler(
obj,
@@ -1698,6 +1690,7 @@ def _get_resampler(self, obj: NDFrame, kind=None) -> Resampler:
kind=kind,
axis=self.axis,
group_keys=self.group_keys,
+ gpr_index=ax,
)
elif isinstance(ax, PeriodIndex) or kind == "period":
return PeriodIndexResampler(
@@ -1706,10 +1699,15 @@ def _get_resampler(self, obj: NDFrame, kind=None) -> Resampler:
kind=kind,
axis=self.axis,
group_keys=self.group_keys,
+ gpr_index=ax,
)
elif isinstance(ax, TimedeltaIndex):
return TimedeltaIndexResampler(
- obj, timegrouper=self, axis=self.axis, group_keys=self.group_keys
+ obj,
+ timegrouper=self,
+ axis=self.axis,
+ group_keys=self.group_keys,
+ gpr_index=ax,
)
raise TypeError(
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
After this we an deprecate Grouper.ax | https://api.github.com/repos/pandas-dev/pandas/pulls/51184 | 2023-02-05T22:52:55Z | 2023-02-06T23:08:43Z | 2023-02-06T23:08:43Z | 2023-02-06T23:12:15Z |
DEPR: Grouper.grouper, Grouper.groups | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 1bfe9f50efbc8..f634e21132bed 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -781,6 +781,9 @@ Deprecations
- :meth:`Index.is_interval` has been deprecated. Use :func:`pandas.api.types.is_intterval_dtype` instead (:issue:`50042`)
- Deprecated ``all`` and ``any`` reductions with ``datetime64`` and :class:`DatetimeTZDtype` dtypes, use e.g. ``(obj != pd.Timestamp(0), tz=obj.tz).all()`` instead (:issue:`34479`)
- Deprecated calling ``float`` or ``int`` on a single element :class:`Series` to return a ``float`` or ``int`` respectively. Extract the element before calling ``float`` or ``int`` instead (:issue:`51101`)
+- Deprecated :meth:`Grouper.groups`, use :meth:`Groupby.groups` instead (:issue:`51182`)
+- Deprecated :meth:`Grouper.grouper`, use :meth:`Groupby.grouper` instead (:issue:`51182`)
+-
.. ---------------------------------------------------------------------------
.. _whatsnew_200.prior_deprecations:
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index d77ad59a4bb82..366be9e79004c 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -11,6 +11,7 @@
cast,
final,
)
+import warnings
import numpy as np
@@ -24,6 +25,7 @@
)
from pandas.errors import InvalidIndexError
from pandas.util._decorators import cache_readonly
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_categorical_dtype,
@@ -268,7 +270,7 @@ def __init__(
self.sort = sort
self.dropna = dropna
- self.grouper = None
+ self._grouper_deprecated = None
self._gpr_index = None
self.obj = None
self.indexer = None
@@ -308,6 +310,10 @@ def _get_grouper(
validate=validate,
dropna=self.dropna,
)
+ # Without setting this, subsequent lookups to .groups raise
+ # error: Incompatible types in assignment (expression has type "BaseGrouper",
+ # variable has type "None")
+ self._grouper_deprecated = grouper # type: ignore[assignment]
return grouper, obj
@@ -328,7 +334,7 @@ def _set_grouper(self, obj: NDFrame, sort: bool = False) -> None:
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
- # Keep self.grouper value before overriding
+ # Keep self._grouper value before overriding
if self._grouper is None:
# TODO: What are we assuming about subsequent calls?
self._grouper = self._gpr_index
@@ -387,11 +393,28 @@ def _set_grouper(self, obj: NDFrame, sort: bool = False) -> None:
self.obj = obj # type: ignore[assignment]
self._gpr_index = ax
+ @final
+ @property
+ def grouper(self):
+ warnings.warn(
+ f"{type(self).__name__}.grouper is deprecated and will be removed "
+ "in a future version. Use GroupBy.grouper instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self._grouper_deprecated
+
@final
@property
def groups(self):
+ warnings.warn(
+ f"{type(self).__name__}.groups is deprecated and will be removed "
+ "in a future version. Use GroupBy.groups instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
# error: "None" has no attribute "groups"
- return self.grouper.groups # type: ignore[attr-defined]
+ return self._grouper_deprecated.groups # type: ignore[attr-defined]
@final
def __repr__(self) -> str:
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 30cfe638c8540..f7d4adc00260f 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -1049,3 +1049,20 @@ def test_grouping_by_key_is_in_axis():
result = gb.sum()
expected = DataFrame({"b": [1, 2], "c": [7, 5]})
tm.assert_frame_equal(result, expected)
+
+
+def test_grouper_groups():
+ # GH#51182 check Grouper.groups does not raise AttributeError
+ df = DataFrame({"a": [1, 2, 3], "b": 1})
+ grper = Grouper(key="a")
+ gb = df.groupby(grper)
+
+ msg = "Use GroupBy.groups instead"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ res = grper.groups
+ assert res is gb.groups
+
+ msg = "Use GroupBy.grouper instead"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ res = grper.grouper
+ assert res is gb.grouper
| Partially reverts #51145 cc @phofl | https://api.github.com/repos/pandas-dev/pandas/pulls/51182 | 2023-02-05T19:53:29Z | 2023-02-05T23:13:52Z | 2023-02-05T23:13:52Z | 2023-02-05T23:14:53Z |
TYP: resample | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6e893c46acfdb..6008e6b6cb566 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8783,7 +8783,7 @@ def resample(
axis = self._get_axis_number(axis)
return get_resampler(
- self,
+ cast("Series | DataFrame", self),
freq=rule,
label=label,
closed=closed,
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index bb716628114ea..52ccef0f5b602 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -129,6 +129,7 @@ class Resampler(BaseGroupBy, PandasObject):
grouper: BinGrouper
_timegrouper: TimeGrouper
+ binner: DatetimeIndex | TimedeltaIndex | PeriodIndex # depends on subclass
exclusions: frozenset[Hashable] = frozenset() # for SelectionMixin compat
# to the groupby descriptor
@@ -145,15 +146,15 @@ class Resampler(BaseGroupBy, PandasObject):
def __init__(
self,
- obj: DataFrame | Series,
- groupby: TimeGrouper,
+ obj: NDFrame,
+ timegrouper: TimeGrouper,
axis: Axis = 0,
kind=None,
*,
group_keys: bool | lib.NoDefault = lib.no_default,
selection=None,
) -> None:
- self._timegrouper = groupby
+ self._timegrouper = timegrouper
self.keys = None
self.sort = True
# error: Incompatible types in assignment (expression has type "Union
@@ -463,7 +464,7 @@ def _groupby_and_aggregate(self, how, *args, **kwargs):
return self._wrap_result(result)
- def _get_resampler_for_grouping(self, groupby, key=None):
+ def _get_resampler_for_grouping(self, groupby: GroupBy, key):
"""
Return the correct class for resampling with groupby.
"""
@@ -1142,18 +1143,29 @@ class _GroupByMixin(PandasObject):
_attributes: list[str] # in practice the same as Resampler._attributes
_selection: IndexLabel | None = None
+ _groupby: GroupBy
+ _timegrouper: TimeGrouper
- def __init__(self, *, parent: Resampler, groupby=None, key=None, **kwargs) -> None:
+ def __init__(
+ self,
+ *,
+ parent: Resampler,
+ groupby: GroupBy,
+ key=None,
+ selection: IndexLabel | None = None,
+ ) -> None:
# reached via ._gotitem and _get_resampler_for_grouping
+ assert isinstance(groupby, GroupBy), type(groupby)
+
# parent is always a Resampler, sometimes a _GroupByMixin
assert isinstance(parent, Resampler), type(parent)
# initialize our GroupByMixin object with
# the resampler attributes
for attr in self._attributes:
- setattr(self, attr, kwargs.get(attr, getattr(parent, attr)))
- self._selection = kwargs.get("selection")
+ setattr(self, attr, getattr(parent, attr))
+ self._selection = selection
self.binner = parent.binner
self.key = key
@@ -1169,7 +1181,7 @@ def _apply(self, f, *args, **kwargs):
"""
def func(x):
- x = self._resampler_cls(x, groupby=self._timegrouper)
+ x = self._resampler_cls(x, timegrouper=self._timegrouper)
if isinstance(f, str):
return getattr(x, f)(**kwargs)
@@ -1201,10 +1213,6 @@ def _gotitem(self, key, ndim, subset=None):
# error: "GotItemMixin" has no attribute "obj"
subset = self.obj # type: ignore[attr-defined]
- # we need to make a shallow copy of ourselves
- # with the same groupby
- kwargs = {attr: getattr(self, attr) for attr in self._attributes}
-
# Try to select from a DataFrame, falling back to a Series
try:
if isinstance(key, list) and self.key not in key and self.key is not None:
@@ -1223,7 +1231,6 @@ def _gotitem(self, key, ndim, subset=None):
groupby=groupby,
parent=cast(Resampler, self),
selection=selection,
- **kwargs,
)
return new_rs
@@ -1499,9 +1506,7 @@ def _resampler_cls(self):
return TimedeltaIndexResampler
-def get_resampler(
- obj, kind=None, **kwds
-) -> DatetimeIndexResampler | PeriodIndexResampler | TimedeltaIndexResampler:
+def get_resampler(obj: Series | DataFrame, kind=None, **kwds) -> Resampler:
"""
Create a TimeGrouper and return our resampler.
"""
@@ -1513,8 +1518,15 @@ def get_resampler(
def get_resampler_for_grouping(
- groupby, rule, how=None, fill_method=None, limit=None, kind=None, on=None, **kwargs
-):
+ groupby: GroupBy,
+ rule,
+ how=None,
+ fill_method=None,
+ limit=None,
+ kind=None,
+ on=None,
+ **kwargs,
+) -> Resampler:
"""
Return our appropriate resampler when grouping as well.
"""
@@ -1641,19 +1653,19 @@ def __init__(
super().__init__(freq=freq, axis=axis, **kwargs)
- def _get_resampler(self, obj, kind=None):
+ def _get_resampler(self, obj: NDFrame, kind=None) -> Resampler:
"""
Return my resampler or raise if we have an invalid axis.
Parameters
----------
- obj : input object
+ obj : Series or DataFrame
kind : string, optional
'period','timestamp','timedelta' are valid
Returns
-------
- a Resampler
+ Resampler
Raises
------
@@ -1665,15 +1677,23 @@ def _get_resampler(self, obj, kind=None):
ax = self.ax
if isinstance(ax, DatetimeIndex):
return DatetimeIndexResampler(
- obj, groupby=self, kind=kind, axis=self.axis, group_keys=self.group_keys
+ obj,
+ timegrouper=self,
+ kind=kind,
+ axis=self.axis,
+ group_keys=self.group_keys,
)
elif isinstance(ax, PeriodIndex) or kind == "period":
return PeriodIndexResampler(
- obj, groupby=self, kind=kind, axis=self.axis, group_keys=self.group_keys
+ obj,
+ timegrouper=self,
+ kind=kind,
+ axis=self.axis,
+ group_keys=self.group_keys,
)
elif isinstance(ax, TimedeltaIndex):
return TimedeltaIndexResampler(
- obj, groupby=self, axis=self.axis, group_keys=self.group_keys
+ obj, timegrouper=self, axis=self.axis, group_keys=self.group_keys
)
raise TypeError(
@@ -1682,10 +1702,12 @@ def _get_resampler(self, obj, kind=None):
f"but got an instance of '{type(ax).__name__}'"
)
- def _get_grouper(self, obj, validate: bool = True):
+ def _get_grouper(
+ self, obj: NDFrameT, validate: bool = True
+ ) -> tuple[BinGrouper, NDFrameT]:
# create the resampler and return our binner
r = self._get_resampler(obj)
- return r.grouper, r.obj
+ return r.grouper, cast(NDFrameT, r.obj)
def _get_time_bins(self, ax: DatetimeIndex):
if not isinstance(ax, DatetimeIndex):
@@ -1750,19 +1772,21 @@ def _get_time_bins(self, ax: DatetimeIndex):
return binner, bins, labels
- def _adjust_bin_edges(self, binner, ax_values):
+ def _adjust_bin_edges(
+ self, binner: DatetimeIndex, ax_values: npt.NDArray[np.int64]
+ ) -> tuple[DatetimeIndex, npt.NDArray[np.int64]]:
# Some hacks for > daily data, see #1471, #1458, #1483
if self.freq != "D" and is_superperiod(self.freq, "D"):
if self.closed == "right":
# GH 21459, GH 9119: Adjust the bins relative to the wall time
- bin_edges = binner.tz_localize(None)
- bin_edges = (
- bin_edges
- + Timedelta(days=1, unit=bin_edges.unit).as_unit(bin_edges.unit)
- - Timedelta(1, unit=bin_edges.unit).as_unit(bin_edges.unit)
+ edges_dti = binner.tz_localize(None)
+ edges_dti = (
+ edges_dti
+ + Timedelta(days=1, unit=edges_dti.unit).as_unit(edges_dti.unit)
+ - Timedelta(1, unit=edges_dti.unit).as_unit(edges_dti.unit)
)
- bin_edges = bin_edges.tz_localize(binner.tz).asi8
+ bin_edges = edges_dti.tz_localize(binner.tz).asi8
else:
bin_edges = binner.asi8
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51178 | 2023-02-05T02:58:00Z | 2023-02-06T18:50:25Z | 2023-02-06T18:50:25Z | 2023-02-06T19:31:05Z |
DOC: fix various typos in plotting documentation | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index fbd1eef138792..e861a10968af2 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -308,7 +308,7 @@ def hist_frame(
See Also
--------
-Series.plot.hist: Make a histogram.
+pandas.Series.plot.hist: Make a histogram.
matplotlib.pyplot.boxplot : Matplotlib equivalent plot.
Notes
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index b5e23511f03bc..0eb6c826e2d4c 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -214,7 +214,7 @@ def radviz(
Returns
-------
- class:`matplotlib.axes.Axes`
+ :class:`matplotlib.axes.Axes`
See Also
--------
@@ -274,12 +274,13 @@ def andrews_curves(
Andrews curves have the functional form:
- f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
- x_4 sin(2t) + x_5 cos(2t) + ...
+ .. math::
+ f(t) = \\frac{x_1}{\\sqrt{2}} + x_2 \\sin(t) + x_3 \\cos(t) +
+ x_4 \\sin(2t) + x_5 \\cos(2t) + \\cdots
- Where x coefficients correspond to the values of each dimension and t is
- linearly spaced between -pi and +pi. Each row of frame then corresponds to
- a single curve.
+ Where :math:`x` coefficients correspond to the values of each dimension
+ and :math:`t` is linearly spaced between :math:`-\\pi` and :math:`+\\pi`.
+ Each row of frame then corresponds to a single curve.
Parameters
----------
@@ -302,7 +303,7 @@ def andrews_curves(
Returns
-------
- class:`matplotlip.axis.Axes`
+ :class:`matplotlib.axes.Axes`
Examples
--------
@@ -369,8 +370,8 @@ def bootstrap_plot(
See Also
--------
- DataFrame.plot : Basic plotting for DataFrame objects.
- Series.plot : Basic plotting for Series objects.
+ pandas.DataFrame.plot : Basic plotting for DataFrame objects.
+ pandas.Series.plot : Basic plotting for Series objects.
Examples
--------
@@ -434,7 +435,7 @@ def parallel_coordinates(
Returns
-------
- matplotlib.axis.Axes
+ matplotlib.axes.Axes
Examples
--------
@@ -485,7 +486,7 @@ def lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Ax
Returns
-------
- matplotlib.axis.Axes
+ matplotlib.axes.Axes
Examples
--------
@@ -529,7 +530,7 @@ def autocorrelation_plot(series: Series, ax: Axes | None = None, **kwargs) -> Ax
Returns
-------
- matplotlib.axis.Axes
+ matplotlib.axes.Axes
Examples
--------
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51176 | 2023-02-05T02:27:30Z | 2023-02-05T20:29:01Z | 2023-02-05T20:29:01Z | 2023-02-05T20:29:08Z |
CI: unpin pyarrow, fix failing test | diff --git a/.github/actions/setup-conda/action.yml b/.github/actions/setup-conda/action.yml
index f4b1d9e49f63a..002d0020c2df1 100644
--- a/.github/actions/setup-conda/action.yml
+++ b/.github/actions/setup-conda/action.yml
@@ -18,7 +18,7 @@ runs:
- name: Set Arrow version in ${{ inputs.environment-file }} to ${{ inputs.pyarrow-version }}
run: |
grep -q ' - pyarrow' ${{ inputs.environment-file }}
- sed -i"" -e "s/ - pyarrow<11/ - pyarrow=${{ inputs.pyarrow-version }}/" ${{ inputs.environment-file }}
+ sed -i"" -e "s/ - pyarrow/ - pyarrow=${{ inputs.pyarrow-version }}/" ${{ inputs.environment-file }}
cat ${{ inputs.environment-file }}
shell: bash
if: ${{ inputs.pyarrow-version }}
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml
index 24676856f9fad..28ff56d8619b9 100644
--- a/ci/deps/actions-310.yaml
+++ b/ci/deps/actions-310.yaml
@@ -42,7 +42,7 @@ dependencies:
- psycopg2
- pymysql
- pytables
- - pyarrow<11
+ - pyarrow
- pyreadstat
- python-snappy
- pyxlsb
diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml
index dcc5932826716..32e3fe740b431 100644
--- a/ci/deps/actions-311.yaml
+++ b/ci/deps/actions-311.yaml
@@ -42,7 +42,7 @@ dependencies:
- psycopg2
- pymysql
# - pytables>=3.8.0 # first version that supports 3.11
- - pyarrow<11
+ - pyarrow
- pyreadstat
- python-snappy
- pyxlsb
diff --git a/ci/deps/actions-38-downstream_compat.yaml b/ci/deps/actions-38-downstream_compat.yaml
index 8f6fe60403b18..a2f22de43fb23 100644
--- a/ci/deps/actions-38-downstream_compat.yaml
+++ b/ci/deps/actions-38-downstream_compat.yaml
@@ -40,7 +40,7 @@ dependencies:
- openpyxl
- odfpy
- psycopg2
- - pyarrow<11
+ - pyarrow
- pymysql
- pyreadstat
- pytables
diff --git a/ci/deps/actions-38.yaml b/ci/deps/actions-38.yaml
index ea9e3fea365a0..e17941f93ecf1 100644
--- a/ci/deps/actions-38.yaml
+++ b/ci/deps/actions-38.yaml
@@ -40,7 +40,7 @@ dependencies:
- odfpy
- pandas-gbq
- psycopg2
- - pyarrow<11
+ - pyarrow
- pymysql
- pyreadstat
- pytables
diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml
index 80cf1c1e539b7..ed8dc6f760254 100644
--- a/ci/deps/actions-39.yaml
+++ b/ci/deps/actions-39.yaml
@@ -41,7 +41,7 @@ dependencies:
- pandas-gbq
- psycopg2
- pymysql
- - pyarrow<11
+ - pyarrow
- pyreadstat
- pytables
- python-snappy
diff --git a/ci/deps/circle-38-arm64.yaml b/ci/deps/circle-38-arm64.yaml
index e8fc1a1459943..4d406460eab70 100644
--- a/ci/deps/circle-38-arm64.yaml
+++ b/ci/deps/circle-38-arm64.yaml
@@ -40,7 +40,7 @@ dependencies:
- odfpy
- pandas-gbq
- psycopg2
- - pyarrow<11
+ - pyarrow
- pymysql
# Not provided on ARM
#- pyreadstat
diff --git a/environment.yml b/environment.yml
index 9169cbf08b45d..f0678abbfe211 100644
--- a/environment.yml
+++ b/environment.yml
@@ -43,7 +43,7 @@ dependencies:
- odfpy
- py
- psycopg2
- - pyarrow<11
+ - pyarrow
- pymysql
- pyreadstat
- pytables
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 075beca106e6a..3a3f0b8ce61be 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -275,6 +275,18 @@ def _from_sequence_of_strings(
from pandas.core.tools.timedeltas import to_timedelta
scalars = to_timedelta(strings, errors="raise")
+ if pa_type.unit != "ns":
+ # GH51175: test_from_sequence_of_strings_pa_array
+ # attempt to parse as int64 reflecting pyarrow's
+ # duration to string casting behavior
+ mask = isna(scalars)
+ if not isinstance(strings, (pa.Array, pa.ChunkedArray)):
+ strings = pa.array(strings, type=pa.string(), from_pandas=True)
+ strings = pc.if_else(mask, None, strings)
+ try:
+ scalars = strings.cast(pa.int64())
+ except pa.ArrowInvalid:
+ pass
elif pa.types.is_time(pa_type):
from pandas.core.tools.times import to_time
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index b968004846e8e..42cf92c6b2a35 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -240,7 +240,9 @@ def _convert_listlike(
# returning arg (errors == "ignore"), and where the input is a
# generator, we return a useful list-like instead of a
# used-up generator
- arg = np.array(list(arg), dtype=object)
+ if not hasattr(arg, "__array__"):
+ arg = list(arg)
+ arg = np.array(arg, dtype=object)
try:
td64arr = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0]
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 1dac8faa3a9e2..681d048f38485 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -32,6 +32,7 @@
pa_version_under7p0,
pa_version_under8p0,
pa_version_under9p0,
+ pa_version_under11p0,
)
from pandas.errors import PerformanceWarning
@@ -287,7 +288,7 @@ def test_from_sequence_of_strings_pa_array(self, data, request):
reason="Nanosecond time parsing not supported.",
)
)
- elif pa.types.is_duration(pa_dtype):
+ elif pa_version_under11p0 and pa.types.is_duration(pa_dtype):
request.node.add_marker(
pytest.mark.xfail(
raises=pa.ArrowNotImplementedError,
@@ -1594,6 +1595,16 @@ def test_searchsorted_with_na_raises(data_for_sorting, as_series):
arr.searchsorted(b)
+@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
+def test_duration_from_strings_with_nat(unit):
+ # GH51175
+ strings = ["1000", "NaT"]
+ pa_type = pa.duration(unit)
+ result = ArrowExtensionArray._from_sequence_of_strings(strings, dtype=pa_type)
+ expected = ArrowExtensionArray(pa.array([1000, None], type=pa_type))
+ tm.assert_extension_array_equal(result, expected)
+
+
def test_unsupported_dt(data):
pa_dtype = data.dtype.pyarrow_dtype
if not pa.types.is_temporal(pa_dtype):
diff --git a/requirements-dev.txt b/requirements-dev.txt
index b6992a7266600..6d9bd1c93ded0 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -32,7 +32,7 @@ openpyxl
odfpy
py
psycopg2-binary
-pyarrow<11
+pyarrow
pymysql
pyreadstat
tables
| - [x] closes #51146
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Unpins pyarrow and fixes `test_from_sequence_of_strings_pa_array` which was xfailed for pyarrow duration types.
pyarrow 11.0 introduces support for casting from duration to string:
```
In [4]: pa.array([1], type=pa.duration("s")).cast(pa.string())
Out[4]:
<pyarrow.lib.StringArray object at 0x7f77ed6cc9a0>
[
"1"
]
```
pyarrow < 11.0 raises:
```
ArrowNotImplementedError: Unsupported cast from duration[s] to utf8 using function cast_string
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/51175 | 2023-02-05T01:40:46Z | 2023-02-10T18:15:09Z | 2023-02-10T18:15:09Z | 2023-02-23T01:38:52Z |
BUG: PandasArray.astype use astype_nansafe | diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 2ed42d699e862..216dbede39a6a 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -16,7 +16,12 @@
)
from pandas.compat.numpy import function as nv
+from pandas.core.dtypes.astype import astype_array
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
+from pandas.core.dtypes.common import (
+ is_dtype_equal,
+ pandas_dtype,
+)
from pandas.core.dtypes.dtypes import PandasDtype
from pandas.core.dtypes.missing import isna
@@ -185,6 +190,17 @@ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
# ------------------------------------------------------------------------
# Pandas ExtensionArray Interface
+ def astype(self, dtype, copy: bool = True):
+ dtype = pandas_dtype(dtype)
+
+ if is_dtype_equal(dtype, self.dtype):
+ if copy:
+ return self.copy()
+ return self
+
+ result = astype_array(self._ndarray, dtype=dtype, copy=copy)
+ return result
+
def isna(self) -> np.ndarray:
return isna(self._ndarray)
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 9b26db07fc28f..ff32fd5697e07 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -454,7 +454,8 @@ def astype(self, dtype, copy: bool = True):
values = arr.astype(dtype.numpy_dtype)
return FloatingArray(values, mask, copy=False)
elif isinstance(dtype, ExtensionDtype):
- return super().astype(dtype, copy=copy)
+ # Skip the PandasArray.astype method
+ return ExtensionArray.astype(self, dtype, copy)
elif np.issubdtype(dtype, np.floating):
arr = self._ndarray.copy()
mask = self.isna()
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 4dd541ef3f0aa..9cf7a08357720 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -190,10 +190,7 @@ def assert_series_equal(cls, left, right, *args, **kwargs):
class TestCasting(BaseNumPyTests, base.BaseCastingTests):
- @skip_nested
- def test_astype_str(self, data):
- # ValueError: setting an array element with a sequence
- super().test_astype_str(data)
+ pass
class TestConstructors(BaseNumPyTests, base.BaseConstructorsTests):
diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py
index 024cdf9300157..ce17614e1f8b7 100644
--- a/pandas/tests/series/methods/test_astype.py
+++ b/pandas/tests/series/methods/test_astype.py
@@ -352,14 +352,25 @@ def test_astype_cast_object_int_fail(self, dtype):
def test_astype_float_to_uint_negatives_raise(
self, float_numpy_dtype, any_unsigned_int_numpy_dtype
):
- # GH#45151
- # TODO: same for EA float/uint dtypes
+ # GH#45151 We don't cast negative numbers to nonsense values
+ # TODO: same for EA float/uint dtypes, signed integers?
arr = np.arange(5).astype(float_numpy_dtype) - 3 # includes negatives
ser = Series(arr)
- with pytest.raises(ValueError, match="losslessly"):
+ msg = "Cannot losslessly cast from .* to .*"
+ with pytest.raises(ValueError, match=msg):
ser.astype(any_unsigned_int_numpy_dtype)
+ with pytest.raises(ValueError, match=msg):
+ ser.to_frame().astype(any_unsigned_int_numpy_dtype)
+
+ with pytest.raises(ValueError, match=msg):
+ # We currently catch and re-raise in Index.astype
+ Index(ser).astype(any_unsigned_int_numpy_dtype)
+
+ with pytest.raises(ValueError, match=msg):
+ ser.array.astype(any_unsigned_int_numpy_dtype)
+
def test_astype_cast_object_int(self):
arr = Series(["1", "2", "3", "4"], dtype=object)
result = arr.astype(int)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51174 | 2023-02-04T22:42:56Z | 2023-02-06T19:51:16Z | 2023-02-06T19:51:16Z | 2023-02-06T19:52:50Z |
DOC: fix a typo "docstring" -> "docstrings" | diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst
index b619523b16eef..63554447f295e 100644
--- a/doc/source/development/contributing_codebase.rst
+++ b/doc/source/development/contributing_codebase.rst
@@ -25,7 +25,7 @@ contributing them to the project::
The script validates the doctests, formatting in docstrings, and
imported modules. It is possible to run the checks independently by using the
-parameters ``docstring``, ``code``, and ``doctests``
+parameters ``docstrings``, ``code``, and ``doctests``
(e.g. ``./ci/code_checks.sh doctests``).
In addition, because a lot of people use our library, it is important that we
| when using the "docstring" parameter I get an error:
```
$ ./ci/code_checks.sh docstring
Unknown command docstring. Usage: ./ci/code_checks.sh [code|doctests|docstrings|single-docs|notebooks]
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/51169 | 2023-02-04T21:30:40Z | 2023-02-04T21:35:26Z | 2023-02-04T21:35:26Z | 2023-02-04T21:42:46Z |
BUG: pd.array failing to raise with DataFrame | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 7cf88a642f511..f5dd5666b6ca5 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1333,6 +1333,7 @@ Metadata
Other
^^^^^
- Bug in :meth:`Series.searchsorted` inconsistent behavior when accepting :class:`DataFrame` as parameter ``value`` (:issue:`49620`)
+- Bug in :func:`array` failing to raise on :class:`DataFrame` inputs (:issue:`51167`)
-
.. ***DO NOT USE THIS SECTION***
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 5593826218fec..69ba7e0bb0848 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -52,6 +52,7 @@
)
from pandas.core.dtypes.dtypes import PandasDtype
from pandas.core.dtypes.generic import (
+ ABCDataFrame,
ABCExtensionArray,
ABCIndex,
ABCPandasArray,
@@ -308,6 +309,8 @@ def array(
if lib.is_scalar(data):
msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
raise ValueError(msg)
+ elif isinstance(data, ABCDataFrame):
+ raise TypeError("Cannot pass DataFrame to 'pandas.array'")
if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)):
# Note: we exclude np.ndarray here, will do type inference on it
diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py
index 2288ac408d99e..59e5c6fa2dda3 100644
--- a/pandas/tests/arrays/test_array.py
+++ b/pandas/tests/arrays/test_array.py
@@ -343,6 +343,14 @@ def test_scalar_raises():
pd.array(1)
+def test_dataframe_raises():
+ # GH#51167 don't accidentally cast to StringArray by doing inference on columns
+ df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
+ msg = "Cannot pass DataFrame to 'pandas.array'"
+ with pytest.raises(TypeError, match=msg):
+ pd.array(df)
+
+
def test_bounds_check():
# GH21796
with pytest.raises(
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51167 | 2023-02-04T21:09:15Z | 2023-02-05T15:38:31Z | 2023-02-05T15:38:30Z | 2023-02-05T16:13:25Z |
REF: let EAs override WrappedCythonOp groupby implementations | diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index a5032c590300c..accbf4468405d 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -1722,6 +1722,82 @@ def map(self, mapper, na_action=None):
"""
return map_array(self, mapper, na_action=na_action)
+ # ------------------------------------------------------------------------
+ # GroupBy Methods
+
+ def _groupby_op(
+ self,
+ *,
+ how: str,
+ has_dropped_na: bool,
+ min_count: int,
+ ngroups: int,
+ ids: npt.NDArray[np.intp],
+ **kwargs,
+ ) -> ArrayLike:
+ """
+ Dispatch GroupBy reduction or transformation operation.
+
+ This is an *experimental* API to allow ExtensionArray authors to implement
+ reductions and transformations. The API is subject to change.
+
+ Parameters
+ ----------
+ how : {'any', 'all', 'sum', 'prod', 'min', 'max', 'mean', 'median',
+ 'median', 'var', 'std', 'sem', 'nth', 'last', 'ohlc',
+ 'cumprod', 'cumsum', 'cummin', 'cummax', 'rank'}
+ has_dropped_na : bool
+ min_count : int
+ ngroups : int
+ ids : np.ndarray[np.intp]
+ ids[i] gives the integer label for the group that self[i] belongs to.
+ **kwargs : operation-specific
+ 'any', 'all' -> ['skipna']
+ 'var', 'std', 'sem' -> ['ddof']
+ 'cumprod', 'cumsum', 'cummin', 'cummax' -> ['skipna']
+ 'rank' -> ['ties_method', 'ascending', 'na_option', 'pct']
+
+ Returns
+ -------
+ np.ndarray or ExtensionArray
+ """
+ from pandas.core.arrays.string_ import StringDtype
+ from pandas.core.groupby.ops import WrappedCythonOp
+
+ kind = WrappedCythonOp.get_kind_from_how(how)
+ op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)
+
+ # GH#43682
+ if isinstance(self.dtype, StringDtype):
+ # StringArray
+ npvalues = self.to_numpy(object, na_value=np.nan)
+ else:
+ raise NotImplementedError(
+ f"function is not implemented for this dtype: {self.dtype}"
+ )
+
+ res_values = op._cython_op_ndim_compat(
+ npvalues,
+ min_count=min_count,
+ ngroups=ngroups,
+ comp_ids=ids,
+ mask=None,
+ **kwargs,
+ )
+
+ if op.how in op.cast_blocklist:
+ # i.e. how in ["rank"], since other cast_blocklist methods don't go
+ # through cython_operation
+ return res_values
+
+ if isinstance(self.dtype, StringDtype):
+ dtype = self.dtype
+ string_array_cls = dtype.construct_array_type()
+ return string_array_cls._from_sequence(res_values, dtype=dtype)
+
+ else:
+ raise NotImplementedError
+
class ExtensionArraySupportsAnyAll(ExtensionArray):
def any(self, *, skipna: bool = True) -> bool:
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 0219d97f12e8f..e1c9410946164 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2386,6 +2386,65 @@ def _str_get_dummies(self, sep: str = "|"):
return PandasArray(self.astype(str))._str_get_dummies(sep)
+ # ------------------------------------------------------------------------
+ # GroupBy Methods
+
+ def _groupby_op(
+ self,
+ *,
+ how: str,
+ has_dropped_na: bool,
+ min_count: int,
+ ngroups: int,
+ ids: npt.NDArray[np.intp],
+ **kwargs,
+ ):
+ from pandas.core.groupby.ops import WrappedCythonOp
+
+ kind = WrappedCythonOp.get_kind_from_how(how)
+ op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)
+
+ dtype = self.dtype
+ if how in ["sum", "prod", "cumsum", "cumprod", "skew"]:
+ raise TypeError(f"{dtype} type does not support {how} operations")
+ if how in ["min", "max", "rank"] and not dtype.ordered:
+ # raise TypeError instead of NotImplementedError to ensure we
+ # don't go down a group-by-group path, since in the empty-groups
+ # case that would fail to raise
+ raise TypeError(f"Cannot perform {how} with non-ordered Categorical")
+ if how not in ["rank", "any", "all", "first", "last", "min", "max"]:
+ if kind == "transform":
+ raise TypeError(f"{dtype} type does not support {how} operations")
+ raise TypeError(f"{dtype} dtype does not support aggregation '{how}'")
+
+ result_mask = None
+ mask = self.isna()
+ if how == "rank":
+ assert self.ordered # checked earlier
+ npvalues = self._ndarray
+ elif how in ["first", "last", "min", "max"]:
+ npvalues = self._ndarray
+ result_mask = np.zeros(ngroups, dtype=bool)
+ else:
+ # any/all
+ npvalues = self.astype(bool)
+
+ res_values = op._cython_op_ndim_compat(
+ npvalues,
+ min_count=min_count,
+ ngroups=ngroups,
+ comp_ids=ids,
+ mask=mask,
+ result_mask=result_mask,
+ **kwargs,
+ )
+
+ if how in op.cast_blocklist:
+ return res_values
+ elif how in ["first", "last", "min", "max"]:
+ res_values[result_mask == 1] = -1
+ return self._from_backing_data(res_values)
+
# The Series.cat accessor
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index c036dc09948d8..3b903cca3ed0c 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1549,6 +1549,88 @@ def _mode(self, dropna: bool = True):
npmodes = cast(np.ndarray, npmodes)
return self._from_backing_data(npmodes)
+ # ------------------------------------------------------------------
+ # GroupBy Methods
+
+ def _groupby_op(
+ self,
+ *,
+ how: str,
+ has_dropped_na: bool,
+ min_count: int,
+ ngroups: int,
+ ids: npt.NDArray[np.intp],
+ **kwargs,
+ ):
+ dtype = self.dtype
+ if dtype.kind == "M":
+ # Adding/multiplying datetimes is not valid
+ if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]:
+ raise TypeError(f"datetime64 type does not support {how} operations")
+ if how in ["any", "all"]:
+ # GH#34479
+ warnings.warn(
+ f"'{how}' with datetime64 dtypes is deprecated and will raise in a "
+ f"future version. Use (obj != pd.Timestamp(0)).{how}() instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ elif isinstance(dtype, PeriodDtype):
+ # Adding/multiplying Periods is not valid
+ if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]:
+ raise TypeError(f"Period type does not support {how} operations")
+ if how in ["any", "all"]:
+ # GH#34479
+ warnings.warn(
+ f"'{how}' with PeriodDtype is deprecated and will raise in a "
+ f"future version. Use (obj != pd.Period(0, freq)).{how}() instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ else:
+ # timedeltas we can add but not multiply
+ if how in ["prod", "cumprod", "skew"]:
+ raise TypeError(f"timedelta64 type does not support {how} operations")
+
+ # All of the functions implemented here are ordinal, so we can
+ # operate on the tz-naive equivalents
+ npvalues = self._ndarray.view("M8[ns]")
+
+ from pandas.core.groupby.ops import WrappedCythonOp
+
+ kind = WrappedCythonOp.get_kind_from_how(how)
+ op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)
+
+ res_values = op._cython_op_ndim_compat(
+ npvalues,
+ min_count=min_count,
+ ngroups=ngroups,
+ comp_ids=ids,
+ mask=None,
+ **kwargs,
+ )
+
+ if op.how in op.cast_blocklist:
+ # i.e. how in ["rank"], since other cast_blocklist methods don't go
+ # through cython_operation
+ return res_values
+
+ # We did a view to M8[ns] above, now we go the other direction
+ assert res_values.dtype == "M8[ns]"
+ if how in ["std", "sem"]:
+ from pandas.core.arrays import TimedeltaArray
+
+ if isinstance(self.dtype, PeriodDtype):
+ raise TypeError("'std' and 'sem' are not valid for PeriodDtype")
+ self = cast("DatetimeArray | TimedeltaArray", self)
+ new_dtype = f"m8[{self.unit}]"
+ res_values = res_values.view(new_dtype)
+ return TimedeltaArray(res_values)
+
+ res_values = res_values.view(self._ndarray.dtype)
+ return self._from_backing_data(res_values)
+
class DatelikeOps(DatetimeLikeArrayMixin):
"""
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index aa3516c3ecb4f..c95e54ecbe5a3 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -1382,3 +1382,46 @@ def _accumulate(
data, mask = op(data, mask, skipna=skipna, **kwargs)
return type(self)(data, mask, copy=False)
+
+ # ------------------------------------------------------------------
+ # GroupBy Methods
+
+ def _groupby_op(
+ self,
+ *,
+ how: str,
+ has_dropped_na: bool,
+ min_count: int,
+ ngroups: int,
+ ids: npt.NDArray[np.intp],
+ **kwargs,
+ ):
+ from pandas.core.groupby.ops import WrappedCythonOp
+
+ kind = WrappedCythonOp.get_kind_from_how(how)
+ op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)
+
+ # libgroupby functions are responsible for NOT altering mask
+ mask = self._mask
+ if op.kind != "aggregate":
+ result_mask = mask.copy()
+ else:
+ result_mask = np.zeros(ngroups, dtype=bool)
+
+ res_values = op._cython_op_ndim_compat(
+ self._data,
+ min_count=min_count,
+ ngroups=ngroups,
+ comp_ids=ids,
+ mask=mask,
+ result_mask=result_mask,
+ **kwargs,
+ )
+
+ if op.how == "ohlc":
+ arity = op._cython_arity.get(op.how, 1)
+ result_mask = np.tile(result_mask, (arity, 1)).T
+
+ # res_values should already have the correct dtype, we just need to
+ # wrap in a MaskedArray
+ return self._maybe_mask_result(res_values, result_mask)
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index f55fde9c75e4b..b628b750eb3c5 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -1806,6 +1806,21 @@ def _formatter(self, boxed: bool = False):
# This will infer the correct formatter from the dtype of the values.
return None
+ # ------------------------------------------------------------------------
+ # GroupBy Methods
+
+ def _groupby_op(
+ self,
+ *,
+ how: str,
+ has_dropped_na: bool,
+ min_count: int,
+ ngroups: int,
+ ids: npt.NDArray[np.intp],
+ **kwargs,
+ ):
+ raise NotImplementedError(f"{self.dtype} dtype not supported")
+
def _make_sparse(
arr: np.ndarray,
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 55155ea41f430..e4149dbf3969a 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -18,7 +18,6 @@
Sequence,
final,
)
-import warnings
import numpy as np
@@ -31,14 +30,12 @@
from pandas._typing import (
ArrayLike,
AxisInt,
- DtypeObj,
NDFrameT,
Shape,
npt,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
-from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.cast import (
maybe_cast_pointwise_result,
@@ -52,33 +49,16 @@
is_1d_only_ea_dtype,
is_bool_dtype,
is_complex_dtype,
- is_datetime64_any_dtype,
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
- is_period_dtype,
- is_sparse,
- is_timedelta64_dtype,
needs_i8_conversion,
)
-from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import (
isna,
maybe_fill,
)
-from pandas.core.arrays import (
- Categorical,
- DatetimeArray,
- ExtensionArray,
- PeriodArray,
- TimedeltaArray,
-)
-from pandas.core.arrays.masked import (
- BaseMaskedArray,
- BaseMaskedDtype,
-)
-from pandas.core.arrays.string_ import StringDtype
from pandas.core.frame import DataFrame
from pandas.core.groupby import grouper
from pandas.core.indexes.api import (
@@ -155,6 +135,12 @@ def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None:
_cython_arity = {"ohlc": 4} # OHLC
+ @classmethod
+ def get_kind_from_how(cls, how: str) -> str:
+ if how in cls._CYTHON_FUNCTIONS["aggregate"]:
+ return "aggregate"
+ return "transform"
+
# Note: we make this a classmethod and pass kind+how so that caching
# works at the class level and not the instance level
@classmethod
@@ -237,70 +223,6 @@ def _get_cython_vals(self, values: np.ndarray) -> np.ndarray:
return values
- # TODO: general case implementation overridable by EAs.
- def _disallow_invalid_ops(self, dtype: DtypeObj):
- """
- Check if we can do this operation with our cython functions.
-
- Raises
- ------
- TypeError
- This is not a valid operation for this dtype.
- NotImplementedError
- This may be a valid operation, but does not have a cython implementation.
- """
- how = self.how
-
- if is_numeric_dtype(dtype):
- # never an invalid op for those dtypes, so return early as fastpath
- return
-
- if isinstance(dtype, CategoricalDtype):
- if how in ["sum", "prod", "cumsum", "cumprod", "skew"]:
- raise TypeError(f"{dtype} type does not support {how} operations")
- if how in ["min", "max", "rank"] and not dtype.ordered:
- # raise TypeError instead of NotImplementedError to ensure we
- # don't go down a group-by-group path, since in the empty-groups
- # case that would fail to raise
- raise TypeError(f"Cannot perform {how} with non-ordered Categorical")
- if how not in ["rank", "any", "all", "first", "last", "min", "max"]:
- if self.kind == "transform":
- raise TypeError(f"{dtype} type does not support {how} operations")
- raise TypeError(f"{dtype} dtype does not support aggregation '{how}'")
-
- elif is_sparse(dtype):
- raise NotImplementedError(f"{dtype} dtype not supported")
- elif is_datetime64_any_dtype(dtype):
- # Adding/multiplying datetimes is not valid
- if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]:
- raise TypeError(f"datetime64 type does not support {how} operations")
- if how in ["any", "all"]:
- # GH#34479
- warnings.warn(
- f"'{how}' with datetime64 dtypes is deprecated and will raise in a "
- f"future version. Use (obj != pd.Timestamp(0)).{how}() instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
-
- elif is_period_dtype(dtype):
- # Adding/multiplying Periods is not valid
- if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]:
- raise TypeError(f"Period type does not support {how} operations")
- if how in ["any", "all"]:
- # GH#34479
- warnings.warn(
- f"'{how}' with PeriodDtype is deprecated and will raise in a "
- f"future version. Use (obj != pd.Period(0, freq)).{how}() instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
-
- elif is_timedelta64_dtype(dtype):
- # timedeltas we can add but not multiply
- if how in ["prod", "cumprod", "skew"]:
- raise TypeError(f"timedelta64 type does not support {how} operations")
-
def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
how = self.how
kind = self.kind
@@ -358,163 +280,6 @@ def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:
return np.dtype(np.float64)
return dtype
- @final
- def _ea_wrap_cython_operation(
- self,
- values: ExtensionArray,
- min_count: int,
- ngroups: int,
- comp_ids: np.ndarray,
- **kwargs,
- ) -> ArrayLike:
- """
- If we have an ExtensionArray, unwrap, call _cython_operation, and
- re-wrap if appropriate.
- """
- if isinstance(values, BaseMaskedArray):
- return self._masked_ea_wrap_cython_operation(
- values,
- min_count=min_count,
- ngroups=ngroups,
- comp_ids=comp_ids,
- **kwargs,
- )
-
- elif isinstance(values, Categorical):
- assert self.how in ["rank", "any", "all", "first", "last", "min", "max"]
- mask = values.isna()
- if self.how == "rank":
- assert values.ordered # checked earlier
- npvalues = values._ndarray
- elif self.how in ["first", "last", "min", "max"]:
- if self.how in ["min", "max"]:
- assert values.ordered # checked earlier
- npvalues = values._ndarray
- result_mask = np.zeros(ngroups, dtype=np.uint8)
- kwargs["result_mask"] = result_mask
- else:
- npvalues = values.astype(bool)
-
- res_values = self._cython_op_ndim_compat(
- npvalues,
- min_count=min_count,
- ngroups=ngroups,
- comp_ids=comp_ids,
- mask=mask,
- **kwargs,
- )
-
- if self.how in self.cast_blocklist:
- return res_values
- elif self.how in ["first", "last", "min", "max"]:
- res_values[result_mask == 1] = -1
- return values._from_backing_data(res_values)
-
- npvalues = self._ea_to_cython_values(values)
-
- res_values = self._cython_op_ndim_compat(
- npvalues,
- min_count=min_count,
- ngroups=ngroups,
- comp_ids=comp_ids,
- mask=None,
- **kwargs,
- )
-
- if self.how in self.cast_blocklist:
- # i.e. how in ["rank"], since other cast_blocklist methods don't go
- # through cython_operation
- return res_values
-
- return self._reconstruct_ea_result(values, res_values)
-
- # TODO: general case implementation overridable by EAs.
- def _ea_to_cython_values(self, values: ExtensionArray) -> np.ndarray:
- # GH#43682
- if isinstance(values, (DatetimeArray, PeriodArray, TimedeltaArray)):
- # All of the functions implemented here are ordinal, so we can
- # operate on the tz-naive equivalents
- npvalues = values._ndarray.view("M8[ns]")
- elif isinstance(values.dtype, StringDtype):
- # StringArray
- npvalues = values.to_numpy(object, na_value=np.nan)
- else:
- raise NotImplementedError(
- f"function is not implemented for this dtype: {values.dtype}"
- )
- return npvalues
-
- # TODO: general case implementation overridable by EAs.
- def _reconstruct_ea_result(
- self, values: ExtensionArray, res_values: np.ndarray
- ) -> ExtensionArray:
- """
- Construct an ExtensionArray result from an ndarray result.
- """
- dtype: BaseMaskedDtype | StringDtype
-
- if isinstance(values.dtype, StringDtype):
- dtype = values.dtype
- string_array_cls = dtype.construct_array_type()
- return string_array_cls._from_sequence(res_values, dtype=dtype)
-
- elif isinstance(values, (DatetimeArray, TimedeltaArray, PeriodArray)):
- # In to_cython_values we took a view as M8[ns]
- assert res_values.dtype == "M8[ns]"
- if self.how in ["std", "sem"]:
- if isinstance(values, PeriodArray):
- raise TypeError("'std' and 'sem' are not valid for PeriodDtype")
- new_dtype = f"m8[{values.unit}]"
- res_values = res_values.view(new_dtype)
- return TimedeltaArray(res_values)
-
- res_values = res_values.view(values._ndarray.dtype)
- return values._from_backing_data(res_values)
-
- raise NotImplementedError
-
- @final
- def _masked_ea_wrap_cython_operation(
- self,
- values: BaseMaskedArray,
- min_count: int,
- ngroups: int,
- comp_ids: np.ndarray,
- **kwargs,
- ) -> BaseMaskedArray:
- """
- Equivalent of `_ea_wrap_cython_operation`, but optimized for masked EA's
- and cython algorithms which accept a mask.
- """
- orig_values = values
-
- # libgroupby functions are responsible for NOT altering mask
- mask = values._mask
- if self.kind != "aggregate":
- result_mask = mask.copy()
- else:
- result_mask = np.zeros(ngroups, dtype=bool)
-
- arr = values._data
-
- res_values = self._cython_op_ndim_compat(
- arr,
- min_count=min_count,
- ngroups=ngroups,
- comp_ids=comp_ids,
- mask=mask,
- result_mask=result_mask,
- **kwargs,
- )
-
- if self.how == "ohlc":
- arity = self._cython_arity.get(self.how, 1)
- result_mask = np.tile(result_mask, (arity, 1)).T
-
- # res_values should already have the correct dtype, we just need to
- # wrap in a MaskedArray
- return orig_values._maybe_mask_result(res_values, result_mask)
-
@final
def _cython_op_ndim_compat(
self,
@@ -707,6 +472,17 @@ def _call_cython_op(
return op_result
+ @final
+ def _validate_axis(self, axis: AxisInt, values: ArrayLike) -> None:
+ if values.ndim > 2:
+ raise NotImplementedError("number of dimensions is currently limited to 2")
+ if values.ndim == 2:
+ assert axis == 1, axis
+ elif not is_1d_only_ea_dtype(values.dtype):
+ # Note: it is *not* the case that axis is always 0 for 1-dim values,
+ # as we can have 1D ExtensionArrays that we need to treat as 2D
+ assert axis == 0
+
@final
def cython_operation(
self,
@@ -721,26 +497,16 @@ def cython_operation(
"""
Call our cython function, with appropriate pre- and post- processing.
"""
- if values.ndim > 2:
- raise NotImplementedError("number of dimensions is currently limited to 2")
- if values.ndim == 2:
- assert axis == 1, axis
- elif not is_1d_only_ea_dtype(values.dtype):
- # Note: it is *not* the case that axis is always 0 for 1-dim values,
- # as we can have 1D ExtensionArrays that we need to treat as 2D
- assert axis == 0
-
- # can we do this operation with our cython functions
- # if not raise NotImplementedError
- self._disallow_invalid_ops(values.dtype)
+ self._validate_axis(axis, values)
if not isinstance(values, np.ndarray):
# i.e. ExtensionArray
- return self._ea_wrap_cython_operation(
- values,
+ return values._groupby_op(
+ how=self.how,
+ has_dropped_na=self.has_dropped_na,
min_count=min_count,
ngroups=ngroups,
- comp_ids=comp_ids,
+ ids=comp_ids,
**kwargs,
)
| - [x] closes #43682
Goal is to allow EAs (including our own pyarrow-backed) to implement performant GroupBy reductions/transforms without necessarily having to convert to numpy.
Analogous to #51003 (quantile) and #51116 (any, all, std).
This implements EA.grouby_op which takes a "how" keyword to specify which reduction/transform is being done. By contrast, #51003 implements EA.groupby_quantile and #51116 implements EA.groupby_std and EA.groupby_any_all. If we go this direction (we should), we should choose one naming scheme for all these methods instead of three slightly different ones. i.e. either a) shove everything into something like `groupby_op` or b) explicitly have `groupby_foo` for each of the relevant methods.
Also will need docs before moving forward. | https://api.github.com/repos/pandas-dev/pandas/pulls/51166 | 2023-02-04T20:53:37Z | 2023-04-05T22:11:30Z | 2023-04-05T22:11:30Z | 2023-04-05T22:18:28Z |
REF: Resampler subclasses remove unused arg, ensure parent is not None | diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index ebb803ee8f3b4..7b174be9df99b 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -7,6 +7,7 @@
Callable,
Hashable,
Literal,
+ cast,
final,
no_type_check,
)
@@ -479,7 +480,7 @@ def _get_resampler_for_grouping(self, groupby, key=None):
"""
Return the correct class for resampling with groupby.
"""
- return self._resampler_for_grouping(self, groupby=groupby, key=key)
+ return self._resampler_for_grouping(groupby=groupby, key=key, parent=self)
def _wrap_result(self, result):
"""
@@ -1155,11 +1156,11 @@ class _GroupByMixin(PandasObject):
_attributes: list[str] # in practice the same as Resampler._attributes
_selection: IndexLabel | None = None
- def __init__(self, obj, parent=None, groupby=None, key=None, **kwargs) -> None:
+ def __init__(self, *, parent: Resampler, groupby=None, key=None, **kwargs) -> None:
# reached via ._gotitem and _get_resampler_for_grouping
- if parent is None:
- parent = obj
+ # parent is always a Resampler, sometimes a _GroupByMixin
+ assert isinstance(parent, Resampler), type(parent)
# initialize our GroupByMixin object with
# the resampler attributes
@@ -1232,7 +1233,10 @@ def _gotitem(self, key, ndim, subset=None):
selection = key
new_rs = type(self)(
- subset, groupby=groupby, parent=self, selection=selection, **kwargs
+ groupby=groupby,
+ parent=cast(Resampler, self),
+ selection=selection,
+ **kwargs,
)
return new_rs
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51163 | 2023-02-04T16:27:14Z | 2023-02-04T22:04:32Z | 2023-02-04T22:04:32Z | 2023-02-04T22:43:28Z |
BUG: DataFrame.from_records with tzaware | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 802e2e6a488d0..3d0a502bf408d 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1131,6 +1131,7 @@ Datetimelike
- Bug in :func:`to_datetime` with unit of "Y" or "M" giving incorrect results, not matching pointwise :class:`Timestamp` results (:issue:`50870`)
- Bug in :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` with datetime or timedelta dtypes incorrectly raising ``ValueError`` (:issue:`11312`)
- Bug in :func:`to_datetime` was not returning input with ``errors='ignore'`` when input was out-of-bounds (:issue:`50587`)
+- Bug in :func:`DataFrame.from_records` when given a :class:`DataFrame` input with timezone-aware datetime64 columns incorrectly dropping the timezone-awareness (:issue:`51162`)
-
Timedelta
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index 9bdfd7991689b..ce709917d7123 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -764,13 +764,13 @@ def to_arrays(
# see test_from_records_with_index_data, test_from_records_bad_index_column
if columns is not None:
arrays = [
- data._ixs(i, axis=1).values
+ data._ixs(i, axis=1)._values
for i, col in enumerate(data.columns)
if col in columns
]
else:
columns = data.columns
- arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
+ arrays = [data._ixs(i, axis=1)._values for i in range(len(columns))]
return arrays, columns
diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py
index d4427dd789b16..60cb0f4490705 100644
--- a/pandas/tests/frame/constructors/test_from_records.py
+++ b/pandas/tests/frame/constructors/test_from_records.py
@@ -15,11 +15,19 @@
Interval,
RangeIndex,
Series,
+ date_range,
)
import pandas._testing as tm
class TestFromRecords:
+ def test_from_records_dt64tz_frame(self):
+ # GH#51162 don't lose tz when calling from_records with DataFrame input
+ dti = date_range("2016-01-01", periods=10, tz="US/Pacific")
+ df = DataFrame({i: dti for i in range(4)})
+ res = DataFrame.from_records(df)
+ tm.assert_frame_equal(res, df)
+
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51162 | 2023-02-04T16:05:10Z | 2023-02-04T23:31:26Z | 2023-02-04T23:31:26Z | 2023-02-05T01:18:34Z |
BUG: Add ``is_any_real_numeric_dtype`` to solve discrepancy between ```Index.is_numeric()``` and ```is_numeric_dtype()``` | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 62a5d81fc8f73..2fd772b3015b8 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -590,6 +590,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.Series.sparse.sp_values \
pandas.Timestamp.fromtimestamp \
pandas.api.types.infer_dtype \
+ pandas.api.types.is_any_real_numeric_dtype \
pandas.api.types.is_bool_dtype \
pandas.api.types.is_categorical_dtype \
pandas.api.types.is_complex_dtype \
diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst
index aeaca7caea25d..457edb46a7ec0 100644
--- a/doc/source/reference/arrays.rst
+++ b/doc/source/reference/arrays.rst
@@ -653,6 +653,7 @@ Data type introspection
.. autosummary::
:toctree: api/
+ api.types.is_any_real_numeric_dtype
api.types.is_bool_dtype
api.types.is_categorical_dtype
api.types.is_complex_dtype
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index addebad8b45af..b02861ac14dcf 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -760,6 +760,7 @@ Other API changes
- The levels of the index of the :class:`Series` returned from ``Series.sparse.from_coo`` now always have dtype ``int32``. Previously they had dtype ``int64`` (:issue:`50926`)
- :func:`to_datetime` with ``unit`` of either "Y" or "M" will now raise if a sequence contains a non-round ``float`` value, matching the ``Timestamp`` behavior (:issue:`50301`)
- The methods :meth:`Series.round`, :meth:`DataFrame.__invert__`, :meth:`Series.__invert__`, :meth:`DataFrame.swapaxes`, :meth:`DataFrame.first`, :meth:`DataFrame.last`, :meth:`Series.first`, :meth:`Series.last` and :meth:`DataFrame.align` will now always return new objects (:issue:`51032`)
+- Added :func:`pandas.api.types.is_any_real_numeric_dtype` to check for real numeric dtypes (:issue:`51152`)
.. ---------------------------------------------------------------------------
.. _whatsnew_200.deprecations:
@@ -774,7 +775,7 @@ Deprecations
- :meth:`Index.is_integer` has been deprecated. Use :func:`pandas.api.types.is_integer_dtype` instead (:issue:`50042`)
- :meth:`Index.is_floating` has been deprecated. Use :func:`pandas.api.types.is_float_dtype` instead (:issue:`50042`)
- :meth:`Index.holds_integer` has been deprecated. Use :func:`pandas.api.types.infer_dtype` instead (:issue:`50243`)
-- :meth:`Index.is_numeric` has been deprecated. Use :func:`pandas.api.types.is_numeric_dtype` instead (:issue:`50042`)
+- :meth:`Index.is_numeric` has been deprecated. Use :func:`pandas.api.types.is_any_real_numeric_dtype` instead (:issue:`50042`,:issue:`51152`)
- :meth:`Index.is_categorical` has been deprecated. Use :func:`pandas.api.types.is_categorical_dtype` instead (:issue:`50042`)
- :meth:`Index.is_object` has been deprecated. Use :func:`pandas.api.types.is_object_dtype` instead (:issue:`50042`)
- :meth:`Index.is_interval` has been deprecated. Use :func:`pandas.api.types.is_intterval_dtype` instead (:issue:`50042`)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index f816b30b825b7..fb953e601735e 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -47,7 +47,7 @@
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
- is_any_numeric_dtype,
+ is_any_real_numeric_dtype,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_dtype,
@@ -596,7 +596,7 @@ def _from_inferred_categories(
if known_categories:
# Convert to a specialized type with `dtype` if specified.
- if is_any_numeric_dtype(dtype.categories):
+ if is_any_real_numeric_dtype(dtype.categories):
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
@@ -1752,7 +1752,7 @@ def _values_for_rank(self):
if mask.any():
values = values.astype("float64")
values[mask] = np.nan
- elif is_any_numeric_dtype(self.categories):
+ elif is_any_real_numeric_dtype(self.categories):
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
diff --git a/pandas/core/dtypes/api.py b/pandas/core/dtypes/api.py
index 00300c5c74e51..254abe330b8e7 100644
--- a/pandas/core/dtypes/api.py
+++ b/pandas/core/dtypes/api.py
@@ -1,4 +1,5 @@
from pandas.core.dtypes.common import (
+ is_any_real_numeric_dtype,
is_array_like,
is_bool,
is_bool_dtype,
@@ -41,6 +42,7 @@
)
__all__ = [
+ "is_any_real_numeric_dtype",
"is_array_like",
"is_bool",
"is_bool_dtype",
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 3281c7fe859e5..eaa033e3d469a 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1161,9 +1161,9 @@ def is_numeric_dtype(arr_or_dtype) -> bool:
)
-def is_any_numeric_dtype(arr_or_dtype) -> bool:
+def is_any_real_numeric_dtype(arr_or_dtype) -> bool:
"""
- Check whether the provided array or dtype is of a real number dtype
+ Check whether the provided array or dtype is of a real number dtype.
Parameters
----------
@@ -1173,19 +1173,21 @@ def is_any_numeric_dtype(arr_or_dtype) -> bool:
Returns
-------
boolean
- Whether or not the array or dtype is of a real number dtype
+ Whether or not the array or dtype is of a real number dtype.
Examples
- -------
- >>> is_any_numeric_dtype(str)
- False
- >>> is_any_numeric_dtype(int)
+ --------
+ >>> is_any_real_numeric_dtype(int)
True
- >>> is_any_numeric_dtype(float)
+ >>> is_any_real_numeric_dtype(float)
True
- >>> is_any_numeric_dtype(complex(1,2))
+ >>> is_any_real_numeric_dtype(object)
+ False
+ >>> is_any_real_numeric_dtype(str)
+ False
+ >>> is_any_real_numeric_dtype(complex(1, 2))
False
- >>> is_any_numeric_dtype(bool)
+ >>> is_any_real_numeric_dtype(bool)
False
"""
return (
@@ -1721,6 +1723,7 @@ def is_all_strings(value: ArrayLike) -> bool:
"is_1d_only_ea_obj",
"is_all_strings",
"is_any_int_dtype",
+ "is_any_real_numeric_dtype",
"is_array_like",
"is_bool",
"is_bool_dtype",
@@ -1749,7 +1752,6 @@ def is_all_strings(value: ArrayLike) -> bool:
"is_nested_list_like",
"is_number",
"is_numeric_dtype",
- "is_any_numeric_dtype",
"is_object_dtype",
"is_period_dtype",
"is_re",
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 0caa8005f1ebc..ed2e3a7499728 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -88,7 +88,7 @@
ensure_int64,
ensure_object,
ensure_platform_int,
- is_any_numeric_dtype,
+ is_any_real_numeric_dtype,
is_bool_dtype,
is_categorical_dtype,
is_dtype_equal,
@@ -2429,7 +2429,7 @@ def is_numeric(self) -> bool:
"""
warnings.warn(
f"{type(self).__name__}.is_numeric is deprecated. "
- "Use pandas.api.types.is_numeric_dtype instead",
+ "Use pandas.api.types.is_any_real_numeric_dtype instead",
FutureWarning,
stacklevel=find_stack_level(),
)
@@ -6029,8 +6029,8 @@ def _should_compare(self, other: Index) -> bool:
Check if `self == other` can ever have non-False entries.
"""
- if (is_bool_dtype(other) and is_any_numeric_dtype(self)) or (
- is_bool_dtype(self) and is_any_numeric_dtype(other)
+ if (is_bool_dtype(other) and is_any_real_numeric_dtype(self)) or (
+ is_bool_dtype(self) and is_any_real_numeric_dtype(other)
):
# GH#16877 Treat boolean labels passed to a numeric index as not
# found. Without this fix False and True would be treated as 0 and 1
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 49b92e0984713..fd737cdcb967f 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -27,7 +27,7 @@
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
- is_any_numeric_dtype,
+ is_any_real_numeric_dtype,
is_categorical_dtype,
is_extension_array_dtype,
is_float,
@@ -841,7 +841,7 @@ def _get_xticks(self, convert_period: bool = False):
if convert_period and isinstance(index, ABCPeriodIndex):
self.data = self.data.reindex(index=index.sort_values())
x = self.data.index.to_timestamp()._mpl_repr()
- elif is_any_numeric_dtype(index):
+ elif is_any_real_numeric_dtype(index):
# Matplotlib supports numeric values or datetime objects as
# xaxis values. Taking LBYL approach here, by the time
# matplotlib raises exception when using non numeric/datetime
diff --git a/pandas/tests/api/test_types.py b/pandas/tests/api/test_types.py
index 8c729fd19cbc7..0f89549f2ab03 100644
--- a/pandas/tests/api/test_types.py
+++ b/pandas/tests/api/test_types.py
@@ -8,6 +8,7 @@
class TestTypes(Base):
allowed = [
+ "is_any_real_numeric_dtype",
"is_bool",
"is_bool_dtype",
"is_categorical_dtype",
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 73abea30029b1..695ba359f60d1 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -558,6 +558,20 @@ def _is_numeric(self) -> bool:
assert com.is_numeric_dtype(MyNumericDType())
+def test_is_any_real_numeric_dtype():
+ assert not com.is_any_real_numeric_dtype(str)
+ assert not com.is_any_real_numeric_dtype(bool)
+ assert not com.is_any_real_numeric_dtype(complex)
+ assert not com.is_any_real_numeric_dtype(object)
+ assert not com.is_any_real_numeric_dtype(np.datetime64)
+ assert not com.is_any_real_numeric_dtype(np.array(["a", "b", complex(1, 2)]))
+ assert not com.is_any_real_numeric_dtype(pd.DataFrame([complex(1, 2), True]))
+
+ assert com.is_any_real_numeric_dtype(int)
+ assert com.is_any_real_numeric_dtype(float)
+ assert com.is_any_real_numeric_dtype(np.array([1, 2.5]))
+
+
def test_is_float_dtype():
assert not com.is_float_dtype(str)
assert not com.is_float_dtype(int)
diff --git a/pandas/tests/indexes/multi/test_equivalence.py b/pandas/tests/indexes/multi/test_equivalence.py
index 3861d74cee092..9babbd5b8d56d 100644
--- a/pandas/tests/indexes/multi/test_equivalence.py
+++ b/pandas/tests/indexes/multi/test_equivalence.py
@@ -1,7 +1,7 @@
import numpy as np
import pytest
-from pandas.core.dtypes.common import is_any_numeric_dtype
+from pandas.core.dtypes.common import is_any_real_numeric_dtype
import pandas as pd
from pandas import (
@@ -255,7 +255,7 @@ def test_is_all_dates(idx):
def test_is_numeric(idx):
# MultiIndex is never numeric
- assert not is_any_numeric_dtype(idx)
+ assert not is_any_real_numeric_dtype(idx)
def test_multiindex_compare():
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 27f501e1d7a19..783cf76403059 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -19,7 +19,7 @@
from pandas.util._test_decorators import async_mark
from pandas.core.dtypes.common import (
- is_any_numeric_dtype,
+ is_any_real_numeric_dtype,
is_numeric_dtype,
is_object_dtype,
)
@@ -659,7 +659,7 @@ def test_append_empty_preserve_name(self, name, expected):
indirect=["index"],
)
def test_is_numeric(self, index, expected):
- assert is_any_numeric_dtype(index) is expected
+ assert is_any_real_numeric_dtype(index) is expected
@pytest.mark.parametrize(
"index, expected",
| - [x] closes #51152
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v2.0.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51160 | 2023-02-04T15:43:04Z | 2023-02-07T10:26:48Z | 2023-02-07T10:26:48Z | 2023-02-07T10:27:00Z |
BUG: catch decimal.InvalidOperation exception from dateutil | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index bdf27020dbdfa..f629e669edf49 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1132,6 +1132,7 @@ Datetimelike
- Bug in :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` with datetime or timedelta dtypes incorrectly raising ``ValueError`` (:issue:`11312`)
- Bug in :func:`to_datetime` was not returning input with ``errors='ignore'`` when input was out-of-bounds (:issue:`50587`)
- Bug in :func:`DataFrame.from_records` when given a :class:`DataFrame` input with timezone-aware datetime64 columns incorrectly dropping the timezone-awareness (:issue:`51162`)
+- Bug in :func:`to_datetime` was raising ``decimal.InvalidOperation`` when parsing date strings with ``errors='coerce'`` (:issue:`51084`)
-
Timedelta
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 3a5920b74412f..e48871c537310 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -40,6 +40,8 @@ cnp.import_array()
# dateutil compat
+from decimal import InvalidOperation
+
from dateutil.parser import (
DEFAULTPARSER,
parse as du_parse,
@@ -646,7 +648,10 @@ cdef datetime dateutil_parse(
str reso = None
dict repl = {}
- res, _ = DEFAULTPARSER._parse(timestr, dayfirst=dayfirst, yearfirst=yearfirst)
+ try:
+ res, _ = DEFAULTPARSER._parse(timestr, dayfirst=dayfirst, yearfirst=yearfirst)
+ except InvalidOperation:
+ res = None
if res is None:
raise DateParseError(
@@ -891,7 +896,7 @@ def guess_datetime_format(dt_str: str, bint dayfirst=False) -> str | None:
try:
parsed_datetime = du_parse(dt_str, dayfirst=dayfirst)
- except (ValueError, OverflowError):
+ except (ValueError, OverflowError, InvalidOperation):
# In case the datetime can't be parsed, its format cannot be guessed
return None
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 7a93d2fe8b5ce..b930bbc2e3e69 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -2501,6 +2501,16 @@ def test_to_datetime_overflow(self):
with pytest.raises(OutOfBoundsTimedelta, match=msg):
date_range(start="1/1/1700", freq="B", periods=100000)
+ def test_string_invalid_operation(self, cache):
+ invalid = np.array(["87156549591102612381000001219H5"], dtype=object)
+ # GH #51084
+
+ with pytest.raises(ValueError, match="Unknown datetime string format"):
+ with tm.assert_produces_warning(
+ UserWarning, match="Could not infer format"
+ ):
+ to_datetime(invalid, errors="raise", cache=cache)
+
def test_string_na_nat_conversion(self, cache):
# GH #999, #858
diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py
index 701c11bb7f52d..6500afdf87beb 100644
--- a/pandas/tests/tslibs/test_parsing.py
+++ b/pandas/tests/tslibs/test_parsing.py
@@ -246,6 +246,7 @@ def test_guess_datetime_format_with_locale_specific_formats(string, fmt):
"13/2019",
"202001", # YYYYMM isn't ISO8601
"2020/01", # YYYY/MM isn't ISO8601 either
+ "87156549591102612381000001219H5",
],
)
def test_guess_datetime_format_invalid_inputs(invalid_dt):
| - [X] closes #51084
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [X] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51157 | 2023-02-04T09:18:42Z | 2023-02-07T14:09:40Z | 2023-02-07T14:09:40Z | 2023-02-07T15:48:30Z |
REF: remove _get_grouper, make Grouper.__init__ less stateful | diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index d77ad59a4bb82..78517e88a5296 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -441,7 +441,6 @@ class Grouping:
_codes: npt.NDArray[np.signedinteger] | None = None
_group_index: Index | None = None
- _passed_categorical: bool
_all_grouper: Categorical | None
_orig_cats: Index | None
_index: Index
@@ -460,7 +459,7 @@ def __init__(
) -> None:
self.level = level
self._orig_grouper = grouper
- self.grouping_vector = _convert_grouper(index, grouper)
+ grouping_vector = _convert_grouper(index, grouper)
self._all_grouper = None
self._orig_cats = None
self._index = index
@@ -471,8 +470,6 @@ def __init__(
self._dropna = dropna
self._uniques = uniques
- self._passed_categorical = False
-
# we have a single grouper which may be a myriad of things,
# some of which are dependent on the passing in level
@@ -486,78 +483,83 @@ def __init__(
else:
index_level = index
- if self.grouping_vector is None:
- self.grouping_vector = index_level
+ if grouping_vector is None:
+ grouping_vector = index_level
else:
- mapper = self.grouping_vector
- self.grouping_vector = index_level.map(mapper)
+ mapper = grouping_vector
+ grouping_vector = index_level.map(mapper)
# a passed Grouper like, directly get the grouper in the same way
# as single grouper groupby, use the group_info to get codes
- elif isinstance(self.grouping_vector, Grouper):
+ elif isinstance(grouping_vector, Grouper):
# get the new grouper; we already have disambiguated
# what key/level refer to exactly, don't need to
# check again as we have by this point converted these
# to an actual value (rather than a pd.Grouper)
assert self.obj is not None # for mypy
- newgrouper, newobj = self.grouping_vector._get_grouper(
- self.obj, validate=False
- )
+ newgrouper, newobj = grouping_vector._get_grouper(self.obj, validate=False)
self.obj = newobj
- ng = newgrouper._get_grouper()
if isinstance(newgrouper, ops.BinGrouper):
- # in this case we have `ng is newgrouper`
- self.grouping_vector = ng
+ # TODO: can we unwrap this and get a tighter typing
+ # for self.grouping_vector?
+ grouping_vector = newgrouper
else:
# ops.BaseGrouper
+ # TODO: 2023-02-03 no test cases with len(newgrouper.groupings) > 1.
+ # If that were to occur, would we be throwing out information?
+ # error: Cannot determine type of "grouping_vector" [has-type]
+ ng = newgrouper.groupings[0].grouping_vector # type: ignore[has-type]
# use Index instead of ndarray so we can recover the name
- self.grouping_vector = Index(ng, name=newgrouper.result_index.name)
+ grouping_vector = Index(ng, name=newgrouper.result_index.name)
elif not isinstance(
- self.grouping_vector, (Series, Index, ExtensionArray, np.ndarray)
+ grouping_vector, (Series, Index, ExtensionArray, np.ndarray)
):
# no level passed
- if getattr(self.grouping_vector, "ndim", 1) != 1:
- t = self.name or str(type(self.grouping_vector))
+ if getattr(grouping_vector, "ndim", 1) != 1:
+ t = str(type(grouping_vector))
raise ValueError(f"Grouper for '{t}' not 1-dimensional")
- self.grouping_vector = index.map(self.grouping_vector)
+ grouping_vector = index.map(grouping_vector)
if not (
- hasattr(self.grouping_vector, "__len__")
- and len(self.grouping_vector) == len(index)
+ hasattr(grouping_vector, "__len__")
+ and len(grouping_vector) == len(index)
):
- grper = pprint_thing(self.grouping_vector)
+ grper = pprint_thing(grouping_vector)
errmsg = (
"Grouper result violates len(labels) == "
f"len(data)\nresult: {grper}"
)
- self.grouping_vector = None # Try for sanity
raise AssertionError(errmsg)
- if isinstance(self.grouping_vector, np.ndarray):
- if self.grouping_vector.dtype.kind in ["m", "M"]:
+ if isinstance(grouping_vector, np.ndarray):
+ if grouping_vector.dtype.kind in ["m", "M"]:
# if we have a date/time-like grouper, make sure that we have
# Timestamps like
# TODO 2022-10-08 we only have one test that gets here and
# values are already in nanoseconds in that case.
- self.grouping_vector = Series(self.grouping_vector).to_numpy()
- elif is_categorical_dtype(self.grouping_vector):
+ grouping_vector = Series(grouping_vector).to_numpy()
+ elif is_categorical_dtype(grouping_vector):
# a passed Categorical
- self._passed_categorical = True
-
- self._orig_cats = self.grouping_vector.categories
- self.grouping_vector, self._all_grouper = recode_for_groupby(
- self.grouping_vector, sort, observed
+ self._orig_cats = grouping_vector.categories
+ grouping_vector, self._all_grouper = recode_for_groupby(
+ grouping_vector, sort, observed
)
+ self.grouping_vector = grouping_vector
+
def __repr__(self) -> str:
return f"Grouping({self.name})"
def __iter__(self) -> Iterator:
return iter(self.indices)
+ @cache_readonly
+ def _passed_categorical(self) -> bool:
+ return is_categorical_dtype(self.grouping_vector)
+
@cache_readonly
def name(self) -> Hashable:
ilevel = self._ilevel
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index bff61ec135d74..08d657a41e332 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -745,15 +745,6 @@ def _get_splitter(self, data: NDFrame, axis: AxisInt = 0) -> DataSplitter:
ids, _, ngroups = self.group_info
return get_splitter(data, ids, ngroups, axis=axis)
- def _get_grouper(self):
- """
- We are a grouper as part of another's groupings.
-
- We have a specific method of grouping, so cannot
- convert to a Index for our grouper.
- """
- return self.groupings[0].grouping_vector
-
@final
@cache_readonly
def group_keys_seq(self):
@@ -1112,15 +1103,6 @@ def nkeys(self) -> int:
# still matches len(self.groupings), but we can hard-code
return 1
- def _get_grouper(self):
- """
- We are a grouper as part of another's groupings.
-
- We have a specific method of grouping, so cannot
- convert to a Index for our grouper.
- """
- return self
-
def get_iterator(self, data: NDFrame, axis: AxisInt = 0):
"""
Groupby iterator
| Having a BaseGrouper._get_grouper, BinGrouper._get_grouper, and Grouper._get_grouper that all do different things is making my eye twitch.
L509-513 the case where `isinstance(newgrouper, ops.BaseGrouper)` looks like things might go wrong if we ever get there with `len(newgrouper.groupings) > 1`, but we have no tests that get there and this is too spaghettified for me to be sure. cc @jreback | https://api.github.com/repos/pandas-dev/pandas/pulls/51155 | 2023-02-04T02:04:18Z | 2023-02-06T21:02:25Z | 2023-02-06T21:02:25Z | 2023-02-06T21:05:40Z |
DOC: Additions to month_name & day_name | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 0766b1c6a5262..659a5d1a7181e 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -1202,7 +1202,9 @@ def month_name(self, locale=None) -> npt.NDArray[np.object_]:
----------
locale : str, optional
Locale determining the language in which to return the month name.
- Default is English locale.
+ Default is English locale (``'en_US.utf8'``). Use the command
+ ``locale -a`` on your terminal on Unix systems to find your locale
+ language code.
Returns
-------
@@ -1229,6 +1231,17 @@ def month_name(self, locale=None) -> npt.NDArray[np.object_]:
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
+
+ Using the ``locale`` parameter you can set a different locale language,
+ for example: ``idx.month_name(locale='pt_BR.utf8')`` will return month
+ names in Brazilian Portuguese language.
+
+ >>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
+ >>> idx
+ DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
+ dtype='datetime64[ns]', freq='M')
+ >>> idx.month_name(locale='pt_BR.utf8') # doctest: +SKIP
+ Index(['Janeiro', 'Fevereiro', 'Março'], dtype='object')
"""
values = self._local_timestamps()
@@ -1246,7 +1259,9 @@ def day_name(self, locale=None) -> npt.NDArray[np.object_]:
----------
locale : str, optional
Locale determining the language in which to return the day name.
- Default is English locale.
+ Default is English locale (``'en_US.utf8'``). Use the command
+ ``locale -a`` on your terminal on Unix systems to find your locale
+ language code.
Returns
-------
@@ -1273,6 +1288,17 @@ def day_name(self, locale=None) -> npt.NDArray[np.object_]:
dtype='datetime64[ns]', freq='D')
>>> idx.day_name()
Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')
+
+ Using the ``locale`` parameter you can set a different locale language,
+ for example: ``idx.day_name(locale='pt_BR.utf8')`` will return day
+ names in Brazilian Portuguese language.
+
+ >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
+ >>> idx
+ DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
+ dtype='datetime64[ns]', freq='D')
+ >>> idx.day_name(locale='pt_BR.utf8') # doctest: +SKIP
+ Index(['Segunda', 'Terça', 'Quarta'], dtype='object')
"""
values = self._local_timestamps()
| - [x] closes #51138
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
In issue #51138 I was trying to set a column in my DataFrame with the month and day names in Portuguese, and someone pointed me to the `locale -a` terminal command as the way to find the right `locale` string parameter so my code would work properly.
So I thought it would be nice if the respective docs, [month_name](https://pandas.pydata.org/docs/reference/api/pandas.Series.dt.month_name.html) & [day_name](https://pandas.pydata.org/docs/reference/api/pandas.Series.dt.day_name.html), included this information for the user.
So I added these infos plus a quick example for each method in the docstrings from `/pandas/core/arrays/datetimes.py`
**P.S.:** I just don't know why `validate_docstrings.py` doesn't recognize the changes I made in the methods' docstrings...
<details>
<summary>Output: <code>validate_docstrings.py pandas.Series.dt.month_name</code></summary>

</details>
<details>
<summary>Output: <code>validate_docstrings.py pandas.Series.dt.day_name</code></summary>

</details>
<details>
<summary>Output: <code>pre-commit run --files pandas/core/arrays/datetimes.py</code></summary>

</details>
| https://api.github.com/repos/pandas-dev/pandas/pulls/51154 | 2023-02-04T00:06:18Z | 2023-02-08T17:59:03Z | 2023-02-08T17:59:03Z | 2023-02-08T18:24:50Z |
DOC: Clean whatsnews | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index b61547d1523cf..b167b7e811d98 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -362,7 +362,7 @@ Other enhancements
- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`, :issue:`46725`)
- Added ``validate`` argument to :meth:`DataFrame.join` (:issue:`46622`)
- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`)
-- Added ``numeric_only`` argument to :meth:`Resampler.sum`, :meth:`Resampler.prod`, :meth:`Resampler.min`, :meth:`Resampler.max`, :meth:`Resampler.first`, and :meth:`Resampler.last` (:issue:`46442`)
+- Added ``numeric_only`` argument to :meth:`.Resampler.sum`, :meth:`.Resampler.prod`, :meth:`.Resampler.min`, :meth:`.Resampler.max`, :meth:`.Resampler.first`, and :meth:`.Resampler.last` (:issue:`46442`)
- ``times`` argument in :class:`.ExponentialMovingWindow` now accepts ``np.timedelta64`` (:issue:`47003`)
- :class:`.DataError`, :class:`.SpecificationError`, :class:`.SettingWithCopyError`, :class:`.SettingWithCopyWarning`, :class:`.NumExprClobberingError`, :class:`.UndefinedVariableError`, :class:`.IndexingError`, :class:`.PyperclipException`, :class:`.PyperclipWindowsException`, :class:`.CSSWarning`, :class:`.PossibleDataLossError`, :class:`.ClosedFileError`, :class:`.IncompatibilityWarning`, :class:`.AttributeConflictWarning`, :class:`.DatabaseError`, :class:`.PossiblePrecisionLoss`, :class:`.ValueLabelTypeMismatch`, :class:`.InvalidColumnName`, and :class:`.CategoricalConversionWarning` are now exposed in ``pandas.errors`` (:issue:`27656`)
- Added ``check_like`` argument to :func:`testing.assert_series_equal` (:issue:`47247`)
@@ -966,7 +966,7 @@ Other Deprecations
- Deprecated allowing ``unit="M"`` or ``unit="Y"`` in :class:`Timestamp` constructor with a non-round float value (:issue:`47267`)
- Deprecated the ``display.column_space`` global configuration option (:issue:`7576`)
- Deprecated the argument ``na_sentinel`` in :func:`factorize`, :meth:`Index.factorize`, and :meth:`.ExtensionArray.factorize`; pass ``use_na_sentinel=True`` instead to use the sentinel ``-1`` for NaN values and ``use_na_sentinel=False`` instead of ``na_sentinel=None`` to encode NaN values (:issue:`46910`)
-- Deprecated :meth:`DataFrameGroupBy.transform` not aligning the result when the UDF returned DataFrame (:issue:`45648`)
+- Deprecated :meth:`.DataFrameGroupBy.transform` not aligning the result when the UDF returned DataFrame (:issue:`45648`)
- Clarified warning from :func:`to_datetime` when delimited dates can't be parsed in accordance to specified ``dayfirst`` argument (:issue:`46210`)
- Emit warning from :func:`to_datetime` when delimited dates can't be parsed in accordance to specified ``dayfirst`` argument even for dates where leading zero is omitted (e.g. ``31/1/2001``) (:issue:`47880`)
- Deprecated :class:`Series` and :class:`Resampler` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) raising a ``NotImplementedError`` when the dtype is non-numric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`)
@@ -1034,7 +1034,7 @@ Datetimelike
- Bug in :func:`to_datetime` with sequences of ``np.str_`` objects incorrectly raising (:issue:`32264`)
- Bug in :class:`Timestamp` construction when passing datetime components as positional arguments and ``tzinfo`` as a keyword argument incorrectly raising (:issue:`31929`)
- Bug in :meth:`Index.astype` when casting from object dtype to ``timedelta64[ns]`` dtype incorrectly casting ``np.datetime64("NaT")`` values to ``np.timedelta64("NaT")`` instead of raising (:issue:`45722`)
-- Bug in :meth:`SeriesGroupBy.value_counts` index when passing categorical column (:issue:`44324`)
+- Bug in :meth:`.SeriesGroupBy.value_counts` index when passing categorical column (:issue:`44324`)
- Bug in :meth:`DatetimeIndex.tz_localize` localizing to UTC failing to make a copy of the underlying data (:issue:`46460`)
- Bug in :meth:`DatetimeIndex.resolution` incorrectly returning "day" instead of "nanosecond" for nanosecond-resolution indexes (:issue:`46903`)
- Bug in :class:`Timestamp` with an integer or float value and ``unit="Y"`` or ``unit="M"`` giving slightly-wrong results (:issue:`47266`)
@@ -1168,7 +1168,7 @@ I/O
- Bug in Parquet roundtrip for Interval dtype with ``datetime64[ns]`` subtype (:issue:`45881`)
- Bug in :func:`read_excel` when reading a ``.ods`` file with newlines between xml elements (:issue:`45598`)
- Bug in :func:`read_parquet` when ``engine="fastparquet"`` where the file was not closed on error (:issue:`46555`)
-- :meth:`to_html` now excludes the ``border`` attribute from ``<table>`` elements when ``border`` keyword is set to ``False``.
+- :meth:`DataFrame.to_html` now excludes the ``border`` attribute from ``<table>`` elements when ``border`` keyword is set to ``False``.
- Bug in :func:`read_sas` with certain types of compressed SAS7BDAT files (:issue:`35545`)
- Bug in :func:`read_excel` not forward filling :class:`MultiIndex` when no names were given (:issue:`47487`)
- Bug in :func:`read_sas` returned ``None`` rather than an empty DataFrame for SAS7BDAT files with zero rows (:issue:`18198`)
@@ -1213,7 +1213,7 @@ Groupby/resample/rolling
- Bug in :meth:`.ExponentialMovingWindow.mean` with ``axis=1`` and ``engine='numba'`` when the :class:`DataFrame` has more columns than rows (:issue:`46086`)
- Bug when using ``engine="numba"`` would return the same jitted function when modifying ``engine_kwargs`` (:issue:`46086`)
- Bug in :meth:`.DataFrameGroupBy.transform` fails when ``axis=1`` and ``func`` is ``"first"`` or ``"last"`` (:issue:`45986`)
-- Bug in :meth:`DataFrameGroupBy.cumsum` with ``skipna=False`` giving incorrect results (:issue:`46216`)
+- Bug in :meth:`.DataFrameGroupBy.cumsum` with ``skipna=False`` giving incorrect results (:issue:`46216`)
- Bug in :meth:`.DataFrameGroupBy.sum`, :meth:`.SeriesGroupBy.sum`, :meth:`.DataFrameGroupBy.prod`, :meth:`.SeriesGroupBy.prod, :meth:`.DataFrameGroupBy.cumsum`, and :meth:`.SeriesGroupBy.cumsum` with integer dtypes losing precision (:issue:`37493`)
- Bug in :meth:`.DataFrameGroupBy.cumsum` and :meth:`.SeriesGroupBy.cumsum` with ``timedelta64[ns]`` dtype failing to recognize ``NaT`` as a null value (:issue:`46216`)
- Bug in :meth:`.DataFrameGroupBy.cumsum` and :meth:`.SeriesGroupBy.cumsum` with integer dtypes causing overflows when sum was bigger than maximum of dtype (:issue:`37493`)
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index bc1cf8d03ce98..c1a8f80c78351 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -245,7 +245,7 @@ Other enhancements
- :func:`read_sas` now supports using ``encoding='infer'`` to correctly read and use the encoding specified by the sas file. (:issue:`48048`)
- :meth:`.DataFrameGroupBy.quantile`, :meth:`.SeriesGroupBy.quantile` and :meth:`.DataFrameGroupBy.std` now preserve nullable dtypes instead of casting to numpy dtypes (:issue:`37493`)
- :meth:`Series.add_suffix`, :meth:`DataFrame.add_suffix`, :meth:`Series.add_prefix` and :meth:`DataFrame.add_prefix` support an ``axis`` argument. If ``axis`` is set, the default behaviour of which axis to consider can be overwritten (:issue:`47819`)
-- :func:`assert_frame_equal` now shows the first element where the DataFrames differ, analogously to ``pytest``'s output (:issue:`47910`)
+- :func:`.testing.assert_frame_equal` now shows the first element where the DataFrames differ, analogously to ``pytest``'s output (:issue:`47910`)
- Added ``index`` parameter to :meth:`DataFrame.to_dict` (:issue:`46398`)
- Added support for extension array dtypes in :func:`merge` (:issue:`44240`)
- Added metadata propagation for binary operators on :class:`DataFrame` (:issue:`28283`)
@@ -257,7 +257,7 @@ Other enhancements
- :func:`timedelta_range` now supports a ``unit`` keyword ("s", "ms", "us", or "ns") to specify the desired resolution of the output index (:issue:`49824`)
- :meth:`DataFrame.to_json` now supports a ``mode`` keyword with supported inputs 'w' and 'a'. Defaulting to 'w', 'a' can be used when lines=True and orient='records' to append record oriented json lines to an existing json file. (:issue:`35849`)
- Added ``name`` parameter to :meth:`IntervalIndex.from_breaks`, :meth:`IntervalIndex.from_arrays` and :meth:`IntervalIndex.from_tuples` (:issue:`48911`)
-- Improve exception message when using :func:`assert_frame_equal` on a :class:`DataFrame` to include the column that is compared (:issue:`50323`)
+- Improve exception message when using :func:`.testing.assert_frame_equal` on a :class:`DataFrame` to include the column that is compared (:issue:`50323`)
- Improved error message for :func:`merge_asof` when join-columns were duplicated (:issue:`50102`)
- Added support for extension array dtypes to :func:`get_dummies` (:func:`32430`)
- Added :meth:`Index.infer_objects` analogous to :meth:`Series.infer_objects` (:issue:`50034`)
@@ -713,7 +713,7 @@ Other API changes
- :func:`pandas.testing.assert_index_equal` with parameter ``exact="equiv"`` now considers two indexes equal when both are either a :class:`RangeIndex` or :class:`Index` with an ``int64`` dtype. Previously it meant either a :class:`RangeIndex` or a :class:`Int64Index` (:issue:`51098`)
- :meth:`Series.unique` with dtype "timedelta64[ns]" or "datetime64[ns]" now returns :class:`TimedeltaArray` or :class:`DatetimeArray` instead of ``numpy.ndarray`` (:issue:`49176`)
- :func:`to_datetime` and :class:`DatetimeIndex` now allow sequences containing both ``datetime`` objects and numeric entries, matching :class:`Series` behavior (:issue:`49037`, :issue:`50453`)
-- :func:`pandas.api.dtypes.is_string_dtype` now only returns ``True`` for array-likes with ``dtype=object`` when the elements are inferred to be strings (:issue:`15585`)
+- :func:`api.dtypes.is_string_dtype` now only returns ``True`` for array-likes with ``dtype=object`` when the elements are inferred to be strings (:issue:`15585`)
- Passing a sequence containing ``datetime`` objects and ``date`` objects to :class:`Series` constructor will return with ``object`` dtype instead of ``datetime64[ns]`` dtype, consistent with :class:`Index` behavior (:issue:`49341`)
- Passing strings that cannot be parsed as datetimes to :class:`Series` or :class:`DataFrame` with ``dtype="datetime64[ns]"`` will raise instead of silently ignoring the keyword and returning ``object`` dtype (:issue:`24435`)
- Passing a sequence containing a type that cannot be converted to :class:`Timedelta` to :func:`to_timedelta` or to the :class:`Series` or :class:`DataFrame` constructor with ``dtype="timedelta64[ns]"`` or to :class:`TimedeltaIndex` now raises ``TypeError`` instead of ``ValueError`` (:issue:`49525`)
@@ -977,7 +977,7 @@ Removal of prior version deprecations/changes
- Changed behavior of :meth:`Series.diff` and :meth:`DataFrame.diff` with :class:`ExtensionDtype` dtypes whose arrays do not implement ``diff``, these now raise ``TypeError`` rather than casting to numpy (:issue:`31025`)
- Enforced deprecation of calling numpy "ufunc"s on :class:`DataFrame` with ``method="outer"``; this now raises ``NotImplementedError`` (:issue:`36955`)
- Enforced deprecation disallowing passing ``numeric_only=True`` to :class:`Series` reductions (``rank``, ``any``, ``all``, ...) with non-numeric dtype (:issue:`47500`)
-- Changed behavior of :meth:`DataFrameGroupBy.apply` and :meth:`SeriesGroupBy.apply` so that ``group_keys`` is respected even if a transformer is detected (:issue:`34998`)
+- Changed behavior of :meth:`.DataFrameGroupBy.apply` and :meth:`.SeriesGroupBy.apply` so that ``group_keys`` is respected even if a transformer is detected (:issue:`34998`)
- Comparisons between a :class:`DataFrame` and a :class:`Series` where the frame's columns do not match the series's index raise ``ValueError`` instead of automatically aligning, do ``left, right = left.align(right, axis=1, copy=False)`` before comparing (:issue:`36795`)
- Enforced deprecation ``numeric_only=None`` (the default) in DataFrame reductions that would silently drop columns that raised; ``numeric_only`` now defaults to ``False`` (:issue:`41480`)
- Changed default of ``numeric_only`` to ``False`` in all DataFrame methods with that argument (:issue:`46096`, :issue:`46906`)
@@ -1076,7 +1076,7 @@ Categorical
- Bug in :meth:`Series.replace` with categorical dtype losing nullable dtypes of underlying categories (:issue:`49404`)
- Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby` would reorder categories when used as a grouper (:issue:`48749`)
- Bug in :class:`Categorical` constructor when constructing from a :class:`Categorical` object and ``dtype="category"`` losing ordered-ness (:issue:`49309`)
-- Bug in :meth:`SeriesGroupBy.min`, :meth:`SeriesGroupBy.max`, :meth:`DataFrameGroupBy.min`, and :meth:`DataFrameGroupBy.max` with unordered :class:`CategoricalDtype` with no groups failing to raise ``TypeError`` (:issue:`51034`)
+- Bug in :meth:`.SeriesGroupBy.min`, :meth:`.SeriesGroupBy.max`, :meth:`.DataFrameGroupBy.min`, and :meth:`.DataFrameGroupBy.max` with unordered :class:`CategoricalDtype` with no groups failing to raise ``TypeError`` (:issue:`51034`)
-
Datetimelike
@@ -1232,7 +1232,7 @@ Period
- Bug in adding a :class:`Period` object to an array of :class:`DateOffset` objects incorrectly raising ``TypeError`` (:issue:`50162`)
- Bug in :class:`Period` where passing a string with finer resolution than nanosecond would result in a ``KeyError`` instead of dropping the extra precision (:issue:`50417`)
- Bug in parsing strings representing Week-periods e.g. "2017-01-23/2017-01-29" as minute-frequency instead of week-frequency (:issue:`50803`)
-- Bug in :meth:`GroupBy.sum`, :meth:`GroupBy.cumsum`, :meth:`GroupBy.prod`, :meth:`GroupBy.cumprod` with :class:`PeriodDtype` failing to raise ``TypeError`` (:issue:`51040`)
+- Bug in :meth:`.GroupBy.sum`, :meth:`.GroupBy.cumsum`, :meth:`.GroupBy.prod`, :meth:`.GroupBy.cumprod` with :class:`PeriodDtype` failing to raise ``TypeError`` (:issue:`51040`)
-
Plotting
@@ -1244,21 +1244,21 @@ Plotting
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in :class:`.ExponentialMovingWindow` with ``online`` not raising a ``NotImplementedError`` for unsupported operations (:issue:`48834`)
-- Bug in :meth:`DataFrameGroupBy.sample` raises ``ValueError`` when the object is empty (:issue:`48459`)
+- Bug in :meth:`.DataFrameGroupBy.sample` raises ``ValueError`` when the object is empty (:issue:`48459`)
- Bug in :meth:`Series.groupby` raises ``ValueError`` when an entry of the index is equal to the name of the index (:issue:`48567`)
-- Bug in :meth:`DataFrameGroupBy.resample` produces inconsistent results when passing empty DataFrame (:issue:`47705`)
+- Bug in :meth:`.DataFrameGroupBy.resample` produces inconsistent results when passing empty DataFrame (:issue:`47705`)
- Bug in :class:`.DataFrameGroupBy` and :class:`.SeriesGroupBy` would not include unobserved categories in result when grouping by categorical indexes (:issue:`49354`)
- Bug in :class:`.DataFrameGroupBy` and :class:`.SeriesGroupBy` would change result order depending on the input index when grouping by categoricals (:issue:`49223`)
- Bug in :class:`.DataFrameGroupBy` and :class:`.SeriesGroupBy` when grouping on categorical data would sort result values even when used with ``sort=False`` (:issue:`42482`)
-- Bug in :meth:`.DataFrameGroupBy.apply` and :class:`SeriesGroupBy.apply` with ``as_index=False`` would not attempt the computation without using the grouping keys when using them failed with a ``TypeError`` (:issue:`49256`)
+- Bug in :meth:`.DataFrameGroupBy.apply` and :class:`.SeriesGroupBy.apply` with ``as_index=False`` would not attempt the computation without using the grouping keys when using them failed with a ``TypeError`` (:issue:`49256`)
- Bug in :meth:`.DataFrameGroupBy.describe` would describe the group keys (:issue:`49256`)
- Bug in :meth:`.SeriesGroupBy.describe` with ``as_index=False`` would have the incorrect shape (:issue:`49256`)
- Bug in :class:`.DataFrameGroupBy` and :class:`.SeriesGroupBy` with ``dropna=False`` would drop NA values when the grouper was categorical (:issue:`36327`)
- Bug in :meth:`.SeriesGroupBy.nunique` would incorrectly raise when the grouper was an empty categorical and ``observed=True`` (:issue:`21334`)
- Bug in :meth:`.SeriesGroupBy.nth` would raise when grouper contained NA values after subsetting from a :class:`DataFrameGroupBy` (:issue:`26454`)
- Bug in :meth:`DataFrame.groupby` would not include a :class:`.Grouper` specified by ``key`` in the result when ``as_index=False`` (:issue:`50413`)
-- Bug in :meth:`.DataFrameGrouBy.value_counts` would raise when used with a :class:`.TimeGrouper` (:issue:`50486`)
-- Bug in :meth:`Resampler.size` caused a wide :class:`DataFrame` to be returned instead of a :class:`Series` with :class:`MultiIndex` (:issue:`46826`)
+- Bug in :meth:`.DataFrameGroupBy.value_counts` would raise when used with a :class:`.TimeGrouper` (:issue:`50486`)
+- Bug in :meth:`.Resampler.size` caused a wide :class:`DataFrame` to be returned instead of a :class:`Series` with :class:`MultiIndex` (:issue:`46826`)
- Bug in :meth:`.DataFrameGroupBy.transform` and :meth:`.SeriesGroupBy.transform` would raise incorrectly when grouper had ``axis=1`` for ``"idxmin"`` and ``"idxmax"`` arguments (:issue:`45986`)
- Bug in :class:`.DataFrameGroupBy` would raise when used with an empty DataFrame, categorical grouper, and ``dropna=False`` (:issue:`50634`)
- Bug in :meth:`.SeriesGroupBy.value_counts` did not respect ``sort=False`` (:issue:`50482`)
@@ -1273,7 +1273,7 @@ Reshaping
- Bug in :meth:`DataFrame.unstack` and :meth:`Series.unstack` unstacking wrong level of :class:`MultiIndex` when :class:`MultiIndex` has mixed names (:issue:`48763`)
- Bug in :meth:`DataFrame.melt` losing extension array dtype (:issue:`41570`)
- Bug in :meth:`DataFrame.pivot` not respecting ``None`` as column name (:issue:`48293`)
-- Bug in :func:`join` when ``left_on`` or ``right_on`` is or includes a :class:`CategoricalIndex` incorrectly raising ``AttributeError`` (:issue:`48464`)
+- Bug in :meth:`DataFrame.join` when ``left_on`` or ``right_on`` is or includes a :class:`CategoricalIndex` incorrectly raising ``AttributeError`` (:issue:`48464`)
- Bug in :meth:`DataFrame.pivot_table` raising ``ValueError`` with parameter ``margins=True`` when result is an empty :class:`DataFrame` (:issue:`49240`)
- Clarified error message in :func:`merge` when passing invalid ``validate`` option (:issue:`49417`)
- Bug in :meth:`DataFrame.explode` raising ``ValueError`` on multiple columns with ``NaN`` values or empty lists (:issue:`46084`)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51153 | 2023-02-03T22:38:59Z | 2023-02-04T01:10:55Z | 2023-02-04T01:10:55Z | 2023-02-04T11:31:13Z |
Upgrade pyarrow minimum version to 7.0 | diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml
index caeee07c324d1..7652b6347ad4f 100644
--- a/ci/deps/actions-38-minimum_versions.yaml
+++ b/ci/deps/actions-38-minimum_versions.yaml
@@ -43,7 +43,7 @@ dependencies:
- openpyxl=3.0.7
- pandas-gbq=0.15.0
- psycopg2=2.8.6
- - pyarrow=6.0.0
+ - pyarrow=7.0.0
- pymysql=1.0.2
- pyreadstat=1.1.2
- pytables=3.6.1
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index 687f00a3dffd9..c1295df53fc2b 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -441,7 +441,7 @@ PyTables 3.6.1 hdf5 HDF5-based reading
blosc 1.21.0 hdf5 Compression for HDF5; only available on ``conda``
zlib hdf5 Compression for HDF5
fastparquet 0.6.3 - Parquet reading / writing (pyarrow is default)
-pyarrow 6.0.0 parquet, feather Parquet, ORC, and feather reading / writing
+pyarrow 7.0.0 parquet, feather Parquet, ORC, and feather reading / writing
pyreadstat 1.1.2 spss SPSS files (.sav) reading
odfpy 1.4.1 excel Open document format (.odf, .ods, .odt) reading / writing
========================= ================== ================ =============================================================
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 42d3e5230f2a1..e6e4408c23159 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -668,7 +668,7 @@ Optional libraries below the lowest tested version may still work, but are not c
+-----------------+-----------------+---------+
| Package | Minimum Version | Changed |
+=================+=================+=========+
-| pyarrow | 6.0.0 | X |
+| pyarrow | 7.0.0 | X |
+-----------------+-----------------+---------+
| matplotlib | 3.6.1 | X |
+-----------------+-----------------+---------+
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index a738220db20da..dac949f69bfaf 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -29,7 +29,7 @@
Frequency,
NpDtype,
)
-from pandas.compat import pa_version_under6p0
+from pandas.compat import pa_version_under7p0
from pandas.core.dtypes.common import (
is_float_dtype,
@@ -191,7 +191,7 @@
]
]
-if not pa_version_under6p0:
+if not pa_version_under7p0:
import pyarrow as pa
UNSIGNED_INT_PYARROW_DTYPES = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index b59b9632913e4..052eb7792a19c 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -27,7 +27,6 @@
np_version_under1p21,
)
from pandas.compat.pyarrow import (
- pa_version_under6p0,
pa_version_under7p0,
pa_version_under8p0,
pa_version_under9p0,
@@ -157,7 +156,6 @@ def get_lzma_file() -> type[pandas.compat.compressors.LZMAFile]:
__all__ = [
"is_numpy_dev",
"np_version_under1p21",
- "pa_version_under6p0",
"pa_version_under7p0",
"pa_version_under8p0",
"pa_version_under9p0",
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index d98b23b215565..01ac462eeb659 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -31,7 +31,7 @@
"pandas_gbq": "0.15.0",
"psycopg2": "2.8.6", # (dt dec pq3 ext lo64)
"pymysql": "1.0.2",
- "pyarrow": "6.0.0",
+ "pyarrow": "7.0.0",
"pyreadstat": "1.1.2",
"pytest": "7.0.0",
"pyxlsb": "1.0.8",
diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py
index 280fdabf2cc05..ea8e18437fcfb 100644
--- a/pandas/compat/pyarrow.py
+++ b/pandas/compat/pyarrow.py
@@ -9,13 +9,11 @@
_pa_version = pa.__version__
_palv = Version(_pa_version)
- pa_version_under6p0 = _palv < Version("6.0.0")
pa_version_under7p0 = _palv < Version("7.0.0")
pa_version_under8p0 = _palv < Version("8.0.0")
pa_version_under9p0 = _palv < Version("9.0.0")
pa_version_under10p0 = _palv < Version("10.0.0")
except ImportError:
- pa_version_under6p0 = True
pa_version_under7p0 = True
pa_version_under8p0 = True
pa_version_under9p0 = True
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 9247d26fc846d..81fabf40d05b6 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -26,7 +26,6 @@
npt,
)
from pandas.compat import (
- pa_version_under6p0,
pa_version_under7p0,
pa_version_under8p0,
pa_version_under9p0,
@@ -54,7 +53,7 @@
validate_indices,
)
-if not pa_version_under6p0:
+if not pa_version_under7p0:
import pyarrow as pa
import pyarrow.compute as pc
@@ -199,8 +198,8 @@ class ArrowExtensionArray(OpsMixin, ExtensionArray):
_dtype: ArrowDtype
def __init__(self, values: pa.Array | pa.ChunkedArray) -> None:
- if pa_version_under6p0:
- msg = "pyarrow>=6.0.0 is required for PyArrow backed ArrowExtensionArray."
+ if pa_version_under7p0:
+ msg = "pyarrow>=7.0.0 is required for PyArrow backed ArrowExtensionArray."
raise ImportError(msg)
if isinstance(values, pa.Array):
self._data = pa.chunked_array([values])
@@ -529,11 +528,6 @@ def _argmin_max(self, skipna: bool, method: str) -> int:
# let ExtensionArray.arg{max|min} raise
return getattr(super(), f"arg{method}")(skipna=skipna)
- if pa_version_under6p0:
- raise NotImplementedError(
- f"arg{method} only implemented for pyarrow version >= 6.0"
- )
-
data = self._data
if pa.types.is_duration(data.type):
data = data.cast(pa.int64())
@@ -567,11 +561,7 @@ def dropna(self: ArrowExtensionArrayT) -> ArrowExtensionArrayT:
-------
ArrowExtensionArray
"""
- if pa_version_under6p0:
- fallback_performancewarning(version="6")
- return super().dropna()
- else:
- return type(self)(pc.drop_null(self._data))
+ return type(self)(pc.drop_null(self._data))
@doc(ExtensionArray.fillna)
def fillna(
@@ -1293,9 +1283,6 @@ def _mode(self: ArrowExtensionArrayT, dropna: bool = True) -> ArrowExtensionArra
same type as self
Sorted, if possible.
"""
- if pa_version_under6p0:
- raise NotImplementedError("mode only supported for pyarrow version >= 6.0")
-
pa_type = self._data.type
if pa.types.is_temporal(pa_type):
nbits = pa_type.bit_width
diff --git a/pandas/core/arrays/arrow/dtype.py b/pandas/core/arrays/arrow/dtype.py
index 3e3213b48670f..bed2ed113606e 100644
--- a/pandas/core/arrays/arrow/dtype.py
+++ b/pandas/core/arrays/arrow/dtype.py
@@ -5,7 +5,7 @@
import numpy as np
from pandas._typing import DtypeObj
-from pandas.compat import pa_version_under6p0
+from pandas.compat import pa_version_under7p0
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.base import (
@@ -13,7 +13,7 @@
register_extension_dtype,
)
-if not pa_version_under6p0:
+if not pa_version_under7p0:
import pyarrow as pa
@@ -66,8 +66,8 @@ class ArrowDtype(StorageExtensionDtype):
def __init__(self, pyarrow_dtype: pa.DataType) -> None:
super().__init__("pyarrow")
- if pa_version_under6p0:
- raise ImportError("pyarrow>=6.0.0 is required for ArrowDtype")
+ if pa_version_under7p0:
+ raise ImportError("pyarrow>=7.0.0 is required for ArrowDtype")
if not isinstance(pyarrow_dtype, pa.DataType):
raise ValueError(
f"pyarrow_dtype ({pyarrow_dtype}) must be an instance "
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index ff32fd5697e07..a8ae9d05cd13b 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -21,7 +21,7 @@
npt,
type_t,
)
-from pandas.compat import pa_version_under6p0
+from pandas.compat import pa_version_under7p0
from pandas.compat.numpy import function as nv
from pandas.util._decorators import doc
@@ -115,9 +115,9 @@ def __init__(self, storage=None) -> None:
raise ValueError(
f"Storage must be 'python' or 'pyarrow'. Got {storage} instead."
)
- if storage == "pyarrow" and pa_version_under6p0:
+ if storage == "pyarrow" and pa_version_under7p0:
raise ImportError(
- "pyarrow>=6.0.0 is required for PyArrow backed StringArray."
+ "pyarrow>=7.0.0 is required for PyArrow backed StringArray."
)
self.storage = storage
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 4aebe61412866..717d41785f002 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -17,7 +17,7 @@
Scalar,
npt,
)
-from pandas.compat import pa_version_under6p0
+from pandas.compat import pa_version_under7p0
from pandas.core.dtypes.common import (
is_bool_dtype,
@@ -40,7 +40,7 @@
)
from pandas.core.strings.object_array import ObjectStringArrayMixin
-if not pa_version_under6p0:
+if not pa_version_under7p0:
import pyarrow as pa
import pyarrow.compute as pc
@@ -50,8 +50,8 @@
def _chk_pyarrow_available() -> None:
- if pa_version_under6p0:
- msg = "pyarrow>=6.0.0 is required for PyArrow backed ArrowExtensionArray."
+ if pa_version_under7p0:
+ msg = "pyarrow>=7.0.0 is required for PyArrow backed ArrowExtensionArray."
raise ImportError(msg)
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index da5fc46c03d92..ecb9878fbb341 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -5,7 +5,6 @@
import numpy as np
import pytest
-from pandas.compat import pa_version_under6p0
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_dtype_equal
@@ -358,11 +357,6 @@ def test_reduce_missing(skipna, dtype):
@pytest.mark.parametrize("method", ["min", "max"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max(method, skipna, dtype, request):
- if dtype.storage == "pyarrow" and pa_version_under6p0:
- reason = "'ArrowStringArray' object has no attribute 'max'"
- mark = pytest.mark.xfail(raises=TypeError, reason=reason)
- request.node.add_marker(mark)
-
arr = pd.Series(["a", "b", "c", None], dtype=dtype)
result = getattr(arr, method)(skipna=skipna)
if skipna:
@@ -375,7 +369,7 @@ def test_min_max(method, skipna, dtype, request):
@pytest.mark.parametrize("method", ["min", "max"])
@pytest.mark.parametrize("box", [pd.Series, pd.array])
def test_min_max_numpy(method, box, dtype, request):
- if dtype.storage == "pyarrow" and (pa_version_under6p0 or box is pd.array):
+ if dtype.storage == "pyarrow" and box is pd.array:
if box is pd.array:
reason = "'<=' not supported between instances of 'str' and 'NoneType'"
else:
diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py
index 071f5cad725cf..07c6bca67311b 100644
--- a/pandas/tests/arrays/string_/test_string_arrow.py
+++ b/pandas/tests/arrays/string_/test_string_arrow.py
@@ -4,7 +4,7 @@
import numpy as np
import pytest
-from pandas.compat import pa_version_under6p0
+from pandas.compat import pa_version_under7p0
import pandas as pd
import pandas._testing as tm
@@ -15,8 +15,8 @@
from pandas.core.arrays.string_arrow import ArrowStringArray
skip_if_no_pyarrow = pytest.mark.skipif(
- pa_version_under6p0,
- reason="pyarrow>=6.0.0 is required for PyArrow backed StringArray",
+ pa_version_under7p0,
+ reason="pyarrow>=7.0.0 is required for PyArrow backed StringArray",
)
@@ -119,11 +119,11 @@ def test_from_sequence_wrong_dtype_raises():
@pytest.mark.skipif(
- not pa_version_under6p0,
+ not pa_version_under7p0,
reason="pyarrow is installed",
)
def test_pyarrow_not_installed_raises():
- msg = re.escape("pyarrow>=6.0.0 is required for PyArrow backed")
+ msg = re.escape("pyarrow>=7.0.0 is required for PyArrow backed")
with pytest.raises(ImportError, match=msg):
StringDtype(storage="pyarrow")
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index e31d8605eeb06..522a0d59e4161 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -29,7 +29,6 @@
PY311,
is_ci_environment,
is_platform_windows,
- pa_version_under6p0,
pa_version_under7p0,
pa_version_under8p0,
pa_version_under9p0,
@@ -51,7 +50,7 @@
)
from pandas.tests.extension import base
-pa = pytest.importorskip("pyarrow", minversion="6.0.0")
+pa = pytest.importorskip("pyarrow", minversion="7.0.0")
from pandas.core.arrays.arrow.array import ArrowExtensionArray
@@ -275,13 +274,6 @@ def test_from_sequence_pa_array(self, data, request):
assert isinstance(result._data, pa.ChunkedArray)
def test_from_sequence_pa_array_notimplemented(self, request):
- if pa_version_under6p0:
- request.node.add_marker(
- pytest.mark.xfail(
- raises=AttributeError,
- reason="month_day_nano_interval not implemented by pyarrow.",
- )
- )
with pytest.raises(NotImplementedError, match="Converting strings to"):
ArrowExtensionArray._from_sequence_of_strings(
["12-1"], dtype=pa.month_day_nano_interval()
@@ -320,13 +312,6 @@ def test_from_sequence_of_strings_pa_array(self, data, request):
),
)
)
- elif pa_version_under6p0 and pa.types.is_temporal(pa_dtype):
- request.node.add_marker(
- pytest.mark.xfail(
- raises=pa.ArrowNotImplementedError,
- reason=f"pyarrow doesn't support string cast from {pa_dtype}",
- )
- )
pa_array = data._data.cast(pa.string())
result = type(data)._from_sequence_of_strings(pa_array, dtype=data.dtype)
tm.assert_extension_array_equal(result, data)
@@ -525,28 +510,8 @@ def test_reduce_series(self, data, all_numeric_reductions, skipna, request):
)
if all_numeric_reductions in {"skew", "kurt"}:
request.node.add_marker(xfail_mark)
- elif (
- all_numeric_reductions in {"median", "var", "std", "prod", "max", "min"}
- and pa_version_under6p0
- ):
- request.node.add_marker(xfail_mark)
elif all_numeric_reductions == "sem" and pa_version_under8p0:
request.node.add_marker(xfail_mark)
- elif (
- all_numeric_reductions in {"sum", "mean"}
- and skipna is False
- and pa_version_under6p0
- and (pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype))
- ):
- request.node.add_marker(
- pytest.mark.xfail(
- raises=AssertionError,
- reason=(
- f"{all_numeric_reductions} with skip_nulls={skipna} did not "
- f"return NA for {pa_dtype} with pyarrow={pa.__version__}"
- ),
- )
- )
elif all_numeric_reductions in [
"mean",
@@ -784,12 +749,6 @@ def test_view(self, data):
class TestBaseMissing(base.BaseMissingTests):
- def test_dropna_array(self, data_missing):
- with tm.maybe_produces_warning(
- PerformanceWarning, pa_version_under6p0, check_stacklevel=False
- ):
- super().test_dropna_array(data_missing)
-
def test_fillna_no_op_returns_copy(self, data):
with tm.maybe_produces_warning(
PerformanceWarning, pa_version_under7p0, check_stacklevel=False
@@ -910,11 +869,6 @@ def test_value_counts_with_normalize(self, data, request):
):
super().test_value_counts_with_normalize(data)
- @pytest.mark.xfail(
- pa_version_under6p0,
- raises=NotImplementedError,
- reason="argmin/max only implemented for pyarrow version >= 6.0",
- )
def test_argmin_argmax(
self, data_for_sorting, data_missing_for_sorting, na_value, request
):
@@ -943,13 +897,6 @@ def test_argmin_argmax(
def test_argreduce_series(
self, data_missing_for_sorting, op_name, skipna, expected, request
):
- if pa_version_under6p0 and skipna:
- request.node.add_marker(
- pytest.mark.xfail(
- raises=NotImplementedError,
- reason="min_max not supported in pyarrow",
- )
- )
super().test_argreduce_series(
data_missing_for_sorting, op_name, skipna, expected
)
@@ -1118,7 +1065,7 @@ def _get_arith_xfail_marker(self, opname, pa_dtype):
if (
opname == "__rpow__"
and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))
- and not pa_version_under6p0
+ and not pa_version_under7p0
):
mark = pytest.mark.xfail(
reason=(
@@ -1137,7 +1084,7 @@ def _get_arith_xfail_marker(self, opname, pa_dtype):
elif (
opname in {"__rtruediv__", "__rfloordiv__"}
and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))
- and not pa_version_under6p0
+ and not pa_version_under7p0
):
mark = pytest.mark.xfail(
raises=pa.ArrowInvalid,
@@ -1224,7 +1171,7 @@ def test_arith_series_with_array(
"__rsub__",
)
and pa.types.is_unsigned_integer(pa_dtype)
- and not pa_version_under6p0
+ and not pa_version_under7p0
):
request.node.add_marker(
pytest.mark.xfail(
@@ -1430,11 +1377,6 @@ def test_quantile(data, interpolation, quantile, request):
tm.assert_series_equal(result, expected)
-@pytest.mark.xfail(
- pa_version_under6p0,
- raises=NotImplementedError,
- reason="mode only supported for pyarrow version >= 6.0",
-)
@pytest.mark.parametrize("dropna", [True, False])
@pytest.mark.parametrize(
"take_idx, exp_idx",
diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py
index 3e865947aa968..0743c1e26c62f 100644
--- a/pandas/tests/extension/test_string.py
+++ b/pandas/tests/extension/test_string.py
@@ -18,10 +18,7 @@
import numpy as np
import pytest
-from pandas.compat import (
- pa_version_under6p0,
- pa_version_under7p0,
-)
+from pandas.compat import pa_version_under7p0
from pandas.errors import PerformanceWarning
import pandas as pd
@@ -160,11 +157,7 @@ class TestIndex(base.BaseIndexTests):
class TestMissing(base.BaseMissingTests):
def test_dropna_array(self, data_missing):
- with tm.maybe_produces_warning(
- PerformanceWarning,
- pa_version_under6p0 and data_missing.dtype.storage == "pyarrow",
- ):
- result = data_missing.dropna()
+ result = data_missing.dropna()
expected = data_missing[[1]]
self.assert_extension_array_equal(result, expected)
@@ -220,13 +213,6 @@ def test_argsort_missing(self, data_missing_for_sorting):
def test_argmin_argmax(
self, data_for_sorting, data_missing_for_sorting, na_value, request
):
- if pa_version_under6p0 and data_missing_for_sorting.dtype.storage == "pyarrow":
- request.node.add_marker(
- pytest.mark.xfail(
- raises=NotImplementedError,
- reason="min_max not supported in pyarrow",
- )
- )
super().test_argmin_argmax(data_for_sorting, data_missing_for_sorting, na_value)
@pytest.mark.parametrize(
@@ -245,17 +231,6 @@ def test_argmin_argmax(
def test_argreduce_series(
self, data_missing_for_sorting, op_name, skipna, expected, request
):
- if (
- pa_version_under6p0
- and data_missing_for_sorting.dtype.storage == "pyarrow"
- and skipna
- ):
- request.node.add_marker(
- pytest.mark.xfail(
- raises=NotImplementedError,
- reason="min_max not supported in pyarrow",
- )
- )
super().test_argreduce_series(
data_missing_for_sorting, op_name, skipna, expected
)
diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py
index 8a1a2783b5dc6..c13817dd1cdb7 100644
--- a/pandas/tests/frame/methods/test_astype.py
+++ b/pandas/tests/frame/methods/test_astype.py
@@ -3,7 +3,7 @@
import numpy as np
import pytest
-from pandas.compat import pa_version_under6p0
+from pandas.compat import pa_version_under7p0
import pandas.util._test_decorators as td
import pandas as pd
@@ -870,7 +870,7 @@ def test_frame_astype_no_copy():
assert np.shares_memory(df.b.values, result.b.values)
-@pytest.mark.skipif(pa_version_under6p0, reason="pyarrow is required for this test")
+@pytest.mark.skipif(pa_version_under7p0, reason="pyarrow is required for this test")
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
def test_astype_copies(dtype):
# GH#50984
diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py
index 5418a2a60dc80..31a8e7a7d36ac 100644
--- a/pandas/tests/groupby/test_groupby_dropna.py
+++ b/pandas/tests/groupby/test_groupby_dropna.py
@@ -1,7 +1,7 @@
import numpy as np
import pytest
-from pandas.compat.pyarrow import pa_version_under6p0
+from pandas.compat.pyarrow import pa_version_under7p0
from pandas.core.dtypes.missing import na_value_for_dtype
@@ -416,7 +416,7 @@ def test_groupby_drop_nan_with_multi_index():
pytest.param(
"string[pyarrow]",
marks=pytest.mark.skipif(
- pa_version_under6p0, reason="pyarrow is not installed"
+ pa_version_under7p0, reason="pyarrow is not installed"
),
),
"datetime64[ns]",
diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py
index fa03855facedf..78b46e5a32a48 100644
--- a/pandas/tests/indexes/multi/test_constructors.py
+++ b/pandas/tests/indexes/multi/test_constructors.py
@@ -7,7 +7,7 @@
import numpy as np
import pytest
-from pandas.compat import pa_version_under6p0
+from pandas.compat import pa_version_under7p0
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
@@ -650,7 +650,7 @@ def test_from_frame():
tm.assert_index_equal(expected, result)
-@pytest.mark.skipif(pa_version_under6p0, reason="minimum pyarrow not installed")
+@pytest.mark.skipif(pa_version_under7p0, reason="minimum pyarrow not installed")
def test_from_frame_missing_values_multiIndex():
# GH 39984
import pyarrow as pa
diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py
index d985800d943bd..09a2967d62fee 100644
--- a/pandas/tests/io/parser/test_parse_dates.py
+++ b/pandas/tests/io/parser/test_parse_dates.py
@@ -19,10 +19,7 @@
from pandas._libs.tslibs import parsing
from pandas._libs.tslibs.parsing import py_parse_datetime_string
-from pandas.compat.pyarrow import (
- pa_version_under6p0,
- pa_version_under7p0,
-)
+from pandas.compat.pyarrow import pa_version_under7p0
import pandas as pd
from pandas import (
@@ -456,7 +453,7 @@ def test_date_col_as_index_col(all_parsers):
columns=["X0", "X2", "X3", "X4", "X5", "X6", "X7"],
index=index,
)
- if parser.engine == "pyarrow" and not pa_version_under6p0:
+ if parser.engine == "pyarrow" and not pa_version_under7p0:
# https://github.com/pandas-dev/pandas/issues/44231
# pyarrow 6.0 starts to infer time type
expected["X2"] = pd.to_datetime("1970-01-01" + expected["X2"]).dt.time
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 862c7d4c30fa8..7aba335040098 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -12,7 +12,7 @@
from pandas.compat import is_platform_windows
from pandas.compat.pyarrow import (
- pa_version_under6p0,
+ pa_version_under7p0,
pa_version_under8p0,
)
import pandas.util._test_decorators as td
@@ -221,7 +221,7 @@ def check_partition_names(path, expected):
expected: iterable of str
Expected partition names.
"""
- if pa_version_under6p0:
+ if pa_version_under7p0:
import pyarrow.parquet as pq
dataset = pq.ParquetDataset(path, validate_schema=False)
@@ -591,7 +591,7 @@ def test_write_column_index_nonstring(self, pa):
msg = r"parquet must have string column names"
self.check_error_on_write(df, engine, ValueError, msg)
- @pytest.mark.skipif(pa_version_under6p0, reason="minimum pyarrow not installed")
+ @pytest.mark.skipif(pa_version_under7p0, reason="minimum pyarrow not installed")
def test_use_nullable_dtypes(self, engine, request):
import pyarrow.parquet as pq
@@ -641,7 +641,7 @@ def test_use_nullable_dtypes(self, engine, request):
expected = expected.drop("c", axis=1)
tm.assert_frame_equal(result2, expected)
- @pytest.mark.skipif(pa_version_under6p0, reason="minimum pyarrow not installed")
+ @pytest.mark.skipif(pa_version_under7p0, reason="minimum pyarrow not installed")
def test_use_nullable_dtypes_option(self, engine, request):
# GH#50748
import pyarrow.parquet as pq
@@ -968,8 +968,8 @@ def test_additional_extension_types(self, pa):
def test_timestamp_nanoseconds(self, pa):
# with version 2.6, pyarrow defaults to writing the nanoseconds, so
# this should work without error
- # Note in previous pyarrows(<6.0.0), only the pseudo-version 2.0 was available
- if not pa_version_under6p0:
+ # Note in previous pyarrows(<7.0.0), only the pseudo-version 2.0 was available
+ if not pa_version_under7p0:
ver = "2.6"
else:
ver = "2.0"
@@ -978,7 +978,7 @@ def test_timestamp_nanoseconds(self, pa):
def test_timezone_aware_index(self, request, pa, timezone_aware_date_list):
if (
- not pa_version_under6p0
+ not pa_version_under7p0
and timezone_aware_date_list.tzinfo != datetime.timezone.utc
):
request.node.add_marker(
diff --git a/pyproject.toml b/pyproject.toml
index 3ddc247292ca9..e5d6f420915ef 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -63,8 +63,8 @@ fss = ['fsspec>=2021.07.0']
aws = ['s3fs>=2021.08.0']
gcp = ['gcsfs>=2021.07.0', 'pandas-gbq>=0.15.0']
excel = ['odfpy>=1.4.1', 'openpyxl>=3.0.7', 'pyxlsb>=1.0.8', 'xlrd>=2.0.1', 'xlsxwriter>=1.4.3']
-parquet = ['pyarrow>=6.0.0']
-feather = ['pyarrow>=6.0.0']
+parquet = ['pyarrow>=7.0.0']
+feather = ['pyarrow>=7.0.0']
hdf5 = [# blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297)
#'blosc>=1.20.1',
'tables>=3.6.1']
@@ -97,7 +97,7 @@ all = ['beautifulsoup4>=4.9.3',
'openpyxl>=3.0.7',
'pandas-gbq>=0.15.0',
'psycopg2>=2.8.6',
- 'pyarrow>=6.0.0',
+ 'pyarrow>=7.0.0',
'pymysql>=1.0.2',
'PyQt5>=5.15.1',
'pyreadstat>=1.1.2',
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51151 | 2023-02-03T19:51:29Z | 2023-02-07T02:10:36Z | 2023-02-07T02:10:36Z | 2023-02-07T10:27:08Z |
REF: avoid passing silently-ignored kwargs to Resampler | diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index ebb803ee8f3b4..255b926ba226a 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -151,7 +151,6 @@ def __init__(
*,
group_keys: bool | lib.NoDefault = lib.no_default,
selection=None,
- **kwargs,
) -> None:
self._timegrouper = groupby
self.keys = None
@@ -171,18 +170,6 @@ def __init__(
else:
self.exclusions = frozenset()
- @final
- def _shallow_copy(self, obj, **kwargs):
- """
- return a new object with the replacement attributes
- """
- if isinstance(obj, self._constructor):
- obj = obj.obj
- for attr in self._attributes:
- if attr not in kwargs:
- kwargs[attr] = getattr(self, attr)
- return self._constructor(obj, **kwargs)
-
def __str__(self) -> str:
"""
Provide a nice str repr of our rolling object.
@@ -1181,7 +1168,7 @@ def _apply(self, f, *args, **kwargs):
"""
def func(x):
- x = self._shallow_copy(x, groupby=self._timegrouper)
+ x = self._resampler_cls(x, groupby=self._timegrouper)
if isinstance(f, str):
return getattr(x, f)(**kwargs)
@@ -1364,7 +1351,7 @@ class DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler):
"""
@property
- def _constructor(self):
+ def _resampler_cls(self):
return DatetimeIndexResampler
@@ -1476,7 +1463,7 @@ class PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler):
"""
@property
- def _constructor(self):
+ def _resampler_cls(self):
return PeriodIndexResampler
@@ -1504,7 +1491,7 @@ class TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler):
"""
@property
- def _constructor(self):
+ def _resampler_cls(self):
return TimedeltaIndexResampler
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51149 | 2023-02-03T18:54:01Z | 2023-02-04T19:19:33Z | 2023-02-04T19:19:33Z | 2023-02-04T20:02:14Z |
DOC: Add CoW optimizations to whatsnew | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index bc1cf8d03ce98..5ba0d5b60e372 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -189,6 +189,7 @@ Copy-on-Write improvements
- :meth:`DataFrame.reset_index` / :meth:`Series.reset_index`
- :meth:`DataFrame.set_index`
- :meth:`DataFrame.set_axis` / :meth:`Series.set_axis`
+ - :meth:`DataFrame.set_flags` / :meth:`Series.set_flags`
- :meth:`DataFrame.rename_axis` / :meth:`Series.rename_axis`
- :meth:`DataFrame.reindex` / :meth:`Series.reindex`
- :meth:`DataFrame.reindex_like` / :meth:`Series.reindex_like`
@@ -202,7 +203,28 @@ Copy-on-Write improvements
- :meth:`DataFrame.add_prefix` / :meth:`Series.add_prefix`
- :meth:`DataFrame.add_suffix` / :meth:`Series.add_suffix`
- :meth:`DataFrame.drop_duplicates` / :meth:`Series.drop_duplicates`
+ - :meth:`DataFrame.droplevel` / :meth:`Series.droplevel`
- :meth:`DataFrame.reorder_levels` / :meth:`Series.reorder_levels`
+ - :meth:`DataFrame.between_time` / :meth:`Series.between_time`
+ - :meth:`DataFrame.filter` / :meth:`Series.filter`
+ - :meth:`DataFrame.head` / :meth:`Series.head`
+ - :meth:`DataFrame.tail` / :meth:`Series.tail`
+ - :meth:`DataFrame.isetitem`
+ - :meth:`DataFrame.pipe` / :meth:`Series.pipe`
+ - :meth:`DataFrame.pop` / :meth:`Series.pop`
+ - :meth:`DataFrame.replace` / :meth:`Series.replace`
+ - :meth:`DataFrame.shift` / :meth:`Series.shift`
+ - :meth:`DataFrame.sort_index` / :meth:`Series.sort_index`
+ - :meth:`DataFrame.sort_values` / :meth:`Series.sort_values`
+ - :meth:`DataFrame.squeeze` / :meth:`Series.squeeze`
+ - :meth:`DataFrame.swapaxes`
+ - :meth:`DataFrame.swaplevel` / :meth:`Series.swaplevel`
+ - :meth:`DataFrame.take` / :meth:`Series.take`
+ - :meth:`DataFrame.to_timestamp` / :meth:`Series.to_timestamp`
+ - :meth:`DataFrame.to_period` / :meth:`Series.to_period`
+ - :meth:`DataFrame.truncate`
+ - :meth:`DataFrame.tz_convert` / :meth:`Series.tz_localize`
+ - :func:`concat`
These methods return views when Copy-on-Write is enabled, which provides a significant
performance improvement compared to the regular execution (:issue:`49473`).
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
cc @lithomas1 @jorisvandenbossche
Let's add them on a pr basis after this is merged | https://api.github.com/repos/pandas-dev/pandas/pulls/51148 | 2023-02-03T18:40:29Z | 2023-02-03T20:58:52Z | 2023-02-03T20:58:52Z | 2023-02-08T15:57:11Z |
BUG: interpolate not respecting CoW | diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 81c5810d29456..ff80cccaa20d3 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -409,8 +409,15 @@ def diff(self: T, n: int, axis: AxisInt) -> T:
axis = self._normalize_axis(axis)
return self.apply("diff", n=n, axis=axis)
- def interpolate(self: T, **kwargs) -> T:
- return self.apply("interpolate", **kwargs)
+ def interpolate(self: T, inplace: bool, **kwargs) -> T:
+ if inplace:
+ # TODO(CoW) can be optimized to only copy those blocks that have refs
+ if using_copy_on_write() and any(
+ not self._has_no_reference_block(i) for i in range(len(self.blocks))
+ ):
+ self = self.copy()
+
+ return self.apply("interpolate", inplace=inplace, **kwargs)
def shift(self: T, periods: int, axis: AxisInt, fill_value) -> T:
axis = self._normalize_axis(axis)
diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
index fbd4bbfb38c27..26038725c544f 100644
--- a/pandas/tests/copy_view/test_methods.py
+++ b/pandas/tests/copy_view/test_methods.py
@@ -1194,6 +1194,22 @@ def test_asfreq_noop(using_copy_on_write):
tm.assert_frame_equal(df, df_orig)
+def test_interpolate_creates_copy(using_copy_on_write):
+ # GH#51126
+ df = DataFrame({"a": [1.5, np.nan, 3]})
+ view = df[:]
+ expected = df.copy()
+
+ df.ffill(inplace=True)
+ df.iloc[0, 0] = 100.5
+
+ if using_copy_on_write:
+ tm.assert_frame_equal(view, expected)
+ else:
+ expected = DataFrame({"a": [100.5, 1.5, 3]})
+ tm.assert_frame_equal(view, expected)
+
+
def test_isetitem(using_copy_on_write):
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
df_orig = df.copy()
| - [x] closes #51126 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
this is independent of #51144 | https://api.github.com/repos/pandas-dev/pandas/pulls/51147 | 2023-02-03T18:31:24Z | 2023-02-04T19:16:47Z | 2023-02-04T19:16:47Z | 2023-02-04T19:16:51Z |
REF/API: dont alter Grouper in _get_grouper | diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 52700a29cb592..d77ad59a4bb82 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -6,9 +6,9 @@
from typing import (
TYPE_CHECKING,
- Any,
Hashable,
Iterator,
+ cast,
final,
)
@@ -286,7 +286,7 @@ def ax(self) -> Index:
def _get_grouper(
self, obj: NDFrameT, validate: bool = True
- ) -> tuple[Any, ops.BaseGrouper, NDFrameT]:
+ ) -> tuple[ops.BaseGrouper, NDFrameT]:
"""
Parameters
----------
@@ -296,15 +296,11 @@ def _get_grouper(
Returns
-------
- a tuple of binner, grouper, obj (possibly sorted)
+ a tuple of grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
- # error: Value of type variable "NDFrameT" of "get_grouper" cannot be
- # "Optional[Any]"
- # error: Incompatible types in assignment (expression has type "BaseGrouper",
- # variable has type "None")
- self.grouper, _, self.obj = get_grouper( # type: ignore[type-var,assignment]
- self.obj,
+ grouper, _, obj = get_grouper(
+ cast(NDFrameT, self.obj),
[self.key],
axis=self.axis,
level=self.level,
@@ -313,9 +309,7 @@ def _get_grouper(
dropna=self.dropna,
)
- # error: Incompatible return value type (got "Tuple[None, None, None]",
- # expected "Tuple[Any, BaseGrouper, NDFrameT]")
- return self.binner, self.grouper, self.obj # type: ignore[return-value]
+ return grouper, obj
@final
def _set_grouper(self, obj: NDFrame, sort: bool = False) -> None:
@@ -506,7 +500,7 @@ def __init__(
# check again as we have by this point converted these
# to an actual value (rather than a pd.Grouper)
assert self.obj is not None # for mypy
- _, newgrouper, newobj = self.grouping_vector._get_grouper(
+ newgrouper, newobj = self.grouping_vector._get_grouper(
self.obj, validate=False
)
self.obj = newobj
@@ -814,7 +808,7 @@ def get_grouper(
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
- binner, grouper, obj = key._get_grouper(obj, validate=False)
+ grouper, obj = key._get_grouper(obj, validate=False)
if key.key is None:
return grouper, frozenset(), obj
else:
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index d3b3c844e8c4e..4863308670674 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1691,7 +1691,7 @@ def _get_resampler(self, obj, kind=None):
def _get_grouper(self, obj, validate: bool = True):
# create the resampler and return our binner
r = self._get_resampler(obj)
- return r.binner, r.grouper, r.obj
+ return r.grouper, r.obj
def _get_time_bins(self, ax: DatetimeIndex):
if not isinstance(ax, DatetimeIndex):
diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py
index 9df3439f7b75b..debfb48c2b39c 100644
--- a/pandas/tests/resample/test_time_grouper.py
+++ b/pandas/tests/resample/test_time_grouper.py
@@ -64,7 +64,7 @@ def test_apply_iteration():
df = DataFrame({"open": 1, "close": 2}, index=ind)
tg = Grouper(freq="M")
- _, grouper, _ = tg._get_grouper(df)
+ grouper, _ = tg._get_grouper(df)
# Errors
grouped = df.groupby(grouper, group_keys=False)
| This doesn't break any tests and I haven't concocted any new cases in which it would matter, but the logic is hard enough to disentangle (xref #51134) that I can't promise it won't | https://api.github.com/repos/pandas-dev/pandas/pulls/51145 | 2023-02-03T16:56:06Z | 2023-02-03T21:07:07Z | 2023-02-03T21:07:07Z | 2023-02-03T23:20:22Z |
CoW: Push reference tracking down to the block level | diff --git a/doc/source/development/copy_on_write.rst b/doc/source/development/copy_on_write.rst
new file mode 100644
index 0000000000000..34625ed645615
--- /dev/null
+++ b/doc/source/development/copy_on_write.rst
@@ -0,0 +1,41 @@
+.. _copy_on_write:
+
+{{ header }}
+
+*************
+Copy on write
+*************
+
+Copy on Write is a mechanism to simplify the indexing API and improve
+performance through avoiding copies if possible.
+CoW means that any DataFrame or Series derived from another in any way always
+behaves as a copy.
+
+Reference tracking
+------------------
+
+To be able to determine, if we have to make a copy when writing into a DataFrame,
+we have to be aware, if the values are shared with another DataFrame. pandas
+keeps track of all ``Blocks`` that share values with another block internally to
+be able to tell when a copy needs to be triggered. The reference tracking
+mechanism is implemented on the Block level.
+
+We use a custom reference tracker object, ``BlockValuesRefs``, that keeps
+track of every block, whose values share memory with each other. The reference
+is held through a weak-reference. Every two blocks that share some memory should
+point to the same ``BlockValuesRefs`` object. If one block goes out of
+scope, the reference to this block dies. As a consequence, the reference tracker
+object always knows how many blocks are alive and share memory.
+
+Whenever a :class:`DataFrame` or :class:`Series` object is sharing data with another
+object, it is required that each of those objects have its own BlockManager and Block
+objects. Thus, in other words, one Block instance (that is held by a DataFrame, not
+necessarily for intermediate objects) should always be uniquely used for only
+a single DataFrame/Series object. For example, when you want to use the same
+Block for another object, you can create a shallow copy of the Block instance
+with ``block.copy(deep=False)`` (which will create a new Block instance with
+the same underlying values and which will correctly set up the references).
+
+We can ask the reference tracking object if there is another block alive that shares
+data with us before writing into the values. We can trigger a copy before
+writing if there is in fact another block alive.
diff --git a/doc/source/development/index.rst b/doc/source/development/index.rst
index c741441cf67a1..69f04494a271c 100644
--- a/doc/source/development/index.rst
+++ b/doc/source/development/index.rst
@@ -18,6 +18,7 @@ Development
contributing_codebase
maintaining
internals
+ copy_on_write
debugging_extensions
extending
developer
diff --git a/pandas/_libs/internals.pyi b/pandas/_libs/internals.pyi
index 79bdbea71e4d8..5dfcc3726c84f 100644
--- a/pandas/_libs/internals.pyi
+++ b/pandas/_libs/internals.pyi
@@ -4,6 +4,7 @@ from typing import (
final,
overload,
)
+import weakref
import numpy as np
@@ -59,8 +60,13 @@ class SharedBlock:
_mgr_locs: BlockPlacement
ndim: int
values: ArrayLike
+ refs: BlockValuesRefs
def __init__(
- self, values: ArrayLike, placement: BlockPlacement, ndim: int
+ self,
+ values: ArrayLike,
+ placement: BlockPlacement,
+ ndim: int,
+ refs: BlockValuesRefs | None = ...,
) -> None: ...
class NumpyBlock(SharedBlock):
@@ -87,3 +93,9 @@ class BlockManager:
) -> None: ...
def get_slice(self: T, slobj: slice, axis: int = ...) -> T: ...
def _rebuild_blknos_and_blklocs(self) -> None: ...
+
+class BlockValuesRefs:
+ referenced_blocks: list[weakref.ref]
+ def __init__(self, blk: SharedBlock) -> None: ...
+ def add_reference(self, blk: SharedBlock) -> None: ...
+ def has_reference(self) -> bool: ...
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 3333ac1115177..b5ff69d92492f 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -580,9 +580,16 @@ cdef class SharedBlock:
"""
cdef:
public BlockPlacement _mgr_locs
+ public BlockValuesRefs refs
readonly int ndim
- def __cinit__(self, values, placement: BlockPlacement, ndim: int):
+ def __cinit__(
+ self,
+ values,
+ placement: BlockPlacement,
+ ndim: int,
+ refs: BlockValuesRefs | None = None,
+ ):
"""
Parameters
----------
@@ -591,9 +598,22 @@ cdef class SharedBlock:
placement : BlockPlacement
ndim : int
1 for SingleBlockManager/Series, 2 for BlockManager/DataFrame
+ refs: BlockValuesRefs, optional
+ Ref tracking object or None if block does not have any refs.
"""
self._mgr_locs = placement
self.ndim = ndim
+ if refs is None:
+ # if no refs are passed, that means we are creating a Block from
+ # new values that it uniquely owns -> start a new BlockValuesRefs
+ # object that only references this block
+ self.refs = BlockValuesRefs(self)
+ else:
+ # if refs are passed, this is the BlockValuesRefs object that is shared
+ # with the parent blocks which share the values, and a reference to this
+ # new block is added
+ refs.add_reference(self)
+ self.refs = refs
cpdef __reduce__(self):
args = (self.values, self.mgr_locs.indexer, self.ndim)
@@ -619,9 +639,15 @@ cdef class NumpyBlock(SharedBlock):
cdef:
public ndarray values
- def __cinit__(self, ndarray values, BlockPlacement placement, int ndim):
+ def __cinit__(
+ self,
+ ndarray values,
+ BlockPlacement placement,
+ int ndim,
+ refs: BlockValuesRefs | None = None,
+ ):
# set values here; the (implicit) call to SharedBlock.__cinit__ will
- # set placement and ndim
+ # set placement, ndim and refs
self.values = values
cpdef NumpyBlock getitem_block_index(self, slice slicer):
@@ -631,7 +657,7 @@ cdef class NumpyBlock(SharedBlock):
Assumes self.ndim == 2
"""
new_values = self.values[..., slicer]
- return type(self)(new_values, self._mgr_locs, ndim=self.ndim)
+ return type(self)(new_values, self._mgr_locs, ndim=self.ndim, refs=self.refs)
cdef class NDArrayBackedBlock(SharedBlock):
@@ -641,9 +667,15 @@ cdef class NDArrayBackedBlock(SharedBlock):
cdef public:
NDArrayBacked values
- def __cinit__(self, NDArrayBacked values, BlockPlacement placement, int ndim):
+ def __cinit__(
+ self,
+ NDArrayBacked values,
+ BlockPlacement placement,
+ int ndim,
+ refs: BlockValuesRefs | None = None,
+ ):
# set values here; the (implicit) call to SharedBlock.__cinit__ will
- # set placement and ndim
+ # set placement, ndim and refs
self.values = values
cpdef NDArrayBackedBlock getitem_block_index(self, slice slicer):
@@ -653,16 +685,22 @@ cdef class NDArrayBackedBlock(SharedBlock):
Assumes self.ndim == 2
"""
new_values = self.values[..., slicer]
- return type(self)(new_values, self._mgr_locs, ndim=self.ndim)
+ return type(self)(new_values, self._mgr_locs, ndim=self.ndim, refs=self.refs)
cdef class Block(SharedBlock):
cdef:
public object values
- def __cinit__(self, object values, BlockPlacement placement, int ndim):
+ def __cinit__(
+ self,
+ object values,
+ BlockPlacement placement,
+ int ndim,
+ refs: BlockValuesRefs | None = None,
+ ):
# set values here; the (implicit) call to SharedBlock.__cinit__ will
- # set placement and ndim
+ # set placement, ndim and refs
self.values = values
@@ -673,15 +711,11 @@ cdef class BlockManager:
public list axes
public bint _known_consolidated, _is_consolidated
public ndarray _blknos, _blklocs
- public list refs
- public object parent
def __cinit__(
self,
blocks=None,
axes=None,
- refs=None,
- parent=None,
verify_integrity=True,
):
# None as defaults for unpickling GH#42345
@@ -695,8 +729,6 @@ cdef class BlockManager:
self.blocks = blocks
self.axes = axes.copy() # copy to make sure we are not remotely-mutable
- self.refs = refs
- self.parent = parent
# Populate known_consolidate, blknos, and blklocs lazily
self._known_consolidated = False
@@ -805,16 +837,12 @@ cdef class BlockManager:
ndarray blknos, blklocs
nbs = []
- nrefs = []
for blk in self.blocks:
nb = blk.getitem_block_index(slobj)
nbs.append(nb)
- nrefs.append(weakref.ref(blk))
new_axes = [self.axes[0], self.axes[1]._getitem_slice(slobj)]
- mgr = type(self)(
- tuple(nbs), new_axes, nrefs, parent=self, verify_integrity=False
- )
+ mgr = type(self)(tuple(nbs), new_axes, verify_integrity=False)
# We can avoid having to rebuild blklocs/blknos
blklocs = self._blklocs
@@ -827,7 +855,7 @@ cdef class BlockManager:
def get_slice(self, slobj: slice, axis: int = 0) -> BlockManager:
if axis == 0:
- new_blocks, new_refs = self._slice_take_blocks_ax0(slobj)
+ new_blocks = self._slice_take_blocks_ax0(slobj)
elif axis == 1:
return self._get_index_slice(slobj)
else:
@@ -836,6 +864,40 @@ cdef class BlockManager:
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis]._getitem_slice(slobj)
- return type(self)(
- tuple(new_blocks), new_axes, new_refs, parent=self, verify_integrity=False
- )
+ return type(self)(tuple(new_blocks), new_axes, verify_integrity=False)
+
+
+cdef class BlockValuesRefs:
+ """Tracks all references to a given array.
+
+ Keeps track of all blocks (through weak references) that reference the same
+ data.
+ """
+ cdef:
+ public object referenced_blocks
+
+ def __cinit__(self, blk: SharedBlock) -> None:
+ self.referenced_blocks = weakref.WeakSet([blk])
+
+ def add_reference(self, blk: SharedBlock) -> None:
+ """Adds a new reference to our reference collection.
+
+ Parameters
+ ----------
+ blk: SharedBlock
+ The block that the new references should point to.
+ """
+ self.referenced_blocks.add(blk)
+
+ def has_reference(self) -> bool:
+ """Checks if block has foreign references.
+
+ A reference is only relevant if it is still alive. The reference to
+ ourselves does not count.
+
+ Returns
+ -------
+ bool
+ """
+ # Checking for more references than block pointing to itself
+ return len(self.referenced_blocks) > 1
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 2fbd3a6cdb046..bfee616d52aac 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -168,7 +168,7 @@ def load_newobj(self) -> None:
arr = np.array([], dtype="m8[ns]")
obj = cls.__new__(cls, arr, arr.dtype)
elif cls is BlockManager and not args:
- obj = cls.__new__(cls, (), [], None, False)
+ obj = cls.__new__(cls, (), [], False)
else:
obj = cls.__new__(cls, *args)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6008e6b6cb566..9ad291c39cbc5 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -768,8 +768,10 @@ def swapaxes(
)
assert isinstance(new_mgr, BlockManager)
assert isinstance(self._mgr, BlockManager)
- new_mgr.parent = self._mgr
- new_mgr.refs = [weakref.ref(self._mgr.blocks[0])]
+ new_mgr.blocks[0].refs = self._mgr.blocks[0].refs
+ new_mgr.blocks[0].refs.add_reference(
+ new_mgr.blocks[0] # type: ignore[arg-type]
+ )
return self._constructor(new_mgr).__finalize__(self, method="swapaxes")
elif (copy or copy is None) and self._mgr.is_single_block:
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 22fe227d21727..15abc143cd081 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -20,7 +20,10 @@
lib,
writers,
)
-from pandas._libs.internals import BlockPlacement
+from pandas._libs.internals import (
+ BlockPlacement,
+ BlockValuesRefs,
+)
from pandas._libs.missing import NA
from pandas._libs.tslibs import IncompatibleFrequency
from pandas._typing import (
@@ -144,6 +147,7 @@ class Block(PandasObject):
values: np.ndarray | ExtensionArray
ndim: int
+ refs: BlockValuesRefs
__init__: Callable
__slots__ = ()
@@ -261,7 +265,8 @@ def getitem_block(self, slicer: slice | npt.NDArray[np.intp]) -> Block:
new_mgr_locs = self._mgr_locs[slicer]
new_values = self._slice(slicer)
- return type(self)(new_values, new_mgr_locs, self.ndim)
+ refs = self.refs if isinstance(slicer, slice) else None
+ return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs)
@final
def getitem_block_columns(
@@ -277,7 +282,7 @@ def getitem_block_columns(
if new_values.ndim != self.values.ndim:
raise ValueError("Only same dim slicing is allowed")
- return type(self)(new_values, new_mgr_locs, self.ndim)
+ return type(self)(new_values, new_mgr_locs, self.ndim, refs=self.refs)
@final
def _can_hold_element(self, element: Any) -> bool:
@@ -502,9 +507,13 @@ def to_native_types(self, na_rep: str = "nan", quoting=None, **kwargs) -> Block:
def copy(self, deep: bool = True) -> Block:
"""copy constructor"""
values = self.values
+ refs: BlockValuesRefs | None
if deep:
values = values.copy()
- return type(self)(values, placement=self._mgr_locs, ndim=self.ndim)
+ refs = None
+ else:
+ refs = self.refs
+ return type(self)(values, placement=self._mgr_locs, ndim=self.ndim, refs=refs)
# ---------------------------------------------------------------------
# Replace
@@ -1337,6 +1346,10 @@ def delete(self, loc) -> list[Block]:
new_blocks: list[Block] = []
previous_loc = -1
+ # TODO(CoW): This is tricky, if parent block goes out of scope
+ # all split blocks are referencing each other even though they
+ # don't share data
+ refs = self.refs if self.refs.has_reference() else None
for idx in loc:
if idx == previous_loc + 1:
@@ -1347,7 +1360,9 @@ def delete(self, loc) -> list[Block]:
# argument type "Tuple[slice, slice]"
values = self.values[previous_loc + 1 : idx, :] # type: ignore[call-overload] # noqa
locs = mgr_locs_arr[previous_loc + 1 : idx]
- nb = type(self)(values, placement=BlockPlacement(locs), ndim=self.ndim)
+ nb = type(self)(
+ values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs
+ )
new_blocks.append(nb)
previous_loc = idx
@@ -1804,7 +1819,7 @@ def getitem_block_index(self, slicer: slice) -> ExtensionBlock:
# GH#42787 in principle this is equivalent to values[..., slicer], but we don't
# require subclasses of ExtensionArray to support that form (for now).
new_values = self.values[slicer]
- return type(self)(new_values, self._mgr_locs, ndim=self.ndim)
+ return type(self)(new_values, self._mgr_locs, ndim=self.ndim, refs=self.refs)
def diff(self, n: int, axis: AxisInt = 1) -> list[Block]:
# only reached with ndim == 2 and axis == 1
@@ -2137,7 +2152,9 @@ def new_block_2d(values: ArrayLike, placement: BlockPlacement):
return klass(values, ndim=2, placement=placement)
-def new_block(values, placement, *, ndim: int) -> Block:
+def new_block(
+ values, placement, *, ndim: int, refs: BlockValuesRefs | None = None
+) -> Block:
# caller is responsible for ensuring values is NOT a PandasArray
if not isinstance(placement, BlockPlacement):
@@ -2148,7 +2165,7 @@ def new_block(values, placement, *, ndim: int) -> Block:
klass = get_block_type(values.dtype)
values = maybe_coerce_values(values)
- return klass(values, ndim=ndim, placement=placement)
+ return klass(values, ndim=ndim, placement=placement, refs=refs)
def check_ndim(values, placement: BlockPlacement, ndim: int) -> None:
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index d46b51a2ee954..bedd4d92a1ea3 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -7,7 +7,6 @@
Sequence,
cast,
)
-import weakref
import numpy as np
@@ -62,10 +61,7 @@
ensure_block_shape,
new_block_2d,
)
-from pandas.core.internals.managers import (
- BlockManager,
- using_copy_on_write,
-)
+from pandas.core.internals.managers import BlockManager
if TYPE_CHECKING:
from pandas import Index
@@ -271,8 +267,6 @@ def _concat_managers_axis0(
offset = 0
blocks = []
- refs: list[weakref.ref | None] = []
- parents: list = []
for i, mgr in enumerate(mgrs):
# If we already reindexed, then we definitely don't need another copy
made_copy = had_reindexers[i]
@@ -289,17 +283,9 @@ def _concat_managers_axis0(
nb._mgr_locs = nb._mgr_locs.add(offset)
blocks.append(nb)
- if not made_copy and not copy and using_copy_on_write():
- refs.extend([weakref.ref(blk) for blk in mgr.blocks])
- parents.append(mgr)
- elif using_copy_on_write():
- refs.extend([None] * len(mgr.blocks))
-
offset += len(mgr.items)
- result_parents = parents if parents else None
- result_ref = refs if refs else None
- result = BlockManager(tuple(blocks), axes, parent=result_parents, refs=result_ref)
+ result = BlockManager(tuple(blocks), axes)
return result
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 8a4fa4c10bf5f..74116dd855e3e 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -11,7 +11,6 @@
cast,
)
import warnings
-import weakref
import numpy as np
@@ -146,8 +145,6 @@ class BaseBlockManager(DataManager):
_blklocs: npt.NDArray[np.intp]
blocks: tuple[Block, ...]
axes: list[Index]
- refs: list[weakref.ref | None] | None
- parent: object
@property
def ndim(self) -> int:
@@ -156,17 +153,11 @@ def ndim(self) -> int:
_known_consolidated: bool
_is_consolidated: bool
- def __init__(self, blocks, axes, refs=None, verify_integrity: bool = True) -> None:
+ def __init__(self, blocks, axes, verify_integrity: bool = True) -> None:
raise NotImplementedError
@classmethod
- def from_blocks(
- cls: type_t[T],
- blocks: list[Block],
- axes: list[Index],
- refs: list[weakref.ref | None] | None = None,
- parent: object = None,
- ) -> T:
+ def from_blocks(cls: type_t[T], blocks: list[Block], axes: list[Index]) -> T:
raise NotImplementedError
@property
@@ -254,19 +245,7 @@ def _has_no_reference_block(self, blkno: int) -> bool:
(whether it references another array or is itself being referenced)
Returns True if the block has no references.
"""
- # TODO(CoW) include `or self.refs[blkno]() is None` ?
- return (
- self.refs is None or self.refs[blkno] is None
- ) and weakref.getweakrefcount(self.blocks[blkno]) == 0
-
- def _clear_reference_block(self, blkno: int) -> None:
- """
- Clear any reference for column `i`.
- """
- if self.refs is not None:
- self.refs[blkno] = None
- if com.all_none(*self.refs):
- self.parent = None
+ return not self.blocks[blkno].refs.has_reference()
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
@@ -580,23 +559,17 @@ def _combine(
# TODO(CoW) we could optimize here if we know that the passed blocks
# are fully "owned" (eg created from an operation, not coming from
# an existing manager)
- new_refs: list[weakref.ref | None] | None = None if copy else []
for b in blocks:
nb = b.copy(deep=copy)
nb.mgr_locs = BlockPlacement(inv_indexer[nb.mgr_locs.indexer])
new_blocks.append(nb)
- if not copy:
- # None has no attribute "append"
- new_refs.append(weakref.ref(b)) # type: ignore[union-attr]
axes = list(self.axes)
if index is not None:
axes[-1] = index
axes[0] = self.items.take(indexer)
- return type(self).from_blocks(
- new_blocks, axes, new_refs, parent=None if copy else self
- )
+ return type(self).from_blocks(new_blocks, axes)
@property
def nblocks(self) -> int:
@@ -636,17 +609,7 @@ def copy_func(ax):
new_axes = list(self.axes)
res = self.apply("copy", deep=deep)
- new_refs: list[weakref.ref | None] | None
- if deep:
- new_refs = None
- parent = None
- else:
- new_refs = [weakref.ref(blk) for blk in self.blocks]
- parent = self
-
res.axes = new_axes
- res.refs = new_refs
- res.parent = parent
if self.ndim > 1:
# Avoid needing to re-compute these
@@ -670,7 +633,7 @@ def consolidate(self: T) -> T:
if self.is_consolidated():
return self
- bm = type(self)(self.blocks, self.axes, self.refs, verify_integrity=False)
+ bm = type(self)(self.blocks, self.axes, verify_integrity=False)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
@@ -732,13 +695,12 @@ def reindex_indexer(
raise IndexError("Requested axis not found in manager")
if axis == 0:
- new_blocks, new_refs = self._slice_take_blocks_ax0(
+ new_blocks = self._slice_take_blocks_ax0(
indexer,
fill_value=fill_value,
only_slice=only_slice,
use_na_proxy=use_na_proxy,
)
- parent = None if com.all_none(*new_refs) else self
else:
new_blocks = [
blk.take_nd(
@@ -750,13 +712,11 @@ def reindex_indexer(
)
for blk in self.blocks
]
- new_refs = None
- parent = None
new_axes = list(self.axes)
new_axes[axis] = new_axis
- new_mgr = type(self).from_blocks(new_blocks, new_axes, new_refs, parent=parent)
+ new_mgr = type(self).from_blocks(new_blocks, new_axes)
if axis == 1:
# We can avoid the need to rebuild these
new_mgr._blknos = self.blknos.copy()
@@ -770,7 +730,7 @@ def _slice_take_blocks_ax0(
only_slice: bool = False,
*,
use_na_proxy: bool = False,
- ) -> tuple[list[Block], list[weakref.ref | None]]:
+ ) -> list[Block]:
"""
Slice/take blocks along axis=0.
@@ -803,11 +763,9 @@ def _slice_take_blocks_ax0(
# GH#32959 EABlock would fail since we can't make 0-width
# TODO(EA2D): special casing unnecessary with 2D EAs
if sllen == 0:
- return [], []
+ return []
bp = BlockPlacement(slice(0, sllen))
- return [blk.getitem_block_columns(slobj, new_mgr_locs=bp)], [
- weakref.ref(blk)
- ]
+ return [blk.getitem_block_columns(slobj, new_mgr_locs=bp)]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_value is None:
fill_value = blk.fill_value
@@ -821,9 +779,7 @@ def _slice_take_blocks_ax0(
)
for i, ml in enumerate(slobj)
]
- # We have
- # all(np.shares_memory(nb.values, blk.values) for nb in blocks)
- return blocks, [weakref.ref(blk)] * len(blocks)
+ return blocks
else:
bp = BlockPlacement(slice(0, sllen))
return [
@@ -833,7 +789,7 @@ def _slice_take_blocks_ax0(
new_mgr_locs=bp,
fill_value=fill_value,
)
- ], [None]
+ ]
if sl_type == "slice":
blknos = self.blknos[slobj]
@@ -849,7 +805,6 @@ def _slice_take_blocks_ax0(
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
blocks = []
- refs: list[weakref.ref | None] = []
group = not only_slice
for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group):
if blkno == -1:
@@ -862,7 +817,6 @@ def _slice_take_blocks_ax0(
use_na_proxy=use_na_proxy,
)
)
- refs.append(None)
else:
blk = self.blocks[blkno]
@@ -876,7 +830,6 @@ def _slice_take_blocks_ax0(
newblk = blk.copy(deep=False)
newblk.mgr_locs = BlockPlacement(slice(mgr_loc, mgr_loc + 1))
blocks.append(newblk)
- refs.append(weakref.ref(blk))
else:
# GH#32779 to avoid the performance penalty of copying,
@@ -889,7 +842,6 @@ def _slice_take_blocks_ax0(
if isinstance(taker, slice):
nb = blk.getitem_block_columns(taker, new_mgr_locs=mgr_locs)
blocks.append(nb)
- refs.append(weakref.ref(blk))
elif only_slice:
# GH#33597 slice instead of take, so we get
# views instead of copies
@@ -899,13 +851,11 @@ def _slice_take_blocks_ax0(
nb = blk.getitem_block_columns(slc, new_mgr_locs=bp)
# We have np.shares_memory(nb.values, blk.values)
blocks.append(nb)
- refs.append(weakref.ref(blk))
else:
nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs)
blocks.append(nb)
- refs.append(None)
- return blocks, refs
+ return blocks
def _make_na_block(
self, placement: BlockPlacement, fill_value=None, use_na_proxy: bool = False
@@ -990,8 +940,6 @@ def __init__(
self,
blocks: Sequence[Block],
axes: Sequence[Index],
- refs: list[weakref.ref | None] | None = None,
- parent: object = None,
verify_integrity: bool = True,
) -> None:
@@ -1023,28 +971,13 @@ def _verify_integrity(self) -> None:
f"block items\n# manager items: {len(self.items)}, # "
f"tot_items: {tot_items}"
)
- if self.refs is not None:
- if len(self.refs) != len(self.blocks):
- raise AssertionError(
- "Number of passed refs must equal the number of blocks: "
- f"{len(self.refs)} refs vs {len(self.blocks)} blocks."
- "\nIf you see this error, please report a bug at "
- "https://github.com/pandas-dev/pandas/issues"
- )
@classmethod
- def from_blocks(
- cls,
- blocks: list[Block],
- axes: list[Index],
- refs: list[weakref.ref | None] | None = None,
- parent: object = None,
- ) -> BlockManager:
+ def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> BlockManager:
"""
Constructor for BlockManager and SingleBlockManager with same signature.
"""
- parent = parent if using_copy_on_write() else None
- return cls(blocks, axes, refs, parent, verify_integrity=False)
+ return cls(blocks, axes, verify_integrity=False)
# ----------------------------------------------------------------
# Indexing
@@ -1063,10 +996,14 @@ def fast_xs(self, loc: int) -> SingleBlockManager:
"""
if len(self.blocks) == 1:
result = self.blocks[0].iget((slice(None), loc))
- block = new_block(result, placement=slice(0, len(result)), ndim=1)
# in the case of a single block, the new block is a view
- ref = weakref.ref(self.blocks[0])
- return SingleBlockManager(block, self.axes[0], [ref], parent=self)
+ block = new_block(
+ result,
+ placement=slice(0, len(result)),
+ ndim=1,
+ refs=self.blocks[0].refs,
+ )
+ return SingleBlockManager(block, self.axes[0])
dtype = interleaved_dtype([blk.dtype for blk in self.blocks])
@@ -1109,10 +1046,10 @@ def iget(self, i: int, track_ref: bool = True) -> SingleBlockManager:
# shortcut for select a single-dim from a 2-dim BM
bp = BlockPlacement(slice(0, len(values)))
- nb = type(block)(values, placement=bp, ndim=1)
- ref = weakref.ref(block) if track_ref else None
- parent = self if track_ref else None
- return SingleBlockManager(nb, self.axes[1], [ref], parent)
+ nb = type(block)(
+ values, placement=bp, ndim=1, refs=block.refs if track_ref else None
+ )
+ return SingleBlockManager(nb, self.axes[1])
def iget_values(self, i: int) -> ArrayLike:
"""
@@ -1244,7 +1181,7 @@ def value_getitem(placement):
self._iset_split_block(blkno_l, blk_locs)
if len(removed_blknos):
- # Remove blocks & update blknos and refs accordingly
+ # Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
@@ -1255,18 +1192,14 @@ def value_getitem(placement):
self.blocks = tuple(
blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
)
- if self.refs is not None:
- self.refs = [
- ref
- for i, ref in enumerate(self.refs)
- if i not in set(removed_blknos)
- ]
if unfit_val_locs:
unfit_idxr = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_idxr)
new_blocks: list[Block] = []
+ # TODO(CoW) is this always correct to assume that the new_blocks
+ # are not referencing anything else?
if value_is_extension_type:
# This code (ab-)uses the fact that EA blocks contain only
# one item.
@@ -1297,10 +1230,6 @@ def value_getitem(placement):
self._blklocs[unfit_idxr] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
- # TODO(CoW) is this always correct to assume that the new_blocks
- # are not referencing anything else?
- if self.refs is not None:
- self.refs = list(self.refs) + [None] * len(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
@@ -1337,13 +1266,6 @@ def _iset_split_block(
first_nb = nbs_tup[0]
nbs_tup = tuple(nbs_tup[1:])
- if self.refs is not None:
- self.refs.extend([self.refs[blkno_l]] * len(nbs_tup))
-
- if value is not None:
- # Only clear if we set new values
- self._clear_reference_block(blkno_l)
-
nr_blocks = len(self.blocks)
blocks_tup = (
self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1 :] + nbs_tup
@@ -1373,7 +1295,6 @@ def _iset_single(
if using_copy_on_write() and not self._has_no_reference_block(blkno):
# perform Copy-on-Write and clear the reference
copy = True
- self._clear_reference_block(blkno)
iloc = self.blklocs[loc]
blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy)
return
@@ -1382,7 +1303,6 @@ def _iset_single(
old_blocks = self.blocks
new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1 :]
self.blocks = new_blocks
- self._clear_reference_block(blkno)
return
def column_setitem(
@@ -1400,7 +1320,6 @@ def column_setitem(
blocks = list(self.blocks)
blocks[blkno] = blocks[blkno].copy()
self.blocks = tuple(blocks)
- self._clear_reference_block(blkno)
# this manager is only created temporarily to mutate the values in place
# so don't track references, otherwise the `setitem` would perform CoW again
@@ -1434,6 +1353,7 @@ def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None:
value = ensure_block_shape(value, ndim=self.ndim)
bp = BlockPlacement(slice(loc, loc + 1))
+ # TODO(CoW) do we always "own" the passed `value`?
block = new_block_2d(values=value, placement=bp)
if not len(self.blocks):
@@ -1446,9 +1366,6 @@ def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None:
self.axes[0] = new_axis
self.blocks += (block,)
- # TODO(CoW) do we always "own" the passed `value`?
- if self.refs is not None:
- self.refs += [None]
self._known_consolidated = False
@@ -1502,12 +1419,10 @@ def idelete(self, indexer) -> BlockManager:
is_deleted[indexer] = True
taker = (~is_deleted).nonzero()[0]
- nbs, new_refs = self._slice_take_blocks_ax0(taker, only_slice=True)
+ nbs = self._slice_take_blocks_ax0(taker, only_slice=True)
new_columns = self.items[~is_deleted]
axes = [new_columns, self.axes[1]]
- # TODO this might not be needed (can a delete ever be done in chained manner?)
- parent = None if com.all_none(*new_refs) else self
- return type(self)(tuple(nbs), axes, new_refs, parent, verify_integrity=False)
+ return type(self)(tuple(nbs), axes, verify_integrity=False)
# ----------------------------------------------------------------
# Block-wise Operation
@@ -1854,10 +1769,7 @@ def _consolidate_inplace(self) -> None:
# the DataFrame's _item_cache. The exception is for newly-created
# BlockManager objects not yet attached to a DataFrame.
if not self.is_consolidated():
- if self.refs is None:
- self.blocks = _consolidate(self.blocks)
- else:
- self.blocks, self.refs = _consolidate_with_refs(self.blocks, self.refs)
+ self.blocks = _consolidate(self.blocks)
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
@@ -1879,8 +1791,6 @@ def __init__(
self,
block: Block,
axis: Index,
- refs: list[weakref.ref | None] | None = None,
- parent: object = None,
verify_integrity: bool = False,
) -> None:
# Assertions disabled for performance
@@ -1889,25 +1799,19 @@ def __init__(
self.axes = [axis]
self.blocks = (block,)
- self.refs = refs
- self.parent = parent if using_copy_on_write() else None
@classmethod
def from_blocks(
cls,
blocks: list[Block],
axes: list[Index],
- refs: list[weakref.ref | None] | None = None,
- parent: object = None,
) -> SingleBlockManager:
"""
Constructor for BlockManager and SingleBlockManager with same signature.
"""
assert len(blocks) == 1
assert len(axes) == 1
- if refs is not None:
- assert len(refs) == 1
- return cls(blocks[0], axes[0], refs, parent, verify_integrity=False)
+ return cls(blocks[0], axes[0], verify_integrity=False)
@classmethod
def from_array(cls, array: ArrayLike, index: Index) -> SingleBlockManager:
@@ -1924,13 +1828,9 @@ def to_2d_mgr(self, columns: Index) -> BlockManager:
blk = self.blocks[0]
arr = ensure_block_shape(blk.values, ndim=2)
bp = BlockPlacement(0)
- new_blk = type(blk)(arr, placement=bp, ndim=2)
+ new_blk = type(blk)(arr, placement=bp, ndim=2, refs=blk.refs)
axes = [columns, self.axes[0]]
- refs: list[weakref.ref | None] = [weakref.ref(blk)]
- parent = self if using_copy_on_write() else None
- return BlockManager(
- [new_blk], axes=axes, refs=refs, parent=parent, verify_integrity=False
- )
+ return BlockManager([new_blk], axes=axes, verify_integrity=False)
def _has_no_reference(self, i: int = 0) -> bool:
"""
@@ -1938,9 +1838,7 @@ def _has_no_reference(self, i: int = 0) -> bool:
(whether it references another array or is itself being referenced)
Returns True if the column has no references.
"""
- return (self.refs is None or self.refs[0] is None) and weakref.getweakrefcount(
- self.blocks[0]
- ) == 0
+ return not self.blocks[0].refs.has_reference()
def __getstate__(self):
block_values = [b.values for b in self.blocks]
@@ -2008,19 +1906,18 @@ def getitem_mgr(self, indexer: slice | np.ndarray) -> SingleBlockManager:
and com.is_bool_indexer(indexer)
and indexer.all()
):
- return type(self)(blk, self.index, [weakref.ref(blk)], parent=self)
+ return type(self)(blk.copy(deep=False), self.index)
array = blk._slice(indexer)
if array.ndim > 1:
# This will be caught by Series._get_values
raise ValueError("dimension-expanding indexing not allowed")
bp = BlockPlacement(slice(0, len(array)))
- block = type(blk)(array, placement=bp, ndim=1)
+ # TODO(CoW) in theory only need to track reference if new_array is a view
+ block = type(blk)(array, placement=bp, ndim=1, refs=blk.refs)
new_idx = self.index[indexer]
- # TODO(CoW) in theory only need to track reference if new_array is a view
- ref = weakref.ref(blk)
- return type(self)(block, new_idx, [ref], parent=self)
+ return type(self)(block, new_idx)
def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleBlockManager:
# Assertion disabled for performance
@@ -2031,11 +1928,11 @@ def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleBlockManager:
blk = self._block
array = blk._slice(slobj)
bp = BlockPlacement(slice(0, len(array)))
- block = type(blk)(array, placement=bp, ndim=1)
- new_index = self.index._getitem_slice(slobj)
# TODO this method is only used in groupby SeriesSplitter at the moment,
- # so passing refs / parent is not yet covered by the tests
- return type(self)(block, new_index, [weakref.ref(blk)], parent=self)
+ # so passing refs is not yet covered by the tests
+ block = type(blk)(array, placement=bp, ndim=1, refs=blk.refs)
+ new_index = self.index._getitem_slice(slobj)
+ return type(self)(block, new_index)
@property
def index(self) -> Index:
@@ -2081,8 +1978,6 @@ def setitem_inplace(self, indexer, value) -> None:
"""
if using_copy_on_write() and not self._has_no_reference(0):
self.blocks = (self._block.copy(),)
- self.refs = None
- self.parent = None
self._cache.clear()
super().setitem_inplace(indexer, value)
@@ -2097,9 +1992,6 @@ def idelete(self, indexer) -> SingleBlockManager:
self.blocks = (nb,)
self.axes[0] = self.axes[0].delete(indexer)
self._cache.clear()
- # clear reference since delete always results in a new array
- self.refs = None
- self.parent = None
return self
def fast_xs(self, loc):
@@ -2319,31 +2211,6 @@ def _consolidate(blocks: tuple[Block, ...]) -> tuple[Block, ...]:
return tuple(new_blocks)
-def _consolidate_with_refs(
- blocks: tuple[Block, ...], refs
-) -> tuple[tuple[Block, ...], list[weakref.ref | None]]:
- """
- Merge blocks having same dtype, exclude non-consolidating blocks, handling
- refs
- """
- gkey = lambda x: x[0]._consolidate_key
- grouper = itertools.groupby(sorted(zip(blocks, refs), key=gkey), gkey)
-
- new_blocks: list[Block] = []
- new_refs: list[weakref.ref | None] = []
- for (_can_consolidate, dtype), group_blocks_refs in grouper:
- group_blocks, group_refs = list(zip(*list(group_blocks_refs)))
- merged_blocks, consolidated = _merge_blocks(
- list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate
- )
- new_blocks = extend_blocks(merged_blocks, new_blocks)
- if consolidated:
- new_refs.extend([None])
- else:
- new_refs.extend(group_refs)
- return tuple(new_blocks), new_refs
-
-
def _merge_blocks(
blocks: list[Block], dtype: DtypeObj, can_consolidate: bool
) -> tuple[list[Block], bool]:
diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py
index 477dc98aa2b2b..24fc51a96d9df 100644
--- a/pandas/core/internals/ops.py
+++ b/pandas/core/internals/ops.py
@@ -36,7 +36,7 @@ def _iter_block_pairs(
left_ea = blk_vals.ndim == 1
- rblks, _ = right._slice_take_blocks_ax0(locs.indexer, only_slice=True)
+ rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True)
# Assertions are disabled for performance, but should hold:
# if left_ea:
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index f8220649bf890..43e0b77a90a85 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -14,7 +14,6 @@
cast,
overload,
)
-import weakref
import numpy as np
@@ -551,8 +550,9 @@ def __init__(
obj = sample._constructor(obj, columns=[name], copy=False)
if using_copy_on_write():
# TODO(CoW): Remove when ref tracking in constructors works
- obj._mgr.parent = original_obj # type: ignore[union-attr]
- obj._mgr.refs = [weakref.ref(original_obj._mgr.blocks[0])] # type: ignore[union-attr] # noqa: E501
+ for i, block in enumerate(original_obj._mgr.blocks): # type: ignore[union-attr] # noqa
+ obj._mgr.blocks[i].refs = block.refs # type: ignore[union-attr] # noqa
+ obj._mgr.blocks[i].refs.add_reference(obj._mgr.blocks[i]) # type: ignore[arg-type, union-attr] # noqa
obj.columns = [new_name]
@@ -612,13 +612,9 @@ def get_result(self):
typ=get_option("mode.data_manager"),
)
if using_copy_on_write() and not self.copy:
- parents = [obj._mgr for obj in self.objs]
- mgr.parent = parents # type: ignore[union-attr]
- refs = [
- weakref.ref(obj._mgr.blocks[0]) # type: ignore[union-attr]
- for obj in self.objs
- ]
- mgr.refs = refs # type: ignore[union-attr]
+ for i, obj in enumerate(self.objs):
+ mgr.blocks[i].refs = obj._mgr.blocks[0].refs # type: ignore[union-attr] # noqa
+ mgr.blocks[i].refs.add_reference(mgr.blocks[i]) # type: ignore[arg-type, union-attr] # noqa
df = cons(mgr, copy=False)
df.columns = columns
return df.__finalize__(self, method="concat")
diff --git a/pandas/tests/copy_view/test_constructors.py b/pandas/tests/copy_view/test_constructors.py
index c04c733e5ee1d..f5805455e326f 100644
--- a/pandas/tests/copy_view/test_constructors.py
+++ b/pandas/tests/copy_view/test_constructors.py
@@ -18,7 +18,7 @@ def test_series_from_series(using_copy_on_write):
assert np.shares_memory(ser.values, result.values)
if using_copy_on_write:
- assert result._mgr.refs is not None
+ assert result._mgr.blocks[0].refs.has_reference()
if using_copy_on_write:
# mutating new series copy doesn't mutate original
@@ -72,4 +72,4 @@ def test_series_from_series_with_reindex(using_copy_on_write):
result = Series(ser, index=[0, 1, 2, 3])
assert not np.shares_memory(ser.values, result.values)
if using_copy_on_write:
- assert result._mgr.refs is None or result._mgr.refs[0] is None
+ assert not result._mgr.blocks[0].refs.has_reference()
diff --git a/pandas/tests/copy_view/test_core_functionalities.py b/pandas/tests/copy_view/test_core_functionalities.py
new file mode 100644
index 0000000000000..204e26b35d680
--- /dev/null
+++ b/pandas/tests/copy_view/test_core_functionalities.py
@@ -0,0 +1,88 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+from pandas.tests.copy_view.util import get_array
+
+
+def test_assigning_to_same_variable_removes_references(using_copy_on_write):
+ df = DataFrame({"a": [1, 2, 3]})
+ df = df.reset_index()
+ if using_copy_on_write:
+ assert df._mgr._has_no_reference(1)
+ arr = get_array(df, "a")
+ df.iloc[0, 1] = 100 # Write into a
+
+ assert np.shares_memory(arr, get_array(df, "a"))
+
+
+def test_setitem_dont_track_unnecessary_references(using_copy_on_write):
+ df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})
+
+ df["b"] = 100
+ arr = get_array(df, "a")
+ # We split the block in setitem, if we are not careful the new blocks will
+ # reference each other triggering a copy
+ df.iloc[0, 0] = 100
+ assert np.shares_memory(arr, get_array(df, "a"))
+
+
+def test_setitem_with_view_copies(using_copy_on_write):
+ df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})
+ view = df[:]
+ expected = df.copy()
+
+ df["b"] = 100
+ arr = get_array(df, "a")
+ df.iloc[0, 0] = 100 # Check that we correctly track reference
+ if using_copy_on_write:
+ assert not np.shares_memory(arr, get_array(df, "a"))
+ tm.assert_frame_equal(view, expected)
+
+
+def test_setitem_with_view_invalidated_does_not_copy(using_copy_on_write, request):
+ df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})
+ view = df[:]
+
+ df["b"] = 100
+ arr = get_array(df, "a")
+ view = None # noqa
+ df.iloc[0, 0] = 100
+ if using_copy_on_write:
+ # Setitem split the block. Since the old block shared data with view
+ # all the new blocks are referencing view and each other. When view
+ # goes out of scope, they don't share data with any other block,
+ # so we should not trigger a copy
+ mark = pytest.mark.xfail(
+ reason="blk.delete does not track references correctly"
+ )
+ request.node.add_marker(mark)
+ assert np.shares_memory(arr, get_array(df, "a"))
+
+
+def test_out_of_scope(using_copy_on_write):
+ def func():
+ df = DataFrame({"a": [1, 2], "b": 1.5, "c": 1})
+ # create some subset
+ result = df[["a", "b"]]
+ return result
+
+ result = func()
+ if using_copy_on_write:
+ assert not result._mgr.blocks[0].refs.has_reference()
+ assert not result._mgr.blocks[1].refs.has_reference()
+
+
+def test_delete(using_copy_on_write):
+ df = DataFrame(np.random.randn(4, 3), columns=["a", "b", "c"])
+ del df["b"]
+ if using_copy_on_write:
+ # TODO: This should not have references, delete makes a shallow copy
+ # but keeps the blocks alive
+ assert df._mgr.blocks[0].refs.has_reference()
+ assert df._mgr.blocks[1].refs.has_reference()
+
+ df = df[["a"]]
+ if using_copy_on_write:
+ assert not df._mgr.blocks[0].refs.has_reference()
diff --git a/pandas/tests/copy_view/test_internals.py b/pandas/tests/copy_view/test_internals.py
index 506ae7d3465c5..67022e533dbc4 100644
--- a/pandas/tests/copy_view/test_internals.py
+++ b/pandas/tests/copy_view/test_internals.py
@@ -20,52 +20,32 @@ def test_consolidate(using_copy_on_write):
subset = df[:]
# each block of subset references a block of df
- assert subset._mgr.refs is not None and all(
- ref is not None for ref in subset._mgr.refs
- )
+ assert all(blk.refs.has_reference() for blk in subset._mgr.blocks)
# consolidate the two int64 blocks
subset._consolidate_inplace()
# the float64 block still references the parent one because it still a view
- assert subset._mgr.refs[0] is not None
+ assert subset._mgr.blocks[0].refs.has_reference()
# equivalent of assert np.shares_memory(df["b"].values, subset["b"].values)
# but avoids caching df["b"]
assert np.shares_memory(get_array(df, "b"), get_array(subset, "b"))
# the new consolidated int64 block does not reference another
- assert subset._mgr.refs[1] is None
+ assert not subset._mgr.blocks[1].refs.has_reference()
# the parent dataframe now also only is linked for the float column
- assert df._mgr._has_no_reference(0)
- assert not df._mgr._has_no_reference(1)
- assert df._mgr._has_no_reference(2)
+ assert not df._mgr.blocks[0].refs.has_reference()
+ assert df._mgr.blocks[1].refs.has_reference()
+ assert not df._mgr.blocks[2].refs.has_reference()
# and modifying subset still doesn't modify parent
if using_copy_on_write:
subset.iloc[0, 1] = 0.0
- assert df._mgr._has_no_reference(1)
+ assert not df._mgr.blocks[1].refs.has_reference()
assert df.loc[0, "b"] == 0.1
-@td.skip_array_manager_invalid_test
-def test_clear_parent(using_copy_on_write):
- # ensure to clear parent reference if we are no longer viewing data from parent
- if not using_copy_on_write:
- pytest.skip("test only relevant when using copy-on-write")
-
- df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
- subset = df[:]
- assert subset._mgr.parent is not None
-
- # replacing existing columns loses the references to the parent df
- subset["a"] = 0
- assert subset._mgr.parent is not None
- # when losing the last reference, also the parent should be reset
- subset["b"] = 0
- assert subset._mgr.parent is None
-
-
@pytest.mark.single_cpu
@td.skip_array_manager_invalid_test
def test_switch_options():
diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
index 26038725c544f..b814b9089aabd 100644
--- a/pandas/tests/copy_view/test_methods.py
+++ b/pandas/tests/copy_view/test_methods.py
@@ -22,7 +22,8 @@ def test_copy(using_copy_on_write):
# the deep copy doesn't share memory
assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
if using_copy_on_write:
- assert df_copy._mgr.refs is None
+ assert not df_copy._mgr.blocks[0].refs.has_reference()
+ assert not df_copy._mgr.blocks[1].refs.has_reference()
# mutating copy doesn't mutate original
df_copy.iloc[0, 0] = 0
@@ -36,7 +37,8 @@ def test_copy_shallow(using_copy_on_write):
# the shallow copy still shares memory
assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
if using_copy_on_write:
- assert df_copy._mgr.refs is not None
+ assert df_copy._mgr.blocks[0].refs.has_reference()
+ assert df_copy._mgr.blocks[1].refs.has_reference()
if using_copy_on_write:
# mutating shallow copy doesn't mutate original
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
This pr removes the reference tracking from the manager level and adds it to the block level. Tests are passing locally.
Right now we are tracking the references also in non-CoW mode, but we are only accessing the object in CoW mode, so should not be harmful. Can also disable this, but wanted to avoid accessing ``using_copy_on_write()`` constantly.
Two things that the previous mechanism couldn't handle:
```
df = DataFrame({"a": [1, 2, 3]})
df = df.reset_index()
df.iloc[0, 1]
```
This triggered a copy, because the reference to the original df was kept alive. Same example: We were not able to free the memory of old object, if the result of an operation was assigned to the same variable.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51144 | 2023-02-03T16:13:46Z | 2023-02-08T09:01:09Z | 2023-02-08T09:01:09Z | 2023-02-22T09:19:37Z |
DOC Correcting EX02 errors | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 9c8d48bc6ba45..cb66a1f350e8f 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -579,8 +579,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
MSG='Partially validate docstrings (EX02)' ; echo $MSG
$BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX02 --ignore_functions \
pandas.DataFrame.plot.line \
- pandas.DataFrame.std \
- pandas.DataFrame.var \
pandas.Index.factorize \
pandas.Period.strftime \
pandas.Series.factorize \
@@ -590,8 +588,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.Series.sparse.density \
pandas.Series.sparse.npoints \
pandas.Series.sparse.sp_values \
- pandas.Series.std \
- pandas.Series.var \
pandas.Timestamp.fromtimestamp \
pandas.api.types.infer_dtype \
pandas.api.types.is_bool_dtype \
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 8fa86e80e1a44..62e8921dc2270 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -11872,12 +11872,14 @@ def _doc_params(cls):
>>> df.std()
age 18.786076
height 0.237417
+dtype: float64
Alternatively, `ddof=0` can be set to normalize by N instead of N-1:
>>> df.std(ddof=0)
age 16.269219
-height 0.205609"""
+height 0.205609
+dtype: float64"""
_var_examples = """
@@ -11898,12 +11900,14 @@ def _doc_params(cls):
>>> df.var()
age 352.916667
height 0.056367
+dtype: float64
Alternatively, ``ddof=0`` can be set to normalize by N instead of N-1:
>>> df.var(ddof=0)
age 264.687500
-height 0.042275"""
+height 0.042275
+dtype: float64"""
_bool_doc = """
{desc}
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Towards https://github.com/pandas-dev/pandas/issues/37875 and https://github.com/pandas-dev/pandas/issues/27977 | https://api.github.com/repos/pandas-dev/pandas/pulls/51143 | 2023-02-03T12:08:08Z | 2023-02-03T16:10:19Z | 2023-02-03T16:10:19Z | 2023-02-04T15:30:26Z |
DEPR: Remove NumericIndex | diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index 3e171b57af3ba..818ea1e6ef9d0 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -363,14 +363,7 @@ def is_class_equiv(idx: Index) -> bool:
This only checks class equivalence. There is a separate check that the
dtype is int64.
"""
- from pandas.core.indexes.numeric import NumericIndex
-
- if isinstance(idx, RangeIndex):
- return True
- elif type(idx) is Index or type(idx) is NumericIndex:
- return True
- else:
- return False
+ return type(idx) is Index or isinstance(idx, RangeIndex)
if type(left) == type(right):
return
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 1a22c3fe327e9..0037cd20e8c1e 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1313,8 +1313,7 @@ def __init__(self, obj: DataFrame, n: int, keep: str, columns: IndexLabel) -> No
self.columns = columns
def compute(self, method: str) -> DataFrame:
-
- from pandas.core.api import NumericIndex
+ from pandas.core.api import Index
n = self.n
frame = self.obj
@@ -1342,7 +1341,7 @@ def get_indexer(current_indexer, other_indexer):
original_index = frame.index
cur_frame = frame = frame.reset_index(drop=True)
cur_n = n
- indexer = NumericIndex([], dtype=np.int64)
+ indexer = Index([], dtype=np.int64)
for i, column in enumerate(columns):
# For each column we apply method to cur_frame[column].
diff --git a/pandas/core/api.py b/pandas/core/api.py
index 0dffa681acfc1..c0b828d9330b4 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -54,7 +54,6 @@
Index,
IntervalIndex,
MultiIndex,
- NumericIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
@@ -117,7 +116,6 @@
"NaT",
"notna",
"notnull",
- "NumericIndex",
"Period",
"PeriodDtype",
"PeriodIndex",
diff --git a/pandas/core/base.py b/pandas/core/base.py
index ab7a79162a0f2..b708ff44b39d0 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1161,7 +1161,7 @@ def factorize(
uniques = uniques.astype(np.float32)
if isinstance(self, ABCIndex):
- # preserve e.g. NumericIndex, preserve MultiIndex
+ # preserve e.g. MultiIndex
uniques = self._constructor(uniques)
else:
from pandas import Index
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py
index 190bd9d940ca3..5904ba4895aef 100644
--- a/pandas/core/dtypes/generic.py
+++ b/pandas/core/dtypes/generic.py
@@ -93,7 +93,6 @@ def _subclasscheck(cls, inst) -> bool:
{
"index",
"rangeindex",
- "numericindex",
"multiindex",
"datetimeindex",
"timedeltaindex",
diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index 08191363bfc93..fcf529f5be9ac 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -26,7 +26,6 @@
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.interval import IntervalIndex
from pandas.core.indexes.multi import MultiIndex
-from pandas.core.indexes.numeric import NumericIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.range import RangeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
@@ -46,7 +45,6 @@
__all__ = [
"Index",
"MultiIndex",
- "NumericIndex",
"CategoricalIndex",
"IntervalIndex",
"RangeIndex",
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index e1092996abce5..b607d170d5385 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -317,7 +317,6 @@ class Index(IndexOpsMixin, PandasObject):
DatetimeIndex : Index of datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
PeriodIndex : Index of Period data.
- NumericIndex : Index of numpy int/uint/float data.
Notes
-----
@@ -539,7 +538,6 @@ def __new__(
klass = cls._dtype_to_subclass(arr.dtype)
- # _ensure_array _may_ be unnecessary once NumericIndex etc are gone
arr = klass._ensure_array(arr, arr.dtype, copy=False)
return klass._simple_new(arr, name)
@@ -596,18 +594,11 @@ def _dtype_to_subclass(cls, dtype: DtypeObj):
return TimedeltaIndex
- elif dtype.kind in ["i", "f", "u"]:
- from pandas.core.api import NumericIndex
-
- return NumericIndex
-
elif dtype.kind == "O":
# NB: assuming away MultiIndex
return Index
- elif issubclass(
- dtype.type, (str, bool, np.bool_, complex, np.complex64, np.complex128)
- ):
+ elif issubclass(dtype.type, str) or is_numeric_dtype(dtype):
return Index
raise NotImplementedError(dtype)
@@ -1207,10 +1198,6 @@ def __repr__(self) -> str_t:
Return a string representation for this object.
"""
klass_name = type(self).__name__
- from pandas.core.indexes.numeric import NumericIndex
-
- if type(self) is NumericIndex:
- klass_name = "Index"
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
@@ -5375,6 +5362,7 @@ def identical(self, other) -> bool:
for c in self._comparables
)
and type(self) == type(other)
+ and self.dtype == other.dtype
)
@final
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
deleted file mode 100644
index 8113b5ea2bb2a..0000000000000
--- a/pandas/core/indexes/numeric.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from __future__ import annotations
-
-from pandas.core.indexes.base import Index
-
-
-class NumericIndex(Index):
- pass
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 144665401a2a0..ca34fcfc7a625 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -49,13 +49,12 @@
Index,
maybe_extract_name,
)
-from pandas.core.indexes.numeric import NumericIndex
from pandas.core.ops.common import unpack_zerodim_and_defer
_empty_range = range(0)
-class RangeIndex(NumericIndex):
+class RangeIndex(Index):
"""
Immutable Index implementing a monotonic integer range.
@@ -196,7 +195,7 @@ def _validate_dtype(cls, dtype: Dtype | None) -> None:
@cache_readonly
def _constructor(self) -> type[Index]: # type: ignore[override]
"""return the class to use for construction"""
- return NumericIndex
+ return Index
# error: Signature of "_data" incompatible with supertype "Index"
@cache_readonly
@@ -408,7 +407,7 @@ def _shallow_copy(self, values, name: Hashable = no_default):
new_range = range(values[0], values[-1] + diff, diff)
return type(self)._simple_new(new_range, name=name)
else:
- return NumericIndex._simple_new(values, name=name)
+ return self._constructor._simple_new(values, name=name)
def _view(self: RangeIndex) -> RangeIndex:
result = type(self)._simple_new(self._range, name=self._name)
diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py
index 9a93724ca8f37..d826c0a148ebe 100644
--- a/pandas/io/formats/info.py
+++ b/pandas/io/formats/info.py
@@ -165,7 +165,7 @@
>>> s = pd.Series(text_values, index=int_values)
>>> s.info()
<class 'pandas.core.series.Series'>
- NumericIndex: 5 entries, 1 to 5
+ Index: 5 entries, 1 to 5
Series name: None
Non-Null Count Dtype
-------------- -----
@@ -177,7 +177,7 @@
>>> s.info(verbose=False)
<class 'pandas.core.series.Series'>
- NumericIndex: 5 entries, 1 to 5
+ Index: 5 entries, 1 to 5
dtypes: object(1)
memory usage: 80.0+ bytes
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 8bb5948029ca1..4bb7f62fb13bd 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -737,7 +737,7 @@ def test_getitem_setitem_float_labels(self, using_array_manager):
# positional slicing only via iloc!
msg = (
- "cannot do positional indexing on NumericIndex with "
+ "cannot do positional indexing on Index with "
r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/indexes/multi/test_names.py b/pandas/tests/indexes/multi/test_names.py
index 5e3a427bc75ba..e9c65d32cbcd7 100644
--- a/pandas/tests/indexes/multi/test_names.py
+++ b/pandas/tests/indexes/multi/test_names.py
@@ -146,7 +146,6 @@ def test_setting_names_from_levels_raises():
new.index.name = "bar"
assert pd.Index._no_setting_name is False
- assert pd.core.api.NumericIndex._no_setting_name is False
assert pd.RangeIndex._no_setting_name is False
diff --git a/pandas/tests/indexes/numeric/test_numeric.py b/pandas/tests/indexes/numeric/test_numeric.py
index 68da64805ad1e..ed8071afe2ee2 100644
--- a/pandas/tests/indexes/numeric/test_numeric.py
+++ b/pandas/tests/indexes/numeric/test_numeric.py
@@ -43,8 +43,6 @@ def float_index(self, dtype):
return self._index_cls([0.0, 2.5, 5.0, 7.5, 10.0], dtype=dtype)
def test_repr_roundtrip(self, index):
- from pandas.core.api import NumericIndex # noqa: F401
-
tm.assert_index_equal(eval(repr(index)), index, exact=True)
def check_coerce(self, a, b, is_float_index=True):
diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py
index fdea2945a8c0b..3b3d6dbaf697f 100644
--- a/pandas/tests/indexes/test_any_index.py
+++ b/pandas/tests/indexes/test_any_index.py
@@ -154,8 +154,6 @@ def test_str(self, index):
# test the string repr
index.name = "foo"
assert "'foo'" in str(index)
- if type(index).__name__ == "NumericIndex": # TODO: remove NumericIndex
- return
assert type(index).__name__ in str(index)
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index 957e7031f9bf6..40440bd8e0ff8 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -31,7 +31,6 @@
RangeIndex,
)
import pandas._testing as tm
-from pandas.core.api import NumericIndex
class TestCommon:
@@ -431,7 +430,7 @@ def test_hasnans_isnans(self, index_flat):
if len(index) == 0:
return
- elif isinstance(index, NumericIndex) and is_integer_dtype(index.dtype):
+ elif is_integer_dtype(index.dtype):
return
elif index.dtype == bool:
# values[1] = np.nan below casts to True!
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 51a65d88d7b32..77b068aba765c 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -646,7 +646,7 @@ def test_selection_api_validation():
# non DatetimeIndex
msg = (
"Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex, "
- "but got an instance of 'NumericIndex'"
+ "but got an instance of 'Index'"
)
with pytest.raises(TypeError, match=msg):
df.resample("2D", level="v")
| - [x] closes #42717
- [x] closes #51020
This finishes the removal of `NumericIndex` from the code base. Note that this PR builds atop of #51132, so you might want to merge that first.
Everything should be finished now after this PR, e.g. the doc updates have been merged (#51111) and the doc string has been updated (#51089). I'll have a final look once this is merged, but this should pretty much finish this issue. | https://api.github.com/repos/pandas-dev/pandas/pulls/51139 | 2023-02-03T07:40:47Z | 2023-02-03T13:02:20Z | 2023-02-03T13:02:20Z | 2023-05-19T02:59:12Z |
CLN: Partially revert #29553 | diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py
index 466eeb768f5f9..14bf26f40ea0d 100644
--- a/pandas/core/array_algos/replace.py
+++ b/pandas/core/array_algos/replace.py
@@ -19,8 +19,6 @@
)
from pandas.core.dtypes.common import (
- is_datetimelike_v_numeric,
- is_numeric_v_string_like,
is_re,
is_re_compilable,
is_scalar,
@@ -44,7 +42,7 @@ def should_use_regex(regex: bool, to_replace: Any) -> bool:
def compare_or_regex_search(
a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_]
-) -> ArrayLike | bool:
+) -> ArrayLike:
"""
Compare two array-like inputs of the same shape or two scalar values
@@ -95,15 +93,6 @@ def _check_comparison_types(
if isinstance(a, np.ndarray):
a = a[mask]
- if is_numeric_v_string_like(a, b):
- # GH#29553 avoid deprecation warnings from numpy
- return np.zeros(a.shape, dtype=bool)
-
- elif is_datetimelike_v_numeric(a, b):
- # GH#29553 avoid deprecation warnings from numpy
- _check_comparison_types(False, a, b)
- return False
-
result = op(a)
if isinstance(result, np.ndarray) and mask is not None:
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index fb9817de2b69b..3281c7fe859e5 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1067,64 +1067,6 @@ def is_numeric_v_string_like(a: ArrayLike, b) -> bool:
)
-# This exists to silence numpy deprecation warnings, see GH#29553
-def is_datetimelike_v_numeric(a, b) -> bool:
- """
- Check if we are comparing a datetime-like object to a numeric object.
- By "numeric," we mean an object that is either of an int or float dtype.
-
- Parameters
- ----------
- a : array-like, scalar
- The first object to check.
- b : array-like, scalar
- The second object to check.
-
- Returns
- -------
- boolean
- Whether we return a comparing a datetime-like to a numeric object.
-
- Examples
- --------
- >>> from datetime import datetime
- >>> dt = np.datetime64(datetime(2017, 1, 1))
- >>>
- >>> is_datetimelike_v_numeric(1, 1)
- False
- >>> is_datetimelike_v_numeric(dt, dt)
- False
- >>> is_datetimelike_v_numeric(1, dt)
- True
- >>> is_datetimelike_v_numeric(dt, 1) # symmetric check
- True
- >>> is_datetimelike_v_numeric(np.array([dt]), 1)
- True
- >>> is_datetimelike_v_numeric(np.array([1]), dt)
- True
- >>> is_datetimelike_v_numeric(np.array([dt]), np.array([1]))
- True
- >>> is_datetimelike_v_numeric(np.array([1]), np.array([2]))
- False
- >>> is_datetimelike_v_numeric(np.array([dt]), np.array([dt]))
- False
- """
- if not hasattr(a, "dtype"):
- a = np.asarray(a)
- if not hasattr(b, "dtype"):
- b = np.asarray(b)
-
- def is_numeric(x):
- """
- Check if an object has a numeric dtype (i.e. integer or float).
- """
- return is_integer_dtype(x) or is_float_dtype(x)
-
- return (needs_i8_conversion(a) and is_numeric(b)) or (
- needs_i8_conversion(b) and is_numeric(a)
- )
-
-
def needs_i8_conversion(arr_or_dtype) -> bool:
"""
Check whether the array or dtype should be converted to int64.
@@ -1790,7 +1732,6 @@ def is_all_strings(value: ArrayLike) -> bool:
"is_datetime64_dtype",
"is_datetime64_ns_dtype",
"is_datetime64tz_dtype",
- "is_datetimelike_v_numeric",
"is_datetime_or_timedelta_dtype",
"is_decimal",
"is_dict_like",
@@ -1809,7 +1750,6 @@ def is_all_strings(value: ArrayLike) -> bool:
"is_number",
"is_numeric_dtype",
"is_any_numeric_dtype",
- "is_numeric_v_string_like",
"is_object_dtype",
"is_period_dtype",
"is_re",
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 5b97cb879d663..211b67d3590ed 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -28,7 +28,6 @@
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
- is_datetimelike_v_numeric,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
@@ -505,8 +504,6 @@ def array_equivalent(
# fastpath when we require that the dtypes match (Block.equals)
if left.dtype.kind in ["f", "c"]:
return _array_equivalent_float(left, right)
- elif is_datetimelike_v_numeric(left.dtype, right.dtype):
- return False
elif needs_i8_conversion(left.dtype):
return _array_equivalent_datetimelike(left, right)
elif is_string_or_object_np_dtype(left.dtype):
@@ -529,10 +526,6 @@ def array_equivalent(
return True
return ((left == right) | (isna(left) & isna(right))).all()
- elif is_datetimelike_v_numeric(left, right):
- # GH#29553 avoid numpy deprecation warning
- return False
-
elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
# datetime64, timedelta64, Period
if not is_dtype_equal(left.dtype, right.dtype):
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 94433fdfbe753..fb980db320bfd 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -677,10 +677,7 @@ def replace_list(
# GH#38086 faster if we know we dont need to check for regex
masks = [missing.mask_missing(values, s[0]) for s in pairs]
- # error: Argument 1 to "extract_bool_array" has incompatible type
- # "Union[ExtensionArray, ndarray, bool]"; expected "Union[ExtensionArray,
- # ndarray]"
- masks = [extract_bool_array(x) for x in masks] # type: ignore[arg-type]
+ masks = [extract_bool_array(x) for x in masks]
rb = [self if inplace else self.copy()]
for i, (src, dest) in enumerate(pairs):
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index ce900ff649eec..73abea30029b1 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -1,7 +1,5 @@
from __future__ import annotations
-from datetime import datetime
-
import numpy as np
import pytest
@@ -517,21 +515,6 @@ def test_is_numeric_v_string_like():
assert com.is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
-def test_is_datetimelike_v_numeric():
- dt = np.datetime64(datetime(2017, 1, 1))
-
- assert not com.is_datetimelike_v_numeric(1, 1)
- assert not com.is_datetimelike_v_numeric(dt, dt)
- assert not com.is_datetimelike_v_numeric(np.array([1]), np.array([2]))
- assert not com.is_datetimelike_v_numeric(np.array([dt]), np.array([dt]))
-
- assert com.is_datetimelike_v_numeric(1, dt)
- assert com.is_datetimelike_v_numeric(1, dt)
- assert com.is_datetimelike_v_numeric(np.array([dt]), 1)
- assert com.is_datetimelike_v_numeric(np.array([1]), dt)
- assert com.is_datetimelike_v_numeric(np.array([dt]), np.array([1]))
-
-
def test_needs_i8_conversion():
assert not com.needs_i8_conversion(str)
assert not com.needs_i8_conversion(np.int64)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51137 | 2023-02-03T02:53:10Z | 2023-02-06T21:09:27Z | 2023-02-06T21:09:27Z | 2023-02-06T22:45:15Z |
CLN: Assorted | diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi
index 0f72b2b72141f..09f4fbec5176e 100644
--- a/pandas/_libs/groupby.pyi
+++ b/pandas/_libs/groupby.pyi
@@ -55,6 +55,7 @@ def group_any_all(
mask: np.ndarray, # const uint8_t[::1]
val_test: Literal["any", "all"],
skipna: bool,
+ nullable: bool,
) -> None: ...
def group_sum(
out: np.ndarray, # complexfloatingintuint_t[:, ::1]
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 998a6f69a930a..da8c367d4fc2a 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -2042,7 +2042,7 @@ def _compute_na_values():
np.uint16: uint16info.max,
np.uint8: uint8info.max,
np.bool_: uint8info.max,
- np.object_: np.nan # oof
+ np.object_: np.nan,
}
return na_values
diff --git a/pandas/_testing/compat.py b/pandas/_testing/compat.py
index e2ac8f779ef0e..bb3bb99a4c6e4 100644
--- a/pandas/_testing/compat.py
+++ b/pandas/_testing/compat.py
@@ -1,11 +1,12 @@
"""
Helpers for sharing tests between DataFrame/Series
"""
+from pandas._typing import DtypeObj
from pandas import DataFrame
-def get_dtype(obj):
+def get_dtype(obj) -> DtypeObj:
if isinstance(obj, DataFrame):
# Note: we are assuming only one column
return obj.dtypes.iat[0]
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 888205366f9e6..4d0354a2aab04 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -17,6 +17,7 @@
- Dtypes
- Misc
"""
+from __future__ import annotations
from collections import abc
from datetime import (
@@ -31,6 +32,7 @@
import os
from typing import (
Callable,
+ Hashable,
Iterator,
)
@@ -1881,7 +1883,7 @@ def __init__(self, **kwargs) -> None:
(pd.NA, pd.NA, pd.NA),
]
)
-def names(request):
+def names(request) -> tuple[Hashable, Hashable, Hashable]:
"""
A 3-tuple of names, the first two for operands, the last for a result.
"""
@@ -1937,7 +1939,7 @@ def indexer_ial(request):
@pytest.fixture
-def using_array_manager():
+def using_array_manager() -> bool:
"""
Fixture to check if the array manager is being used.
"""
@@ -1958,7 +1960,7 @@ def using_copy_on_write() -> bool:
@pytest.fixture(params=warsaws)
-def warsaw(request):
+def warsaw(request) -> str:
"""
tzinfo for Europe/Warsaw using pytz, dateutil, or zoneinfo.
"""
diff --git a/pandas/core/arrays/arrow/dtype.py b/pandas/core/arrays/arrow/dtype.py
index bed2ed113606e..907804ab9e51d 100644
--- a/pandas/core/arrays/arrow/dtype.py
+++ b/pandas/core/arrays/arrow/dtype.py
@@ -4,7 +4,11 @@
import numpy as np
-from pandas._typing import DtypeObj
+from pandas._typing import (
+ TYPE_CHECKING,
+ DtypeObj,
+ type_t,
+)
from pandas.compat import pa_version_under7p0
from pandas.util._decorators import cache_readonly
@@ -16,6 +20,9 @@
if not pa_version_under7p0:
import pyarrow as pa
+if TYPE_CHECKING:
+ from pandas.core.arrays.arrow import ArrowExtensionArray
+
@register_extension_dtype
class ArrowDtype(StorageExtensionDtype):
@@ -113,7 +120,7 @@ def itemsize(self) -> int:
return self.numpy_dtype.itemsize
@classmethod
- def construct_array_type(cls):
+ def construct_array_type(cls) -> type_t[ArrowExtensionArray]:
"""
Return the array type associated with this dtype.
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index cdb5dddf03a64..fd07b472fc3da 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -66,10 +66,7 @@
notna,
)
-from pandas.core import (
- algorithms,
- nanops,
-)
+from pandas.core import algorithms
from pandas.core.apply import (
GroupByApply,
maybe_mangle_lambdas,
@@ -98,6 +95,7 @@
from pandas.plotting import boxplot_frame_groupby
if TYPE_CHECKING:
+ from pandas import Categorical
from pandas.core.generic import NDFrame
# TODO(typing) the return value on this callable should be any *scalar*.
@@ -138,29 +136,6 @@ class NamedAgg(NamedTuple):
aggfunc: AggScalar
-def generate_property(name: str, klass: type[DataFrame | Series]):
- """
- Create a property for a GroupBy subclass to dispatch to DataFrame/Series.
-
- Parameters
- ----------
- name : str
- klass : {DataFrame, Series}
-
- Returns
- -------
- property
- """
-
- def prop(self):
- return self._make_wrapper(name)
-
- parent_method = getattr(klass, name)
- prop.__doc__ = parent_method.__doc__ or ""
- prop.__name__ = name
- return property(prop)
-
-
class SeriesGroupBy(GroupBy[Series]):
def _wrap_agged_manager(self, mgr: Manager) -> Series:
return self.obj._constructor(mgr, name=self.obj.name)
@@ -718,18 +693,13 @@ def value_counts(
else:
# lab is a Categorical with categories an IntervalIndex
- lab = cut(Series(val), bins, include_lowest=True)
- # error: "ndarray" has no attribute "cat"
- lev = lab.cat.categories # type: ignore[attr-defined]
- # error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
- # argument types "Any", "bool", "Union[Any, float]"
- lab = lev.take( # type: ignore[call-overload]
- # error: "ndarray" has no attribute "cat"
- lab.cat.codes, # type: ignore[attr-defined]
+ cat_ser = cut(Series(val), bins, include_lowest=True)
+ cat_obj = cast("Categorical", cat_ser._values)
+ lev = cat_obj.categories
+ lab = lev.take(
+ cat_obj.codes,
allow_fill=True,
- # error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
- # "_na_value"
- fill_value=lev._na_value, # type: ignore[union-attr]
+ fill_value=lev._na_value,
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
@@ -1544,7 +1514,6 @@ def _cython_transform(
**kwargs,
) -> DataFrame:
assert axis == 0 # handled by caller
- # TODO: no tests with self.ndim == 1 for DataFrameGroupBy
# With self.axis == 0, we have multi-block tests
# e.g. test_rank_min_int, test_cython_transform_frame
@@ -2058,17 +2027,7 @@ def idxmax(
axis = self.axis
def func(df):
- res = df._reduce(
- nanops.nanargmax,
- "argmax",
- axis=axis,
- skipna=skipna,
- numeric_only=numeric_only,
- )
- indices = res._values
- index = df._get_axis(axis)
- result = [index[i] if i >= 0 else np.nan for i in indices]
- return df._constructor_sliced(result, index=res.index)
+ return df.idxmax(axis=axis, skipna=skipna, numeric_only=numeric_only)
func.__name__ = "idxmax"
result = self._python_apply_general(
@@ -2154,17 +2113,7 @@ def idxmin(
axis = self.axis
def func(df):
- res = df._reduce(
- nanops.nanargmin,
- "argmin",
- axis=axis,
- skipna=skipna,
- numeric_only=numeric_only,
- )
- indices = res._values
- index = df._get_axis(axis)
- result = [index[i] if i >= 0 else np.nan for i in indices]
- return df._constructor_sliced(result, index=res.index)
+ return df.idxmin(axis=axis, skipna=skipna, numeric_only=numeric_only)
func.__name__ = "idxmin"
result = self._python_apply_general(
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 5f5bb1c8833da..a54c524094b23 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1380,9 +1380,7 @@ def _python_apply_general(
this can be coincidental leading to value-dependent behavior.
is_transform : bool, default False
Indicator for whether the function is actually a transform
- and should not have group keys prepended. This is used
- in _make_wrapper which generates both transforms (e.g. diff)
- and non-transforms (e.g. corr)
+ and should not have group keys prepended.
is_agg : bool, default False
Indicator for whether the function is an aggregation. When the
result is empty, we don't want to warn for this case.
@@ -4110,15 +4108,8 @@ def get_groupby(
obj: NDFrame,
by: _KeysArgType | None = None,
axis: AxisInt = 0,
- level=None,
grouper: ops.BaseGrouper | None = None,
- exclusions=None,
- selection=None,
- as_index: bool = True,
- sort: bool = True,
group_keys: bool | lib.NoDefault = True,
- observed: bool = False,
- dropna: bool = True,
) -> GroupBy:
klass: type[GroupBy]
@@ -4137,15 +4128,8 @@ def get_groupby(
obj=obj,
keys=by,
axis=axis,
- level=level,
grouper=grouper,
- exclusions=exclusions,
- selection=selection,
- as_index=as_index,
- sort=sort,
group_keys=group_keys,
- observed=observed,
- dropna=dropna,
)
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index dc109f6b30d5c..62e12bd4a66e8 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -743,7 +743,7 @@ def _get_splitter(self, data: NDFrame, axis: AxisInt = 0) -> DataSplitter:
Generator yielding subsetted objects
"""
ids, _, ngroups = self.group_info
- return get_splitter(data, ids, ngroups, axis=axis)
+ return _get_splitter(data, ids, ngroups, axis=axis)
@final
@cache_readonly
@@ -1017,13 +1017,12 @@ def agg_series(
def _aggregate_series_pure_python(
self, obj: Series, func: Callable
) -> npt.NDArray[np.object_]:
- ids, _, ngroups = self.group_info
+ _, _, ngroups = self.group_info
result = np.empty(ngroups, dtype="O")
initialized = False
- # equiv: splitter = self._get_splitter(obj, axis=0)
- splitter = get_splitter(obj, ids, ngroups, axis=0)
+ splitter = self._get_splitter(obj, axis=0)
for i, group in enumerate(splitter):
res = func(group)
@@ -1268,7 +1267,7 @@ def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:
return df.__finalize__(sdata, method="groupby")
-def get_splitter(
+def _get_splitter(
data: NDFrame, labels: np.ndarray, ngroups: int, axis: AxisInt = 0
) -> DataSplitter:
if isinstance(data, Series):
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index fb980db320bfd..22fe227d21727 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -261,10 +261,6 @@ def getitem_block(self, slicer: slice | npt.NDArray[np.intp]) -> Block:
new_mgr_locs = self._mgr_locs[slicer]
new_values = self._slice(slicer)
-
- if new_values.ndim != self.values.ndim:
- raise ValueError("Only same dim slicing is allowed")
-
return type(self)(new_values, new_mgr_locs, self.ndim)
@final
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 31d1be6c9ccbe..613f841a9a340 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -161,9 +161,7 @@ def __init__(
self._timegrouper = timegrouper
self.keys = None
self.sort = True
- # error: Incompatible types in assignment (expression has type "Union
- # [int, Literal['index', 'columns', 'rows']]", variable has type "int")
- self.axis = axis # type: ignore[assignment]
+ self.axis = obj._get_axis_number(axis)
self.kind = kind
self.group_keys = group_keys
self.as_index = True
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index 1313c39bb67b2..9a64c5eea33a8 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -708,6 +708,7 @@ def test_cython_transform_frame(op, args, targop):
{"by": "string"},
]: # {"by": 'string_missing'}]:
# {"by": ['int','string']}]:
+ # TODO: remove or enable commented-out code
gb = df.groupby(group_keys=False, **gb_target)
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 3cc0d59457528..f1e40691059e2 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -560,8 +560,6 @@ def _check_plot_works(f, default_axes=False, **kwargs):
with tm.ensure_clean(return_filelike=True) as path:
plt.savefig(path)
- except Exception as err:
- raise err
finally:
tm.close(fig)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51136 | 2023-02-03T01:47:06Z | 2023-02-07T20:34:52Z | 2023-02-07T20:34:52Z | 2023-02-07T20:38:04Z |
REF: Resampler.groupby -> _timegrouper | diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index d3b3c844e8c4e..23bf4e5e937d1 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -127,6 +127,7 @@ class Resampler(BaseGroupBy, PandasObject):
"""
grouper: BinGrouper
+ _timegrouper: TimeGrouper
exclusions: frozenset[Hashable] = frozenset() # for SelectionMixin compat
# to the groupby descriptor
@@ -152,7 +153,7 @@ def __init__(
selection=None,
**kwargs,
) -> None:
- self.groupby = groupby
+ self._timegrouper = groupby
self.keys = None
self.sort = True
# error: Incompatible types in assignment (expression has type "Union
@@ -162,11 +163,11 @@ def __init__(
self.group_keys = group_keys
self.as_index = True
- self.groupby._set_grouper(self._convert_obj(obj), sort=True)
+ self._timegrouper._set_grouper(self._convert_obj(obj), sort=True)
self.binner, self.grouper = self._get_binner()
self._selection = selection
- if self.groupby.key is not None:
- self.exclusions = frozenset([self.groupby.key])
+ if self._timegrouper.key is not None:
+ self.exclusions = frozenset([self._timegrouper.key])
else:
self.exclusions = frozenset()
@@ -187,9 +188,9 @@ def __str__(self) -> str:
Provide a nice str repr of our rolling object.
"""
attrs = (
- f"{k}={getattr(self.groupby, k)}"
+ f"{k}={getattr(self._timegrouper, k)}"
for k in self._attributes
- if getattr(self.groupby, k, None) is not None
+ if getattr(self._timegrouper, k, None) is not None
)
return f"{type(self).__name__} [{', '.join(attrs)}]"
@@ -197,7 +198,7 @@ def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self._attributes:
- return getattr(self.groupby, attr)
+ return getattr(self._timegrouper, attr)
if attr in self.obj:
return self[attr]
@@ -208,13 +209,13 @@ def __getattr__(self, attr: str):
def obj(self) -> NDFrame: # type: ignore[override]
# error: Incompatible return value type (got "Optional[Any]",
# expected "NDFrameT")
- return self.groupby.obj # type: ignore[return-value]
+ return self._timegrouper.obj # type: ignore[return-value]
@property
def ax(self):
# we can infer that this is a PeriodIndex/DatetimeIndex/TimedeltaIndex,
# but skipping annotating bc the overrides overwhelming
- return self.groupby.ax
+ return self._timegrouper.ax
@property
def _from_selection(self) -> bool:
@@ -223,8 +224,8 @@ def _from_selection(self) -> bool:
"""
# upsampling and PeriodIndex resampling do not work
# with selection, this state used to catch and raise an error
- return self.groupby is not None and (
- self.groupby.key is not None or self.groupby.level is not None
+ return self._timegrouper is not None and (
+ self._timegrouper.key is not None or self._timegrouper.level is not None
)
def _convert_obj(self, obj: NDFrameT) -> NDFrameT:
@@ -252,7 +253,7 @@ def _get_binner(self):
"""
binner, bins, binlabels = self._get_binner_for_time()
assert len(bins) == len(binlabels)
- bin_grouper = BinGrouper(bins, binlabels, indexer=self.groupby.indexer)
+ bin_grouper = BinGrouper(bins, binlabels, indexer=self._timegrouper.indexer)
return binner, bin_grouper
@Substitution(
@@ -391,7 +392,9 @@ def transform(self, arg, *args, **kwargs):
2018-01-01 01:00:00 NaN
Freq: H, dtype: float64
"""
- return self._selected_obj.groupby(self.groupby).transform(arg, *args, **kwargs)
+ return self._selected_obj.groupby(self._timegrouper).transform(
+ arg, *args, **kwargs
+ )
def _downsample(self, f, **kwargs):
raise AbstractMethodError(self)
@@ -1168,7 +1171,7 @@ def __init__(self, obj, parent=None, groupby=None, key=None, **kwargs) -> None:
self.key = key
self._groupby = groupby
- self.groupby = copy.copy(parent.groupby)
+ self._timegrouper = copy.copy(parent._timegrouper)
@no_type_check
def _apply(self, f, *args, **kwargs):
@@ -1178,7 +1181,7 @@ def _apply(self, f, *args, **kwargs):
"""
def func(x):
- x = self._shallow_copy(x, groupby=self.groupby)
+ x = self._shallow_copy(x, groupby=self._timegrouper)
if isinstance(f, str):
return getattr(x, f)(**kwargs)
@@ -1243,8 +1246,8 @@ def _get_binner_for_time(self):
# this is how we are actually creating the bins
if self.kind == "period":
- return self.groupby._get_time_period_bins(self.ax)
- return self.groupby._get_time_bins(self.ax)
+ return self._timegrouper._get_time_period_bins(self.ax)
+ return self._timegrouper._get_time_bins(self.ax)
def _downsample(self, how, **kwargs):
"""
@@ -1373,7 +1376,7 @@ def _resampler_for_grouping(self):
def _get_binner_for_time(self):
if self.kind == "timestamp":
return super()._get_binner_for_time()
- return self.groupby._get_period_bins(self.ax)
+ return self._timegrouper._get_period_bins(self.ax)
def _convert_obj(self, obj: NDFrameT) -> NDFrameT:
obj = super()._convert_obj(obj)
@@ -1483,7 +1486,7 @@ def _resampler_for_grouping(self):
return TimedeltaIndexResamplerGroupby
def _get_binner_for_time(self):
- return self.groupby._get_time_delta_bins(self.ax)
+ return self._timegrouper._get_time_delta_bins(self.ax)
def _adjust_binner_for_upsample(self, binner):
"""
| GroupBy object is very different from a TimeGrouper object. Hopefully this will make this code slightly less confusing (xref #51134 this is a PITA) | https://api.github.com/repos/pandas-dev/pandas/pulls/51135 | 2023-02-03T01:36:15Z | 2023-02-03T21:08:39Z | 2023-02-03T21:08:39Z | 2023-02-03T23:17:05Z |
CI: Change development python version to 3.10 | diff --git a/.circleci/config.yml b/.circleci/config.yml
index e704c37df3e45..e7322e748662f 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -6,7 +6,7 @@ jobs:
image: ubuntu-2004:2022.04.1
resource_class: arm.large
environment:
- ENV_FILE: ci/deps/circle-38-arm64.yaml
+ ENV_FILE: ci/deps/circle-39-arm64.yaml
PYTEST_WORKERS: auto
PATTERN: "not single_cpu and not slow and not network and not clipboard and not arm_slow and not db"
PYTEST_TARGET: "pandas"
diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index d6d43a8bfc13b..ab8f873e9b70b 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -179,7 +179,7 @@ jobs:
id: setup_python
uses: actions/setup-python@v4
with:
- python-version: '3.8'
+ python-version: '3.10'
cache: 'pip'
cache-dependency-path: 'requirements-dev.txt'
diff --git a/.github/workflows/package-checks.yml b/.github/workflows/package-checks.yml
index fa1b5e5d4fba3..7130bed21d5ff 100644
--- a/.github/workflows/package-checks.yml
+++ b/.github/workflows/package-checks.yml
@@ -38,7 +38,7 @@ jobs:
id: setup_python
uses: actions/setup-python@v4
with:
- python-version: '3.8'
+ python-version: '3.10'
- name: Install required dependencies
run: |
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml
index 460369f45e900..957e7103f4ff6 100644
--- a/.github/workflows/sdist.yml
+++ b/.github/workflows/sdist.yml
@@ -29,7 +29,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- python-version: ["3.8", "3.9", "3.10", "3.11"]
+ python-version: ["3.9", "3.10", "3.11"]
concurrency:
# https://github.community/t/concurrecy-not-work-for-push/183068/7
group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{matrix.python-version}}-sdist
@@ -80,8 +80,6 @@ jobs:
- name: Force oldest supported NumPy
run: |
case "${{matrix.python-version}}" in
- 3.8)
- pip install numpy==1.21.6 ;;
3.9)
pip install numpy==1.21.6 ;;
3.10)
diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index 31e2095624347..69baa3ccd6f34 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -26,19 +26,19 @@ jobs:
timeout-minutes: 180
strategy:
matrix:
- env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml, actions-311.yaml]
+ env_file: [actions-39.yaml, actions-310.yaml, actions-311.yaml]
# Prevent the include jobs from overriding other jobs
pattern: [""]
include:
- name: "Downstream Compat"
- env_file: actions-38-downstream_compat.yaml
+ env_file: actions-39-downstream_compat.yaml
pattern: "not slow and not network and not single_cpu"
pytest_target: "pandas/tests/test_downstream.py"
- name: "Minimum Versions"
- env_file: actions-38-minimum_versions.yaml
+ env_file: actions-39-minimum_versions.yaml
pattern: "not slow and not network and not single_cpu"
- name: "Locale: it_IT"
- env_file: actions-38.yaml
+ env_file: actions-310.yaml
pattern: "not slow and not network and not single_cpu"
extra_apt: "language-pack-it"
# Use the utf8 version as the default, it has no bad side-effect.
@@ -48,7 +48,7 @@ jobs:
# It will be temporarily activated during tests with locale.setlocale
extra_loc: "it_IT"
- name: "Locale: zh_CN"
- env_file: actions-38.yaml
+ env_file: actions-310.yaml
pattern: "not slow and not network and not single_cpu"
extra_apt: "language-pack-zh-hans"
# Use the utf8 version as the default, it has no bad side-effect.
@@ -62,7 +62,7 @@ jobs:
pattern: "not slow and not network and not single_cpu"
pandas_copy_on_write: "1"
- name: "Pypy"
- env_file: actions-pypy-38.yaml
+ env_file: actions-pypy-39.yaml
pattern: "not slow and not network and not single_cpu"
test_args: "--max-worker-restart 0"
- name: "Numpy Dev"
@@ -173,7 +173,7 @@ jobs:
strategy:
matrix:
os: [macos-latest, windows-latest]
- env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml, actions-311.yaml]
+ env_file: [actions-39.yaml, actions-310.yaml, actions-311.yaml]
fail-fast: false
runs-on: ${{ matrix.os }}
name: ${{ format('{0} {1}', matrix.os, matrix.env_file) }}
@@ -227,7 +227,7 @@ jobs:
fi
- name: Build environment and Run Tests
run: |
- /opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev
+ /opt/python/cp39-cp39/bin/python -m venv ~/virtualenvs/pandas-dev
. ~/virtualenvs/pandas-dev/bin/activate
python -m pip install --no-cache-dir --no-deps -U pip wheel setuptools
python -m pip install --no-cache-dir versioneer[toml] cython numpy python-dateutil pytz pytest>=7.0.0 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index a42957c1cc942..79dd9222fb90d 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -55,7 +55,7 @@ jobs:
- [windows-2019, win_amd64]
- [windows-2019, win32]
# TODO: support PyPy?
- python: [["cp38", "3.8"], ["cp39", "3.9"], ["cp310", "3.10"], ["cp311", "3.11"]]# "pp38", "pp39"]
+ python: [["cp39", "3.9"], ["cp310", "3.10"], ["cp311", "3.11"]]# "pp39"]
env:
IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }}
IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
@@ -152,7 +152,7 @@ jobs:
auto-update-conda: true
# Really doesn't matter what version we upload with
# just the version we test with
- python-version: '3.8'
+ python-version: '3.10'
channels: conda-forge
channel-priority: true
# mamba fails to solve, also we really don't need this since we're just installing python
diff --git a/ci/deps/actions-38.yaml b/ci/deps/actions-38.yaml
deleted file mode 100644
index df7d0277f3ba9..0000000000000
--- a/ci/deps/actions-38.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-name: pandas-dev
-channels:
- - conda-forge
-dependencies:
- - python=3.8
-
- # build dependencies
- - versioneer[toml]
- - cython>=0.29.33
-
- # test dependencies
- - pytest>=7.0.0
- - pytest-cov
- - pytest-xdist>=2.2.0
- - pytest-asyncio>=0.17.0
- - boto3
-
- # required dependencies
- - python-dateutil
- - numpy
- - pytz
-
- # optional dependencies
- - beautifulsoup4>=4.11.1
- - blosc>=1.21.0
- - bottleneck>=1.3.4
- - brotlipy>=0.7.0
- - fastparquet>=0.8.1
- - fsspec>=2022.05.0
- - html5lib>=1.1
- - hypothesis>=6.46.1
- - gcsfs>=2022.05.0
- - jinja2>=3.1.2
- - lxml>=4.8.0
- - matplotlib>=3.6.1
- - numba>=0.55.2
- - numexpr>=2.8.0
- - odfpy>=1.4.1
- - qtpy>=2.2.0
- - openpyxl<3.1.1, >=3.0.10
- - pandas-gbq>=0.17.5
- - psycopg2>=2.9.3
- - pyarrow>=7.0.0
- - pymysql>=1.0.2
- - pyreadstat>=1.1.5
- - pytables>=3.7.0
- - python-snappy>=0.6.1
- - pyxlsb>=1.0.9
- - s3fs>=2022.05.0
- - scipy>=1.8.1
- - sqlalchemy>=1.4.36
- - tabulate>=0.8.10
- - xarray>=2022.03.0
- - xlrd>=2.0.1
- - xlsxwriter>=3.0.3
- - zstandard>=0.17.0
-
- - pip:
- - pyqt5>=5.15.6
- - tzdata>=2022.1
diff --git a/ci/deps/actions-38-downstream_compat.yaml b/ci/deps/actions-39-downstream_compat.yaml
similarity index 98%
rename from ci/deps/actions-38-downstream_compat.yaml
rename to ci/deps/actions-39-downstream_compat.yaml
index 670d7c37dc4d2..241adef3367a0 100644
--- a/ci/deps/actions-38-downstream_compat.yaml
+++ b/ci/deps/actions-39-downstream_compat.yaml
@@ -3,7 +3,7 @@ name: pandas-dev
channels:
- conda-forge
dependencies:
- - python=3.8
+ - python=3.9
# build dependencies
- versioneer[toml]
diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-39-minimum_versions.yaml
similarity index 98%
rename from ci/deps/actions-38-minimum_versions.yaml
rename to ci/deps/actions-39-minimum_versions.yaml
index 96c6a0fd6eb2e..61752cbfa54b2 100644
--- a/ci/deps/actions-38-minimum_versions.yaml
+++ b/ci/deps/actions-39-minimum_versions.yaml
@@ -4,7 +4,7 @@ name: pandas-dev
channels:
- conda-forge
dependencies:
- - python=3.8.0
+ - python=3.9
# build dependencies
- versioneer[toml]
diff --git a/ci/deps/actions-pypy-38.yaml b/ci/deps/actions-pypy-39.yaml
similarity index 86%
rename from ci/deps/actions-pypy-38.yaml
rename to ci/deps/actions-pypy-39.yaml
index 981399dcd4b7c..64774e776056f 100644
--- a/ci/deps/actions-pypy-38.yaml
+++ b/ci/deps/actions-pypy-39.yaml
@@ -5,7 +5,7 @@ dependencies:
# TODO: Add the rest of the dependencies in here
# once the other plentiful failures/segfaults
# with base pandas has been dealt with
- - python=3.8[build=*_pypy] # TODO: use this once pypy3.8 is available
+ - python=3.9[build=*_pypy]
# build dependencies
- versioneer[toml]
diff --git a/ci/deps/circle-38-arm64.yaml b/ci/deps/circle-39-arm64.yaml
similarity index 98%
rename from ci/deps/circle-38-arm64.yaml
rename to ci/deps/circle-39-arm64.yaml
index 5102b2ca55404..42f9994b64157 100644
--- a/ci/deps/circle-38-arm64.yaml
+++ b/ci/deps/circle-39-arm64.yaml
@@ -2,7 +2,7 @@ name: pandas-dev
channels:
- conda-forge
dependencies:
- - python=3.8
+ - python=3.9
# build dependencies
- versioneer[toml]
diff --git a/doc/source/conf.py b/doc/source/conf.py
index c73a91aa90365..d808d60b1ac95 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -76,6 +76,7 @@
# to ensure that include files (partial pages) aren't built, exclude them
# https://github.com/sphinx-doc/sphinx/issues/1965#issuecomment-124732907
"**/includes/**",
+ "**/api/pandas.Series.dt.rst",
]
try:
import nbconvert
diff --git a/doc/source/development/contributing_gitpod.rst b/doc/source/development/contributing_gitpod.rst
index c591be5425db9..042a2f316cd42 100644
--- a/doc/source/development/contributing_gitpod.rst
+++ b/doc/source/development/contributing_gitpod.rst
@@ -29,7 +29,7 @@ you do not have an account yet, you will need to create one first.
To get started just login at `Gitpod`_, and grant the appropriate permissions to GitHub.
-We have built a python 3.8 environment and all development dependencies will
+We have built a python 3.10 environment and all development dependencies will
install when the environment starts.
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index 49ec242f6bb96..1770d759dde4d 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -20,7 +20,7 @@ Instructions for installing from source,
Python version support
----------------------
-Officially Python 3.8, 3.9, 3.10 and 3.11.
+Officially Python 3.9, 3.10 and 3.11.
Installing pandas
-----------------
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst
index cfce12c2930d7..b10dd876050ae 100644
--- a/doc/source/whatsnew/v2.1.0.rst
+++ b/doc/source/whatsnew/v2.1.0.rst
@@ -124,6 +124,11 @@ Backwards incompatible API changes
.. _whatsnew_210.api_breaking.deps:
+Increased minimum version for Python
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+pandas 2.1.0 supports Python 3.9 and higher.
+
Increased minimum versions for dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Some minimum supported versions of dependencies were updated.
diff --git a/environment.yml b/environment.yml
index bde8c46bffd97..1e30c51537fa0 100644
--- a/environment.yml
+++ b/environment.yml
@@ -3,7 +3,7 @@ name: pandas-dev
channels:
- conda-forge
dependencies:
- - python=3.8
+ - python=3.10
- pip
# build dependencies
@@ -38,7 +38,7 @@ dependencies:
- matplotlib>=3.6.1
- numba>=0.55.2
- numexpr>=2.8.0 # pin for "Run checks on imported code" job
- - openpyxl<3.1.1, >=3.0.7
+ - openpyxl>=3.0.10
- odfpy>=1.4.1
- py
- psycopg2>=2.9.3
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
index 547422f5ec55c..9ba75c8485ac7 100644
--- a/pandas/_libs/tslibs/timestamps.pyi
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -158,7 +158,9 @@ class Timestamp(datetime):
def __hash__(self) -> int: ...
def weekday(self) -> int: ...
def isoweekday(self) -> int: ...
- def isocalendar(self) -> tuple[int, int, int]: ...
+ # Return type "Tuple[int, int, int]" of "isocalendar" incompatible with return
+ # type "_IsoCalendarDate" in supertype "date"
+ def isocalendar(self) -> tuple[int, int, int]: ... # type: ignore[override]
@property
def is_leap_year(self) -> bool: ...
@property
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 77d2f4802c08f..9ede9e65a6839 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -84,7 +84,7 @@
zoneinfo = None
if compat.PY39:
# Import "zoneinfo" could not be resolved (reportMissingImports)
- import zoneinfo # type: ignore[no-redef]
+ import zoneinfo # type: ignore[assignment]
# Although zoneinfo can be imported in Py39, it is effectively
# "not available" without tzdata/IANA tz data.
@@ -1964,7 +1964,9 @@ def using_copy_on_write() -> bool:
warsaws = ["Europe/Warsaw", "dateutil/Europe/Warsaw"]
if zoneinfo is not None:
- warsaws.append(zoneinfo.ZoneInfo("Europe/Warsaw"))
+ warsaws.append(
+ zoneinfo.ZoneInfo("Europe/Warsaw") # pyright: ignore[reportGeneralTypeIssues]
+ )
@pytest.fixture(params=warsaws)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 9e9f28b1dfddb..b6ade0728e075 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -11756,8 +11756,7 @@ def __iand__(self, other) -> Self:
@final
def __ior__(self, other) -> Self:
- # error: Unsupported left operand type for | ("Type[NDFrame]")
- return self._inplace_method(other, type(self).__or__) # type: ignore[operator]
+ return self._inplace_method(other, type(self).__or__)
@final
def __ixor__(self, other) -> Self:
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 6ef33c3d58306..8dd3e4385a383 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -1831,7 +1831,9 @@ def _load_backend(backend: str) -> types.ModuleType:
if hasattr(eps, "select"):
entry = eps.select(group=key) # pyright: ignore[reportGeneralTypeIssues]
else:
- entry = eps.get(key, ())
+ # Argument 2 to "get" of "dict" has incompatible type "Tuple[]";
+ # expected "EntryPoints" [arg-type]
+ entry = eps.get(key, ()) # type: ignore[arg-type]
for entry_point in entry:
found_backend = entry_point.name == backend
if found_backend:
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 4bd6fd745d56d..b8cd8a5546f05 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -1,13 +1,16 @@
"""
Tests for DatetimeArray
"""
+from __future__ import annotations
+
from datetime import timedelta
import operator
try:
from zoneinfo import ZoneInfo
except ImportError:
- ZoneInfo = None
+ # Cannot assign to a type
+ ZoneInfo = None # type: ignore[misc, assignment]
import numpy as np
import pytest
@@ -712,7 +715,9 @@ def test_tz_localize_t2d(self):
# no tzdata
pass
else:
- easts.append(tz)
+ # Argument 1 to "append" of "list" has incompatible type "ZoneInfo";
+ # expected "str"
+ easts.append(tz) # type: ignore[arg-type]
@pytest.mark.parametrize("tz", easts)
def test_iter_zoneinfo_fold(self, tz):
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index f657cc71e6346..6d18a292061b9 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from datetime import (
datetime,
timedelta,
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index 05700841de7e1..6f3c83b999e94 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -22,7 +22,8 @@
try:
from zoneinfo import ZoneInfo
except ImportError:
- ZoneInfo = None
+ # Cannot assign to a type [misc]
+ ZoneInfo = None # type: ignore[misc, assignment]
from pandas._libs.tslibs import (
conversion,
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
index b855232179b51..4851612392e68 100644
--- a/pandas/tests/scalar/timestamp/test_constructors.py
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -848,7 +848,10 @@ def test_timestamp_constructor_retain_fold(tz, fold):
_tzs = ["dateutil/Europe/London"]
if PY39:
try:
- _tzs = ["dateutil/Europe/London", zoneinfo.ZoneInfo("Europe/London")]
+ _tzs = [
+ "dateutil/Europe/London",
+ zoneinfo.ZoneInfo("Europe/London"), # type: ignore[list-item]
+ ]
except zoneinfo.ZoneInfoNotFoundError:
pass
diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py
index 820b2e17a9d3f..3a6953af4337e 100644
--- a/pandas/tests/scalar/timestamp/test_timezones.py
+++ b/pandas/tests/scalar/timestamp/test_timezones.py
@@ -34,7 +34,8 @@
try:
from zoneinfo import ZoneInfo
except ImportError:
- ZoneInfo = None
+ # Cannot assign to a type
+ ZoneInfo = None # type: ignore[misc, assignment]
class TestTimestampTZOperations:
diff --git a/pyproject.toml b/pyproject.toml
index 8b5603ecb7c7b..aacf8649add35 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,7 +21,7 @@ authors = [
{ name = 'The Pandas Development Team', email='pandas-dev@python.org' },
]
license = {file = 'LICENSE'}
-requires-python = '>=3.8'
+requires-python = '>=3.9'
dependencies = [
"numpy>=1.21.6; python_version<'3.11'",
"numpy>=1.23.2; python_version>='3.11'",
@@ -39,7 +39,6 @@ classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
- 'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
@@ -171,7 +170,7 @@ select = "*-win32"
environment = { IS_32_BIT="true" }
[tool.black]
-target-version = ['py38', 'py39']
+target-version = ['py39', 'py310']
required-version = '23.1.0'
exclude = '''
(
@@ -194,7 +193,7 @@ exclude = '''
[tool.ruff]
line-length = 88
update-check = false
-target-version = "py38"
+target-version = "py310"
fix = true
unfixable = ["E711"]
@@ -257,6 +256,8 @@ ignore = [
"B023",
# Functions defined inside a loop must not use variables redefined in the loop
# "B301", # not yet implemented
+ # Only works with python >=3.10
+ "B905",
# Too many arguments to function call
"PLR0913",
# Too many returns
@@ -471,7 +472,7 @@ follow_imports_for_stubs = false
no_site_packages = false
no_silence_site_packages = false
# Platform configuration
-python_version = "3.8"
+python_version = "3.10"
platform = "linux-64"
# Disallow dynamic typing
disallow_any_unimported = false # TODO
@@ -549,7 +550,7 @@ skip_glob = "env"
skip = "pandas/__init__.py"
[tool.pyright]
-pythonVersion = "3.8"
+pythonVersion = "3.10"
typeCheckingMode = "basic"
include = ["pandas", "typings"]
exclude = ["pandas/tests", "pandas/io/clipboard", "pandas/util/version"]
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 30189c35fcbb5..d2f024886a129 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -27,7 +27,7 @@ lxml>=4.8.0
matplotlib>=3.6.1
numba>=0.55.2
numexpr>=2.8.0
-openpyxl<3.1.1, >=3.0.7
+openpyxl>=3.0.10
odfpy>=1.4.1
py
psycopg2-binary>=2.9.3
diff --git a/scripts/run_stubtest.py b/scripts/run_stubtest.py
index db7a327f231b5..dedcdb5532593 100644
--- a/scripts/run_stubtest.py
+++ b/scripts/run_stubtest.py
@@ -49,8 +49,8 @@
"pandas._libs.lib._NoDefault.no_default",
# internal type alias (should probably be private)
"pandas._libs.lib.ndarray_obj_2d",
- # workaround for mypy (cache_readonly = property)
- "pandas._libs.properties.cache_readonly.__get__",
+ # runtime argument "owner" has a default value but stub argument does not
+ "pandas._libs.properties.AxisProperty.__get__",
"pandas._libs.properties.cache_readonly.deleter",
"pandas._libs.properties.cache_readonly.getter",
"pandas._libs.properties.cache_readonly.setter",
diff --git a/scripts/validate_min_versions_in_sync.py b/scripts/validate_min_versions_in_sync.py
index e0182ebaaee60..9a6d97a222000 100755
--- a/scripts/validate_min_versions_in_sync.py
+++ b/scripts/validate_min_versions_in_sync.py
@@ -124,6 +124,11 @@ def get_yaml_map_from(
yaml_package, yaml_version2 = yaml_dependency.split(operator)
yaml_version2 = operator + yaml_version2
yaml_map[yaml_package] = [yaml_version1, yaml_version2]
+ elif "[build=*_pypy]" in dependency:
+ search_text = search_text.replace("[build=*_pypy]", "")
+ yaml_package, yaml_version = search_text.split(operator)
+ yaml_version = operator + yaml_version
+ yaml_map[yaml_package] = [yaml_version]
elif operator is not None:
yaml_package, yaml_version = search_text.split(operator)
yaml_version = operator + yaml_version
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Mypy passes locally now. Lets see if ci passes as well. | https://api.github.com/repos/pandas-dev/pandas/pulls/51133 | 2023-02-02T23:16:16Z | 2023-04-26T14:30:37Z | 2023-04-26T14:30:37Z | 2023-04-26T15:52:47Z |
DEPR: Remove NumericIndex from tests/indexes/test_numpy_compat.py | diff --git a/pandas/tests/indexes/test_numpy_compat.py b/pandas/tests/indexes/test_numpy_compat.py
index cdc5446bfeba3..2a29e57678df9 100644
--- a/pandas/tests/indexes/test_numpy_compat.py
+++ b/pandas/tests/indexes/test_numpy_compat.py
@@ -10,8 +10,10 @@
isna,
)
import pandas._testing as tm
-from pandas.api.types import is_complex_dtype
-from pandas.core.api import NumericIndex
+from pandas.api.types import (
+ is_complex_dtype,
+ is_numeric_dtype,
+)
from pandas.core.arrays import BooleanArray
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
@@ -66,11 +68,8 @@ def test_numpy_ufuncs_basic(index, func):
with tm.external_error_raised((TypeError, AttributeError)):
with np.errstate(all="ignore"):
func(index)
- elif (
- isinstance(index, NumericIndex)
- or (not isinstance(index.dtype, np.dtype) and index.dtype._is_numeric)
- or (index.dtype.kind == "c" and func not in [np.deg2rad, np.rad2deg])
- or index.dtype == bool
+ elif is_numeric_dtype(index) and not (
+ is_complex_dtype(index) and func in [np.deg2rad, np.rad2deg]
):
# coerces to float (e.g. np.sin)
with np.errstate(all="ignore"):
@@ -81,10 +80,9 @@ def test_numpy_ufuncs_basic(index, func):
exp = Index(arr_result, name=index.name)
tm.assert_index_equal(result, exp)
- if type(index) is not Index or index.dtype == bool:
- assert type(result) is NumericIndex
+ if isinstance(index.dtype, np.dtype) and is_numeric_dtype(index):
if is_complex_dtype(index):
- assert result.dtype == "complex64"
+ assert result.dtype == index.dtype
elif index.dtype in ["bool", "int8", "uint8"]:
assert result.dtype in ["float16", "float32"]
elif index.dtype in ["int16", "uint16", "float32"]:
@@ -128,11 +126,8 @@ def test_numpy_ufuncs_other(index, func):
with tm.external_error_raised(TypeError):
func(index)
- elif (
- isinstance(index, NumericIndex)
- or (not isinstance(index.dtype, np.dtype) and index.dtype._is_numeric)
- or (index.dtype.kind == "c" and func is not np.signbit)
- or index.dtype == bool
+ elif is_numeric_dtype(index) and not (
+ is_complex_dtype(index) and func is np.signbit
):
# Results in bool array
result = func(index)
| This was a bit tricky.
xref #42717. | https://api.github.com/repos/pandas-dev/pandas/pulls/51132 | 2023-02-02T21:24:03Z | 2023-02-03T09:40:01Z | 2023-02-03T09:40:01Z | 2023-02-03T13:57:04Z |
DEPR: float/int(Series[single_elemet]) | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index c3481a5452091..2258c76a6cff5 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -780,7 +780,7 @@ Deprecations
- :meth:`Index.is_object` has been deprecated. Use :func:`pandas.api.types.is_object_dtype` instead (:issue:`50042`)
- :meth:`Index.is_interval` has been deprecated. Use :func:`pandas.api.types.is_intterval_dtype` instead (:issue:`50042`)
- Deprecated ``all`` and ``any`` reductions with ``datetime64`` and :class:`DatetimeTZDtype` dtypes, use e.g. ``(obj != pd.Timestamp(0), tz=obj.tz).all()`` instead (:issue:`34479`)
--
+- Deprecated calling ``float`` or ``int`` on a single element :class:`Series` to return a ``float`` or ``int`` respectively. Extract the element before calling ``float`` or ``int`` instead (:issue:`51101`)
.. ---------------------------------------------------------------------------
.. _whatsnew_200.prior_deprecations:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index abe31d0dbd52a..9a5412145366f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -19,6 +19,7 @@
cast,
overload,
)
+import warnings
import weakref
import numpy as np
@@ -81,6 +82,7 @@
Substitution,
doc,
)
+from pandas.util._exceptions import find_stack_level
from pandas.util._validators import (
validate_ascending,
validate_bool_kwarg,
@@ -216,6 +218,13 @@ def _coerce_method(converter):
def wrapper(self):
if len(self) == 1:
+ warnings.warn(
+ f"Calling {converter.__name__} on a single element Series is "
+ "deprecated and will raise a TypeError in the future. "
+ f"Use {converter.__name__}(ser.iloc[0]) instead",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
return converter(self.iloc[0])
raise TypeError(f"cannot convert the series to {converter}")
diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py
index 3f2e8e9ea07a6..101e7be70e691 100644
--- a/pandas/tests/apply/test_invalid_arg.py
+++ b/pandas/tests/apply/test_invalid_arg.py
@@ -277,7 +277,7 @@ def test_agg_none_to_type():
df = DataFrame({"a": [None]})
msg = re.escape("int() argument must be a string")
with pytest.raises(TypeError, match=msg):
- df.agg({"a": int})
+ df.agg({"a": lambda x: int(x.iloc[0])})
def test_transform_none_to_type():
@@ -285,7 +285,7 @@ def test_transform_none_to_type():
df = DataFrame({"a": [None]})
msg = "argument must be a"
with pytest.raises(TypeError, match=msg):
- df.transform({"a": int})
+ df.transform({"a": lambda x: int(x.iloc[0])})
@pytest.mark.parametrize(
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 4852788ef2c06..5c3dfd246e9aa 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -287,3 +287,10 @@ def test_numeric_only(self, kernel, has_numeric_only, dtype):
else:
# reducer
assert result == expected
+
+
+@pytest.mark.parametrize("converter", [int, float, complex])
+def test_float_int_deprecated(converter):
+ # GH 51101
+ with tm.assert_produces_warning(FutureWarning):
+ assert converter(Series([1])) == converter(1)
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index bd6db2befdb0b..49c5b78a48a9f 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -145,10 +145,6 @@ def test_scalar_conversion(self):
scalar = Series(0.5)
assert not isinstance(scalar, float)
- # Coercion
- assert float(Series([1.0])) == 1.0
- assert int(Series([1.0])) == 1
-
def test_scalar_extension_dtype(self, ea_scalar_and_dtype):
# GH 28401
| - [ ] closes #51101 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51131 | 2023-02-02T21:04:41Z | 2023-02-04T19:20:50Z | 2023-02-04T19:20:50Z | 2023-02-06T18:20:58Z |
CLN: Use fixture dtype_backend in nullable tests | diff --git a/pandas/conftest.py b/pandas/conftest.py
index 64a8f0f9efc1d..888205366f9e6 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1293,6 +1293,22 @@ def string_storage(request):
return request.param
+@pytest.fixture(
+ params=[
+ "pandas",
+ pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")),
+ ]
+)
+def dtype_backend(request):
+ """
+ Parametrized fixture for pd.options.mode.string_storage.
+
+ * 'python'
+ * 'pyarrow'
+ """
+ return request.param
+
+
# Alias so we can test with cartesian product of string_storage
string_storage2 = string_storage
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index f194cadbc73d8..3f2fecbfb48a6 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -536,10 +536,6 @@ def test_reader_dtype_str(self, read_ext, dtype, expected):
actual = pd.read_excel(basename + read_ext, dtype=dtype)
tm.assert_frame_equal(actual, expected)
- @pytest.mark.parametrize(
- "dtype_backend",
- ["pandas", pytest.param("pyarrow", marks=td.skip_if_no("pyarrow"))],
- )
@pytest.mark.parametrize("option", [True, False])
def test_use_nullable_dtypes(self, read_ext, dtype_backend, option):
# GH#36712
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 7b473a56aa200..cf69cebd3c05e 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1869,7 +1869,6 @@ def test_json_uint64(self):
result = df.to_json(orient="split")
assert result == expected
- @pytest.mark.parametrize("dtype_backend", ["pandas", "pyarrow"])
@pytest.mark.parametrize(
"orient", ["split", "records", "values", "index", "columns"]
)
@@ -1936,7 +1935,6 @@ def test_read_json_nullable(self, string_storage, dtype_backend, orient, option)
tm.assert_frame_equal(result, expected)
- @pytest.mark.parametrize("dtype_backend", ["pandas", "pyarrow"])
@pytest.mark.parametrize("orient", ["split", "records", "index"])
def test_read_json_nullable_series(self, string_storage, dtype_backend, orient):
# GH#50750
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index 434e617ff05f9..c2939f7c12f10 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -948,17 +948,13 @@ def test_widths_and_usecols():
tm.assert_frame_equal(result, expected)
-@pytest.mark.parametrize("dtype_backend", ["pandas", "pyarrow"])
def test_use_nullable_dtypes(string_storage, dtype_backend):
# GH#50289
-
- if string_storage == "pyarrow" or dtype_backend == "pyarrow":
- pa = pytest.importorskip("pyarrow")
-
if string_storage == "python":
arr = StringArray(np.array(["a", "b"], dtype=np.object_))
arr_na = StringArray(np.array([pd.NA, "a"], dtype=np.object_))
else:
+ pa = pytest.importorskip("pyarrow")
arr = ArrowStringArray(pa.array(["a", "b"]))
arr_na = ArrowStringArray(pa.array([None, "a"]))
@@ -983,6 +979,7 @@ def test_use_nullable_dtypes(string_storage, dtype_backend):
}
)
if dtype_backend == "pyarrow":
+ pa = pytest.importorskip("pyarrow")
from pandas.arrays import ArrowExtensionArray
expected = DataFrame(
diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py
index d018affdecfca..a28ad39606033 100644
--- a/pandas/tests/io/test_clipboard.py
+++ b/pandas/tests/io/test_clipboard.py
@@ -418,7 +418,6 @@ def test_raw_roundtrip(self, data):
# Clipboard can sometimes keep previous param causing flaky CI failures
subprocess.run(["xsel", "--delete", "--clipboard"], check=True)
- @pytest.mark.parametrize("dtype_backend", ["pandas", "pyarrow"])
@pytest.mark.parametrize("engine", ["c", "python"])
def test_read_clipboard_nullable_dtypes(
self, request, mock_clipboard, string_storage, dtype_backend, engine
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index 7e07ad0ec2ad3..df934a9d2555f 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -199,7 +199,6 @@ def test_http_path(self, feather_file):
res = read_feather(url)
tm.assert_frame_equal(expected, res)
- @pytest.mark.parametrize("dtype_backend", ["pandas", "pyarrow"])
@pytest.mark.parametrize("option", [True, False])
def test_read_json_nullable(self, string_storage, dtype_backend, option):
# GH#50765
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index de36548f08a12..d27aeeb94199c 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -138,9 +138,7 @@ def test_to_html_compat(self):
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
- @pytest.mark.parametrize("dtype_backend", ["pandas", "pyarrow"])
- @pytest.mark.parametrize("storage", ["python", "pyarrow"])
- def test_use_nullable_dtypes(self, storage, dtype_backend):
+ def test_use_nullable_dtypes(self, string_storage, dtype_backend):
# GH#50286
df = DataFrame(
{
@@ -155,7 +153,7 @@ def test_use_nullable_dtypes(self, storage, dtype_backend):
}
)
- if storage == "python":
+ if string_storage == "python":
string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
@@ -165,7 +163,7 @@ def test_use_nullable_dtypes(self, storage, dtype_backend):
string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
out = df.to_html(index=False)
- with pd.option_context("mode.string_storage", storage):
+ with pd.option_context("mode.string_storage", string_storage):
with pd.option_context("mode.dtype_backend", dtype_backend):
result = self.read_html(out, use_nullable_dtypes=True)[0]
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 4c884e20cf423..862c7d4c30fa8 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -591,6 +591,7 @@ def test_write_column_index_nonstring(self, pa):
msg = r"parquet must have string column names"
self.check_error_on_write(df, engine, ValueError, msg)
+ @pytest.mark.skipif(pa_version_under6p0, reason="minimum pyarrow not installed")
def test_use_nullable_dtypes(self, engine, request):
import pyarrow.parquet as pq
@@ -640,6 +641,7 @@ def test_use_nullable_dtypes(self, engine, request):
expected = expected.drop("c", axis=1)
tm.assert_frame_equal(result2, expected)
+ @pytest.mark.skipif(pa_version_under6p0, reason="minimum pyarrow not installed")
def test_use_nullable_dtypes_option(self, engine, request):
# GH#50748
import pyarrow.parquet as pq
diff --git a/pandas/tests/io/test_spss.py b/pandas/tests/io/test_spss.py
index c2bcf3601d5fa..7b19d2dafb34e 100644
--- a/pandas/tests/io/test_spss.py
+++ b/pandas/tests/io/test_spss.py
@@ -82,7 +82,6 @@ def test_spss_usecols(datapath):
pd.read_spss(fname, usecols="VAR00002")
-@pytest.mark.parametrize("dtype_backend", ["pandas", "pyarrow"])
def test_spss_umlauts_use_nullable_dtypes(datapath, dtype_backend):
# test file from the Haven project (https://haven.tidyverse.org/)
fname = datapath("io", "data", "spss", "umlauts.sav")
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index e268fa56cacf5..3ccc3bdd94f7e 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -2360,7 +2360,6 @@ def test_get_engine_auto_error_message(self):
@pytest.mark.parametrize("option", [True, False])
@pytest.mark.parametrize("func", ["read_sql", "read_sql_query"])
- @pytest.mark.parametrize("dtype_backend", ["pandas", "pyarrow"])
def test_read_sql_nullable_dtypes(
self, string_storage, func, option, dtype_backend
):
@@ -2395,7 +2394,6 @@ def test_read_sql_nullable_dtypes(
@pytest.mark.parametrize("option", [True, False])
@pytest.mark.parametrize("func", ["read_sql", "read_sql_table"])
- @pytest.mark.parametrize("dtype_backend", ["pandas", "pyarrow"])
def test_read_sql_nullable_dtypes_table(
self, string_storage, func, option, dtype_backend
):
diff --git a/pandas/tests/io/xml/test_xml.py b/pandas/tests/io/xml/test_xml.py
index 2d3435eab9f60..dfa251788ddc3 100644
--- a/pandas/tests/io/xml/test_xml.py
+++ b/pandas/tests/io/xml/test_xml.py
@@ -1773,11 +1773,8 @@ def test_s3_parser_consistency():
tm.assert_frame_equal(df_lxml, df_etree)
-@pytest.mark.parametrize("dtype_backend", ["pandas", "pyarrow"])
def test_read_xml_nullable_dtypes(parser, string_storage, dtype_backend):
# GH#50500
- if string_storage == "pyarrow" or dtype_backend == "pyarrow":
- pa = pytest.importorskip("pyarrow")
data = """<?xml version='1.0' encoding='utf-8'?>
<data xmlns="http://example.com">
<row>
@@ -1809,6 +1806,7 @@ def test_read_xml_nullable_dtypes(parser, string_storage, dtype_backend):
string_array_na = StringArray(np.array(["x", NA], dtype=np.object_))
else:
+ pa = pytest.importorskip("pyarrow")
string_array = ArrowStringArray(pa.array(["x", "y"]))
string_array_na = ArrowStringArray(pa.array(["x", None]))
@@ -1831,6 +1829,7 @@ def test_read_xml_nullable_dtypes(parser, string_storage, dtype_backend):
)
if dtype_backend == "pyarrow":
+ pa = pytest.importorskip("pyarrow")
from pandas.arrays import ArrowExtensionArray
expected = DataFrame(
diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py
index 8b57bbe03f9e7..18b8dd8394133 100644
--- a/pandas/tests/tools/test_to_numeric.py
+++ b/pandas/tests/tools/test_to_numeric.py
@@ -912,13 +912,10 @@ def test_to_numeric_use_nullable_dtypes_already_nullable(dtype):
@pytest.mark.parametrize(
"use_nullable_dtypes, dtype", [(True, "Float64"), (False, "float64")]
)
-@pytest.mark.parametrize("dtype_backend", ["pandas", "pyarrow"])
def test_to_numeric_use_nullable_dtypes_error(
use_nullable_dtypes, dtype, dtype_backend
):
# GH#50505
- if dtype_backend == "pyarrow":
- pytest.importorskip("pyarrow")
ser = Series(["a", "b", ""])
expected = ser.copy()
with pytest.raises(ValueError, match="Unable to parse string"):
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/51129 | 2023-02-02T20:21:07Z | 2023-02-04T01:11:58Z | 2023-02-04T01:11:58Z | 2023-02-04T11:30:40Z |
ENH: Implement arrow support for read_csv with engine=c | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 7d028935ad175..12f8b1736a479 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -138,8 +138,8 @@ The option will only work for functions with the keyword ``use_nullable_dtypes``
Additionally a new global configuration, ``mode.dtype_backend`` can now be used in conjunction with the parameter ``use_nullable_dtypes=True`` in the following functions
to select the nullable dtypes implementation.
-* :func:`read_csv` (with ``engine="pyarrow"`` or ``engine="python"``)
-* :func:`read_clipboard` (with ``engine="python"``)
+* :func:`read_csv`
+* :func:`read_clipboard`
* :func:`read_fwf`
* :func:`read_excel`
* :func:`read_html`
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index da8c367d4fc2a..20a25afa6a51f 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -15,6 +15,7 @@ from pandas.util._exceptions import find_stack_level
from pandas import StringDtype
from pandas.core.arrays import (
+ ArrowExtensionArray,
BooleanArray,
FloatingArray,
IntegerArray,
@@ -341,6 +342,7 @@ cdef class TextReader:
bint use_nullable_dtypes
object usecols
set unnamed_cols # set[str]
+ str dtype_backend
def __cinit__(self, source,
delimiter=b",", # bytes | str
@@ -377,7 +379,8 @@ cdef class TextReader:
float_precision=None,
bint skip_blank_lines=True,
encoding_errors=b"strict",
- use_nullable_dtypes=False):
+ use_nullable_dtypes=False,
+ dtype_backend="pandas"):
# set encoding for native Python and C library
if isinstance(encoding_errors, str):
@@ -499,6 +502,7 @@ cdef class TextReader:
# - dict[Any, DtypeObj]
self.dtype = dtype
self.use_nullable_dtypes = use_nullable_dtypes
+ self.dtype_backend = dtype_backend
self.noconvert = set()
@@ -1054,7 +1058,9 @@ cdef class TextReader:
):
use_nullable_dtypes = self.use_nullable_dtypes and col_dtype is None
col_res = _maybe_upcast(
- col_res, use_nullable_dtypes=use_nullable_dtypes
+ col_res,
+ use_nullable_dtypes=use_nullable_dtypes,
+ dtype_backend=self.dtype_backend,
)
if col_res is None:
@@ -1387,7 +1393,9 @@ STR_NA_VALUES = {
_NA_VALUES = _ensure_encoded(list(STR_NA_VALUES))
-def _maybe_upcast(arr, use_nullable_dtypes: bool = False):
+def _maybe_upcast(
+ arr, use_nullable_dtypes: bool = False, dtype_backend: str = "pandas"
+):
"""Sets nullable dtypes or upcasts if nans are present.
Upcast, if use_nullable_dtypes is false and nans are present so that the
@@ -1440,6 +1448,13 @@ def _maybe_upcast(arr, use_nullable_dtypes: bool = False):
if use_nullable_dtypes:
arr = StringDtype().construct_array_type()._from_sequence(arr)
+ if use_nullable_dtypes and dtype_backend == "pyarrow":
+ import pyarrow as pa
+ if isinstance(arr, IntegerArray) and arr.isna().all():
+ # use null instead of int64 in pyarrow
+ arr = arr.to_numpy()
+ arr = ArrowExtensionArray(pa.array(arr, from_pandas=True))
+
return arr
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py
index 2872263e712d7..fa87e02793b55 100644
--- a/pandas/io/clipboards.py
+++ b/pandas/io/clipboards.py
@@ -43,8 +43,6 @@ def read_clipboard(
numpy-backed nullable dtypes or
``pd.set_option("mode.dtype_backend", "pyarrow")`` to use
pyarrow-backed nullable dtypes (using ``pd.ArrowDtype``).
- This is only implemented for the ``python``
- engine.
.. versionadded:: 2.0
diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py
index dbc7658e11631..7842a3d18166d 100644
--- a/pandas/io/parsers/c_parser_wrapper.py
+++ b/pandas/io/parsers/c_parser_wrapper.py
@@ -11,6 +11,8 @@
import numpy as np
+from pandas._config.config import get_option
+
from pandas._libs import parsers
from pandas._typing import (
ArrayLike,
@@ -18,6 +20,7 @@
DtypeObj,
ReadCsvBuffer,
)
+from pandas.compat._optional import import_optional_dependency
from pandas.errors import DtypeWarning
from pandas.util._exceptions import find_stack_level
@@ -79,6 +82,11 @@ def __init__(self, src: ReadCsvBuffer[str], **kwds) -> None:
kwds.pop(key, None)
kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None))
+ dtype_backend = get_option("mode.dtype_backend")
+ kwds["dtype_backend"] = dtype_backend
+ if dtype_backend == "pyarrow":
+ # Fail here loudly instead of in cython after reading
+ import_optional_dependency("pyarrow")
self._reader = parsers.TextReader(src, **kwds)
self.unnamed_cols = self._reader.unnamed_cols
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index 3dea62d57c255..7230c675ee775 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -24,10 +24,7 @@
import numpy as np
-from pandas._config import (
- get_option,
- using_nullable_dtypes,
-)
+from pandas._config import using_nullable_dtypes
from pandas._libs import lib
from pandas._libs.parsers import STR_NA_VALUES
@@ -408,8 +405,6 @@
numpy-backed nullable dtypes or
``pd.set_option("mode.dtype_backend", "pyarrow")`` to use
pyarrow-backed nullable dtypes (using ``pd.ArrowDtype``).
- This is only implemented for the ``pyarrow`` or ``python``
- engines.
.. versionadded:: 2.0
@@ -566,15 +561,6 @@ def _read(
raise ValueError(
"The 'chunksize' option is not supported with the 'pyarrow' engine"
)
- elif (
- kwds.get("use_nullable_dtypes", False)
- and get_option("mode.dtype_backend") == "pyarrow"
- and kwds.get("engine") == "c"
- ):
- raise NotImplementedError(
- f"use_nullable_dtypes=True and engine={kwds['engine']} with "
- "mode.dtype_backend set to 'pyarrow' is not implemented."
- )
else:
chunksize = validate_integer("chunksize", chunksize, 1)
diff --git a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
index ca12b1ce4b967..bd5ebfeffca14 100644
--- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
+++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
@@ -500,13 +500,6 @@ def test_use_nullable_dtypes_pyarrow_backend(all_parsers, request):
3,4.5,False,b,6,7.5,True,a,12-31-2019,
"""
with pd.option_context("mode.dtype_backend", "pyarrow"):
- if engine == "c":
- request.node.add_marker(
- pytest.mark.xfail(
- raises=NotImplementedError,
- reason=f"Not implemented with engine={parser.engine}",
- )
- )
result = parser.read_csv(
StringIO(data), use_nullable_dtypes=True, parse_dates=["i"]
)
@@ -520,7 +513,7 @@ def test_use_nullable_dtypes_pyarrow_backend(all_parsers, request):
"f": pd.Series([pd.NA, 7.5], dtype="float64[pyarrow]"),
"g": pd.Series([pd.NA, True], dtype="bool[pyarrow]"),
"h": pd.Series(
- [pd.NA if engine == "python" else "", "a"],
+ [pd.NA if engine != "pyarrow" else "", "a"],
dtype=pd.ArrowDtype(pa.string()),
),
"i": pd.Series([Timestamp("2019-12-31")] * 2),
diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py
index a28ad39606033..eeadd8bc56c74 100644
--- a/pandas/tests/io/test_clipboard.py
+++ b/pandas/tests/io/test_clipboard.py
@@ -426,9 +426,6 @@ def test_read_clipboard_nullable_dtypes(
if string_storage == "pyarrow" or dtype_backend == "pyarrow":
pa = pytest.importorskip("pyarrow")
- if dtype_backend == "pyarrow" and engine == "c":
- pytest.skip(reason="c engine not yet supported")
-
if string_storage == "python":
string_array = StringArray(np.array(["x", "y"], dtype=np.object_))
string_array_na = StringArray(np.array(["x", NA], dtype=np.object_))
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Not really sure if this is the best way forward, but want to see if ci passes everywhere | https://api.github.com/repos/pandas-dev/pandas/pulls/51128 | 2023-02-02T20:03:06Z | 2023-02-08T22:02:32Z | 2023-02-08T22:02:32Z | 2023-02-08T22:03:07Z |
DEPR: Remove various uses of NumericIndex | diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index b2305d0fe1cbf..db5ebc6d6bef1 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -62,7 +62,6 @@
if TYPE_CHECKING:
from pandas.core.api import (
DataFrame,
- NumericIndex,
PeriodIndex,
)
@@ -283,11 +282,9 @@ def to_period(self, freq=None) -> PeriodIndex:
return PeriodIndex._simple_new(arr, name=self.name)
@doc(DatetimeArray.to_julian_date)
- def to_julian_date(self) -> NumericIndex:
- from pandas.core.indexes.api import NumericIndex
-
+ def to_julian_date(self) -> Index:
arr = self._data.to_julian_date()
- return NumericIndex._simple_new(arr, name=self.name)
+ return Index._simple_new(arr, name=self.name)
@doc(DatetimeArray.isocalendar)
def isocalendar(self) -> DataFrame:
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index b9aa84aefd2cb..9be0b95472d99 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -41,7 +41,6 @@
date_range,
)
import pandas._testing as tm
-from pandas.core.api import NumericIndex
fix_now = pd.Timestamp("2021-01-01")
fix_utcnow = pd.Timestamp("2021-01-01", tz="UTC")
@@ -406,10 +405,10 @@ def test_array_equivalent(dtype_equal):
np.array(["a", "b", "c", "d"]), np.array(["e", "e"]), dtype_equal=dtype_equal
)
assert array_equivalent(
- NumericIndex([0, np.nan]), NumericIndex([0, np.nan]), dtype_equal=dtype_equal
+ Index([0, np.nan]), Index([0, np.nan]), dtype_equal=dtype_equal
)
assert not array_equivalent(
- NumericIndex([0, np.nan]), NumericIndex([1, np.nan]), dtype_equal=dtype_equal
+ Index([0, np.nan]), Index([1, np.nan]), dtype_equal=dtype_equal
)
assert array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan]), dtype_equal=dtype_equal
diff --git a/pandas/tests/groupby/test_pipe.py b/pandas/tests/groupby/test_pipe.py
index 9eded607e8733..49ce51bedbf9f 100644
--- a/pandas/tests/groupby/test_pipe.py
+++ b/pandas/tests/groupby/test_pipe.py
@@ -6,7 +6,6 @@
Index,
)
import pandas._testing as tm
-from pandas.core.api import NumericIndex
def test_pipe():
@@ -76,6 +75,6 @@ def h(df, arg3):
ser = pd.Series([1, 1, 2, 2, 3, 3])
result = ser.groupby(ser).pipe(lambda grp: grp.sum() * grp.count())
- expected = pd.Series([4, 8, 12], index=NumericIndex([1, 2, 3], dtype=np.int64))
+ expected = pd.Series([4, 8, 12], index=Index([1, 2, 3], dtype=np.int64))
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index c203792be2694..3ef4bb741f418 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -28,7 +28,6 @@
isna,
)
import pandas._testing as tm
-from pandas.core.api import NumericIndex
from pandas.core.arrays import BaseMaskedArray
@@ -317,9 +316,7 @@ def test_numpy_argsort(self, index):
def test_repeat(self, simple_index):
rep = 2
idx = simple_index.copy()
- new_index_cls = (
- NumericIndex if isinstance(idx, RangeIndex) else idx._constructor
- )
+ new_index_cls = idx._constructor
expected = new_index_cls(idx.values.repeat(rep), name=idx.name)
tm.assert_index_equal(idx.repeat(rep), expected)
@@ -523,7 +520,7 @@ def test_fillna(self, index):
elif index.dtype == bool:
# can't hold NAs
return
- elif isinstance(index, NumericIndex) and is_integer_dtype(index.dtype):
+ elif isinstance(index, Index) and is_integer_dtype(index.dtype):
return
elif isinstance(index, MultiIndex):
idx = index.copy(deep=True)
@@ -592,7 +589,7 @@ def test_map(self, simple_index):
idx = simple_index
result = idx.map(lambda x: x)
- # RangeIndex are equivalent to the similar NumericIndex with int64 dtype
+ # RangeIndex are equivalent to the similar Index with int64 dtype
tm.assert_index_equal(result, idx, exact="equiv")
@pytest.mark.parametrize(
@@ -615,7 +612,7 @@ def test_map_dictlike(self, mapper, simple_index):
identity = mapper(idx.values, idx)
result = idx.map(identity)
- # RangeIndex are equivalent to the similar NumericIndex with int64 dtype
+ # RangeIndex are equivalent to the similar Index with int64 dtype
tm.assert_index_equal(result, idx, exact="equiv")
# empty mappable
@@ -753,7 +750,7 @@ def test_index_groupby(self, simple_index):
tm.assert_dict_equal(idx.groupby(to_groupby), expected)
def test_append_preserves_dtype(self, simple_index):
- # In particular NumericIndex with dtype float32
+ # In particular Index with dtype float32
index = simple_index
N = len(index)
@@ -917,19 +914,19 @@ def test_arithmetic_explicit_conversions(self):
# float conversions
arr = np.arange(5, dtype="int64") * 3.2
- expected = NumericIndex(arr, dtype=np.float64)
+ expected = Index(arr, dtype=np.float64)
fidx = idx * 3.2
tm.assert_index_equal(fidx, expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx, expected)
# interops with numpy arrays
- expected = NumericIndex(arr, dtype=np.float64)
+ expected = Index(arr, dtype=np.float64)
a = np.zeros(5, dtype="float64")
result = fidx - a
tm.assert_index_equal(result, expected)
- expected = NumericIndex(-arr, dtype=np.float64)
+ expected = Index(-arr, dtype=np.float64)
a = np.zeros(5, dtype="float64")
result = a - fidx
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py
index e3f933a35efe7..4b46c6d612bae 100644
--- a/pandas/tests/indexes/interval/test_constructors.py
+++ b/pandas/tests/indexes/interval/test_constructors.py
@@ -18,7 +18,6 @@
timedelta_range,
)
import pandas._testing as tm
-from pandas.core.api import NumericIndex
from pandas.core.arrays import IntervalArray
import pandas.core.common as com
@@ -39,9 +38,9 @@ class ConstructorTests:
params=[
([3, 14, 15, 92, 653], np.int64),
(np.arange(10, dtype="int64"), np.int64),
- (NumericIndex(np.arange(-10, 11, dtype=np.int64)), np.int64),
- (NumericIndex(np.arange(10, 31, dtype=np.uint64)), np.uint64),
- (NumericIndex(np.arange(20, 30, 0.5), dtype=np.float64), np.float64),
+ (Index(np.arange(-10, 11, dtype=np.int64)), np.int64),
+ (Index(np.arange(10, 31, dtype=np.uint64)), np.uint64),
+ (Index(np.arange(20, 30, 0.5), dtype=np.float64), np.float64),
(date_range("20180101", periods=10), "<M8[ns]"),
(
date_range("20180101", periods=10, tz="US/Eastern"),
@@ -69,10 +68,10 @@ def test_constructor(self, constructor, breaks_and_expected_subtype, closed, nam
@pytest.mark.parametrize(
"breaks, subtype",
[
- (NumericIndex([0, 1, 2, 3, 4], dtype=np.int64), "float64"),
- (NumericIndex([0, 1, 2, 3, 4], dtype=np.int64), "datetime64[ns]"),
- (NumericIndex([0, 1, 2, 3, 4], dtype=np.int64), "timedelta64[ns]"),
- (NumericIndex([0, 1, 2, 3, 4], dtype=np.float64), "int64"),
+ (Index([0, 1, 2, 3, 4], dtype=np.int64), "float64"),
+ (Index([0, 1, 2, 3, 4], dtype=np.int64), "datetime64[ns]"),
+ (Index([0, 1, 2, 3, 4], dtype=np.int64), "timedelta64[ns]"),
+ (Index([0, 1, 2, 3, 4], dtype=np.float64), "int64"),
(date_range("2017-01-01", periods=5), "int64"),
(timedelta_range("1 day", periods=5), "int64"),
],
@@ -91,9 +90,9 @@ def test_constructor_dtype(self, constructor, breaks, subtype):
@pytest.mark.parametrize(
"breaks",
[
- NumericIndex([0, 1, 2, 3, 4], dtype=np.int64),
- NumericIndex([0, 1, 2, 3, 4], dtype=np.uint64),
- NumericIndex([0, 1, 2, 3, 4], dtype=np.float64),
+ Index([0, 1, 2, 3, 4], dtype=np.int64),
+ Index([0, 1, 2, 3, 4], dtype=np.uint64),
+ Index([0, 1, 2, 3, 4], dtype=np.float64),
date_range("2017-01-01", periods=5),
timedelta_range("1 day", periods=5),
],
@@ -250,8 +249,8 @@ def test_mixed_float_int(self, left_subtype, right_subtype):
right = np.arange(1, 10, dtype=right_subtype)
result = IntervalIndex.from_arrays(left, right)
- expected_left = NumericIndex(left, dtype=np.float64)
- expected_right = NumericIndex(right, dtype=np.float64)
+ expected_left = Index(left, dtype=np.float64)
+ expected_right = Index(right, dtype=np.float64)
expected_subtype = np.float64
tm.assert_index_equal(result.left, expected_left)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 2e83fb642905e..23ff8059933fc 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -37,7 +37,6 @@
period_range,
)
import pandas._testing as tm
-from pandas.core.api import NumericIndex
from pandas.core.indexes.api import (
Index,
MultiIndex,
@@ -194,14 +193,14 @@ def test_constructor_from_frame_series_freq(self):
def test_constructor_int_dtype_nan(self):
# see gh-15187
data = [np.nan]
- expected = NumericIndex(data, dtype=np.float64)
+ expected = Index(data, dtype=np.float64)
result = Index(data, dtype="float")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"klass,dtype,na_val",
[
- (NumericIndex, np.float64, np.nan),
+ (Index, np.float64, np.nan),
(DatetimeIndex, "datetime64[ns]", pd.NaT),
],
)
@@ -867,7 +866,8 @@ def test_isin_nan_common_object(self, nulls_fixture, nulls_fixture2):
np.array([False, False]),
)
- def test_isin_nan_common_float64(self, nulls_fixture):
+ def test_isin_nan_common_float64(self, nulls_fixture, float_numpy_dtype):
+ dtype = float_numpy_dtype
if nulls_fixture is pd.NaT or nulls_fixture is pd.NA:
# Check 1) that we cannot construct a float64 Index with this value
@@ -877,13 +877,13 @@ def test_isin_nan_common_float64(self, nulls_fixture):
f"not {repr(type(nulls_fixture).__name__)}"
)
with pytest.raises(TypeError, match=msg):
- NumericIndex([1.0, nulls_fixture], dtype=np.float64)
+ Index([1.0, nulls_fixture], dtype=dtype)
- idx = NumericIndex([1.0, np.nan], dtype=np.float64)
+ idx = Index([1.0, np.nan], dtype=dtype)
assert not idx.isin([nulls_fixture]).any()
return
- idx = NumericIndex([1.0, nulls_fixture], dtype=np.float64)
+ idx = Index([1.0, nulls_fixture], dtype=dtype)
res = idx.isin([np.nan])
tm.assert_numpy_array_equal(res, np.array([False, True]))
@@ -896,7 +896,7 @@ def test_isin_nan_common_float64(self, nulls_fixture):
"index",
[
Index(["qux", "baz", "foo", "bar"]),
- NumericIndex([1.0, 2.0, 3.0, 4.0], dtype=np.float64),
+ Index([1.0, 2.0, 3.0, 4.0], dtype=np.float64),
],
)
def test_isin_level_kwarg(self, level, index):
@@ -1149,7 +1149,7 @@ def test_reindex_doesnt_preserve_type_if_target_is_empty_index_numeric(
# GH7774
dtype = any_real_numpy_dtype
index = Index(list("abc"))
- labels = NumericIndex([], dtype=dtype)
+ labels = Index([], dtype=dtype)
assert index.reindex(labels)[0].dtype == dtype
def test_reindex_no_type_preserve_target_empty_mi(self):
@@ -1595,11 +1595,9 @@ def test_validate_1d_input(dtype):
"klass, extra_kwargs",
[
[Index, {}],
- [lambda x: NumericIndex(x, np.int64), {}],
- [lambda x: NumericIndex(x, np.float64), {}],
+ *[[lambda x: Index(x, dtype=dtyp), {}] for dtyp in tm.ALL_REAL_NUMPY_DTYPES],
[DatetimeIndex, {}],
[TimedeltaIndex, {}],
- [NumericIndex, {}],
[PeriodIndex, {"freq": "Y"}],
],
)
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index 1476a93ea0a11..957e7031f9bf6 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -18,7 +18,10 @@
)
from pandas.errors import PerformanceWarning
-from pandas.core.dtypes.common import is_integer_dtype
+from pandas.core.dtypes.common import (
+ is_integer_dtype,
+ is_numeric_dtype,
+)
import pandas as pd
from pandas import (
@@ -316,7 +319,7 @@ def test_drop_duplicates(self, index_flat, keep):
# make unique index
holder = type(index)
unique_values = list(set(index))
- dtype = index.dtype if isinstance(index, NumericIndex) else None
+ dtype = index.dtype if is_numeric_dtype(index) else None
unique_idx = holder(unique_values, dtype=dtype)
# make duplicated index
@@ -345,7 +348,7 @@ def test_drop_duplicates_no_duplicates(self, index_flat):
else:
holder = type(index)
unique_values = list(set(index))
- dtype = index.dtype if isinstance(index, NumericIndex) else None
+ dtype = index.dtype if is_numeric_dtype(index) else None
unique_idx = holder(unique_values, dtype=dtype)
# check on unique index
diff --git a/pandas/tests/indexes/test_index_new.py b/pandas/tests/indexes/test_index_new.py
index acc9d499a9f6b..564d3abc4f1fe 100644
--- a/pandas/tests/indexes/test_index_new.py
+++ b/pandas/tests/indexes/test_index_new.py
@@ -29,7 +29,6 @@
timedelta_range,
)
import pandas._testing as tm
-from pandas.core.api import NumericIndex
class TestIndexConstructorInference:
@@ -84,7 +83,7 @@ def test_construction_list_tuples_nan(self, na_value, vtype):
)
def test_constructor_int_dtype_float(self, dtype):
# GH#18400
- expected = NumericIndex([0, 1, 2, 3], dtype=dtype)
+ expected = Index([0, 1, 2, 3], dtype=dtype)
result = Index([0.0, 1.0, 2.0, 3.0], dtype=dtype)
tm.assert_index_equal(result, expected)
@@ -283,7 +282,7 @@ def test_constructor_int_dtype_nan_raises(self, dtype):
)
def test_constructor_dtypes_to_int(self, vals, any_int_numpy_dtype):
dtype = any_int_numpy_dtype
- index = NumericIndex(vals, dtype=dtype)
+ index = Index(vals, dtype=dtype)
assert index.dtype == dtype
@pytest.mark.parametrize(
@@ -298,7 +297,7 @@ def test_constructor_dtypes_to_int(self, vals, any_int_numpy_dtype):
)
def test_constructor_dtypes_to_float(self, vals, float_numpy_dtype):
dtype = float_numpy_dtype
- index = NumericIndex(vals, dtype=dtype)
+ index = Index(vals, dtype=dtype)
assert index.dtype == dtype
@pytest.mark.parametrize(
diff --git a/pandas/tests/indexes/test_indexing.py b/pandas/tests/indexes/test_indexing.py
index 2b9c6323113d5..d573ee9759112 100644
--- a/pandas/tests/indexes/test_indexing.py
+++ b/pandas/tests/indexes/test_indexing.py
@@ -33,7 +33,6 @@
TimedeltaIndex,
)
import pandas._testing as tm
-from pandas.core.api import NumericIndex
class TestTake:
@@ -141,7 +140,7 @@ def test_contains_with_float_index(self, any_real_numpy_dtype):
# GH#22085
dtype = any_real_numpy_dtype
data = [0, 1, 2, 3] if not is_float_dtype(dtype) else [0.1, 1.1, 2.2, 3.3]
- index = NumericIndex(data, dtype=dtype)
+ index = Index(data, dtype=dtype)
if not is_float_dtype(index.dtype):
assert 1.1 not in index
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index a32d422ad2905..77b2b622a8439 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -8,7 +8,6 @@
Series,
)
import pandas._testing as tm
-from pandas.core.api import NumericIndex
def gen_obj(klass, index):
@@ -261,9 +260,9 @@ def test_slice_integer(self):
# oob indicates if we are out of bounds
# of positional indexing
for index, oob in [
- (NumericIndex(np.arange(5, dtype=np.int64)), False),
+ (Index(np.arange(5, dtype=np.int64)), False),
(RangeIndex(5), False),
- (NumericIndex(np.arange(5, dtype=np.int64) + 10), True),
+ (Index(np.arange(5, dtype=np.int64) + 10), True),
]:
# s is an in-range index
| xref #42717. | https://api.github.com/repos/pandas-dev/pandas/pulls/51127 | 2023-02-02T20:00:14Z | 2023-02-03T00:56:07Z | 2023-02-03T00:56:07Z | 2023-02-03T00:56:14Z |
CLN: Put exit_stack inside _query_iterator. | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index b4624a1f4a447..d88decc8601f0 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -75,14 +75,6 @@
# -- Helper functions
-def _cleanup_after_generator(generator, exit_stack: ExitStack):
- """Does the cleanup after iterating through the generator."""
- try:
- yield from generator
- finally:
- exit_stack.close()
-
-
def _convert_params(sql, params):
"""Convert SQL and params args to DBAPI2.0 compliant format."""
args = [sql]
@@ -1093,6 +1085,7 @@ def insert(
def _query_iterator(
self,
result,
+ exit_stack: ExitStack,
chunksize: str | None,
columns,
coerce_float: bool = True,
@@ -1101,28 +1094,29 @@ def _query_iterator(
):
"""Return generator through chunked result set."""
has_read_data = False
- while True:
- data = result.fetchmany(chunksize)
- if not data:
- if not has_read_data:
- yield DataFrame.from_records(
- [], columns=columns, coerce_float=coerce_float
- )
- break
+ with exit_stack:
+ while True:
+ data = result.fetchmany(chunksize)
+ if not data:
+ if not has_read_data:
+ yield DataFrame.from_records(
+ [], columns=columns, coerce_float=coerce_float
+ )
+ break
- has_read_data = True
- self.frame = _convert_arrays_to_dataframe(
- data, columns, coerce_float, use_nullable_dtypes
- )
+ has_read_data = True
+ self.frame = _convert_arrays_to_dataframe(
+ data, columns, coerce_float, use_nullable_dtypes
+ )
- self._harmonize_columns(
- parse_dates=parse_dates, use_nullable_dtypes=use_nullable_dtypes
- )
+ self._harmonize_columns(
+ parse_dates=parse_dates, use_nullable_dtypes=use_nullable_dtypes
+ )
- if self.index is not None:
- self.frame.set_index(self.index, inplace=True)
+ if self.index is not None:
+ self.frame.set_index(self.index, inplace=True)
- yield self.frame
+ yield self.frame
def read(
self,
@@ -1147,16 +1141,14 @@ def read(
column_names = result.keys()
if chunksize is not None:
- return _cleanup_after_generator(
- self._query_iterator(
- result,
- chunksize,
- column_names,
- coerce_float=coerce_float,
- parse_dates=parse_dates,
- use_nullable_dtypes=use_nullable_dtypes,
- ),
+ return self._query_iterator(
+ result,
exit_stack,
+ chunksize,
+ column_names,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ use_nullable_dtypes=use_nullable_dtypes,
)
else:
data = result.fetchall()
@@ -1693,6 +1685,7 @@ def read_table(
@staticmethod
def _query_iterator(
result,
+ exit_stack: ExitStack,
chunksize: int,
columns,
index_col=None,
@@ -1703,31 +1696,32 @@ def _query_iterator(
):
"""Return generator through chunked result set"""
has_read_data = False
- while True:
- data = result.fetchmany(chunksize)
- if not data:
- if not has_read_data:
- yield _wrap_result(
- [],
- columns,
- index_col=index_col,
- coerce_float=coerce_float,
- parse_dates=parse_dates,
- dtype=dtype,
- use_nullable_dtypes=use_nullable_dtypes,
- )
- break
+ with exit_stack:
+ while True:
+ data = result.fetchmany(chunksize)
+ if not data:
+ if not has_read_data:
+ yield _wrap_result(
+ [],
+ columns,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ dtype=dtype,
+ use_nullable_dtypes=use_nullable_dtypes,
+ )
+ break
- has_read_data = True
- yield _wrap_result(
- data,
- columns,
- index_col=index_col,
- coerce_float=coerce_float,
- parse_dates=parse_dates,
- dtype=dtype,
- use_nullable_dtypes=use_nullable_dtypes,
- )
+ has_read_data = True
+ yield _wrap_result(
+ data,
+ columns,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ dtype=dtype,
+ use_nullable_dtypes=use_nullable_dtypes,
+ )
def read_query(
self,
@@ -1793,18 +1787,16 @@ def read_query(
if chunksize is not None:
self.returns_generator = True
- return _cleanup_after_generator(
- self._query_iterator(
- result,
- chunksize,
- columns,
- index_col=index_col,
- coerce_float=coerce_float,
- parse_dates=parse_dates,
- dtype=dtype,
- use_nullable_dtypes=use_nullable_dtypes,
- ),
+ return self._query_iterator(
+ result,
self.exit_stack,
+ chunksize,
+ columns,
+ index_col=index_col,
+ coerce_float=coerce_float,
+ parse_dates=parse_dates,
+ dtype=dtype,
+ use_nullable_dtypes=use_nullable_dtypes,
)
else:
data = result.fetchall()
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
This closes up a follow up item from the pull request #49967 . | https://api.github.com/repos/pandas-dev/pandas/pulls/51125 | 2023-02-02T18:53:47Z | 2023-02-03T19:21:42Z | 2023-02-03T19:21:42Z | 2023-02-09T18:36:13Z |
DOC: remove inplace usage from docstring examples | diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index f2e54185c11ff..7197994bfbcb1 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1673,13 +1673,6 @@ def sort_values(
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
- Inplace sorting can be done as well:
-
- >>> c.sort_values(inplace=True)
- >>> c
- [1, 1, 2, 2, 5]
- Categories (3, int64): [1, 2, 5]
- >>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 36a7ef7cd6d9e..4f13ead4005e7 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4512,17 +4512,6 @@ def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None:
3 4 4
4 5 2
- Use ``inplace=True`` to modify the original DataFrame.
-
- >>> df.eval('C = A + B', inplace=True)
- >>> df
- A B C
- 0 1 10 11
- 1 2 8 10
- 2 3 6 9
- 3 4 4 8
- 4 5 2 7
-
Multiple columns can be assigned to using multi-line expressions:
>>> df.eval(
@@ -4998,14 +4987,6 @@ def align(
0 1 4
1 2 5
2 3 6
-
- Now, update the labels without copying the underlying data.
-
- >>> df.set_axis(['i', 'ii'], axis='columns', copy=False)
- i ii
- 0 1 4
- 1 2 5
- 2 3 6
"""
)
@Substitution(
@@ -6372,13 +6353,6 @@ def dropna(
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
-
- Keep the DataFrame with valid entries in the same variable.
-
- >>> df.dropna(inplace=True)
- >>> df
- name toy born
- 1 Batman Batmobile 1940-04-25
"""
if (how is not no_default) and (thresh is not no_default):
raise TypeError(
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 8fa86e80e1a44..6211248129ee6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6216,17 +6216,6 @@ def astype(
dtype: category
Categories (2, int64): [2 < 1]
- Note that using ``copy=False`` and changing data on a new
- pandas object may propagate changes:
-
- >>> s1 = pd.Series([1, 2])
- >>> s2 = s1.astype('int64', copy=False)
- >>> s2[0] = 10
- >>> s1 # note that s1[0] has changed too
- 0 10
- 1 2
- dtype: int64
-
Create a series of dates:
>>> ser_date = pd.Series(pd.date_range('20200101', periods=3))
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 993fefdc91aa0..3eef90343bdf6 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1745,13 +1745,7 @@ def set_names(
( 'cobra', 2018),
( 'cobra', 2019)],
)
- >>> idx.set_names(['kind', 'year'], inplace=True)
- >>> idx
- MultiIndex([('python', 2018),
- ('python', 2019),
- ( 'cobra', 2018),
- ( 'cobra', 2019)],
- names=['kind', 'year'])
+ >>> idx = idx.set_names(['kind', 'year'])
>>> idx.set_names('species', level=0)
MultiIndex([('python', 2018),
('python', 2019),
diff --git a/pandas/core/series.py b/pandas/core/series.py
index ed23edc55805d..abe31d0dbd52a 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1494,17 +1494,6 @@ def reset_index(
3 4
Name: foo, dtype: int64
- To update the Series in place, without generating a new one
- set `inplace` to True. Note that it also requires ``drop=True``.
-
- >>> s.reset_index(inplace=True, drop=True)
- >>> s
- 0 1
- 1 2
- 2 3
- 3 4
- Name: foo, dtype: int64
-
The `level` parameter is interesting for Series with a multi-level
index.
@@ -2242,11 +2231,9 @@ def drop_duplicates(
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
- duplicated entries. Setting the value of 'inplace' to ``True`` performs
- the operation inplace and returns ``None``.
+ duplicated entries.
- >>> s.drop_duplicates(keep=False, inplace=True)
- >>> s
+ >>> s.drop_duplicates(keep=False)
1 cow
3 beetle
5 hippo
@@ -3490,17 +3477,6 @@ def sort_values(
0 NaN
dtype: float64
- Sort values inplace
-
- >>> s.sort_values(ascending=False, inplace=True)
- >>> s
- 3 10.0
- 4 5.0
- 2 3.0
- 1 1.0
- 0 NaN
- dtype: float64
-
Sort values putting NAs first
>>> s.sort_values(na_position='first')
@@ -3750,16 +3726,6 @@ def sort_index(
1 c
dtype: object
- Sort Inplace
-
- >>> s.sort_index(inplace=True)
- >>> s
- 1 c
- 2 b
- 3 a
- 4 d
- dtype: object
-
By default NaNs are put at the end, but use `na_position` to place
them at the beginning
@@ -5619,14 +5585,6 @@ def dropna(
1 2.0
dtype: float64
- Keep the Series with valid entries in the same variable.
-
- >>> ser.dropna(inplace=True)
- >>> ser
- 0 1.0
- 1 2.0
- dtype: float64
-
Empty strings are not considered NA values. ``None`` is considered an
NA value.
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Removes the inplace=True usage examples from the docstring examples. | https://api.github.com/repos/pandas-dev/pandas/pulls/51124 | 2023-02-02T18:01:21Z | 2023-02-03T16:10:56Z | 2023-02-03T16:10:56Z | 2023-02-03T16:35:05Z |
TYP: suppress pyright's information-level output | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index dfbd24767a189..39c1f2b3a6c85 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -135,11 +135,11 @@ repos:
types: [python]
stages: [manual]
additional_dependencies: &pyright_dependencies
- - pyright@1.1.284
+ - pyright@1.1.292
- id: pyright_reportGeneralTypeIssues
# note: assumes python env is setup and activated
name: pyright reportGeneralTypeIssues
- entry: pyright --skipunannotated -p pyright_reportGeneralTypeIssues.json
+ entry: pyright --skipunannotated -p pyright_reportGeneralTypeIssues.json --level warning
language: node
pass_filenames: false
types: [python]
| `pre-commit run --hook-stage manual -av pyright_reportGeneralTypeIssues` is currently very verbose as pyright prints a line for each un-annotated function it skips. This PR removes these information-level messages (still prints warnings and errors). | https://api.github.com/repos/pandas-dev/pandas/pulls/51122 | 2023-02-02T16:52:02Z | 2023-02-03T19:22:27Z | 2023-02-03T19:22:27Z | 2023-08-09T15:08:26Z |
CI: Pin pyarrow to < 11.0 | diff --git a/.github/actions/setup-conda/action.yml b/.github/actions/setup-conda/action.yml
index 002d0020c2df1..f4b1d9e49f63a 100644
--- a/.github/actions/setup-conda/action.yml
+++ b/.github/actions/setup-conda/action.yml
@@ -18,7 +18,7 @@ runs:
- name: Set Arrow version in ${{ inputs.environment-file }} to ${{ inputs.pyarrow-version }}
run: |
grep -q ' - pyarrow' ${{ inputs.environment-file }}
- sed -i"" -e "s/ - pyarrow/ - pyarrow=${{ inputs.pyarrow-version }}/" ${{ inputs.environment-file }}
+ sed -i"" -e "s/ - pyarrow<11/ - pyarrow=${{ inputs.pyarrow-version }}/" ${{ inputs.environment-file }}
cat ${{ inputs.environment-file }}
shell: bash
if: ${{ inputs.pyarrow-version }}
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml
index b500cf66b10c2..25032ed1c76b0 100644
--- a/ci/deps/actions-310.yaml
+++ b/ci/deps/actions-310.yaml
@@ -42,7 +42,7 @@ dependencies:
- psycopg2
- pymysql
- pytables
- - pyarrow
+ - pyarrow<11
- pyreadstat
- python-snappy
- pyxlsb
diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml
index 8e15c7b4740c5..aef97c232e940 100644
--- a/ci/deps/actions-311.yaml
+++ b/ci/deps/actions-311.yaml
@@ -42,7 +42,7 @@ dependencies:
- psycopg2
- pymysql
# - pytables>=3.8.0 # first version that supports 3.11
- - pyarrow
+ - pyarrow<11
- pyreadstat
- python-snappy
- pyxlsb
diff --git a/ci/deps/actions-38-downstream_compat.yaml b/ci/deps/actions-38-downstream_compat.yaml
index 151cabfacb434..1de392a9cc277 100644
--- a/ci/deps/actions-38-downstream_compat.yaml
+++ b/ci/deps/actions-38-downstream_compat.yaml
@@ -40,7 +40,7 @@ dependencies:
- openpyxl
- odfpy
- psycopg2
- - pyarrow
+ - pyarrow<11
- pymysql
- pyreadstat
- pytables
diff --git a/ci/deps/actions-38.yaml b/ci/deps/actions-38.yaml
index bd1246ddc7a3e..803b0bdbff793 100644
--- a/ci/deps/actions-38.yaml
+++ b/ci/deps/actions-38.yaml
@@ -40,7 +40,7 @@ dependencies:
- odfpy
- pandas-gbq
- psycopg2
- - pyarrow
+ - pyarrow<11
- pymysql
- pyreadstat
- pytables
diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml
index 93b73b20591b0..5ce5681aa9e21 100644
--- a/ci/deps/actions-39.yaml
+++ b/ci/deps/actions-39.yaml
@@ -41,7 +41,7 @@ dependencies:
- pandas-gbq
- psycopg2
- pymysql
- - pyarrow
+ - pyarrow<11
- pyreadstat
- pytables
- python-snappy
diff --git a/ci/deps/circle-38-arm64.yaml b/ci/deps/circle-38-arm64.yaml
index addbda194cc0c..7dcb84dc8874c 100644
--- a/ci/deps/circle-38-arm64.yaml
+++ b/ci/deps/circle-38-arm64.yaml
@@ -40,7 +40,7 @@ dependencies:
- odfpy
- pandas-gbq
- psycopg2
- - pyarrow
+ - pyarrow<11
- pymysql
# Not provided on ARM
#- pyreadstat
diff --git a/environment.yml b/environment.yml
index 47627fcac32e1..076e6fa727332 100644
--- a/environment.yml
+++ b/environment.yml
@@ -43,7 +43,7 @@ dependencies:
- odfpy
- py
- psycopg2
- - pyarrow
+ - pyarrow<11
- pymysql
- pyreadstat
- pytables
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 112f90222427b..04d8b176dffae 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -32,7 +32,7 @@ openpyxl
odfpy
py
psycopg2-binary
-pyarrow
+pyarrow<11
pymysql
pyreadstat
tables
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
cc @MarcoGorelli | https://api.github.com/repos/pandas-dev/pandas/pulls/51120 | 2023-02-02T10:16:07Z | 2023-02-02T13:41:07Z | 2023-02-02T13:41:07Z | 2023-02-02T15:20:16Z |
DOC: Add rolling in reference/groupby.rst | diff --git a/doc/source/reference/groupby.rst b/doc/source/reference/groupby.rst
index 54b2e893bfd08..8374b0c739f89 100644
--- a/doc/source/reference/groupby.rst
+++ b/doc/source/reference/groupby.rst
@@ -97,6 +97,7 @@ Function application
DataFrameGroupBy.quantile
DataFrameGroupBy.rank
DataFrameGroupBy.resample
+ DataFrameGroupBy.rolling
DataFrameGroupBy.sample
DataFrameGroupBy.sem
DataFrameGroupBy.shift
@@ -152,6 +153,7 @@ Function application
SeriesGroupBy.quantile
SeriesGroupBy.rank
SeriesGroupBy.resample
+ SeriesGroupBy.rolling
SeriesGroupBy.sample
SeriesGroupBy.sem
SeriesGroupBy.shift
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index fd9a06a06cfa7..bce2886a36f34 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -2722,11 +2722,134 @@ def resample(self, rule, *args, **kwargs):
return get_resampler_for_grouping(self, rule, *args, **kwargs)
@final
- @Substitution(name="groupby")
- @Appender(_common_see_also)
def rolling(self, *args, **kwargs) -> RollingGroupby:
"""
Return a rolling grouper, providing rolling functionality per group.
+
+ Parameters
+ ----------
+ window : int, timedelta, str, offset, or BaseIndexer subclass
+ Size of the moving window.
+
+ If an integer, the fixed number of observations used for
+ each window.
+
+ If a timedelta, str, or offset, the time period of each window. Each
+ window will be a variable sized based on the observations included in
+ the time-period. This is only valid for datetimelike indexes.
+ To learn more about the offsets & frequency strings, please see `this link
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
+
+ If a BaseIndexer subclass, the window boundaries
+ based on the defined ``get_window_bounds`` method. Additional rolling
+ keyword arguments, namely ``min_periods``, ``center``, ``closed`` and
+ ``step`` will be passed to ``get_window_bounds``.
+
+ min_periods : int, default None
+ Minimum number of observations in window required to have a value;
+ otherwise, result is ``np.nan``.
+
+ For a window that is specified by an offset,
+ ``min_periods`` will default to 1.
+
+ For a window that is specified by an integer, ``min_periods`` will default
+ to the size of the window.
+
+ center : bool, default False
+ If False, set the window labels as the right edge of the window index.
+
+ If True, set the window labels as the center of the window index.
+
+ win_type : str, default None
+ If ``None``, all points are evenly weighted.
+
+ If a string, it must be a valid `scipy.signal window function
+ <https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__.
+
+ Certain Scipy window types require additional parameters to be passed
+ in the aggregation function. The additional parameters must match
+ the keywords specified in the Scipy window type method signature.
+
+ on : str, optional
+ For a DataFrame, a column label or Index level on which
+ to calculate the rolling window, rather than the DataFrame's index.
+
+ Provided integer column is ignored and excluded from result since
+ an integer index is not used to calculate the rolling window.
+
+ axis : int or str, default 0
+ If ``0`` or ``'index'``, roll across the rows.
+
+ If ``1`` or ``'columns'``, roll across the columns.
+
+ For `Series` this parameter is unused and defaults to 0.
+
+ closed : str, default None
+ If ``'right'``, the first point in the window is excluded from calculations.
+
+ If ``'left'``, the last point in the window is excluded from calculations.
+
+ If ``'both'``, the no points in the window are excluded from calculations.
+
+ If ``'neither'``, the first and last points in the window are excluded
+ from calculations.
+
+ Default ``None`` (``'right'``).
+
+ method : str {'single', 'table'}, default 'single'
+ Execute the rolling operation per single column or row (``'single'``)
+ or over the entire object (``'table'``).
+
+ This argument is only implemented when specifying ``engine='numba'``
+ in the method call.
+
+ Returns
+ -------
+ RollingGroupby
+ Return a new grouper with our rolling appended.
+
+ See Also
+ --------
+ Series.rolling : Calling object with Series data.
+ DataFrame.rolling : Calling object with DataFrames.
+ Series.groupby : Apply a function groupby to a Series.
+ DataFrame.groupby : Apply a function groupby.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame({'A': [1, 1, 2, 2],
+ ... 'B': [1, 2, 3, 4],
+ ... 'C': [0.362, 0.227, 1.267, -0.562]})
+ >>> df
+ A B C
+ 0 1 1 0.362
+ 1 1 2 0.227
+ 2 2 3 1.267
+ 3 2 4 -0.562
+
+ >>> df.groupby('A').rolling(2).sum()
+ B C
+ A
+ 1 0 NaN NaN
+ 1 3.0 0.589
+ 2 2 NaN NaN
+ 3 7.0 0.705
+
+ >>> df.groupby('A').rolling(2, min_periods=1).sum()
+ B C
+ A
+ 1 0 1.0 0.362
+ 1 3.0 0.589
+ 2 2 3.0 1.267
+ 3 7.0 0.705
+
+ >>> df.groupby('A').rolling(2, on='B').sum()
+ B C
+ A
+ 1 0 1 NaN
+ 1 2 0.589
+ 2 2 3 NaN
+ 3 4 0.705
"""
from pandas.core.window import RollingGroupby
| - [ ] close #51097
| https://api.github.com/repos/pandas-dev/pandas/pulls/51119 | 2023-02-02T08:30:58Z | 2023-02-08T16:47:29Z | 2023-02-08T16:47:29Z | 2023-02-09T04:17:43Z |
DEPR: remove NumericIndex.__new__ & ._should_fallback_to_positional | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 881e83313ced5..993fefdc91aa0 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5689,6 +5689,8 @@ def _should_fallback_to_positional(self) -> bool:
"""
Should an integer key be treated as positional?
"""
+ if isinstance(self.dtype, np.dtype) and self.dtype.kind in ["i", "u", "f"]:
+ return False
return not self._holds_integer()
_index_shared_docs[
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index d26f8c01dc786..8113b5ea2bb2a 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -1,33 +1,7 @@
from __future__ import annotations
-import numpy as np
-
-from pandas._typing import Dtype
-from pandas.util._decorators import (
- cache_readonly,
- doc,
-)
-
from pandas.core.indexes.base import Index
class NumericIndex(Index):
- def __new__(
- cls, data=None, dtype: Dtype | None = None, copy: bool = False, name=None
- ) -> NumericIndex:
- # temporary scaffolding, will be removed soon.
- if isinstance(data, list) and len(data) == 0:
- data = np.array([], dtype=np.int64)
- elif isinstance(data, range):
- data = np.arange(data.start, data.stop, data.step, dtype=np.int64)
- return super().__new__(
- cls, data=data, dtype=dtype, copy=copy, name=name
- ) # type: ignore[return-value]
-
- # ----------------------------------------------------------------
- # Indexing Methods
-
- @cache_readonly
- @doc(Index._should_fallback_to_positional)
- def _should_fallback_to_positional(self) -> bool:
- return False
+ pass
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index d78da49c967ab..2e83fb642905e 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -486,7 +486,7 @@ def test_fancy(self, simple_index):
@pytest.mark.parametrize("dtype", [np.int_, np.bool_])
def test_empty_fancy(self, index, dtype):
empty_arr = np.array([], dtype=dtype)
- empty_index = type(index)([])
+ empty_index = type(index)([], dtype=index.dtype)
assert index[[]].identical(empty_index)
assert index[empty_arr].identical(empty_index)
@@ -500,7 +500,7 @@ def test_empty_fancy_raises(self, index):
# DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
empty_farr = np.array([], dtype=np.float_)
- empty_index = type(index)([])
+ empty_index = type(index)([], dtype=index.dtype)
assert index[[]].identical(empty_index)
# np.ndarray only accepts ndarray of int & bool dtypes, so should Index
| Remove `NumericIndex.__new__` & `._should_fallback_to_positional` to Index. Next up is to actually remove `NumericIndex` from the code base (there is various scaffolding around, that I need to remove/alter at the same time).
This move `NumericIndex._should_fallback_to_positional` to `Index` as-is. There is an issue #51053 that should be fixed, I'll take that in a follow-up so I can get `NumericIndex` removed here, without worrying about that.
xref #42717. | https://api.github.com/repos/pandas-dev/pandas/pulls/51118 | 2023-02-02T07:17:19Z | 2023-02-02T17:17:46Z | 2023-02-02T17:17:46Z | 2023-02-02T18:24:27Z |
REF: remove group_selection_context | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 696f924e31179..fd9a06a06cfa7 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -8,7 +8,6 @@ class providing the base-class of operations.
"""
from __future__ import annotations
-from contextlib import contextmanager
import datetime
from functools import (
partial,
@@ -19,7 +18,6 @@ class providing the base-class of operations.
from typing import (
TYPE_CHECKING,
Callable,
- Generator,
Hashable,
Iterable,
Iterator,
@@ -611,7 +609,6 @@ def f(self):
class BaseGroupBy(PandasObject, SelectionMixin[NDFrameT], GroupByIndexingMixin):
- _group_selection: IndexLabel | None = None
_hidden_attrs = PandasObject._hidden_attrs | {
"as_index",
"axis",
@@ -738,8 +735,6 @@ def _selected_obj(self):
# that and avoid making a copy.
return self._obj_with_exclusions
- if self._group_selection is not None:
- return self._obj_with_exclusions
return self.obj
@final
@@ -961,12 +956,8 @@ def __getattr__(self, attr: str):
def _op_via_apply(self, name: str, *args, **kwargs):
"""Compute the result of an operation by using GroupBy's apply."""
f = getattr(type(self._obj_with_exclusions), name)
- with self._group_selection_context():
- # need to setup the selection
- # as are not passed directly but in the grouper
- f = getattr(type(self._obj_with_exclusions), name)
- if not callable(f):
- return self.apply(lambda self: getattr(self, name))
+ if not callable(f):
+ return self.apply(lambda self: getattr(self, name))
sig = inspect.signature(f)
@@ -1008,56 +999,6 @@ def curried(x):
# -----------------------------------------------------------------
# Selection
- @final
- def _set_group_selection(self) -> None:
- """
- Create group based selection.
-
- Used when selection is not passed directly but instead via a grouper.
-
- NOTE: this should be paired with a call to _reset_group_selection
- """
- # This is a no-op for SeriesGroupBy
- grp = self.grouper
- if (
- grp.groupings is None
- or self.obj.ndim == 1
- or self._group_selection is not None
- ):
- return
-
- groupers = self.exclusions
-
- if len(groupers):
- # GH12839 clear selected obj cache when group selection changes
- ax = self.obj._info_axis
- self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
- self._reset_cache("_selected_obj")
-
- @final
- def _reset_group_selection(self) -> None:
- """
- Clear group based selection.
-
- Used for methods needing to return info on each group regardless of
- whether a group selection was previously set.
- """
- if self._group_selection is not None:
- # GH12839 clear cached selection too when changing group selection
- self._group_selection = None
- self._reset_cache("_selected_obj")
-
- @contextmanager
- def _group_selection_context(self) -> Generator[GroupBy, None, None]:
- """
- Set / reset the _group_selection_context.
- """
- self._set_group_selection()
- try:
- yield self
- finally:
- self._reset_group_selection()
-
def _iterate_slices(self) -> Iterable[Series]:
raise AbstractMethodError(self)
@@ -1327,8 +1268,7 @@ def _numba_agg_general(
if self.axis == 1:
raise NotImplementedError("axis=1 is not supported.")
- with self._group_selection_context():
- data = self._selected_obj
+ data = self._obj_with_exclusions
df = data if data.ndim == 2 else data.to_frame()
starts, ends, sorted_index, sorted_data = self._numba_prep(df)
aggregator = executor.generate_shared_aggregator(
@@ -1455,8 +1395,7 @@ def f(g):
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
- with self._group_selection_context():
- return self._python_apply_general(f, self._selected_obj)
+ return self._python_apply_general(f, self._obj_with_exclusions)
return result
@@ -2215,117 +2154,115 @@ def _value_counts(
)
name = "proportion" if normalize else "count"
- with self._group_selection_context():
- df = self.obj
+ df = self.obj
+ obj = self._obj_with_exclusions
- in_axis_names = {
- grouping.name for grouping in self.grouper.groupings if grouping.in_axis
- }
- if isinstance(self._selected_obj, Series):
- _name = self._selected_obj.name
- keys = [] if _name in in_axis_names else [self._selected_obj]
+ in_axis_names = {
+ grouping.name for grouping in self.grouper.groupings if grouping.in_axis
+ }
+ if isinstance(obj, Series):
+ _name = obj.name
+ keys = [] if _name in in_axis_names else [obj]
+ else:
+ unique_cols = set(obj.columns)
+ if subset is not None:
+ subsetted = set(subset)
+ clashing = subsetted & set(in_axis_names)
+ if clashing:
+ raise ValueError(
+ f"Keys {clashing} in subset cannot be in "
+ "the groupby column keys."
+ )
+ doesnt_exist = subsetted - unique_cols
+ if doesnt_exist:
+ raise ValueError(
+ f"Keys {doesnt_exist} in subset do not "
+ f"exist in the DataFrame."
+ )
else:
- unique_cols = set(self._selected_obj.columns)
- if subset is not None:
- subsetted = set(subset)
- clashing = subsetted & set(in_axis_names)
- if clashing:
- raise ValueError(
- f"Keys {clashing} in subset cannot be in "
- "the groupby column keys."
- )
- doesnt_exist = subsetted - unique_cols
- if doesnt_exist:
- raise ValueError(
- f"Keys {doesnt_exist} in subset do not "
- f"exist in the DataFrame."
- )
- else:
- subsetted = unique_cols
-
- keys = [
- # Can't use .values because the column label needs to be preserved
- self._selected_obj.iloc[:, idx]
- for idx, _name in enumerate(self._selected_obj.columns)
- if _name not in in_axis_names and _name in subsetted
- ]
-
- groupings = list(self.grouper.groupings)
- for key in keys:
- grouper, _, _ = get_grouper(
- df,
- key=key,
- axis=self.axis,
- sort=self.sort,
- observed=False,
- dropna=dropna,
- )
- groupings += list(grouper.groupings)
+ subsetted = unique_cols
+
+ keys = [
+ # Can't use .values because the column label needs to be preserved
+ obj.iloc[:, idx]
+ for idx, _name in enumerate(obj.columns)
+ if _name not in in_axis_names and _name in subsetted
+ ]
- # Take the size of the overall columns
- gb = df.groupby(
- groupings,
+ groupings = list(self.grouper.groupings)
+ for key in keys:
+ grouper, _, _ = get_grouper(
+ df,
+ key=key,
+ axis=self.axis,
+ sort=self.sort,
+ observed=False,
+ dropna=dropna,
+ )
+ groupings += list(grouper.groupings)
+
+ # Take the size of the overall columns
+ gb = df.groupby(
+ groupings,
+ sort=self.sort,
+ observed=self.observed,
+ dropna=self.dropna,
+ )
+ result_series = cast(Series, gb.size())
+ result_series.name = name
+
+ # GH-46357 Include non-observed categories
+ # of non-grouping columns regardless of `observed`
+ if any(
+ isinstance(grouping.grouping_vector, (Categorical, CategoricalIndex))
+ and not grouping._observed
+ for grouping in groupings
+ ):
+ levels_list = [ping.result_index for ping in groupings]
+ multi_index, _ = MultiIndex.from_product(
+ levels_list, names=[ping.name for ping in groupings]
+ ).sortlevel()
+ result_series = result_series.reindex(multi_index, fill_value=0)
+
+ if normalize:
+ # Normalize the results by dividing by the original group sizes.
+ # We are guaranteed to have the first N levels be the
+ # user-requested grouping.
+ levels = list(
+ range(len(self.grouper.groupings), result_series.index.nlevels)
+ )
+ indexed_group_size = result_series.groupby(
+ result_series.index.droplevel(levels),
sort=self.sort,
- observed=self.observed,
dropna=self.dropna,
+ ).transform("sum")
+ result_series /= indexed_group_size
+
+ # Handle groups of non-observed categories
+ result_series = result_series.fillna(0.0)
+
+ if sort:
+ # Sort the values and then resort by the main grouping
+ index_level = range(len(self.grouper.groupings))
+ result_series = result_series.sort_values(ascending=ascending).sort_index(
+ level=index_level, sort_remaining=False
)
- result_series = cast(Series, gb.size())
- result_series.name = name
- # GH-46357 Include non-observed categories
- # of non-grouping columns regardless of `observed`
- if any(
- isinstance(grouping.grouping_vector, (Categorical, CategoricalIndex))
- and not grouping._observed
- for grouping in groupings
- ):
- levels_list = [ping.result_index for ping in groupings]
- multi_index, _ = MultiIndex.from_product(
- levels_list, names=[ping.name for ping in groupings]
- ).sortlevel()
- result_series = result_series.reindex(multi_index, fill_value=0)
-
- if normalize:
- # Normalize the results by dividing by the original group sizes.
- # We are guaranteed to have the first N levels be the
- # user-requested grouping.
- levels = list(
- range(len(self.grouper.groupings), result_series.index.nlevels)
- )
- indexed_group_size = result_series.groupby(
- result_series.index.droplevel(levels),
- sort=self.sort,
- dropna=self.dropna,
- ).transform("sum")
- result_series /= indexed_group_size
-
- # Handle groups of non-observed categories
- result_series = result_series.fillna(0.0)
-
- if sort:
- # Sort the values and then resort by the main grouping
- index_level = range(len(self.grouper.groupings))
- result_series = result_series.sort_values(
- ascending=ascending
- ).sort_index(level=index_level, sort_remaining=False)
-
- result: Series | DataFrame
- if self.as_index:
- result = result_series
- else:
- # Convert to frame
- index = result_series.index
- columns = com.fill_missing_names(index.names)
- if name in columns:
- raise ValueError(
- f"Column label '{name}' is duplicate of result column"
- )
- result_series.name = name
- result_series.index = index.set_names(range(len(columns)))
- result_frame = result_series.reset_index()
- result_frame.columns = columns + [name]
- result = result_frame
- return result.__finalize__(self.obj, method="value_counts")
+ result: Series | DataFrame
+ if self.as_index:
+ result = result_series
+ else:
+ # Convert to frame
+ index = result_series.index
+ columns = com.fill_missing_names(index.names)
+ if name in columns:
+ raise ValueError(f"Column label '{name}' is duplicate of result column")
+ result_series.name = name
+ result_series.index = index.set_names(range(len(columns)))
+ result_frame = result_series.reset_index()
+ result_frame.columns = columns + [name]
+ result = result_frame
+ return result.__finalize__(self.obj, method="value_counts")
@final
def sem(self, ddof: int = 1, numeric_only: bool = False):
@@ -2651,36 +2588,36 @@ def describe(
include=None,
exclude=None,
) -> NDFrameT:
- with self._group_selection_context():
- selected_obj = self._selected_obj
- if len(selected_obj) == 0:
- described = selected_obj.describe(
+ obj = self._obj_with_exclusions
+
+ if len(obj) == 0:
+ described = obj.describe(
+ percentiles=percentiles, include=include, exclude=exclude
+ )
+ if obj.ndim == 1:
+ result = described
+ else:
+ result = described.unstack()
+ return result.to_frame().T.iloc[:0]
+
+ with com.temp_setattr(self, "as_index", True):
+ result = self._python_apply_general(
+ lambda x: x.describe(
percentiles=percentiles, include=include, exclude=exclude
- )
- if selected_obj.ndim == 1:
- result = described
- else:
- result = described.unstack()
- return result.to_frame().T.iloc[:0]
-
- with com.temp_setattr(self, "as_index", True):
- result = self._python_apply_general(
- lambda x: x.describe(
- percentiles=percentiles, include=include, exclude=exclude
- ),
- selected_obj,
- not_indexed_same=True,
- )
- if self.axis == 1:
- return result.T
+ ),
+ obj,
+ not_indexed_same=True,
+ )
+ if self.axis == 1:
+ return result.T
- # GH#49256 - properly handle the grouping column(s)
- result = result.unstack()
- if not self.as_index:
- result = self._insert_inaxis_grouper(result)
- result.index = default_index(len(result))
+ # GH#49256 - properly handle the grouping column(s)
+ result = result.unstack()
+ if not self.as_index:
+ result = self._insert_inaxis_grouper(result)
+ result.index = default_index(len(result))
- return result
+ return result
@final
def resample(self, rule, *args, **kwargs):
@@ -3399,25 +3336,25 @@ def ngroup(self, ascending: bool = True):
5 1
dtype: int64
"""
- with self._group_selection_context():
- index = self._selected_obj._get_axis(self.axis)
- comp_ids = self.grouper.group_info[0]
+ obj = self._obj_with_exclusions
+ index = obj._get_axis(self.axis)
+ comp_ids = self.grouper.group_info[0]
- dtype: type
- if self.grouper.has_dropped_na:
- comp_ids = np.where(comp_ids == -1, np.nan, comp_ids)
- dtype = np.float64
- else:
- dtype = np.int64
+ dtype: type
+ if self.grouper.has_dropped_na:
+ comp_ids = np.where(comp_ids == -1, np.nan, comp_ids)
+ dtype = np.float64
+ else:
+ dtype = np.int64
- if any(ping._passed_categorical for ping in self.grouper.groupings):
- # comp_ids reflect non-observed groups, we need only observed
- comp_ids = rank_1d(comp_ids, ties_method="dense") - 1
+ if any(ping._passed_categorical for ping in self.grouper.groupings):
+ # comp_ids reflect non-observed groups, we need only observed
+ comp_ids = rank_1d(comp_ids, ties_method="dense") - 1
- result = self._obj_1d_constructor(comp_ids, index, dtype=dtype)
- if not ascending:
- result = self.ngroups - 1 - result
- return result
+ result = self._obj_1d_constructor(comp_ids, index, dtype=dtype)
+ if not ascending:
+ result = self.ngroups - 1 - result
+ return result
@final
@Substitution(name="groupby")
@@ -3474,10 +3411,9 @@ def cumcount(self, ascending: bool = True):
5 0
dtype: int64
"""
- with self._group_selection_context():
- index = self._selected_obj._get_axis(self.axis)
- cumcounts = self._cumcount_array(ascending=ascending)
- return self._obj_1d_constructor(cumcounts, index)
+ index = self._obj_with_exclusions._get_axis(self.axis)
+ cumcounts = self._cumcount_array(ascending=ascending)
+ return self._obj_1d_constructor(cumcounts, index)
@final
@Substitution(name="groupby")
@@ -3957,7 +3893,6 @@ def head(self, n: int = 5) -> NDFrameT:
A B
0 1 2
"""
- self._reset_group_selection()
mask = self._make_mask_from_positional_indexer(slice(None, n))
return self._mask_selected_obj(mask)
@@ -3997,7 +3932,6 @@ def tail(self, n: int = 5) -> NDFrameT:
1 a 2
3 b 2
"""
- self._reset_group_selection()
if n:
mask = self._make_mask_from_positional_indexer(slice(-n, None))
else:
diff --git a/pandas/core/groupby/indexing.py b/pandas/core/groupby/indexing.py
index 40eaeea66b25c..911ee0e8e4725 100644
--- a/pandas/core/groupby/indexing.py
+++ b/pandas/core/groupby/indexing.py
@@ -280,7 +280,6 @@ def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series:
GroupBy.nth : Take the nth row from each group if n is an int, or a
subset of rows, if n is a list of ints.
"""
- self.groupby_object._reset_group_selection()
mask = self.groupby_object._make_mask_from_positional_indexer(arg)
return self.groupby_object._mask_selected_obj(mask)
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index 81dca42b1d74f..1313c39bb67b2 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -710,11 +710,6 @@ def test_cython_transform_frame(op, args, targop):
# {"by": ['int','string']}]:
gb = df.groupby(group_keys=False, **gb_target)
- # allowlisted methods set the selection before applying
- # bit a of hack to make sure the cythonized shift
- # is equivalent to pre 0.17.1 behavior
- if op == "shift":
- gb._set_group_selection()
if op != "shift" and "int" not in gb_target:
# numeric apply fastpath promotes dtype so have
| cc @rhshadrach | https://api.github.com/repos/pandas-dev/pandas/pulls/51117 | 2023-02-02T01:12:38Z | 2023-02-03T03:47:12Z | 2023-02-03T03:47:12Z | 2023-02-03T04:03:23Z |
CLN: standardize ArrowExtensionArray in tests | diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py
index fad28c1896ad0..3707447151ae3 100644
--- a/pandas/tests/extension/arrow/arrays.py
+++ b/pandas/tests/extension/arrow/arrays.py
@@ -77,7 +77,9 @@ class ArrowExtensionArray(OpsMixin, ExtensionArray):
_data: pa.ChunkedArray
@classmethod
- def from_scalars(cls, values):
+ def _from_sequence(cls, values, dtype=None, copy=False):
+ # TODO: respect dtype, copy
+
if isinstance(values, cls):
# in particular for empty cases the pa.array(np.asarray(...))
# does not round-trip
@@ -91,15 +93,6 @@ def from_scalars(cls, values):
arr = pa.chunked_array([pa.array(np.asarray(values))])
return cls(arr)
- @classmethod
- def from_array(cls, arr):
- assert isinstance(arr, pa.Array)
- return cls(pa.chunked_array([arr]))
-
- @classmethod
- def _from_sequence(cls, scalars, dtype=None, copy=False):
- return cls.from_scalars(scalars)
-
def __repr__(self):
return f"{type(self).__name__}({repr(self._data)})"
@@ -116,7 +109,7 @@ def __getitem__(self, item):
return self._data.to_pandas()[item]
else:
vals = self._data.to_pandas()[item]
- return type(self).from_scalars(vals)
+ return type(self)._from_sequence(vals)
def __len__(self):
return len(self._data)
@@ -160,7 +153,7 @@ def nbytes(self) -> int:
def isna(self):
nas = pd.isna(self._data.to_pandas())
- return type(self).from_scalars(nas)
+ return type(self)._from_sequence(nas)
def take(self, indices, allow_fill=False, fill_value=None):
data = self._data.to_pandas()
@@ -182,7 +175,7 @@ def _concat_same_type(cls, to_concat):
return cls(arr)
def __invert__(self):
- return type(self).from_scalars(~self._data.to_pandas())
+ return type(self)._from_sequence(~self._data.to_pandas())
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if skipna:
diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py
index a73684868e3ae..ddd10dfcb2d60 100644
--- a/pandas/tests/extension/arrow/test_bool.py
+++ b/pandas/tests/extension/arrow/test_bool.py
@@ -23,12 +23,12 @@ def dtype():
def data():
values = np.random.randint(0, 2, size=100, dtype=bool)
values[1] = ~values[0]
- return ArrowBoolArray.from_scalars(values)
+ return ArrowBoolArray._from_sequence(values)
@pytest.fixture
def data_missing():
- return ArrowBoolArray.from_scalars([None, True])
+ return ArrowBoolArray._from_sequence([None, True])
def test_basic_equals(data):
diff --git a/pandas/tests/extension/arrow/test_timestamp.py b/pandas/tests/extension/arrow/test_timestamp.py
index fe2c484731019..28e6ce0e77b34 100644
--- a/pandas/tests/extension/arrow/test_timestamp.py
+++ b/pandas/tests/extension/arrow/test_timestamp.py
@@ -51,10 +51,7 @@ def __init__(self, values):
def test_constructor_extensionblock():
# GH 34986
- pd.DataFrame(
- {
- "timestamp": ArrowTimestampUSArray.from_scalars(
- [None, datetime.datetime(2010, 9, 8, 7, 6, 5, 4)]
- )
- }
+ arr = ArrowTimestampUSArray._from_sequence(
+ [None, datetime.datetime(2010, 9, 8, 7, 6, 5, 4)]
)
+ pd.DataFrame({"timestamp": arr})
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46009 | 2022-02-15T21:57:14Z | 2022-02-16T13:33:13Z | 2022-02-16T13:33:13Z | 2022-02-16T13:33:13Z |
BUG: to_csv not respecting float_format for Float64 | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 71394a858aefe..6fb2914543fc1 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -353,6 +353,7 @@ I/O
- Bug in :func:`read_excel` results in an infinite loop with certain ``skiprows`` callables (:issue:`45585`)
- Bug in :meth:`DataFrame.info` where a new line at the end of the output is omitted when called on an empty :class:`DataFrame` (:issue:`45494`)
- Bug in :func:`read_csv` not recognizing line break for ``on_bad_lines="warn"`` for ``engine="c"`` (:issue:`41710`)
+- Bug in :meth:`DataFrame.to_csv` not respecting ``float_format`` for ``Float64`` dtype (:issue:`45991`)
- Bug in :func:`read_parquet` when ``engine="pyarrow"`` which caused partial write to disk when column of unsupported datatype was passed (:issue:`44914`)
- Bug in :func:`DataFrame.to_excel` and :class:`ExcelWriter` would raise when writing an empty DataFrame to a ``.ods`` file (:issue:`45793`)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index d5ea257af353e..a6a8af9fd5f6a 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -51,6 +51,7 @@
is_dtype_equal,
is_interval_dtype,
is_list_like,
+ is_sparse,
is_string_dtype,
)
from pandas.core.dtypes.dtypes import (
@@ -2257,14 +2258,7 @@ def to_native_types(
results_converted.append(result.astype(object, copy=False))
return np.vstack(results_converted)
- elif isinstance(values, ExtensionArray):
- mask = isna(values)
-
- new_values = np.asarray(values.astype(object))
- new_values[mask] = na_rep
- return new_values
-
- elif values.dtype.kind == "f":
+ elif values.dtype.kind == "f" and not is_sparse(values):
# see GH#13418: no special formatting is desired at the
# output (important for appropriate 'quoting' behaviour),
# so do not pass it through the FloatArrayFormatter
@@ -2294,6 +2288,13 @@ def to_native_types(
res = res.astype(object, copy=False)
return res
+ elif isinstance(values, ExtensionArray):
+ mask = isna(values)
+
+ new_values = np.asarray(values.astype(object))
+ new_values[mask] = na_rep
+ return new_values
+
else:
mask = isna(values)
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index afc3ef7a25cc7..bee17e4e8e162 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -315,6 +315,32 @@ def test_to_csv_date_format_in_categorical(self):
ser = ser.astype("category")
assert ser.to_csv(index=False, date_format="%Y-%m-%d") == expected
+ def test_to_csv_float_ea_float_format(self):
+ # GH#45991
+ df = DataFrame({"a": [1.1, 2.02, pd.NA, 6.000006], "b": "c"})
+ df["a"] = df["a"].astype("Float64")
+ result = df.to_csv(index=False, float_format="%.5f")
+ expected = """a,b
+1.10000,c
+2.02000,c
+,c
+6.00001,c
+"""
+ assert result == expected
+
+ def test_to_csv_float_ea_no_float_format(self):
+ # GH#45991
+ df = DataFrame({"a": [1.1, 2.02, pd.NA, 6.000006], "b": "c"})
+ df["a"] = df["a"].astype("Float64")
+ result = df.to_csv(index=False)
+ expected = """a,b
+1.1,c
+2.02,c
+,c
+6.000006,c
+"""
+ assert result == expected
+
def test_to_csv_multi_index(self):
# see gh-6618
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]))
| - [x] closes #45991 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46007 | 2022-02-15T20:48:49Z | 2022-02-16T13:31:02Z | 2022-02-16T13:31:02Z | 2022-02-16T18:46:31Z |
BUG: PeriodArray subtraction returning wrong results | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 71394a858aefe..3b5d0fc624753 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -358,7 +358,7 @@ I/O
Period
^^^^^^
--
+- Bug in subtraction of :class:`Period` from :class:`PeriodArray` returning wrong results (:issue:`45999`)
-
Plotting
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 04cc70b7efa6a..6189584dff7f1 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -710,7 +710,7 @@ def _sub_period(self, other):
self._check_compatible_with(other)
asi8 = self.asi8
new_data = asi8 - other.ordinal
- new_data = np.array([self.freq * x for x in new_data])
+ new_data = np.array([self.freq.base * x for x in new_data])
if self._hasna:
new_data[self._isnan] = NaT
diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py
index bbb464cb7dfed..06b372abf66f5 100644
--- a/pandas/tests/extension/test_period.py
+++ b/pandas/tests/extension/test_period.py
@@ -25,9 +25,9 @@
from pandas.tests.extension import base
-@pytest.fixture
-def dtype():
- return PeriodDtype(freq="D")
+@pytest.fixture(params=["D", "2D"])
+def dtype(request):
+ return PeriodDtype(freq=request.param)
@pytest.fixture
| - [x] closes #45999 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46006 | 2022-02-15T20:36:57Z | 2022-02-16T13:35:33Z | 2022-02-16T13:35:33Z | 2022-02-16T18:47:26Z |
Backport PR #45995 on branch 1.4.x (CI: Add single_cpu build) | diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml
index 0e8da7b66026f..35196ad2840c6 100644
--- a/.github/workflows/posix.yml
+++ b/.github/workflows/posix.yml
@@ -13,7 +13,6 @@ on:
- "doc/**"
env:
- PYTEST_WORKERS: "auto"
PANDAS_CI: 1
jobs:
@@ -25,33 +24,48 @@ jobs:
timeout-minutes: 120
strategy:
matrix:
- settings: [
- [actions-38-downstream_compat.yaml, "not slow and not network and not single_cpu", "", "", "", "", ""],
- [actions-38-minimum_versions.yaml, "not single_cpu", "", "", "", "", ""],
- [actions-38.yaml, "not slow and not network and not single_cpu", "language-pack-it", "it_IT.utf8", "it_IT.utf8", "", ""],
- [actions-38.yaml, "not slow and not network and not single_cpu", "language-pack-zh-hans", "zh_CN.utf8", "zh_CN.utf8", "", ""],
- [actions-38.yaml, "not single_cpu", "", "", "", "", ""],
- [actions-pypy-38.yaml, "not slow and not single_cpu", "", "", "", "", "--max-worker-restart 0"],
- [actions-39.yaml, "not single_cpu", "", "", "", "", ""],
- [actions-310-numpydev.yaml, "not slow and not network and not single_cpu", "", "", "", "deprecate", "-W error"],
- [actions-310.yaml, "not single_cpu", "", "", "", "", ""],
- ]
+ env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml]
+ pattern: ["not single_cpu", "single_cpu"]
+ include:
+ - env_file: actions-38-downstream_compat.yaml
+ pattern: "not slow and not network and not single_cpu"
+ pytest_target: "pandas/tests/test_downstream.py"
+ - env_file: actions-38-minimum_versions.yaml
+ pattern: "not slow and not network and not single_cpu"
+ - env_file: actions-38.yaml
+ pattern: "not slow and not network and not single_cpu"
+ extra_apt: "language-pack-it"
+ lang: "it_IT.utf8"
+ lc_all: "it_IT.utf8"
+ - env_file: actions-38.yaml
+ pattern: "not slow and not network and not single_cpu"
+ extra_apt: "language-pack-zh-hans"
+ lang: "zh_CN.utf8"
+ lc_all: "zh_CN.utf8"
+ - env_file: actions-pypy-38.yaml
+ pattern: "not slow and not network and not single_cpu"
+ test_args: "--max-worker-restart 0"
+ - env_file: actions-310-numpydev.yaml
+ pattern: "not slow and not network and not single_cpu"
+ pandas_testing_mode: "deprecate"
+ test_args: "-W error"
fail-fast: false
env:
- ENV_FILE: ci/deps/${{ matrix.settings[0] }}
- PATTERN: ${{ matrix.settings[1] }}
- EXTRA_APT: ${{ matrix.settings[2] }}
- LANG: ${{ matrix.settings[3] }}
- LC_ALL: ${{ matrix.settings[4] }}
- PANDAS_TESTING_MODE: ${{ matrix.settings[5] }}
- TEST_ARGS: ${{ matrix.settings[6] }}
- PYTEST_TARGET: pandas
- IS_PYPY: ${{ contains(matrix.settings[0], 'pypy') }}
+ ENV_FILE: ci/deps/${{ matrix.env_file }}
+ PATTERN: ${{ matrix.pattern }}
+ EXTRA_APT: ${{ matrix.extra_apt || '' }}
+ LANG: ${{ matrix.lang || '' }}
+ LC_ALL: ${{ matrix.lc_all || '' }}
+ PANDAS_TESTING_MODE: ${{ matrix.pandas_testing_mode || '' }}
+ TEST_ARGS: ${{ matrix.test_args || '' }}
+ PYTEST_WORKERS: ${{ contains(matrix.pattern, 'not single_cpu') && 'auto' || '1' }}
+ PYTEST_TARGET: ${{ matrix.pytest_target || 'pandas' }}
+ IS_PYPY: ${{ contains(matrix.env_file, 'pypy') }}
# TODO: re-enable coverage on pypy, its slow
- COVERAGE: ${{ !contains(matrix.settings[0], 'pypy') }}
+ COVERAGE: ${{ !contains(matrix.env_file, 'pypy') }}
concurrency:
# https://github.community/t/concurrecy-not-work-for-push/183068/7
- group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.settings[0] }}-${{ matrix.settings[1] }}-${{ matrix.settings[2] }}
+ group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-${{ matrix.pattern }}-${{ matrix.extra_apt || '' }}
cancel-in-progress: true
services:
diff --git a/pandas/tests/io/parser/conftest.py b/pandas/tests/io/parser/conftest.py
index 7428432aed6e5..066f448d97505 100644
--- a/pandas/tests/io/parser/conftest.py
+++ b/pandas/tests/io/parser/conftest.py
@@ -89,7 +89,7 @@ def csv1(datapath):
_py_parsers_only = [_pythonParser]
_c_parsers_only = [_cParserHighMemory, _cParserLowMemory]
-_pyarrow_parsers_only = [_pyarrowParser]
+_pyarrow_parsers_only = [pytest.param(_pyarrowParser, marks=pytest.mark.single_cpu)]
_all_parsers = [*_c_parsers_only, *_py_parsers_only, *_pyarrow_parsers_only]
@@ -108,9 +108,8 @@ def all_parsers(request):
parser = request.param()
if parser.engine == "pyarrow":
pytest.importorskip("pyarrow", VERSIONS["pyarrow"])
- # Try setting num cpus to 1 to avoid hangs on Azure MacOS/Windows builds
- # or better yet find a way to disable threads
- # TODO(GH#44584) pytest.mark.single_cpu these tests
+ # Try finding a way to disable threads all together
+ # for more stable CI runs
import pyarrow
pyarrow.set_cpu_count(1)
@@ -149,8 +148,14 @@ def _get_all_parser_float_precision_combinations():
params = []
ids = []
for parser, parser_id in zip(_all_parsers, _all_parser_ids):
+ if hasattr(parser, "values"):
+ # Wrapped in pytest.param, get the actual parser back
+ parser = parser.values[0]
for precision in parser.float_precision_choices:
- params.append((parser(), precision))
+ # Re-wrap in pytest.param for pyarrow
+ mark = pytest.mark.single_cpu if parser.engine == "pyarrow" else ()
+ param = pytest.param((parser(), precision), marks=mark)
+ params.append(param)
ids.append(f"{parser_id}-{precision}")
return {"params": params, "ids": ids}
| Backport PR #45995: CI: Add single_cpu build | https://api.github.com/repos/pandas-dev/pandas/pulls/46005 | 2022-02-15T20:34:10Z | 2022-02-16T13:05:05Z | 2022-02-16T13:05:05Z | 2022-02-16T13:05:06Z |
TST: Don't mark all plotting tests as slow | diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index 7c11eb13e0e1d..aa6bb714af9dd 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -30,10 +30,6 @@ if [[ "$PATTERN" ]]; then
PYTEST_CMD="$PYTEST_CMD -m \"$PATTERN\""
fi
-if [[ $(uname) != "Linux" && $(uname) != "Darwin" ]]; then
- PYTEST_CMD="$PYTEST_CMD --ignore=pandas/tests/plotting/"
-fi
-
echo $PYTEST_CMD
sh -c "$PYTEST_CMD"
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 216ccddcf05cb..85bc3121f5e4e 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -1793,12 +1793,19 @@ def _load_backend(backend: str) -> types.ModuleType:
found_backend = False
eps = entry_points()
- if "pandas_plotting_backends" in eps:
- for entry_point in eps["pandas_plotting_backends"]:
- found_backend = entry_point.name == backend
- if found_backend:
- module = entry_point.load()
- break
+ key = "pandas_plotting_backends"
+ # entry_points lost dict API ~ PY 3.10
+ # https://github.com/python/importlib_metadata/issues/298
+ if hasattr(eps, "select"):
+ # error: "Dict[str, Tuple[EntryPoint, ...]]" has no attribute "select"
+ entry = eps.select(group=key) # type: ignore[attr-defined]
+ else:
+ entry = eps.get(key, ())
+ for entry_point in entry:
+ found_backend = entry_point.name == backend
+ if found_backend:
+ module = entry_point.load()
+ break
if not found_backend:
# Fall back to unregistered, module name approach.
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 1e91379921b88..df853770b85f1 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -1,8 +1,5 @@
"""
Module consolidating common testing functions for checking plotting.
-
-Currently all plotting tests are marked as slow via
-``pytestmark = pytest.mark.slow`` at the module level.
"""
from __future__ import annotations
diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
index 9861c84b7cc03..818c86dfca424 100644
--- a/pandas/tests/plotting/frame/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -33,11 +33,10 @@
from pandas.io.formats.printing import pprint_thing
import pandas.plotting as plotting
-pytestmark = pytest.mark.slow
-
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
+ @pytest.mark.slow
def test_plot(self):
df = tm.makeTimeDataFrame()
_check_plot_works(df.plot, grid=False)
@@ -163,6 +162,7 @@ def test_nullable_int_plot(self):
_check_plot_works(df[["A", "D"]].plot, x="A", y="D")
_check_plot_works(df[["A", "E"]].plot, x="A", y="E")
+ @pytest.mark.slow
def test_integer_array_plot(self):
# GH 25587
arr = pd.array([1, 2, 3, 4], dtype="UInt32")
@@ -787,6 +787,7 @@ def test_plot_scatter_with_s(self):
ax = df.plot.scatter(x="a", y="b", s="c")
tm.assert_numpy_array_equal(df["c"].values, right=ax.collections[0].get_sizes())
+ @pytest.mark.slow
def test_plot_bar(self):
df = DataFrame(
np.random.randn(6, 4),
@@ -1416,6 +1417,7 @@ def test_pie_df_nan(self):
expected_labels = base_expected[:i] + base_expected[i + 1 :]
assert result_labels == expected_labels
+ @pytest.mark.slow
def test_errorbar_plot(self):
d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
df = DataFrame(d)
@@ -1462,6 +1464,7 @@ def test_errorbar_plot(self):
with tm.external_error_raised(TypeError):
df.plot(yerr=df_err)
+ @pytest.mark.slow
@pytest.mark.parametrize("kind", ["line", "bar", "barh"])
def test_errorbar_plot_different_kinds(self, kind):
d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
@@ -1513,6 +1516,7 @@ def test_errorbar_with_integer_column_names(self):
ax = _check_plot_works(df.plot, y=0, yerr=1)
self._check_has_errorbars(ax, xerr=0, yerr=1)
+ @pytest.mark.slow
def test_errorbar_with_partial_columns(self):
df = DataFrame(np.random.randn(10, 3))
df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
diff --git a/pandas/tests/plotting/frame/test_frame_color.py b/pandas/tests/plotting/frame/test_frame_color.py
index 867570b06ff31..ed0c94aa4dfa0 100644
--- a/pandas/tests/plotting/frame/test_frame_color.py
+++ b/pandas/tests/plotting/frame/test_frame_color.py
@@ -15,8 +15,6 @@
_check_plot_works,
)
-pytestmark = pytest.mark.slow
-
@td.skip_if_no_mpl
class TestDataFrameColor(TestPlotBase):
diff --git a/pandas/tests/plotting/frame/test_frame_groupby.py b/pandas/tests/plotting/frame/test_frame_groupby.py
index e18bdbc5d7579..9c148645966ad 100644
--- a/pandas/tests/plotting/frame/test_frame_groupby.py
+++ b/pandas/tests/plotting/frame/test_frame_groupby.py
@@ -7,8 +7,6 @@
from pandas import DataFrame
from pandas.tests.plotting.common import TestPlotBase
-pytestmark = pytest.mark.slow
-
@td.skip_if_no_mpl
class TestDataFramePlotsGroupby(TestPlotBase):
diff --git a/pandas/tests/plotting/frame/test_frame_legend.py b/pandas/tests/plotting/frame/test_frame_legend.py
index 9501047415e9e..e11aeeccfd5fb 100644
--- a/pandas/tests/plotting/frame/test_frame_legend.py
+++ b/pandas/tests/plotting/frame/test_frame_legend.py
@@ -1,14 +1,14 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
from pandas import (
DataFrame,
date_range,
)
from pandas.tests.plotting.common import TestPlotBase
-pytestmark = pytest.mark.slow
-
class TestFrameLegend(TestPlotBase):
@pytest.mark.xfail(
@@ -45,6 +45,7 @@ def test_legend_false(self):
expected = ["blue", "green", "red"]
assert result == expected
+ @td.skip_if_no_scipy
def test_df_legend_labels(self):
kinds = ["line", "bar", "barh", "kde", "area", "hist"]
df = DataFrame(np.random.rand(3, 3), columns=["a", "b", "c"])
@@ -158,13 +159,21 @@ def test_legend_name(self):
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, "new")
- def test_no_legend(self):
- kinds = ["line", "bar", "barh", "kde", "area", "hist"]
+ @pytest.mark.parametrize(
+ "kind",
+ [
+ "line",
+ "bar",
+ "barh",
+ pytest.param("kde", marks=td.skip_if_no_scipy),
+ "area",
+ "hist",
+ ],
+ )
+ def test_no_legend(self, kind):
df = DataFrame(np.random.rand(3, 3), columns=["a", "b", "c"])
-
- for kind in kinds:
- ax = df.plot(kind=kind, legend=False)
- self._check_legend_labels(ax, visible=False)
+ ax = df.plot(kind=kind, legend=False)
+ self._check_legend_labels(ax, visible=False)
def test_missing_markers_legend(self):
# 14958
diff --git a/pandas/tests/plotting/frame/test_frame_subplots.py b/pandas/tests/plotting/frame/test_frame_subplots.py
index 4805eb558c465..47d91c7975a28 100644
--- a/pandas/tests/plotting/frame/test_frame_subplots.py
+++ b/pandas/tests/plotting/frame/test_frame_subplots.py
@@ -19,11 +19,10 @@
from pandas.io.formats.printing import pprint_thing
-pytestmark = pytest.mark.slow
-
@td.skip_if_no_mpl
class TestDataFramePlotsSubplots(TestPlotBase):
+ @pytest.mark.slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
@@ -237,6 +236,7 @@ def test_subplots_layout_single_column(
)
assert axes.shape == expected_shape
+ @pytest.mark.slow
def test_subplots_warnings(self):
# GH 9464
with tm.assert_produces_warning(None):
@@ -580,19 +580,21 @@ def test_bar_barwidth_position(self, kwargs):
df = DataFrame(np.random.randn(5, 5))
self._check_bar_alignment(df, width=0.9, position=0.2, **kwargs)
- def test_bar_barwidth_position_int(self):
+ @pytest.mark.parametrize("w", [1, 1.0])
+ def test_bar_barwidth_position_int(self, w):
+ # GH 12979
+ df = DataFrame(np.random.randn(5, 5))
+ ax = df.plot.bar(stacked=True, width=w)
+ ticks = ax.xaxis.get_ticklocs()
+ tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))
+ assert ax.get_xlim() == (-0.75, 4.75)
+ # check left-edge of bars
+ assert ax.patches[0].get_x() == -0.5
+ assert ax.patches[-1].get_x() == 3.5
+
+ def test_bar_barwidth_position_int_width_1(self):
# GH 12979
df = DataFrame(np.random.randn(5, 5))
-
- for w in [1, 1.0]:
- ax = df.plot.bar(stacked=True, width=w)
- ticks = ax.xaxis.get_ticklocs()
- tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))
- assert ax.get_xlim() == (-0.75, 4.75)
- # check left-edge of bars
- assert ax.patches[0].get_x() == -0.5
- assert ax.patches[-1].get_x() == 3.5
-
self._check_bar_alignment(df, kind="bar", stacked=True, width=1)
self._check_bar_alignment(df, kind="barh", stacked=False, width=1)
self._check_bar_alignment(df, kind="barh", stacked=True, width=1)
diff --git a/pandas/tests/plotting/frame/test_hist_box_by.py b/pandas/tests/plotting/frame/test_hist_box_by.py
index 282c977f89552..fe39c3d441396 100644
--- a/pandas/tests/plotting/frame/test_hist_box_by.py
+++ b/pandas/tests/plotting/frame/test_hist_box_by.py
@@ -24,6 +24,7 @@ def hist_df():
@td.skip_if_no_mpl
class TestHistWithBy(TestPlotBase):
+ @pytest.mark.slow
@pytest.mark.parametrize(
"by, column, titles, legends",
[
diff --git a/pandas/tests/plotting/test_backend.py b/pandas/tests/plotting/test_backend.py
index 1e3635714b745..82e82781e894b 100644
--- a/pandas/tests/plotting/test_backend.py
+++ b/pandas/tests/plotting/test_backend.py
@@ -12,9 +12,6 @@
setattr(dummy_backend, "plot", lambda *args, **kwargs: "used_dummy")
-pytestmark = pytest.mark.slow
-
-
@pytest.fixture
def restore_backend():
"""Restore the plotting backend to matplotlib"""
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index 207ebd7f2a193..9ca8a71ed1897 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -24,8 +24,6 @@
from pandas.io.formats.printing import pprint_thing
import pandas.plotting as plotting
-pytestmark = pytest.mark.slow
-
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
@@ -50,6 +48,7 @@ def test_stacked_boxplot_set_axis(self):
np.arange(0, 80, 10)
)
+ @pytest.mark.slow
def test_boxplot_legacy1(self):
df = DataFrame(
np.random.randn(6, 4),
@@ -337,6 +336,7 @@ def test_boxplot_legacy1(self, hist_df):
axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
+ @pytest.mark.slow
def test_boxplot_legacy2(self):
tuples = zip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
@@ -381,6 +381,7 @@ def test_grouped_plot_fignums(self):
res = df.groupby("gender").hist()
tm.close()
+ @pytest.mark.slow
def test_grouped_box_return_type(self, hist_df):
df = hist_df
@@ -415,6 +416,7 @@ def test_grouped_box_return_type(self, hist_df):
returned = df2.boxplot(by="category", return_type=t)
self._check_box_return_type(returned, t, expected_keys=columns2)
+ @pytest.mark.slow
def test_grouped_box_layout(self, hist_df):
df = hist_df
@@ -508,6 +510,7 @@ def test_grouped_box_layout(self, hist_df):
)
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))
+ @pytest.mark.slow
def test_grouped_box_multiple_axes(self, hist_df):
# GH 6970, GH 7069
df = hist_df
diff --git a/pandas/tests/plotting/test_common.py b/pandas/tests/plotting/test_common.py
index 6eebf0c01ae52..d4624cfc74872 100644
--- a/pandas/tests/plotting/test_common.py
+++ b/pandas/tests/plotting/test_common.py
@@ -9,8 +9,6 @@
_gen_two_subplots,
)
-pytestmark = pytest.mark.slow
-
@td.skip_if_no_mpl
class TestCommon(TestPlotBase):
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index f3418415695b0..04d3397faddb7 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -43,9 +43,6 @@
dates = pytest.importorskip("matplotlib.dates")
-pytestmark = pytest.mark.slow
-
-
def test_registry_mpl_resets():
# Check that Matplotlib converters are properly reset (see issue #27481)
code = (
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 197083a9ad940..9b08cd0637751 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -41,23 +41,21 @@
from pandas.tseries.offsets import WeekOfMonth
-pytestmark = pytest.mark.slow
-
@td.skip_if_no_mpl
class TestTSPlot(TestPlotBase):
+ @pytest.mark.filterwarnings("ignore::UserWarning")
def test_ts_plot_with_tz(self, tz_aware_fixture):
# GH2877, GH17173, GH31205, GH31580
tz = tz_aware_fixture
index = date_range("1/1/2011", periods=2, freq="H", tz=tz)
ts = Series([188.5, 328.25], index=index)
- with tm.assert_produces_warning(None):
- _check_plot_works(ts.plot)
- ax = ts.plot()
- xdata = list(ax.get_lines())[0].get_xdata()
- # Check first and last points' labels are correct
- assert (xdata[0].hour, xdata[0].minute) == (0, 0)
- assert (xdata[-1].hour, xdata[-1].minute) == (1, 0)
+ _check_plot_works(ts.plot)
+ ax = ts.plot()
+ xdata = list(ax.get_lines())[0].get_xdata()
+ # Check first and last points' labels are correct
+ assert (xdata[0].hour, xdata[0].minute) == (0, 0)
+ assert (xdata[-1].hour, xdata[-1].minute) == (1, 0)
def test_fontsize_set_correctly(self):
# For issue #8765
@@ -497,6 +495,7 @@ def test_finder_annual(self):
assert rs == xp
+ @pytest.mark.slow
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
rng = date_range("1/1/1999", freq="Min", periods=nminutes)
diff --git a/pandas/tests/plotting/test_groupby.py b/pandas/tests/plotting/test_groupby.py
index 997f5abe12078..de81ad20f7370 100644
--- a/pandas/tests/plotting/test_groupby.py
+++ b/pandas/tests/plotting/test_groupby.py
@@ -14,8 +14,6 @@
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase
-pytestmark = pytest.mark.slow
-
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index c2926b4f22372..0955e7808f3f6 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -18,8 +18,6 @@
_check_plot_works,
)
-pytestmark = pytest.mark.slow
-
@pytest.fixture
def ts():
@@ -69,6 +67,7 @@ def test_hist_layout(self, hist_df):
with pytest.raises(ValueError, match=msg):
df.height.hist(layout=[1, 1])
+ @pytest.mark.slow
def test_hist_layout_with_by(self, hist_df):
df = hist_df
@@ -232,6 +231,7 @@ def test_hist_kde_color(self, ts):
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
+ @pytest.mark.slow
def test_hist_df_legacy(self, hist_df):
from matplotlib.patches import Rectangle
@@ -644,6 +644,7 @@ def test_grouped_hist_legacy2(self):
assert len(self.plt.get_fignums()) == 2
tm.close()
+ @pytest.mark.slow
def test_grouped_hist_layout(self, hist_df):
df = hist_df
msg = "Layout of 1x1 must be larger than required size 2"
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index be5e5cbae2538..64c736d6314e4 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -17,8 +17,6 @@
import pandas.plotting as plotting
-pytestmark = pytest.mark.slow
-
@td.skip_if_mpl
def test_import_error_message():
@@ -137,6 +135,7 @@ def test_scatter_matrix_axis(self, pass_axis):
self._check_text_labels(axes0_labels, expected)
self._check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
+ @pytest.mark.slow
def test_andrews_curves(self, iris):
from matplotlib import cm
@@ -213,6 +212,7 @@ def test_andrews_curves(self, iris):
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, linecolors=colors)
+ @pytest.mark.slow
def test_parallel_coordinates(self, iris):
from matplotlib import cm
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 457ccc2ffce51..d0ed5cb754c40 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -1,6 +1,4 @@
""" Test cases for Series.plot """
-
-
from datetime import datetime
from itertools import chain
@@ -23,8 +21,6 @@
import pandas.plotting as plotting
-pytestmark = pytest.mark.slow
-
@pytest.fixture
def ts():
@@ -43,6 +39,7 @@ def iseries():
@td.skip_if_no_mpl
class TestSeriesPlots(TestPlotBase):
+ @pytest.mark.slow
def test_plot(self, ts):
_check_plot_works(ts.plot, label="foo")
_check_plot_works(ts.plot, use_index=False)
@@ -64,7 +61,17 @@ def test_plot(self, ts):
def test_plot_iseries(self, iseries):
_check_plot_works(iseries.plot)
- @pytest.mark.parametrize("kind", ["line", "bar", "barh", "kde", "hist", "box"])
+ @pytest.mark.parametrize(
+ "kind",
+ [
+ "line",
+ "bar",
+ "barh",
+ pytest.param("kde", marks=td.skip_if_no_scipy),
+ "hist",
+ "box",
+ ],
+ )
def test_plot_series_kinds(self, series, kind):
_check_plot_works(series[:5].plot, kind=kind)
@@ -495,6 +502,7 @@ def test_boxplot_series(self, ts):
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [""] * len(ylabels))
+ @td.skip_if_no_scipy
@pytest.mark.parametrize(
"kind",
plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds,
@@ -516,6 +524,7 @@ def test_invalid_plot_data(self, kind):
with pytest.raises(TypeError, match=msg):
s.plot(kind=kind, ax=ax)
+ @td.skip_if_no_scipy
@pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
def test_valid_object_plot(self, kind):
s = Series(range(10), dtype=object)
@@ -562,6 +571,7 @@ def test_errorbar_asymmetrical(self):
tm.close()
+ @pytest.mark.slow
def test_errorbar_plot(self):
s = Series(np.arange(10), name="x")
@@ -603,10 +613,13 @@ def test_errorbar_plot(self):
with tm.external_error_raised(TypeError):
s.plot(yerr=s_err)
+ @pytest.mark.slow
def test_table(self, series):
_check_plot_works(series.plot, table=True)
_check_plot_works(series.plot, table=series)
+ @pytest.mark.slow
+ @td.skip_if_no_scipy
def test_series_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(
diff --git a/pandas/tests/plotting/test_style.py b/pandas/tests/plotting/test_style.py
index 3c48eeaccbf34..665bda15724fd 100644
--- a/pandas/tests/plotting/test_style.py
+++ b/pandas/tests/plotting/test_style.py
@@ -5,8 +5,6 @@
pytest.importorskip("matplotlib")
from pandas.plotting._matplotlib.style import get_standard_colors
-pytestmark = pytest.mark.slow
-
class TestGetStandardColors:
@pytest.mark.parametrize(
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Ideally we should be more targeted regarding a slow test | https://api.github.com/repos/pandas-dev/pandas/pulls/46003 | 2022-02-15T19:32:40Z | 2022-02-27T15:20:55Z | 2022-02-27T15:20:55Z | 2022-02-28T03:05:45Z |
REF: Share NumericArray/NumericDtype methods | diff --git a/pandas/core/arrays/floating.py b/pandas/core/arrays/floating.py
index d55aef953b5b5..49a71922f331b 100644
--- a/pandas/core/arrays/floating.py
+++ b/pandas/core/arrays/floating.py
@@ -3,8 +3,8 @@
import numpy as np
from pandas._typing import DtypeObj
-from pandas.util._decorators import cache_readonly
+from pandas.core.dtypes.common import is_float_dtype
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.arrays.numeric import (
@@ -24,13 +24,7 @@ class FloatingDtype(NumericDtype):
"""
_default_np_dtype = np.dtype(np.float64)
-
- def __repr__(self) -> str:
- return f"{self.name}Dtype()"
-
- @property
- def _is_numeric(self) -> bool:
- return True
+ _checker = is_float_dtype
@classmethod
def construct_array_type(cls) -> type[FloatingArray]:
@@ -58,18 +52,8 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
return None
@classmethod
- def _standardize_dtype(cls, dtype) -> FloatingDtype:
- if isinstance(dtype, str) and dtype.startswith("Float"):
- # Avoid DeprecationWarning from NumPy about np.dtype("Float64")
- # https://github.com/numpy/numpy/pull/7476
- dtype = dtype.lower()
-
- if not issubclass(type(dtype), FloatingDtype):
- try:
- dtype = FLOAT_STR_TO_DTYPE[str(np.dtype(dtype))]
- except KeyError as err:
- raise ValueError(f"invalid dtype specified {dtype}") from err
- return dtype
+ def _str_to_dtype_mapping(cls):
+ return FLOAT_STR_TO_DTYPE
@classmethod
def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
@@ -151,22 +135,6 @@ class FloatingArray(NumericArray):
_truthy_value = 1.0
_falsey_value = 0.0
- @cache_readonly
- def dtype(self) -> FloatingDtype:
- return FLOAT_STR_TO_DTYPE[str(self._data.dtype)]
-
- def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
- if not (isinstance(values, np.ndarray) and values.dtype.kind == "f"):
- raise TypeError(
- "values should be floating numpy array. Use "
- "the 'pd.array' function instead"
- )
- if values.dtype == np.float16:
- # If we don't raise here, then accessing self.dtype would raise
- raise TypeError("FloatingArray does not support np.float16 dtype.")
-
- super().__init__(values, mask, copy=copy)
-
_dtype_docstring = """
An ExtensionDtype for {dtype} data.
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 056669f40ca87..9ef3939656ecd 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -3,9 +3,9 @@
import numpy as np
from pandas._typing import DtypeObj
-from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.base import register_extension_dtype
+from pandas.core.dtypes.common import is_integer_dtype
from pandas.core.arrays.masked import BaseMaskedDtype
from pandas.core.arrays.numeric import (
@@ -14,33 +14,18 @@
)
-class _IntegerDtype(NumericDtype):
+class IntegerDtype(NumericDtype):
"""
An ExtensionDtype to hold a single size & kind of integer dtype.
These specific implementations are subclasses of the non-public
- _IntegerDtype. For example we have Int8Dtype to represent signed int 8s.
+ IntegerDtype. For example we have Int8Dtype to represent signed int 8s.
The attributes name & type are set when these subclasses are created.
"""
_default_np_dtype = np.dtype(np.int64)
-
- def __repr__(self) -> str:
- sign = "U" if self.is_unsigned_integer else ""
- return f"{sign}Int{8 * self.itemsize}Dtype()"
-
- @cache_readonly
- def is_signed_integer(self) -> bool:
- return self.kind == "i"
-
- @cache_readonly
- def is_unsigned_integer(self) -> bool:
- return self.kind == "u"
-
- @property
- def _is_numeric(self) -> bool:
- return True
+ _checker = is_integer_dtype
@classmethod
def construct_array_type(cls) -> type[IntegerArray]:
@@ -86,20 +71,8 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
return None
@classmethod
- def _standardize_dtype(cls, dtype) -> _IntegerDtype:
- if isinstance(dtype, str) and (
- dtype.startswith("Int") or dtype.startswith("UInt")
- ):
- # Avoid DeprecationWarning from NumPy about np.dtype("Int64")
- # https://github.com/numpy/numpy/pull/7476
- dtype = dtype.lower()
-
- if not issubclass(type(dtype), _IntegerDtype):
- try:
- dtype = INT_STR_TO_DTYPE[str(np.dtype(dtype))]
- except KeyError as err:
- raise ValueError(f"invalid dtype specified {dtype}") from err
- return dtype
+ def _str_to_dtype_mapping(cls):
+ return INT_STR_TO_DTYPE
@classmethod
def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
@@ -189,7 +162,7 @@ class IntegerArray(NumericArray):
Length: 3, dtype: UInt16
"""
- _dtype_cls = _IntegerDtype
+ _dtype_cls = IntegerDtype
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = 1
@@ -197,18 +170,6 @@ class IntegerArray(NumericArray):
_truthy_value = 1
_falsey_value = 0
- @cache_readonly
- def dtype(self) -> _IntegerDtype:
- return INT_STR_TO_DTYPE[str(self._data.dtype)]
-
- def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
- if not (isinstance(values, np.ndarray) and values.dtype.kind in ["i", "u"]):
- raise TypeError(
- "values should be integer numpy array. Use "
- "the 'pd.array' function instead"
- )
- super().__init__(values, mask, copy=copy)
-
_dtype_docstring = """
An ExtensionDtype for {dtype} integer data.
@@ -231,62 +192,62 @@ def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
@register_extension_dtype
-class Int8Dtype(_IntegerDtype):
+class Int8Dtype(IntegerDtype):
type = np.int8
name = "Int8"
__doc__ = _dtype_docstring.format(dtype="int8")
@register_extension_dtype
-class Int16Dtype(_IntegerDtype):
+class Int16Dtype(IntegerDtype):
type = np.int16
name = "Int16"
__doc__ = _dtype_docstring.format(dtype="int16")
@register_extension_dtype
-class Int32Dtype(_IntegerDtype):
+class Int32Dtype(IntegerDtype):
type = np.int32
name = "Int32"
__doc__ = _dtype_docstring.format(dtype="int32")
@register_extension_dtype
-class Int64Dtype(_IntegerDtype):
+class Int64Dtype(IntegerDtype):
type = np.int64
name = "Int64"
__doc__ = _dtype_docstring.format(dtype="int64")
@register_extension_dtype
-class UInt8Dtype(_IntegerDtype):
+class UInt8Dtype(IntegerDtype):
type = np.uint8
name = "UInt8"
__doc__ = _dtype_docstring.format(dtype="uint8")
@register_extension_dtype
-class UInt16Dtype(_IntegerDtype):
+class UInt16Dtype(IntegerDtype):
type = np.uint16
name = "UInt16"
__doc__ = _dtype_docstring.format(dtype="uint16")
@register_extension_dtype
-class UInt32Dtype(_IntegerDtype):
+class UInt32Dtype(IntegerDtype):
type = np.uint32
name = "UInt32"
__doc__ = _dtype_docstring.format(dtype="uint32")
@register_extension_dtype
-class UInt64Dtype(_IntegerDtype):
+class UInt64Dtype(IntegerDtype):
type = np.uint64
name = "UInt64"
__doc__ = _dtype_docstring.format(dtype="uint64")
-INT_STR_TO_DTYPE: dict[str, _IntegerDtype] = {
+INT_STR_TO_DTYPE: dict[str, IntegerDtype] = {
"int8": Int8Dtype(),
"int16": Int16Dtype(),
"int32": Int32Dtype(),
diff --git a/pandas/core/arrays/numeric.py b/pandas/core/arrays/numeric.py
index 5ab1a9908fd02..958c9f7b0b3f1 100644
--- a/pandas/core/arrays/numeric.py
+++ b/pandas/core/arrays/numeric.py
@@ -3,6 +3,8 @@
import numbers
from typing import (
TYPE_CHECKING,
+ Any,
+ Callable,
TypeVar,
)
@@ -17,6 +19,7 @@
DtypeObj,
)
from pandas.errors import AbstractMethodError
+from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_bool_dtype,
@@ -41,6 +44,22 @@
class NumericDtype(BaseMaskedDtype):
_default_np_dtype: np.dtype
+ _checker: Callable[[Any], bool] # is_foo_dtype
+
+ def __repr__(self) -> str:
+ return f"{self.name}Dtype()"
+
+ @cache_readonly
+ def is_signed_integer(self) -> bool:
+ return self.kind == "i"
+
+ @cache_readonly
+ def is_unsigned_integer(self) -> bool:
+ return self.kind == "u"
+
+ @property
+ def _is_numeric(self) -> bool:
+ return True
def __from_arrow__(
self, array: pyarrow.Array | pyarrow.ChunkedArray
@@ -90,12 +109,27 @@ def __from_arrow__(
else:
return array_class._concat_same_type(results)
+ @classmethod
+ def _str_to_dtype_mapping(cls):
+ raise AbstractMethodError(cls)
+
@classmethod
def _standardize_dtype(cls, dtype) -> NumericDtype:
"""
Convert a string representation or a numpy dtype to NumericDtype.
"""
- raise AbstractMethodError(cls)
+ if isinstance(dtype, str) and (dtype.startswith(("Int", "UInt", "Float"))):
+ # Avoid DeprecationWarning from NumPy about np.dtype("Int64")
+ # https://github.com/numpy/numpy/pull/7476
+ dtype = dtype.lower()
+
+ if not issubclass(type(dtype), cls):
+ mapping = cls._str_to_dtype_mapping()
+ try:
+ dtype = mapping[str(np.dtype(dtype))]
+ except KeyError as err:
+ raise ValueError(f"invalid dtype specified {dtype}") from err
+ return dtype
@classmethod
def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
@@ -108,10 +142,7 @@ def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarr
def _coerce_to_data_and_mask(values, mask, dtype, copy, dtype_cls, default_dtype):
- if default_dtype.kind == "f":
- checker = is_float_dtype
- else:
- checker = is_integer_dtype
+ checker = dtype_cls._checker
inferred_type = None
@@ -188,6 +219,29 @@ class NumericArray(BaseMaskedArray):
_dtype_cls: type[NumericDtype]
+ def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
+ checker = self._dtype_cls._checker
+ if not (isinstance(values, np.ndarray) and checker(values.dtype)):
+ descr = (
+ "floating"
+ if self._dtype_cls.kind == "f" # type: ignore[comparison-overlap]
+ else "integer"
+ )
+ raise TypeError(
+ f"values should be {descr} numpy array. Use "
+ "the 'pd.array' function instead"
+ )
+ if values.dtype == np.float16:
+ # If we don't raise here, then accessing self.dtype would raise
+ raise TypeError("FloatingArray does not support np.float16 dtype.")
+
+ super().__init__(values, mask, copy=copy)
+
+ @cache_readonly
+ def dtype(self) -> NumericDtype:
+ mapping = self._dtype_cls._str_to_dtype_mapping()
+ return mapping[str(self._data.dtype)]
+
@classmethod
def _coerce_to_array(
cls, value, *, dtype: DtypeObj, copy: bool = False
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index af1756470a9c0..ca4348e3bd06a 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -45,7 +45,7 @@
)
from pandas.core.arrays.base import ExtensionArray
from pandas.core.arrays.floating import FloatingDtype
-from pandas.core.arrays.integer import _IntegerDtype
+from pandas.core.arrays.integer import IntegerDtype
from pandas.core.construction import extract_array
from pandas.core.indexers import check_array_indexer
from pandas.core.missing import isna
@@ -432,7 +432,7 @@ def astype(self, dtype, copy: bool = True):
return self.copy()
return self
- elif isinstance(dtype, _IntegerDtype):
+ elif isinstance(dtype, IntegerDtype):
arr = self._ndarray.copy()
mask = self.isna()
arr[mask] = 0
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index d4aa6ae9f4059..cf046d92dd6f3 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -76,7 +76,7 @@
)
from pandas.core.arrays.integer import (
Int64Dtype,
- _IntegerDtype,
+ IntegerDtype,
)
from pandas.core.arrays.masked import (
BaseMaskedArray,
@@ -300,10 +300,10 @@ def _get_result_dtype(self, dtype: DtypeObj) -> DtypeObj:
if how in ["add", "cumsum", "sum", "prod"]:
if dtype == np.dtype(bool):
return np.dtype(np.int64)
- elif isinstance(dtype, (BooleanDtype, _IntegerDtype)):
+ elif isinstance(dtype, (BooleanDtype, IntegerDtype)):
return Int64Dtype()
elif how in ["mean", "median", "var"]:
- if isinstance(dtype, (BooleanDtype, _IntegerDtype)):
+ if isinstance(dtype, (BooleanDtype, IntegerDtype)):
return Float64Dtype()
elif is_float_dtype(dtype) or is_complex_dtype(dtype):
return dtype
@@ -341,7 +341,7 @@ def _ea_wrap_cython_operation(
# All of the functions implemented here are ordinal, so we can
# operate on the tz-naive equivalents
npvalues = values._ndarray.view("M8[ns]")
- elif isinstance(values.dtype, (BooleanDtype, _IntegerDtype)):
+ elif isinstance(values.dtype, (BooleanDtype, IntegerDtype)):
# IntegerArray or BooleanArray
npvalues = values.to_numpy("float64", na_value=np.nan)
elif isinstance(values.dtype, FloatingDtype):
@@ -378,7 +378,7 @@ def _reconstruct_ea_result(self, values, res_values):
# TODO: allow EAs to override this logic
if isinstance(
- values.dtype, (BooleanDtype, _IntegerDtype, FloatingDtype, StringDtype)
+ values.dtype, (BooleanDtype, IntegerDtype, FloatingDtype, StringDtype)
):
dtype = self._get_result_dtype(values.dtype)
cls = dtype.construct_array_type()
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 883cc36e4c1f1..60c4634662296 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -62,7 +62,7 @@
to_timedelta,
)
from pandas.core.arrays.boolean import BooleanDtype
-from pandas.core.arrays.integer import _IntegerDtype
+from pandas.core.arrays.integer import IntegerDtype
from pandas.core.frame import DataFrame
from pandas.core.indexes.base import Index
from pandas.core.series import Series
@@ -585,7 +585,7 @@ def _cast_to_stata_types(data: DataFrame) -> DataFrame:
for col in data:
# Cast from unsupported types to supported types
- is_nullable_int = isinstance(data[col].dtype, (_IntegerDtype, BooleanDtype))
+ is_nullable_int = isinstance(data[col].dtype, (IntegerDtype, BooleanDtype))
orig = data[col]
# We need to find orig_missing before altering data below
orig_missing = orig.isna()
@@ -593,7 +593,7 @@ def _cast_to_stata_types(data: DataFrame) -> DataFrame:
missing_loc = data[col].isna()
if missing_loc.any():
# Replace with always safe value
- fv = 0 if isinstance(data[col].dtype, _IntegerDtype) else False
+ fv = 0 if isinstance(data[col].dtype, IntegerDtype) else False
data.loc[missing_loc, col] = fv
# Replace with NumPy-compatible column
data[col] = data[col].astype(data[col].dtype.numpy_dtype)
diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py
index 0e7e4b537c719..6d343de9f5d3a 100644
--- a/pandas/tests/frame/methods/test_astype.py
+++ b/pandas/tests/frame/methods/test_astype.py
@@ -762,11 +762,6 @@ def test_astype_categorical_to_string_missing(self):
class IntegerArrayNoCopy(pd.core.arrays.IntegerArray):
# GH 42501
- @classmethod
- def _from_sequence(cls, scalars, *, dtype=None, copy=False):
- values, mask = cls._coerce_to_array(scalars, dtype=dtype, copy=copy)
- return IntegerArrayNoCopy(values, mask)
-
def copy(self):
assert False
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/45997 | 2022-02-15T06:12:01Z | 2022-02-16T13:36:22Z | 2022-02-16T13:36:22Z | 2022-02-16T15:45:12Z |
TST: Use uuid instead of random chars for temp files | diff --git a/pandas/_testing/contexts.py b/pandas/_testing/contexts.py
index 5a77c06d65d07..547ec9db20994 100644
--- a/pandas/_testing/contexts.py
+++ b/pandas/_testing/contexts.py
@@ -3,14 +3,13 @@
from contextlib import contextmanager
import os
from pathlib import Path
-import random
from shutil import rmtree
-import string
import tempfile
from typing import (
IO,
Any,
)
+import uuid
import numpy as np
@@ -107,9 +106,7 @@ def ensure_clean(filename=None, return_filelike: bool = False, **kwargs: Any):
if filename is None:
filename = ""
- filename = (
- "".join(random.choices(string.ascii_letters + string.digits, k=30)) + filename
- )
+ filename = str(uuid.uuid4()) + filename
path = folder / filename
path.touch()
diff --git a/pandas/tests/io/parser/common/test_file_buffer_url.py b/pandas/tests/io/parser/common/test_file_buffer_url.py
index b9ddec0a37c11..fce1d1260b3fe 100644
--- a/pandas/tests/io/parser/common/test_file_buffer_url.py
+++ b/pandas/tests/io/parser/common/test_file_buffer_url.py
@@ -9,6 +9,7 @@
import os
import platform
from urllib.error import URLError
+import uuid
import pytest
@@ -87,7 +88,7 @@ def test_nonexistent_path(all_parsers):
# gh-14086: raise more helpful FileNotFoundError
# GH#29233 "File foo" instead of "File b'foo'"
parser = all_parsers
- path = f"{tm.rands(10)}.csv"
+ path = f"{uuid.uuid4()}.csv"
msg = r"\[Errno 2\]"
with pytest.raises(FileNotFoundError, match=msg) as e:
@@ -255,7 +256,7 @@ def test_internal_eof_byte_to_file(all_parsers):
parser = all_parsers
data = b'c1,c2\r\n"test \x1a test", test\r\n'
expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
- path = f"__{tm.rands(10)}__.csv"
+ path = f"__{uuid.uuid4()}__.csv"
with tm.ensure_clean(path) as path:
with open(path, "wb") as f:
diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py
index a70c3ee44edb6..c06ac9e76bd7f 100644
--- a/pandas/tests/io/parser/test_encoding.py
+++ b/pandas/tests/io/parser/test_encoding.py
@@ -2,10 +2,10 @@
Tests encoding functionality during parsing
for all of the parsers defined in parsers.py
"""
-
from io import BytesIO
import os
import tempfile
+import uuid
import numpy as np
import pytest
@@ -54,7 +54,7 @@ def test_utf16_bom_skiprows(all_parsers, sep, encoding):
4,5,6""".replace(
",", sep
)
- path = f"__{tm.rands(10)}__.csv"
+ path = f"__{uuid.uuid4()}__.csv"
kwargs = {"sep": sep, "skiprows": 2}
utf8 = "utf-8"
diff --git a/pandas/tests/io/pytables/conftest.py b/pandas/tests/io/pytables/conftest.py
index 988f78c5ae843..bbd815b9c6309 100644
--- a/pandas/tests/io/pytables/conftest.py
+++ b/pandas/tests/io/pytables/conftest.py
@@ -1,3 +1,5 @@
+import uuid
+
import pytest
import pandas._testing as tm
@@ -6,7 +8,7 @@
@pytest.fixture
def setup_path():
"""Fixture for setup path"""
- return f"tmp.__{tm.rands(10)}__.h5"
+ return f"tmp.__{uuid.uuid4()}__.h5"
@pytest.fixture(scope="module", autouse=True)
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 56fc42aa0e2db..3ad8fd4051b48 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -21,6 +21,7 @@
from pathlib import Path
import pickle
import shutil
+import uuid
from warnings import (
catch_warnings,
filterwarnings,
@@ -248,7 +249,7 @@ def test_legacy_sparse_warning(datapath, typ):
@pytest.fixture
def get_random_path():
- return f"__{tm.rands(10)}__.pickle"
+ return f"__{uuid.uuid4()}__.pickle"
class TestCompression:
| - [x] closes #39197 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/45996 | 2022-02-15T03:17:11Z | 2022-02-15T18:55:09Z | 2022-02-15T18:55:09Z | 2022-02-15T18:55:13Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.