title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
CLN: test_expressions.py | diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index e03194227f576..3980ec023960c 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -14,44 +14,88 @@
)
from pandas.core.computation import expressions as expr
-_frame = DataFrame(np.random.randn(10001, 4), columns=list("ABCD"), dtype="float64")
-_frame2 = DataFrame(np.random.randn(100, 4), columns=list("ABCD"), dtype="float64")
-_mixed = DataFrame(
- {
- "A": _frame["A"].copy(),
- "B": _frame["B"].astype("float32"),
- "C": _frame["C"].astype("int64"),
- "D": _frame["D"].astype("int32"),
- }
-)
-_mixed2 = DataFrame(
- {
- "A": _frame2["A"].copy(),
- "B": _frame2["B"].astype("float32"),
- "C": _frame2["C"].astype("int64"),
- "D": _frame2["D"].astype("int32"),
- }
-)
-_integer = DataFrame(
- np.random.randint(1, 100, size=(10001, 4)), columns=list("ABCD"), dtype="int64"
-)
-_integer2 = DataFrame(
- np.random.randint(1, 100, size=(101, 4)), columns=list("ABCD"), dtype="int64"
-)
-_array = _frame["A"].values.copy()
-_array2 = _frame2["A"].values.copy()
-_array_mixed = _mixed["D"].values.copy()
-_array_mixed2 = _mixed2["D"].values.copy()
+@pytest.fixture
+def _frame():
+ return DataFrame(np.random.randn(10001, 4), columns=list("ABCD"), dtype="float64")
+
+
+@pytest.fixture
+def _frame2():
+ return DataFrame(np.random.randn(100, 4), columns=list("ABCD"), dtype="float64")
+
+
+@pytest.fixture
+def _mixed(_frame):
+ return DataFrame(
+ {
+ "A": _frame["A"].copy(),
+ "B": _frame["B"].astype("float32"),
+ "C": _frame["C"].astype("int64"),
+ "D": _frame["D"].astype("int32"),
+ }
+ )
+
+
+@pytest.fixture
+def _mixed2(_frame2):
+ return DataFrame(
+ {
+ "A": _frame2["A"].copy(),
+ "B": _frame2["B"].astype("float32"),
+ "C": _frame2["C"].astype("int64"),
+ "D": _frame2["D"].astype("int32"),
+ }
+ )
+
+
+@pytest.fixture
+def _integer():
+ return DataFrame(
+ np.random.randint(1, 100, size=(10001, 4)), columns=list("ABCD"), dtype="int64"
+ )
+
+
+@pytest.fixture
+def _integer_randint(_integer):
+ # randint to get a case with zeros
+ return _integer * np.random.randint(0, 2, size=np.shape(_integer))
+
+
+@pytest.fixture
+def _integer2():
+ return DataFrame(
+ np.random.randint(1, 100, size=(101, 4)), columns=list("ABCD"), dtype="int64"
+ )
+
+
+@pytest.fixture
+def _array(_frame):
+ return _frame["A"].values.copy()
+
+
+@pytest.fixture
+def _array2(_frame2):
+ return _frame2["A"].values.copy()
+
+
+@pytest.fixture
+def _array_mixed(_mixed):
+ return _mixed["D"].values.copy()
+
+
+@pytest.fixture
+def _array_mixed2(_mixed2):
+ return _mixed2["D"].values.copy()
@pytest.mark.skipif(not expr.USE_NUMEXPR, reason="not using numexpr")
class TestExpressions:
- def setup_method(self):
- self._MIN_ELEMENTS = expr._MIN_ELEMENTS
-
- def teardown_method(self):
- expr._MIN_ELEMENTS = self._MIN_ELEMENTS
+ @pytest.fixture(autouse=True)
+ def save_min_elements(self):
+ min_elements = expr._MIN_ELEMENTS
+ yield
+ expr._MIN_ELEMENTS = min_elements
@staticmethod
def call_op(df, other, flex: bool, opname: str):
@@ -70,23 +114,23 @@ def call_op(df, other, flex: bool, opname: str):
return result, expected
@pytest.mark.parametrize(
- "df",
+ "fixture",
[
- _integer,
- _integer2,
- # randint to get a case with zeros
- _integer * np.random.randint(0, 2, size=np.shape(_integer)),
- _frame,
- _frame2,
- _mixed,
- _mixed2,
+ "_integer",
+ "_integer2",
+ "_integer_randint",
+ "_frame",
+ "_frame2",
+ "_mixed",
+ "_mixed2",
],
)
@pytest.mark.parametrize("flex", [True, False])
@pytest.mark.parametrize(
"arith", ["add", "sub", "mul", "mod", "truediv", "floordiv"]
)
- def test_run_arithmetic(self, df, flex, arith):
+ def test_run_arithmetic(self, request, fixture, flex, arith):
+ df = request.getfixturevalue(fixture)
expr._MIN_ELEMENTS = 0
result, expected = self.call_op(df, df, flex, arith)
@@ -101,25 +145,25 @@ def test_run_arithmetic(self, df, flex, arith):
tm.assert_equal(expected, result)
@pytest.mark.parametrize(
- "df",
+ "fixture",
[
- _integer,
- _integer2,
- # randint to get a case with zeros
- _integer * np.random.randint(0, 2, size=np.shape(_integer)),
- _frame,
- _frame2,
- _mixed,
- _mixed2,
+ "_integer",
+ "_integer2",
+ "_integer_randint",
+ "_frame",
+ "_frame2",
+ "_mixed",
+ "_mixed2",
],
)
@pytest.mark.parametrize("flex", [True, False])
- def test_run_binary(self, df, flex, comparison_op):
+ def test_run_binary(self, request, fixture, flex, comparison_op):
"""
tests solely that the result is the same whether or not numexpr is
enabled. Need to test whether the function does the correct thing
elsewhere.
"""
+ df = request.getfixturevalue(fixture)
arith = comparison_op.__name__
with option_context("compute.use_numexpr", False):
other = df.copy() + 1
@@ -160,9 +204,12 @@ def test_invalid(self):
[("add", "+"), ("sub", "-"), ("mul", "*"), ("truediv", "/"), ("pow", "**")],
)
@pytest.mark.parametrize(
- "left,right", [(_array, _array2), (_array_mixed, _array_mixed2)]
+ "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")]
)
- def test_binary_ops(self, opname, op_str, left, right):
+ def test_binary_ops(self, request, opname, op_str, left_fix, right_fix):
+ left = request.getfixturevalue(left_fix)
+ right = request.getfixturevalue(right_fix)
+
def testit():
if opname == "pow":
@@ -202,9 +249,12 @@ def testit():
],
)
@pytest.mark.parametrize(
- "left,right", [(_array, _array2), (_array_mixed, _array_mixed2)]
+ "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")]
)
- def test_comparison_ops(self, opname, op_str, left, right):
+ def test_comparison_ops(self, request, opname, op_str, left_fix, right_fix):
+ left = request.getfixturevalue(left_fix)
+ right = request.getfixturevalue(right_fix)
+
def testit():
f12 = left + 1
f22 = right + 1
@@ -227,8 +277,10 @@ def testit():
testit()
@pytest.mark.parametrize("cond", [True, False])
- @pytest.mark.parametrize("df", [_frame, _frame2, _mixed, _mixed2])
- def test_where(self, cond, df):
+ @pytest.mark.parametrize("fixture", ["_frame", "_frame2", "_mixed", "_mixed2"])
+ def test_where(self, request, cond, fixture):
+ df = request.getfixturevalue(fixture)
+
def testit():
c = np.empty(df.shape, dtype=np.bool_)
c.fill(cond)
@@ -350,7 +402,7 @@ def test_bool_ops_column_name_dtype(self, test_input, expected):
"arith", ("add", "sub", "mul", "mod", "truediv", "floordiv")
)
@pytest.mark.parametrize("axis", (0, 1))
- def test_frame_series_axis(self, axis, arith):
+ def test_frame_series_axis(self, axis, arith, _frame):
# GH#26736 Dataframe.floordiv(Series, axis=1) fails
df = _frame
| - [x] closes #40497 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
| https://api.github.com/repos/pandas-dev/pandas/pulls/49422 | 2022-10-31T19:28:14Z | 2022-11-01T14:47:07Z | 2022-11-01T14:47:07Z | 2022-11-01T16:47:06Z |
ENH: Add masked engine | diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index a5bba8e5592c7..19ef536a636ea 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -8,6 +8,7 @@
import numpy as np
from pandas import (
+ NA,
CategoricalIndex,
DataFrame,
Index,
@@ -83,6 +84,37 @@ def time_loc_slice(self, index, index_structure):
self.data.loc[:800000]
+class NumericMaskedIndexing:
+ monotonic_list = list(range(10**6))
+ non_monotonic_list = (
+ list(range(50)) + [54, 53, 52, 51] + list(range(55, 10**6 - 1))
+ )
+
+ params = [
+ ("Int64", "UInt64", "Float64"),
+ (True, False),
+ ]
+ param_names = ["dtype", "monotonic"]
+
+ def setup(self, dtype, monotonic):
+
+ indices = {
+ True: Index(self.monotonic_list, dtype=dtype),
+ False: Index(self.non_monotonic_list, dtype=dtype).append(
+ Index([NA], dtype=dtype)
+ ),
+ }
+ self.data = indices[monotonic]
+ self.indexer = np.arange(300, 1_000)
+ self.data_dups = self.data.append(self.data)
+
+ def time_get_indexer(self, dtype, monotonic):
+ self.data.get_indexer(self.indexer)
+
+ def time_get_indexer_dups(self, dtype, monotonic):
+ self.data.get_indexer_for(self.indexer)
+
+
class NonNumericSeriesIndexing:
params = [
diff --git a/asv_bench/benchmarks/indexing_engines.py b/asv_bench/benchmarks/indexing_engines.py
index 0c6cb89f49da1..ce208761638c5 100644
--- a/asv_bench/benchmarks/indexing_engines.py
+++ b/asv_bench/benchmarks/indexing_engines.py
@@ -1,5 +1,8 @@
"""
-Benchmarks in this file depend exclusively on code in _libs/
+Benchmarks in this file depend mostly on code in _libs/
+
+We have to created masked arrays to test the masked engine though. The
+array is unpacked on the Cython level.
If a PR does not edit anything in _libs, it is very unlikely that benchmarks
in this file will be affected.
@@ -9,6 +12,8 @@
from pandas._libs import index as libindex
+from pandas.core.arrays import BaseMaskedArray
+
def _get_numeric_engines():
engine_names = [
@@ -30,6 +35,26 @@ def _get_numeric_engines():
]
+def _get_masked_engines():
+ engine_names = [
+ ("MaskedInt64Engine", "Int64"),
+ ("MaskedInt32Engine", "Int32"),
+ ("MaskedInt16Engine", "Int16"),
+ ("MaskedInt8Engine", "Int8"),
+ ("MaskedUInt64Engine", "UInt64"),
+ ("MaskedUInt32Engine", "UInt32"),
+ ("MaskedUInt16engine", "UInt16"),
+ ("MaskedUInt8Engine", "UInt8"),
+ ("MaskedFloat64Engine", "Float64"),
+ ("MaskedFloat32Engine", "Float32"),
+ ]
+ return [
+ (getattr(libindex, engine_name), dtype)
+ for engine_name, dtype in engine_names
+ if hasattr(libindex, engine_name)
+ ]
+
+
class NumericEngineIndexing:
params = [
@@ -80,6 +105,61 @@ def time_get_loc_near_middle(self, engine_and_dtype, index_type, unique, N):
self.data.get_loc(self.key_middle)
+class MaskedNumericEngineIndexing:
+
+ params = [
+ _get_masked_engines(),
+ ["monotonic_incr", "monotonic_decr", "non_monotonic"],
+ [True, False],
+ [10**5, 2 * 10**6], # 2e6 is above SIZE_CUTOFF
+ ]
+ param_names = ["engine_and_dtype", "index_type", "unique", "N"]
+
+ def setup(self, engine_and_dtype, index_type, unique, N):
+ engine, dtype = engine_and_dtype
+
+ if index_type == "monotonic_incr":
+ if unique:
+ arr = np.arange(N * 3, dtype=dtype.lower())
+ else:
+ values = list([1] * N + [2] * N + [3] * N)
+ arr = np.array(values, dtype=dtype.lower())
+ mask = np.zeros(N * 3, dtype=np.bool_)
+ elif index_type == "monotonic_decr":
+ if unique:
+ arr = np.arange(N * 3, dtype=dtype.lower())[::-1]
+ else:
+ values = list([1] * N + [2] * N + [3] * N)
+ arr = np.array(values, dtype=dtype.lower())[::-1]
+ mask = np.zeros(N * 3, dtype=np.bool_)
+ else:
+ assert index_type == "non_monotonic"
+ if unique:
+ arr = np.zeros(N * 3, dtype=dtype.lower())
+ arr[:N] = np.arange(N * 2, N * 3, dtype=dtype.lower())
+ arr[N:] = np.arange(N * 2, dtype=dtype.lower())
+
+ else:
+ arr = np.array([1, 2, 3] * N, dtype=dtype.lower())
+ mask = np.zeros(N * 3, dtype=np.bool_)
+ mask[-1] = True
+
+ self.data = engine(BaseMaskedArray(arr, mask))
+ # code belows avoids populating the mapping etc. while timing.
+ self.data.get_loc(2)
+
+ self.key_middle = arr[len(arr) // 2]
+ self.key_early = arr[2]
+
+ def time_get_loc(self, engine_and_dtype, index_type, unique, N):
+ self.data.get_loc(self.key_early)
+
+ def time_get_loc_near_middle(self, engine_and_dtype, index_type, unique, N):
+ # searchsorted performance may be different near the middle of a range
+ # vs near an endpoint
+ self.data.get_loc(self.key_middle)
+
+
class ObjectEngineIndexing:
params = [("monotonic_incr", "monotonic_decr", "non_monotonic")]
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index dc05745c8c0e5..f8ac9645f758d 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -884,6 +884,7 @@ Performance improvements
- Performance improvement in :func:`to_datetime` when parsing strings with timezone offsets (:issue:`50107`)
- Performance improvement in :meth:`DataFrame.loc` and :meth:`Series.loc` for tuple-based indexing of a :class:`MultiIndex` (:issue:`48384`)
- Performance improvement for :meth:`MultiIndex.unique` (:issue:`48335`)
+- Performance improvement for indexing operations with nullable dtypes (:issue:`49420`)
- Performance improvement for :func:`concat` with extension array backed indexes (:issue:`49128`, :issue:`49178`)
- Reduce memory usage of :meth:`DataFrame.to_pickle`/:meth:`Series.to_pickle` when using BZ2 or LZMA (:issue:`49068`)
- Performance improvement for :class:`~arrays.StringArray` constructor passing a numpy array with type ``np.str_`` (:issue:`49109`)
diff --git a/pandas/_libs/hashtable.pyi b/pandas/_libs/hashtable.pyi
index af47e6c408c05..e9b78ad53380f 100644
--- a/pandas/_libs/hashtable.pyi
+++ b/pandas/_libs/hashtable.pyi
@@ -165,10 +165,12 @@ class HashTable:
def map_locations(
self,
values: np.ndarray, # np.ndarray[subclass-specific]
+ mask: npt.NDArray[np.bool_] | None = ...,
) -> None: ...
def lookup(
self,
values: np.ndarray, # np.ndarray[subclass-specific]
+ mask: npt.NDArray[np.bool_] | None = ...,
) -> npt.NDArray[np.intp]: ...
def get_labels(
self,
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index 06ad614b4f963..d4d3117a32ac9 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -1005,8 +1005,9 @@ cdef class StringHashTable(HashTable):
return labels
@cython.boundscheck(False)
- def lookup(self, ndarray[object] values) -> ndarray:
+ def lookup(self, ndarray[object] values, object mask = None) -> ndarray:
# -> np.ndarray[np.intp]
+ # mask not yet implemented
cdef:
Py_ssize_t i, n = len(values)
int ret = 0
@@ -1041,7 +1042,8 @@ cdef class StringHashTable(HashTable):
return np.asarray(locs)
@cython.boundscheck(False)
- def map_locations(self, ndarray[object] values) -> None:
+ def map_locations(self, ndarray[object] values, object mask = None) -> None:
+ # mask not yet implemented
cdef:
Py_ssize_t i, n = len(values)
int ret = 0
@@ -1314,7 +1316,8 @@ cdef class PyObjectHashTable(HashTable):
else:
raise KeyError(key)
- def map_locations(self, ndarray[object] values) -> None:
+ def map_locations(self, ndarray[object] values, object mask = None) -> None:
+ # mask not yet implemented
cdef:
Py_ssize_t i, n = len(values)
int ret = 0
@@ -1328,8 +1331,9 @@ cdef class PyObjectHashTable(HashTable):
k = kh_put_pymap(self.table, <PyObject*>val, &ret)
self.table.vals[k] = i
- def lookup(self, ndarray[object] values) -> ndarray:
+ def lookup(self, ndarray[object] values, object mask = None) -> ndarray:
# -> np.ndarray[np.intp]
+ # mask not yet implemented
cdef:
Py_ssize_t i, n = len(values)
int ret = 0
diff --git a/pandas/_libs/index.pyi b/pandas/_libs/index.pyi
index 8fff335352617..4b4c4d65d1ea4 100644
--- a/pandas/_libs/index.pyi
+++ b/pandas/_libs/index.pyi
@@ -29,6 +29,12 @@ class IndexEngine:
targets: np.ndarray,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+class MaskedIndexEngine(IndexEngine):
+ def __init__(self, values: object) -> None: ...
+ def get_indexer_non_unique(
+ self, targets: object
+ ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+
class Float64Engine(IndexEngine): ...
class Float32Engine(IndexEngine): ...
class Complex128Engine(IndexEngine): ...
@@ -46,6 +52,19 @@ class DatetimeEngine(Int64Engine): ...
class TimedeltaEngine(DatetimeEngine): ...
class PeriodEngine(Int64Engine): ...
class BoolEngine(UInt8Engine): ...
+class MaskedBoolEngine(MaskedUInt8Engine): ...
+class MaskedFloat64Engine(MaskedIndexEngine): ...
+class MaskedFloat32Engine(MaskedIndexEngine): ...
+class MaskedComplex128Engine(MaskedIndexEngine): ...
+class MaskedComplex64Engine(MaskedIndexEngine): ...
+class MaskedInt64Engine(MaskedIndexEngine): ...
+class MaskedInt32Engine(MaskedIndexEngine): ...
+class MaskedInt16Engine(MaskedIndexEngine): ...
+class MaskedInt8Engine(MaskedIndexEngine): ...
+class MaskedUInt64Engine(MaskedIndexEngine): ...
+class MaskedUInt32Engine(MaskedIndexEngine): ...
+class MaskedUInt16Engine(MaskedIndexEngine): ...
+class MaskedUInt8Engine(MaskedIndexEngine): ...
class BaseMultiIndexCodesEngine:
levels: list[np.ndarray]
@@ -57,10 +76,7 @@ class BaseMultiIndexCodesEngine:
labels: list[np.ndarray], # all entries integer-dtyped
offsets: np.ndarray, # np.ndarray[np.uint64, ndim=1]
) -> None: ...
- def get_indexer(
- self,
- target: npt.NDArray[np.object_],
- ) -> npt.NDArray[np.intp]: ...
+ def get_indexer(self, target: npt.NDArray[np.object_]) -> npt.NDArray[np.intp]: ...
def _extract_level_codes(self, target: MultiIndex) -> np.ndarray: ...
def get_indexer_with_fill(
self,
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 9e2adee407b1a..94d21f39dc61a 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -32,6 +32,7 @@ from pandas._libs import (
from pandas._libs.lib cimport eq_NA_compat
from pandas._libs.missing cimport (
+ C_NA,
checknull,
is_matching_na,
)
@@ -48,7 +49,7 @@ cdef bint is_definitely_invalid_key(object val):
return False
-cdef ndarray _get_bool_indexer(ndarray values, object val):
+cdef ndarray _get_bool_indexer(ndarray values, object val, ndarray mask = None):
"""
Return a ndarray[bool] of locations where val matches self.values.
@@ -61,6 +62,7 @@ cdef ndarray _get_bool_indexer(ndarray values, object val):
object item
if values.descr.type_num == cnp.NPY_OBJECT:
+ assert mask is None # no mask for object dtype
# i.e. values.dtype == object
if not checknull(val):
indexer = eq_NA_compat(values, val)
@@ -74,10 +76,16 @@ cdef ndarray _get_bool_indexer(ndarray values, object val):
indexer[i] = is_matching_na(item, val)
else:
- if util.is_nan(val):
- indexer = np.isnan(values)
+ if mask is not None:
+ if val is C_NA:
+ indexer = mask == 1
+ else:
+ indexer = (values == val) & ~mask
else:
- indexer = values == val
+ if util.is_nan(val):
+ indexer = np.isnan(values)
+ else:
+ indexer = values == val
return indexer.view(bool)
@@ -111,6 +119,7 @@ cdef class IndexEngine:
cdef readonly:
ndarray values
+ ndarray mask
HashTable mapping
bint over_size_threshold
@@ -121,6 +130,7 @@ cdef class IndexEngine:
def __init__(self, ndarray values):
self.values = values
+ self.mask = None
self.over_size_threshold = len(values) >= _SIZE_CUTOFF
self.clear_mapping()
@@ -159,6 +169,8 @@ cdef class IndexEngine:
self._ensure_mapping_populated()
if not self.unique:
return self._get_loc_duplicates(val)
+ if self.mask is not None and val is C_NA:
+ return self.mapping.get_na()
try:
return self.mapping.get_item(val)
@@ -206,7 +218,7 @@ cdef class IndexEngine:
cdef:
ndarray[uint8_t, ndim=1, cast=True] indexer
- indexer = _get_bool_indexer(self.values, val)
+ indexer = _get_bool_indexer(self.values, val, self.mask)
return _unpack_bool_indexer(indexer, val)
def sizeof(self, deep: bool = False) -> int:
@@ -247,21 +259,25 @@ cdef class IndexEngine:
cdef _do_monotonic_check(self):
cdef:
bint is_unique
- try:
- values = self.values
- self.monotonic_inc, self.monotonic_dec, is_unique = \
- self._call_monotonic(values)
- except TypeError:
+ if self.mask is not None and np.any(self.mask):
self.monotonic_inc = 0
self.monotonic_dec = 0
- is_unique = 0
+ else:
+ try:
+ values = self.values
+ self.monotonic_inc, self.monotonic_dec, is_unique = \
+ self._call_monotonic(values)
+ except TypeError:
+ self.monotonic_inc = 0
+ self.monotonic_dec = 0
+ is_unique = 0
- self.need_monotonic_check = 0
+ self.need_monotonic_check = 0
- # we can only be sure of uniqueness if is_unique=1
- if is_unique:
- self.unique = 1
- self.need_unique_check = 0
+ # we can only be sure of uniqueness if is_unique=1
+ if is_unique:
+ self.unique = 1
+ self.need_unique_check = 0
cdef _call_monotonic(self, values):
return algos.is_monotonic(values, timelike=False)
@@ -286,7 +302,7 @@ cdef class IndexEngine:
values = self.values
self.mapping = self._make_hash_table(len(values))
- self.mapping.map_locations(values)
+ self.mapping.map_locations(values, self.mask)
if len(self.mapping) == len(values):
self.unique = 1
@@ -841,6 +857,15 @@ cdef class BoolEngine(UInt8Engine):
return <uint8_t>val
+cdef class MaskedBoolEngine(MaskedUInt8Engine):
+ cdef _check_type(self, object val):
+ if val is C_NA:
+ return val
+ if not util.is_bool_object(val):
+ raise KeyError(val)
+ return <uint8_t>val
+
+
@cython.internal
@cython.freelist(32)
cdef class SharedEngine:
@@ -1110,3 +1135,130 @@ cdef class ExtensionEngine(SharedEngine):
cdef _check_type(self, object val):
hash(val)
+
+
+cdef class MaskedIndexEngine(IndexEngine):
+ def __init__(self, object values):
+ super().__init__(values._data)
+ self.mask = values._mask
+
+ def get_indexer(self, object values) -> np.ndarray:
+ self._ensure_mapping_populated()
+ return self.mapping.lookup(values._data, values._mask)
+
+ def get_indexer_non_unique(self, object targets):
+ """
+ Return an indexer suitable for taking from a non unique index
+ return the labels in the same order as the target
+ and a missing indexer into the targets (which correspond
+ to the -1 indices in the results
+
+ Returns
+ -------
+ indexer : np.ndarray[np.intp]
+ missing : np.ndarray[np.intp]
+ """
+ # TODO: Unify with parent class
+ cdef:
+ ndarray values, mask, target_vals, target_mask
+ ndarray[intp_t] result, missing
+ set stargets
+ list na_pos
+ dict d = {}
+ object val
+ Py_ssize_t count = 0, count_missing = 0
+ Py_ssize_t i, j, n, n_t, n_alloc, start, end, na_idx
+
+ target_vals = targets._data
+ target_mask = targets._mask
+
+ values = self.values
+ assert not values.dtype == object # go through object path instead
+
+ mask = self.mask
+ stargets = set(target_vals[~target_mask])
+
+ n = len(values)
+ n_t = len(target_vals)
+ if n > 10_000:
+ n_alloc = 10_000
+ else:
+ n_alloc = n
+
+ result = np.empty(n_alloc, dtype=np.intp)
+ missing = np.empty(n_t, dtype=np.intp)
+
+ # map each starget to its position in the index
+ if (
+ stargets and
+ len(stargets) < 5 and
+ not np.any(target_mask) and
+ self.is_monotonic_increasing
+ ):
+ # if there are few enough stargets and the index is monotonically
+ # increasing, then use binary search for each starget
+ for starget in stargets:
+ start = values.searchsorted(starget, side="left")
+ end = values.searchsorted(starget, side="right")
+ if start != end:
+ d[starget] = list(range(start, end))
+
+ stargets = set()
+
+ if stargets:
+ # otherwise, map by iterating through all items in the index
+
+ na_pos = []
+
+ for i in range(n):
+ val = values[i]
+
+ if mask[i]:
+ na_pos.append(i)
+
+ else:
+ if val in stargets:
+ if val not in d:
+ d[val] = []
+ d[val].append(i)
+
+ for i in range(n_t):
+ val = target_vals[i]
+
+ if target_mask[i]:
+ if na_pos:
+ for na_idx in na_pos:
+ # realloc if needed
+ if count >= n_alloc:
+ n_alloc += 10_000
+ result = np.resize(result, n_alloc)
+
+ result[count] = na_idx
+ count += 1
+ continue
+
+ elif val in d:
+ # found
+ key = val
+
+ for j in d[key]:
+
+ # realloc if needed
+ if count >= n_alloc:
+ n_alloc += 10_000
+ result = np.resize(result, n_alloc)
+
+ result[count] = j
+ count += 1
+ continue
+
+ # value not found
+ if count >= n_alloc:
+ n_alloc += 10_000
+ result = np.resize(result, n_alloc)
+ result[count] = -1
+ count += 1
+ missing[count_missing] = i
+ count_missing += 1
+
+ return result[0:count], missing[0:count_missing]
diff --git a/pandas/_libs/index_class_helper.pxi.in b/pandas/_libs/index_class_helper.pxi.in
index b9c02ba64f69c..bf3d88edd9386 100644
--- a/pandas/_libs/index_class_helper.pxi.in
+++ b/pandas/_libs/index_class_helper.pxi.in
@@ -24,17 +24,29 @@ dtypes = [('Float64', 'float64'),
('Complex64', 'complex64'),
('Complex128', 'complex128'),
]
+
+engines = [('', 'IndexEngine'), ('Masked', 'MaskedIndexEngine')]
+
}}
{{for name, dtype in dtypes}}
+{{for prefix, engine in engines}}
-cdef class {{name}}Engine(IndexEngine):
+cdef class {{prefix}}{{name}}Engine({{engine}}):
cdef _make_hash_table(self, Py_ssize_t n):
+ {{if engine == 'MaskedIndexEngine'}}
+ return _hash.{{name}}HashTable(n, uses_mask=True)
+ {{else}}
return _hash.{{name}}HashTable(n)
+ {{endif}}
cdef _check_type(self, object val):
+ {{if engine == 'MaskedIndexEngine'}}
+ if val is C_NA:
+ return val
+ {{endif}}
{{if name not in {'Float64', 'Float32', 'Complex64', 'Complex128'} }}
if not util.is_integer_object(val):
if util.is_float_object(val):
@@ -61,5 +73,6 @@ cdef class {{name}}Engine(IndexEngine):
{{endif}}
return val
+{{endfor}}
{{endfor}}
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2948bb81d0b6a..df98df1a11557 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -206,6 +206,22 @@
_dtype_obj = np.dtype("object")
+_masked_engines = {
+ "Complex128": libindex.MaskedComplex128Engine,
+ "Complex64": libindex.MaskedComplex64Engine,
+ "Float64": libindex.MaskedFloat64Engine,
+ "Float32": libindex.MaskedFloat32Engine,
+ "UInt64": libindex.MaskedUInt64Engine,
+ "UInt32": libindex.MaskedUInt32Engine,
+ "UInt16": libindex.MaskedUInt16Engine,
+ "UInt8": libindex.MaskedUInt8Engine,
+ "Int64": libindex.MaskedInt64Engine,
+ "Int32": libindex.MaskedInt32Engine,
+ "Int16": libindex.MaskedInt16Engine,
+ "Int8": libindex.MaskedInt8Engine,
+ "boolean": libindex.MaskedBoolEngine,
+}
+
def _maybe_return_indexers(meth: F) -> F:
"""
@@ -755,14 +771,15 @@ def _cleanup(self) -> None:
@cache_readonly
def _engine(
self,
- ) -> libindex.IndexEngine | libindex.ExtensionEngine:
+ ) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine:
# For base class (object dtype) we get ObjectEngine
target_values = self._get_engine_target()
- if (
- isinstance(target_values, ExtensionArray)
- and self._engine_type is libindex.ObjectEngine
- ):
- return libindex.ExtensionEngine(target_values)
+ if isinstance(target_values, ExtensionArray):
+
+ if isinstance(target_values, BaseMaskedArray):
+ return _masked_engines[target_values.dtype.name](target_values)
+ elif self._engine_type is libindex.ObjectEngine:
+ return libindex.ExtensionEngine(target_values)
target_values = cast(np.ndarray, target_values)
# to avoid a reference cycle, bind `target_values` to a local variable, so
@@ -4836,7 +4853,11 @@ def _get_engine_target(self) -> ArrayLike:
if isinstance(vals, StringArray):
# GH#45652 much more performant than ExtensionEngine
return vals._ndarray
- if type(self) is Index and isinstance(self._values, ExtensionArray):
+ if (
+ type(self) is Index
+ and isinstance(self._values, ExtensionArray)
+ and not isinstance(self._values, BaseMaskedArray)
+ ):
# TODO(ExtensionIndex): remove special-case, just use self._values
return self._values.astype(object)
return vals
diff --git a/pandas/tests/indexes/numeric/test_indexing.py b/pandas/tests/indexes/numeric/test_indexing.py
index 5c4596b0d9503..eec1df8b44f33 100644
--- a/pandas/tests/indexes/numeric/test_indexing.py
+++ b/pandas/tests/indexes/numeric/test_indexing.py
@@ -4,12 +4,14 @@
from pandas.errors import InvalidIndexError
from pandas import (
+ NA,
Index,
RangeIndex,
Series,
Timestamp,
)
import pandas._testing as tm
+from pandas.core.arrays import FloatingArray
@pytest.fixture
@@ -314,6 +316,76 @@ def test_get_indexer_uint64(self, index_large):
expected = np.array([0, 1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
+ @pytest.mark.parametrize("val, val2", [(4, 5), (4, 4), (4, NA), (NA, NA)])
+ def test_get_loc_masked(self, val, val2, any_numeric_ea_dtype):
+ # GH#39133
+ idx = Index([1, 2, 3, val, val2], dtype=any_numeric_ea_dtype)
+ result = idx.get_loc(2)
+ assert result == 1
+
+ with pytest.raises(KeyError, match="9"):
+ idx.get_loc(9)
+
+ def test_get_loc_masked_na(self, any_numeric_ea_dtype):
+ # GH#39133
+ idx = Index([1, 2, NA], dtype=any_numeric_ea_dtype)
+ result = idx.get_loc(NA)
+ assert result == 2
+
+ idx = Index([1, 2, NA, NA], dtype=any_numeric_ea_dtype)
+ result = idx.get_loc(NA)
+ tm.assert_numpy_array_equal(result, np.array([False, False, True, True]))
+
+ idx = Index([1, 2, 3], dtype=any_numeric_ea_dtype)
+ with pytest.raises(KeyError, match="NA"):
+ idx.get_loc(NA)
+
+ def test_get_loc_masked_na_and_nan(self):
+ # GH#39133
+ idx = Index(
+ FloatingArray(
+ np.array([1, 2, 1, np.nan]), mask=np.array([False, False, True, False])
+ )
+ )
+ result = idx.get_loc(NA)
+ assert result == 2
+ result = idx.get_loc(np.nan)
+ assert result == 3
+
+ idx = Index(
+ FloatingArray(np.array([1, 2, 1.0]), mask=np.array([False, False, True]))
+ )
+ result = idx.get_loc(NA)
+ assert result == 2
+ with pytest.raises(KeyError, match="nan"):
+ idx.get_loc(np.nan)
+
+ idx = Index(
+ FloatingArray(
+ np.array([1, 2, np.nan]), mask=np.array([False, False, False])
+ )
+ )
+ result = idx.get_loc(np.nan)
+ assert result == 2
+ with pytest.raises(KeyError, match="NA"):
+ idx.get_loc(NA)
+
+ @pytest.mark.parametrize("val", [4, 2])
+ def test_get_indexer_masked_na(self, any_numeric_ea_dtype, val):
+ # GH#39133
+ idx = Index([1, 2, NA, 3, val], dtype=any_numeric_ea_dtype)
+ result = idx.get_indexer_for([1, NA, 5])
+ expected = np.array([0, 2, -1])
+ tm.assert_numpy_array_equal(result, expected, check_dtype=False)
+
+ def test_get_indexer_masked_na_boolean(self):
+ # GH#39133
+ idx = Index([True, False, NA], dtype="boolean")
+ result = idx.get_loc(False)
+ assert result == 1
+ result = idx.get_loc(NA)
+ assert result == 2
+
class TestWhere:
@pytest.mark.parametrize(
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
```
before after ratio
[626b6510] [efde9dd0]
- 270±10μs 148±3μs 0.55 indexing.NumericMaskedIndexing.time_get_indexer('UInt64', True)
- 210±5μs 106±1μs 0.51 indexing.NumericMaskedIndexing.time_get_indexer('Int64', True)
- 306±50μs 150±3μs 0.49 indexing.NumericMaskedIndexing.time_get_indexer('UInt64', False)
- 340±100μs 158±3μs 0.46 indexing.NumericMaskedIndexing.time_get_indexer_dups('Float64', False)
- 237±20μs 107±0.6μs 0.45 indexing.NumericMaskedIndexing.time_get_indexer('Int64', False)
- 360±70μs 157±2μs 0.44 indexing.NumericMaskedIndexing.time_get_indexer_dups('Float64', True)
- 369±100μs 155±3μs 0.42 indexing.NumericMaskedIndexing.time_get_indexer('Float64', False)
- 330±80μs 137±1μs 0.42 indexing.NumericMaskedIndexing.time_get_indexer_dups('UInt64', True)
- 335±50μs 140±2μs 0.42 indexing.NumericMaskedIndexing.time_get_indexer_dups('UInt64', False)
- 373±200μs 155±3μs 0.42 indexing.NumericMaskedIndexing.time_get_indexer('Float64', True)
- 270±30μs 106±1μs 0.39 indexing.NumericMaskedIndexing.time_get_indexer_dups('Int64', False)
- 310±60μs 110±1μs 0.36 indexing.NumericMaskedIndexing.time_get_indexer_dups('Int64', True)
```
Lets see if ci passes
| https://api.github.com/repos/pandas-dev/pandas/pulls/49420 | 2022-10-31T18:25:55Z | 2023-01-26T19:36:19Z | 2023-01-26T19:36:19Z | 2023-01-27T00:38:20Z |
BUG: Unclear error message when merging tables and passing invalid option to validate | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index ac3a1ca95fbb4..4aa26d406015f 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -464,6 +464,7 @@ Reshaping
- Bug in :meth:`DataFrame.pivot` not respecting ``None`` as column name (:issue:`48293`)
- Bug in :func:`join` when ``left_on`` or ``right_on`` is or includes a :class:`CategoricalIndex` incorrectly raising ``AttributeError`` (:issue:`48464`)
- Bug in :meth:`DataFrame.pivot_table` raising ``ValueError`` with parameter ``margins=True`` when result is an empty :class:`DataFrame` (:issue:`49240`)
+- Clarified error message in :func:`merge` when passing invalid ``validate`` option (:issue:`49417`)
Sparse
^^^^^^
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 7d9842f7e5341..3f98ab16c6797 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1568,7 +1568,18 @@ def _validate(self, validate: str) -> None:
pass
else:
- raise ValueError("Not a valid argument for validate")
+ raise ValueError(
+ f'"{validate}" is not a valid argument. '
+ "Valid arguments are:\n"
+ '- "1:1"\n'
+ '- "1:m"\n'
+ '- "m:1"\n'
+ '- "m:m"\n'
+ '- "one_to_one"\n'
+ '- "one_to_many"\n'
+ '- "many_to_one"\n'
+ '- "many_to_many"'
+ )
def get_join_indexers(
diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py
index 9081f69d5d2bc..9a4837939aceb 100644
--- a/pandas/tests/frame/methods/test_join.py
+++ b/pandas/tests/frame/methods/test_join.py
@@ -143,7 +143,18 @@ def test_suffix_on_list_join():
def test_join_invalid_validate(left_no_dup, right_no_dup):
# GH 46622
# Check invalid arguments
- msg = "Not a valid argument for validate"
+ msg = (
+ '"invalid" is not a valid argument. '
+ "Valid arguments are:\n"
+ '- "1:1"\n'
+ '- "1:m"\n'
+ '- "m:1"\n'
+ '- "m:m"\n'
+ '- "one_to_one"\n'
+ '- "one_to_many"\n'
+ '- "many_to_one"\n'
+ '- "many_to_many"'
+ )
with pytest.raises(ValueError, match=msg):
left_no_dup.merge(right_no_dup, on="a", validate="invalid")
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index dd50d97c47f37..e4638c43e5a66 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -1253,7 +1253,18 @@ def test_validation(self):
merge(left_w_dups, right_w_dups, on="a", validate="one_to_many")
# Check invalid arguments
- msg = "Not a valid argument for validate"
+ msg = (
+ '"jibberish" is not a valid argument. '
+ "Valid arguments are:\n"
+ '- "1:1"\n'
+ '- "1:m"\n'
+ '- "m:1"\n'
+ '- "m:m"\n'
+ '- "one_to_one"\n'
+ '- "one_to_many"\n'
+ '- "many_to_one"\n'
+ '- "many_to_many"'
+ )
with pytest.raises(ValueError, match=msg):
merge(left, right, on="a", validate="jibberish")
| - [ ] closes #49417
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49419 | 2022-10-31T18:22:31Z | 2022-11-03T17:33:18Z | 2022-11-03T17:33:18Z | 2022-11-03T17:33:29Z |
issue 48855 enable pylint unnecessary-pass | diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py
index 833bc3143b968..a3a05a9d75c6e 100644
--- a/pandas/core/computation/engines.py
+++ b/pandas/core/computation/engines.py
@@ -102,7 +102,6 @@ def _evaluate(self):
-----
Must be implemented by subclasses.
"""
- pass
class NumExprEngine(AbstractEngine):
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 85b1a2a61012a..e57e11f1bd2bd 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -116,8 +116,6 @@ class CategoricalDtypeType(type):
the type of CategoricalDtype, this metaclass determines subclass ability
"""
- pass
-
@register_extension_dtype
class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 27058cb54dddf..3b8380a88bb8b 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3610,7 +3610,6 @@ def _union(self, other, sort) -> MultiIndex:
RuntimeWarning,
stacklevel=find_stack_level(),
)
- pass
return result
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
diff --git a/pandas/core/interchange/dataframe_protocol.py b/pandas/core/interchange/dataframe_protocol.py
index 3ab87d9a60399..4ec905eef8646 100644
--- a/pandas/core/interchange/dataframe_protocol.py
+++ b/pandas/core/interchange/dataframe_protocol.py
@@ -134,7 +134,6 @@ def bufsize(self) -> int:
"""
Buffer size in bytes.
"""
- pass
@property
@abstractmethod
@@ -142,7 +141,6 @@ def ptr(self) -> int:
"""
Pointer to start of the buffer as an integer.
"""
- pass
@abstractmethod
def __dlpack__(self):
@@ -166,7 +164,6 @@ def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
Uses device type codes matching DLPack.
Note: must be implemented even if ``__dlpack__`` is not.
"""
- pass
class Column(ABC):
@@ -222,7 +219,6 @@ def size(self) -> int:
Corresponds to DataFrame.num_rows() if column is a single chunk;
equal to size of this current chunk otherwise.
"""
- pass
@property
@abstractmethod
@@ -234,7 +230,6 @@ def offset(self) -> int:
equal size M (only the last chunk may be shorter),
``offset = n * M``, ``n = 0 .. N-1``.
"""
- pass
@property
@abstractmethod
@@ -266,7 +261,6 @@ def dtype(self) -> tuple[DtypeKind, int, str, str]:
- Data types not included: complex, Arrow-style null, binary, decimal,
and nested (list, struct, map, union) dtypes.
"""
- pass
@property
@abstractmethod
@@ -289,7 +283,6 @@ def describe_categorical(self) -> CategoricalDescription:
TBD: are there any other in-memory representations that are needed?
"""
- pass
@property
@abstractmethod
@@ -302,7 +295,6 @@ def describe_null(self) -> tuple[ColumnNullType, Any]:
mask or a byte mask, the value (0 or 1) indicating a missing value. None
otherwise.
"""
- pass
@property
@abstractmethod
@@ -312,7 +304,6 @@ def null_count(self) -> int | None:
Note: Arrow uses -1 to indicate "unknown", but None seems cleaner.
"""
- pass
@property
@abstractmethod
@@ -320,14 +311,12 @@ def metadata(self) -> dict[str, Any]:
"""
The metadata for the column. See `DataFrame.metadata` for more details.
"""
- pass
@abstractmethod
def num_chunks(self) -> int:
"""
Return the number of chunks the column consists of.
"""
- pass
@abstractmethod
def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]:
@@ -336,7 +325,6 @@ def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]:
See `DataFrame.get_chunks` for details on ``n_chunks``.
"""
- pass
@abstractmethod
def get_buffers(self) -> ColumnBuffers:
@@ -360,7 +348,6 @@ def get_buffers(self) -> ColumnBuffers:
if the data buffer does not have an associated offsets
buffer.
"""
- pass
# def get_children(self) -> Iterable[Column]:
@@ -391,7 +378,6 @@ class DataFrame(ABC):
@abstractmethod
def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):
"""Construct a new interchange object, potentially changing the parameters."""
- pass
@property
@abstractmethod
@@ -405,14 +391,12 @@ def metadata(self) -> dict[str, Any]:
entries, please add name the keys with the name of the library
followed by a period and the desired name, e.g, ``pandas.indexcol``.
"""
- pass
@abstractmethod
def num_columns(self) -> int:
"""
Return the number of columns in the DataFrame.
"""
- pass
@abstractmethod
def num_rows(self) -> int | None:
@@ -422,56 +406,48 @@ def num_rows(self) -> int | None:
"""
Return the number of rows in the DataFrame, if available.
"""
- pass
@abstractmethod
def num_chunks(self) -> int:
"""
Return the number of chunks the DataFrame consists of.
"""
- pass
@abstractmethod
def column_names(self) -> Iterable[str]:
"""
Return an iterator yielding the column names.
"""
- pass
@abstractmethod
def get_column(self, i: int) -> Column:
"""
Return the column at the indicated position.
"""
- pass
@abstractmethod
def get_column_by_name(self, name: str) -> Column:
"""
Return the column whose name is the indicated name.
"""
- pass
@abstractmethod
def get_columns(self) -> Iterable[Column]:
"""
Return an iterator yielding the columns.
"""
- pass
@abstractmethod
def select_columns(self, indices: Sequence[int]) -> DataFrame:
"""
Create a new DataFrame by selecting a subset of columns by index.
"""
- pass
@abstractmethod
def select_columns_by_name(self, names: Sequence[str]) -> DataFrame:
"""
Create a new DataFrame by selecting a subset of columns by name.
"""
- pass
@abstractmethod
def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]:
@@ -483,4 +459,3 @@ def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]:
``self.num_chunks()``, meaning the producer must subdivide each chunk
before yielding it.
"""
- pass
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index bc3abfb94f31c..0b284fd4e9750 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -1142,7 +1142,6 @@ def engine(self) -> str:
@abc.abstractmethod
def sheets(self) -> dict[str, Any]:
"""Mapping of sheet names to sheet objects."""
- pass
@property
@abc.abstractmethod
@@ -1152,7 +1151,6 @@ def book(self):
This attribute can be used to access engine-specific features.
"""
- pass
@book.setter
@abc.abstractmethod
@@ -1160,7 +1158,6 @@ def book(self, other) -> None:
"""
Set book instance. Class type will depend on the engine used.
"""
- pass
def write_cells(
self,
@@ -1212,7 +1209,6 @@ def _write_cells(
freeze_panes: int tuple of length 2
contains the bottom-most row and right-most column to freeze
"""
- pass
def save(self) -> None:
"""
@@ -1228,7 +1224,6 @@ def _save(self) -> None:
"""
Save workbook to disk.
"""
- pass
def __init__(
self,
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 67c81db8d3a8f..c6ee535c1bb0b 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -249,7 +249,6 @@ def write(self) -> str:
@abstractmethod
def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
"""Object to write in JSON format."""
- pass
class SeriesWriter(Writer):
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 483c385fa32fc..1f2bb4c5d21b4 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2593,8 +2593,6 @@ def get_atom_timedelta64(cls, shape):
class GenericDataIndexableCol(DataIndexableCol):
"""represent a generic pytables data column"""
- pass
-
class Fixed:
"""
@@ -2701,11 +2699,9 @@ def attrs(self):
def set_attrs(self) -> None:
"""set our object attributes"""
- pass
def get_attrs(self) -> None:
"""get our object attributes"""
- pass
@property
def storable(self):
@@ -2728,7 +2724,6 @@ def validate(self, other) -> Literal[True] | None:
def validate_version(self, where=None) -> None:
"""are we trying to operate on an old version?"""
- pass
def infer_axes(self) -> bool:
"""
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 60c710d1930ed..5860aa4ae7c3e 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -2417,7 +2417,6 @@ def _replace_nans(self, data: DataFrame) -> DataFrame:
def _update_strl_names(self) -> None:
"""No-op, forward compatibility"""
- pass
def _validate_variable_name(self, name: str) -> str:
"""
@@ -2701,19 +2700,15 @@ def _close(self) -> None:
def _write_map(self) -> None:
"""No-op, future compatibility"""
- pass
def _write_file_close_tag(self) -> None:
"""No-op, future compatibility"""
- pass
def _write_characteristics(self) -> None:
"""No-op, future compatibility"""
- pass
def _write_strls(self) -> None:
"""No-op, future compatibility"""
- pass
def _write_expansion_fields(self) -> None:
"""Write 5 zeros for expansion fields"""
@@ -3438,7 +3433,6 @@ def _write_strls(self) -> None:
def _write_expansion_fields(self) -> None:
"""No-op in dta 117+"""
- pass
def _write_value_labels(self) -> None:
self._update_map("value_labels")
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 9bcb51a7b032a..95d1d7db1ab9a 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -668,7 +668,6 @@ def _post_plot_logic_common(self, ax, data):
def _post_plot_logic(self, ax, data) -> None:
"""Post process for each axes. Overridden in child classes"""
- pass
def _adorn_subplots(self):
"""Common post process unrelated to data"""
diff --git a/pandas/tests/base/test_constructors.py b/pandas/tests/base/test_constructors.py
index c8b923031b9e8..5f654ca3a996b 100644
--- a/pandas/tests/base/test_constructors.py
+++ b/pandas/tests/base/test_constructors.py
@@ -56,7 +56,6 @@ def _get_foo(self):
def bar(self, *args, **kwargs):
"""a test bar method"""
- pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj) -> None:
diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py
index 315578a5dbf50..f8c6042c5007d 100644
--- a/pandas/tests/indexes/interval/test_constructors.py
+++ b/pandas/tests/indexes/interval/test_constructors.py
@@ -377,7 +377,6 @@ def test_generic_errors(self, constructor):
override the base class implementation since errors are handled
differently; checks unnecessary since caught at the Interval level
"""
- pass
def test_constructor_string(self):
# GH23013
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index 28545b7ab2cc6..109c6dbb469c9 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -423,11 +423,9 @@ def test_encode_recursion_max(self):
class O2:
member = 0
- pass
class O1:
member = 0
- pass
decoded_input = O1()
decoded_input.member = O2()
diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py
index 40b08e6d68016..890819d42841e 100644
--- a/pandas/tests/io/parser/test_parse_dates.py
+++ b/pandas/tests/io/parser/test_parse_dates.py
@@ -1673,7 +1673,6 @@ def _helper_hypothesis_delimited_date(call, date_string, **kwargs):
result = call(date_string, **kwargs)
except ValueError as er:
msg = str(er)
- pass
return msg, result
diff --git a/pandas/tests/util/test_deprecate.py b/pandas/tests/util/test_deprecate.py
index ee4f7e3f34f2e..92f422b8269f5 100644
--- a/pandas/tests/util/test_deprecate.py
+++ b/pandas/tests/util/test_deprecate.py
@@ -34,7 +34,6 @@ def new_func_with_deprecation():
This is the extended summary. The deprecate directive goes before this.
"""
- pass
def test_deprecate_ok():
diff --git a/pyproject.toml b/pyproject.toml
index aecb4cc82cbbc..4f1e072de7e8d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -177,7 +177,6 @@ disable = [
"try-except-raise",
"undefined-loop-variable",
"unnecessary-lambda",
- "unnecessary-pass",
"unspecified-encoding",
"unused-argument",
"unused-import",
diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index dcfef648e8f1c..35bc477d5b3d5 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -23,7 +23,6 @@ def prefix_pandas(self):
pandas.Series.rename : Alter Series index labels or name.
DataFrame.head : The first `n` rows of the caller object.
"""
- pass
def redundant_import(self, foo=None, bar=None):
"""
@@ -45,7 +44,6 @@ def redundant_import(self, foo=None, bar=None):
>>> df.all(bool_only=True)
Series([], dtype: bool)
"""
- pass
def unused_import(self):
"""
@@ -54,7 +52,6 @@ def unused_import(self):
>>> import pandas as pdf
>>> df = pd.DataFrame(np.ones((3, 3)), columns=('a', 'b', 'c'))
"""
- pass
def missing_whitespace_around_arithmetic_operator(self):
"""
@@ -63,7 +60,6 @@ def missing_whitespace_around_arithmetic_operator(self):
>>> 2+5
7
"""
- pass
def indentation_is_not_a_multiple_of_four(self):
"""
@@ -72,7 +68,6 @@ def indentation_is_not_a_multiple_of_four(self):
>>> if 2 + 5:
... pass
"""
- pass
def missing_whitespace_after_comma(self):
"""
@@ -80,13 +75,11 @@ def missing_whitespace_after_comma(self):
--------
>>> df = pd.DataFrame(np.ones((3,3)),columns=('a','b', 'c'))
"""
- pass
def write_array_like_with_hyphen_not_underscore(self):
"""
In docstrings, use array-like over array_like
"""
- pass
def leftover_files(self):
"""
@@ -95,7 +88,6 @@ def leftover_files(self):
>>> import pathlib
>>> pathlib.Path("foo.txt").touch()
"""
- pass
class TestValidator:
| Issue #48855. This PR enables pylint type "W" warning: `unnecessary-pass`.
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). | https://api.github.com/repos/pandas-dev/pandas/pulls/49418 | 2022-10-31T18:20:56Z | 2022-11-01T14:54:19Z | 2022-11-01T14:54:19Z | 2022-11-01T14:54:20Z |
API: make some Timestamp args keyword-only | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 7335dbc28a8f9..0cf47cb1ddbfe 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -131,6 +131,7 @@ See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for mor
Other API changes
^^^^^^^^^^^^^^^^^
+- The ``freq``, ``tz``, ``nanosecond``, and ``unit`` keywords in the :class:`Timestamp` constructor are now keyword-only (:issue:`45307`)
- Passing ``nanoseconds`` greater than 999 or less than 0 in :class:`Timestamp` now raises a ``ValueError`` (:issue:`48538`, :issue:`48255`)
- :func:`read_csv`: specifying an incorrect number of columns with ``index_col`` of now raises ``ParserError`` instead of ``IndexError`` when using the c parser.
- Default value of ``dtype`` in :func:`get_dummies` is changed to ``bool`` from ``uint8`` (:issue:`45848`)
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
index e916d7eb12dbf..da9fe7b4126e9 100644
--- a/pandas/_libs/tslibs/timestamps.pyi
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -37,9 +37,6 @@ class Timestamp(datetime):
def __new__( # type: ignore[misc]
cls: type[_DatetimeT],
ts_input: np.integer | float | str | _date | datetime | np.datetime64 = ...,
- freq: int | None | str | BaseOffset = ...,
- tz: str | _tzinfo | None | int = ...,
- unit: str | int | None = ...,
year: int | None = ...,
month: int | None = ...,
day: int | None = ...,
@@ -47,9 +44,12 @@ class Timestamp(datetime):
minute: int | None = ...,
second: int | None = ...,
microsecond: int | None = ...,
- nanosecond: int | None = ...,
tzinfo: _tzinfo | None = ...,
*,
+ nanosecond: int | None = ...,
+ freq: int | None | str | BaseOffset = ...,
+ tz: str | _tzinfo | None | int = ...,
+ unit: str | int | None = ...,
fold: int | None = ...,
) -> _DatetimeT | NaTType: ...
def _set_freq(self, freq: BaseOffset | None) -> None: ...
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 1a2e2760d3d8d..4a855d5f7844d 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -1242,6 +1242,10 @@ class Timestamp(_Timestamp):
----------
ts_input : datetime-like, str, int, float
Value to be converted to Timestamp.
+ year, month, day : int
+ hour, minute, second, microsecond : int, optional, default 0
+ tzinfo : datetime.tzinfo, optional, default None
+ nanosecond : int, optional, default 0
freq : str, DateOffset
Offset which Timestamp will have.
tz : str, pytz.timezone, dateutil.tz.tzfile or None
@@ -1250,10 +1254,6 @@ class Timestamp(_Timestamp):
Unit used for conversion if ts_input is of type int or float. The
valid values are 'D', 'h', 'm', 's', 'ms', 'us', and 'ns'. For
example, 's' means seconds and 'ms' means milliseconds.
- year, month, day : int
- hour, minute, second, microsecond : int, optional, default 0
- nanosecond : int, optional, default 0
- tzinfo : datetime.tzinfo, optional, default None
fold : {0, 1}, default None, keyword-only
Due to daylight saving time, one wall clock time can occur twice
when shifting from summer to winter time; fold describes whether the
@@ -1480,9 +1480,6 @@ class Timestamp(_Timestamp):
def __new__(
cls,
object ts_input=_no_input,
- object freq=None,
- tz=None,
- unit=None,
year=None,
month=None,
day=None,
@@ -1490,9 +1487,12 @@ class Timestamp(_Timestamp):
minute=None,
second=None,
microsecond=None,
- nanosecond=None,
tzinfo_type tzinfo=None,
*,
+ nanosecond=None,
+ object freq=None,
+ tz=None,
+ unit=None,
fold=None,
):
# The parameter list folds together legacy parameter names (the first
@@ -1527,27 +1527,6 @@ class Timestamp(_Timestamp):
# GH#17690 tzinfo must be a datetime.tzinfo object, ensured
# by the cython annotation.
if tz is not None:
- if (is_integer_object(tz)
- and is_integer_object(ts_input)
- and is_integer_object(freq)
- ):
- # GH#31929 e.g. Timestamp(2019, 3, 4, 5, 6, tzinfo=foo)
- # TODO(GH#45307): this will still be fragile to
- # mixed-and-matched positional/keyword arguments
- ts_input = datetime(
- ts_input,
- freq,
- tz,
- unit or 0,
- year or 0,
- month or 0,
- day or 0,
- fold=fold or 0,
- )
- nanosecond = hour
- tz = tzinfo
- return cls(ts_input, nanosecond=nanosecond, tz=tz)
-
raise ValueError('Can provide at most one of tz, tzinfo')
# User passed tzinfo instead of tz; avoid silently ignoring
@@ -1596,7 +1575,7 @@ class Timestamp(_Timestamp):
if any(arg is not None for arg in _date_attributes):
raise ValueError(
"Cannot pass a date attribute keyword "
- "argument when passing a date string"
+ "argument when passing a date string; 'tz' is keyword-only"
)
elif ts_input is _no_input:
@@ -1620,17 +1599,20 @@ class Timestamp(_Timestamp):
ts_input = datetime(**datetime_kwargs)
- elif is_integer_object(freq):
+ elif is_integer_object(year):
# User passed positional arguments:
# Timestamp(year, month, day[, hour[, minute[, second[,
# microsecond[, nanosecond[, tzinfo]]]]]])
- ts_input = datetime(ts_input, freq, tz, unit or 0,
- year or 0, month or 0, day or 0, fold=fold or 0)
- nanosecond = hour
- tz = minute
+ ts_input = datetime(ts_input, year, month, day or 0,
+ hour or 0, minute or 0, second or 0, fold=fold or 0)
freq = None
unit = None
+ if nanosecond is None:
+ # nanosecond was not passed as a keyword, but may have been
+ # passed positionally see test_constructor_nanosecond
+ nanosecond = microsecond
+
if getattr(ts_input, 'tzinfo', None) is not None and tz is not None:
raise ValueError("Cannot pass a datetime or Timestamp with tzinfo with "
"the tz parameter. Use tz_convert instead.")
diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py
index 890590094094a..d930b63fd0c0b 100644
--- a/pandas/tests/indexes/datetimes/test_scalar_compat.py
+++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py
@@ -81,7 +81,7 @@ def test_dti_timestamp_freq_fields(self):
msg = "The 'freq' argument in Timestamp is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
- ts = Timestamp(idx[-1], idx.freq)
+ ts = Timestamp(idx[-1], freq=idx.freq)
msg2 = "Timestamp.freq is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg2):
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
index 341e850a7464e..70f35a8ff339b 100644
--- a/pandas/tests/scalar/timestamp/test_constructors.py
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -237,7 +237,7 @@ def test_constructor_invalid_tz(self):
with pytest.raises(ValueError, match=msg):
Timestamp("2017-10-22", tzinfo=pytz.utc, tz="UTC")
- msg = "Invalid frequency:"
+ msg = "Cannot pass a date attribute keyword argument when passing a date string"
msg2 = "The 'freq' argument"
with pytest.raises(ValueError, match=msg):
# GH#5168
@@ -273,11 +273,15 @@ def test_constructor_positional_with_tzinfo(self):
expected = Timestamp("2020-12-31", tzinfo=timezone.utc)
assert ts == expected
- @pytest.mark.xfail(reason="GH#45307")
@pytest.mark.parametrize("kwd", ["nanosecond", "microsecond", "second", "minute"])
- def test_constructor_positional_keyword_mixed_with_tzinfo(self, kwd):
+ def test_constructor_positional_keyword_mixed_with_tzinfo(self, kwd, request):
# TODO: if we passed microsecond with a keyword we would mess up
# xref GH#45307
+ if kwd != "nanosecond":
+ # nanosecond is keyword-only as of 2.0, others are not
+ mark = pytest.mark.xfail(reason="GH#45307")
+ request.node.add_marker(mark)
+
kwargs = {kwd: 4}
ts = Timestamp(2020, 12, 31, tzinfo=timezone.utc, **kwargs)
@@ -399,9 +403,7 @@ def test_constructor_fromordinal(self):
tz="UTC",
),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
- # error: Argument 9 to "Timestamp" has incompatible type "_UTCclass";
- # expected "Optional[int]"
- Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC), # type: ignore[arg-type]
+ Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC),
],
)
def test_constructor_nanosecond(self, result):
| - [x] closes #45307 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
I think this will make it easier to get #49365 working. | https://api.github.com/repos/pandas-dev/pandas/pulls/49416 | 2022-10-31T17:38:48Z | 2022-11-02T18:53:35Z | 2022-11-02T18:53:35Z | 2022-11-02T18:55:03Z |
DEPR: non-keyword args, errors arg | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index eab8df5ccff73..b97cdd5d1e298 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -198,7 +198,9 @@ Removal of prior version deprecations/changes
- Removed argument ``inplace`` from :meth:`Categorical.remove_unused_categories` (:issue:`37918`)
- Disallow passing non-round floats to :class:`Timestamp` with ``unit="M"`` or ``unit="Y"`` (:issue:`47266`)
- Remove keywords ``convert_float`` and ``mangle_dupe_cols`` from :func:`read_excel` (:issue:`41176`)
+- Removed ``errors`` keyword from :meth:`DataFrame.where`, :meth:`Series.where`, :meth:`DataFrame.mask` and :meth:`Series.mask` (:issue:`47728`)
- Disallow passing non-keyword arguments to :func:`read_excel` except ``io`` and ``sheet_name`` (:issue:`34418`)
+- Disallow passing non-keyword arguments to :meth:`StringMethods.split` and :meth:`StringMethods.rsplit` except for ``pat`` (:issue:`47448`)
- Disallow passing non-keyword arguments to :meth:`DataFrame.set_index` except ``keys`` (:issue:`41495`)
- Disallow passing non-keyword arguments to :meth:`Resampler.interpolate` except ``method`` (:issue:`41699`)
- Disallow passing non-keyword arguments to :meth:`DataFrame.reset_index` and :meth:`Series.reset_index` except ``level`` (:issue:`41496`)
@@ -215,6 +217,7 @@ Removal of prior version deprecations/changes
- Disallow passing non-keyword arguments to :func:`concat` except for ``objs`` (:issue:`41485`)
- Disallow passing non-keyword arguments to :func:`pivot` except for ``data`` (:issue:`48301`)
- Disallow passing non-keyword arguments to :meth:`DataFrame.pivot` (:issue:`48301`)
+- Disallow passing non-keyword arguments to :func:`read_html` except for ``io`` (:issue:`27573`)
- Disallow passing non-keyword arguments to :func:`read_json` except for ``path_or_buf`` (:issue:`27573`)
- Disallow passing non-keyword arguments to :func:`read_sas` except for ``filepath_or_buffer`` (:issue:`47154`)
- Disallow passing non-keyword arguments to :func:`read_stata` except for ``filepath_or_buffer`` (:issue:`48128`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 17c4bde9d0279..42953f3ca7fdf 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -95,7 +95,6 @@
from pandas.util._decorators import (
Appender,
Substitution,
- deprecate_kwarg,
deprecate_nonkeyword_arguments,
doc,
rewrite_axis_style_signature,
@@ -11782,7 +11781,6 @@ def where(
inplace: Literal[False] = ...,
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> DataFrame:
...
@@ -11795,7 +11793,6 @@ def where(
inplace: Literal[True],
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> None:
...
@@ -11808,13 +11805,10 @@ def where(
inplace: bool = ...,
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> DataFrame | None:
...
- # error: Signature of "where" incompatible with supertype "NDFrame"
- @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
- def where( # type: ignore[override]
+ def where(
self,
cond,
other=lib.no_default,
@@ -11822,7 +11816,6 @@ def where( # type: ignore[override]
inplace: bool = False,
axis: Axis | None = None,
level: Level = None,
- errors: IgnoreRaise | lib.NoDefault = "raise",
) -> DataFrame | None:
return super().where(
cond,
@@ -11841,7 +11834,6 @@ def mask(
inplace: Literal[False] = ...,
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> DataFrame:
...
@@ -11854,7 +11846,6 @@ def mask(
inplace: Literal[True],
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> None:
...
@@ -11867,13 +11858,10 @@ def mask(
inplace: bool = ...,
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> DataFrame | None:
...
- # error: Signature of "mask" incompatible with supertype "NDFrame"
- @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
- def mask( # type: ignore[override]
+ def mask(
self,
cond,
other=lib.no_default,
@@ -11881,7 +11869,6 @@ def mask( # type: ignore[override]
inplace: bool = False,
axis: Axis | None = None,
level: Level = None,
- errors: IgnoreRaise | lib.NoDefault = "raise",
) -> DataFrame | None:
return super().mask(
cond,
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 8bf3820d2ea3c..39bc9fafd9f6d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9727,7 +9727,6 @@ def where(
inplace: Literal[False] = ...,
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> NDFrameT:
...
@@ -9740,7 +9739,6 @@ def where(
inplace: Literal[True],
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> None:
...
@@ -9753,11 +9751,9 @@ def where(
inplace: bool_t = ...,
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> NDFrameT | None:
...
- @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
@doc(
klass=_shared_doc_kwargs["klass"],
cond="True",
@@ -9773,7 +9769,6 @@ def where(
inplace: bool_t = False,
axis: Axis | None = None,
level: Level = None,
- errors: IgnoreRaise | lib.NoDefault = "raise",
) -> NDFrameT | None:
"""
Replace values where the condition is {cond_rev}.
@@ -9802,15 +9797,6 @@ def where(
unused and defaults to 0.
level : int, default None
Alignment level if needed.
- errors : str, {{'raise', 'ignore'}}, default 'raise'
- Note that currently this parameter won't affect
- the results and will always coerce to a suitable dtype.
-
- - 'raise' : allow exceptions to be raised.
- - 'ignore' : suppress exceptions. On error return original object.
-
- .. deprecated:: 1.5.0
- This argument had no effect.
Returns
-------
@@ -9933,7 +9919,6 @@ def mask(
inplace: Literal[False] = ...,
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> NDFrameT:
...
@@ -9946,7 +9931,6 @@ def mask(
inplace: Literal[True],
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> None:
...
@@ -9959,11 +9943,9 @@ def mask(
inplace: bool_t = ...,
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> NDFrameT | None:
...
- @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
@doc(
where,
klass=_shared_doc_kwargs["klass"],
@@ -9980,7 +9962,6 @@ def mask(
inplace: bool_t = False,
axis: Axis | None = None,
level: Level = None,
- errors: IgnoreRaise | lib.NoDefault = "raise",
) -> NDFrameT | None:
inplace = validate_bool_kwarg(inplace, "inplace")
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 7854bf6180733..645623f56e46d 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -67,7 +67,6 @@
from pandas.util._decorators import (
Appender,
Substitution,
- deprecate_kwarg,
deprecate_nonkeyword_arguments,
doc,
)
@@ -6019,7 +6018,6 @@ def where(
inplace: Literal[False] = ...,
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> Series:
...
@@ -6032,7 +6030,6 @@ def where(
inplace: Literal[True],
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> None:
...
@@ -6045,13 +6042,10 @@ def where(
inplace: bool = ...,
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> Series | None:
...
- # error: Signature of "where" incompatible with supertype "NDFrame"
- @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
- def where( # type: ignore[override]
+ def where(
self,
cond,
other=lib.no_default,
@@ -6059,7 +6053,6 @@ def where( # type: ignore[override]
inplace: bool = False,
axis: Axis | None = None,
level: Level = None,
- errors: IgnoreRaise | lib.NoDefault = lib.no_default,
) -> Series | None:
return super().where(
cond,
@@ -6078,7 +6071,6 @@ def mask(
inplace: Literal[False] = ...,
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> Series:
...
@@ -6091,7 +6083,6 @@ def mask(
inplace: Literal[True],
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> None:
...
@@ -6104,13 +6095,10 @@ def mask(
inplace: bool = ...,
axis: Axis | None = ...,
level: Level = ...,
- errors: IgnoreRaise | lib.NoDefault = ...,
) -> Series | None:
...
- # error: Signature of "mask" incompatible with supertype "NDFrame"
- @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
- def mask( # type: ignore[override]
+ def mask(
self,
cond,
other=lib.no_default,
@@ -6118,7 +6106,6 @@ def mask( # type: ignore[override]
inplace: bool = False,
axis: Axis | None = None,
level: Level = None,
- errors: IgnoreRaise | lib.NoDefault = lib.no_default,
) -> Series | None:
return super().mask(
cond,
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index 0888fa6985560..0024cbcb01bfc 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -21,10 +21,7 @@
F,
Scalar,
)
-from pandas.util._decorators import (
- Appender,
- deprecate_nonkeyword_arguments,
-)
+from pandas.util._decorators import Appender
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
@@ -840,14 +837,13 @@ def cat(
""",
}
)
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "pat"])
@forbid_nonstring_types(["bytes"])
def split(
self,
pat: str | re.Pattern | None = None,
+ *,
n=-1,
expand: bool = False,
- *,
regex: bool | None = None,
):
if regex is False and is_re(pat):
@@ -872,9 +868,8 @@ def split(
"regex_examples": "",
}
)
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "pat"])
@forbid_nonstring_types(["bytes"])
- def rsplit(self, pat=None, n=-1, expand: bool = False):
+ def rsplit(self, pat=None, *, n=-1, expand: bool = False):
result = self._data.array._str_rsplit(pat, n=n)
return self._wrap_result(result, expand=expand, returns_string=expand)
diff --git a/pandas/io/html.py b/pandas/io/html.py
index a08b73d94250b..b9bffdc7b871e 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -27,7 +27,6 @@
AbstractMethodError,
EmptyDataError,
)
-from pandas.util._decorators import deprecate_nonkeyword_arguments
from pandas.core.dtypes.common import is_list_like
@@ -1026,9 +1025,9 @@ def _parse(flavor, io, match, attrs, encoding, displayed_only, extract_links, **
return ret
-@deprecate_nonkeyword_arguments(version="2.0")
def read_html(
io: FilePath | ReadBuffer[str],
+ *,
match: str | Pattern = ".+",
flavor: str | None = None,
header: int | Sequence[int] | None = None,
diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py
index 1212659ea24e2..501822f856a63 100644
--- a/pandas/tests/frame/indexing/test_where.py
+++ b/pandas/tests/frame/indexing/test_where.py
@@ -1009,20 +1009,6 @@ def test_where_dt64_2d():
_check_where_equivalences(df, mask, other, expected)
-def test_where_mask_deprecated(frame_or_series):
- # GH 47728
- obj = DataFrame(np.random.randn(4, 3))
- obj = tm.get_obj(obj, frame_or_series)
-
- mask = obj > 0
-
- with tm.assert_produces_warning(FutureWarning):
- obj.where(mask, -1, errors="raise")
-
- with tm.assert_produces_warning(FutureWarning):
- obj.mask(mask, -1, errors="raise")
-
-
def test_where_producing_ea_cond_for_np_dtype():
# GH#44014
df = DataFrame({"a": Series([1, pd.NA, 2], dtype="Int64"), "b": [1, 2, 3]})
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 0ea1203359153..65e1a1e9830cc 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -133,32 +133,6 @@ def test_to_html_compat(self):
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
- @pytest.mark.network
- @tm.network(
- url=(
- "https://www.fdic.gov/resources/resolutions/"
- "bank-failures/failed-bank-list/index.html"
- ),
- check_before_test=True,
- )
- def test_banklist_url_positional_match(self):
- url = "https://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html" # noqa E501
- # Passing match argument as positional should cause a FutureWarning.
- with tm.assert_produces_warning(FutureWarning):
- df1 = self.read_html(
- # lxml cannot find attrs leave out for now
- url,
- "First Federal Bank of Florida", # attrs={"class": "dataTable"}
- )
- with tm.assert_produces_warning(FutureWarning):
- # lxml cannot find attrs leave out for now
- df2 = self.read_html(
- url,
- "Metcalf Bank",
- ) # attrs={"class": "dataTable"})
-
- assert_framelist_equal(df1, df2)
-
@pytest.mark.network
@tm.network(
url=(
diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py
index 409a3b231fa95..18a4d8355c764 100644
--- a/pandas/tests/series/methods/test_fillna.py
+++ b/pandas/tests/series/methods/test_fillna.py
@@ -151,17 +151,10 @@ def test_fillna_consistency(self):
)
tm.assert_series_equal(result, expected)
- # where (we ignore the errors=)
- with tm.assert_produces_warning(FutureWarning, match="the 'errors' keyword"):
- result = ser.where(
- [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
- )
+ result = ser.where([True, False], Timestamp("20130101", tz="US/Eastern"))
tm.assert_series_equal(result, expected)
- with tm.assert_produces_warning(FutureWarning, match="the 'errors' keyword"):
- result = ser.where(
- [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
- )
+ result = ser.where([True, False], Timestamp("20130101", tz="US/Eastern"))
tm.assert_series_equal(result, expected)
# with a non-datetime
diff --git a/pandas/tests/strings/test_split_partition.py b/pandas/tests/strings/test_split_partition.py
index 7d73414a672c8..74458c13e8df7 100644
--- a/pandas/tests/strings/test_split_partition.py
+++ b/pandas/tests/strings/test_split_partition.py
@@ -130,23 +130,6 @@ def test_rsplit_max_number(any_string_dtype):
tm.assert_series_equal(result, exp)
-@pytest.mark.parametrize("method", ["split", "rsplit"])
-def test_posargs_deprecation(method):
- # GH 47423; Deprecate passing n as positional.
- s = Series(["foo,bar,lorep"])
-
- msg = (
- f"In a future version of pandas all arguments of StringMethods.{method} "
- "except for the argument 'pat' will be keyword-only"
- )
-
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = getattr(s.str, method)(",", 3)
-
- expected = Series([["foo", "bar", "lorep"]])
- tm.assert_series_equal(result, expected)
-
-
def test_split_blank_string(any_string_dtype):
# expand blank split GH 20067
values = Series([""], name="test", dtype=any_string_dtype)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49415 | 2022-10-31T17:18:16Z | 2022-11-01T17:40:29Z | 2022-11-01T17:40:29Z | 2022-11-01T18:02:28Z |
Backport PR #48234 on branch 1.5.x (REGR: Fix regression RecursionError when replacing numeric scalar with None) | diff --git a/doc/source/whatsnew/v1.5.2.rst b/doc/source/whatsnew/v1.5.2.rst
index aaf00804262bb..4f6274b9084da 100644
--- a/doc/source/whatsnew/v1.5.2.rst
+++ b/doc/source/whatsnew/v1.5.2.rst
@@ -13,7 +13,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
--
+- Fixed regression in :meth:`Series.replace` raising ``RecursionError`` with numeric dtype and when specifying ``value=None`` (:issue:`45725`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 9c6b3e506b1d4..5e95f83ddfd08 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -569,7 +569,6 @@ def replace(
# Note: the checks we do in NDFrame.replace ensure we never get
# here with listlike to_replace or value, as those cases
# go through replace_list
-
values = self.values
if isinstance(values, Categorical):
@@ -608,7 +607,10 @@ def replace(
return blocks
elif self.ndim == 1 or self.shape[0] == 1:
- blk = self.coerce_to_target_dtype(value)
+ if value is None:
+ blk = self.astype(np.dtype(object))
+ else:
+ blk = self.coerce_to_target_dtype(value)
return blk.replace(
to_replace=to_replace,
value=value,
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index 177f3ec1b4504..f4de685688b00 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1496,6 +1496,18 @@ def test_replace_list_with_mixed_type(
result = obj.replace(box(to_replace), value)
tm.assert_equal(result, expected)
+ @pytest.mark.parametrize("val", [2, np.nan, 2.0])
+ def test_replace_value_none_dtype_numeric(self, val):
+ # GH#48231
+ df = DataFrame({"a": [1, val]})
+ result = df.replace(val, None)
+ expected = DataFrame({"a": [1, None]}, dtype=object)
+ tm.assert_frame_equal(result, expected)
+
+ df = DataFrame({"a": [1, val]})
+ result = df.replace({val: None})
+ tm.assert_frame_equal(result, expected)
+
class TestDataFrameReplaceRegex:
@pytest.mark.parametrize(
diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py
index 77c9cf4013bd7..126a89503d636 100644
--- a/pandas/tests/series/methods/test_replace.py
+++ b/pandas/tests/series/methods/test_replace.py
@@ -667,3 +667,11 @@ def test_replace_different_int_types(self, any_int_numpy_dtype):
result = labs.replace(map_dict)
expected = labs.replace({0: 0, 2: 1, 1: 2})
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("val", [2, np.nan, 2.0])
+ def test_replace_value_none_dtype_numeric(self, val):
+ # GH#48231
+ ser = pd.Series([1, val])
+ result = ser.replace(val, None)
+ expected = pd.Series([1, None], dtype=object)
+ tm.assert_series_equal(result, expected)
| Backport PR #48234: REGR: Fix regression RecursionError when replacing numeric scalar with None | https://api.github.com/repos/pandas-dev/pandas/pulls/49414 | 2022-10-31T17:14:00Z | 2022-10-31T19:58:04Z | 2022-10-31T19:58:04Z | 2022-10-31T19:58:05Z |
DEPR: indexing | diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml
index c2f40dfbfb250..7d6f841106198 100644
--- a/ci/deps/actions-38-minimum_versions.yaml
+++ b/ci/deps/actions-38-minimum_versions.yaml
@@ -32,7 +32,7 @@ dependencies:
- gcsfs=2021.07.0
- jinja2=3.0.0
- lxml=4.6.3
- - matplotlib=3.3.2
+ - matplotlib=3.6.0
- numba=0.53.1
- numexpr=2.7.3
- odfpy=1.4.1
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index aea65eb15e929..e9bf341eae73f 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -310,7 +310,7 @@ Can be managed as optional_extra with ``pandas[plot, output_formatting]``, depen
========================= ================== ================== =============================================================
Dependency Minimum Version optional_extra Notes
========================= ================== ================== =============================================================
-matplotlib 3.3.2 plot Plotting library
+matplotlib 3.6.0 plot Plotting library
Jinja2 3.0.0 output_formatting Conditional formatting with DataFrame.style
tabulate 0.8.9 output_formatting Printing in Markdown-friendly format (see `tabulate`_)
========================= ================== ================== =============================================================
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 92cc7b04496bc..b0db2185f38d4 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -124,6 +124,8 @@ Optional libraries below the lowest tested version may still work, but are not c
+=================+=================+=========+
| pyarrow | 6.0.0 | X |
+-----------------+-----------------+---------+
+| matplotlib | 3.6.0 | X |
++-----------------+-----------------+---------+
| fastparquet | 0.6.3 | X |
+-----------------+-----------------+---------+
@@ -276,6 +278,8 @@ Removal of prior version deprecations/changes
- Enforced disallowing a string column label into ``times`` in :meth:`DataFrame.ewm` (:issue:`43265`)
- Enforced disallowing a tuple of column labels into :meth:`.DataFrameGroupBy.__getitem__` (:issue:`30546`)
- Enforced disallowing setting values with ``.loc`` using a positional slice. Use ``.loc`` with labels or ``.iloc`` with positions instead (:issue:`31840`)
+- Enforced disallowing positional indexing with a ``float`` key even if that key is a round number, manually cast to integer instead (:issue:`34193`)
+- Enforced disallowing indexing on a :class:`Index` or positional indexing on a :class:`Series` producing multi-dimensional objects e.g. ``obj[:, None]``, convert to numpy before indexing instead (:issue:`35141`)
- Enforced disallowing ``dict`` or ``set`` objects in ``suffixes`` in :func:`merge` (:issue:`34810`)
- Enforced disallowing :func:`merge` to produce duplicated columns through the ``suffixes`` keyword and already existing columns (:issue:`22818`)
- Enforced disallowing using :func:`merge` or :func:`join` on a different number of levels (:issue:`34862`)
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index 1bfef131aac1d..7577058f7c80b 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -23,7 +23,7 @@
"gcsfs": "2021.07.0",
"jinja2": "3.0.0",
"lxml.etree": "4.6.3",
- "matplotlib": "3.3.2",
+ "matplotlib": "3.6.0",
"numba": "0.53.1",
"numexpr": "2.7.3",
"odfpy": "1.4.1",
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 817b889623d99..02297855ad389 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -148,15 +148,13 @@ def is_bool_indexer(key: Any) -> bool:
return False
-def cast_scalar_indexer(val, warn_float: bool = False):
+def cast_scalar_indexer(val):
"""
- To avoid numpy DeprecationWarnings, cast float to integer where valid.
+ Disallow indexing with a float key, even if that key is a round number.
Parameters
----------
val : scalar
- warn_float : bool, default False
- If True, issue deprecation warning for a float indexer.
Returns
-------
@@ -164,14 +162,11 @@ def cast_scalar_indexer(val, warn_float: bool = False):
"""
# assumes lib.is_scalar(val)
if lib.is_float(val) and val.is_integer():
- if warn_float:
- warnings.warn(
- "Indexing with a float is deprecated, and will raise an IndexError "
- "in pandas 2.0. You can manually convert to an integer key instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- return int(val)
+ raise IndexError(
+ # GH#34193
+ "Indexing with a float is no longer supported. Manually convert "
+ "to an integer key instead."
+ )
return val
diff --git a/pandas/core/indexers/__init__.py b/pandas/core/indexers/__init__.py
index 6431f12a08dc8..ba8a4f1d0ee7a 100644
--- a/pandas/core/indexers/__init__.py
+++ b/pandas/core/indexers/__init__.py
@@ -2,7 +2,7 @@
check_array_indexer,
check_key_length,
check_setitem_lengths,
- deprecate_ndim_indexing,
+ disallow_ndim_indexing,
is_empty_indexer,
is_list_like_indexer,
is_scalar_indexer,
@@ -23,7 +23,7 @@
"validate_indices",
"maybe_convert_indices",
"length_of_indexer",
- "deprecate_ndim_indexing",
+ "disallow_ndim_indexing",
"unpack_1tuple",
"check_key_length",
"check_array_indexer",
diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py
index 50b53ecb63082..90503876ee5d5 100644
--- a/pandas/core/indexers/utils.py
+++ b/pandas/core/indexers/utils.py
@@ -7,12 +7,10 @@
TYPE_CHECKING,
Any,
)
-import warnings
import numpy as np
from pandas._typing import AnyArrayLike
-from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_array_like,
@@ -333,22 +331,18 @@ def length_of_indexer(indexer, target=None) -> int:
raise AssertionError("cannot find the length of the indexer")
-def deprecate_ndim_indexing(result, stacklevel: int = 3) -> None:
+def disallow_ndim_indexing(result) -> None:
"""
- Helper function to raise the deprecation warning for multi-dimensional
- indexing on 1D Series/Index.
+ Helper function to disallow multi-dimensional indexing on 1D Series/Index.
GH#27125 indexer like idx[:, None] expands dim, but we cannot do that
- and keep an index, so we currently return ndarray, which is deprecated
- (Deprecation GH#30588).
+ and keep an index, so we used to return ndarray, which was deprecated
+ in GH#30588.
"""
if np.ndim(result) > 1:
- warnings.warn(
- "Support for multi-dimensional indexing (e.g. `obj[:, None]`) "
- "is deprecated and will be removed in a future "
- "version. Convert to a numpy array before indexing instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
+ raise ValueError(
+ "Multi-dimensional indexing (e.g. `obj[:, None]`) is no longer "
+ "supported. Convert to a numpy array before indexing instead."
)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index d8300bb29c274..fc6ad055fd4ca 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -162,7 +162,7 @@
extract_array,
sanitize_array,
)
-from pandas.core.indexers import deprecate_ndim_indexing
+from pandas.core.indexers import disallow_ndim_indexing
from pandas.core.indexes.frozen import FrozenList
from pandas.core.ops import get_op_result_name
from pandas.core.ops.invalid import make_invalid_op
@@ -5221,7 +5221,7 @@ def __getitem__(self, key):
if is_integer(key) or is_float(key):
# GH#44051 exclude bool, which would return a 2d ndarray
- key = com.cast_scalar_indexer(key, warn_float=True)
+ key = com.cast_scalar_indexer(key)
return getitem(key)
if isinstance(key, slice):
@@ -5244,15 +5244,7 @@ def __getitem__(self, key):
result = getitem(key)
# Because we ruled out integer above, we always get an arraylike here
if result.ndim > 1:
- deprecate_ndim_indexing(result)
- if hasattr(result, "_ndarray"):
- # i.e. NDArrayBackedExtensionArray
- # Unpack to ndarray for MPL compat
- # error: Item "ndarray[Any, Any]" of
- # "Union[ExtensionArray, ndarray[Any, Any]]"
- # has no attribute "_ndarray"
- return result._ndarray # type: ignore[union-attr]
- return result
+ disallow_ndim_indexing(result)
# NB: Using _constructor._simple_new would break if MultiIndex
# didn't override __getitem__
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 3b8380a88bb8b..32db99ab08c1f 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2031,7 +2031,7 @@ def __reduce__(self):
def __getitem__(self, key):
if is_scalar(key):
- key = com.cast_scalar_indexer(key, warn_float=True)
+ key = com.cast_scalar_indexer(key)
retval = []
for lev, level_codes in zip(self.levels, self.codes):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 9607d57766b11..4582c141138d7 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -123,7 +123,7 @@
)
from pandas.core.generic import NDFrame
from pandas.core.indexers import (
- deprecate_ndim_indexing,
+ disallow_ndim_indexing,
unpack_1tuple,
)
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
@@ -1003,7 +1003,7 @@ def _get_values_tuple(self, key: tuple):
# see tests.series.timeseries.test_mpl_compat_hack
# the asarray is needed to avoid returning a 2D DatetimeArray
result = np.asarray(self._values[key])
- deprecate_ndim_indexing(result, stacklevel=find_stack_level())
+ disallow_ndim_indexing(result)
return result
if not isinstance(self.index, MultiIndex):
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 85833224fea10..f2141b0b74ac6 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -699,20 +699,16 @@ def test_engine_reference_cycle(self, simple_index):
def test_getitem_2d_deprecated(self, simple_index):
# GH#30588, GH#31479
idx = simple_index
- msg = "Support for multi-dimensional indexing"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = idx[:, None]
-
- assert isinstance(res, np.ndarray), type(res)
+ msg = "Multi-dimensional indexing"
+ with pytest.raises(ValueError, match=msg):
+ idx[:, None]
if not isinstance(idx, RangeIndex):
- # GH#44051 RangeIndex already raises
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = idx[True]
- assert isinstance(res, np.ndarray), type(res)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = idx[False]
- assert isinstance(res, np.ndarray), type(res)
+ # GH#44051 RangeIndex already raised pre-2.0 with a different message
+ with pytest.raises(ValueError, match=msg):
+ idx[True]
+ with pytest.raises(ValueError, match=msg):
+ idx[False]
else:
msg = "only integers, slices"
with pytest.raises(IndexError, match=msg):
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 87bf0199b2528..7e4df5ae8699c 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -98,11 +98,9 @@ def test_dti_business_getitem(self, freq):
@pytest.mark.parametrize("freq", ["B", "C"])
def test_dti_business_getitem_matplotlib_hackaround(self, freq):
rng = bdate_range(START, END, freq=freq)
- with tm.assert_produces_warning(FutureWarning):
+ with pytest.raises(ValueError, match="Multi-dimensional indexing"):
# GH#30588 multi-dimensional indexing deprecated
- values = rng[:, None]
- expected = rng.values[:, None]
- tm.assert_numpy_array_equal(values, expected)
+ rng[:, None]
def test_getitem_int_list(self):
dti = date_range(start="1/1/2005", end="12/1/2005", freq="M")
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 523decba33b6e..476d90fac2651 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -62,11 +62,11 @@ def test_can_hold_identifiers(self, simple_index):
@pytest.mark.parametrize("index", ["datetime"], indirect=True)
def test_new_axis(self, index):
- with tm.assert_produces_warning(FutureWarning):
+ # TODO: a bunch of scattered tests check this deprecation is enforced.
+ # de-duplicate/centralize them.
+ with pytest.raises(ValueError, match="Multi-dimensional indexing"):
# GH#30588 multi-dimensional indexing deprecated
- new_index = index[None, :]
- assert new_index.ndim == 2
- assert isinstance(new_index, np.ndarray)
+ index[None, :]
def test_argsort(self, index):
with tm.maybe_produces_warning(
@@ -1532,15 +1532,15 @@ def test_deprecated_fastpath():
def test_shape_of_invalid_index():
- # Currently, it is possible to create "invalid" index objects backed by
+ # Pre-2.0, it was possible to create "invalid" index objects backed by
# a multi-dimensional array (see https://github.com/pandas-dev/pandas/issues/27125
# about this). However, as long as this is not solved in general,this test ensures
# that the returned shape is consistent with this underlying array for
# compat with matplotlib (see https://github.com/pandas-dev/pandas/issues/27775)
idx = Index([0, 1, 2, 3])
- with tm.assert_produces_warning(FutureWarning):
+ with pytest.raises(ValueError, match="Multi-dimensional indexing"):
# GH#30588 multi-dimensional indexing deprecated
- assert idx[:, None].shape == (4, 1)
+ idx[:, None]
def test_validate_1d_input():
diff --git a/pandas/tests/indexes/test_indexing.py b/pandas/tests/indexes/test_indexing.py
index 2b7c5745e0c67..57268c07024f7 100644
--- a/pandas/tests/indexes/test_indexing.py
+++ b/pandas/tests/indexes/test_indexing.py
@@ -290,11 +290,9 @@ def test_putmask_with_wrong_mask(self, index):
def test_getitem_deprecated_float(idx):
# https://github.com/pandas-dev/pandas/issues/34191
- with tm.assert_produces_warning(FutureWarning):
- result = idx[1.0]
-
- expected = idx[1]
- assert result == expected
+ msg = "Indexing with a float is no longer supported"
+ with pytest.raises(IndexError, match=msg):
+ idx[1.0]
@pytest.mark.parametrize(
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index e3a38fcd10642..1d287f70a4606 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -269,19 +269,10 @@ def test_getitem_partial_str_slice_high_reso_with_timedeltaindex(self):
def test_getitem_slice_2d(self, datetime_series):
# GH#30588 multi-dimensional indexing deprecated
-
- with tm.assert_produces_warning(
- FutureWarning, match="Support for multi-dimensional indexing"
- ):
- # GH#30867 Don't want to support this long-term, but
- # for now ensure that the warning from Index
- # doesn't comes through via Series.__getitem__.
- result = datetime_series[:, np.newaxis]
- expected = datetime_series.values[:, np.newaxis]
- tm.assert_almost_equal(result, expected)
+ with pytest.raises(ValueError, match="Multi-dimensional indexing"):
+ datetime_series[:, np.newaxis]
# FutureWarning from NumPy.
- @pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning")
def test_getitem_median_slice_bug(self):
index = date_range("20090415", "20090519", freq="2B")
ser = Series(np.random.randn(13), index=index)
@@ -291,6 +282,10 @@ def test_getitem_median_slice_bug(self):
with pytest.raises(ValueError, match=msg):
# GH#31299
ser[indexer]
+ # but we're OK with a single-element tuple
+ result = ser[(indexer[0],)]
+ expected = ser[indexer[0]]
+ tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"slc, positions",
@@ -554,14 +549,8 @@ def test_getitem_generator(string_series):
],
)
def test_getitem_ndim_deprecated(series):
- with tm.assert_produces_warning(
- FutureWarning,
- match="Support for multi-dimensional indexing",
- ):
- result = series[:, None]
-
- expected = np.asarray(series)[:, None]
- tm.assert_numpy_array_equal(result, expected)
+ with pytest.raises(ValueError, match="Multi-dimensional indexing"):
+ series[:, None]
def test_getitem_multilevel_scalar_slice_not_implemented(
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 7e4912ec88d36..fa9cf5215c0f7 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -184,8 +184,6 @@ def test_setslice(datetime_series):
assert sl.index.is_unique is True
-# FutureWarning from NumPy about [slice(None, 5).
-@pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning")
def test_basic_getitem_setitem_corner(datetime_series):
# invalid tuples, e.g. td.ts[:, None] vs. td.ts[:, 2]
msg = "key of type tuple not found and not a MultiIndex"
@@ -200,6 +198,11 @@ def test_basic_getitem_setitem_corner(datetime_series):
# GH#31299
datetime_series[[slice(None, 5)]]
+ # but we're OK with a single-element tuple
+ result = datetime_series[(slice(None, 5),)]
+ expected = datetime_series[:5]
+ tm.assert_series_equal(result, expected)
+
# OK
msg = r"unhashable type(: 'slice')?"
with pytest.raises(TypeError, match=msg):
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49412 | 2022-10-31T16:40:58Z | 2022-11-02T18:56:50Z | 2022-11-02T18:56:50Z | 2022-11-03T11:54:46Z |
Fix Scorecards GitHub Action | diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 73cab7ff909fc..1eedfe5b8ab51 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -29,7 +29,7 @@ jobs:
persist-credentials: false
- name: "Run analysis"
- uses: ossf/scorecard-action@v2.0.3
+ uses: ossf/scorecard-action@v2.0.6
with:
results_file: results.sarif
results_format: sarif
| Fixes sudden failure reported in https://github.com/pandas-dev/pandas/issues/48566#issuecomment-1296174657.
Bumps ossf/scorecard-action to v2.0.6, fixing an unexpected breaking change in upstream API.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49410 | 2022-10-31T13:39:17Z | 2022-10-31T17:34:19Z | 2022-10-31T17:34:19Z | 2022-11-03T17:28:51Z |
enable pylint nan-comparison | diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 11f8544b78e94..752dc4d4ad1a2 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -134,7 +134,7 @@ def test_constructor(self, datetime_series):
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
- assert mixed[1] is np.NaN
+ assert np.isnan(mixed[1])
assert not empty_series.index._is_all_dates
with tm.assert_produces_warning(FutureWarning):
diff --git a/pyproject.toml b/pyproject.toml
index 0ce8cf87ab17e..8f7bb24e6fc66 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -160,7 +160,6 @@ disable = [
"invalid-overridden-method",
"keyword-arg-before-vararg",
"method-cache-max-size-none",
- "nan-comparison",
"non-parent-init-called",
"overridden-final-method",
"pointless-statement",
| Issue #48855. This PR enables pylint type "W" warning: "nan-comparison".
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). | https://api.github.com/repos/pandas-dev/pandas/pulls/49407 | 2022-10-31T12:00:07Z | 2022-10-31T17:05:57Z | 2022-10-31T17:05:57Z | 2022-10-31T17:05:58Z |
BUG/PERF: Series.replace with dtype="category" | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 0ab75355291f6..151889e1b6a81 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -310,6 +310,7 @@ Performance improvements
- Performance improvement for :class:`DatetimeIndex` constructor passing a list (:issue:`48609`)
- Performance improvement in :func:`merge` and :meth:`DataFrame.join` when joining on a sorted :class:`MultiIndex` (:issue:`48504`)
- Performance improvement in :meth:`DataFrame.loc` and :meth:`Series.loc` for tuple-based indexing of a :class:`MultiIndex` (:issue:`48384`)
+- Performance improvement for :meth:`Series.replace` with categorical dtype (:issue:`49404`)
- Performance improvement for :meth:`MultiIndex.unique` (:issue:`48335`)
- Performance improvement for :func:`concat` with extension array backed indexes (:issue:`49128`, :issue:`49178`)
- Reduce memory usage of :meth:`DataFrame.to_pickle`/:meth:`Series.to_pickle` when using BZ2 or LZMA (:issue:`49068`)
@@ -331,6 +332,8 @@ Bug fixes
Categorical
^^^^^^^^^^^
- Bug in :meth:`Categorical.set_categories` losing dtype information (:issue:`48812`)
+- Bug in :meth:`Series.replace` with categorical dtype when ``to_replace`` values overlap with new values (:issue:`49404`)
+- Bug in :meth:`Series.replace` with categorical dtype losing nullable dtypes of underlying categories (:issue:`49404`)
- Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby` would reorder categories when used as a grouper (:issue:`48749`)
Datetimelike
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index becca2b668290..5f769e5fd8467 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2286,42 +2286,24 @@ def isin(self, values) -> npt.NDArray[np.bool_]:
return algorithms.isin(self.codes, code_values)
def _replace(self, *, to_replace, value, inplace: bool = False):
+ from pandas import (
+ Index,
+ Series,
+ )
+
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
- # other cases, like if both to_replace and value are list-like or if
- # to_replace is a dict, are handled separately in NDFrame
- if not is_list_like(to_replace):
- to_replace = [to_replace]
-
- categories = cat.categories.tolist()
- removals = set()
- for replace_value in to_replace:
- if value == replace_value:
- continue
- if replace_value not in cat.categories:
- continue
- if isna(value):
- removals.add(replace_value)
- continue
-
- index = categories.index(replace_value)
-
- if value in cat.categories:
- value_index = categories.index(value)
- cat._codes[cat._codes == index] = value_index
- removals.add(replace_value)
- else:
- categories[index] = value
- cat._set_categories(categories)
+ ser = Series(cat.categories, copy=True)
+ ser = ser.replace(to_replace=to_replace, value=value)
- if len(removals):
- new_categories = [c for c in categories if c not in removals]
- new_dtype = CategoricalDtype(new_categories, ordered=self.dtype.ordered)
- codes = recode_for_categories(
- cat.codes, cat.categories, new_dtype.categories
- )
- NDArrayBacked.__init__(cat, codes, new_dtype)
+ all_values = Index(ser)
+ new_categories = Index(ser.dropna().drop_duplicates(keep="first"))
+ new_codes = recode_for_categories(
+ cat._codes, all_values, new_categories, copy=False
+ )
+ new_dtype = CategoricalDtype(new_categories, ordered=self.dtype.ordered)
+ NDArrayBacked.__init__(cat, new_codes, new_dtype)
if not inplace:
return cat
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 2158d80ec1977..06fc70c0af2dd 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -575,12 +575,10 @@ def replace(
if isinstance(values, Categorical):
# TODO: avoid special-casing
+ # GH49404
blk = self if inplace else self.copy()
- # error: Item "ExtensionArray" of "Union[ndarray[Any, Any],
- # ExtensionArray]" has no attribute "_replace"
- blk.values._replace( # type: ignore[union-attr]
- to_replace=to_replace, value=value, inplace=True
- )
+ values = cast(Categorical, blk.values)
+ values._replace(to_replace=to_replace, value=value, inplace=True)
return [blk]
if not self._can_hold_element(to_replace):
@@ -690,6 +688,14 @@ def replace_list(
"""
values = self.values
+ if isinstance(values, Categorical):
+ # TODO: avoid special-casing
+ # GH49404
+ blk = self if inplace else self.copy()
+ values = cast(Categorical, blk.values)
+ values._replace(to_replace=src_list, value=dest_list, inplace=True)
+ return [blk]
+
# Exclude anything that we know we won't contain
pairs = [
(x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x)
diff --git a/pandas/tests/arrays/categorical/test_replace.py b/pandas/tests/arrays/categorical/test_replace.py
index a3ba420c84a17..62a7bf0673a16 100644
--- a/pandas/tests/arrays/categorical/test_replace.py
+++ b/pandas/tests/arrays/categorical/test_replace.py
@@ -21,6 +21,8 @@
((5, 6), 2, [1, 2, 3], False),
([1], [2], [2, 2, 3], False),
([1, 4], [5, 2], [5, 2, 3], False),
+ # GH49404
+ ([1, 2, 3], [2, 3, 4], [2, 3, 4], False),
# check_categorical sorts categories, which crashes on mixed dtypes
(3, "4", [1, 2, "4"], False),
([1, 2, "3"], "5", ["5", "5", 3], True),
@@ -65,3 +67,11 @@ def test_replace_categorical(to_replace, value, result, expected_error_msg):
pd.Series(cat).replace(to_replace, value, inplace=True)
tm.assert_categorical_equal(cat, expected)
+
+
+def test_replace_categorical_ea_dtype():
+ # GH49404
+ cat = Categorical(pd.array(["a", "b"], dtype="string"))
+ result = pd.Series(cat).replace(["a", "b"], ["c", pd.NA])._values
+ expected = Categorical(pd.array(["c", pd.NA], dtype="string"))
+ tm.assert_categorical_equal(result, expected)
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v2.0.0.rst` file if fixing a bug or adding a new feature.
Refactor of `Categorical._replace` to fix a few bugs with `Series(..., dtype="category").replace` and improve performance.
BUG 1: overlap between `to_replace` and `value`:
```
Series([1, 2, 3], dtype="category").replace({1:2, 2:3, 3:4})
# main:
0 4
1 4
2 4
dtype: category
Categories (1, int64): [4]
# PR:
0 2
1 3
2 4
dtype: category
Categories (3, int64): [2, 3, 4]
```
BUG 2: losing nullable dtypes of underlying categories:
```
Series(["a", "b"], dtype="string").astype("category").replace("b", "c")
# main:
0 a
1 c
dtype: category
Categories (2, object): ['a', 'c']
# PR:
0 a
1 c
dtype: category
Categories (2, string): [a, c]
```
Perf improvements:
```
import pandas as pd
import numpy as np
arr = np.repeat(np.arange(1000), 1000)
ser = pd.Series(arr, dtype="category")
%timeit ser.replace(np.arange(200), 5)
681 ms ± 9.65 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) <- main
11 ms ± 690 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) <- PR
```
""" | https://api.github.com/repos/pandas-dev/pandas/pulls/49404 | 2022-10-31T01:24:01Z | 2023-01-18T20:29:40Z | 2023-01-18T20:29:40Z | 2023-01-18T22:37:08Z |
DEPR: core.index file | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 819f4e28e4b83..e059fda693bf6 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -156,6 +156,7 @@ Deprecations
Removal of prior version deprecations/changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- Removed deprecated module ``pandas.core.index`` (:issue:`30193`)
- Removed deprecated :meth:`Categorical.to_dense`, use ``np.asarray(cat)`` instead (:issue:`32639`)
- Removed deprecated :meth:`Categorical.take_nd` (:issue:`27745`)
- Removed deprecated :meth:`Categorical.mode`, use ``Series(cat).mode()`` instead (:issue:`45033`)
diff --git a/pandas/core/index.py b/pandas/core/index.py
deleted file mode 100644
index 19e9c6b27e4e7..0000000000000
--- a/pandas/core/index.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# pyright: reportUnusedImport = false
-from __future__ import annotations
-
-import warnings
-
-from pandas.util._exceptions import find_stack_level
-
-from pandas.core.indexes.api import ( # noqa:F401
- CategoricalIndex,
- DatetimeIndex,
- Float64Index,
- Index,
- Int64Index,
- IntervalIndex,
- MultiIndex,
- NaT,
- NumericIndex,
- PeriodIndex,
- RangeIndex,
- TimedeltaIndex,
- UInt64Index,
- _new_Index,
- ensure_index,
- ensure_index_from_sequences,
- get_objs_combined_axis,
-)
-from pandas.core.indexes.multi import sparsify_labels # noqa:F401
-
-# GH#30193
-warnings.warn(
- "pandas.core.index is deprecated and will be removed in a future version. "
- "The public classes are available in the top-level namespace.",
- FutureWarning,
- stacklevel=find_stack_level(),
-)
-
-__all__: list[str] = []
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49403 | 2022-10-30T20:02:49Z | 2022-10-31T17:35:24Z | 2022-10-31T17:35:24Z | 2022-10-31T17:40:47Z |
DEPR: Enforce deprecation of na_sentinel | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index c76555f9ef417..b97d0f5232f1e 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -441,6 +441,7 @@ Removal of prior version deprecations/changes
- Changed behavior of comparison of a :class:`Timestamp` with a ``datetime.date`` object; these now compare as un-equal and raise on inequality comparisons, matching the ``datetime.datetime`` behavior (:issue:`36131`)
- Enforced deprecation of silently dropping columns that raised a ``TypeError`` in :class:`Series.transform` and :class:`DataFrame.transform` when used with a list or dictionary (:issue:`43740`)
- Change behavior of :meth:`DataFrame.apply` with list-like so that any partial failure will raise an error (:issue:`43740`)
+- Removed ``na_sentinel`` argument from :func:`factorize`, :meth:`.Index.factorize`, and :meth:`.ExtensionArray.factorize` (:issue:`47157`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index aca5c4345d247..7494a8a54f9bb 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -4,7 +4,6 @@
"""
from __future__ import annotations
-import inspect
import operator
from textwrap import dedent
from typing import (
@@ -524,7 +523,7 @@ def f(c, v):
def factorize_array(
values: np.ndarray,
- na_sentinel: int | None = -1,
+ use_na_sentinel: bool = True,
size_hint: int | None = None,
na_value: object = None,
mask: npt.NDArray[np.bool_] | None = None,
@@ -537,7 +536,10 @@ def factorize_array(
Parameters
----------
values : ndarray
- na_sentinel : int, default -1
+ use_na_sentinel : bool, default True
+ If True, the sentinel -1 will be used for NaN values. If False,
+ NaN values will be encoded as non-negative integers and will not drop the
+ NaN from the uniques of the values.
size_hint : int, optional
Passed through to the hashtable's 'get_labels' method
na_value : object, optional
@@ -555,10 +557,6 @@ def factorize_array(
codes : ndarray[np.intp]
uniques : ndarray
"""
- ignore_na = na_sentinel is not None
- if not ignore_na:
- na_sentinel = -1
-
original = values
if values.dtype.kind in ["m", "M"]:
# _get_hashtable_algo will cast dt64/td64 to i8 via _ensure_data, so we
@@ -572,10 +570,10 @@ def factorize_array(
table = hash_klass(size_hint or len(values))
uniques, codes = table.factorize(
values,
- na_sentinel=na_sentinel,
+ na_sentinel=-1,
na_value=na_value,
mask=mask,
- ignore_na=ignore_na,
+ ignore_na=use_na_sentinel,
)
# re-cast e.g. i8->dt64/td64, uint8->bool
@@ -610,8 +608,7 @@ def factorize_array(
def factorize(
values,
sort: bool = False,
- na_sentinel: int | None | lib.NoDefault = lib.no_default,
- use_na_sentinel: bool | lib.NoDefault = lib.no_default,
+ use_na_sentinel: bool = True,
size_hint: int | None = None,
) -> tuple[np.ndarray, np.ndarray | Index]:
"""
@@ -625,17 +622,6 @@ def factorize(
Parameters
----------
{values}{sort}
- na_sentinel : int or None, default -1
- Value to mark "not found". If None, will not drop the NaN
- from the uniques of the values.
-
- .. deprecated:: 1.5.0
- The na_sentinel argument is deprecated and
- will be removed in a future version of pandas. Specify use_na_sentinel as
- either True or False.
-
- .. versionchanged:: 1.1.2
-
use_na_sentinel : bool, default True
If True, the sentinel -1 will be used for NaN values. If False,
NaN values will be encoded as non-negative integers and will not drop the
@@ -748,12 +734,6 @@ def factorize(
# Step 2 is dispatched to extension types (like Categorical). They are
# responsible only for factorization. All data coercion, sorting and boxing
# should happen here.
-
- # GH#46910 deprecated na_sentinel in favor of use_na_sentinel:
- # na_sentinel=None corresponds to use_na_sentinel=False
- # na_sentinel=-1 correspond to use_na_sentinel=True
- # Other na_sentinel values will not be supported when the deprecation is enforced.
- na_sentinel = resolve_na_sentinel(na_sentinel, use_na_sentinel)
if isinstance(values, ABCRangeIndex):
return values.factorize(sort=sort)
@@ -772,25 +752,12 @@ def factorize(
return _re_wrap_factorize(original, uniques, codes)
elif not isinstance(values.dtype, np.dtype):
- if (
- na_sentinel == -1 or na_sentinel is None
- ) and "use_na_sentinel" in inspect.signature(values.factorize).parameters:
- # Avoid using catch_warnings when possible
- # GH#46910 - TimelikeOps has deprecated signature
- codes, uniques = values.factorize( # type: ignore[call-arg]
- use_na_sentinel=na_sentinel is not None
- )
- else:
- na_sentinel_arg = -1 if na_sentinel is None else na_sentinel
- with warnings.catch_warnings():
- # We've already warned above
- warnings.filterwarnings("ignore", ".*use_na_sentinel.*", FutureWarning)
- codes, uniques = values.factorize(na_sentinel=na_sentinel_arg)
+ codes, uniques = values.factorize(use_na_sentinel=use_na_sentinel)
else:
values = np.asarray(values) # convert DTA/TDA/MultiIndex
- if na_sentinel is None and is_object_dtype(values):
+ if not use_na_sentinel and is_object_dtype(values):
# factorize can now handle differentiating various types of null values.
# These can only occur when the array has object dtype.
# However, for backwards compatibility we only use the null for the
@@ -803,13 +770,17 @@ def factorize(
codes, uniques = factorize_array(
values,
- na_sentinel=na_sentinel,
+ use_na_sentinel=use_na_sentinel,
size_hint=size_hint,
)
if sort and len(uniques) > 0:
uniques, codes = safe_sort(
- uniques, codes, na_sentinel=na_sentinel, assume_unique=True, verify=False
+ uniques,
+ codes,
+ use_na_sentinel=use_na_sentinel,
+ assume_unique=True,
+ verify=False,
)
uniques = _reconstruct_data(uniques, original.dtype, original)
@@ -817,56 +788,6 @@ def factorize(
return _re_wrap_factorize(original, uniques, codes)
-def resolve_na_sentinel(
- na_sentinel: int | None | lib.NoDefault,
- use_na_sentinel: bool | lib.NoDefault,
-) -> int | None:
- """
- Determine value of na_sentinel for factorize methods.
-
- See GH#46910 for details on the deprecation.
-
- Parameters
- ----------
- na_sentinel : int, None, or lib.no_default
- Value passed to the method.
- use_na_sentinel : bool or lib.no_default
- Value passed to the method.
-
- Returns
- -------
- Resolved value of na_sentinel.
- """
- if na_sentinel is not lib.no_default and use_na_sentinel is not lib.no_default:
- raise ValueError(
- "Cannot specify both `na_sentinel` and `use_na_sentile`; "
- f"got `na_sentinel={na_sentinel}` and `use_na_sentinel={use_na_sentinel}`"
- )
- if na_sentinel is lib.no_default:
- result = -1 if use_na_sentinel is lib.no_default or use_na_sentinel else None
- else:
- if na_sentinel is None:
- msg = (
- "Specifying `na_sentinel=None` is deprecated, specify "
- "`use_na_sentinel=False` instead."
- )
- elif na_sentinel == -1:
- msg = (
- "Specifying `na_sentinel=-1` is deprecated, specify "
- "`use_na_sentinel=True` instead."
- )
- else:
- msg = (
- "Specifying the specific value to use for `na_sentinel` is "
- "deprecated and will be removed in a future version of pandas. "
- "Specify `use_na_sentinel=True` to use the sentinel value -1, and "
- "`use_na_sentinel=False` to encode NaN values."
- )
- warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
- result = na_sentinel
- return result
-
-
def _re_wrap_factorize(original, uniques, codes: np.ndarray):
"""
Wrap factorize results in Series or Index depending on original type.
@@ -1764,7 +1685,7 @@ def diff(arr, n: int, axis: AxisInt = 0):
def safe_sort(
values,
codes=None,
- na_sentinel: int | None = -1,
+ use_na_sentinel: bool = True,
assume_unique: bool = False,
verify: bool = True,
) -> AnyArrayLike | tuple[AnyArrayLike, np.ndarray]:
@@ -1780,16 +1701,17 @@ def safe_sort(
Sequence; must be unique if ``codes`` is not None.
codes : list_like, optional
Indices to ``values``. All out of bound indices are treated as
- "not found" and will be masked with ``na_sentinel``.
- na_sentinel : int or None, default -1
- Value in ``codes`` to mark "not found", or None to encode null values as normal.
- Ignored when ``codes`` is None.
+ "not found" and will be masked with ``-1``.
+ use_na_sentinel : bool, default True
+ If True, the sentinel -1 will be used for NaN values. If False,
+ NaN values will be encoded as non-negative integers and will not drop the
+ NaN from the uniques of the values.
assume_unique : bool, default False
When True, ``values`` are assumed to be unique, which can speed up
the calculation. Ignored when ``codes`` is None.
verify : bool, default True
Check if codes are out of bound for the values and put out of bound
- codes equal to na_sentinel. If ``verify=False``, it is assumed there
+ codes equal to ``-1``. If ``verify=False``, it is assumed there
are no out of bound codes. Ignored when ``codes`` is None.
.. versionadded:: 0.25.0
@@ -1867,7 +1789,7 @@ def safe_sort(
t.map_locations(values)
sorter = ensure_platform_int(t.lookup(ordered))
- if na_sentinel == -1:
+ if use_na_sentinel:
# take_nd is faster, but only works for na_sentinels of -1
order2 = sorter.argsort()
new_codes = take_nd(order2, codes, fill_value=-1)
@@ -1878,17 +1800,17 @@ def safe_sort(
else:
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
- # Out of bound indices will be masked with `na_sentinel` next, so we
+ # Out of bound indices will be masked with `-1` next, so we
# may deal with them here without performance loss using `mode='wrap'`
new_codes = reverse_indexer.take(codes, mode="wrap")
- if na_sentinel is not None:
- mask = codes == na_sentinel
+ if use_na_sentinel:
+ mask = codes == -1
if verify:
mask = mask | (codes < -len(values)) | (codes >= len(values))
- if na_sentinel is not None and mask is not None:
- np.putmask(new_codes, mask, na_sentinel)
+ if use_na_sentinel and mask is not None:
+ np.putmask(new_codes, mask, -1)
return ordered, ensure_platform_int(new_codes)
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 945ae52c53047..06d91730804ab 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -8,7 +8,6 @@
import numpy as np
-from pandas._libs import lib
from pandas._typing import (
Dtype,
PositionalIndexer,
@@ -31,7 +30,6 @@
)
from pandas.core.dtypes.missing import isna
-from pandas.core.algorithms import resolve_na_sentinel
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays.base import ExtensionArray
from pandas.core.indexers import (
@@ -553,11 +551,9 @@ def _values_for_factorize(self) -> tuple[np.ndarray, Any]:
@doc(ExtensionArray.factorize)
def factorize(
self,
- na_sentinel: int | lib.NoDefault = lib.no_default,
- use_na_sentinel: bool | lib.NoDefault = lib.no_default,
+ use_na_sentinel: bool = True,
) -> tuple[np.ndarray, ExtensionArray]:
- resolved_na_sentinel = resolve_na_sentinel(na_sentinel, use_na_sentinel)
- null_encoding = "mask" if resolved_na_sentinel is not None else "encode"
+ null_encoding = "mask" if use_na_sentinel else "encode"
encoded = self._data.dictionary_encode(null_encoding=null_encoding)
if encoded.length() == 0:
indices = np.array([], dtype=np.intp)
@@ -565,10 +561,7 @@ def factorize(
else:
pa_indices = encoded.combine_chunks().indices
if pa_indices.null_count > 0:
- fill_value = (
- resolved_na_sentinel if resolved_na_sentinel is not None else -1
- )
- pa_indices = pc.fill_null(pa_indices, fill_value)
+ pa_indices = pc.fill_null(pa_indices, -1)
indices = pa_indices.to_numpy(zero_copy_only=False, writable=True).astype(
np.intp, copy=False
)
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index cc9b2ce3fed42..e536ee434fa55 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -8,7 +8,6 @@
"""
from __future__ import annotations
-import inspect
import operator
from typing import (
TYPE_CHECKING,
@@ -22,7 +21,6 @@
cast,
overload,
)
-import warnings
import numpy as np
@@ -49,7 +47,6 @@
Substitution,
cache_readonly,
)
-from pandas.util._exceptions import find_stack_level
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
@@ -81,7 +78,6 @@
isin,
mode,
rank,
- resolve_na_sentinel,
unique,
)
from pandas.core.array_algos.quantile import quantile_with_mask
@@ -454,24 +450,6 @@ def __ne__(self, other: Any) -> ArrayLike: # type: ignore[override]
"""
return ~(self == other)
- def __init_subclass__(cls, **kwargs) -> None:
- factorize = getattr(cls, "factorize")
- if (
- "use_na_sentinel" not in inspect.signature(factorize).parameters
- # TimelikeOps uses old factorize args to ensure we don't break things
- and cls.__name__ not in ("TimelikeOps", "DatetimeArray", "TimedeltaArray")
- ):
- # See GH#46910 for details on the deprecation
- name = cls.__name__
- warnings.warn(
- f"The `na_sentinel` argument of `{name}.factorize` is deprecated. "
- f"In the future, pandas will use the `use_na_sentinel` argument "
- f"instead. Add this argument to `{name}.factorize` to be compatible "
- f"with future versions of pandas and silence this warning.",
- DeprecationWarning,
- stacklevel=find_stack_level(),
- )
-
def to_numpy(
self,
dtype: npt.DTypeLike | None = None,
@@ -1009,7 +987,7 @@ def _values_for_factorize(self) -> tuple[np.ndarray, Any]:
na_value : object
The value in `values` to consider missing. This will be treated
as NA in the factorization routines, so it will be coded as
- `na_sentinel` and not included in `uniques`. By default,
+ `-1` and not included in `uniques`. By default,
``np.nan`` is used.
Notes
@@ -1021,22 +999,13 @@ def _values_for_factorize(self) -> tuple[np.ndarray, Any]:
def factorize(
self,
- na_sentinel: int | lib.NoDefault = lib.no_default,
- use_na_sentinel: bool | lib.NoDefault = lib.no_default,
+ use_na_sentinel: bool = True,
) -> tuple[np.ndarray, ExtensionArray]:
"""
Encode the extension array as an enumerated type.
Parameters
----------
- na_sentinel : int, default -1
- Value to use in the `codes` array to indicate missing values.
-
- .. deprecated:: 1.5.0
- The na_sentinel argument is deprecated and
- will be removed in a future version of pandas. Specify use_na_sentinel
- as either True or False.
-
use_na_sentinel : bool, default True
If True, the sentinel -1 will be used for NaN values. If False,
NaN values will be encoded as non-negative integers and will not drop the
@@ -1074,11 +1043,10 @@ def factorize(
# original ExtensionArray.
# 2. ExtensionArray.factorize.
# Complete control over factorization.
- resolved_na_sentinel = resolve_na_sentinel(na_sentinel, use_na_sentinel)
arr, na_value = self._values_for_factorize()
codes, uniques = factorize_array(
- arr, na_sentinel=resolved_na_sentinel, na_value=na_value
+ arr, use_na_sentinel=use_na_sentinel, na_value=na_value
)
uniques_ea = self._from_factorized(uniques, self)
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index e82045eee6143..f98fbfe429871 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -2144,10 +2144,9 @@ def _with_freq(self, freq):
# --------------------------------------------------------------
- # GH#46910 - Keep old signature to test we don't break things for EA library authors
- def factorize( # type:ignore[override]
+ def factorize(
self,
- na_sentinel: int = -1,
+ use_na_sentinel: bool = True,
sort: bool = False,
):
if self.freq is not None:
@@ -2159,7 +2158,7 @@ def factorize( # type:ignore[override]
uniques = uniques[::-1]
return codes, uniques
# FIXME: shouldn't get here; we are ignoring sort
- return super().factorize(na_sentinel=na_sentinel)
+ return super().factorize(use_na_sentinel=use_na_sentinel)
# -------------------------------------------------------------------
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index e74bd2a25bc5e..9968ebc826575 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -901,28 +901,25 @@ def searchsorted(
@doc(ExtensionArray.factorize)
def factorize(
self,
- na_sentinel: int | lib.NoDefault = lib.no_default,
- use_na_sentinel: bool | lib.NoDefault = lib.no_default,
+ use_na_sentinel: bool = True,
) -> tuple[np.ndarray, ExtensionArray]:
- resolved_na_sentinel = algos.resolve_na_sentinel(na_sentinel, use_na_sentinel)
arr = self._data
mask = self._mask
- # Pass non-None na_sentinel; recode and add NA to uniques if necessary below
- na_sentinel_arg = -1 if resolved_na_sentinel is None else resolved_na_sentinel
- codes, uniques = factorize_array(arr, na_sentinel=na_sentinel_arg, mask=mask)
+ # Use a sentinel for na; recode and add NA to uniques if necessary below
+ codes, uniques = factorize_array(arr, use_na_sentinel=True, mask=mask)
# check that factorize_array correctly preserves dtype.
assert uniques.dtype == self.dtype.numpy_dtype, (uniques.dtype, self.dtype)
has_na = mask.any()
- if resolved_na_sentinel is not None or not has_na:
+ if use_na_sentinel or not has_na:
size = len(uniques)
else:
# Make room for an NA value
size = len(uniques) + 1
uniques_mask = np.zeros(size, dtype=bool)
- if resolved_na_sentinel is None and has_na:
+ if not use_na_sentinel and has_na:
na_index = mask.argmax()
# Insert na with the proper code
if na_index == 0:
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 93d6ac0ef6e06..d10b3a216c215 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -873,8 +873,7 @@ def _values_for_factorize(self):
def factorize(
self,
- na_sentinel: int | lib.NoDefault = lib.no_default,
- use_na_sentinel: bool | lib.NoDefault = lib.no_default,
+ use_na_sentinel: bool = True,
) -> tuple[np.ndarray, SparseArray]:
# Currently, ExtensionArray.factorize -> Tuple[ndarray, EA]
# The sparsity on this is backwards from what Sparse would want. Want
@@ -882,12 +881,8 @@ def factorize(
# Given that we have to return a dense array of codes, why bother
# implementing an efficient factorize?
codes, uniques = algos.factorize(
- np.asarray(self), na_sentinel=na_sentinel, use_na_sentinel=use_na_sentinel
+ np.asarray(self), use_na_sentinel=use_na_sentinel
)
- if na_sentinel is lib.no_default:
- na_sentinel = -1
- if use_na_sentinel is lib.no_default or use_na_sentinel:
- codes[codes == -1] = na_sentinel
uniques_sp = SparseArray(uniques, dtype=self.dtype)
return codes, uniques_sp
diff --git a/pandas/core/base.py b/pandas/core/base.py
index afcab23e130cd..46803e1f28975 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1138,12 +1138,9 @@ def _memory_usage(self, deep: bool = False) -> int:
def factorize(
self,
sort: bool = False,
- na_sentinel: int | lib.NoDefault = lib.no_default,
- use_na_sentinel: bool | lib.NoDefault = lib.no_default,
+ use_na_sentinel: bool = True,
):
- return algorithms.factorize(
- self, sort=sort, na_sentinel=na_sentinel, use_na_sentinel=use_na_sentinel
- )
+ return algorithms.factorize(self, sort=sort, use_na_sentinel=use_na_sentinel)
_shared_docs[
"searchsorted"
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index f15c244d8b628..ae88b85aa06e1 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -43,7 +43,6 @@
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
-from pandas.core.algorithms import resolve_na_sentinel
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
@@ -457,11 +456,8 @@ def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
def factorize(
self,
sort: bool = False,
- na_sentinel: int | lib.NoDefault = lib.no_default,
- use_na_sentinel: bool | lib.NoDefault = lib.no_default,
+ use_na_sentinel: bool = True,
) -> tuple[npt.NDArray[np.intp], RangeIndex]:
- # resolve to emit warning if appropriate
- resolve_na_sentinel(na_sentinel, use_na_sentinel)
codes = np.arange(len(self), dtype=np.intp)
uniques = self
if sort and self.step < 0:
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index f46cf6085b06d..74a1051825820 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -2404,7 +2404,7 @@ def _sort_labels(
llength = len(left)
labels = np.concatenate([left, right])
- _, new_labels = algos.safe_sort(uniques, labels, na_sentinel=-1)
+ _, new_labels = algos.safe_sort(uniques, labels, use_na_sentinel=True)
new_left, new_right = new_labels[:llength], new_labels[llength:]
return new_left, new_right
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 838c9f5b8a35f..2df410dff2b00 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -211,33 +211,17 @@ def test_unique(self, data, box, method):
assert isinstance(result, type(data))
assert result[0] == duplicated[0]
- @pytest.mark.parametrize("na_sentinel", [-1, -2])
- def test_factorize(self, data_for_grouping, na_sentinel):
- if na_sentinel == -1:
- msg = "Specifying `na_sentinel=-1` is deprecated"
- else:
- msg = "Specifying the specific value to use for `na_sentinel` is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- codes, uniques = pd.factorize(data_for_grouping, na_sentinel=na_sentinel)
- expected_codes = np.array(
- [0, 0, na_sentinel, na_sentinel, 1, 1, 0, 2], dtype=np.intp
- )
+ def test_factorize(self, data_for_grouping):
+ codes, uniques = pd.factorize(data_for_grouping, use_na_sentinel=True)
+ expected_codes = np.array([0, 0, -1, -1, 1, 1, 0, 2], dtype=np.intp)
expected_uniques = data_for_grouping.take([0, 4, 7])
tm.assert_numpy_array_equal(codes, expected_codes)
self.assert_extension_array_equal(uniques, expected_uniques)
- @pytest.mark.parametrize("na_sentinel", [-1, -2])
- def test_factorize_equivalence(self, data_for_grouping, na_sentinel):
- if na_sentinel == -1:
- msg = "Specifying `na_sentinel=-1` is deprecated"
- else:
- msg = "Specifying the specific value to use for `na_sentinel` is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- codes_1, uniques_1 = pd.factorize(
- data_for_grouping, na_sentinel=na_sentinel
- )
- codes_2, uniques_2 = data_for_grouping.factorize(na_sentinel=na_sentinel)
+ def test_factorize_equivalence(self, data_for_grouping):
+ codes_1, uniques_1 = pd.factorize(data_for_grouping, use_na_sentinel=True)
+ codes_2, uniques_2 = data_for_grouping.factorize(use_na_sentinel=True)
tm.assert_numpy_array_equal(codes_1, codes_2)
self.assert_extension_array_equal(uniques_1, uniques_2)
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index f68e38be44811..d44944c74f9d5 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -871,8 +871,7 @@ def test_unique(self, data, box, method, request):
)
super().test_unique(data, box, method)
- @pytest.mark.parametrize("na_sentinel", [-1, -2])
- def test_factorize(self, data_for_grouping, na_sentinel, request):
+ def test_factorize(self, data_for_grouping, request):
pa_dtype = data_for_grouping.dtype.pyarrow_dtype
if pa.types.is_duration(pa_dtype):
request.node.add_marker(
@@ -887,10 +886,9 @@ def test_factorize(self, data_for_grouping, na_sentinel, request):
reason=f"{pa_dtype} only has 2 unique possible values",
)
)
- super().test_factorize(data_for_grouping, na_sentinel)
+ super().test_factorize(data_for_grouping)
- @pytest.mark.parametrize("na_sentinel", [-1, -2])
- def test_factorize_equivalence(self, data_for_grouping, na_sentinel, request):
+ def test_factorize_equivalence(self, data_for_grouping, request):
pa_dtype = data_for_grouping.dtype.pyarrow_dtype
if pa.types.is_duration(pa_dtype):
request.node.add_marker(
@@ -899,7 +897,7 @@ def test_factorize_equivalence(self, data_for_grouping, na_sentinel, request):
reason=f"dictionary_encode has no pyarrow kernel for {pa_dtype}",
)
)
- super().test_factorize_equivalence(data_for_grouping, na_sentinel)
+ super().test_factorize_equivalence(data_for_grouping)
def test_factorize_empty(self, data, request):
pa_dtype = data.dtype.pyarrow_dtype
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index dd067102aba6c..b846028dab947 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -174,18 +174,10 @@ class TestReshaping(base.BaseReshapingTests):
class TestMethods(base.BaseMethodsTests):
- @pytest.mark.parametrize("na_sentinel", [-1, -2])
- def test_factorize(self, data_for_grouping, na_sentinel):
+ def test_factorize(self, data_for_grouping):
# override because we only have 2 unique values
- if na_sentinel == -1:
- msg = "Specifying `na_sentinel=-1` is deprecated"
- else:
- msg = "Specifying the specific value to use for `na_sentinel` is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- labels, uniques = pd.factorize(data_for_grouping, na_sentinel=na_sentinel)
- expected_labels = np.array(
- [0, 0, na_sentinel, na_sentinel, 1, 1, 0], dtype=np.intp
- )
+ labels, uniques = pd.factorize(data_for_grouping, use_na_sentinel=True)
+ expected_labels = np.array([0, 0, -1, -1, 1, 1, 0], dtype=np.intp)
expected_uniques = data_for_grouping.take([0, 4])
tm.assert_numpy_array_equal(labels, expected_labels)
diff --git a/pandas/tests/extension/test_extension.py b/pandas/tests/extension/test_extension.py
index a4b1a4b43ef2b..1ed626cd51080 100644
--- a/pandas/tests/extension/test_extension.py
+++ b/pandas/tests/extension/test_extension.py
@@ -4,7 +4,6 @@
import numpy as np
import pytest
-import pandas._testing as tm
from pandas.core.arrays import ExtensionArray
@@ -25,16 +24,3 @@ def test_errors(self, data, all_arithmetic_operators):
op_name = all_arithmetic_operators
with pytest.raises(AttributeError):
getattr(data, op_name)
-
-
-def test_depr_na_sentinel():
- # GH#46910
- msg = "The `na_sentinel` argument of `MyEA.factorize` is deprecated"
- with tm.assert_produces_warning(DeprecationWarning, match=msg):
-
- class MyEA(ExtensionArray):
- def factorize(self, na_sentinel=-1):
- pass
-
- with tm.assert_produces_warning(None):
- MyEA()
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index c6aefd5bb73b9..d2de6cb7f21a3 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -102,20 +102,6 @@ def test_series_factorize_use_na_sentinel_false(self):
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_index_equal(uniques, expected_uniques)
- @pytest.mark.parametrize("na_sentinel", [None, -1, -10])
- def test_depr_na_sentinel(self, na_sentinel, index_or_series_obj):
- # GH#46910
- if na_sentinel is None:
- msg = "Specifying `na_sentinel=None` is deprecated"
- elif na_sentinel == -1:
- msg = "Specifying `na_sentinel=-1` is deprecated"
- else:
- msg = "Specifying the specific value to use for `na_sentinel` is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- pd.factorize(index_or_series_obj, na_sentinel=na_sentinel)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- index_or_series_obj.factorize(na_sentinel=na_sentinel)
-
def test_basic(self):
codes, uniques = algos.factorize(["a", "b", "b", "a", "a", "c", "c", "c"])
@@ -421,7 +407,6 @@ def test_parametrized_factorize_na_value(self, data, na_value):
tm.assert_numpy_array_equal(uniques, expected_uniques)
@pytest.mark.parametrize("sort", [True, False])
- @pytest.mark.parametrize("na_sentinel", [-1, -10, 100])
@pytest.mark.parametrize(
"data, uniques",
[
@@ -436,18 +421,13 @@ def test_parametrized_factorize_na_value(self, data, na_value):
],
ids=["numpy_array", "extension_array"],
)
- def test_factorize_na_sentinel(self, sort, na_sentinel, data, uniques):
- if na_sentinel == -1:
- msg = "Specifying `na_sentinel=-1` is deprecated"
- else:
- msg = "the specific value to use for `na_sentinel` is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- codes, uniques = algos.factorize(data, sort=sort, na_sentinel=na_sentinel)
+ def test_factorize_use_na_sentinel(self, sort, data, uniques):
+ codes, uniques = algos.factorize(data, sort=sort, use_na_sentinel=True)
if sort:
- expected_codes = np.array([1, 0, na_sentinel, 1], dtype=np.intp)
+ expected_codes = np.array([1, 0, -1, 1], dtype=np.intp)
expected_uniques = algos.safe_sort(uniques)
else:
- expected_codes = np.array([0, 1, na_sentinel, 0], dtype=np.intp)
+ expected_codes = np.array([0, 1, -1, 0], dtype=np.intp)
expected_uniques = uniques
tm.assert_numpy_array_equal(codes, expected_codes)
if isinstance(data, np.ndarray):
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index ba1943878cfad..44895cc576fd0 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -412,19 +412,18 @@ def test_basic_sort(self, arg, exp):
@pytest.mark.parametrize("verify", [True, False])
@pytest.mark.parametrize(
- "codes, exp_codes, na_sentinel",
+ "codes, exp_codes",
[
- [[0, 1, 1, 2, 3, 0, -1, 4], [3, 1, 1, 2, 0, 3, -1, 4], -1],
- [[0, 1, 1, 2, 3, 0, 99, 4], [3, 1, 1, 2, 0, 3, 99, 4], 99],
- [[], [], -1],
+ [[0, 1, 1, 2, 3, 0, -1, 4], [3, 1, 1, 2, 0, 3, -1, 4]],
+ [[], []],
],
)
- def test_codes(self, verify, codes, exp_codes, na_sentinel):
+ def test_codes(self, verify, codes, exp_codes):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
result, result_codes = safe_sort(
- values, codes, na_sentinel=na_sentinel, verify=verify
+ values, codes, use_na_sentinel=True, verify=verify
)
expected_codes = np.array(exp_codes, dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
@@ -435,17 +434,14 @@ def test_codes(self, verify, codes, exp_codes, na_sentinel):
reason="In CI environment can crash thread with: "
"Windows fatal exception: access violation",
)
- @pytest.mark.parametrize("na_sentinel", [-1, 99])
- def test_codes_out_of_bound(self, na_sentinel):
+ def test_codes_out_of_bound(self):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
# out of bound indices
codes = [0, 101, 102, 2, 3, 0, 99, 4]
- result, result_codes = safe_sort(values, codes, na_sentinel=na_sentinel)
- expected_codes = np.array(
- [3, na_sentinel, na_sentinel, 2, 0, 3, na_sentinel, 4], dtype=np.intp
- )
+ result, result_codes = safe_sort(values, codes, use_na_sentinel=True)
+ expected_codes = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
@@ -494,14 +490,11 @@ def test_extension_array(self, arg, exp):
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("verify", [True, False])
- @pytest.mark.parametrize("na_sentinel", [-1, 99])
- def test_extension_array_codes(self, verify, na_sentinel):
+ def test_extension_array_codes(self, verify):
a = array([1, 3, 2], dtype="Int64")
- result, codes = safe_sort(
- a, [0, 1, na_sentinel, 2], na_sentinel=na_sentinel, verify=verify
- )
+ result, codes = safe_sort(a, [0, 1, -1, 2], use_na_sentinel=True, verify=verify)
expected_values = array([1, 2, 3], dtype="Int64")
- expected_codes = np.array([0, 2, na_sentinel, 1], dtype=np.intp)
+ expected_codes = np.array([0, 2, -1, 1], dtype=np.intp)
tm.assert_extension_array_equal(result, expected_values)
tm.assert_numpy_array_equal(codes, expected_codes)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
cc @jbrockmendel @jorisvandenbossche | https://api.github.com/repos/pandas-dev/pandas/pulls/49402 | 2022-10-30T18:12:17Z | 2022-11-07T17:34:41Z | 2022-11-07T17:34:41Z | 2022-11-08T02:33:43Z |
DEPR: Enforce deprecation of silent dropping of nuisance columns in agg_list_like | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 2225cbd74d718..0f0382eaf1584 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -310,7 +310,7 @@ def time_different_python_functions_multicol(self, df):
df.groupby(["key1", "key2"]).agg([sum, min, max])
def time_different_python_functions_singlecol(self, df):
- df.groupby("key1").agg([sum, min, max])
+ df.groupby("key1")[["value1", "value2", "value3"]].agg([sum, min, max])
class GroupStrings:
diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst
index 0883113474f54..2204c8b04e438 100644
--- a/doc/source/user_guide/basics.rst
+++ b/doc/source/user_guide/basics.rst
@@ -1039,34 +1039,6 @@ not noted for a particular column will be ``NaN``:
tsdf.agg({"A": ["mean", "min"], "B": "sum"})
-.. _basics.aggregation.mixed_string:
-
-Mixed dtypes
-++++++++++++
-
-.. deprecated:: 1.4.0
- Attempting to determine which columns cannot be aggregated and silently dropping them from the results is deprecated and will be removed in a future version. If any porition of the columns or operations provided fail, the call to ``.agg`` will raise.
-
-When presented with mixed dtypes that cannot aggregate, ``.agg`` will only take the valid
-aggregations. This is similar to how ``.groupby.agg`` works.
-
-.. ipython:: python
-
- mdf = pd.DataFrame(
- {
- "A": [1, 2, 3],
- "B": [1.0, 2.0, 3.0],
- "C": ["foo", "bar", "baz"],
- "D": pd.date_range("20130101", periods=3),
- }
- )
- mdf.dtypes
-
-.. ipython:: python
- :okwarning:
-
- mdf.agg(["min", "sum"])
-
.. _basics.aggregation.custom_describe:
Custom describe
diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index f9b8b793bfde8..dae42dd4f1118 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -1007,7 +1007,7 @@ functions:
.. ipython:: python
:okwarning:
- grouped = df.groupby("A")
+ grouped = df.groupby("A")[["C", "D"]]
grouped.agg(lambda x: x.std())
But, it's rather verbose and can be untidy if you need to pass additional
diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst
index faf4b1ac44d5b..b41a469fe0c1f 100644
--- a/doc/source/whatsnew/v0.20.0.rst
+++ b/doc/source/whatsnew/v0.20.0.rst
@@ -104,10 +104,13 @@ aggregations. This is similar to how groupby ``.agg()`` works. (:issue:`15015`)
'D': pd.date_range('20130101', periods=3)})
df.dtypes
-.. ipython:: python
- :okwarning:
+.. code-block:: python
- df.agg(['min', 'sum'])
+ In [10]: df.agg(['min', 'sum'])
+ Out[10]:
+ A B C D
+ min 1 1.0 bar 2013-01-01
+ sum 6 6.0 foobarbaz NaT
.. _whatsnew_0200.enhancements.dataio_dtype:
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 5614b7a2c0846..d672136fe3581 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -277,7 +277,7 @@ Removal of prior version deprecations/changes
- Removed the deprecated ``base`` and ``loffset`` arguments from :meth:`pandas.DataFrame.resample`, :meth:`pandas.Series.resample` and :class:`pandas.Grouper`. Use ``offset`` or ``origin`` instead (:issue:`31809`)
- Changed behavior of :meth:`DataFrame.any` and :meth:`DataFrame.all` with ``bool_only=True``; object-dtype columns with all-bool values will no longer be included, manually cast to ``bool`` dtype first (:issue:`46188`)
- Enforced deprecation of silently dropping columns that raised a ``TypeError`` in :class:`Series.transform` and :class:`DataFrame.transform` when used with a list or dictionary (:issue:`43740`)
--
+- Change behavior of :meth:`DataFrame.apply` with list-like so that any partial failure will raise an error (:issue:`43740`)
.. ---------------------------------------------------------------------------
.. _whatsnew_200.performance:
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index cccef939f94d4..67edde2feafbe 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -4,7 +4,6 @@
from collections import defaultdict
from functools import partial
import inspect
-import re
from typing import (
TYPE_CHECKING,
Any,
@@ -18,7 +17,6 @@
Sequence,
cast,
)
-import warnings
import numpy as np
@@ -35,12 +33,8 @@
NDFrameT,
npt,
)
-from pandas.errors import (
- DataError,
- SpecificationError,
-)
+from pandas.errors import SpecificationError
from pandas.util._decorators import cache_readonly
-from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import (
@@ -320,88 +314,28 @@ def agg_list_like(self) -> DataFrame | Series:
results = []
keys = []
- failed_names = []
-
- depr_nuisance_columns_msg = (
- "{} did not aggregate successfully. If any error is "
- "raised this will raise in a future version of pandas. "
- "Drop these columns/ops to avoid this warning."
- )
# degenerate case
if selected_obj.ndim == 1:
for a in arg:
colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
- try:
- new_res = colg.aggregate(a)
-
- except TypeError:
- failed_names.append(com.get_callable_name(a) or a)
- else:
- results.append(new_res)
+ new_res = colg.aggregate(a)
+ results.append(new_res)
- # make sure we find a good name
- name = com.get_callable_name(a) or a
- keys.append(name)
+ # make sure we find a good name
+ name = com.get_callable_name(a) or a
+ keys.append(name)
# multiples
else:
indices = []
for index, col in enumerate(selected_obj):
colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
- try:
- # Capture and suppress any warnings emitted by us in the call
- # to agg below, but pass through any warnings that were
- # generated otherwise.
- # This is necessary because of https://bugs.python.org/issue29672
- # See GH #43741 for more details
- with warnings.catch_warnings(record=True) as record:
- new_res = colg.aggregate(arg)
- if len(record) > 0:
- match = re.compile(depr_nuisance_columns_msg.format(".*"))
- for warning in record:
- if re.match(match, str(warning.message)):
- failed_names.append(col)
- else:
- warnings.warn_explicit(
- message=warning.message,
- category=warning.category,
- filename=warning.filename,
- lineno=warning.lineno,
- )
-
- except (TypeError, DataError):
- failed_names.append(col)
- except ValueError as err:
- # cannot aggregate
- if "Must produce aggregated value" in str(err):
- # raised directly in _aggregate_named
- failed_names.append(col)
- elif "no results" in str(err):
- # reached in test_frame_apply.test_nuiscance_columns
- # where the colg.aggregate(arg) ends up going through
- # the selected_obj.ndim == 1 branch above with arg == ["sum"]
- # on a datetime64[ns] column
- failed_names.append(col)
- else:
- raise
- else:
- results.append(new_res)
- indices.append(index)
-
+ new_res = colg.aggregate(arg)
+ results.append(new_res)
+ indices.append(index)
keys = selected_obj.columns.take(indices)
- # if we are empty
- if not len(results):
- raise ValueError("no results")
-
- if len(failed_names) > 0:
- warnings.warn(
- depr_nuisance_columns_msg.format(failed_names),
- FutureWarning,
- stacklevel=find_stack_level(),
- )
-
try:
concatenated = concat(results, keys=keys, axis=1, sort=False)
except TypeError as err:
@@ -482,8 +416,6 @@ def agg_dict_like(self) -> DataFrame | Series:
keys_to_use = ktu
axis: AxisInt = 0 if isinstance(obj, ABCSeries) else 1
- # error: Key expression in dictionary comprehension has incompatible type
- # "Hashable"; expected type "NDFrame" [misc]
result = concat(
{k: results[k] for k in keys_to_use}, # type: ignore[misc]
axis=axis,
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 4c06ee60d3f6a..28b5e944174a8 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1141,8 +1141,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
result = gba.agg()
except ValueError as err:
- if "no results" not in str(err):
- # raised directly by _aggregate_multiple_funcs
+ if "No objects to concatenate" not in str(err):
raise
result = self._aggregate_frame(func)
diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py
index c6294cfc0c670..510d4ab702fdd 100644
--- a/pandas/tests/apply/test_frame_apply.py
+++ b/pandas/tests/apply/test_frame_apply.py
@@ -1141,14 +1141,13 @@ def test_agg_with_name_as_column_name():
tm.assert_series_equal(result, expected)
-def test_agg_multiple_mixed_no_warning():
+def test_agg_multiple_mixed():
# GH 20909
mdf = DataFrame(
{
"A": [1, 2, 3],
"B": [1.0, 2.0, 3.0],
"C": ["foo", "bar", "baz"],
- "D": date_range("20130101", periods=3),
}
)
expected = DataFrame(
@@ -1156,29 +1155,41 @@ def test_agg_multiple_mixed_no_warning():
"A": [1, 6],
"B": [1.0, 6.0],
"C": ["bar", "foobarbaz"],
- "D": [Timestamp("2013-01-01"), pd.NaT],
},
index=["min", "sum"],
)
# sorted index
- with tm.assert_produces_warning(
- FutureWarning, match=r"\['D'\] did not aggregate successfully"
- ):
- result = mdf.agg(["min", "sum"])
-
+ result = mdf.agg(["min", "sum"])
tm.assert_frame_equal(result, expected)
- with tm.assert_produces_warning(
- FutureWarning, match=r"\['D'\] did not aggregate successfully"
- ):
- result = mdf[["D", "C", "B", "A"]].agg(["sum", "min"])
-
+ result = mdf[["C", "B", "A"]].agg(["sum", "min"])
# GH40420: the result of .agg should have an index that is sorted
# according to the arguments provided to agg.
- expected = expected[["D", "C", "B", "A"]].reindex(["sum", "min"])
+ expected = expected[["C", "B", "A"]].reindex(["sum", "min"])
tm.assert_frame_equal(result, expected)
+def test_agg_multiple_mixed_raises():
+ # GH 20909
+ mdf = DataFrame(
+ {
+ "A": [1, 2, 3],
+ "B": [1.0, 2.0, 3.0],
+ "C": ["foo", "bar", "baz"],
+ "D": date_range("20130101", periods=3),
+ }
+ )
+
+ # sorted index
+ # TODO: GH#49399 will fix error message
+ msg = "DataFrame constructor called with"
+ with pytest.raises(TypeError, match=msg):
+ mdf.agg(["min", "sum"])
+
+ with pytest.raises(TypeError, match=msg):
+ mdf[["D", "C", "B", "A"]].agg(["sum", "min"])
+
+
def test_agg_reduce(axis, float_frame):
other_axis = 1 if axis in {0, "index"} else 0
name1, name2 = float_frame.axes[other_axis].unique()[:2].sort_values()
@@ -1277,14 +1288,10 @@ def test_nuiscance_columns():
expected = Series([6, 6.0, "foobarbaz"], index=["A", "B", "C"])
tm.assert_series_equal(result, expected)
- with tm.assert_produces_warning(
- FutureWarning, match=r"\['D'\] did not aggregate successfully"
- ):
- result = df.agg(["sum"])
- expected = DataFrame(
- [[6, 6.0, "foobarbaz"]], index=["sum"], columns=["A", "B", "C"]
- )
- tm.assert_frame_equal(result, expected)
+ # TODO: GH#49399 will fix error message
+ msg = "DataFrame constructor called with"
+ with pytest.raises(TypeError, match=msg):
+ df.agg(["sum"])
@pytest.mark.parametrize("how", ["agg", "apply"])
@@ -1499,27 +1506,23 @@ def test_aggregation_func_column_order():
# according to the arguments provided to agg.
df = DataFrame(
[
- ("1", 1, 0, 0),
- ("2", 2, 0, 0),
- ("3", 3, 0, 0),
- ("4", 4, 5, 4),
- ("5", 5, 6, 6),
- ("6", 6, 7, 7),
+ (1, 0, 0),
+ (2, 0, 0),
+ (3, 0, 0),
+ (4, 5, 4),
+ (5, 6, 6),
+ (6, 7, 7),
],
- columns=("item", "att1", "att2", "att3"),
+ columns=("att1", "att2", "att3"),
)
def foo(s):
return s.sum() / 2
aggs = ["sum", foo, "count", "min"]
- with tm.assert_produces_warning(
- FutureWarning, match=r"\['item'\] did not aggregate successfully"
- ):
- result = df.agg(aggs)
+ result = df.agg(aggs)
expected = DataFrame(
{
- "item": ["123456", np.nan, 6, "1"],
"att1": [21.0, 10.5, 6.0, 1.0],
"att2": [18.0, 9.0, 6.0, 0.0],
"att3": [17.0, 8.5, 6.0, 0.0],
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index ad7368a69c0f5..9514d4c95b394 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -383,21 +383,18 @@ def test_agg_multiple_functions_same_name_with_ohlc_present():
def test_multiple_functions_tuples_and_non_tuples(df):
# #1359
+ # Columns B and C would cause partial failure
+ df = df.drop(columns=["B", "C"])
+
funcs = [("foo", "mean"), "std"]
ex_funcs = [("foo", "mean"), ("std", "std")]
- result = df.groupby("A")["C"].agg(funcs)
- expected = df.groupby("A")["C"].agg(ex_funcs)
+ result = df.groupby("A")["D"].agg(funcs)
+ expected = df.groupby("A")["D"].agg(ex_funcs)
tm.assert_frame_equal(result, expected)
- with tm.assert_produces_warning(
- FutureWarning, match=r"\['B'\] did not aggregate successfully"
- ):
- result = df.groupby("A").agg(funcs)
- with tm.assert_produces_warning(
- FutureWarning, match=r"\['B'\] did not aggregate successfully"
- ):
- expected = df.groupby("A").agg(ex_funcs)
+ result = df.groupby("A").agg(funcs)
+ expected = df.groupby("A").agg(ex_funcs)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py
index f84abecea37da..6740729d038a7 100644
--- a/pandas/tests/groupby/aggregate/test_other.py
+++ b/pandas/tests/groupby/aggregate/test_other.py
@@ -25,10 +25,8 @@
from pandas.io.formats.printing import pprint_thing
-def test_agg_api():
- # GH 6337
- # https://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
- # different api for agg when passed custom function with mixed frame
+def test_agg_partial_failure_raises():
+ # GH#43741
df = DataFrame(
{
@@ -43,19 +41,11 @@ def test_agg_api():
def peak_to_peak(arr):
return arr.max() - arr.min()
- with tm.assert_produces_warning(
- FutureWarning,
- match=r"\['key2'\] did not aggregate successfully",
- ):
- expected = grouped.agg([peak_to_peak])
- expected.columns = ["data1", "data2"]
-
- with tm.assert_produces_warning(
- FutureWarning,
- match=r"\['key2'\] did not aggregate successfully",
- ):
- result = grouped.agg(peak_to_peak)
- tm.assert_frame_equal(result, expected)
+ with pytest.raises(TypeError, match="unsupported operand type"):
+ grouped.agg([peak_to_peak])
+
+ with pytest.raises(TypeError, match="unsupported operand type"):
+ grouped.agg(peak_to_peak)
def test_agg_datetimes_mixed():
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 26f269d3d4384..f4d8fc55d8d46 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -502,6 +502,54 @@ def test_multi_key_multiple_functions(df):
def test_frame_multi_key_function_list():
+ data = DataFrame(
+ {
+ "A": [
+ "foo",
+ "foo",
+ "foo",
+ "foo",
+ "bar",
+ "bar",
+ "bar",
+ "bar",
+ "foo",
+ "foo",
+ "foo",
+ ],
+ "B": [
+ "one",
+ "one",
+ "one",
+ "two",
+ "one",
+ "one",
+ "one",
+ "two",
+ "two",
+ "two",
+ "one",
+ ],
+ "D": np.random.randn(11),
+ "E": np.random.randn(11),
+ "F": np.random.randn(11),
+ }
+ )
+
+ grouped = data.groupby(["A", "B"])
+ funcs = [np.mean, np.std]
+ agged = grouped.agg(funcs)
+ expected = pd.concat(
+ [grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
+ keys=["D", "E", "F"],
+ axis=1,
+ )
+ assert isinstance(agged.index, MultiIndex)
+ assert isinstance(expected.index, MultiIndex)
+ tm.assert_frame_equal(agged, expected)
+
+
+def test_frame_multi_key_function_list_partial_failure():
data = DataFrame(
{
"A": [
@@ -551,18 +599,8 @@ def test_frame_multi_key_function_list():
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
- with tm.assert_produces_warning(
- FutureWarning, match=r"\['C'\] did not aggregate successfully"
- ):
- agged = grouped.agg(funcs)
- expected = pd.concat(
- [grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
- keys=["D", "E", "F"],
- axis=1,
- )
- assert isinstance(agged.index, MultiIndex)
- assert isinstance(expected.index, MultiIndex)
- tm.assert_frame_equal(agged, expected)
+ with pytest.raises(TypeError, match="Could not convert dullshinyshiny to numeric"):
+ grouped.agg(funcs)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index c5cd777962df3..53d416a74cac2 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -407,14 +407,14 @@ def test_agg():
expected.columns = pd.MultiIndex.from_product([["A", "B"], ["mean", "std"]])
for t in cases:
# In case 2, "date" is an index and a column, so agg still tries to agg
- warn = FutureWarning if t == cases[2] else None
- with tm.assert_produces_warning(
- warn,
- match=r"\['date'\] did not aggregate successfully",
- ):
- # .var on dt64 column raises and is dropped
+ if t == cases[2]:
+ # .var on dt64 column raises
+ msg = "Cannot cast DatetimeArray to dtype float64"
+ with pytest.raises(TypeError, match=msg):
+ t.aggregate([np.mean, np.std])
+ else:
result = t.aggregate([np.mean, np.std])
- tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, expected)
expected = pd.concat([a_mean, b_std], axis=1)
for t in cases:
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
It seemed to me we'd lose test coverage with some of the tests if we just were tested these ops raised. So I split some into testing without the nuisance columns and with them.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49401 | 2022-10-30T17:13:16Z | 2022-11-02T00:46:30Z | 2022-11-02T00:46:30Z | 2022-11-03T00:11:54Z |
enable pylint:useless-return | diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index fefc220403b9d..35f1ace7ec351 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -304,7 +304,6 @@ def _fill_mask_inplace(
# (for now) when self.ndim == 2, we assume axis=0
func = missing.get_fill_func(method, ndim=self.ndim)
func(self._ndarray.T, limit=limit, mask=mask.T)
- return
@doc(ExtensionArray.fillna)
def fillna(
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 60772cbcc30a1..cc9b2ce3fed42 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -1556,7 +1556,6 @@ def _fill_mask_inplace(
func(npvalues, limit=limit, mask=mask.copy())
new_values = self._from_sequence(npvalues, dtype=self.dtype)
self[mask] = new_values[mask]
- return
def _rank(
self,
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 6ad8403c62720..0d058ead9d22c 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -258,7 +258,6 @@ def interpolate_array_2d(
fill_value=fill_value,
**kwargs,
)
- return
def _interpolate_2d_with_fill(
@@ -341,7 +340,6 @@ def func(yvalues: np.ndarray) -> None:
# Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]],
# Sequence[Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]]]]]"
np.apply_along_axis(func, axis, data) # type: ignore[arg-type]
- return
def _index_to_interp_indices(index: Index, method: str) -> np.ndarray:
@@ -762,8 +760,6 @@ def _interpolate_with_limit_area(
values[invalid] = np.nan
- return
-
def interpolate_2d(
values: np.ndarray,
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 0cd89a205bb82..935c39af8af3a 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -151,7 +151,6 @@ def f_scalar(group):
def f_none(group):
# GH10519, GH12155, GH21417
names.append(group.name)
- return None
def f_constant_df(group):
# GH2936, GH20084
diff --git a/pyproject.toml b/pyproject.toml
index 0ce8cf87ab17e..c4e8984d07100 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -135,7 +135,6 @@ disable = [
"unnecessary-list-index-lookup",
"use-a-generator",
"useless-option-value",
- "useless-return",
# pylint type "W": warning, for python specific problems
"abstract-method",
| Issue #48855. This PR enables pylint type "R" warning: "useless-return".
In one place (https://github.com/natmokval/pandas/blob/da3025ae6dec3fed73df0cf1d1bbc5ef0d816695/pandas/tests/groupby/test_apply.py#L151) the warning is disabled to match the style of surrounding functions.
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). | https://api.github.com/repos/pandas-dev/pandas/pulls/49400 | 2022-10-30T17:03:43Z | 2022-10-31T17:44:24Z | 2022-10-31T17:44:24Z | 2022-10-31T17:50:38Z |
DEPR: remove 1.0 deprecations in 2.0 (`Styler`) | diff --git a/asv_bench/benchmarks/io/style.py b/asv_bench/benchmarks/io/style.py
index f0902c9c2c328..1ebdb08e8c727 100644
--- a/asv_bench/benchmarks/io/style.py
+++ b/asv_bench/benchmarks/io/style.py
@@ -83,11 +83,11 @@ def _style_format(self):
def _style_apply_format_hide(self):
self.st = self.df.style.applymap(lambda v: "color: red;")
self.st.format("{:.3f}")
- self.st.hide_index(self.st.index[1:])
- self.st.hide_columns(self.st.columns[1:])
+ self.st.hide(self.st.index[1:], axis=0)
+ self.st.hide(self.st.columns[1:], axis=1)
def _style_tooltips(self):
ttips = DataFrame("abc", index=self.df.index[::2], columns=self.df.columns[::2])
self.st = self.df.style.set_tooltips(ttips)
- self.st.hide_index(self.st.index[12:])
- self.st.hide_columns(self.st.columns[12:])
+ self.st.hide(self.st.index[12:], axis=0)
+ self.st.hide(self.st.columns[12:], axis=1)
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 1f245b585df48..136c97f18881c 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -173,6 +173,11 @@ Removal of prior version deprecations/changes
- Removed deprecated ``weekofyear`` and ``week`` attributes of :class:`DatetimeArray`, :class:`DatetimeIndex` and ``dt`` accessor in favor of ``isocalendar().week`` (:issue:`33595`)
- Removed deprecated :meth:`RangeIndex._start`, :meth:`RangeIndex._stop`, :meth:`RangeIndex._step`, use ``start``, ``stop``, ``step`` instead (:issue:`30482`)
- Removed deprecated :meth:`DatetimeIndex.to_perioddelta`, Use ``dtindex - dtindex.to_period(freq).to_timestamp()`` instead (:issue:`34853`)
+- Removed deprecated :meth:`.Styler.hide_index` and :meth:`.Styler.hide_columns` (:issue:`49397`)
+- Removed deprecated :meth:`.Styler.set_na_rep` and :meth:`.Styler.set_precision` (:issue:`49397`)
+- Removed deprecated :meth:`.Styler.where` (:issue:`49397`)
+- Removed deprecated :meth:`.Styler.render` (:issue:`49397`)
+- Removed deprecated argument ``null_color`` in :meth:`.Styler.highlight_null` (:issue:`49397`)
- Enforced deprecation disallowing passing a timezone-aware :class:`Timestamp` and ``dtype="datetime64[ns]"`` to :class:`Series` or :class:`DataFrame` constructors (:issue:`41555`)
- Enforced deprecation disallowing passing a sequence of timezone-aware values and ``dtype="datetime64[ns]"`` to to :class:`Series` or :class:`DataFrame` constructors (:issue:`41555`)
- Enforced deprecation disallowing using ``.astype`` to convert a ``datetime64[ns]`` :class:`Series`, :class:`DataFrame`, or :class:`DatetimeIndex` to timezone-aware dtype, use ``obj.tz_localize`` or ``ser.dt.tz_localize`` instead (:issue:`39258`)
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index f3c58ac2ad18d..fb44082aa5b58 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -16,13 +16,11 @@
Sequence,
overload,
)
-import warnings
import numpy as np
from pandas._config import get_option
-from pandas._libs import lib
from pandas._typing import (
Axis,
AxisInt,
@@ -39,7 +37,6 @@
Substitution,
doc,
)
-from pandas.util._exceptions import find_stack_level
import pandas as pd
from pandas import (
@@ -270,8 +267,6 @@ def __init__(
formatter = formatter or get_option("styler.format.formatter")
# precision is handled by superclass as default for performance
- self.precision = precision # can be removed on set_precision depr cycle
- self.na_rep = na_rep # can be removed on set_na_rep depr cycle
self.format(
formatter=formatter,
precision=precision,
@@ -390,72 +385,6 @@ def _repr_latex_(self) -> str | None:
return self.to_latex()
return None
- def render(
- self,
- sparse_index: bool | None = None,
- sparse_columns: bool | None = None,
- **kwargs,
- ) -> str:
- """
- Render the ``Styler`` including all applied styles to HTML.
-
- .. deprecated:: 1.4.0
-
- Parameters
- ----------
- sparse_index : bool, optional
- Whether to sparsify the display of a hierarchical index. Setting to False
- will display each explicit level element in a hierarchical key for each row.
- Defaults to ``pandas.options.styler.sparse.index`` value.
- sparse_columns : bool, optional
- Whether to sparsify the display of a hierarchical index. Setting to False
- will display each explicit level element in a hierarchical key for each row.
- Defaults to ``pandas.options.styler.sparse.columns`` value.
- **kwargs
- Any additional keyword arguments are passed
- through to ``self.template.render``.
- This is useful when you need to provide
- additional variables for a custom template.
-
- Returns
- -------
- rendered : str
- The rendered HTML.
-
- Notes
- -----
- This method is deprecated in favour of ``Styler.to_html``.
-
- Styler objects have defined the ``_repr_html_`` method
- which automatically calls ``self.to_html()`` when it's the
- last item in a Notebook cell.
-
- When calling ``Styler.render()`` directly, wrap the result in
- ``IPython.display.HTML`` to view the rendered HTML in the notebook.
-
- Pandas uses the following keys in render. Arguments passed
- in ``**kwargs`` take precedence, so think carefully if you want
- to override them:
-
- * head
- * cellstyle
- * body
- * uuid
- * table_styles
- * caption
- * table_attributes
- """
- warnings.warn(
- "this method is deprecated in favour of `Styler.to_html()`",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- if sparse_index is None:
- sparse_index = get_option("styler.sparse.index")
- if sparse_columns is None:
- sparse_columns = get_option("styler.sparse.columns")
- return self._render_html(sparse_index, sparse_columns, **kwargs)
-
def set_tooltips(
self,
ttips: DataFrame,
@@ -2063,108 +1992,6 @@ def applymap(
)
return self
- @Substitution(subset=subset)
- def where(
- self,
- cond: Callable,
- value: str,
- other: str | None = None,
- subset: Subset | None = None,
- **kwargs,
- ) -> Styler:
- """
- Apply CSS-styles based on a conditional function elementwise.
-
- .. deprecated:: 1.3.0
-
- Updates the HTML representation with a style which is
- selected in accordance with the return value of a function.
-
- Parameters
- ----------
- cond : callable
- ``cond`` should take a scalar, and optional keyword arguments, and return
- a boolean.
- value : str
- Applied when ``cond`` returns true.
- other : str
- Applied when ``cond`` returns false.
- %(subset)s
- **kwargs : dict
- Pass along to ``cond``.
-
- Returns
- -------
- self : Styler
-
- See Also
- --------
- Styler.applymap: Apply a CSS-styling function elementwise.
- Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise.
-
- Notes
- -----
- This method is deprecated.
-
- This method is a convenience wrapper for :meth:`Styler.applymap`, which we
- recommend using instead.
-
- The example:
-
- >>> df = pd.DataFrame([[1, 2], [3, 4]])
- >>> def cond(v, limit=4):
- ... return v > 1 and v != limit
- >>> df.style.where(cond, value='color:green;', other='color:red;')
- ... # doctest: +SKIP
-
- should be refactored to:
-
- >>> def style_func(v, value, other, limit=4):
- ... cond = v > 1 and v != limit
- ... return value if cond else other
- >>> df.style.applymap(style_func, value='color:green;', other='color:red;')
- ... # doctest: +SKIP
- """
- warnings.warn(
- "this method is deprecated in favour of `Styler.applymap()`",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
-
- if other is None:
- other = ""
-
- return self.applymap(
- lambda val: value if cond(val, **kwargs) else other,
- subset=subset,
- )
-
- def set_precision(self, precision: int) -> StylerRenderer:
- """
- Set the precision used to display values.
-
- .. deprecated:: 1.3.0
-
- Parameters
- ----------
- precision : int
-
- Returns
- -------
- self : Styler
-
- Notes
- -----
- This method is deprecated see `Styler.format`.
- """
- warnings.warn(
- "this method is deprecated in favour of `Styler.format(precision=..)`",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- self.precision = precision
- return self.format(precision=precision, na_rep=self.na_rep)
-
def set_table_attributes(self, attributes: str) -> Styler:
"""
Set the table attributes added to the ``<table>`` HTML element.
@@ -2649,140 +2476,6 @@ def set_table_styles(
self.table_styles = table_styles
return self
- def set_na_rep(self, na_rep: str) -> StylerRenderer:
- """
- Set the missing data representation on a ``Styler``.
-
- .. versionadded:: 1.0.0
-
- .. deprecated:: 1.3.0
-
- Parameters
- ----------
- na_rep : str
-
- Returns
- -------
- self : Styler
-
- Notes
- -----
- This method is deprecated. See `Styler.format()`
- """
- warnings.warn(
- "this method is deprecated in favour of `Styler.format(na_rep=..)`",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- self.na_rep = na_rep
- return self.format(na_rep=na_rep, precision=self.precision)
-
- def hide_index(
- self,
- subset: Subset | None = None,
- level: Level | list[Level] | None = None,
- names: bool = False,
- ) -> Styler:
- """
- Hide the entire index, or specific keys in the index from rendering.
-
- This method has dual functionality:
-
- - if ``subset`` is ``None`` then the entire index, or specified levels, will
- be hidden whilst displaying all data-rows.
- - if a ``subset`` is given then those specific rows will be hidden whilst the
- index itself remains visible.
-
- .. versionchanged:: 1.3.0
-
- .. deprecated:: 1.4.0
- This method should be replaced by ``hide(axis="index", **kwargs)``
-
- Parameters
- ----------
- subset : label, array-like, IndexSlice, optional
- A valid 1d input or single key along the index axis within
- `DataFrame.loc[<subset>, :]`, to limit ``data`` to *before* applying
- the function.
- level : int, str, list
- The level(s) to hide in a MultiIndex if hiding the entire index. Cannot be
- used simultaneously with ``subset``.
-
- .. versionadded:: 1.4.0
- names : bool
- Whether to hide the index name(s), in the case the index or part of it
- remains visible.
-
- .. versionadded:: 1.4.0
-
- Returns
- -------
- self : Styler
-
- See Also
- --------
- Styler.hide: Hide the entire index / columns, or specific rows / columns.
- """
- warnings.warn(
- 'this method is deprecated in favour of `Styler.hide(axis="index")`',
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- return self.hide(axis="index", level=level, subset=subset, names=names)
-
- def hide_columns(
- self,
- subset: Subset | None = None,
- level: Level | list[Level] | None = None,
- names: bool = False,
- ) -> Styler:
- """
- Hide the column headers or specific keys in the columns from rendering.
-
- This method has dual functionality:
-
- - if ``subset`` is ``None`` then the entire column headers row, or
- specific levels, will be hidden whilst the data-values remain visible.
- - if a ``subset`` is given then those specific columns, including the
- data-values will be hidden, whilst the column headers row remains visible.
-
- .. versionchanged:: 1.3.0
-
- ..deprecated:: 1.4.0
- This method should be replaced by ``hide(axis="columns", **kwargs)``
-
- Parameters
- ----------
- subset : label, array-like, IndexSlice, optional
- A valid 1d input or single key along the columns axis within
- `DataFrame.loc[:, <subset>]`, to limit ``data`` to *before* applying
- the function.
- level : int, str, list
- The level(s) to hide in a MultiIndex if hiding the entire column headers
- row. Cannot be used simultaneously with ``subset``.
-
- .. versionadded:: 1.4.0
- names : bool
- Whether to hide the column index name(s), in the case all column headers,
- or some levels, are visible.
-
- .. versionadded:: 1.4.0
-
- Returns
- -------
- self : Styler
-
- See Also
- --------
- Styler.hide: Hide the entire index / columns, or specific rows / columns.
- """
- warnings.warn(
- 'this method is deprecated in favour of `Styler.hide(axis="columns")`',
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- return self.hide(axis="columns", level=level, subset=subset, names=names)
-
def hide(
self,
subset: Subset | None = None,
@@ -3334,10 +3027,9 @@ def bar(
@Substitution(subset=subset, props=props, color=color.format(default="red"))
def highlight_null(
self,
- color: str | None = None,
+ color: str = "red",
subset: Subset | None = None,
props: str | None = None,
- null_color: str | lib.NoDefault = lib.no_default,
) -> Styler:
"""
Highlight missing values with a style.
@@ -3356,13 +3048,6 @@ def highlight_null(
.. versionadded:: 1.3.0
- null_color : str, default None
- The background color for highlighting.
-
- .. deprecated:: 1.5.0
- Use ``color`` instead. If ``color`` is given ``null_color`` is
- not used.
-
Returns
-------
self : Styler
@@ -3378,17 +3063,6 @@ def highlight_null(
def f(data: DataFrame, props: str) -> np.ndarray:
return np.where(pd.isna(data).to_numpy(), props, "")
- if null_color != lib.no_default:
- warnings.warn(
- "`null_color` is deprecated: use `color` instead",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
-
- if color is None and null_color == lib.no_default:
- color = "red"
- elif color is None and null_color != lib.no_default:
- color = null_color
if props is None:
props = f"background-color: {color};"
return self.apply(f, axis=None, subset=subset, props=props)
diff --git a/pandas/tests/io/formats/style/test_deprecated.py b/pandas/tests/io/formats/style/test_deprecated.py
deleted file mode 100644
index 863c31ed3cccd..0000000000000
--- a/pandas/tests/io/formats/style/test_deprecated.py
+++ /dev/null
@@ -1,170 +0,0 @@
-"""
-modules collects tests for Styler methods which have been deprecated
-"""
-import numpy as np
-import pytest
-
-jinja2 = pytest.importorskip("jinja2")
-
-from pandas import (
- DataFrame,
- IndexSlice,
- NaT,
- Timestamp,
-)
-import pandas._testing as tm
-
-
-@pytest.fixture
-def df():
- return DataFrame({"A": [0, 1], "B": np.random.randn(2)})
-
-
-@pytest.mark.parametrize("axis", ["index", "columns"])
-def test_hide_index_columns(df, axis):
- with tm.assert_produces_warning(FutureWarning):
- getattr(df.style, "hide_" + axis)()
-
-
-def test_set_non_numeric_na():
- # GH 21527 28358
- df = DataFrame(
- {
- "object": [None, np.nan, "foo"],
- "datetime": [None, NaT, Timestamp("20120101")],
- }
- )
-
- with tm.assert_produces_warning(FutureWarning):
- ctx = df.style.set_na_rep("NA")._translate(True, True)
- assert ctx["body"][0][1]["display_value"] == "NA"
- assert ctx["body"][0][2]["display_value"] == "NA"
- assert ctx["body"][1][1]["display_value"] == "NA"
- assert ctx["body"][1][2]["display_value"] == "NA"
-
-
-def test_where_with_one_style(df):
- # GH 17474
- def f(x):
- return x > 0.5
-
- style1 = "foo: bar"
-
- with tm.assert_produces_warning(FutureWarning):
- result = df.style.where(f, style1)._compute().ctx
- expected = {
- (r, c): [("foo", "bar")]
- for r, row in enumerate(df.index)
- for c, col in enumerate(df.columns)
- if f(df.loc[row, col])
- }
- assert result == expected
-
-
-@pytest.mark.parametrize(
- "slice_",
- [
- IndexSlice[:],
- IndexSlice[:, ["A"]],
- IndexSlice[[1], :],
- IndexSlice[[1], ["A"]],
- IndexSlice[:2, ["A", "B"]],
- ],
-)
-def test_where_subset(df, slice_):
- # GH 17474
- def f(x):
- return x > 0.5
-
- style1 = "foo: bar"
- style2 = "baz: foo"
-
- with tm.assert_produces_warning(FutureWarning):
- res = df.style.where(f, style1, style2, subset=slice_)._compute().ctx
- expected = {
- (r, c): [("foo", "bar") if f(df.loc[row, col]) else ("baz", "foo")]
- for r, row in enumerate(df.index)
- for c, col in enumerate(df.columns)
- if row in df.loc[slice_].index and col in df.loc[slice_].columns
- }
- assert res == expected
-
-
-def test_where_subset_compare_with_applymap(df):
- # GH 17474
- def f(x):
- return x > 0.5
-
- style1 = "foo: bar"
- style2 = "baz: foo"
-
- def g(x):
- return style1 if f(x) else style2
-
- slices = [
- IndexSlice[:],
- IndexSlice[:, ["A"]],
- IndexSlice[[1], :],
- IndexSlice[[1], ["A"]],
- IndexSlice[:2, ["A", "B"]],
- ]
-
- for slice_ in slices:
- with tm.assert_produces_warning(FutureWarning):
- result = df.style.where(f, style1, style2, subset=slice_)._compute().ctx
- expected = df.style.applymap(g, subset=slice_)._compute().ctx
- assert result == expected
-
-
-def test_where_kwargs():
- df = DataFrame([[1, 2], [3, 4]])
-
- def f(x, val):
- return x > val
-
- with tm.assert_produces_warning(FutureWarning):
- res = df.style.where(f, "color:green;", "color:red;", val=2)._compute().ctx
- expected = {
- (0, 0): [("color", "red")],
- (0, 1): [("color", "red")],
- (1, 0): [("color", "green")],
- (1, 1): [("color", "green")],
- }
- assert res == expected
-
-
-def test_set_na_rep():
- # GH 21527 28358
- df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
-
- with tm.assert_produces_warning(FutureWarning):
- ctx = df.style.set_na_rep("NA")._translate(True, True)
- assert ctx["body"][0][1]["display_value"] == "NA"
- assert ctx["body"][0][2]["display_value"] == "NA"
-
- with tm.assert_produces_warning(FutureWarning):
- ctx = (
- df.style.set_na_rep("NA")
- .format(None, na_rep="-", subset=["B"])
- ._translate(True, True)
- )
- assert ctx["body"][0][1]["display_value"] == "NA"
- assert ctx["body"][0][2]["display_value"] == "-"
-
-
-def test_precision(df):
- styler = df.style
- with tm.assert_produces_warning(FutureWarning):
- s2 = styler.set_precision(1)
- assert styler is s2
- assert styler.precision == 1
-
-
-def test_render(df):
- with tm.assert_produces_warning(FutureWarning):
- df.style.render()
-
-
-def test_null_color(df):
- with tm.assert_produces_warning(FutureWarning):
- df.style.highlight_null(null_color="blue")
diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py
index 77a996b1f92d6..35cc977368c5d 100644
--- a/pandas/tests/io/formats/style/test_style.py
+++ b/pandas/tests/io/formats/style/test_style.py
@@ -272,8 +272,6 @@ def test_copy(comprehensive, render, deepcopy, mi_styler, mi_styler_comp):
styler.to_html()
excl = [
- "na_rep", # deprecated
- "precision", # deprecated
"cellstyle_map", # render time vars..
"cellstyle_map_columns",
"cellstyle_map_index",
@@ -333,8 +331,6 @@ def test_clear(mi_styler_comp):
"cellstyle_map", # execution time only
"cellstyle_map_columns", # execution time only
"cellstyle_map_index", # execution time only
- "precision", # deprecated
- "na_rep", # deprecated
"template_latex", # render templates are class level
"template_html",
"template_html_style",
| Removed deprecations from Styler:
- `where()`
- `hide_index`
- `hide_columns`
- `render`
- `set_na_rep`
- `set_precision`
- `null_color` kwarg in `highlight_null`
| https://api.github.com/repos/pandas-dev/pandas/pulls/49397 | 2022-10-30T06:43:49Z | 2022-10-31T17:49:35Z | 2022-10-31T17:49:35Z | 2022-11-01T06:33:33Z |
BUG: Fixing DataFrame.Update crashes when NaT present | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 032bcf09244e5..1ca513e8f5e6a 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -662,6 +662,7 @@ Missing
- Bug in :meth:`Index.equals` raising ``TypeError`` when :class:`Index` consists of tuples that contain ``NA`` (:issue:`48446`)
- Bug in :meth:`Series.map` caused incorrect result when data has NaNs and defaultdict mapping was used (:issue:`48813`)
- Bug in :class:`NA` raising a ``TypeError`` instead of return :class:`NA` when performing a binary operation with a ``bytes`` object (:issue:`49108`)
+- Bug in :meth:`DataFrame.update` with ``overwrite=False`` raising ``TypeError`` when ``self`` has column with ``NaT`` values and column not present in ``other`` (:issue:`16713`)
MultiIndex
^^^^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b465761c45c78..ca763151e29a0 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8056,11 +8056,12 @@ def update(
if not isinstance(other, DataFrame):
other = DataFrame(other)
- other = other.reindex_like(self)
+ other = other.reindex(self.index)
- for col in self.columns:
+ for col in self.columns.intersection(other.columns):
this = self[col]._values
that = other[col]._values
+
if filter_func is not None:
with np.errstate(all="ignore"):
mask = ~filter_func(this) | isna(that)
diff --git a/pandas/tests/frame/methods/test_update.py b/pandas/tests/frame/methods/test_update.py
index a35530100a425..40f87f1382625 100644
--- a/pandas/tests/frame/methods/test_update.py
+++ b/pandas/tests/frame/methods/test_update.py
@@ -166,3 +166,13 @@ def test_update_modify_view(self, using_copy_on_write):
tm.assert_frame_equal(result_view, df2_orig)
else:
tm.assert_frame_equal(result_view, expected)
+
+ def test_update_dt_column_with_NaT_create_column(self):
+ # GH#16713
+ df = DataFrame({"A": [1, None], "B": [pd.NaT, pd.to_datetime("2016-01-01")]})
+ df2 = DataFrame({"A": [2, 3]})
+ df.update(df2, overwrite=False)
+ expected = DataFrame(
+ {"A": [1.0, 3.0], "B": [pd.NaT, pd.to_datetime("2016-01-01")]}
+ )
+ tm.assert_frame_equal(df, expected)
| - [x] closes #16713 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49395 | 2022-10-30T05:48:29Z | 2022-11-15T14:19:48Z | 2022-11-15T14:19:48Z | 2022-11-15T14:19:48Z |
DEPR: Timestamp comparison with pydate | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index b1ff0ec305dc3..c875eb8c94119 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -283,6 +283,7 @@ Removal of prior version deprecations/changes
- Changed behavior of :class:`Index` constructor when passed a ``SparseArray`` or ``SparseDtype`` to retain that dtype instead of casting to ``numpy.ndarray`` (:issue:`43930`)
- Removed the deprecated ``base`` and ``loffset`` arguments from :meth:`pandas.DataFrame.resample`, :meth:`pandas.Series.resample` and :class:`pandas.Grouper`. Use ``offset`` or ``origin`` instead (:issue:`31809`)
- Changed behavior of :meth:`DataFrame.any` and :meth:`DataFrame.all` with ``bool_only=True``; object-dtype columns with all-bool values will no longer be included, manually cast to ``bool`` dtype first (:issue:`46188`)
+- Changed behavior of comparison of a :class:`Timestamp` with a ``datetime.date`` object; these now compare as un-equal and raise on inequality comparisons, matching the ``datetime.datetime`` behavior (:issue:`36131`)
- Enforced deprecation of silently dropping columns that raised a ``TypeError`` in :class:`Series.transform` and :class:`DataFrame.transform` when used with a list or dictionary (:issue:`43740`)
-
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 3c3bb8496aa6e..1a2e2760d3d8d 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -364,15 +364,13 @@ cdef class _Timestamp(ABCTimestamp):
# which incorrectly drops tz and normalizes to midnight
# before comparing
# We follow the stdlib datetime behavior of never being equal
- warnings.warn(
- "Comparison of Timestamp with datetime.date is deprecated in "
- "order to match the standard library behavior. "
- "In a future version these will be considered non-comparable. "
- "Use 'ts == pd.Timestamp(date)' or 'ts.date() == date' instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
+ if op == Py_EQ:
+ return False
+ elif op == Py_NE:
+ return True
+ raise TypeError("Cannot compare Timestamp with datetime.date. "
+ "Use ts == pd.Timestamp(date) or ts.date() == date instead."
)
- return NotImplemented
else:
return NotImplemented
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 88ae431fb1baf..ad3cec5824619 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1462,9 +1462,6 @@ def test_loc_setitem_datetime_coercion(self):
assert Timestamp("2008-08-08") == df.loc[0, "c"]
assert Timestamp("2008-08-08") == df.loc[1, "c"]
df.loc[2, "c"] = date(2005, 5, 5)
- with tm.assert_produces_warning(FutureWarning):
- # Comparing Timestamp to date obj is deprecated
- assert Timestamp("2005-05-05") == df.loc[2, "c"]
assert Timestamp("2005-05-05").date() == df.loc[2, "c"]
@pytest.mark.parametrize("idxer", ["var", ["var"]])
diff --git a/pandas/tests/scalar/timestamp/test_comparisons.py b/pandas/tests/scalar/timestamp/test_comparisons.py
index 2c9b029bf109e..ad629604d1bc9 100644
--- a/pandas/tests/scalar/timestamp/test_comparisons.py
+++ b/pandas/tests/scalar/timestamp/test_comparisons.py
@@ -156,36 +156,22 @@ def test_compare_date(self, tz):
# GH#36131 comparing Timestamp with date object is deprecated
ts = Timestamp("2021-01-01 00:00:00.00000", tz=tz)
dt = ts.to_pydatetime().date()
- # These are incorrectly considered as equal because they
- # dispatch to the date comparisons which truncates ts
+ # in 2.0 we disallow comparing pydate objects with Timestamps,
+ # following the stdlib datetime behavior.
+ msg = "Cannot compare Timestamp with datetime.date"
for left, right in [(ts, dt), (dt, ts)]:
- with tm.assert_produces_warning(FutureWarning):
- assert left == right
- with tm.assert_produces_warning(FutureWarning):
- assert not left != right
- with tm.assert_produces_warning(FutureWarning):
- assert not left < right
- with tm.assert_produces_warning(FutureWarning):
- assert left <= right
- with tm.assert_produces_warning(FutureWarning):
- assert not left > right
- with tm.assert_produces_warning(FutureWarning):
- assert left >= right
-
- # Once the deprecation is enforced, the following assertions
- # can be enabled:
- # assert not left == right
- # assert left != right
- #
- # with pytest.raises(TypeError):
- # left < right
- # with pytest.raises(TypeError):
- # left <= right
- # with pytest.raises(TypeError):
- # left > right
- # with pytest.raises(TypeError):
- # left >= right
+ assert not left == right
+ assert left != right
+
+ with pytest.raises(TypeError, match=msg):
+ left < right
+ with pytest.raises(TypeError, match=msg):
+ left <= right
+ with pytest.raises(TypeError, match=msg):
+ left > right
+ with pytest.raises(TypeError, match=msg):
+ left >= right
def test_cant_compare_tz_naive_w_aware(self, utc_fixture):
# see GH#1404
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49394 | 2022-10-30T03:35:52Z | 2022-10-31T21:48:19Z | 2022-10-31T21:48:19Z | 2022-10-31T22:06:37Z |
Make the conversion from dtype to subclass just a little faster | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index d8300bb29c274..590fb187e9a1f 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -106,7 +106,6 @@
is_scalar,
is_signed_integer_dtype,
is_string_dtype,
- is_unsigned_integer_dtype,
needs_i8_conversion,
pandas_dtype,
validate_all_hashable,
@@ -591,20 +590,20 @@ def _dtype_to_subclass(cls, dtype: DtypeObj):
return TimedeltaIndex
- elif is_float_dtype(dtype):
+ elif dtype.kind == "f":
from pandas.core.api import Float64Index
return Float64Index
- elif is_unsigned_integer_dtype(dtype):
+ elif dtype.kind == "u":
from pandas.core.api import UInt64Index
return UInt64Index
- elif is_signed_integer_dtype(dtype):
+ elif dtype.kind == "i":
from pandas.core.api import Int64Index
return Int64Index
- elif dtype == _dtype_obj:
+ elif dtype.kind == "O":
# NB: assuming away MultiIndex
return Index
| Calling `is_float_dtype` is a really "safe" way to take an array or a dtype and know if it is a subclass.
But I don't think you need to do that. Checking the `kind` should be enough. I get about 5-10% faster on the benchmarks shown. Mostly reducing 12us to 11.5 us. I think it is real.
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49393 | 2022-10-29T22:07:55Z | 2022-11-02T01:02:18Z | 2022-11-02T01:02:18Z | 2022-11-02T01:02:47Z |
DEPR: internals | diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml
index 5540ba01a8f36..c2f40dfbfb250 100644
--- a/ci/deps/actions-38-minimum_versions.yaml
+++ b/ci/deps/actions-38-minimum_versions.yaml
@@ -25,7 +25,7 @@ dependencies:
- blosc=1.21.0
- bottleneck=1.3.2
- brotlipy=0.7.0
- - fastparquet=0.4.0
+ - fastparquet=0.6.3
- fsspec=2021.07.0
- html5lib=1.1
- hypothesis=6.13.0
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 780318769f04e..d7325f6014c86 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -124,6 +124,8 @@ Optional libraries below the lowest tested version may still work, but are not c
+=================+=================+=========+
| pyarrow | 6.0.0 | X |
+-----------------+-----------------+---------+
+| fastparquet | 0.6.3 | X |
++-----------------+-----------------+---------+
See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for more.
@@ -157,6 +159,7 @@ Deprecations
Removal of prior version deprecations/changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- Removed deprecated :class:`CategoricalBlock`, :meth:`Block.is_categorical`, require datetime64 and timedelta64 values to be wrapped in :class:`DatetimeArray` or :class:`TimedeltaArray` before passing to :meth:`Block.make_block_same_class`, require ``DatetimeTZBlock.values`` to have the correct ndim when passing to the :class:`BlockManager` constructor, and removed the "fastpath" keyword from the :class:`SingleBlockManager` constructor (:issue:`40226`, :issue:`40571`)
- Removed deprecated module ``pandas.core.index`` (:issue:`30193`)
- Removed deprecated :meth:`Categorical.to_dense`, use ``np.asarray(cat)`` instead (:issue:`32639`)
- Removed deprecated :meth:`Categorical.take_nd` (:issue:`27745`)
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index 856fb5e4cb66b..1bfef131aac1d 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -16,7 +16,7 @@
"blosc": "1.21.0",
"bottleneck": "1.3.2",
"brotli": "0.7.0",
- "fastparquet": "0.4.0",
+ "fastparquet": "0.6.3",
"fsspec": "2021.07.0",
"html5lib": "1.1",
"hypothesis": "6.13.0",
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py
index ea69b567611e4..0797e62de7a9f 100644
--- a/pandas/core/internals/__init__.py
+++ b/pandas/core/internals/__init__.py
@@ -38,22 +38,3 @@
# this is preserved here for downstream compatibility (GH-33892)
"create_block_manager_from_blocks",
]
-
-
-def __getattr__(name: str):
- import warnings
-
- from pandas.util._exceptions import find_stack_level
-
- if name == "CategoricalBlock":
- warnings.warn(
- "CategoricalBlock is deprecated and will be removed in a future version. "
- "Use ExtensionBlock instead.",
- DeprecationWarning,
- stacklevel=find_stack_level(),
- )
- from pandas.core.internals.blocks import CategoricalBlock
-
- return CategoricalBlock
-
- raise AttributeError(f"module 'pandas.core.internals' has no attribute '{name}'")
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 2158d80ec1977..4cc8a12de6821 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -59,7 +59,6 @@
is_string_dtype,
)
from pandas.core.dtypes.dtypes import (
- CategoricalDtype,
ExtensionDtype,
PandasDtype,
PeriodDtype,
@@ -175,18 +174,6 @@ def _can_hold_na(self) -> bool:
return dtype.kind not in ["b", "i", "u"]
return dtype._can_hold_na
- @final
- @cache_readonly
- def is_categorical(self) -> bool:
- warnings.warn(
- "Block.is_categorical is deprecated and will be removed in a "
- "future version. Use isinstance(block.values, Categorical) "
- "instead. See https://github.com/pandas-dev/pandas/issues/40226",
- DeprecationWarning,
- stacklevel=find_stack_level(),
- )
- return isinstance(self.values, Categorical)
-
@final
@property
def is_bool(self) -> bool:
@@ -240,24 +227,11 @@ def make_block_same_class(
self, values, placement: BlockPlacement | None = None
) -> Block:
"""Wrap given values in a block of same type as self."""
+ # Pre-2.0 we called ensure_wrapped_if_datetimelike because fastparquet
+ # relied on it, as of 2.0 the caller is responsible for this.
if placement is None:
placement = self._mgr_locs
- if values.dtype.kind in ["m", "M"]:
-
- new_values = ensure_wrapped_if_datetimelike(values)
- if new_values is not values:
- # TODO(2.0): remove once fastparquet has stopped relying on it
- warnings.warn(
- "In a future version, Block.make_block_same_class will "
- "assume that datetime64 and timedelta64 ndarrays have "
- "already been cast to DatetimeArray and TimedeltaArray, "
- "respectively.",
- DeprecationWarning,
- stacklevel=find_stack_level(),
- )
- values = new_values
-
# We assume maybe_coerce_values has already been called
return type(self)(values, placement=placement, ndim=self.ndim)
@@ -1649,7 +1623,7 @@ class ExtensionBlock(libinternals.Block, EABackedBlock):
Notes
-----
This holds all 3rd-party extension array types. It's also the immediate
- parent class for our internal extension types' blocks, CategoricalBlock.
+ parent class for our internal extension types' blocks.
ExtensionArrays are limited to 1-D.
"""
@@ -2066,17 +2040,6 @@ def convert(
return [self.make_block(res_values)]
-class CategoricalBlock(ExtensionBlock):
- # this Block type is kept for backwards-compatibility
- __slots__ = ()
-
- # GH#43232, GH#43334 self.values.dtype can be changed inplace until 2.0,
- # so this cannot be cached
- @property
- def dtype(self) -> DtypeObj:
- return self.values.dtype
-
-
# -----------------------------------------------------------------
# Constructor Helpers
@@ -2132,8 +2095,6 @@ def get_block_type(dtype: DtypeObj):
if isinstance(dtype, SparseDtype):
# Need this first(ish) so that Sparse[datetime] is sparse
cls = ExtensionBlock
- elif isinstance(dtype, CategoricalDtype):
- cls = CategoricalBlock
elif vtype is Timestamp:
cls = DatetimeTZBlock
elif isinstance(dtype, PeriodDtype):
@@ -2374,7 +2335,7 @@ def external_values(values: ArrayLike) -> ArrayLike:
elif isinstance(values, (DatetimeArray, TimedeltaArray)):
# NB: for datetime64tz this is different from np.asarray(values), since
# that returns an object-dtype ndarray of Timestamps.
- # Avoid FutureWarning in .astype in casting from dt64tz to dt64
+ # Avoid raising in .astype in casting from dt64tz to dt64
return values._data
else:
return values
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index f21c02a7823ae..f515dbeeb90c6 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -74,7 +74,6 @@
)
from pandas.core.internals.blocks import (
Block,
- DatetimeTZBlock,
NumpyBlock,
ensure_block_shape,
extend_blocks,
@@ -1008,27 +1007,9 @@ def __init__(
f"Number of Block dimensions ({block.ndim}) must equal "
f"number of axes ({self.ndim})"
)
- if isinstance(block, DatetimeTZBlock) and block.values.ndim == 1:
- # TODO(2.0): remove once fastparquet no longer needs this
- warnings.warn(
- "In a future version, the BlockManager constructor "
- "will assume that a DatetimeTZBlock with block.ndim==2 "
- "has block.values.ndim == 2.",
- DeprecationWarning,
- stacklevel=find_stack_level(),
- )
-
- # error: Incompatible types in assignment (expression has type
- # "Union[ExtensionArray, ndarray]", variable has type
- # "DatetimeArray")
- block.values = ensure_block_shape( # type: ignore[assignment]
- block.values, self.ndim
- )
- try:
- block._cache.clear()
- except AttributeError:
- # _cache not initialized
- pass
+ # As of 2.0, the caller is responsible for ensuring that
+ # DatetimeTZBlock with block.ndim == 2 has block.values.ndim ==2;
+ # previously there was a special check for fastparquet compat.
self._verify_integrity()
@@ -1876,20 +1857,11 @@ def __init__(
axis: Index,
refs: list[weakref.ref | None] | None = None,
verify_integrity: bool = False,
- fastpath=lib.no_default,
) -> None:
# Assertions disabled for performance
# assert isinstance(block, Block), type(block)
# assert isinstance(axis, Index), type(axis)
- if fastpath is not lib.no_default:
- warnings.warn(
- "The `fastpath` keyword is deprecated and will be removed "
- "in a future version.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
-
self.axes = [axis]
self.blocks = (block,)
self.refs = refs
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index b64220d90f9a2..ecf247efd74bf 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -356,12 +356,6 @@ def test_split(self):
for res, exp in zip(result, expected):
assert_block_equal(res, exp)
- def test_is_categorical_deprecated(self, fblock):
- # GH#40571
- blk = fblock
- with tm.assert_produces_warning(DeprecationWarning):
- blk.is_categorical
-
class TestBlockManager:
def test_attrs(self):
@@ -1432,11 +1426,3 @@ def test_make_block_no_pandas_array(block_maker):
)
assert result.dtype.kind in ["i", "u"]
assert result.is_extension is False
-
-
-def test_single_block_manager_fastpath_deprecated():
- # GH#33092
- ser = Series(range(3))
- blk = ser._data.blocks[0]
- with tm.assert_produces_warning(FutureWarning):
- SingleBlockManager(blk, ser.index, fastpath=True)
diff --git a/pandas/tests/io/__init__.py b/pandas/tests/io/__init__.py
index c99d03afc8320..15294fd0cabbc 100644
--- a/pandas/tests/io/__init__.py
+++ b/pandas/tests/io/__init__.py
@@ -5,9 +5,6 @@
pytest.mark.filterwarnings(
"ignore:PY_SSIZE_T_CLEAN will be required.*:DeprecationWarning"
),
- pytest.mark.filterwarnings(
- "ignore:Block.is_categorical is deprecated:DeprecationWarning"
- ),
pytest.mark.filterwarnings(
r"ignore:`np\.bool` is a deprecated alias:DeprecationWarning"
),
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index ec48357e0395d..145682b484100 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -317,9 +317,6 @@ def test_read_expands_user_home_dir(
),
],
)
- @pytest.mark.filterwarnings(
- "ignore:CategoricalBlock is deprecated:DeprecationWarning"
- )
@pytest.mark.filterwarnings( # pytables np.object usage
"ignore:`np.object` is a deprecated alias:DeprecationWarning"
)
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index 236a7f9e1a9c1..eaeb769a94c38 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -15,7 +15,6 @@
@filter_sparse
@pytest.mark.single_cpu
-@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:DeprecationWarning")
class TestFeather:
def check_error_on_write(self, df, exc, err_msg):
# check that we are raising the exception
diff --git a/pandas/tests/io/test_orc.py b/pandas/tests/io/test_orc.py
index 0bb320907b813..a0acf160854ac 100644
--- a/pandas/tests/io/test_orc.py
+++ b/pandas/tests/io/test_orc.py
@@ -14,10 +14,6 @@
pytest.importorskip("pyarrow.orc")
-pytestmark = pytest.mark.filterwarnings(
- "ignore:RangeIndex.* is deprecated:DeprecationWarning"
-)
-
@pytest.fixture
def dirpath(datapath):
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 9c85ab4ba4a57..75683a1d96bfb 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -53,11 +53,6 @@
_HAVE_FASTPARQUET = False
-pytestmark = pytest.mark.filterwarnings(
- "ignore:RangeIndex.* is deprecated:DeprecationWarning"
-)
-
-
# TODO(ArrayManager) fastparquet relies on BlockManager internals
# setup engines & skips
@@ -688,7 +683,6 @@ def test_read_empty_array(self, pa, dtype):
)
-@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:DeprecationWarning")
class TestParquetPyArrow(Base):
def test_basic(self, pa, df_full):
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index 119ffd8cfd5a1..cea9484fbbf80 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -234,7 +234,6 @@ def test_geopandas():
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
-@pytest.mark.filterwarnings("ignore:RangeIndex.* is deprecated:DeprecationWarning")
def test_pyarrow(df):
pyarrow = import_module("pyarrow")
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49392 | 2022-10-29T19:45:02Z | 2022-11-01T22:51:56Z | 2022-11-01T22:51:56Z | 2022-11-01T23:01:46Z |
DEPR: disallow unit-less dt64 dtype in astype | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 1f245b585df48..2cad922a71a3c 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -175,6 +175,7 @@ Removal of prior version deprecations/changes
- Removed deprecated :meth:`DatetimeIndex.to_perioddelta`, Use ``dtindex - dtindex.to_period(freq).to_timestamp()`` instead (:issue:`34853`)
- Enforced deprecation disallowing passing a timezone-aware :class:`Timestamp` and ``dtype="datetime64[ns]"`` to :class:`Series` or :class:`DataFrame` constructors (:issue:`41555`)
- Enforced deprecation disallowing passing a sequence of timezone-aware values and ``dtype="datetime64[ns]"`` to to :class:`Series` or :class:`DataFrame` constructors (:issue:`41555`)
+- Enforced deprecation disallowing unit-less "datetime64" dtype in :meth:`Series.astype` and :meth:`DataFrame.astype` (:issue:`47844`)
- Enforced deprecation disallowing using ``.astype`` to convert a ``datetime64[ns]`` :class:`Series`, :class:`DataFrame`, or :class:`DatetimeIndex` to timezone-aware dtype, use ``obj.tz_localize`` or ``ser.dt.tz_localize`` instead (:issue:`39258`)
- Enforced deprecation disallowing using ``.astype`` to convert a timezone-aware :class:`Series`, :class:`DataFrame`, or :class:`DatetimeIndex` to timezone-naive ``datetime64[ns]`` dtype, use ``obj.tz_localize(None)`` or ``obj.tz_convert("UTC").tz_localize(None)`` instead (:issue:`39258`)
- Removed Date parser functions :func:`~pandas.io.date_converters.parse_date_time`,
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 8fc18691fb17e..ca54ab163ab64 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -683,15 +683,10 @@ def astype(self, dtype, copy: bool = True):
and dtype != self.dtype
and is_unitless(dtype)
):
- # TODO(2.0): just fall through to dtl.DatetimeLikeArrayMixin.astype
- warnings.warn(
- "Passing unit-less datetime64 dtype to .astype is deprecated "
- "and will raise in a future version. Pass 'datetime64[ns]' instead",
- FutureWarning,
- stacklevel=find_stack_level(),
+ raise TypeError(
+ "Casting to unit-less dtype 'datetime64' is not supported. "
+ "Pass e.g. 'datetime64[ns]' instead."
)
- # unit conversion e.g. datetime64[s]
- return self._ndarray.astype(dtype)
elif is_period_dtype(dtype):
return self.to_period(freq=dtype.freq)
diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py
index 9b57f0f634a6c..1423581555ee6 100644
--- a/pandas/tests/series/methods/test_astype.py
+++ b/pandas/tests/series/methods/test_astype.py
@@ -30,18 +30,20 @@
class TestAstypeAPI:
- def test_astype_unitless_dt64_deprecated(self):
+ def test_astype_unitless_dt64_raises(self):
# GH#47844
ser = Series(["1970-01-01", "1970-01-01", "1970-01-01"], dtype="datetime64[ns]")
+ df = ser.to_frame()
- msg = "Passing unit-less datetime64 dtype to .astype is deprecated and "
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = ser.astype(np.datetime64)
- tm.assert_series_equal(ser, res)
-
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = ser.astype("datetime64")
- tm.assert_series_equal(ser, res)
+ msg = "Casting to unit-less dtype 'datetime64' is not supported"
+ with pytest.raises(TypeError, match=msg):
+ ser.astype(np.datetime64)
+ with pytest.raises(TypeError, match=msg):
+ df.astype(np.datetime64)
+ with pytest.raises(TypeError, match=msg):
+ ser.astype("datetime64")
+ with pytest.raises(TypeError, match=msg):
+ df.astype("datetime64")
def test_arg_for_errors_in_astype(self):
# see GH#14878
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49391 | 2022-10-29T19:44:39Z | 2022-10-31T18:13:35Z | 2022-10-31T18:13:35Z | 2022-10-31T18:17:16Z |
ENH Guess %Y-%m format | diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 469e0721f1207..1312124cfb77b 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -1011,10 +1011,11 @@ def guess_datetime_format(dt_str: str, bint dayfirst=False) -> str | None:
break
# Only consider it a valid guess if we have a year, month and day,
- # unless it's %Y which is both common and unambiguous.
+ # unless it's %Y or %Y-%m which conform with ISO8601. Note that we don't
+ # make an exception for %Y%m because it's explicitly not considered ISO8601.
if (
len({'year', 'month', 'day'} & found_attrs) != 3
- and format_guess != ['%Y']
+ and format_guess not in (['%Y'], ['%Y', None, '%m'])
):
return None
diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py
index a4e12315d34e0..f47dd2e725aec 100644
--- a/pandas/tests/tslibs/test_parsing.py
+++ b/pandas/tests/tslibs/test_parsing.py
@@ -148,6 +148,8 @@ def test_parsers_month_freq(date_str, expected):
("20111230", "%Y%m%d"),
("2011-12-30", "%Y-%m-%d"),
("2011", "%Y"),
+ ("2011-01", "%Y-%m"),
+ ("2011/01", "%Y/%m"),
("30-12-2011", "%d-%m-%Y"),
("2011-12-30 00:00:00", "%Y-%m-%d %H:%M:%S"),
("2011-12-30T00:00:00", "%Y-%m-%dT%H:%M:%S"),
@@ -215,6 +217,7 @@ def test_guess_datetime_format_with_locale_specific_formats(string, fmt):
"this_is_not_a_datetime",
"51a",
"13/2019",
+ "202001", # YYYYMM isn't ISO8601
],
)
def test_guess_datetime_format_invalid_inputs(invalid_dt):
| Broken off from https://github.com/pandas-dev/pandas/pull/49024, opening it separately as it's small and self-contained | https://api.github.com/repos/pandas-dev/pandas/pulls/49389 | 2022-10-29T13:47:34Z | 2022-10-29T17:50:54Z | 2022-10-29T17:50:54Z | 2022-10-29T17:50:57Z |
DOC: add name parameter to the IntervalIndex for #48911 | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 0004bc92b349e..288504c4f2b99 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -49,6 +49,7 @@ Other enhancements
- Fix ``test`` optional_extra by adding missing test package ``pytest-asyncio`` (:issue:`48361`)
- :func:`DataFrame.astype` exception message thrown improved to include column name when type conversion is not possible. (:issue:`47571`)
- :meth:`DataFrame.to_json` now supports a ``mode`` keyword with supported inputs 'w' and 'a'. Defaulting to 'w', 'a' can be used when lines=True and orient='records' to append record oriented json lines to an existing json file. (:issue:`35849`)
+- Added ``name`` parameter to :meth:`IntervalIndex.from_breaks`, :meth:`IntervalIndex.from_arrays` and :meth:`IntervalIndex.from_tuples` (:issue:`48911`)
.. ---------------------------------------------------------------------------
.. _whatsnew_200.notable_bug_fixes:
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 2bc6c9174af81..5e888f4babc95 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -383,7 +383,8 @@ def _from_factorized(
Left and right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
- or neither.
+ or neither.\
+ %(name)s
copy : bool, default False
Copy the data.
dtype : dtype or None, default None
@@ -408,6 +409,7 @@ def _from_factorized(
_interval_shared_docs["from_breaks"]
% {
"klass": "IntervalArray",
+ "name": "",
"examples": textwrap.dedent(
"""\
Examples
@@ -443,7 +445,8 @@ def from_breaks(
Right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
- or neither.
+ or neither.\
+ %(name)s
copy : bool, default False
Copy the data.
dtype : dtype, optional
@@ -485,6 +488,7 @@ def from_breaks(
_interval_shared_docs["from_arrays"]
% {
"klass": "IntervalArray",
+ "name": "",
"examples": textwrap.dedent(
"""\
>>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])
@@ -520,7 +524,8 @@ def from_arrays(
Array of tuples.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
- or neither.
+ or neither.\
+ %(name)s
copy : bool, default False
By-default copy the data, this is compat only and ignored.
dtype : dtype or None, default None
@@ -547,6 +552,7 @@ def from_arrays(
_interval_shared_docs["from_tuples"]
% {
"klass": "IntervalArray",
+ "name": "",
"examples": textwrap.dedent(
"""\
Examples
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 8507280a6cc8d..fa10aee4b6c72 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -236,6 +236,11 @@ def __new__(
_interval_shared_docs["from_breaks"]
% {
"klass": "IntervalIndex",
+ "name": textwrap.dedent(
+ """
+ name : str, optional
+ Name of the resulting IntervalIndex."""
+ ),
"examples": textwrap.dedent(
"""\
Examples
@@ -266,6 +271,11 @@ def from_breaks(
_interval_shared_docs["from_arrays"]
% {
"klass": "IntervalIndex",
+ "name": textwrap.dedent(
+ """
+ name : str, optional
+ Name of the resulting IntervalIndex."""
+ ),
"examples": textwrap.dedent(
"""\
Examples
@@ -297,6 +307,11 @@ def from_arrays(
_interval_shared_docs["from_tuples"]
% {
"klass": "IntervalIndex",
+ "name": textwrap.dedent(
+ """
+ name : str, optional
+ Name of the resulting IntervalIndex."""
+ ),
"examples": textwrap.dedent(
"""\
Examples
| - [x] closes #48911
- [x] Added an entry in the latest `doc/source/whatsnew/v2.0.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49386 | 2022-10-29T11:03:14Z | 2022-10-31T14:35:02Z | 2022-10-31T14:35:02Z | 2023-01-12T12:30:33Z |
asv groupby.string smaller_faster | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 2225cbd74d718..f369c095e59b1 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -683,7 +683,7 @@ class String:
def setup(self, dtype, method):
cols = list("abcdefghjkl")
self.df = DataFrame(
- np.random.randint(0, 100, size=(1_000_000, len(cols))),
+ np.random.randint(0, 100, size=(10_000, len(cols))),
columns=cols,
dtype=dtype,
)
| xref #44450
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Decreased the size, should take significantly less time | https://api.github.com/repos/pandas-dev/pandas/pulls/49385 | 2022-10-29T07:21:39Z | 2022-10-31T18:17:11Z | 2022-10-31T18:17:11Z | 2022-11-01T05:32:22Z |
DOC: add missing whatsnew for #49321 | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 0004bc92b349e..04cbda4e762ea 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -258,6 +258,8 @@ Removal of prior version deprecations/changes
- Enforced disallowing a tuple of column labels into :meth:`.DataFrameGroupBy.__getitem__` (:issue:`30546`)
- Enforced disallowing setting values with ``.loc`` using a positional slice. Use ``.loc`` with labels or ``.iloc`` with positions instead (:issue:`31840`)
- Removed setting Categorical._codes directly (:issue:`41429`)
+- Removed setting Categorical.categories directly (:issue:`47834`)
+- Removed argument ``inplace`` from :meth:`Categorical.add_categories`, :meth:`Categorical.remove_categories`, :meth:`Categorical.set_categories`, :meth:`Categorical.rename_categories`, :meth:`Categorical.reorder_categories`, :meth:`Categorical.set_ordered`, :meth:`Categorical.as_ordered`, :meth:`Categorical.as_unordered` (:issue:`37981`, :issue:`41118`, :issue:`41133`, :issue:`47834`)
- Enforced :meth:`Rolling.count` with ``min_periods=None`` to default to the size of the window (:issue:`31302`)
- Renamed ``fname`` to ``path`` in :meth:`DataFrame.to_parquet`, :meth:`DataFrame.to_stata` and :meth:`DataFrame.to_feather` (:issue:`30338`)
- Enforced disallowing indexing a :class:`Series` with a single item list with a slice (e.g. ``ser[[slice(0, 2)]]``). Either convert the list to tuple, or pass the slice directly instead (:issue:`31333`)
| - [x] Added an entry in the latest `doc/source/whatsnew/v2.0.0.rst` file if fixing a bug or adding a new feature.
Follow up to #49321 to add missing whatsnew.
cc @jbrockmendel | https://api.github.com/repos/pandas-dev/pandas/pulls/49383 | 2022-10-29T02:10:47Z | 2022-10-29T17:11:13Z | 2022-10-29T17:11:13Z | 2022-11-03T01:38:12Z |
STYLE: fix pylint unneeded-not warnings | diff --git a/pandas/core/arrays/arrow/extension_types.py b/pandas/core/arrays/arrow/extension_types.py
index c9badb2bd305b..25f597af5e3cf 100644
--- a/pandas/core/arrays/arrow/extension_types.py
+++ b/pandas/core/arrays/arrow/extension_types.py
@@ -35,6 +35,9 @@ def __eq__(self, other):
else:
return NotImplemented
+ def __ne__(self, other) -> bool:
+ return not self == other
+
def __hash__(self) -> int:
return hash((str(self), self.freq))
@@ -91,6 +94,9 @@ def __eq__(self, other):
else:
return NotImplemented
+ def __ne__(self, other) -> bool:
+ return not self == other
+
def __hash__(self) -> int:
return hash((str(self), str(self.subtype), self.closed))
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index 0f93027f3f775..9244a8c5e672d 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -98,7 +98,7 @@ def __init__(
self.data: DataFrame = data
self.index: Index = data.index
self.columns: Index = data.columns
- if not isinstance(uuid_len, int) or not uuid_len >= 0:
+ if not isinstance(uuid_len, int) or uuid_len < 0:
raise TypeError("``uuid_len`` must be an integer in range [0, 32].")
self.uuid = uuid or uuid4().hex[: min(32, uuid_len)]
self.uuid_len = len(self.uuid)
diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
index 2a6bea3255342..28b5f441d3cd5 100644
--- a/pandas/tests/arrays/interval/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -256,9 +256,9 @@ def test_arrow_extension_type():
assert p1.closed == "left"
assert p1 == p2
- assert not p1 == p3
+ assert p1 != p3
assert hash(p1) == hash(p2)
- assert not hash(p1) == hash(p3)
+ assert hash(p1) != hash(p3)
@pyarrow_skip
diff --git a/pandas/tests/arrays/period/test_arrow_compat.py b/pandas/tests/arrays/period/test_arrow_compat.py
index 03fd146572405..61670f74f78fb 100644
--- a/pandas/tests/arrays/period/test_arrow_compat.py
+++ b/pandas/tests/arrays/period/test_arrow_compat.py
@@ -21,9 +21,9 @@ def test_arrow_extension_type():
assert p1.freq == "D"
assert p1 == p2
- assert not p1 == p3
+ assert p1 != p3
assert hash(p1) == hash(p2)
- assert not hash(p1) == hash(p3)
+ assert hash(p1) != hash(p3)
@pytest.mark.parametrize(
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index fce3da6dd6aee..9b0e74f050b62 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -800,7 +800,7 @@ def test_contains_td64_level(self):
def test_large_mi_contains(self):
# GH#10645
result = MultiIndex.from_arrays([range(10**6), range(10**6)])
- assert not (10**6, 0) in result
+ assert (10**6, 0) not in result
def test_timestamp_multiindex_indexer():
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index 72ee89a4b5108..1c3868bc85fd5 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -1114,5 +1114,5 @@ def test_ops_error_str():
with pytest.raises(TypeError, match=msg):
left > right
- assert not left == right
+ assert not left == right # pylint: disable=unneeded-not
assert left != right
diff --git a/pandas/tests/scalar/timestamp/test_comparisons.py b/pandas/tests/scalar/timestamp/test_comparisons.py
index ad629604d1bc9..c3e0f6df9c7d5 100644
--- a/pandas/tests/scalar/timestamp/test_comparisons.py
+++ b/pandas/tests/scalar/timestamp/test_comparisons.py
@@ -310,5 +310,5 @@ def __eq__(self, other) -> bool:
for left, right in [(inf, timestamp), (timestamp, inf)]:
assert left > right or left < right
assert left >= right or left <= right
- assert not left == right
+ assert not left == right # pylint: disable=unneeded-not
assert left != right
diff --git a/pyproject.toml b/pyproject.toml
index 397f74ddab71a..223e31dfbb3cd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -91,7 +91,6 @@ disable = [
"unidiomatic-typecheck",
"unnecessary-dunder-call",
"unnecessary-lambda-assignment",
- "unneeded-not",
"use-implicit-booleaness-not-comparison",
"use-implicit-booleaness-not-len",
"use-sequence-for-iteration",
| Related to https://github.com/pandas-dev/pandas/issues/48855
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
I added `# pylint: disable=unneeded-not` for tests where the intention was clearly to test the `not` operator. | https://api.github.com/repos/pandas-dev/pandas/pulls/49382 | 2022-10-29T01:58:42Z | 2022-11-17T00:51:56Z | 2022-11-17T00:51:56Z | 2022-11-17T20:36:06Z |
DEPR: Timestamp(dt64obj, tz=tz) | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 780318769f04e..d937f452b4023 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -281,6 +281,7 @@ Removal of prior version deprecations/changes
- Changed behavior of :class:`DataFrame` constructor given floating-point ``data`` and an integer ``dtype``, when the data cannot be cast losslessly, the floating point dtype is retained, matching :class:`Series` behavior (:issue:`41170`)
- Changed behavior of :class:`DataFrame` constructor when passed a ``dtype`` (other than int) that the data cannot be cast to; it now raises instead of silently ignoring the dtype (:issue:`41733`)
- Changed the behavior of :class:`Series` constructor, it will no longer infer a datetime64 or timedelta64 dtype from string entries (:issue:`41731`)
+- Changed behavior of :class:`Timestamp` constructor with a ``np.datetime64`` object and a ``tz`` passed to interpret the input as a wall-time as opposed to a UTC time (:issue:`42288`)
- Changed behavior of :class:`Index` constructor when passed a ``SparseArray`` or ``SparseDtype`` to retain that dtype instead of casting to ``numpy.ndarray`` (:issue:`43930`)
- Removed the deprecated ``base`` and ``loffset`` arguments from :meth:`pandas.DataFrame.resample`, :meth:`pandas.Series.resample` and :class:`pandas.Grouper`. Use ``offset`` or ``origin`` instead (:issue:`31809`)
- Changed behavior of :meth:`DataFrame.any` and :meth:`DataFrame.all` with ``bool_only=True``; object-dtype columns with all-bool values will no longer be included, manually cast to ``bool`` dtype first (:issue:`46188`)
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 3c3bb8496aa6e..bfc6f872675d6 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -1639,18 +1639,9 @@ class Timestamp(_Timestamp):
tzobj = maybe_get_tz(tz)
if tzobj is not None and is_datetime64_object(ts_input):
- # GH#24559, GH#42288 In the future we will treat datetime64 as
+ # GH#24559, GH#42288 As of 2.0 we treat datetime64 as
# wall-time (consistent with DatetimeIndex)
- warnings.warn(
- "In a future version, when passing a np.datetime64 object and "
- "a timezone to Timestamp, the datetime64 will be interpreted "
- "as a wall time, not a UTC time. To interpret as a UTC time, "
- "use `Timestamp(dt64).tz_localize('UTC').tz_convert(tz)`",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- # Once this deprecation is enforced, we can do
- # return Timestamp(ts_input).tz_localize(tzobj)
+ return cls(ts_input).tz_localize(tzobj)
if nanosecond is None:
nanosecond = 0
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
index 341e850a7464e..ba24804ce4634 100644
--- a/pandas/tests/scalar/timestamp/test_constructors.py
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -54,18 +54,13 @@ def test_constructor_datetime64_with_tz(self):
dt = np.datetime64("1970-01-01 05:00:00")
tzstr = "UTC+05:00"
- msg = "interpreted as a wall time"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- ts = Timestamp(dt, tz=tzstr)
+ # pre-2.0 this interpreted dt as a UTC time. in 2.0 this is treated
+ # as a wall-time, consistent with DatetimeIndex behavior
+ ts = Timestamp(dt, tz=tzstr)
- # Check that we match the old behavior
- alt = Timestamp(dt).tz_localize("UTC").tz_convert(tzstr)
+ alt = Timestamp(dt).tz_localize(tzstr)
assert ts == alt
-
- # Check that we *don't* match the future behavior
- assert ts.hour != 5
- expected_future = Timestamp(dt).tz_localize(tzstr)
- assert ts != expected_future
+ assert ts.hour == 5
def test_constructor(self):
base_str = "2014-07-01 09:00"
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49381 | 2022-10-28T23:18:06Z | 2022-11-01T22:53:42Z | 2022-11-01T22:53:42Z | 2022-11-01T23:00:13Z |
DEPR: Remove week & weekofyear for datetimes | diff --git a/doc/source/getting_started/intro_tutorials/09_timeseries.rst b/doc/source/getting_started/intro_tutorials/09_timeseries.rst
index 3c256081d7955..373470913c293 100644
--- a/doc/source/getting_started/intro_tutorials/09_timeseries.rst
+++ b/doc/source/getting_started/intro_tutorials/09_timeseries.rst
@@ -144,7 +144,7 @@ I want to add a new column to the ``DataFrame`` containing only the month of the
By using ``Timestamp`` objects for dates, a lot of time-related
properties are provided by pandas. For example the ``month``, but also
-``year``, ``weekofyear``, ``quarter``,… All of these properties are
+``year``, ``quarter``,… All of these properties are
accessible by the ``dt`` accessor.
.. raw:: html
diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst
index 93897723d5d71..b7866a0076d84 100644
--- a/doc/source/reference/indexing.rst
+++ b/doc/source/reference/indexing.rst
@@ -343,8 +343,6 @@ Time/date components
DatetimeIndex.timetz
DatetimeIndex.dayofyear
DatetimeIndex.day_of_year
- DatetimeIndex.weekofyear
- DatetimeIndex.week
DatetimeIndex.dayofweek
DatetimeIndex.day_of_week
DatetimeIndex.weekday
diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index 3fda5db3a0199..c9604f48dd334 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -311,8 +311,6 @@ Datetime properties
Series.dt.second
Series.dt.microsecond
Series.dt.nanosecond
- Series.dt.week
- Series.dt.weekofyear
Series.dt.dayofweek
Series.dt.day_of_week
Series.dt.weekday
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 5aa753dffcf7f..50d3952be5148 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -170,6 +170,7 @@ Removal of prior version deprecations/changes
- Removed deprecated :meth:`Index.to_native_types`, use ``obj.astype(str)`` instead (:issue:`36418`)
- Removed deprecated :meth:`Series.iteritems`, :meth:`DataFrame.iteritems`, use ``obj.items`` instead (:issue:`45321`)
- Removed deprecated :meth:`DatetimeIndex.union_many` (:issue:`45018`)
+- Removed deprecated ``weekofyear`` and ``week`` attributes of :class:`DatetimeArray`, :class:`DatetimeIndex` and ``dt`` accessor in favor of ``isocalendar().week`` (:issue:`33595`)
- Removed deprecated :meth:`RangeIndex._start`, :meth:`RangeIndex._stop`, :meth:`RangeIndex._step`, use ``start``, ``stop``, ``step`` instead (:issue:`30482`)
- Removed deprecated :meth:`DatetimeIndex.to_perioddelta`, Use ``dtindex - dtindex.to_period(freq).to_timestamp()`` instead (:issue:`34853`)
- Enforced deprecation disallowing passing a timezone-aware :class:`Timestamp` and ``dtype="datetime64[ns]"`` to :class:`Series` or :class:`DataFrame` constructors (:issue:`41555`)
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 1768bb7507dd9..8fc18691fb17e 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -216,8 +216,6 @@ def _scalar_type(self) -> type[Timestamp]:
"hour",
"minute",
"second",
- "weekofyear",
- "week",
"weekday",
"dayofweek",
"day_of_week",
@@ -1365,32 +1363,6 @@ def isocalendar(self) -> DataFrame:
iso_calendar_df.iloc[self._isnan] = None
return iso_calendar_df
- @property
- def weekofyear(self):
- """
- The week ordinal of the year.
-
- .. deprecated:: 1.1.0
-
- weekofyear and week have been deprecated.
- Please use DatetimeIndex.isocalendar().week instead.
- """
- warnings.warn(
- "weekofyear and week have been deprecated, please use "
- "DatetimeIndex.isocalendar().week instead, which returns "
- "a Series. To exactly reproduce the behavior of week and "
- "weekofyear and return an Index, you may call "
- "pd.Int64Index(idx.isocalendar().week)",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- week_series = self.isocalendar().week
- if week_series.hasnans:
- return week_series.to_numpy(dtype="float64", na_value=np.nan)
- return week_series.to_numpy(dtype="int64")
-
- week = weekofyear
-
year = _field_accessor(
"year",
"Y",
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index 46959aa5cd3e2..da2a0a2a87137 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -4,12 +4,9 @@
from __future__ import annotations
from typing import TYPE_CHECKING
-import warnings
import numpy as np
-from pandas.util._exceptions import find_stack_level
-
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_dtype,
@@ -276,31 +273,6 @@ def isocalendar(self) -> DataFrame:
"""
return self._get_values().isocalendar().set_index(self._parent.index)
- @property
- def weekofyear(self):
- """
- The week ordinal of the year according to the ISO 8601 standard.
-
- .. deprecated:: 1.1.0
-
- Series.dt.weekofyear and Series.dt.week have been deprecated. Please
- call :func:`Series.dt.isocalendar` and access the ``week`` column
- instead.
- """
- warnings.warn(
- "Series.dt.weekofyear and Series.dt.week have been deprecated. "
- "Please use Series.dt.isocalendar().week instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- week_series = self.isocalendar().week
- week_series.name = self.name
- if week_series.hasnans:
- return week_series.astype("float64")
- return week_series.astype("int64")
-
- week = weekofyear
-
@delegate_names(
delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_ops, typ="property"
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index a899e95bac41d..72b2cd15d3222 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -805,18 +805,11 @@ def test_bool_properties(self, arr1d, propname):
@pytest.mark.parametrize("propname", DatetimeArray._field_ops)
def test_int_properties(self, arr1d, propname):
- warn = None
- msg = "weekofyear and week have been deprecated, please use"
- if propname in ["week", "weekofyear"]:
- # GH#33595 Deprecate week and weekofyear
- warn = FutureWarning
-
dti = self.index_cls(arr1d)
arr = arr1d
- with tm.assert_produces_warning(warn, match=msg):
- result = getattr(arr, propname)
- expected = np.array(getattr(dti, propname), dtype=result.dtype)
+ result = getattr(arr, propname)
+ expected = np.array(getattr(dti, propname), dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 24779c6e0c89d..babab81dfbe57 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -75,9 +75,6 @@ def test_non_nano(self, unit, reso, dtype):
assert tz_compare(dta.tz, dta[0].tz)
assert (dta[0] == dta[:1]).all()
- @pytest.mark.filterwarnings(
- "ignore:weekofyear and week have been deprecated:FutureWarning"
- )
@pytest.mark.parametrize(
"field", DatetimeArray._field_ops + DatetimeArray._bool_ops
)
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index c69c35ee46307..0e8b0fe83279b 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -98,9 +98,6 @@ def test_datetimeindex_accessors(self):
# non boolean accessors -> return Index
for accessor in DatetimeArray._field_ops:
- if accessor in ["week", "weekofyear"]:
- # GH#33595 Deprecate week and weekofyear
- continue
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, Index)
@@ -287,15 +284,6 @@ def test_iter_readonly():
list(dti)
-def test_week_and_weekofyear_are_deprecated():
- # GH#33595 Deprecate week and weekofyear
- idx = date_range(start="2019-12-29", freq="D", periods=4)
- with tm.assert_produces_warning(FutureWarning):
- idx.week
- with tm.assert_produces_warning(FutureWarning):
- idx.weekofyear
-
-
def test_add_timedelta_preserves_freq():
# GH#37295 should hold for any DTI with freq=None or Tick freq
tz = "Canada/Eastern"
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index 1fd5f5ab7c2a6..79e9e1d4fc68b 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -73,9 +73,6 @@ def test_nat_vector_field_access():
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
- if field in ["week", "weekofyear"]:
- # GH#33595 Deprecate week and weekofyear
- continue
result = getattr(idx, field)
expected = Index([getattr(x, field) for x in idx])
@@ -88,9 +85,6 @@ def test_nat_vector_field_access():
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
- if field in ["week", "weekofyear"]:
- # GH#33595 Deprecate week and weekofyear
- continue
result = getattr(ser.dt, field)
expected = [getattr(x, field) for x in idx]
diff --git a/pandas/tests/series/accessors/test_cat_accessor.py b/pandas/tests/series/accessors/test_cat_accessor.py
index 48a01f0018775..2ac1574adc913 100644
--- a/pandas/tests/series/accessors/test_cat_accessor.py
+++ b/pandas/tests/series/accessors/test_cat_accessor.py
@@ -218,9 +218,6 @@ def test_dt_accessor_api_for_categorical(self, idx):
tm.assert_equal(res, exp)
for attr in attr_names:
- if attr in ["week", "weekofyear"]:
- # GH#33595 Deprecate week and weekofyear
- continue
res = getattr(cat.dt, attr)
exp = getattr(ser.dt, attr)
diff --git a/pandas/tests/series/accessors/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py
index 47e59be907929..ccd79d5cc58f4 100644
--- a/pandas/tests/series/accessors/test_dt_accessor.py
+++ b/pandas/tests/series/accessors/test_dt_accessor.py
@@ -107,8 +107,7 @@ def test_dt_namespace_accessor_datetime64(self, freq):
for prop in ok_for_dt:
# we test freq below
- # we ignore week and weekofyear because they are deprecated
- if prop not in ["freq", "week", "weekofyear"]:
+ if prop != "freq":
self._compare(ser, prop)
for prop in ok_for_dt_methods:
@@ -146,8 +145,7 @@ def test_dt_namespace_accessor_datetime64tz(self):
for prop in ok_for_dt:
# we test freq below
- # we ignore week and weekofyear because they are deprecated
- if prop not in ["freq", "week", "weekofyear"]:
+ if prop != "freq":
self._compare(ser, prop)
for prop in ok_for_dt_methods:
@@ -794,15 +792,6 @@ def test_to_period(self, input_vals):
tm.assert_series_equal(result, expected)
-def test_week_and_weekofyear_are_deprecated():
- # GH#33595 Deprecate week and weekofyear
- series = pd.to_datetime(Series(["2020-01-01"]))
- with tm.assert_produces_warning(FutureWarning):
- series.dt.week
- with tm.assert_produces_warning(FutureWarning):
- series.dt.weekofyear
-
-
def test_normalize_pre_epoch_dates():
# GH: 36294
ser = pd.to_datetime(Series(["1969-01-01 09:00:00", "2016-01-01 09:00:00"]))
| Introduced in https://github.com/pandas-dev/pandas/pull/33595 | https://api.github.com/repos/pandas-dev/pandas/pulls/49380 | 2022-10-28T23:14:21Z | 2022-10-29T16:51:50Z | 2022-10-29T16:51:50Z | 2022-10-29T17:49:12Z |
issue 48855 enable pylint C-type "disallowed-name " warning | diff --git a/asv_bench/benchmarks/attrs_caching.py b/asv_bench/benchmarks/attrs_caching.py
index d4366c42f96aa..d515743ea4431 100644
--- a/asv_bench/benchmarks/attrs_caching.py
+++ b/asv_bench/benchmarks/attrs_caching.py
@@ -15,7 +15,7 @@ def setup(self):
self.cur_index = self.df.index
def time_get_index(self):
- self.foo = self.df.index
+ self.df.index
def time_set_index(self):
self.df.index = self.cur_index
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 8f21823ddcd02..841def76a156f 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -2902,7 +2902,7 @@ def set_properties(self, subset: Subset | None = None, **kwargs) -> Styler:
return self.applymap(lambda x: values, subset=subset)
@Substitution(subset=subset)
- def bar(
+ def bar( # pylint: disable=disallowed-name
self,
subset: Subset | None = None,
axis: Axis | None = 0,
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 036d2c84f006e..35d743a64dd7b 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -1131,7 +1131,9 @@ def line(self, x=None, y=None, **kwargs) -> PlotAccessor:
)
@Substitution(kind="bar")
@Appender(_bar_or_line_doc)
- def bar(self, x=None, y=None, **kwargs) -> PlotAccessor:
+ def bar( # pylint: disable=disallowed-name
+ self, x=None, y=None, **kwargs
+ ) -> PlotAccessor:
"""
Vertical bar plot.
diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py
index 510d4ab702fdd..a1802a2fcb674 100644
--- a/pandas/tests/apply/test_frame_apply.py
+++ b/pandas/tests/apply/test_frame_apply.py
@@ -1516,10 +1516,10 @@ def test_aggregation_func_column_order():
columns=("att1", "att2", "att3"),
)
- def foo(s):
+ def sum_div2(s):
return s.sum() / 2
- aggs = ["sum", foo, "count", "min"]
+ aggs = ["sum", sum_div2, "count", "min"]
result = df.agg(aggs)
expected = DataFrame(
{
@@ -1527,7 +1527,7 @@ def foo(s):
"att2": [18.0, 9.0, 6.0, 0.0],
"att3": [17.0, 8.5, 6.0, 0.0],
},
- index=["sum", "foo", "count", "min"],
+ index=["sum", "sum_div2", "count", "min"],
)
tm.assert_frame_equal(result, expected)
@@ -1548,13 +1548,13 @@ def test_nuisance_depr_passes_through_warnings():
# sure if some other warnings were raised, they get passed through to
# the user.
- def foo(x):
+ def expected_warning(x):
warnings.warn("Hello, World!")
return x.sum()
df = DataFrame({"a": [1, 2, 3]})
with tm.assert_produces_warning(UserWarning, match="Hello, World!"):
- df.agg([foo])
+ df.agg([expected_warning])
def test_apply_type():
diff --git a/pandas/tests/base/test_constructors.py b/pandas/tests/base/test_constructors.py
index 5f654ca3a996b..9576bf57c8503 100644
--- a/pandas/tests/base/test_constructors.py
+++ b/pandas/tests/base/test_constructors.py
@@ -43,19 +43,19 @@ def constructor(request):
class TestPandasDelegate:
class Delegator:
- _properties = ["foo"]
- _methods = ["bar"]
+ _properties = ["prop"]
+ _methods = ["test_method"]
- def _set_foo(self, value):
- self.foo = value
+ def _set_prop(self, value):
+ self.prop = value
- def _get_foo(self):
- return self.foo
+ def _get_prop(self):
+ return self.prop
- foo = property(_get_foo, _set_foo, doc="foo property")
+ prop = property(_get_prop, _set_prop, doc="foo property")
- def bar(self, *args, **kwargs):
- """a test bar method"""
+ def test_method(self, *args, **kwargs):
+ """a test method"""
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj) -> None:
@@ -77,17 +77,17 @@ def test_invalid_delegation(self):
delegate = self.Delegate(self.Delegator())
- msg = "You cannot access the property foo"
+ msg = "You cannot access the property prop"
with pytest.raises(TypeError, match=msg):
- delegate.foo
+ delegate.prop
- msg = "The property foo cannot be set"
+ msg = "The property prop cannot be set"
with pytest.raises(TypeError, match=msg):
- delegate.foo = 5
+ delegate.prop = 5
- msg = "You cannot access the property foo"
+ msg = "You cannot access the property prop"
with pytest.raises(TypeError, match=msg):
- delegate.foo()
+ delegate.prop()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 1dab8682ce887..215cda68cd016 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -203,16 +203,16 @@ def test_is_list_like_disallow_sets(maybe_list_like):
def test_is_list_like_recursion():
# GH 33721
# interpreter would crash with SIGABRT
- def foo():
+ def list_like():
inference.is_list_like([])
- foo()
+ list_like()
rec_limit = sys.getrecursionlimit()
try:
# Limit to avoid stack overflow on Windows CI
sys.setrecursionlimit(100)
with tm.external_error_raised(RecursionError):
- foo()
+ list_like()
finally:
sys.setrecursionlimit(rec_limit)
diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py
index d5331b1060b23..b385091c9ff51 100644
--- a/pandas/tests/frame/test_subclass.py
+++ b/pandas/tests/frame/test_subclass.py
@@ -138,11 +138,11 @@ def test_subclass_attr_err_propagation(self):
# GH 11808
class A(DataFrame):
@property
- def bar(self):
+ def nonexistence(self):
return self.i_dont_exist
with pytest.raises(AttributeError, match=".*i_dont_exist.*"):
- A().bar
+ A().nonexistence
def test_subclass_align(self):
# GH 12983
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 9514d4c95b394..5bc323d82259e 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -283,15 +283,15 @@ def test_aggregate_item_by_item(df):
aggfun_0 = lambda ser: ser.size
result = grouped.agg(aggfun_0)
- foo = (df.A == "foo").sum()
- bar = (df.A == "bar").sum()
+ foosum = (df.A == "foo").sum()
+ barsum = (df.A == "bar").sum()
K = len(result.columns)
# GH5782
- exp = Series(np.array([foo] * K), index=list("BCD"), name="foo")
+ exp = Series(np.array([foosum] * K), index=list("BCD"), name="foo")
tm.assert_series_equal(result.xs("foo"), exp)
- exp = Series(np.array([bar] * K), index=list("BCD"), name="bar")
+ exp = Series(np.array([barsum] * K), index=list("BCD"), name="bar")
tm.assert_almost_equal(result.xs("bar"), exp)
def aggfun_1(ser):
@@ -417,10 +417,10 @@ def test_more_flexible_frame_multi_function(df):
expected = grouped.aggregate({"C": np.mean, "D": [np.mean, np.std]})
tm.assert_frame_equal(result, expected)
- def foo(x):
+ def numpymean(x):
return np.mean(x)
- def bar(x):
+ def numpystd(x):
return np.std(x, ddof=1)
# this uses column selection & renaming
@@ -430,7 +430,7 @@ def bar(x):
grouped.aggregate(d)
# But without renaming, these functions are OK
- d = {"C": [np.mean], "D": [foo, bar]}
+ d = {"C": [np.mean], "D": [numpymean, numpystd]}
grouped.aggregate(d)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 803a3a8f57442..9fdc0f02e8652 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1616,7 +1616,7 @@ def freduce(group):
assert group.name is not None
return group.sum()
- def foo(x):
+ def freducex(x):
return freduce(x)
grouped = df.groupby(grouper, group_keys=False)
@@ -1629,7 +1629,7 @@ def foo(x):
grouped["C"].apply(f)
grouped["C"].aggregate(freduce)
- grouped["C"].aggregate([freduce, foo])
+ grouped["C"].aggregate([freduce, freducex])
grouped["C"].transform(f)
diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py
index ef72f1f3ffde8..e2d59e5511a52 100644
--- a/pandas/tests/indexes/multi/test_integrity.py
+++ b/pandas/tests/indexes/multi/test_integrity.py
@@ -233,14 +233,14 @@ def test_level_setting_resets_attributes():
def test_rangeindex_fallback_coercion_bug():
# GH 12893
- foo = pd.DataFrame(np.arange(100).reshape((10, 10)))
- bar = pd.DataFrame(np.arange(100).reshape((10, 10)))
- df = pd.concat({"foo": foo.stack(), "bar": bar.stack()}, axis=1)
+ df1 = pd.DataFrame(np.arange(100).reshape((10, 10)))
+ df2 = pd.DataFrame(np.arange(100).reshape((10, 10)))
+ df = pd.concat({"df1": df1.stack(), "df2": df2.stack()}, axis=1)
df.index.names = ["fizz", "buzz"]
str(df)
expected = pd.DataFrame(
- {"bar": np.arange(100), "foo": np.arange(100)},
+ {"df2": np.arange(100), "df1": np.arange(100)},
index=MultiIndex.from_product([range(10), range(10)], names=["fizz", "buzz"]),
)
tm.assert_frame_equal(df, expected, check_like=True)
diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py
index 35cc977368c5d..c97505eacd4c4 100644
--- a/pandas/tests/io/formats/style/test_style.py
+++ b/pandas/tests/io/formats/style/test_style.py
@@ -653,10 +653,10 @@ def test_apply_dataframe_return(self, index, columns):
)
@pytest.mark.parametrize("axis", [0, 1])
def test_apply_subset(self, slice_, axis, df):
- def h(x, foo="bar"):
- return Series(f"color: {foo}", index=x.index, name=x.name)
+ def h(x, color="bar"):
+ return Series(f"color: {color}", index=x.index, name=x.name)
- result = df.style.apply(h, axis=axis, subset=slice_, foo="baz")._compute().ctx
+ result = df.style.apply(h, axis=axis, subset=slice_, color="baz")._compute().ctx
expected = {
(r, c): [("color", "baz")]
for r, row in enumerate(df.index)
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index e1811114107c8..f89a06636a0c0 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -2181,26 +2181,26 @@ def test_connectable_issue_example(self):
# https://github.com/pandas-dev/pandas/issues/10104
from sqlalchemy.engine import Engine
- def foo(connection):
+ def test_select(connection):
query = "SELECT test_foo_data FROM test_foo_data"
return sql.read_sql_query(query, con=connection)
- def bar(connection, data):
+ def test_append(connection, data):
data.to_sql(name="test_foo_data", con=connection, if_exists="append")
- def baz(conn):
+ def test_connectable(conn):
# https://github.com/sqlalchemy/sqlalchemy/commit/
# 00b5c10846e800304caa86549ab9da373b42fa5d#r48323973
- foo_data = foo(conn)
- bar(conn, foo_data)
+ foo_data = test_select(conn)
+ test_append(conn, foo_data)
def main(connectable):
if isinstance(connectable, Engine):
with connectable.connect() as conn:
with conn.begin():
- baz(conn)
+ test_connectable(conn)
else:
- baz(connectable)
+ test_connectable(connectable)
assert (
DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
@@ -2373,21 +2373,21 @@ def test_row_object_is_named_tuple(self):
class Test(BaseModel):
__tablename__ = "test_frame"
id = Column(Integer, primary_key=True)
- foo = Column(String(50))
+ string_column = Column(String(50))
BaseModel.metadata.create_all(self.conn)
Session = sessionmaker(bind=self.conn)
with Session() as session:
- df = DataFrame({"id": [0, 1], "foo": ["hello", "world"]})
+ df = DataFrame({"id": [0, 1], "string_column": ["hello", "world"]})
assert (
df.to_sql("test_frame", con=self.conn, index=False, if_exists="replace")
== 2
)
session.commit()
- foo = session.query(Test.id, Test.foo)
- df = DataFrame(foo)
+ test_query = session.query(Test.id, Test.string_column)
+ df = DataFrame(test_query)
- assert list(df.columns) == ["id", "foo"]
+ assert list(df.columns) == ["id", "string_column"]
class _TestMySQLAlchemy:
diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py
index 91804b5833465..e32708c4402e4 100644
--- a/pandas/tests/resample/test_period_index.py
+++ b/pandas/tests/resample/test_period_index.py
@@ -605,9 +605,11 @@ def test_resample_with_dst_time_change(self):
def test_resample_bms_2752(self):
# GH2753
- foo = Series(index=pd.bdate_range("20000101", "20000201"), dtype=np.float64)
- res1 = foo.resample("BMS").mean()
- res2 = foo.resample("BMS").mean().resample("B").mean()
+ timeseries = Series(
+ index=pd.bdate_range("20000101", "20000201"), dtype=np.float64
+ )
+ res1 = timeseries.resample("BMS").mean()
+ res2 = timeseries.resample("BMS").mean().resample("B").mean()
assert res1.index[0] == Timestamp("20000103")
assert res1.index[0] == res2.index[0]
diff --git a/pandas/tests/reshape/concat/test_empty.py b/pandas/tests/reshape/concat/test_empty.py
index 541a34bde8143..18c0645df1ceb 100644
--- a/pandas/tests/reshape/concat/test_empty.py
+++ b/pandas/tests/reshape/concat/test_empty.py
@@ -16,11 +16,11 @@ class TestEmptyConcat:
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
- baz = df[:5].copy()
- baz["foo"] = "bar"
+ dfcopy = df[:5].copy()
+ dfcopy["foo"] = "bar"
empty = df[5:5]
- frames = [baz, empty, empty, df[5:]]
+ frames = [dfcopy, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
diff --git a/pandas/tests/reshape/concat/test_series.py b/pandas/tests/reshape/concat/test_series.py
index 8fa5988720c6b..886ada409a91a 100644
--- a/pandas/tests/reshape/concat/test_series.py
+++ b/pandas/tests/reshape/concat/test_series.py
@@ -120,24 +120,30 @@ def test_concat_series_name_npscalar_tuple(self, s1name, s2name):
def test_concat_series_partial_columns_names(self):
# GH10698
- foo = Series([1, 2], name="foo")
- bar = Series([1, 2])
- baz = Series([4, 5])
+ named_series = Series([1, 2], name="foo")
+ unnamed_series1 = Series([1, 2])
+ unnamed_series2 = Series([4, 5])
- result = concat([foo, bar, baz], axis=1)
+ result = concat([named_series, unnamed_series1, unnamed_series2], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
- result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
+ result = concat(
+ [named_series, unnamed_series1, unnamed_series2],
+ axis=1,
+ keys=["red", "blue", "yellow"],
+ )
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
- result = concat([foo, bar, baz], axis=1, ignore_index=True)
+ result = concat(
+ [named_series, unnamed_series1, unnamed_series2], axis=1, ignore_index=True
+ )
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index b021b1aa97a0e..4418fe483d83b 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -2106,9 +2106,9 @@ def test_pivot_table_no_column_raises(self):
def agg(arr):
return np.mean(arr)
- foo = DataFrame({"X": [0, 0, 1, 1], "Y": [0, 1, 0, 1], "Z": [10, 20, 30, 40]})
+ df = DataFrame({"X": [0, 0, 1, 1], "Y": [0, 1, 0, 1], "Z": [10, 20, 30, 40]})
with pytest.raises(KeyError, match="notpresent"):
- foo.pivot_table("notpresent", "X", "Y", aggfunc=agg)
+ df.pivot_table("notpresent", "X", "Y", aggfunc=agg)
def test_pivot_table_multiindex_columns_doctest_case(self):
# The relevant characteristic is that the call
diff --git a/pandas/tests/util/test_deprecate_nonkeyword_arguments.py b/pandas/tests/util/test_deprecate_nonkeyword_arguments.py
index f6501fa8315e4..2ea3dae19a3e4 100644
--- a/pandas/tests/util/test_deprecate_nonkeyword_arguments.py
+++ b/pandas/tests/util/test_deprecate_nonkeyword_arguments.py
@@ -140,7 +140,7 @@ def test_i_signature():
class Foo:
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "bar"])
- def baz(self, bar=None, foobar=None):
+ def baz(self, bar=None, foobar=None): # pylint: disable=disallowed-name
...
diff --git a/pandas/tests/window/test_apply.py b/pandas/tests/window/test_apply.py
index b442c5a887503..56c2432ab1429 100644
--- a/pandas/tests/window/test_apply.py
+++ b/pandas/tests/window/test_apply.py
@@ -163,7 +163,7 @@ def test_invalid_raw_numba():
@pytest.mark.parametrize("args_kwargs", [[None, {"par": 10}], [(10,), None]])
def test_rolling_apply_args_kwargs(args_kwargs):
# GH 33433
- def foo(x, par):
+ def numpysum(x, par):
return np.sum(x + par)
df = DataFrame({"gr": [1, 1], "a": [1, 2]})
@@ -171,7 +171,7 @@ def foo(x, par):
idx = Index(["gr", "a"])
expected = DataFrame([[11.0, 11.0], [11.0, 12.0]], columns=idx)
- result = df.rolling(1).apply(foo, args=args_kwargs[0], kwargs=args_kwargs[1])
+ result = df.rolling(1).apply(numpysum, args=args_kwargs[0], kwargs=args_kwargs[1])
tm.assert_frame_equal(result, expected)
midx = MultiIndex.from_tuples([(1, 0), (1, 1)], names=["gr", None])
@@ -179,7 +179,7 @@ def foo(x, par):
gb_rolling = df.groupby("gr")["a"].rolling(1)
- result = gb_rolling.apply(foo, args=args_kwargs[0], kwargs=args_kwargs[1])
+ result = gb_rolling.apply(numpysum, args=args_kwargs[0], kwargs=args_kwargs[1])
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py
index 688a93223b3f4..3da14bce6facd 100644
--- a/pandas/tests/window/test_groupby.py
+++ b/pandas/tests/window/test_groupby.py
@@ -277,11 +277,11 @@ def test_rolling_apply_mutability(self):
def test_groupby_rolling(self, expected_value, raw_value):
# GH 31754
- def foo(x):
+ def isnumpyarray(x):
return int(isinstance(x, np.ndarray))
df = DataFrame({"id": [1, 1, 1], "value": [1, 2, 3]})
- result = df.groupby("id").value.rolling(1).apply(foo, raw=raw_value)
+ result = df.groupby("id").value.rolling(1).apply(isnumpyarray, raw=raw_value)
expected = Series(
[expected_value] * 3,
index=MultiIndex.from_tuples(((1, 0), (1, 1), (1, 2)), names=["id", None]),
diff --git a/pyproject.toml b/pyproject.toml
index 761f3c687d08d..991f6eb24a778 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -79,7 +79,6 @@ disable = [
# pylint type "C": convention, for programming standard violation
"consider-using-f-string",
- "disallowed-name",
"import-outside-toplevel",
"invalid-name",
"line-too-long",
diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index 35bc477d5b3d5..b490c2ffdc2e8 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -24,7 +24,7 @@ def prefix_pandas(self):
DataFrame.head : The first `n` rows of the caller object.
"""
- def redundant_import(self, foo=None, bar=None):
+ def redundant_import(self, paramx=None, paramy=None):
"""
A sample DataFrame method.
| [x] Refers to one of the issues in STYLE fix pylint issues [#48855](https://github.com/pandas-dev/pandas/issues/48855)
[x ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
I have enabled the C-type "disallowed-name " warning in pylint and renamed the "[bad names](https://pylint.pycqa.org/en/latest/user_guide/configuration/all-options.html#bad-names)" flagged by this warning
| https://api.github.com/repos/pandas-dev/pandas/pulls/49379 | 2022-10-28T22:48:38Z | 2022-11-02T21:07:03Z | 2022-11-02T21:07:02Z | 2022-11-02T21:19:49Z |
API/BUG: Fix is_string_dtype and make more strict | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index d71160cdbc369..b38e95c86d8cd 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -147,6 +147,7 @@ Other API changes
- The ``other`` argument in :meth:`DataFrame.mask` and :meth:`Series.mask` now defaults to ``no_default`` instead of ``np.nan`` consistent with :meth:`DataFrame.where` and :meth:`Series.where`. Entries will be filled with the corresponding NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension dtypes). (:issue:`49111`)
- When creating a :class:`Series` with a object-dtype :class:`Index` of datetime objects, pandas no longer silently converts the index to a :class:`DatetimeIndex` (:issue:`39307`, :issue:`23598`)
- :meth:`Series.unique` with dtype "timedelta64[ns]" or "datetime64[ns]" now returns :class:`TimedeltaArray` or :class:`DatetimeArray` instead of ``numpy.ndarray`` (:issue:`49176`)
+- :func:`pandas.api.dtypes.is_string_dtype` now only returns ``True`` for array-likes with ``dtype=object`` when the elements are inferred to be strings (:issue:`15585`)
- Passing a sequence containing ``datetime`` objects and ``date`` objects to :class:`Series` constructor will return with ``object`` dtype instead of ``datetime64[ns]`` dtype, consistent with :class:`Index` behavior (:issue:`49341`)
- Passing strings that cannot be parsed as datetimes to :class:`Series` or :class:`DataFrame` with ``dtype="datetime64[ns]"`` will raise instead of silently ignoring the keyword and returning ``object`` dtype (:issue:`24435`)
-
@@ -393,7 +394,7 @@ Conversion
Strings
^^^^^^^
--
+- Bug in :func:`pandas.api.dtypes.is_string_dtype` that would not return ``True`` for :class:`StringDtype` (:issue:`15585`)
-
Interval
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 3c2aa1f6bab5d..a7b8e720ad8e2 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -500,6 +500,9 @@ def is_string_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of the string dtype.
+ If an array is passed with an object dtype, the elements must be
+ inferred as strings.
+
Parameters
----------
arr_or_dtype : array-like or dtype
@@ -518,21 +521,23 @@ def is_string_dtype(arr_or_dtype) -> bool:
True
>>> is_string_dtype(int)
False
- >>>
>>> is_string_dtype(np.array(['a', 'b']))
True
>>> is_string_dtype(pd.Series([1, 2]))
False
+ >>> is_string_dtype(pd.Series([1, 2], dtype=object))
+ False
"""
- # TODO: gh-15585: consider making the checks stricter.
- def condition(dtype) -> bool:
- return dtype.kind in ("O", "S", "U") and not is_excluded_dtype(dtype)
+ if hasattr(arr_or_dtype, "dtype") and get_dtype(arr_or_dtype).kind == "O":
+ return is_all_strings(arr_or_dtype)
- def is_excluded_dtype(dtype) -> bool:
- """
- These have kind = "O" but aren't string dtypes so need to be explicitly excluded
- """
- return isinstance(dtype, (PeriodDtype, IntervalDtype, CategoricalDtype))
+ def condition(dtype) -> bool:
+ if is_string_or_object_np_dtype(dtype):
+ return True
+ try:
+ return dtype == "string"
+ except TypeError:
+ return False
return _is_dtype(arr_or_dtype, condition)
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 589e2e04d668a..c8a3c992248ad 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -290,6 +290,15 @@ def test_is_string_dtype():
assert com.is_string_dtype(pd.StringDtype())
+@pytest.mark.parametrize(
+ "data",
+ [[(0, 1), (1, 1)], pd.Categorical([1, 2, 3]), np.array([1, 2], dtype=object)],
+)
+def test_is_string_dtype_arraylike_with_object_elements_not_strings(data):
+ # GH 15585
+ assert not com.is_string_dtype(pd.Series(data))
+
+
def test_is_string_dtype_nullable(nullable_string_dtype):
assert com.is_string_dtype(pd.array(["a", "b"], dtype=nullable_string_dtype))
diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py
index ea4443010c6a6..32a9246264d69 100644
--- a/pandas/tests/extension/base/dtype.py
+++ b/pandas/tests/extension/base/dtype.py
@@ -45,10 +45,10 @@ def test_is_dtype_other_input(self, dtype):
assert dtype.is_dtype([1, 2, 3]) is False
def test_is_not_string_type(self, dtype):
- return not is_string_dtype(dtype)
+ assert not is_string_dtype(dtype)
def test_is_not_object_type(self, dtype):
- return not is_object_dtype(dtype)
+ assert not is_object_dtype(dtype)
def test_eq_with_str(self, dtype):
assert dtype == dtype.name
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 148059a6a16f3..d6a5557c89f14 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -26,6 +26,7 @@
import pandas as pd
import pandas._testing as tm
+from pandas.api.types import is_object_dtype
from pandas.core.arrays.numpy_ import PandasArray
from pandas.core.internals import blocks
from pandas.tests.extension import base
@@ -218,6 +219,14 @@ def test_check_dtype(self, data, request):
)
super().test_check_dtype(data)
+ def test_is_not_object_type(self, dtype, request):
+ if dtype.numpy_dtype == "object":
+ # Different from BaseDtypeTests.test_is_not_object_type
+ # because PandasDtype(object) is an object type
+ assert is_object_dtype(dtype)
+ else:
+ super().test_is_not_object_type(dtype)
+
class TestGetitem(BaseNumPyTests, base.BaseGetitemTests):
@skip_nested
diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py
index c5aebb282bafa..8cbd4342ea13f 100644
--- a/pandas/tests/extension/test_string.py
+++ b/pandas/tests/extension/test_string.py
@@ -26,6 +26,7 @@
import pandas as pd
import pandas._testing as tm
+from pandas.api.types import is_string_dtype
from pandas.core.arrays import ArrowStringArray
from pandas.core.arrays.string_ import StringDtype
from pandas.tests.extension import base
@@ -106,6 +107,11 @@ def test_eq_with_str(self, dtype):
assert dtype == f"string[{dtype.storage}]"
super().test_eq_with_str(dtype)
+ def test_is_not_string_type(self, dtype):
+ # Different from BaseDtypeTests.test_is_not_string_type
+ # because StringDtype is a string type
+ assert is_string_dtype(dtype)
+
class TestInterface(base.BaseInterfaceTests):
def test_view(self, data, request):
| - [x] closes #15585 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Started with fixing pytest warnings about `return` being used instead of `assert`
Fixing led to some extension dtype tests being seemingly incorrect w/r/t `is_string_dtype` which pointed to #15585 | https://api.github.com/repos/pandas-dev/pandas/pulls/49378 | 2022-10-28T22:14:13Z | 2022-11-03T16:09:18Z | 2022-11-03T16:09:18Z | 2023-01-12T04:48:57Z |
BUG: Fix passing `Colormap` instance to plot methods with mpl >= 3.6 | diff --git a/doc/source/whatsnew/v1.5.2.rst b/doc/source/whatsnew/v1.5.2.rst
index 4f6274b9084da..c95f563a77708 100644
--- a/doc/source/whatsnew/v1.5.2.rst
+++ b/doc/source/whatsnew/v1.5.2.rst
@@ -14,6 +14,8 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :meth:`Series.replace` raising ``RecursionError`` with numeric dtype and when specifying ``value=None`` (:issue:`45725`)
+- Fixed regression in :meth:`DataFrame.plot` preventing :class:`~matplotlib.colors.Colormap` instance
+ from being passed using the ``colormap`` argument if Matplotlib 3.6+ is used (:issue:`49374`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index fb44082aa5b58..8f21823ddcd02 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -3610,7 +3610,7 @@ def _background_gradient(
if cmap is None:
rgbas = mpl.colormaps[mpl.rcParams["image.cmap"]](norm(gmap))
else:
- rgbas = mpl.colormaps[cmap](norm(gmap))
+ rgbas = mpl.colormaps.get_cmap(cmap)(norm(gmap))
else:
rgbas = plt.cm.get_cmap(cmap)(norm(gmap))
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 9bcb51a7b032a..8292ffd0425f5 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -1231,7 +1231,7 @@ def _make_plot(self):
if self.colormap is not None:
if mpl_ge_3_6_0():
- cmap = mpl.colormaps[self.colormap]
+ cmap = mpl.colormaps.get_cmap(self.colormap)
else:
cmap = self.plt.cm.get_cmap(self.colormap)
else:
@@ -1311,7 +1311,7 @@ def _make_plot(self) -> None:
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or "BuGn"
if mpl_ge_3_6_0():
- cmap = mpl.colormaps[cmap]
+ cmap = mpl.colormaps.get_cmap(cmap)
else:
cmap = self.plt.cm.get_cmap(cmap)
cb = self.kwds.pop("colorbar", True)
diff --git a/pandas/tests/io/formats/style/test_matplotlib.py b/pandas/tests/io/formats/style/test_matplotlib.py
index c5b05b4e0d0c1..f0c4152e3339b 100644
--- a/pandas/tests/io/formats/style/test_matplotlib.py
+++ b/pandas/tests/io/formats/style/test_matplotlib.py
@@ -288,3 +288,17 @@ def test_bar_color_raises(df):
msg = "`color` and `cmap` cannot both be given"
with pytest.raises(ValueError, match=msg):
df.style.bar(color="something", cmap="something else").to_html()
+
+
+@pytest.mark.parametrize(
+ "plot_method",
+ ["scatter", "hexbin"],
+)
+def test_pass_colormap_instance(df, plot_method):
+ # https://github.com/pandas-dev/pandas/issues/49374
+ cmap = mpl.colors.ListedColormap([[1, 1, 1], [0, 0, 0]])
+ df["c"] = df.A + df.B
+ kwargs = dict(x="A", y="B", c="c", colormap=cmap)
+ if plot_method == "hexbin":
+ kwargs["C"] = kwargs.pop("c")
+ getattr(df.plot, plot_method)(**kwargs)
| - [x] closes #49374
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49377 | 2022-10-28T21:57:56Z | 2022-11-01T18:38:00Z | 2022-11-01T18:38:00Z | 2023-01-12T12:40:43Z |
CLN: Remove xlrd < 2.0 code | diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 0b284fd4e9750..5e0ef4d6193f2 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -1592,9 +1592,9 @@ def __init__(
xlrd_version = Version(get_version(xlrd))
- ext = None
if engine is None:
# Only determine ext if it is needed
+ ext: str | None
if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book):
ext = "xls"
else:
@@ -1611,32 +1611,6 @@ def __init__(
if engine == "auto":
engine = get_default_engine(ext, mode="reader")
- if engine == "xlrd" and xlrd_version is not None:
- if ext is None:
- # Need ext to determine ext in order to raise/warn
- if isinstance(path_or_buffer, xlrd.Book):
- ext = "xls"
- else:
- ext = inspect_excel_format(
- path_or_buffer, storage_options=storage_options
- )
-
- # Pass through if ext is None, otherwise check if ext valid for xlrd
- if ext and ext != "xls" and xlrd_version >= Version("2"):
- raise ValueError(
- f"Your version of xlrd is {xlrd_version}. In xlrd >= 2.0, "
- f"only the xls format is supported. Install openpyxl instead."
- )
- elif ext and ext != "xls":
- stacklevel = find_stack_level()
- warnings.warn(
- f"Your version of xlrd is {xlrd_version}. In xlrd >= 2.0, "
- f"only the xls format is supported. Install "
- f"openpyxl instead.",
- FutureWarning,
- stacklevel=stacklevel,
- )
-
assert engine is not None
self.engine = engine
self.storage_options = storage_options
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index 171705dee6e59..c556e4c68c6c0 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -30,7 +30,7 @@ def __init__(
Object to be parsed.
{storage_options}
"""
- err_msg = "Install xlrd >= 1.0.0 for Excel support"
+ err_msg = "Install xlrd >= 2.0.1 for xls Excel support"
import_optional_dependency("xlrd", extra=err_msg)
super().__init__(filepath_or_buffer, storage_options=storage_options)
diff --git a/pandas/tests/io/excel/__init__.py b/pandas/tests/io/excel/__init__.py
index 9136153101e23..419761cbe1d6d 100644
--- a/pandas/tests/io/excel/__init__.py
+++ b/pandas/tests/io/excel/__init__.py
@@ -9,8 +9,4 @@
pytest.mark.filterwarnings(
"ignore:This method will be removed in future versions:DeprecationWarning"
),
- # GH 38571
- pytest.mark.filterwarnings(
- "ignore:.*In xlrd >= 2.0, only the xls format is supported:FutureWarning"
- ),
]
diff --git a/pandas/tests/io/excel/test_xlrd.py b/pandas/tests/io/excel/test_xlrd.py
index 30dddbd7de50b..1f8fb4b801356 100644
--- a/pandas/tests/io/excel/test_xlrd.py
+++ b/pandas/tests/io/excel/test_xlrd.py
@@ -2,8 +2,6 @@
import pytest
-from pandas.compat._optional import import_optional_dependency
-
import pandas as pd
import pandas._testing as tm
@@ -39,35 +37,13 @@ def test_read_xlrd_book(read_ext_xlrd, datapath):
tm.assert_frame_equal(result, expected)
-def test_excel_file_warning_with_xlsx_file(datapath):
- # GH 29375
- path = datapath("io", "data", "excel", "test1.xlsx")
- has_openpyxl = import_optional_dependency("openpyxl", errors="ignore") is not None
- if not has_openpyxl:
- with tm.assert_produces_warning(
- FutureWarning,
- raise_on_extra_warnings=False,
- match="The xlrd engine is no longer maintained",
- ):
- ExcelFile(path, engine=None)
- else:
- with tm.assert_produces_warning(None):
- pd.read_excel(path, "Sheet1", engine=None)
-
-
-def test_read_excel_warning_with_xlsx_file(datapath):
+def test_read_xlsx_fails(datapath):
# GH 29375
+ from xlrd.biffh import XLRDError
+
path = datapath("io", "data", "excel", "test1.xlsx")
- has_openpyxl = import_optional_dependency("openpyxl", errors="ignore") is not None
- if not has_openpyxl:
- with pytest.raises(
- ValueError,
- match="Your version of xlrd is ",
- ):
- pd.read_excel(path, "Sheet1", engine=None)
- else:
- with tm.assert_produces_warning(None):
- pd.read_excel(path, "Sheet1", engine=None)
+ with pytest.raises(XLRDError, match="Excel xlsx file; not supported"):
+ pd.read_excel(path, engine="xlrd")
@pytest.mark.parametrize(
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
The minimum version of xlrd was bumped to 2.0.1 in pandas 1.4.0. This is mostly doing some cleanup - removing a FutureWarning that can no longer be hit. The only behavior change here is that when the user specifies `engine="xlrd"` on a non-xls file, we attempt to use the xlrd engine instead of raising directly.
Edit: And we now also warn the user any time xlrd is used.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49376 | 2022-10-28T19:56:03Z | 2022-11-02T00:42:59Z | 2022-11-02T00:42:59Z | 2022-11-03T00:27:53Z |
DEPR: Enforce deprecation of partial failure in transform for lists/dicts | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 5cf5c9aaccb52..0fe77d928be65 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -259,6 +259,7 @@ Removal of prior version deprecations/changes
- Changed the behavior of :class:`Series` constructor, it will no longer infer a datetime64 or timedelta64 dtype from string entries (:issue:`41731`)
- Changed behavior of :class:`Index` constructor when passed a ``SparseArray`` or ``SparseDtype`` to retain that dtype instead of casting to ``numpy.ndarray`` (:issue:`43930`)
- Changed behavior of :meth:`DataFrame.any` and :meth:`DataFrame.all` with ``bool_only=True``; object-dtype columns with all-bool values will no longer be included, manually cast to ``bool`` dtype first (:issue:`46188`)
+- Enforced deprecation of silently dropping columns that raised a ``TypeError`` in :class:`Series.transform` and :class:`DataFrame.transform` when used with a list or dictionary (:issue:`43740`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 4f9af2d0c01d6..cccef939f94d4 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -266,34 +266,9 @@ def transform_dict_like(self, func):
func = self.normalize_dictlike_arg("transform", obj, func)
results: dict[Hashable, DataFrame | Series] = {}
- failed_names = []
- all_type_errors = True
for name, how in func.items():
colg = obj._gotitem(name, ndim=1)
- try:
- results[name] = colg.transform(how, 0, *args, **kwargs)
- except Exception as err:
- if str(err) in {
- "Function did not transform",
- "No transform functions were provided",
- }:
- raise err
- else:
- if not isinstance(err, TypeError):
- all_type_errors = False
- failed_names.append(name)
- # combine results
- if not results:
- klass = TypeError if all_type_errors else ValueError
- raise klass("Transform function failed")
- if len(failed_names) > 0:
- warnings.warn(
- f"{failed_names} did not transform successfully. If any error is "
- f"raised, this will raise in a future version of pandas. "
- f"Drop these columns/ops to avoid this warning.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
+ results[name] = colg.transform(how, 0, *args, **kwargs)
return concat(results, axis=1)
def transform_str_or_callable(self, func) -> DataFrame | Series:
diff --git a/pandas/tests/apply/test_frame_transform.py b/pandas/tests/apply/test_frame_transform.py
index f884e8a7daf67..4749cec018fe6 100644
--- a/pandas/tests/apply/test_frame_transform.py
+++ b/pandas/tests/apply/test_frame_transform.py
@@ -133,32 +133,37 @@ def func(x):
@pytest.mark.parametrize("op", [*frame_kernels_raise, lambda x: x + 1])
def test_transform_bad_dtype(op, frame_or_series, request):
# GH 35964
- if op == "rank":
- request.node.add_marker(
- pytest.mark.xfail(
- raises=ValueError, reason="GH 40418: rank does not raise a TypeError"
- )
- )
- elif op == "ngroup":
+ if op == "ngroup":
request.node.add_marker(
pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame")
)
obj = DataFrame({"A": 3 * [object]}) # DataFrame that will fail on most transforms
obj = tm.get_obj(obj, frame_or_series)
+ if op == "rank":
+ error = ValueError
+ msg = "Transform function failed"
+ else:
+ error = TypeError
+ msg = "|".join(
+ [
+ "not supported between instances of 'type' and 'type'",
+ "unsupported operand type",
+ ]
+ )
- with pytest.raises(TypeError, match="unsupported operand|not supported"):
+ with pytest.raises(error, match=msg):
obj.transform(op)
- with pytest.raises(TypeError, match="Transform function failed"):
+ with pytest.raises(error, match=msg):
obj.transform([op])
- with pytest.raises(TypeError, match="Transform function failed"):
+ with pytest.raises(error, match=msg):
obj.transform({"A": op})
- with pytest.raises(TypeError, match="Transform function failed"):
+ with pytest.raises(error, match=msg):
obj.transform({"A": [op]})
@pytest.mark.parametrize("op", frame_kernels_raise)
-def test_transform_partial_failure_typeerror(request, op):
+def test_transform_failure_typeerror(request, op):
# GH 35964
if op == "ngroup":
@@ -168,62 +173,52 @@ def test_transform_partial_failure_typeerror(request, op):
# Using object makes most transform kernels fail
df = DataFrame({"A": 3 * [object], "B": [1, 2, 3]})
+ if op == "rank":
+ error = ValueError
+ msg = "Transform function failed"
+ else:
+ error = TypeError
+ msg = "|".join(
+ [
+ "not supported between instances of 'type' and 'type'",
+ "unsupported operand type",
+ ]
+ )
- expected = df[["B"]].transform([op])
- match = r"\['A'\] did not transform successfully"
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = df.transform([op])
- tm.assert_equal(result, expected)
+ with pytest.raises(error, match=msg):
+ df.transform([op])
- expected = df[["B"]].transform({"B": op})
- match = r"\['A'\] did not transform successfully"
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = df.transform({"A": op, "B": op})
- tm.assert_equal(result, expected)
+ with pytest.raises(error, match=msg):
+ df.transform({"A": op, "B": op})
- expected = df[["B"]].transform({"B": [op]})
- match = r"\['A'\] did not transform successfully"
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = df.transform({"A": [op], "B": [op]})
- tm.assert_equal(result, expected)
+ with pytest.raises(error, match=msg):
+ df.transform({"A": [op], "B": [op]})
- expected = df.transform({"A": ["shift"], "B": [op]})
- match = rf"\['{op}'\] did not transform successfully"
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = df.transform({"A": [op, "shift"], "B": [op]})
- tm.assert_equal(result, expected)
+ with pytest.raises(error, match=msg):
+ df.transform({"A": [op, "shift"], "B": [op]})
-def test_transform_partial_failure_valueerror():
+def test_transform_failure_valueerror():
# GH 40211
- match = ".*did not transform successfully"
-
def op(x):
if np.sum(np.sum(x)) < 10:
raise ValueError
return x
df = DataFrame({"A": [1, 2, 3], "B": [400, 500, 600]})
+ msg = "Transform function failed"
- expected = df[["B"]].transform([op])
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = df.transform([op])
- tm.assert_equal(result, expected)
+ with pytest.raises(ValueError, match=msg):
+ df.transform([op])
- expected = df[["B"]].transform({"B": op})
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = df.transform({"A": op, "B": op})
- tm.assert_equal(result, expected)
+ with pytest.raises(ValueError, match=msg):
+ df.transform({"A": op, "B": op})
- expected = df[["B"]].transform({"B": [op]})
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = df.transform({"A": [op], "B": [op]})
- tm.assert_equal(result, expected)
+ with pytest.raises(ValueError, match=msg):
+ df.transform({"A": [op], "B": [op]})
- expected = df.transform({"A": ["shift"], "B": [op]})
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = df.transform({"A": [op, "shift"], "B": [op]})
- tm.assert_equal(result, expected)
+ with pytest.raises(ValueError, match=msg):
+ df.transform({"A": [op, "shift"], "B": [op]})
@pytest.mark.parametrize("use_apply", [True, False])
diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py
index bf7f3abc04aa5..6ed962c8f68e6 100644
--- a/pandas/tests/apply/test_invalid_arg.py
+++ b/pandas/tests/apply/test_invalid_arg.py
@@ -283,7 +283,7 @@ def test_agg_none_to_type():
def test_transform_none_to_type():
# GH#34377
df = DataFrame({"a": [None]})
- msg = "Transform function failed"
+ msg = "argument must be a"
with pytest.raises(TypeError, match=msg):
df.transform({"a": int})
diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py
index b67af8c521090..9b51ea7fef5f8 100644
--- a/pandas/tests/apply/test_series_apply.py
+++ b/pandas/tests/apply/test_series_apply.py
@@ -280,45 +280,39 @@ def test_transform_partial_failure(op, request):
# GH 35964
if op in ("ffill", "bfill", "pad", "backfill", "shift"):
request.node.add_marker(
- pytest.mark.xfail(
- raises=AssertionError, reason=f"{op} is successful on any dtype"
- )
+ pytest.mark.xfail(reason=f"{op} is successful on any dtype")
)
# Using object makes most transform kernels fail
ser = Series(3 * [object])
- expected = ser.transform(["shift"])
- match = rf"\['{op}'\] did not transform successfully"
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = ser.transform([op, "shift"])
- tm.assert_equal(result, expected)
-
- expected = ser.transform({"B": "shift"})
- match = r"\['A'\] did not transform successfully"
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = ser.transform({"A": op, "B": "shift"})
- tm.assert_equal(result, expected)
-
- expected = ser.transform({"B": ["shift"]})
- match = r"\['A'\] did not transform successfully"
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = ser.transform({"A": [op], "B": ["shift"]})
- tm.assert_equal(result, expected)
-
- match = r"\['B'\] did not transform successfully"
- with tm.assert_produces_warning(FutureWarning, match=match):
- expected = ser.transform({"A": ["shift"], "B": [op]})
- match = rf"\['{op}'\] did not transform successfully"
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = ser.transform({"A": [op, "shift"], "B": [op]})
- tm.assert_equal(result, expected)
+ if op in ("fillna", "ngroup", "rank"):
+ error = ValueError
+ msg = "Transform function failed"
+ else:
+ error = TypeError
+ msg = "|".join(
+ [
+ "not supported between instances of 'type' and 'type'",
+ "unsupported operand type",
+ ]
+ )
+
+ with pytest.raises(error, match=msg):
+ ser.transform([op, "shift"])
+
+ with pytest.raises(error, match=msg):
+ ser.transform({"A": op, "B": "shift"})
+
+ with pytest.raises(error, match=msg):
+ ser.transform({"A": [op], "B": ["shift"]})
+
+ with pytest.raises(error, match=msg):
+ ser.transform({"A": [op, "shift"], "B": [op]})
def test_transform_partial_failure_valueerror():
# GH 40211
- match = ".*did not transform successfully"
-
def noop(x):
return x
@@ -326,26 +320,19 @@ def raising_op(_):
raise ValueError
ser = Series(3 * [object])
+ msg = "Transform function failed"
+
+ with pytest.raises(ValueError, match=msg):
+ ser.transform([noop, raising_op])
+
+ with pytest.raises(ValueError, match=msg):
+ ser.transform({"A": raising_op, "B": noop})
+
+ with pytest.raises(ValueError, match=msg):
+ ser.transform({"A": [raising_op], "B": [noop]})
- expected = ser.transform([noop])
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = ser.transform([noop, raising_op])
- tm.assert_equal(result, expected)
-
- expected = ser.transform({"B": noop})
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = ser.transform({"A": raising_op, "B": noop})
- tm.assert_equal(result, expected)
-
- expected = ser.transform({"B": [noop]})
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = ser.transform({"A": [raising_op], "B": [noop]})
- tm.assert_equal(result, expected)
-
- expected = ser.transform({"A": [noop], "B": [noop]})
- with tm.assert_produces_warning(FutureWarning, match=match):
- result = ser.transform({"A": [noop, raising_op], "B": [noop]})
- tm.assert_equal(result, expected)
+ with pytest.raises(ValueError, match=msg):
+ ser.transform({"A": [noop, raising_op], "B": [noop]})
def test_demo():
| From #43740
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49375 | 2022-10-28T19:02:32Z | 2022-10-30T16:18:21Z | 2022-10-30T16:18:21Z | 2022-10-30T17:15:52Z |
BUG: groupby with CategoricalIndex doesn't include unobserved categories | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index c76555f9ef417..a24aae0855887 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -582,7 +582,8 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrameGroupBy.sample` raises ``ValueError`` when the object is empty (:issue:`48459`)
- Bug in :meth:`Series.groupby` raises ``ValueError`` when an entry of the index is equal to the name of the index (:issue:`48567`)
- Bug in :meth:`DataFrameGroupBy.resample` produces inconsistent results when passing empty DataFrame (:issue:`47705`)
--
+- Bug in :class:`.DataFrameGroupBy` and :class:`.SeriesGroupBy` would not include unobserved categories in result when grouping by categorical indexes (:issue:`49354`)
+- Bug in :class:`.DataFrameGroupBy` and :class:`.SeriesGroupBy` would change result order depending on the input index when grouping by categoricals (:issue:`49223`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py
index b11bbf35312c9..0a8e12caead1c 100644
--- a/pandas/core/groupby/categorical.py
+++ b/pandas/core/groupby/categorical.py
@@ -53,7 +53,7 @@ def recode_for_groupby(
unique_codes = unique1d(c.codes)
take_codes = unique_codes[unique_codes != -1]
- if c.ordered:
+ if c.ordered or sort:
take_codes = np.sort(take_codes)
# we recode according to the uniques
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 1cc5e90f9a3a4..7da7ea119cea3 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -476,11 +476,15 @@ def __init__(
# In extant tests, the new self.grouping_vector matches
# `index.get_level_values(ilevel)` whenever
# mapper is None and isinstance(index, MultiIndex)
+ if isinstance(index, MultiIndex):
+ index_level = index.get_level_values(ilevel)
+ else:
+ index_level = index
(
self.grouping_vector, # Index
self._codes,
self._group_index,
- ) = index._get_grouper_for_level(mapper, level=ilevel, dropna=dropna)
+ ) = index_level._get_grouper_for_level(mapper, dropna=dropna)
# a passed Grouper like, directly get the grouper in the same way
# as single grouper groupby, use the group_info to get codes
@@ -504,15 +508,6 @@ def __init__(
# use Index instead of ndarray so we can recover the name
self.grouping_vector = Index(ng, name=newgrouper.result_index.name)
- elif is_categorical_dtype(self.grouping_vector):
- # a passed Categorical
- self._passed_categorical = True
-
- self._orig_cats = self.grouping_vector.categories
- self.grouping_vector, self._all_grouper = recode_for_groupby(
- self.grouping_vector, sort, observed
- )
-
elif not isinstance(
self.grouping_vector, (Series, Index, ExtensionArray, np.ndarray)
):
@@ -542,6 +537,14 @@ def __init__(
# TODO 2022-10-08 we only have one test that gets here and
# values are already in nanoseconds in that case.
self.grouping_vector = Series(self.grouping_vector).to_numpy()
+ elif is_categorical_dtype(self.grouping_vector):
+ # a passed Categorical
+ self._passed_categorical = True
+
+ self._orig_cats = self.grouping_vector.categories
+ self.grouping_vector, self._all_grouper = recode_for_groupby(
+ self.grouping_vector, sort, observed
+ )
def __repr__(self) -> str:
return f"Grouping({self.name})"
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 935c39af8af3a..3cdd87bd650a2 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -870,14 +870,20 @@ def test_apply_multi_level_name(category):
b = [1, 2] * 5
if category:
b = pd.Categorical(b, categories=[1, 2, 3])
- expected_index = pd.CategoricalIndex([1, 2], categories=[1, 2, 3], name="B")
+ expected_index = pd.CategoricalIndex([1, 2, 3], categories=[1, 2, 3], name="B")
+ # GH#40669 - summing an empty frame gives float dtype
+ expected_values = [20.0, 25.0, 0.0]
else:
expected_index = Index([1, 2], name="B")
+ expected_values = [20, 25]
+ expected = DataFrame(
+ {"C": expected_values, "D": expected_values}, index=expected_index
+ )
+
df = DataFrame(
{"A": np.arange(10), "B": b, "C": list(range(10)), "D": list(range(10))}
).set_index(["A", "B"])
result = df.groupby("B").apply(lambda x: x.sum())
- expected = DataFrame({"C": [20, 25], "D": [20, 25]}, index=expected_index)
tm.assert_frame_equal(result, expected)
assert df.index.names == ["A", "B"]
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 092fd4a4d6be0..f8c7cdf658ebf 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -487,6 +487,60 @@ def test_observed_groups(observed):
tm.assert_dict_equal(result, expected)
+@pytest.mark.parametrize(
+ "keys, expected_values, expected_index_levels",
+ [
+ ("a", [15, 9, 0], CategoricalIndex([1, 2, 3], name="a")),
+ (
+ ["a", "b"],
+ [7, 8, 0, 0, 0, 9, 0, 0, 0],
+ [CategoricalIndex([1, 2, 3], name="a"), Index([4, 5, 6])],
+ ),
+ (
+ ["a", "a2"],
+ [15, 0, 0, 0, 9, 0, 0, 0, 0],
+ [
+ CategoricalIndex([1, 2, 3], name="a"),
+ CategoricalIndex([1, 2, 3], name="a"),
+ ],
+ ),
+ ],
+)
+@pytest.mark.parametrize("test_series", [True, False])
+def test_unobserved_in_index(keys, expected_values, expected_index_levels, test_series):
+ # GH#49354 - ensure unobserved cats occur when grouping by index levels
+ df = DataFrame(
+ {
+ "a": Categorical([1, 1, 2], categories=[1, 2, 3]),
+ "a2": Categorical([1, 1, 2], categories=[1, 2, 3]),
+ "b": [4, 5, 6],
+ "c": [7, 8, 9],
+ }
+ ).set_index(["a", "a2"])
+ if "b" not in keys:
+ # Only keep b when it is used for grouping for consistent columns in the result
+ df = df.drop(columns="b")
+
+ gb = df.groupby(keys, observed=False)
+ if test_series:
+ gb = gb["c"]
+ result = gb.sum()
+
+ if len(keys) == 1:
+ index = expected_index_levels
+ else:
+ codes = [[0, 0, 0, 1, 1, 1, 2, 2, 2], 3 * [0, 1, 2]]
+ index = MultiIndex(
+ expected_index_levels,
+ codes=codes,
+ names=keys,
+ )
+ expected = DataFrame({"c": expected_values}, index=index)
+ if test_series:
+ expected = expected["c"]
+ tm.assert_equal(result, expected)
+
+
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
@@ -1234,11 +1288,12 @@ def df_cat(df):
@pytest.mark.parametrize("operation", ["agg", "apply"])
def test_seriesgroupby_observed_true(df_cat, operation):
- # GH 24880
- lev_a = Index(["foo", "foo", "bar", "bar"], dtype=df_cat["A"].dtype, name="A")
- lev_b = Index(["one", "two", "one", "three"], dtype=df_cat["B"].dtype, name="B")
+ # GH#24880
+ # GH#49223 - order of results was wrong when grouping by index levels
+ lev_a = Index(["bar", "bar", "foo", "foo"], dtype=df_cat["A"].dtype, name="A")
+ lev_b = Index(["one", "three", "one", "two"], dtype=df_cat["B"].dtype, name="B")
index = MultiIndex.from_arrays([lev_a, lev_b])
- expected = Series(data=[1, 3, 2, 4], index=index, name="C")
+ expected = Series(data=[2, 4, 1, 3], index=index, name="C")
grouped = df_cat.groupby(["A", "B"], observed=True)["C"]
result = getattr(grouped, operation)(sum)
@@ -1249,6 +1304,7 @@ def test_seriesgroupby_observed_true(df_cat, operation):
@pytest.mark.parametrize("observed", [False, None])
def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
# GH 24880
+ # GH#49223 - order of results was wrong when grouping by index levels
index, _ = MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
@@ -1272,16 +1328,16 @@ def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
True,
MultiIndex.from_arrays(
[
- Index(["foo"] * 4 + ["bar"] * 4, dtype="category", name="A"),
+ Index(["bar"] * 4 + ["foo"] * 4, dtype="category", name="A"),
Index(
- ["one", "one", "two", "two", "one", "one", "three", "three"],
+ ["one", "one", "three", "three", "one", "one", "two", "two"],
dtype="category",
name="B",
),
Index(["min", "max"] * 4),
]
),
- [1, 1, 3, 3, 2, 2, 4, 4],
+ [2, 2, 4, 4, 1, 1, 3, 3],
),
(
False,
@@ -1857,7 +1913,7 @@ def test_category_order_reducer(
if (
reduction_func in ("idxmax", "idxmin")
and not observed
- and index_kind == "range"
+ and index_kind != "multi"
):
msg = "GH#10694 - idxmax/min fail with unused categories"
request.node.add_marker(pytest.mark.xfail(reason=msg))
@@ -2005,10 +2061,13 @@ def test_category_order_apply(as_index, sort, observed, method, index_kind, orde
@pytest.mark.parametrize("index_kind", ["range", "single", "multi"])
-def test_many_categories(as_index, sort, index_kind, ordered):
+def test_many_categories(request, as_index, sort, index_kind, ordered):
# GH#48749 - Test when the grouper has many categories
if index_kind != "range" and not as_index:
pytest.skip(reason="Result doesn't have categories, nothing to test")
+ if index_kind == "multi" and as_index and not sort and ordered:
+ msg = "GH#48749 - values are unsorted even though the Categorical is ordered"
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
categories = np.arange(9999, -1, -1)
grouper = Categorical([2, 1, 2, 3], categories=categories, ordered=ordered)
df = DataFrame({"a": grouper, "b": range(4)})
@@ -2025,11 +2084,7 @@ def test_many_categories(as_index, sort, index_kind, ordered):
result = gb.sum()
# Test is setup so that data and index are the same values
- # TODO: GH#49223 - Order of values should be the same for all index_kinds
- if index_kind == "range":
- data = [3, 2, 1] if ordered else [2, 1, 3]
- else:
- data = [3, 2, 1] if sort else [2, 1, 3]
+ data = [3, 2, 1] if sort or ordered else [2, 1, 3]
index = CategoricalIndex(
data, categories=grouper.categories, ordered=ordered, name="a"
| - [x] closes #49354 (Replace xxxx with the GitHub issue number)
- [x] closes #49223
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
@gt-on-1234 - my apologies here; I was meaning to just tackle #49354 but found that my changes leaked over into #49223. Take a look and let me know what you think.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49373 | 2022-10-28T17:58:43Z | 2022-11-07T18:28:24Z | 2022-11-07T18:28:24Z | 2022-11-08T02:32:02Z |
DEPR: object-dtype bool_only | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 252c444b2e60c..29436b5095776 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -257,6 +257,8 @@ Removal of prior version deprecations/changes
- Changed behavior of :class:`DataFrame` constructor when passed a ``dtype`` (other than int) that the data cannot be cast to; it now raises instead of silently ignoring the dtype (:issue:`41733`)
- Changed the behavior of :class:`Series` constructor, it will no longer infer a datetime64 or timedelta64 dtype from string entries (:issue:`41731`)
- Changed behavior of :class:`Index` constructor when passed a ``SparseArray`` or ``SparseDtype`` to retain that dtype instead of casting to ``numpy.ndarray`` (:issue:`43930`)
+- Changed behavior of :meth:`DataFrame.any` and :meth:`DataFrame.all` with ``bool_only=True``; object-dtype columns with all-bool values will no longer be included, manually cast to ``bool`` dtype first (:issue:`46188`)
+-
.. ---------------------------------------------------------------------------
.. _whatsnew_200.performance:
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index 893e4a9be58ef..de240a39e2951 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -6,13 +6,10 @@
from numbers import Number
import re
from typing import Pattern
-import warnings
import numpy as np
from pandas._libs import lib
-from pandas._typing import ArrayLike
-from pandas.util._exceptions import find_stack_level
is_bool = lib.is_bool
@@ -425,42 +422,3 @@ def is_dataclass(item):
return is_dataclass(item) and not isinstance(item, type)
except ImportError:
return False
-
-
-def is_inferred_bool_dtype(arr: ArrayLike) -> bool:
- """
- Check if this is a ndarray[bool] or an ndarray[object] of bool objects.
-
- Parameters
- ----------
- arr : np.ndarray or ExtensionArray
-
- Returns
- -------
- bool
-
- Notes
- -----
- This does not include the special treatment is_bool_dtype uses for
- Categorical.
- """
- if not isinstance(arr, np.ndarray):
- return False
-
- dtype = arr.dtype
- if dtype == np.dtype(bool):
- return True
- elif dtype == np.dtype("object"):
- result = lib.is_bool_array(arr)
- if result:
- # GH#46188
- warnings.warn(
- "In a future version, object-dtype columns with all-bool values "
- "will not be included in reductions with bool_only=True. "
- "Explicitly cast to bool dtype instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- return result
-
- return False
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py
index 4aa16257b0802..f6e50d658a580 100644
--- a/pandas/core/internals/array_manager.py
+++ b/pandas/core/internals/array_manager.py
@@ -52,7 +52,6 @@
ABCDataFrame,
ABCSeries,
)
-from pandas.core.dtypes.inference import is_inferred_bool_dtype
from pandas.core.dtypes.missing import (
array_equals,
isna,
@@ -488,7 +487,7 @@ def get_bool_data(self: T, copy: bool = False) -> T:
copy : bool, default False
Whether to copy the blocks
"""
- return self._get_data_subset(is_inferred_bool_dtype)
+ return self._get_data_subset(lambda x: x.dtype == np.dtype(bool))
def get_numeric_data(self: T, copy: bool = False) -> T:
"""
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 83c1ca0084724..f0fdd9a58720e 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -70,7 +70,6 @@
ABCPandasArray,
ABCSeries,
)
-from pandas.core.dtypes.inference import is_inferred_bool_dtype
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
@@ -194,7 +193,7 @@ def is_bool(self) -> bool:
"""
We can be bool if a) we are bool dtype or b) object dtype with bool objects.
"""
- return is_inferred_bool_dtype(self.values)
+ return self.values.dtype == np.dtype(bool)
@final
def external_values(self):
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 963ed24cb434b..0ce17a050da82 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -1280,7 +1280,6 @@ def test_any_all_object(self):
assert result is False
def test_any_all_object_bool_only(self):
- msg = "object-dtype columns with all-bool values"
df = DataFrame({"A": ["foo", 2], "B": [True, False]}).astype(object)
df._consolidate_inplace()
@@ -1291,36 +1290,29 @@ def test_any_all_object_bool_only(self):
# The underlying bug is in DataFrame._get_bool_data, so we check
# that while we're here
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = df._get_bool_data()
- expected = df[["B", "C"]]
+ res = df._get_bool_data()
+ expected = df[["C"]]
tm.assert_frame_equal(res, expected)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = df.all(bool_only=True, axis=0)
- expected = Series([False, True], index=["B", "C"])
+ res = df.all(bool_only=True, axis=0)
+ expected = Series([True], index=["C"])
tm.assert_series_equal(res, expected)
# operating on a subset of columns should not produce a _larger_ Series
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = df[["B", "C"]].all(bool_only=True, axis=0)
+ res = df[["B", "C"]].all(bool_only=True, axis=0)
tm.assert_series_equal(res, expected)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- assert not df.all(bool_only=True, axis=None)
+ assert df.all(bool_only=True, axis=None)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = df.any(bool_only=True, axis=0)
- expected = Series([True, True], index=["B", "C"])
+ res = df.any(bool_only=True, axis=0)
+ expected = Series([True], index=["C"])
tm.assert_series_equal(res, expected)
# operating on a subset of columns should not produce a _larger_ Series
- with tm.assert_produces_warning(FutureWarning, match=msg):
- res = df[["B", "C"]].any(bool_only=True, axis=0)
+ res = df[["C"]].any(bool_only=True, axis=0)
tm.assert_series_equal(res, expected)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- assert df.any(bool_only=True, axis=None)
+ assert df.any(bool_only=True, axis=None)
@pytest.mark.parametrize("method", ["any", "all"])
def test_any_all_level_axis_none_raises(self, method):
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index b30b27f5bae1a..b64220d90f9a2 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -795,7 +795,6 @@ def test_get_numeric_data(self, using_copy_on_write):
)
def test_get_bool_data(self, using_copy_on_write):
- msg = "object-dtype columns with all-bool values"
mgr = create_mgr(
"int: int; float: float; complex: complex;"
"str: object; bool: bool; obj: object; dt: datetime",
@@ -803,9 +802,8 @@ def test_get_bool_data(self, using_copy_on_write):
)
mgr.iset(6, np.array([True, False, True], dtype=np.object_))
- with tm.assert_produces_warning(FutureWarning, match=msg):
- bools = mgr.get_bool_data()
- tm.assert_index_equal(bools.items, Index(["bool", "dt"]))
+ bools = mgr.get_bool_data()
+ tm.assert_index_equal(bools.items, Index(["bool"]))
tm.assert_almost_equal(
mgr.iget(mgr.items.get_loc("bool")).internal_values(),
bools.iget(bools.items.get_loc("bool")).internal_values(),
@@ -824,8 +822,7 @@ def test_get_bool_data(self, using_copy_on_write):
)
# Check sharing
- with tm.assert_produces_warning(FutureWarning, match=msg):
- bools2 = mgr.get_bool_data(copy=True)
+ bools2 = mgr.get_bool_data(copy=True)
bools2.iset(0, np.array([False, True, False]))
if using_copy_on_write:
tm.assert_numpy_array_equal(
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49371 | 2022-10-28T14:47:51Z | 2022-10-28T16:47:51Z | 2022-10-28T16:47:51Z | 2022-10-28T17:41:04Z |
Backport PR #49308 on branch 1.5.x (DOC: Added pre-commit link inside the guideline for developers.) | diff --git a/doc/source/development/contributing_environment.rst b/doc/source/development/contributing_environment.rst
index 4a70057cf18e3..afa0d0306f1af 100644
--- a/doc/source/development/contributing_environment.rst
+++ b/doc/source/development/contributing_environment.rst
@@ -10,7 +10,7 @@ To test out code changes, you'll need to build pandas from source, which
requires a C/C++ compiler and Python environment. If you're making documentation
changes, you can skip to :ref:`contributing to the documentation <contributing_documentation>` but if you skip
creating the development environment you won't be able to build the documentation
-locally before pushing your changes.
+locally before pushing your changes. It's recommended to also install the :ref:`pre-commit hooks <contributing.pre-commit>`.
.. contents:: Table of contents:
:local:
| Backport PR #49308: DOC: Added pre-commit link inside the guideline for developers. | https://api.github.com/repos/pandas-dev/pandas/pulls/49370 | 2022-10-28T13:18:58Z | 2022-10-28T15:30:23Z | 2022-10-28T15:30:23Z | 2022-10-28T15:30:23Z |
STYLE: fix pylint use-maxsplit-arg warning | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 252670565ebff..39acc28451f54 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -236,7 +236,7 @@
if ".dev" in version:
switcher_version = "dev"
elif "rc" in version:
- switcher_version = version.split("rc")[0] + " (rc)"
+ switcher_version = version.split("rc", maxsplit=1)[0] + " (rc)"
html_theme_options = {
"external_links": [],
diff --git a/pyproject.toml b/pyproject.toml
index 63c2719b3b0fd..d087253a462c7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -97,7 +97,6 @@ disable = [
"unneeded-not",
"use-implicit-booleaness-not-comparison",
"use-implicit-booleaness-not-len",
- "use-maxsplit-arg",
"use-sequence-for-iteration",
"useless-import-alias",
"wrong-import-order",
| Related to https://github.com/pandas-dev/pandas/issues/48855
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49369 | 2022-10-28T13:01:50Z | 2022-10-28T14:21:23Z | 2022-10-28T14:21:23Z | 2022-10-28T14:31:52Z |
DEPR: Remove datetime_is_numeric in describe | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index eab8df5ccff73..9c6faa03edf6a 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -194,6 +194,7 @@ Removal of prior version deprecations/changes
- Removed argument ``sort_columns`` in :meth:`DataFrame.plot` and :meth:`Series.plot` (:issue:`47563`)
- Removed argument ``is_copy`` from :meth:`DataFrame.take` and :meth:`Series.take` (:issue:`30615`)
- Removed argument ``kind`` from :meth:`Index.get_slice_bound`, :meth:`Index.slice_indexer` and :meth:`Index.slice_locs` (:issue:`41378`)
+- Removed argument ``datetime_is_numeric`` from :meth:`DataFrame.describe` and :meth:`Series.describe` as datetime data will always be summarized as numeric data (:issue:`34798`)
- Disallow subclass-specific keywords (e.g. "freq", "tz", "names", "closed") in the :class:`Index` constructor (:issue:`38597`)
- Removed argument ``inplace`` from :meth:`Categorical.remove_unused_categories` (:issue:`37918`)
- Disallow passing non-round floats to :class:`Timestamp` with ``unit="M"`` or ``unit="Y"`` (:issue:`47266`)
diff --git a/pandas/core/describe.py b/pandas/core/describe.py
index ce2fa950e6e62..33afbfe6489a6 100644
--- a/pandas/core/describe.py
+++ b/pandas/core/describe.py
@@ -17,7 +17,6 @@
Sequence,
cast,
)
-import warnings
import numpy as np
@@ -27,7 +26,6 @@
NDFrameT,
npt,
)
-from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_percentile
from pandas.core.dtypes.common import (
@@ -56,7 +54,6 @@ def describe_ndframe(
obj: NDFrameT,
include: str | Sequence[str] | None,
exclude: str | Sequence[str] | None,
- datetime_is_numeric: bool,
percentiles: Sequence[float] | np.ndarray | None,
) -> NDFrameT:
"""Describe series or dataframe.
@@ -71,8 +68,6 @@ def describe_ndframe(
A white list of data types to include in the result. Ignored for ``Series``.
exclude : list-like of dtypes or None (default), optional,
A black list of data types to omit from the result. Ignored for ``Series``.
- datetime_is_numeric : bool, default False
- Whether to treat datetime dtypes as numeric.
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should fall between 0 and 1.
The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and
@@ -88,14 +83,12 @@ def describe_ndframe(
if obj.ndim == 1:
describer = SeriesDescriber(
obj=cast("Series", obj),
- datetime_is_numeric=datetime_is_numeric,
)
else:
describer = DataFrameDescriber(
obj=cast("DataFrame", obj),
include=include,
exclude=exclude,
- datetime_is_numeric=datetime_is_numeric,
)
result = describer.describe(percentiles=percentiles)
@@ -109,13 +102,10 @@ class NDFrameDescriberAbstract(ABC):
----------
obj : Series or DataFrame
Object to be described.
- datetime_is_numeric : bool
- Whether to treat datetime dtypes as numeric.
"""
- def __init__(self, obj: DataFrame | Series, datetime_is_numeric: bool) -> None:
+ def __init__(self, obj: DataFrame | Series) -> None:
self.obj = obj
- self.datetime_is_numeric = datetime_is_numeric
@abstractmethod
def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame | Series:
@@ -136,7 +126,6 @@ class SeriesDescriber(NDFrameDescriberAbstract):
def describe(self, percentiles: Sequence[float] | np.ndarray) -> Series:
describe_func = select_describe_func(
self.obj,
- self.datetime_is_numeric,
)
return describe_func(self.obj, percentiles)
@@ -152,8 +141,6 @@ class DataFrameDescriber(NDFrameDescriberAbstract):
A white list of data types to include in the result.
exclude : list-like of dtypes or None
A black list of data types to omit from the result.
- datetime_is_numeric : bool
- Whether to treat datetime dtypes as numeric.
"""
def __init__(
@@ -162,7 +149,6 @@ def __init__(
*,
include: str | Sequence[str] | None,
exclude: str | Sequence[str] | None,
- datetime_is_numeric: bool,
) -> None:
self.include = include
self.exclude = exclude
@@ -170,14 +156,14 @@ def __init__(
if obj.ndim == 2 and obj.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
- super().__init__(obj, datetime_is_numeric=datetime_is_numeric)
+ super().__init__(obj)
def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame:
data = self._select_data()
ldesc: list[Series] = []
for _, series in data.items():
- describe_func = select_describe_func(series, self.datetime_is_numeric)
+ describe_func = select_describe_func(series)
ldesc.append(describe_func(series, percentiles))
col_names = reorder_columns(ldesc)
@@ -193,9 +179,7 @@ def _select_data(self):
"""Select columns to be described."""
if (self.include is None) and (self.exclude is None):
# when some numerics are found, keep only numerics
- default_include: list[npt.DTypeLike] = [np.number]
- if self.datetime_is_numeric:
- default_include.append("datetime")
+ default_include: list[npt.DTypeLike] = [np.number, "datetime"]
data = self.obj.select_dtypes(include=default_include)
if len(data.columns) == 0:
data = self.obj
@@ -360,7 +344,6 @@ def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series:
def select_describe_func(
data: Series,
- datetime_is_numeric: bool,
) -> Callable:
"""Select proper function for describing series based on data type.
@@ -368,26 +351,13 @@ def select_describe_func(
----------
data : Series
Series to be described.
- datetime_is_numeric : bool
- Whether to treat datetime dtypes as numeric.
"""
if is_bool_dtype(data.dtype):
return describe_categorical_1d
elif is_numeric_dtype(data):
return describe_numeric_1d
elif is_datetime64_any_dtype(data.dtype):
- if datetime_is_numeric:
- return describe_timestamp_1d
- else:
- warnings.warn(
- "Treating datetime data as categorical rather than numeric in "
- "`.describe` is deprecated and will be removed in a future "
- "version of pandas. Specify `datetime_is_numeric=True` to "
- "silence this warning and adopt the future behavior now.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- return describe_timestamp_as_categorical_1d
+ return describe_timestamp_1d
elif is_timedelta64_dtype(data.dtype):
return describe_numeric_1d
else:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 8bf3820d2ea3c..021cdd01ea1a6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10545,7 +10545,6 @@ def describe(
percentiles=None,
include=None,
exclude=None,
- datetime_is_numeric: bool_t = False,
) -> NDFrameT:
"""
Generate descriptive statistics.
@@ -10591,12 +10590,6 @@ def describe(
``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
- datetime_is_numeric : bool, default False
- Whether to treat datetime dtypes as numeric. This affects statistics
- calculated for the column. For DataFrame input, this also
- controls whether datetime columns are included by default.
-
- .. versionadded:: 1.1.0
Returns
-------
@@ -10674,7 +10667,7 @@ def describe(
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
- >>> s.describe(datetime_is_numeric=True)
+ >>> s.describe()
count 3
mean 2006-09-01 08:00:00
min 2000-01-01 00:00:00
@@ -10792,7 +10785,6 @@ def describe(
obj=self,
include=include,
exclude=exclude,
- datetime_is_numeric=datetime_is_numeric,
percentiles=percentiles,
)
diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py
index 24d327a101143..e2b8a0f63c31a 100644
--- a/pandas/tests/frame/methods/test_describe.py
+++ b/pandas/tests/frame/methods/test_describe.py
@@ -274,12 +274,12 @@ def test_describe_tz_values(self, tz_naive_fixture):
},
index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"],
)
- result = df.describe(include="all", datetime_is_numeric=True)
+ result = df.describe(include="all")
tm.assert_frame_equal(result, expected)
def test_datetime_is_numeric_includes_datetime(self):
df = DataFrame({"a": date_range("2012", periods=3), "b": [1, 2, 3]})
- result = df.describe(datetime_is_numeric=True)
+ result = df.describe()
expected = DataFrame(
{
"a": [
@@ -307,36 +307,22 @@ def test_describe_tz_values2(self):
df = DataFrame({"s1": s1, "s2": s2})
s1_ = s1.describe()
- s2_ = Series(
- [
- 5,
- 5,
- s2.value_counts().index[0],
- 1,
- start.tz_localize(tz),
- end.tz_localize(tz),
- ],
- index=["count", "unique", "top", "freq", "first", "last"],
- )
+ s2_ = s2.describe()
idx = [
"count",
- "unique",
- "top",
- "freq",
- "first",
- "last",
"mean",
- "std",
"min",
"25%",
"50%",
"75%",
"max",
+ "std",
]
- expected = pd.concat([s1_, s2_], axis=1, keys=["s1", "s2"]).loc[idx]
+ expected = pd.concat([s1_, s2_], axis=1, keys=["s1", "s2"]).reindex(
+ idx, copy=False
+ )
- with tm.assert_produces_warning(FutureWarning):
- result = df.describe(include="all")
+ result = df.describe(include="all")
tm.assert_frame_equal(result, expected)
def test_describe_percentiles_integer_idx(self):
diff --git a/pandas/tests/series/methods/test_describe.py b/pandas/tests/series/methods/test_describe.py
index a7cedd580b2d0..3d813268b57be 100644
--- a/pandas/tests/series/methods/test_describe.py
+++ b/pandas/tests/series/methods/test_describe.py
@@ -99,7 +99,7 @@ def test_describe_with_tz(self, tz_naive_fixture):
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s = Series(date_range(start, end, tz=tz), name=name)
- result = s.describe(datetime_is_numeric=True)
+ result = s.describe()
expected = Series(
[
5,
@@ -115,32 +115,32 @@ def test_describe_with_tz(self, tz_naive_fixture):
)
tm.assert_series_equal(result, expected)
- def test_describe_with_tz_warns(self):
+ def test_describe_with_tz_numeric(self):
name = tz = "CET"
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s = Series(date_range(start, end, tz=tz), name=name)
- with tm.assert_produces_warning(FutureWarning):
- result = s.describe()
+ result = s.describe()
expected = Series(
[
5,
- 5,
- s.value_counts().index[0],
- 1,
- start.tz_localize(tz),
- end.tz_localize(tz),
+ Timestamp("2018-01-03 00:00:00", tz=tz),
+ Timestamp("2018-01-01 00:00:00", tz=tz),
+ Timestamp("2018-01-02 00:00:00", tz=tz),
+ Timestamp("2018-01-03 00:00:00", tz=tz),
+ Timestamp("2018-01-04 00:00:00", tz=tz),
+ Timestamp("2018-01-05 00:00:00", tz=tz),
],
name=name,
- index=["count", "unique", "top", "freq", "first", "last"],
+ index=["count", "mean", "min", "25%", "50%", "75%", "max"],
)
tm.assert_series_equal(result, expected)
def test_datetime_is_numeric_includes_datetime(self):
s = Series(date_range("2012", periods=3))
- result = s.describe(datetime_is_numeric=True)
+ result = s.describe()
expected = Series(
[
3,
| Introduced in https://github.com/pandas-dev/pandas/pull/34798 | https://api.github.com/repos/pandas-dev/pandas/pulls/49368 | 2022-10-28T05:37:42Z | 2022-10-31T19:17:59Z | 2022-10-31T19:17:59Z | 2022-10-31T19:18:03Z |
ENH: Implement io.nullable_backend config for read_csv(engine="pyarrow") | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 4df2886454c0a..70018e985af19 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -28,10 +28,24 @@ Available optional dependencies (listed in order of appearance at `install guide
``[all, performance, computation, timezone, fss, aws, gcp, excel, parquet, feather, hdf5, spss, postgresql, mysql,
sql-other, html, xml, plot, output_formatting, clipboard, compression, test]`` (:issue:`39164`).
-.. _whatsnew_200.enhancements.enhancement2:
+.. _whatsnew_200.enhancements.io_readers_nullable_pyarrow:
-enhancement2
-^^^^^^^^^^^^
+Configuration option, ``io.nullable_backend``, to return pyarrow-backed dtypes from IO functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A new global configuration, ``io.nullable_backend`` can now be used in conjunction with the parameter ``use_nullable_dtypes=True`` in :func:`read_parquet` and :func:`read_csv` (with ``engine="pyarrow"``)
+to return pyarrow-backed dtypes when set to ``"pyarrow"`` (:issue:`48957`).
+
+.. ipython:: python
+
+ import io
+ data = io.StringIO("""a,b,c,d,e,f,g,h,i
+ 1,2.5,True,a,,,,,
+ 3,4.5,False,b,6,7.5,True,a,
+ """)
+ with pd.option_context("io.nullable_backend", "pyarrow"):
+ df = pd.read_csv(data, use_nullable_dtypes=True, engine="pyarrow")
+ df
.. _whatsnew_200.enhancements.other:
@@ -42,7 +56,6 @@ Other enhancements
- :meth:`Series.add_suffix`, :meth:`DataFrame.add_suffix`, :meth:`Series.add_prefix` and :meth:`DataFrame.add_prefix` support an ``axis`` argument. If ``axis`` is set, the default behaviour of which axis to consider can be overwritten (:issue:`47819`)
- :func:`assert_frame_equal` now shows the first element where the DataFrames differ, analogously to ``pytest``'s output (:issue:`47910`)
- Added new argument ``use_nullable_dtypes`` to :func:`read_csv` and :func:`read_excel` to enable automatic conversion to nullable dtypes (:issue:`36712`)
-- Added new global configuration, ``io.nullable_backend`` to allow ``use_nullable_dtypes=True`` to return pyarrow-backed dtypes when set to ``"pyarrow"`` in :func:`read_parquet` (:issue:`48957`)
- Added ``index`` parameter to :meth:`DataFrame.to_dict` (:issue:`46398`)
- Added metadata propagation for binary operators on :class:`DataFrame` (:issue:`28283`)
- :class:`.CategoricalConversionWarning`, :class:`.InvalidComparison`, :class:`.InvalidVersion`, :class:`.LossySetitemError`, and :class:`.NoBufferPresent` are now exposed in ``pandas.errors`` (:issue:`27656`)
diff --git a/pandas/io/parsers/arrow_parser_wrapper.py b/pandas/io/parsers/arrow_parser_wrapper.py
index 6cc56bb1c8840..68158a30f7fdf 100644
--- a/pandas/io/parsers/arrow_parser_wrapper.py
+++ b/pandas/io/parsers/arrow_parser_wrapper.py
@@ -1,16 +1,17 @@
from __future__ import annotations
-from typing import TYPE_CHECKING
-
from pandas._typing import ReadBuffer
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.inference import is_integer
-from pandas.io.parsers.base_parser import ParserBase
+from pandas import (
+ DataFrame,
+ arrays,
+ get_option,
+)
-if TYPE_CHECKING:
- from pandas import DataFrame
+from pandas.io.parsers.base_parser import ParserBase
class ArrowParserWrapper(ParserBase):
@@ -77,7 +78,7 @@ def _get_pyarrow_options(self) -> None:
else self.kwds["skiprows"],
}
- def _finalize_output(self, frame: DataFrame) -> DataFrame:
+ def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame:
"""
Processes data read in based on kwargs.
@@ -148,6 +149,16 @@ def read(self) -> DataFrame:
parse_options=pyarrow_csv.ParseOptions(**self.parse_options),
convert_options=pyarrow_csv.ConvertOptions(**self.convert_options),
)
-
- frame = table.to_pandas()
- return self._finalize_output(frame)
+ if (
+ self.kwds["use_nullable_dtypes"]
+ and get_option("io.nullable_backend") == "pyarrow"
+ ):
+ frame = DataFrame(
+ {
+ col_name: arrays.ArrowExtensionArray(pa_col)
+ for col_name, pa_col in zip(table.column_names, table.itercolumns())
+ }
+ )
+ else:
+ frame = table.to_pandas()
+ return self._finalize_pandas_output(frame)
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index f2b466b06e062..af7b6027574e9 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -24,6 +24,8 @@
import numpy as np
+from pandas._config import get_option
+
from pandas._libs import lib
from pandas._libs.parsers import STR_NA_VALUES
from pandas._typing import (
@@ -560,6 +562,14 @@ def _read(
raise ValueError(
"The 'chunksize' option is not supported with the 'pyarrow' engine"
)
+ elif (
+ kwds.get("use_nullable_dtypes", False)
+ and get_option("io.nullable_backend") == "pyarrow"
+ ):
+ raise NotImplementedError(
+ f"use_nullable_dtypes=True and engine={kwds['engine']} with "
+ "io.nullable_backend set to 'pyarrow' is not implemented."
+ )
else:
chunksize = validate_integer("chunksize", chunksize, 1)
diff --git a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
index f5b3b608bd59e..030b38cceeb39 100644
--- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
+++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
@@ -9,7 +9,6 @@
import pytest
from pandas.errors import ParserWarning
-import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
@@ -22,13 +21,10 @@
StringArray,
)
-# TODO(1.4): Change me into xfail at release time
-# and xfail individual tests
-pytestmark = pytest.mark.usefixtures("pyarrow_skip")
-
@pytest.mark.parametrize("dtype", [str, object])
@pytest.mark.parametrize("check_orig", [True, False])
+@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtype_all_columns(all_parsers, dtype, check_orig):
# see gh-3795, gh-6607
parser = all_parsers
@@ -53,6 +49,7 @@ def test_dtype_all_columns(all_parsers, dtype, check_orig):
tm.assert_frame_equal(result, expected)
+@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
@@ -71,6 +68,7 @@ def test_dtype_per_column(all_parsers):
tm.assert_frame_equal(result, expected)
+@pytest.mark.usefixtures("pyarrow_xfail")
def test_invalid_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
@@ -84,6 +82,7 @@ def test_invalid_dtype_per_column(all_parsers):
parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
+@pytest.mark.usefixtures("pyarrow_xfail")
def test_raise_on_passed_int_dtype_with_nas(all_parsers):
# see gh-2631
parser = all_parsers
@@ -101,6 +100,7 @@ def test_raise_on_passed_int_dtype_with_nas(all_parsers):
parser.read_csv(StringIO(data), dtype={"DOY": np.int64}, skipinitialspace=True)
+@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtype_with_converters(all_parsers):
parser = all_parsers
data = """a,b
@@ -132,6 +132,7 @@ def test_numeric_dtype(all_parsers, dtype):
tm.assert_frame_equal(expected, result)
+@pytest.mark.usefixtures("pyarrow_xfail")
def test_boolean_dtype(all_parsers):
parser = all_parsers
data = "\n".join(
@@ -184,6 +185,7 @@ def test_boolean_dtype(all_parsers):
tm.assert_frame_equal(result, expected)
+@pytest.mark.usefixtures("pyarrow_xfail")
def test_delimiter_with_usecols_and_parse_dates(all_parsers):
# GH#35873
result = all_parsers.read_csv(
@@ -264,6 +266,7 @@ def test_skip_whitespace(c_parser_only, float_precision):
tm.assert_series_equal(df.iloc[:, 1], pd.Series([1.2, 2.1, 1.0, 1.2], name="num"))
+@pytest.mark.usefixtures("pyarrow_xfail")
def test_true_values_cast_to_bool(all_parsers):
# GH#34655
text = """a,b
@@ -286,6 +289,7 @@ def test_true_values_cast_to_bool(all_parsers):
tm.assert_frame_equal(result, expected)
+@pytest.mark.usefixtures("pyarrow_xfail")
@pytest.mark.parametrize("dtypes, exp_value", [({}, "1"), ({"a.1": "int64"}, 1)])
def test_dtype_mangle_dup_cols(all_parsers, dtypes, exp_value):
# GH#35211
@@ -300,6 +304,7 @@ def test_dtype_mangle_dup_cols(all_parsers, dtypes, exp_value):
tm.assert_frame_equal(result, expected)
+@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtype_mangle_dup_cols_single_dtype(all_parsers):
# GH#42022
parser = all_parsers
@@ -309,6 +314,7 @@ def test_dtype_mangle_dup_cols_single_dtype(all_parsers):
tm.assert_frame_equal(result, expected)
+@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtype_multi_index(all_parsers):
# GH 42446
parser = all_parsers
@@ -355,6 +361,7 @@ def test_nullable_int_dtype(all_parsers, any_int_ea_dtype):
tm.assert_frame_equal(actual, expected)
+@pytest.mark.usefixtures("pyarrow_xfail")
@pytest.mark.parametrize("default", ["float", "float64"])
def test_dtypes_defaultdict(all_parsers, default):
# GH#41574
@@ -368,6 +375,7 @@ def test_dtypes_defaultdict(all_parsers, default):
tm.assert_frame_equal(result, expected)
+@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtypes_defaultdict_mangle_dup_cols(all_parsers):
# GH#41574
data = """a,b,a,b,b.1
@@ -381,6 +389,7 @@ def test_dtypes_defaultdict_mangle_dup_cols(all_parsers):
tm.assert_frame_equal(result, expected)
+@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtypes_defaultdict_invalid(all_parsers):
# GH#41574
data = """a,b
@@ -392,6 +401,7 @@ def test_dtypes_defaultdict_invalid(all_parsers):
parser.read_csv(StringIO(data), dtype=dtype)
+@pytest.mark.usefixtures("pyarrow_xfail")
def test_use_nullable_dtypes(all_parsers):
# GH#36712
@@ -435,11 +445,11 @@ def test_use_nullabla_dtypes_and_dtype(all_parsers):
tm.assert_frame_equal(result, expected)
-@td.skip_if_no("pyarrow")
+@pytest.mark.usefixtures("pyarrow_xfail")
@pytest.mark.parametrize("storage", ["pyarrow", "python"])
def test_use_nullable_dtypes_string(all_parsers, storage):
# GH#36712
- import pyarrow as pa
+ pa = pytest.importorskip("pyarrow")
with pd.option_context("mode.string_storage", storage):
@@ -477,3 +487,40 @@ def test_use_nullable_dtypes_ea_dtype_specified(all_parsers):
result = parser.read_csv(StringIO(data), dtype="Int64", use_nullable_dtypes=True)
expected = DataFrame({"a": [1], "b": 2}, dtype="Int64")
tm.assert_frame_equal(result, expected)
+
+
+def test_use_nullable_dtypes_pyarrow_backend(all_parsers, request):
+ # GH#36712
+ pa = pytest.importorskip("pyarrow")
+ parser = all_parsers
+
+ data = """a,b,c,d,e,f,g,h,i,j
+1,2.5,True,a,,,,,12-31-2019,
+3,4.5,False,b,6,7.5,True,a,12-31-2019,
+"""
+ with pd.option_context("io.nullable_backend", "pyarrow"):
+ if parser.engine != "pyarrow":
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=NotImplementedError,
+ reason=f"Not implemented with engine={parser.engine}",
+ )
+ )
+ result = parser.read_csv(
+ StringIO(data), use_nullable_dtypes=True, parse_dates=["i"]
+ )
+ expected = DataFrame(
+ {
+ "a": pd.Series([1, 3], dtype="int64[pyarrow]"),
+ "b": pd.Series([2.5, 4.5], dtype="float64[pyarrow]"),
+ "c": pd.Series([True, False], dtype="bool[pyarrow]"),
+ "d": pd.Series(["a", "b"], dtype=pd.ArrowDtype(pa.string())),
+ "e": pd.Series([pd.NA, 6], dtype="int64[pyarrow]"),
+ "f": pd.Series([pd.NA, 7.5], dtype="float64[pyarrow]"),
+ "g": pd.Series([pd.NA, True], dtype="bool[pyarrow]"),
+ "h": pd.Series(["", "a"], dtype=pd.ArrowDtype(pa.string())),
+ "i": pd.Series([Timestamp("2019-12-31")] * 2),
+ "j": pd.Series([pd.NA, pd.NA], dtype="null[pyarrow]"),
+ }
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] xref #48957 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49366 | 2022-10-28T04:49:49Z | 2022-11-05T20:52:28Z | 2022-11-05T20:52:28Z | 2022-11-07T17:33:45Z |
DEPR: Remove deprecated Timestamp.freq | diff --git a/asv_bench/benchmarks/tslibs/timestamp.py b/asv_bench/benchmarks/tslibs/timestamp.py
index eda9bce89188c..d7706a39dfae5 100644
--- a/asv_bench/benchmarks/tslibs/timestamp.py
+++ b/asv_bench/benchmarks/tslibs/timestamp.py
@@ -50,62 +50,58 @@ def time_from_pd_timestamp(self):
class TimestampProperties:
- _freqs = [None, "B"]
- params = [_tzs, _freqs]
- param_names = ["tz", "freq"]
+ params = [_tzs]
+ param_names = ["tz"]
- def setup(self, tz, freq):
- self.ts = Timestamp("2017-08-25 08:16:14", tzinfo=tz, freq=freq)
+ def setup(self, tz):
+ self.ts = Timestamp("2017-08-25 08:16:14", tzinfo=tz)
- def time_tz(self, tz, freq):
+ def time_tz(self, tz):
self.ts.tz
- def time_dayofweek(self, tz, freq):
+ def time_dayofweek(self, tz):
self.ts.dayofweek
- def time_dayofyear(self, tz, freq):
+ def time_dayofyear(self, tz):
self.ts.dayofyear
- def time_week(self, tz, freq):
+ def time_week(self, tz):
self.ts.week
- def time_quarter(self, tz, freq):
+ def time_quarter(self, tz):
self.ts.quarter
- def time_days_in_month(self, tz, freq):
+ def time_days_in_month(self, tz):
self.ts.days_in_month
- def time_freqstr(self, tz, freq):
- self.ts.freqstr
-
- def time_is_month_start(self, tz, freq):
+ def time_is_month_start(self, tz):
self.ts.is_month_start
- def time_is_month_end(self, tz, freq):
+ def time_is_month_end(self, tz):
self.ts.is_month_end
- def time_is_quarter_start(self, tz, freq):
+ def time_is_quarter_start(self, tz):
self.ts.is_quarter_start
- def time_is_quarter_end(self, tz, freq):
+ def time_is_quarter_end(self, tz):
self.ts.is_quarter_end
- def time_is_year_start(self, tz, freq):
+ def time_is_year_start(self, tz):
self.ts.is_year_start
- def time_is_year_end(self, tz, freq):
+ def time_is_year_end(self, tz):
self.ts.is_year_end
- def time_is_leap_year(self, tz, freq):
+ def time_is_leap_year(self, tz):
self.ts.is_leap_year
- def time_microsecond(self, tz, freq):
+ def time_microsecond(self, tz):
self.ts.microsecond
- def time_month_name(self, tz, freq):
+ def time_month_name(self, tz):
self.ts.month_name()
- def time_weekday_name(self, tz, freq):
+ def time_weekday_name(self, tz):
self.ts.day_name()
diff --git a/doc/redirects.csv b/doc/redirects.csv
index f0fab09196f26..3b83444c273a5 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -1321,8 +1321,6 @@ generated/pandas.Timestamp.daysinmonth,../reference/api/pandas.Timestamp.daysinm
generated/pandas.Timestamp.dst,../reference/api/pandas.Timestamp.dst
generated/pandas.Timestamp.floor,../reference/api/pandas.Timestamp.floor
generated/pandas.Timestamp.fold,../reference/api/pandas.Timestamp.fold
-generated/pandas.Timestamp.freq,../reference/api/pandas.Timestamp.freq
-generated/pandas.Timestamp.freqstr,../reference/api/pandas.Timestamp.freqstr
generated/pandas.Timestamp.fromisoformat,../reference/api/pandas.Timestamp.fromisoformat
generated/pandas.Timestamp.fromordinal,../reference/api/pandas.Timestamp.fromordinal
generated/pandas.Timestamp.fromtimestamp,../reference/api/pandas.Timestamp.fromtimestamp
diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst
index 17510a0b7d479..33a611b15675d 100644
--- a/doc/source/reference/arrays.rst
+++ b/doc/source/reference/arrays.rst
@@ -157,8 +157,6 @@ Methods
Timestamp.day_name
Timestamp.dst
Timestamp.floor
- Timestamp.freq
- Timestamp.freqstr
Timestamp.fromordinal
Timestamp.fromtimestamp
Timestamp.isocalendar
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index f5175283cce4e..06356c8b02e84 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1357,6 +1357,7 @@ the object's ``freq`` attribute (:issue:`21939`, :issue:`23878`).
*New behavior*:
.. ipython:: python
+ :okexcept:
:okwarning:
ts = pd.Timestamp('1994-05-06 12:15:16', freq=pd.offsets.Hour())
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index b7995dca0a825..240062bc42d0c 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -250,6 +250,7 @@ Deprecations
Removal of prior version deprecations/changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- Removed deprecated :attr:`Timestamp.freq`, :attr:`Timestamp.freqstr` and argument ``freq`` from the :class:`Timestamp` constructor and :meth:`Timestamp.fromordinal` (:issue:`14146`)
- Removed deprecated :class:`CategoricalBlock`, :meth:`Block.is_categorical`, require datetime64 and timedelta64 values to be wrapped in :class:`DatetimeArray` or :class:`TimedeltaArray` before passing to :meth:`Block.make_block_same_class`, require ``DatetimeTZBlock.values`` to have the correct ndim when passing to the :class:`BlockManager` constructor, and removed the "fastpath" keyword from the :class:`SingleBlockManager` constructor (:issue:`40226`, :issue:`40571`)
- Removed deprecated module ``pandas.core.index`` (:issue:`30193`)
- Removed deprecated alias ``pandas.core.tools.datetimes.to_time``, import the function directly from ``pandas.core.tools.times`` instead (:issue:`34145`)
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 26cd332c3007a..e2a291dfe632f 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -685,8 +685,6 @@ class NaTType(_NaT):
----------
ordinal : int
Date corresponding to a proleptic Gregorian ordinal.
- freq : str, DateOffset
- Offset to apply to the Timestamp.
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone for the Timestamp.
diff --git a/pandas/_libs/tslibs/timestamps.pxd b/pandas/_libs/tslibs/timestamps.pxd
index 397df11144d60..fc62e04961dcb 100644
--- a/pandas/_libs/tslibs/timestamps.pxd
+++ b/pandas/_libs/tslibs/timestamps.pxd
@@ -15,7 +15,6 @@ from pandas._libs.tslibs.offsets cimport BaseOffset
cdef _Timestamp create_timestamp_from_ts(int64_t value,
npy_datetimestruct dts,
tzinfo tz,
- BaseOffset freq,
bint fold,
NPY_DATETIMEUNIT reso=*)
@@ -23,7 +22,6 @@ cdef _Timestamp create_timestamp_from_ts(int64_t value,
cdef class _Timestamp(ABCTimestamp):
cdef readonly:
int64_t value, nanosecond, year
- BaseOffset _freq
NPY_DATETIMEUNIT _creso
cdef bint _get_start_end_field(self, str field, freq)
@@ -34,7 +32,5 @@ cdef class _Timestamp(ABCTimestamp):
cpdef datetime to_pydatetime(_Timestamp self, bint warn=*)
cdef bint _compare_outside_nanorange(_Timestamp self, datetime other,
int op) except -1
- cpdef void _set_freq(self, freq)
- cdef _warn_on_field_deprecation(_Timestamp self, freq, str field)
cdef bint _compare_mismatched_resos(_Timestamp self, _Timestamp other, int op)
cdef _Timestamp _as_creso(_Timestamp self, NPY_DATETIMEUNIT reso, bint round_ok=*)
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
index da9fe7b4126e9..77f02741aae48 100644
--- a/pandas/_libs/tslibs/timestamps.pyi
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -47,12 +47,10 @@ class Timestamp(datetime):
tzinfo: _tzinfo | None = ...,
*,
nanosecond: int | None = ...,
- freq: int | None | str | BaseOffset = ...,
tz: str | _tzinfo | None | int = ...,
unit: str | int | None = ...,
fold: int | None = ...,
) -> _DatetimeT | NaTType: ...
- def _set_freq(self, freq: BaseOffset | None) -> None: ...
@classmethod
def _from_value_and_reso(
cls, value: int, reso: int, tz: _tzinfo | None
@@ -89,7 +87,6 @@ class Timestamp(datetime):
def fromordinal(
cls: type[_DatetimeT],
ordinal: int,
- freq: str | BaseOffset | None = ...,
tz: _tzinfo | str | None = ...,
) -> _DatetimeT: ...
@classmethod
@@ -176,7 +173,7 @@ class Timestamp(datetime):
def is_year_end(self) -> bool: ...
def to_pydatetime(self, warn: bool = ...) -> datetime: ...
def to_datetime64(self) -> np.datetime64: ...
- def to_period(self, freq: BaseOffset | str | None = ...) -> Period: ...
+ def to_period(self, freq: BaseOffset | str = ...) -> Period: ...
def to_julian_date(self) -> np.float64: ...
@property
def asm8(self) -> np.datetime64: ...
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index afb93e34935f0..62607df5b2aa8 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -97,11 +97,7 @@ from pandas._libs.tslibs.np_datetime import (
OutOfBoundsTimedelta,
)
-from pandas._libs.tslibs.offsets cimport (
- BaseOffset,
- is_offset_object,
- to_offset,
-)
+from pandas._libs.tslibs.offsets cimport to_offset
from pandas._libs.tslibs.timedeltas cimport (
_Timedelta,
delta_to_nanoseconds,
@@ -134,7 +130,6 @@ cdef inline _Timestamp create_timestamp_from_ts(
int64_t value,
npy_datetimestruct dts,
tzinfo tz,
- BaseOffset freq,
bint fold,
NPY_DATETIMEUNIT reso=NPY_FR_ns,
):
@@ -160,7 +155,6 @@ cdef inline _Timestamp create_timestamp_from_ts(
dts.sec, dts.us, tz, fold=fold)
ts_base.value = value
- ts_base._freq = freq
ts_base.year = dts.year
ts_base.nanosecond = dts.ps // 1000
ts_base._creso = reso
@@ -171,7 +165,6 @@ cdef inline _Timestamp create_timestamp_from_ts(
def _unpickle_timestamp(value, freq, tz, reso=NPY_FR_ns):
# GH#41949 dont warn on unpickle if we have a freq
ts = Timestamp._from_value_and_reso(value, reso, tz)
- ts._set_freq(freq)
return ts
@@ -239,21 +232,6 @@ cdef class _Timestamp(ABCTimestamp):
max = MinMaxReso("max")
resolution = MinMaxReso("resolution") # GH#21336, GH#21365
- cpdef void _set_freq(self, freq):
- # set the ._freq attribute without going through the constructor,
- # which would issue a warning
- # Caller is responsible for validation
- self._freq = freq
-
- @property
- def freq(self):
- warnings.warn(
- "Timestamp.freq is deprecated and will be removed in a future version.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- return self._freq
-
@property
def _unit(self) -> str:
"""
@@ -283,7 +261,7 @@ cdef class _Timestamp(ABCTimestamp):
maybe_localize_tso(obj, tz, reso)
return create_timestamp_from_ts(
- value, obj.dts, tz=obj.tzinfo, freq=None, fold=obj.fold, reso=reso
+ value, obj.dts, tz=obj.tzinfo, fold=obj.fold, reso=reso
)
@classmethod
@@ -450,8 +428,6 @@ cdef class _Timestamp(ABCTimestamp):
f"Out of bounds nanosecond timestamp: {new_value}"
) from err
- if result is not NaT:
- result._set_freq(self._freq) # avoid warning in constructor
return result
elif is_integer_object(other):
@@ -588,7 +564,7 @@ cdef class _Timestamp(ABCTimestamp):
if freq:
kwds = freq.kwds
month_kw = kwds.get('startingMonth', kwds.get('month', 12))
- freqstr = self._freqstr
+ freqstr = freq.freqstr
else:
month_kw = 12
freqstr = None
@@ -599,31 +575,6 @@ cdef class _Timestamp(ABCTimestamp):
field, freqstr, month_kw, self._creso)
return out[0]
- cdef _warn_on_field_deprecation(self, freq, str field):
- """
- Warn if the removal of .freq change the value of start/end properties.
- """
- cdef:
- bint needs = False
-
- if freq is not None:
- kwds = freq.kwds
- month_kw = kwds.get("startingMonth", kwds.get("month", 12))
- freqstr = self._freqstr
- if month_kw != 12:
- needs = True
- if freqstr.startswith("B"):
- needs = True
-
- if needs:
- warnings.warn(
- "Timestamp.freq is deprecated and will be removed in a future "
- "version. When you have a freq, use "
- f"freq.{field}(timestamp) instead.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
-
@property
def is_month_start(self) -> bool:
"""
@@ -639,11 +590,7 @@ cdef class _Timestamp(ABCTimestamp):
>>> ts.is_month_start
True
"""
- if self._freq is None:
- # fast-path for non-business frequencies
- return self.day == 1
- self._warn_on_field_deprecation(self._freq, "is_month_start")
- return self._get_start_end_field("is_month_start", self._freq)
+ return self.day == 1
@property
def is_month_end(self) -> bool:
@@ -660,11 +607,7 @@ cdef class _Timestamp(ABCTimestamp):
>>> ts.is_month_end
True
"""
- if self._freq is None:
- # fast-path for non-business frequencies
- return self.day == self.days_in_month
- self._warn_on_field_deprecation(self._freq, "is_month_end")
- return self._get_start_end_field("is_month_end", self._freq)
+ return self.day == self.days_in_month
@property
def is_quarter_start(self) -> bool:
@@ -681,11 +624,7 @@ cdef class _Timestamp(ABCTimestamp):
>>> ts.is_quarter_start
True
"""
- if self._freq is None:
- # fast-path for non-business frequencies
- return self.day == 1 and self.month % 3 == 1
- self._warn_on_field_deprecation(self._freq, "is_quarter_start")
- return self._get_start_end_field("is_quarter_start", self._freq)
+ return self.day == 1 and self.month % 3 == 1
@property
def is_quarter_end(self) -> bool:
@@ -702,11 +641,7 @@ cdef class _Timestamp(ABCTimestamp):
>>> ts.is_quarter_end
True
"""
- if self._freq is None:
- # fast-path for non-business frequencies
- return (self.month % 3) == 0 and self.day == self.days_in_month
- self._warn_on_field_deprecation(self._freq, "is_quarter_end")
- return self._get_start_end_field("is_quarter_end", self._freq)
+ return (self.month % 3) == 0 and self.day == self.days_in_month
@property
def is_year_start(self) -> bool:
@@ -723,11 +658,7 @@ cdef class _Timestamp(ABCTimestamp):
>>> ts.is_year_start
True
"""
- if self._freq is None:
- # fast-path for non-business frequencies
- return self.day == self.month == 1
- self._warn_on_field_deprecation(self._freq, "is_year_start")
- return self._get_start_end_field("is_year_start", self._freq)
+ return self.day == self.month == 1
@property
def is_year_end(self) -> bool:
@@ -744,11 +675,7 @@ cdef class _Timestamp(ABCTimestamp):
>>> ts.is_year_end
True
"""
- if self._freq is None:
- # fast-path for non-business frequencies
- return self.month == 12 and self.day == 31
- self._warn_on_field_deprecation(self._freq, "is_year_end")
- return self._get_start_end_field("is_year_end", self._freq)
+ return self.month == 12 and self.day == 31
@cython.boundscheck(False)
cdef _get_date_name_field(self, str field, object locale):
@@ -926,7 +853,6 @@ cdef class _Timestamp(ABCTimestamp):
def __setstate__(self, state):
self.value = state[0]
- self._freq = state[1]
self.tzinfo = state[2]
if len(state) == 3:
@@ -938,7 +864,7 @@ cdef class _Timestamp(ABCTimestamp):
self._creso = reso
def __reduce__(self):
- object_state = self.value, self._freq, self.tzinfo, self._creso
+ object_state = self.value, None, self.tzinfo, self._creso
return (_unpickle_timestamp, object_state)
# -----------------------------------------------------------------
@@ -1017,9 +943,8 @@ cdef class _Timestamp(ABCTimestamp):
pass
tz = f", tz='{zone}'" if zone is not None else ""
- freq = "" if self._freq is None else f", freq='{self._freqstr}'"
- return f"Timestamp('{stamp}'{tz}{freq})"
+ return f"Timestamp('{stamp}'{tz})"
@property
def _repr_base(self) -> str:
@@ -1209,15 +1134,6 @@ cdef class _Timestamp(ABCTimestamp):
stacklevel=find_stack_level(),
)
- if freq is None:
- freq = self._freq
- warnings.warn(
- "In a future version, calling 'Timestamp.to_period()' without "
- "passing a 'freq' will raise an exception.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
-
return Period(self, freq=freq)
@@ -1244,8 +1160,6 @@ class Timestamp(_Timestamp):
hour, minute, second, microsecond : int, optional, default 0
tzinfo : datetime.tzinfo, optional, default None
nanosecond : int, optional, default 0
- freq : str, DateOffset
- Offset which Timestamp will have.
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time which Timestamp will have.
unit : str
@@ -1299,7 +1213,7 @@ class Timestamp(_Timestamp):
"""
@classmethod
- def fromordinal(cls, ordinal, freq=None, tz=None):
+ def fromordinal(cls, ordinal, tz=None):
"""
Construct a timestamp from a a proleptic Gregorian ordinal.
@@ -1307,8 +1221,6 @@ class Timestamp(_Timestamp):
----------
ordinal : int
Date corresponding to a proleptic Gregorian ordinal.
- freq : str, DateOffset
- Offset to apply to the Timestamp.
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone for the Timestamp.
@@ -1321,8 +1233,7 @@ class Timestamp(_Timestamp):
>>> pd.Timestamp.fromordinal(737425)
Timestamp('2020-01-01 00:00:00')
"""
- return cls(datetime.fromordinal(ordinal),
- freq=freq, tz=tz)
+ return cls(datetime.fromordinal(ordinal), tz=tz)
@classmethod
def now(cls, tz=None):
@@ -1488,7 +1399,6 @@ class Timestamp(_Timestamp):
tzinfo_type tzinfo=None,
*,
nanosecond=None,
- object freq=None,
tz=None,
unit=None,
fold=None,
@@ -1560,7 +1470,7 @@ class Timestamp(_Timestamp):
# check that only ts_input is passed
# checking verbosely, because cython doesn't optimize
# list comprehensions (as of cython 0.29.x)
- if (isinstance(ts_input, _Timestamp) and freq is None and
+ if (isinstance(ts_input, _Timestamp) and
tz is None and unit is None and year is None and
month is None and day is None and hour is None and
minute is None and second is None and
@@ -1603,7 +1513,6 @@ class Timestamp(_Timestamp):
# microsecond[, nanosecond[, tzinfo]]]]]])
ts_input = datetime(ts_input, year, month, day or 0,
hour or 0, minute or 0, second or 0, fold=fold or 0)
- freq = None
unit = None
if nanosecond is None:
@@ -1631,22 +1540,7 @@ class Timestamp(_Timestamp):
if ts.value == NPY_NAT:
return NaT
- if freq is None:
- # GH 22311: Try to extract the frequency of a given Timestamp input
- freq = getattr(ts_input, '_freq', None)
- else:
- warnings.warn(
- "The 'freq' argument in Timestamp is deprecated and will be "
- "removed in a future version.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- if not is_offset_object(freq):
- freq = to_offset(freq)
-
- return create_timestamp_from_ts(
- ts.value, ts.dts, ts.tzinfo, freq, ts.fold, ts.creso
- )
+ return create_timestamp_from_ts(ts.value, ts.dts, ts.tzinfo, ts.fold, ts.creso)
def _round(self, freq, mode, ambiguous='raise', nonexistent='raise'):
cdef:
@@ -1965,22 +1859,6 @@ timedelta}, default 'raise'
"Use tz_localize() or tz_convert() as appropriate"
)
- @property
- def _freqstr(self):
- return getattr(self._freq, "freqstr", self._freq)
-
- @property
- def freqstr(self):
- """
- Return the total number of days in the month.
- """
- warnings.warn(
- "Timestamp.freqstr is deprecated and will be removed in a future version.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- return self._freqstr
-
def tz_localize(self, tz, ambiguous='raise', nonexistent='raise'):
"""
Localize the Timestamp to a timezone.
@@ -2081,8 +1959,6 @@ default 'raise'
)
out = type(self)._from_value_and_reso(value, self._creso, tz=tz)
- if out is not NaT:
- out._set_freq(self._freq) # avoid warning in constructor
return out
def tz_convert(self, tz):
@@ -2136,8 +2012,6 @@ default 'raise'
# Same UTC timestamp, different time zone
tz = maybe_get_tz(tz)
out = type(self)._from_value_and_reso(self.value, reso=self._creso, tz=tz)
- if out is not NaT:
- out._set_freq(self._freq) # avoid warning in constructor
return out
astimezone = tz_convert
@@ -2270,7 +2144,7 @@ default 'raise'
ts_input, tzobj, nanos=dts.ps // 1000, reso=self._creso
)
return create_timestamp_from_ts(
- ts.value, dts, tzobj, self._freq, fold, reso=self._creso
+ ts.value, dts, tzobj, fold, reso=self._creso
)
def to_julian_date(self) -> np.float64:
diff --git a/pandas/_libs/tslibs/vectorized.pyi b/pandas/_libs/tslibs/vectorized.pyi
index d24541aede8d8..22f457b9ddc0b 100644
--- a/pandas/_libs/tslibs/vectorized.pyi
+++ b/pandas/_libs/tslibs/vectorized.pyi
@@ -7,7 +7,6 @@ from datetime import tzinfo
import numpy as np
from pandas._libs.tslibs.dtypes import Resolution
-from pandas._libs.tslibs.offsets import BaseOffset
from pandas._typing import npt
def dt64arr_to_periodarr(
@@ -34,7 +33,6 @@ def get_resolution(
def ints_to_pydatetime(
arr: npt.NDArray[np.int64],
tz: tzinfo | None = ...,
- freq: BaseOffset | None = ...,
fold: bool = ...,
box: str = ...,
reso: int = ..., # NPY_DATETIMEUNIT
diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx
index c828a9dfe0ccb..8661ba4b9b2f1 100644
--- a/pandas/_libs/tslibs/vectorized.pyx
+++ b/pandas/_libs/tslibs/vectorized.pyx
@@ -32,7 +32,6 @@ from .np_datetime cimport (
npy_datetimestruct,
pandas_datetime_to_datetimestruct,
)
-from .offsets cimport BaseOffset
from .period cimport get_period_ordinal
from .timestamps cimport create_timestamp_from_ts
from .timezones cimport is_utc
@@ -95,7 +94,6 @@ def tz_convert_from_utc(ndarray stamps, tzinfo tz, NPY_DATETIMEUNIT reso=NPY_FR_
def ints_to_pydatetime(
ndarray stamps,
tzinfo tz=None,
- BaseOffset freq=None,
bint fold=False,
str box="datetime",
NPY_DATETIMEUNIT reso=NPY_FR_ns,
@@ -109,8 +107,6 @@ def ints_to_pydatetime(
stamps : array of i8
tz : str, optional
convert to this timezone
- freq : BaseOffset, optional
- freq to convert
fold : bint, default is 0
Due to daylight saving time, one wall clock time can occur twice
when shifting from summer to winter time; fold describes whether the
@@ -181,7 +177,7 @@ def ints_to_pydatetime(
if use_ts:
res_val = create_timestamp_from_ts(
- utc_val, dts, new_tz, freq, fold, reso=reso
+ utc_val, dts, new_tz, fold, reso=reso
)
elif use_pydt:
res_val = datetime(
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index b4198575c3f06..897fc51d667d9 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -431,7 +431,6 @@ def astype(self, dtype, copy: bool = True):
converted = ints_to_pydatetime(
i8data,
tz=self.tz,
- freq=self.freq,
box="timestamp",
reso=self._creso,
)
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 43acaafdd65cc..c7496e03999d2 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -505,12 +505,6 @@ def _box_func(self, x: np.datetime64) -> Timestamp | NaTType:
# GH#42228
value = x.view("i8")
ts = Timestamp._from_value_and_reso(value, reso=self._creso, tz=self.tz)
- # Non-overlapping identity check (left operand type: "Timestamp",
- # right operand type: "NaTType")
- if ts is not NaT: # type: ignore[comparison-overlap]
- # GH#41586
- # do this instead of passing to the constructor to avoid FutureWarning
- ts._set_freq(self.freq)
return ts
@property
@@ -611,7 +605,6 @@ def __iter__(self) -> Iterator:
converted = ints_to_pydatetime(
data[start_i:end_i],
tz=self.tz,
- freq=self.freq,
box="timestamp",
reso=self._creso,
)
diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py
index b02870d5d1247..f0df27a160bbc 100644
--- a/pandas/tests/frame/indexing/test_xs.py
+++ b/pandas/tests/frame/indexing/test_xs.py
@@ -54,7 +54,7 @@ def test_xs(self, float_frame, datetime_frame):
assert xs["B"] == "1"
with pytest.raises(
- KeyError, match=re.escape("Timestamp('1999-12-31 00:00:00', freq='B')")
+ KeyError, match=re.escape("Timestamp('1999-12-31 00:00:00')")
):
datetime_frame.xs(datetime_frame.index[0] - BDay())
diff --git a/pandas/tests/frame/methods/test_first_valid_index.py b/pandas/tests/frame/methods/test_first_valid_index.py
index e4cbd892de38e..6009851bab643 100644
--- a/pandas/tests/frame/methods/test_first_valid_index.py
+++ b/pandas/tests/frame/methods/test_first_valid_index.py
@@ -7,7 +7,6 @@
from pandas import (
DataFrame,
Series,
- date_range,
)
import pandas._testing as tm
@@ -73,22 +72,3 @@ def test_first_last_valid_all_nan(self, index_func):
ser = frame["foo"]
assert ser.first_valid_index() is None
assert ser.last_valid_index() is None
-
- @pytest.mark.filterwarnings("ignore:Timestamp.freq is deprecated:FutureWarning")
- def test_first_last_valid_preserves_freq(self):
- # GH#20499: its preserves freq with holes
- index = date_range("20110101", periods=30, freq="B")
- frame = DataFrame(np.nan, columns=["foo"], index=index)
-
- frame.iloc[1] = 1
- frame.iloc[-2] = 1
- assert frame.first_valid_index() == frame.index[1]
- assert frame.last_valid_index() == frame.index[-2]
- assert frame.first_valid_index().freq == frame.index.freq
- assert frame.last_valid_index().freq == frame.index.freq
-
- ts = frame["foo"]
- assert ts.first_valid_index() == ts.index[1]
- assert ts.last_valid_index() == ts.index[-2]
- assert ts.first_valid_index().freq == ts.index.freq
- assert ts.last_valid_index().freq == ts.index.freq
diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py
index 30c033572335a..e03fa317fded9 100644
--- a/pandas/tests/frame/methods/test_reset_index.py
+++ b/pandas/tests/frame/methods/test_reset_index.py
@@ -479,7 +479,6 @@ def test_reset_index_allow_duplicates_check(self, multiindex_df, allow_duplicate
with pytest.raises(ValueError, match="expected type bool"):
multiindex_df.reset_index(allow_duplicates=allow_duplicates)
- @pytest.mark.filterwarnings("ignore:Timestamp.freq is deprecated:FutureWarning")
def test_reset_index_datetime(self, tz_naive_fixture):
# GH#3950
tz = tz_naive_fixture
diff --git a/pandas/tests/indexes/datetimes/methods/test_to_period.py b/pandas/tests/indexes/datetimes/methods/test_to_period.py
index f6a598bd2a1ed..e8048e63afbf7 100644
--- a/pandas/tests/indexes/datetimes/methods/test_to_period.py
+++ b/pandas/tests/indexes/datetimes/methods/test_to_period.py
@@ -148,10 +148,9 @@ def test_to_period_tz(self, tz):
with tm.assert_produces_warning(UserWarning):
# GH#21333 warning that timezone info will be lost
# filter warning about freq deprecation
- warnings.filterwarnings("ignore", category=FutureWarning)
result = ts.to_period()[0]
- expected = ts[0].to_period()
+ expected = ts[0].to_period(ts.freq)
assert result == expected
@@ -159,7 +158,7 @@ def test_to_period_tz(self, tz):
with tm.assert_produces_warning(UserWarning):
# GH#21333 warning that timezone info will be lost
- result = ts.to_period()
+ result = ts.to_period(ts.freq)
tm.assert_index_equal(result, expected)
@@ -168,10 +167,9 @@ def test_to_period_tz_utc_offset_consistency(self, tz):
# GH#22905
ts = date_range("1/1/2000", "2/1/2000", tz="Etc/GMT-1")
with tm.assert_produces_warning(UserWarning):
- warnings.filterwarnings("ignore", category=FutureWarning)
result = ts.to_period()[0]
- expected = ts[0].to_period()
+ expected = ts[0].to_period(ts.freq)
assert result == expected
def test_to_period_nofreq(self):
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index 0e8b0fe83279b..1dc01a3d7f937 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -18,6 +18,8 @@
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
+from pandas.tseries.frequencies import to_offset
+
class TestDatetime64:
def test_no_millisecond_field(self):
@@ -139,41 +141,48 @@ def test_datetimeindex_accessors4(self):
assert dti.is_month_start[0] == 1
def test_datetimeindex_accessors5(self):
- with tm.assert_produces_warning(FutureWarning, match="The 'freq' argument"):
- tests = [
- (Timestamp("2013-06-01", freq="M").is_month_start, 1),
- (Timestamp("2013-06-01", freq="BM").is_month_start, 0),
- (Timestamp("2013-06-03", freq="M").is_month_start, 0),
- (Timestamp("2013-06-03", freq="BM").is_month_start, 1),
- (Timestamp("2013-02-28", freq="Q-FEB").is_month_end, 1),
- (Timestamp("2013-02-28", freq="Q-FEB").is_quarter_end, 1),
- (Timestamp("2013-02-28", freq="Q-FEB").is_year_end, 1),
- (Timestamp("2013-03-01", freq="Q-FEB").is_month_start, 1),
- (Timestamp("2013-03-01", freq="Q-FEB").is_quarter_start, 1),
- (Timestamp("2013-03-01", freq="Q-FEB").is_year_start, 1),
- (Timestamp("2013-03-31", freq="QS-FEB").is_month_end, 1),
- (Timestamp("2013-03-31", freq="QS-FEB").is_quarter_end, 0),
- (Timestamp("2013-03-31", freq="QS-FEB").is_year_end, 0),
- (Timestamp("2013-02-01", freq="QS-FEB").is_month_start, 1),
- (Timestamp("2013-02-01", freq="QS-FEB").is_quarter_start, 1),
- (Timestamp("2013-02-01", freq="QS-FEB").is_year_start, 1),
- (Timestamp("2013-06-30", freq="BQ").is_month_end, 0),
- (Timestamp("2013-06-30", freq="BQ").is_quarter_end, 0),
- (Timestamp("2013-06-30", freq="BQ").is_year_end, 0),
- (Timestamp("2013-06-28", freq="BQ").is_month_end, 1),
- (Timestamp("2013-06-28", freq="BQ").is_quarter_end, 1),
- (Timestamp("2013-06-28", freq="BQ").is_year_end, 0),
- (Timestamp("2013-06-30", freq="BQS-APR").is_month_end, 0),
- (Timestamp("2013-06-30", freq="BQS-APR").is_quarter_end, 0),
- (Timestamp("2013-06-30", freq="BQS-APR").is_year_end, 0),
- (Timestamp("2013-06-28", freq="BQS-APR").is_month_end, 1),
- (Timestamp("2013-06-28", freq="BQS-APR").is_quarter_end, 1),
- (Timestamp("2013-03-29", freq="BQS-APR").is_year_end, 1),
- (Timestamp("2013-11-01", freq="AS-NOV").is_year_start, 1),
- (Timestamp("2013-10-31", freq="AS-NOV").is_year_end, 1),
- (Timestamp("2012-02-01").days_in_month, 29),
- (Timestamp("2013-02-01").days_in_month, 28),
- ]
+ freq_m = to_offset("M")
+ bm = to_offset("BM")
+ qfeb = to_offset("Q-FEB")
+ qsfeb = to_offset("QS-FEB")
+ bq = to_offset("BQ")
+ bqs_apr = to_offset("BQS-APR")
+ as_nov = to_offset("AS-NOV")
+
+ tests = [
+ (freq_m.is_month_start(Timestamp("2013-06-01")), 1),
+ (bm.is_month_start(Timestamp("2013-06-01")), 0),
+ (freq_m.is_month_start(Timestamp("2013-06-03")), 0),
+ (bm.is_month_start(Timestamp("2013-06-03")), 1),
+ (qfeb.is_month_end(Timestamp("2013-02-28")), 1),
+ (qfeb.is_quarter_end(Timestamp("2013-02-28")), 1),
+ (qfeb.is_year_end(Timestamp("2013-02-28")), 1),
+ (qfeb.is_month_start(Timestamp("2013-03-01")), 1),
+ (qfeb.is_quarter_start(Timestamp("2013-03-01")), 1),
+ (qfeb.is_year_start(Timestamp("2013-03-01")), 1),
+ (qsfeb.is_month_end(Timestamp("2013-03-31")), 1),
+ (qsfeb.is_quarter_end(Timestamp("2013-03-31")), 0),
+ (qsfeb.is_year_end(Timestamp("2013-03-31")), 0),
+ (qsfeb.is_month_start(Timestamp("2013-02-01")), 1),
+ (qsfeb.is_quarter_start(Timestamp("2013-02-01")), 1),
+ (qsfeb.is_year_start(Timestamp("2013-02-01")), 1),
+ (bq.is_month_end(Timestamp("2013-06-30")), 0),
+ (bq.is_quarter_end(Timestamp("2013-06-30")), 0),
+ (bq.is_year_end(Timestamp("2013-06-30")), 0),
+ (bq.is_month_end(Timestamp("2013-06-28")), 1),
+ (bq.is_quarter_end(Timestamp("2013-06-28")), 1),
+ (bq.is_year_end(Timestamp("2013-06-28")), 0),
+ (bqs_apr.is_month_end(Timestamp("2013-06-30")), 0),
+ (bqs_apr.is_quarter_end(Timestamp("2013-06-30")), 0),
+ (bqs_apr.is_year_end(Timestamp("2013-06-30")), 0),
+ (bqs_apr.is_month_end(Timestamp("2013-06-28")), 1),
+ (bqs_apr.is_quarter_end(Timestamp("2013-06-28")), 1),
+ (bqs_apr.is_year_end(Timestamp("2013-03-29")), 1),
+ (as_nov.is_year_start(Timestamp("2013-11-01")), 1),
+ (as_nov.is_year_end(Timestamp("2013-10-31")), 1),
+ (Timestamp("2012-02-01").days_in_month, 29),
+ (Timestamp("2013-02-01").days_in_month, 28),
+ ]
for ts, value in tests:
assert ts == value
diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py
index d930b63fd0c0b..42aba136f378d 100644
--- a/pandas/tests/indexes/datetimes/test_scalar_compat.py
+++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py
@@ -64,9 +64,7 @@ def test_dti_timestamp_fields(self, field):
idx = tm.makeDateIndex(100)
expected = getattr(idx, field)[-1]
- warn = FutureWarning if field.startswith("is_") else None
- with tm.assert_produces_warning(warn, match="Timestamp.freq is deprecated"):
- result = getattr(Timestamp(idx[-1]), field)
+ result = getattr(Timestamp(idx[-1]), field)
assert result == expected
def test_dti_timestamp_isocalendar_fields(self):
@@ -75,22 +73,6 @@ def test_dti_timestamp_isocalendar_fields(self):
result = idx[-1].isocalendar()
assert result == expected
- def test_dti_timestamp_freq_fields(self):
- # extra fields from DatetimeIndex like quarter and week
- idx = tm.makeDateIndex(100)
-
- msg = "The 'freq' argument in Timestamp is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- ts = Timestamp(idx[-1], freq=idx.freq)
-
- msg2 = "Timestamp.freq is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg2):
- assert idx.freq == ts.freq
-
- msg3 = "Timestamp.freqstr is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg3):
- assert idx.freqstr == ts.freqstr
-
# ----------------------------------------------------------------
# DatetimeIndex.round
diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py
index ba1c1cbea26f9..a6a0b2781dc3b 100644
--- a/pandas/tests/io/generate_legacy_storage_files.py
+++ b/pandas/tests/io/generate_legacy_storage_files.py
@@ -243,9 +243,6 @@ def create_data():
"tz": Timestamp("2011-01-01", tz="US/Eastern"),
}
- timestamp["freq"] = Timestamp("2011-01-01", freq="D")
- timestamp["both"] = Timestamp("2011-01-01", tz="Asia/Tokyo", freq="M")
-
off = {
"DateOffset": DateOffset(years=1),
"DateOffset_h_ns": DateOffset(hour=6, nanoseconds=5824),
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 5115a33694207..930e547d5cba8 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -26,7 +26,6 @@
import uuid
from warnings import (
catch_warnings,
- filterwarnings,
simplefilter,
)
import zipfile
@@ -56,10 +55,6 @@
MonthEnd,
)
-pytestmark = pytest.mark.filterwarnings(
- "ignore:Timestamp.freq is deprecated:FutureWarning"
-)
-
@pytest.fixture(scope="module")
def current_pickle_data():
@@ -67,10 +62,6 @@ def current_pickle_data():
from pandas.tests.io.generate_legacy_storage_files import create_pickle_data
with catch_warnings():
- filterwarnings(
- "ignore", "The 'freq' argument in Timestamp", category=FutureWarning
- )
-
return create_pickle_data()
@@ -89,7 +80,6 @@ def compare_element(result, expected, typ):
assert result is pd.NaT
else:
assert result == expected
- assert result.freq == expected.freq
else:
comparator = getattr(tm, f"assert_{typ}_equal", tm.assert_almost_equal)
comparator(result, expected)
@@ -215,7 +205,6 @@ def python_unpickler(path):
],
)
@pytest.mark.parametrize("writer", [pd.to_pickle, python_pickler])
-@pytest.mark.filterwarnings("ignore:The 'freq' argument in Timestamp:FutureWarning")
def test_round_trip_current(current_pickle_data, pickle_writer, writer):
data = current_pickle_data
for typ, dv in data.items():
diff --git a/pandas/tests/reshape/concat/test_datetimes.py b/pandas/tests/reshape/concat/test_datetimes.py
index 1af54a1d5cf4a..93d212d0a581d 100644
--- a/pandas/tests/reshape/concat/test_datetimes.py
+++ b/pandas/tests/reshape/concat/test_datetimes.py
@@ -403,7 +403,6 @@ def test_concat_multiple_tzs(self):
expected = DataFrame({"time": [ts2, ts3]})
tm.assert_frame_equal(results, expected)
- @pytest.mark.filterwarnings("ignore:Timestamp.freq is deprecated:FutureWarning")
def test_concat_multiindex_with_tz(self):
# GH 6606
df = DataFrame(
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index f385453bdb3a1..14ea670fa6cf9 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -525,7 +525,6 @@ def test_pivot_index_with_nan_dates(self, method):
result = pd.pivot(df, index="b", columns="a", values="c")
tm.assert_frame_equal(result, pv.T)
- @pytest.mark.filterwarnings("ignore:Timestamp.freq is deprecated:FutureWarning")
@pytest.mark.parametrize("method", [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index 79e9e1d4fc68b..3b39a2f877c8d 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -184,7 +184,7 @@ def test_nat_iso_format(get_nat):
@pytest.mark.parametrize(
"klass,expected",
[
- (Timestamp, ["freqstr", "normalize", "to_julian_date", "to_period"]),
+ (Timestamp, ["normalize", "to_julian_date", "to_period"]),
(
Timedelta,
[
diff --git a/pandas/tests/scalar/timestamp/test_arithmetic.py b/pandas/tests/scalar/timestamp/test_arithmetic.py
index 4283575a67f4c..17fee1ff3f949 100644
--- a/pandas/tests/scalar/timestamp/test_arithmetic.py
+++ b/pandas/tests/scalar/timestamp/test_arithmetic.py
@@ -158,11 +158,7 @@ def test_addition_subtraction_types(self):
# objects
dt = datetime(2014, 3, 4)
td = timedelta(seconds=1)
- # build a timestamp with a frequency, since then it supports
- # addition/subtraction of integers
- with tm.assert_produces_warning(FutureWarning, match="The 'freq' argument"):
- # freq deprecated
- ts = Timestamp(dt, freq="D")
+ ts = Timestamp(dt)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
@@ -183,34 +179,6 @@ def test_addition_subtraction_types(self):
assert type(ts + td64) == Timestamp
assert type(ts - td64) == Timestamp
- @pytest.mark.parametrize(
- "freq, td, td64",
- [
- ("S", timedelta(seconds=1), np.timedelta64(1, "s")),
- ("min", timedelta(minutes=1), np.timedelta64(1, "m")),
- ("H", timedelta(hours=1), np.timedelta64(1, "h")),
- ("D", timedelta(days=1), np.timedelta64(1, "D")),
- ("W", timedelta(weeks=1), np.timedelta64(1, "W")),
- ("M", None, np.timedelta64(1, "M")),
- ],
- )
- @pytest.mark.filterwarnings("ignore:Timestamp.freq is deprecated:FutureWarning")
- @pytest.mark.filterwarnings("ignore:The 'freq' argument:FutureWarning")
- def test_addition_subtraction_preserve_frequency(self, freq, td, td64):
- ts = Timestamp("2014-03-05 00:00:00", freq=freq)
- original_freq = ts.freq
-
- assert (ts + 1 * original_freq).freq == original_freq
- assert (ts - 1 * original_freq).freq == original_freq
-
- if td is not None:
- # timedelta does not support months as unit
- assert (ts + td).freq == original_freq
- assert (ts - td).freq == original_freq
-
- assert (ts + td64).freq == original_freq
- assert (ts - td64).freq == original_freq
-
@pytest.mark.parametrize(
"td", [Timedelta(hours=3), np.timedelta64(3, "h"), timedelta(hours=3)]
)
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
index 5ab4f413fbbf1..9c3fa0f64153a 100644
--- a/pandas/tests/scalar/timestamp/test_constructors.py
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -20,9 +20,6 @@
Timedelta,
Timestamp,
)
-import pandas._testing as tm
-
-from pandas.tseries import offsets
class TestTimestampConstructors:
@@ -233,13 +230,11 @@ def test_constructor_invalid_tz(self):
Timestamp("2017-10-22", tzinfo=pytz.utc, tz="UTC")
msg = "Cannot pass a date attribute keyword argument when passing a date string"
- msg2 = "The 'freq' argument"
with pytest.raises(ValueError, match=msg):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gets
- # interpreted as a `freq`
- with tm.assert_produces_warning(FutureWarning, match=msg2):
- Timestamp("2012-01-01", "US/Pacific")
+ # interpreted as `year`
+ Timestamp("2012-01-01", "US/Pacific")
def test_constructor_strptime(self):
# GH25016
@@ -347,14 +342,11 @@ def test_constructor_keyword(self):
)
) == repr(Timestamp("2015-11-12 01:02:03.999999"))
- @pytest.mark.filterwarnings("ignore:Timestamp.freq is:FutureWarning")
- @pytest.mark.filterwarnings("ignore:The 'freq' argument:FutureWarning")
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
- ts = Timestamp.fromordinal(base.toordinal(), freq="D")
+ ts = Timestamp.fromordinal(base.toordinal())
assert base == ts
- assert ts.freq == "D"
assert base.toordinal() == ts.toordinal()
ts = Timestamp.fromordinal(base.toordinal(), tz="US/Eastern")
@@ -606,21 +598,6 @@ def test_construct_with_different_string_format(self, arg):
expected = Timestamp(datetime(2013, 1, 1), tz=pytz.FixedOffset(540))
assert result == expected
- def test_construct_timestamp_preserve_original_frequency(self):
- # GH 22311
- with tm.assert_produces_warning(FutureWarning, match="The 'freq' argument"):
- result = Timestamp(Timestamp("2010-08-08", freq="D")).freq
- expected = offsets.Day()
- assert result == expected
-
- def test_constructor_invalid_frequency(self):
- # GH 22311
- msg = "Invalid frequency:"
- msg2 = "The 'freq' argument"
- with pytest.raises(ValueError, match=msg):
- with tm.assert_produces_warning(FutureWarning, match=msg2):
- Timestamp("2012-01-01", freq=[])
-
@pytest.mark.parametrize("box", [datetime, Timestamp])
def test_raise_tz_and_tzinfo_in_datetime_input(self, box):
# GH 23579
diff --git a/pandas/tests/scalar/timestamp/test_rendering.py b/pandas/tests/scalar/timestamp/test_rendering.py
index 2f88f96b6bbea..3998142e568fe 100644
--- a/pandas/tests/scalar/timestamp/test_rendering.py
+++ b/pandas/tests/scalar/timestamp/test_rendering.py
@@ -4,7 +4,6 @@
import pytz # noqa # a test below uses pytz but only inside a `eval` call
from pandas import Timestamp
-import pandas._testing as tm
class TestTimestampRendering:
@@ -36,27 +35,6 @@ def test_repr(self, date, freq, tz):
assert freq_repr not in repr(date_tz)
assert date_tz == eval(repr(date_tz))
- msg = "The 'freq' argument in Timestamp"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- date_freq = Timestamp(date, freq=freq)
- assert date in repr(date_freq)
- assert tz_repr not in repr(date_freq)
- assert freq_repr in repr(date_freq)
- with tm.assert_produces_warning(
- FutureWarning, match=msg, check_stacklevel=False
- ):
- assert date_freq == eval(repr(date_freq))
-
- with tm.assert_produces_warning(FutureWarning, match=msg):
- date_tz_freq = Timestamp(date, tz=tz, freq=freq)
- assert date in repr(date_tz_freq)
- assert tz_repr in repr(date_tz_freq)
- assert freq_repr in repr(date_tz_freq)
- with tm.assert_produces_warning(
- FutureWarning, match=msg, check_stacklevel=False
- ):
- assert date_tz_freq == eval(repr(date_tz_freq))
-
def test_repr_utcoffset(self):
# This can cause the tz field to be populated, but it's redundant to
# include this information in the date-string.
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index 4f8c6fcc57186..a9a7a44f54dee 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -6,7 +6,6 @@
timedelta,
)
import locale
-import pickle
import unicodedata
from dateutil.tz import tzutc
@@ -36,64 +35,31 @@
import pandas._testing as tm
from pandas.tseries import offsets
+from pandas.tseries.frequencies import to_offset
class TestTimestampProperties:
- def test_freq_deprecation(self):
- # GH#41586
- msg = "The 'freq' argument in Timestamp is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- # warning issued at construction
- ts = Timestamp("2021-06-01", freq="D")
- ts2 = Timestamp("2021-06-01", freq="B")
-
- msg = "Timestamp.freq is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- # warning issued at attribute lookup
- ts.freq
-
- for per in ["month", "quarter", "year"]:
- for side in ["start", "end"]:
- attr = f"is_{per}_{side}"
-
- with tm.assert_produces_warning(FutureWarning, match=msg):
- getattr(ts2, attr)
-
- # is_(month|quarter|year)_(start|end) does _not_ issue a warning
- # with freq="D" bc the result will be unaffected by the deprecation
- with tm.assert_produces_warning(None):
- getattr(ts, attr)
-
- @pytest.mark.filterwarnings("ignore:The 'freq' argument:FutureWarning")
- @pytest.mark.filterwarnings("ignore:Timestamp.freq is deprecated:FutureWarning")
def test_properties_business(self):
- ts = Timestamp("2017-10-01", freq="B")
- control = Timestamp("2017-10-01")
+ freq = to_offset("B")
+
+ ts = Timestamp("2017-10-01")
assert ts.dayofweek == 6
assert ts.day_of_week == 6
- assert not ts.is_month_start # not a weekday
- assert not ts.freq.is_month_start(ts)
- assert ts.freq.is_month_start(ts + Timedelta(days=1))
- assert not ts.is_quarter_start # not a weekday
- assert not ts.freq.is_quarter_start(ts)
- assert ts.freq.is_quarter_start(ts + Timedelta(days=1))
- # Control case: non-business is month/qtr start
- assert control.is_month_start
- assert control.is_quarter_start
-
- ts = Timestamp("2017-09-30", freq="B")
- control = Timestamp("2017-09-30")
+ assert ts.is_month_start # not a weekday
+ assert not freq.is_month_start(ts)
+ assert freq.is_month_start(ts + Timedelta(days=1))
+ assert not freq.is_quarter_start(ts)
+ assert freq.is_quarter_start(ts + Timedelta(days=1))
+
+ ts = Timestamp("2017-09-30")
assert ts.dayofweek == 5
assert ts.day_of_week == 5
- assert not ts.is_month_end # not a weekday
- assert not ts.freq.is_month_end(ts)
- assert ts.freq.is_month_end(ts - Timedelta(days=1))
- assert not ts.is_quarter_end # not a weekday
- assert not ts.freq.is_quarter_end(ts)
- assert ts.freq.is_quarter_end(ts - Timedelta(days=1))
- # Control case: non-business is month/qtr start
- assert control.is_month_end
- assert control.is_quarter_end
+ assert ts.is_month_end
+ assert not freq.is_month_end(ts)
+ assert freq.is_month_end(ts - Timedelta(days=1))
+ assert ts.is_quarter_end
+ assert not freq.is_quarter_end(ts)
+ assert freq.is_quarter_end(ts - Timedelta(days=1))
@pytest.mark.parametrize(
"attr, expected",
@@ -476,26 +442,6 @@ def test_hash_timestamp_with_fold(self, timezone, year, month, day, hour):
)
assert hash(transition_1) == hash(transition_2)
- def test_tz_conversion_freq(self, tz_naive_fixture):
- # GH25241
- with tm.assert_produces_warning(FutureWarning, match="freq"):
- t1 = Timestamp("2019-01-01 10:00", freq="H")
- assert t1.tz_localize(tz=tz_naive_fixture).freq == t1.freq
- with tm.assert_produces_warning(FutureWarning, match="freq"):
- t2 = Timestamp("2019-01-02 12:00", tz="UTC", freq="T")
- assert t2.tz_convert(tz="UTC").freq == t2.freq
-
- def test_pickle_freq_no_warning(self):
- # GH#41949 we don't want a warning on unpickling
- with tm.assert_produces_warning(FutureWarning, match="freq"):
- ts = Timestamp("2019-01-01 10:00", freq="H")
-
- out = pickle.dumps(ts)
- with tm.assert_produces_warning(None):
- res = pickle.loads(out)
-
- assert res._freq == ts._freq
-
class TestTimestampNsOperations:
def test_nanosecond_string_parsing(self):
@@ -749,18 +695,13 @@ def test_start_end_fields(self, ts):
assert not ts.is_month_end
assert not ts.is_month_end
- freq = offsets.BDay()
- ts._set_freq(freq)
-
# 2016-01-01 is a Friday, so is year/quarter/month start with this freq
- msg = "Timestamp.freq is deprecated"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- assert ts.is_year_start
- assert ts.is_quarter_start
- assert ts.is_month_start
- assert not ts.is_year_end
- assert not ts.is_month_end
- assert not ts.is_month_end
+ assert ts.is_year_start
+ assert ts.is_quarter_start
+ assert ts.is_month_start
+ assert not ts.is_year_end
+ assert not ts.is_month_end
+ assert not ts.is_month_end
def test_day_name(self, dt64, ts):
alt = Timestamp(dt64)
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index b8291471225d7..f1d77ad5dbde2 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -479,6 +479,6 @@ def test_getitem_str_second_with_datetimeindex():
with pytest.raises(KeyError, match=r"^'2012-01-02 18:01:02'$"):
df["2012-01-02 18:01:02"]
- msg = r"Timestamp\('2012-01-02 18:01:02-0600', tz='US/Central', freq='S'\)"
+ msg = r"Timestamp\('2012-01-02 18:01:02-0600', tz='US/Central'\)"
with pytest.raises(KeyError, match=msg):
df[df.index[2]]
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 1d287f70a4606..3c8c72ba14e36 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -635,7 +635,7 @@ def test_getitem_with_integer_labels():
def test_getitem_missing(datetime_series):
# missing
d = datetime_series.index[0] - BDay()
- msg = r"Timestamp\('1999-12-31 00:00:00', freq='B'\)"
+ msg = r"Timestamp\('1999-12-31 00:00:00'\)"
with pytest.raises(KeyError, match=msg):
datetime_series[d]
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49365 | 2022-10-28T01:08:45Z | 2022-11-04T18:09:41Z | 2022-11-04T18:09:41Z | 2022-11-04T18:15:53Z |
BUG: array_equivalent_object with mismatched shapes | diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi
index 188494c7c60db..3cbc04fb2f5cd 100644
--- a/pandas/_libs/lib.pyi
+++ b/pandas/_libs/lib.pyi
@@ -230,8 +230,8 @@ def generate_bins_dt64(
hasnans: bool = ...,
) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1]
def array_equivalent_object(
- left: np.ndarray, # object[:]
- right: np.ndarray, # object[:]
+ left: npt.NDArray[np.object_],
+ right: npt.NDArray[np.object_],
) -> bool: ...
def has_infs(arr: np.ndarray) -> bool: ... # const floating[:]
def get_reverse_indexer(
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 3769bbf087fee..5ef42228d8029 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -15,6 +15,7 @@ from cpython.iterator cimport PyIter_Check
from cpython.number cimport PyNumber_Check
from cpython.object cimport (
Py_EQ,
+ PyObject,
PyObject_RichCompareBool,
PyTypeObject,
)
@@ -571,25 +572,42 @@ def maybe_booleans_to_slice(ndarray[uint8_t, ndim=1] mask):
@cython.wraparound(False)
@cython.boundscheck(False)
-def array_equivalent_object(left: object[:], right: object[:]) -> bool:
+def array_equivalent_object(ndarray left, ndarray right) -> bool:
"""
- Perform an element by element comparison on 1-d object arrays
+ Perform an element by element comparison on N-d object arrays
taking into account nan positions.
"""
+ # left and right both have object dtype, but we cannot annotate that
+ # without limiting ndim.
cdef:
- Py_ssize_t i, n = left.shape[0]
+ Py_ssize_t i, n = left.size
object x, y
+ cnp.broadcast mi = cnp.PyArray_MultiIterNew2(left, right)
+
+ # Caller is responsible for checking left.shape == right.shape
for i in range(n):
- x = left[i]
- y = right[i]
+ # Analogous to: x = left[i]
+ x = <object>(<PyObject**>cnp.PyArray_MultiIter_DATA(mi, 0))[0]
+ y = <object>(<PyObject**>cnp.PyArray_MultiIter_DATA(mi, 1))[0]
# we are either not equal or both nan
# I think None == None will be true here
try:
if PyArray_Check(x) and PyArray_Check(y):
- if not array_equivalent_object(x, y):
+ if x.shape != y.shape:
return False
+ if x.dtype == y.dtype == object:
+ if not array_equivalent_object(x, y):
+ return False
+ else:
+ # Circular import isn't great, but so it goes.
+ # TODO: could use np.array_equal?
+ from pandas.core.dtypes.missing import array_equivalent
+
+ if not array_equivalent(x, y):
+ return False
+
elif (x is C_NA) ^ (y is C_NA):
return False
elif not (
@@ -612,6 +630,8 @@ def array_equivalent_object(left: object[:], right: object[:]) -> bool:
return False
raise
+ cnp.PyArray_MultiIter_NEXT(mi)
+
return True
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 375d05bdf11ff..a225d2cd12eac 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -565,16 +565,7 @@ def _array_equivalent_object(left: np.ndarray, right: np.ndarray, strict_nan: bo
if not strict_nan:
# isna considers NaN and None to be equivalent.
- if left.flags["F_CONTIGUOUS"] and right.flags["F_CONTIGUOUS"]:
- # we can improve performance by doing a copy-free ravel
- # e.g. in frame_methods.Equals.time_frame_nonunique_equal
- # if we transposed the frames
- left = left.ravel("K")
- right = right.ravel("K")
-
- return lib.array_equivalent_object(
- ensure_object(left.ravel()), ensure_object(right.ravel())
- )
+ return lib.array_equivalent_object(ensure_object(left), ensure_object(right))
for left_value, right_value in zip(left, right):
if left_value is NaT and right_value is not NaT:
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index cc365bef2b183..21c49807b7743 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -466,6 +466,27 @@ def test_array_equivalent_series(val):
assert not array_equivalent(Series([arr, arr]), Series([arr, val]))
+def test_array_equivalent_array_mismatched_shape():
+ # to trigger the motivating bug, the first N elements of the arrays need
+ # to match
+ first = np.array([1, 2, 3])
+ second = np.array([1, 2])
+
+ left = Series([first, "a"], dtype=object)
+ right = Series([second, "a"], dtype=object)
+ assert not array_equivalent(left, right)
+
+
+def test_array_equivalent_array_mismatched_dtype():
+ # same shape, different dtype can still be equivalent
+ first = np.array([1, 2], dtype=np.float64)
+ second = np.array([1, 2])
+
+ left = Series([first, "a"], dtype=object)
+ right = Series([second, "a"], dtype=object)
+ assert array_equivalent(left, right)
+
+
def test_array_equivalent_different_dtype_but_equal():
# Unclear if this is exposed anywhere in the public-facing API
assert array_equivalent(np.array([1, 2]), np.array([1.0, 2.0]))
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49363 | 2022-10-27T22:33:38Z | 2022-11-23T22:23:56Z | 2022-11-23T22:23:56Z | 2022-11-23T22:29:26Z |
DEPR: disallow int fill_value in shift with dt64/td64 | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 252c444b2e60c..68f38ae12c9c6 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -243,6 +243,7 @@ Removal of prior version deprecations/changes
- Removed :meth:`Series.str.__iter__` (:issue:`28277`)
- Removed ``pandas.SparseArray`` in favor of :class:`arrays.SparseArray` (:issue:`30642`)
- Removed ``pandas.SparseSeries`` and ``pandas.SparseDataFrame``, including pickle support. (:issue:`30642`)
+- Enforced disallowing passing an integer ``fill_value`` to :meth:`DataFrame.shift` and :meth:`Series.shift`` with datetime64, timedelta64, or period dtypes (:issue:`32591`)
- Enforced disallowing a string column label into ``times`` in :meth:`DataFrame.ewm` (:issue:`43265`)
- Enforced disallowing a tuple of column labels into :meth:`.DataFrameGroupBy.__getitem__` (:issue:`30546`)
- Removed setting Categorical._codes directly (:issue:`41429`)
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 74fc15a6fad63..fefc220403b9d 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -252,16 +252,11 @@ def _validate_searchsorted_value(
@doc(ExtensionArray.shift)
def shift(self, periods: int = 1, fill_value=None, axis: AxisInt = 0):
- fill_value = self._validate_shift_value(fill_value)
+ fill_value = self._validate_scalar(fill_value)
new_values = shift(self._ndarray, periods, axis, fill_value)
return self._from_backing_data(new_values)
- def _validate_shift_value(self, fill_value):
- # TODO(2.0): after deprecation in datetimelikearraymixin is enforced,
- # we can remove this and use validate_fill_value directly
- return self._validate_scalar(fill_value)
-
def __setitem__(self, key, value) -> None:
key = check_array_indexer(self, key)
value = self._validate_setitem_value(value)
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index bcf4b5d58bf74..17ea71c8d29a4 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -603,37 +603,6 @@ def _validate_comparison_value(self, other):
return other
- def _validate_shift_value(self, fill_value):
- # TODO(2.0): once this deprecation is enforced, use _validate_scalar
- if is_valid_na_for_dtype(fill_value, self.dtype):
- fill_value = NaT
- elif isinstance(fill_value, self._recognized_scalars):
- fill_value = self._scalar_type(fill_value)
- else:
- new_fill: DatetimeLikeScalar
-
- # only warn if we're not going to raise
- if self._scalar_type is Period and lib.is_integer(fill_value):
- # kludge for #31971 since Period(integer) tries to cast to str
- new_fill = Period._from_ordinal(fill_value, freq=self.freq)
- else:
- new_fill = self._scalar_type(fill_value)
-
- # stacklevel here is chosen to be correct when called from
- # DataFrame.shift or Series.shift
- warnings.warn(
- f"Passing {type(fill_value)} to shift is deprecated and "
- "will raise in a future version, pass "
- f"{self._scalar_type.__name__} instead.",
- FutureWarning,
- # There is no way to hard-code the level since this might be
- # reached directly or called from the Index or Block method
- stacklevel=find_stack_level(),
- )
- fill_value = new_fill
-
- return self._unbox(fill_value, setitem=True)
-
def _validate_scalar(
self,
value,
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 8f9d38044e7ef..a899e95bac41d 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -554,22 +554,12 @@ def test_inplace_arithmetic(self):
tm.assert_equal(arr, expected)
def test_shift_fill_int_deprecated(self):
- # GH#31971
+ # GH#31971, enforced in 2.0
data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
arr = self.array_cls(data, freq="D")
- msg = "Passing <class 'int'> to shift"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = arr.shift(1, fill_value=1)
-
- expected = arr.copy()
- if self.array_cls is PeriodArray:
- fill_val = arr._scalar_type._from_ordinal(1, freq=arr.freq)
- else:
- fill_val = arr._scalar_type(1)
- expected[0] = fill_val
- expected[1:] = arr[:-1]
- tm.assert_equal(result, expected)
+ with pytest.raises(TypeError, match="value should be a"):
+ arr.shift(1, fill_value=1)
def test_median(self, arr1d):
arr = arr1d
diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py
index f76deca9048be..3b33d0cc80445 100644
--- a/pandas/tests/frame/methods/test_shift.py
+++ b/pandas/tests/frame/methods/test_shift.py
@@ -505,26 +505,19 @@ def test_shift_dt64values_int_fill_deprecated(self):
# GH#31971
ser = Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")])
- with tm.assert_produces_warning(FutureWarning):
- result = ser.shift(1, fill_value=0)
- expected = Series([pd.Timestamp(0), ser[0]])
- tm.assert_series_equal(result, expected)
+ with pytest.raises(TypeError, match="value should be a"):
+ ser.shift(1, fill_value=0)
df = ser.to_frame()
- with tm.assert_produces_warning(FutureWarning):
- result = df.shift(1, fill_value=0)
- expected = expected.to_frame()
- tm.assert_frame_equal(result, expected)
+ with pytest.raises(TypeError, match="value should be a"):
+ df.shift(1, fill_value=0)
# axis = 1
df2 = DataFrame({"A": ser, "B": ser})
df2._consolidate_inplace()
- with tm.assert_produces_warning(FutureWarning):
- result = df2.shift(1, axis=1, fill_value=0)
-
- expected = DataFrame({"A": [pd.Timestamp(0), pd.Timestamp(0)], "B": df2["A"]})
- tm.assert_frame_equal(result, expected)
+ with pytest.raises(TypeError, match="value should be a"):
+ df2.shift(1, axis=1, fill_value=0)
# same thing but not consolidated
# This isn't great that we get different behavior, but
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49362 | 2022-10-27T21:07:34Z | 2022-10-28T18:27:42Z | 2022-10-28T18:27:42Z | 2022-10-28T20:50:11Z |
DEPR: DTA(float_data, dtype=dt64tz) | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 780318769f04e..ad2faa396035a 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -282,6 +282,7 @@ Removal of prior version deprecations/changes
- Changed behavior of :class:`DataFrame` constructor when passed a ``dtype`` (other than int) that the data cannot be cast to; it now raises instead of silently ignoring the dtype (:issue:`41733`)
- Changed the behavior of :class:`Series` constructor, it will no longer infer a datetime64 or timedelta64 dtype from string entries (:issue:`41731`)
- Changed behavior of :class:`Index` constructor when passed a ``SparseArray`` or ``SparseDtype`` to retain that dtype instead of casting to ``numpy.ndarray`` (:issue:`43930`)
+- Changed behavior of :class:`Index`, :class:`Series`, :class:`DataFrame` constructors with floating-dtype data and a :class:`DatetimeTZDtype`, the data are now interpreted as UTC-times instead of wall-times, consistent with how integer-dtype data are treated (:issue:`45573`)
- Removed the deprecated ``base`` and ``loffset`` arguments from :meth:`pandas.DataFrame.resample`, :meth:`pandas.Series.resample` and :class:`pandas.Grouper`. Use ``offset`` or ``origin`` instead (:issue:`31809`)
- Changed behavior of :meth:`DataFrame.any` and :meth:`DataFrame.all` with ``bool_only=True``; object-dtype columns with all-bool values will no longer be included, manually cast to ``bool`` dtype first (:issue:`46188`)
- Enforced deprecation of silently dropping columns that raised a ``TypeError`` in :class:`Series.transform` and :class:`DataFrame.transform` when used with a list or dictionary (:issue:`43740`)
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index ca54ab163ab64..d2c2811759e7c 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -2230,27 +2230,10 @@ def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None):
return data, copy
if is_float_dtype(data.dtype):
- # Note: we must cast to datetime64[ns] here in order to treat these
- # as wall-times instead of UTC timestamps.
- data = data.astype(DT64NS_DTYPE)
+ # pre-2.0 we treated these as wall-times, inconsistent with ints
+ # GH#23675, GH#45573 deprecated to treat symmetrically with integer dtypes
+ data = data.astype(np.int64)
copy = False
- if (
- tz is not None
- and len(data) > 0
- and not timezones.is_utc(timezones.maybe_get_tz(tz))
- ):
- # GH#23675, GH#45573 deprecate to treat symmetrically with integer dtypes
- warnings.warn(
- "The behavior of DatetimeArray._from_sequence with a timezone-aware "
- "dtype and floating-dtype data is deprecated. In a future version, "
- "this data will be interpreted as nanosecond UTC timestamps "
- "instead of wall-times, matching the behavior with integer dtypes. "
- "To retain the old behavior, explicitly cast to 'datetime64[ns]' "
- "before passing the data to pandas. To get the future behavior, "
- "first cast to 'int64'.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
elif is_timedelta64_dtype(data.dtype) or is_bool_dtype(data.dtype):
# GH#29794 enforcing deprecation introduced in GH#23539
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index e1dd182a5ae30..cc365bef2b183 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -426,11 +426,9 @@ def test_array_equivalent(dtype_equal):
dtype_equal=dtype_equal,
)
- msg = "will be interpreted as nanosecond UTC timestamps instead of wall-times"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- dti1 = DatetimeIndex([0, np.nan], tz="US/Eastern")
- dti2 = DatetimeIndex([0, np.nan], tz="CET")
- dti3 = DatetimeIndex([1, np.nan], tz="US/Eastern")
+ dti1 = DatetimeIndex([0, np.nan], tz="US/Eastern")
+ dti2 = DatetimeIndex([0, np.nan], tz="CET")
+ dti3 = DatetimeIndex([1, np.nan], tz="US/Eastern")
assert array_equivalent(
dti1,
@@ -444,7 +442,7 @@ def test_array_equivalent(dtype_equal):
)
# The rest are not dtype_equal
assert not array_equivalent(DatetimeIndex([0, np.nan]), dti1)
- assert not array_equivalent(
+ assert array_equivalent(
dti2,
dti1,
)
diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py
index 1423581555ee6..7d0d4392daef6 100644
--- a/pandas/tests/series/methods/test_astype.py
+++ b/pandas/tests/series/methods/test_astype.py
@@ -396,10 +396,7 @@ def test_astype_nan_to_bool(self):
def test_astype_ea_to_datetimetzdtype(self, dtype):
# GH37553
ser = Series([4, 0, 9], dtype=dtype)
- warn = FutureWarning if ser.dtype.kind == "f" else None
- msg = "with a timezone-aware dtype and floating-dtype data"
- with tm.assert_produces_warning(warn, match=msg):
- result = ser.astype(DatetimeTZDtype(tz="US/Pacific"))
+ result = ser.astype(DatetimeTZDtype(tz="US/Pacific"))
expected = Series(
{
@@ -409,21 +406,6 @@ def test_astype_ea_to_datetimetzdtype(self, dtype):
}
)
- if dtype in tm.FLOAT_EA_DTYPES:
- expected = Series(
- {
- 0: Timestamp(
- "1970-01-01 00:00:00.000000004-08:00", tz="US/Pacific"
- ),
- 1: Timestamp(
- "1970-01-01 00:00:00.000000000-08:00", tz="US/Pacific"
- ),
- 2: Timestamp(
- "1970-01-01 00:00:00.000000009-08:00", tz="US/Pacific"
- ),
- }
- )
-
tm.assert_series_equal(result, expected)
def test_astype_retain_Attrs(self, any_numpy_dtype):
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49361 | 2022-10-27T20:59:32Z | 2022-11-02T00:53:42Z | 2022-11-02T00:53:42Z | 2022-11-02T15:09:44Z |
DEPR: non-keyword arguments | diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py
index 05e12630d7540..171e4feb290cf 100644
--- a/asv_bench/benchmarks/reshape.py
+++ b/asv_bench/benchmarks/reshape.py
@@ -36,7 +36,7 @@ def setup(self):
self.df = DataFrame(data)
def time_reshape_pivot_time_series(self):
- self.df.pivot("date", "variable", "value")
+ self.df.pivot(index="date", columns="variable", values="value")
class SimpleReshape:
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 252c444b2e60c..2cb77c14c1082 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -197,6 +197,13 @@ Removal of prior version deprecations/changes
- Disallow passing non-round floats to :class:`Timestamp` with ``unit="M"`` or ``unit="Y"`` (:issue:`47266`)
- Remove keywords ``convert_float`` and ``mangle_dupe_cols`` from :func:`read_excel` (:issue:`41176`)
- Disallow passing non-keyword arguments to :func:`read_excel` except ``io`` and ``sheet_name`` (:issue:`34418`)
+- Disallow passing non-keyword arguments to :meth:`DataFrame.set_index` except ``keys`` (:issue:`41495`)
+- Disallow passing non-keyword arguments to :meth:`Resampler.interpolate` except ``method`` (:issue:`41699`)
+- Disallow passing non-keyword arguments to :meth:`DataFrame.reset_index` and :meth:`Series.reset_index` except ``level`` (:issue:`41496`)
+- Disallow passing non-keyword arguments to :meth:`DataFrame.dropna` and :meth:`Series.dropna` (:issue:`41504`)
+- Disallow passing non-keyword arguments to :meth:`ExtensionArray.argsort` (:issue:`46134`)
+- Disallow passing non-keyword arguments to :meth:`Categorical.sort_values` (:issue:`47618`)
+- Disallow passing non-keyword arguments to :meth:`Index.drop_duplicates` and :meth:`Series.drop_duplicates` (:issue:`41485`)
- Disallow passing non-keyword arguments to :meth:`DataFrame.drop_duplicates` except for ``subset`` (:issue:`41485`)
- Disallow passing non-keyword arguments to :meth:`DataFrame.sort_index` and :meth:`Series.sort_index` (:issue:`41506`)
- Disallow passing non-keyword arguments to :meth:`DataFrame.interpolate` and :meth:`Series.interpolate` except for ``method`` (:issue:`41510`)
@@ -209,6 +216,9 @@ Removal of prior version deprecations/changes
- Disallow passing non-keyword arguments to :func:`read_json` except for ``path_or_buf`` (:issue:`27573`)
- Disallow passing non-keyword arguments to :func:`read_sas` except for ``filepath_or_buffer`` (:issue:`47154`)
- Disallow passing non-keyword arguments to :func:`read_stata` except for ``filepath_or_buffer`` (:issue:`48128`)
+- Disallow passing non-keyword arguments to :func:`read_csv` except ``filepath_or_buffer`` (:issue:`41485`)
+- Disallow passing non-keyword arguments to :func:`read_table` except ``filepath_or_buffer`` (:issue:`41485`)
+- Disallow passing non-keyword arguments to :func:`read_fwf` except ``filepath_or_buffer`` (:issue:`44710`)
- Disallow passing non-keyword arguments to :func:`read_xml` except for ``path_or_buffer`` (:issue:`45133`)
- Disallow passing non-keyword arguments to :meth:`Series.mask` and :meth:`DataFrame.mask` except ``cond`` and ``other`` (:issue:`41580`)
- Disallow passing non-keyword arguments to :meth:`DataFrame.to_stata` except for ``path`` (:issue:`48128`)
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index f18664915d015..945ae52c53047 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -20,10 +20,7 @@
pa_version_under6p0,
pa_version_under7p0,
)
-from pandas.util._decorators import (
- deprecate_nonkeyword_arguments,
- doc,
-)
+from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
is_array_like,
@@ -452,13 +449,12 @@ def isna(self) -> npt.NDArray[np.bool_]:
"""
return self._data.is_null().to_numpy()
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def argsort(
self,
+ *,
ascending: bool = True,
kind: SortKind = "quicksort",
na_position: str = "last",
- *args,
**kwargs,
) -> np.ndarray:
order = "ascending" if ascending else "descending"
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 9758ca84d236b..60772cbcc30a1 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -48,7 +48,6 @@
Appender,
Substitution,
cache_readonly,
- deprecate_nonkeyword_arguments,
)
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import (
@@ -662,13 +661,12 @@ def _values_for_argsort(self) -> np.ndarray:
# Note: this is used in `ExtensionArray.argsort/argmin/argmax`.
return np.array(self)
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def argsort(
self,
+ *,
ascending: bool = True,
kind: SortKind = "quicksort",
na_position: str = "last",
- *args,
**kwargs,
) -> np.ndarray:
"""
@@ -699,7 +697,7 @@ def argsort(
# 1. _values_for_argsort : construct the values passed to np.argsort
# 2. argsort : total control over sorting. In case of overriding this,
# it is recommended to also override argmax/argmin
- ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
+ ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs)
values = self._values_for_argsort()
return nargsort(
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 980e8b4936c4e..2d4c5808d3132 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1802,10 +1802,8 @@ def check_for_ordered(self, op) -> None:
"Categorical to an ordered one\n"
)
- # error: Signature of "argsort" incompatible with supertype "ExtensionArray"
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
- def argsort( # type: ignore[override]
- self, ascending: bool = True, kind: SortKind = "quicksort", **kwargs
+ def argsort(
+ self, *, ascending: bool = True, kind: SortKind = "quicksort", **kwargs
):
"""
Return the indices that would sort the Categorical.
@@ -1875,9 +1873,12 @@ def sort_values(
) -> None:
...
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def sort_values(
- self, inplace: bool = False, ascending: bool = True, na_position: str = "last"
+ self,
+ *,
+ inplace: bool = False,
+ ascending: bool = True,
+ na_position: str = "last",
) -> Categorical | None:
"""
Sort the Categorical by category value returning a new
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 2c15a7bbc88a7..2bc6c9174af81 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -43,10 +43,7 @@
)
from pandas.compat.numpy import function as nv
from pandas.errors import IntCastingNaNError
-from pandas.util._decorators import (
- Appender,
- deprecate_nonkeyword_arguments,
-)
+from pandas.util._decorators import Appender
from pandas.core.dtypes.cast import LossySetitemError
from pandas.core.dtypes.common import (
@@ -796,16 +793,15 @@ def __lt__(self, other):
def __le__(self, other):
return self._cmp_method(other, operator.le)
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def argsort(
self,
+ *,
ascending: bool = True,
kind: SortKind = "quicksort",
na_position: str = "last",
- *args,
**kwargs,
) -> np.ndarray:
- ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
+ ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs)
if ascending and kind == "quicksort" and na_position == "last":
return np.lexsort((self.right, self.left))
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 4b147dc619692..5e0694ea91360 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1304,7 +1304,7 @@ def searchsorted(
sorter=sorter,
)
- def drop_duplicates(self, keep: DropKeep = "first"):
+ def drop_duplicates(self, *, keep: DropKeep = "first"):
duplicated = self._duplicated(keep=keep)
# error: Value of type "IndexOpsMixin" is not indexable
return self[~duplicated] # type: ignore[index]
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 2809aa9eaa37d..c54eb0bb57747 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5835,10 +5835,10 @@ def set_index(
) -> None:
...
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "keys"])
def set_index(
self,
keys,
+ *,
drop: bool = True,
append: bool = False,
inplace: bool = False,
@@ -6080,10 +6080,10 @@ def reset_index(
) -> DataFrame | None:
...
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "level"])
def reset_index(
self,
level: IndexLabel = None,
+ *,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
@@ -6376,9 +6376,9 @@ def dropna(
) -> None:
...
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def dropna(
self,
+ *,
axis: Axis = 0,
how: AnyAll | NoDefault = no_default,
thresh: int | NoDefault = no_default,
@@ -8500,9 +8500,8 @@ def groupby(
@Substitution("")
@Appender(_shared_docs["pivot"])
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def pivot(
- self, index=lib.NoDefault, columns=lib.NoDefault, values=lib.NoDefault
+ self, *, index=lib.NoDefault, columns=lib.NoDefault, values=lib.NoDefault
) -> DataFrame:
from pandas.core.reshape.pivot import pivot
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 241e528fbdc74..7571525363447 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -67,7 +67,6 @@
from pandas.util._decorators import (
Appender,
cache_readonly,
- deprecate_nonkeyword_arguments,
doc,
)
from pandas.util._exceptions import (
@@ -2894,8 +2893,7 @@ def unique(self: _IndexT, level: Hashable | None = None) -> _IndexT:
result = super().unique()
return self._shallow_copy(result)
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
- def drop_duplicates(self: _IndexT, keep: DropKeep = "first") -> _IndexT:
+ def drop_duplicates(self: _IndexT, *, keep: DropKeep = "first") -> _IndexT:
"""
Return Index with duplicate values removed.
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 9f9fdef089353..ee738b43a481b 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -46,7 +46,6 @@
from pandas.util._decorators import (
Appender,
Substitution,
- deprecate_nonkeyword_arguments,
doc,
)
@@ -881,11 +880,11 @@ def fillna(self, method, limit=None):
"""
return self._upsample(method, limit=limit)
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "method"])
@doc(NDFrame.interpolate, **_shared_docs_kwargs)
def interpolate(
self,
method: QuantileInterpolation = "linear",
+ *,
axis: Axis = 0,
limit=None,
inplace: bool = False,
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 7d6932457ac29..9bfb2a0561532 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1409,10 +1409,10 @@ def reset_index(
) -> None:
...
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "level"])
def reset_index(
self,
level: IndexLabel = None,
+ *,
drop: bool = False,
name: Level = lib.no_default,
inplace: bool = False,
@@ -2186,23 +2186,22 @@ def unique(self) -> ArrayLike:
@overload
def drop_duplicates(
- self, keep: DropKeep = ..., *, inplace: Literal[False] = ...
+ self, *, keep: DropKeep = ..., inplace: Literal[False] = ...
) -> Series:
...
@overload
- def drop_duplicates(self, keep: DropKeep = ..., *, inplace: Literal[True]) -> None:
+ def drop_duplicates(self, *, keep: DropKeep = ..., inplace: Literal[True]) -> None:
...
@overload
def drop_duplicates(
- self, keep: DropKeep = ..., *, inplace: bool = ...
+ self, *, keep: DropKeep = ..., inplace: bool = ...
) -> Series | None:
...
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def drop_duplicates(
- self, keep: DropKeep = "first", inplace: bool = False
+ self, *, keep: DropKeep = "first", inplace: bool = False
) -> Series | None:
"""
Return Series with duplicate values removed.
@@ -5687,9 +5686,9 @@ def dropna(
) -> None:
...
- @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def dropna(
self,
+ *,
axis: Axis = 0,
inplace: bool = False,
how: AnyAll | None = None,
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index abd1182214f5f..1f4c449fc03a1 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -40,7 +40,6 @@
from pandas.util._decorators import (
Appender,
deprecate_kwarg,
- deprecate_nonkeyword_arguments,
)
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_bool_kwarg
@@ -864,7 +863,6 @@ def read_csv(
@deprecate_kwarg(old_arg_name="mangle_dupe_cols", new_arg_name=None)
-@deprecate_nonkeyword_arguments(version=None, allowed_args=["filepath_or_buffer"])
@Appender(
_doc_read_csv_and_table.format(
func_name="read_csv",
@@ -877,6 +875,7 @@ def read_csv(
)
def read_csv(
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
sep: str | None | lib.NoDefault = lib.no_default,
delimiter: str | None | lib.NoDefault = None,
# Column and Index Locations and Names
@@ -1208,7 +1207,6 @@ def read_table(
@deprecate_kwarg(old_arg_name="mangle_dupe_cols", new_arg_name=None)
-@deprecate_nonkeyword_arguments(version=None, allowed_args=["filepath_or_buffer"])
@Appender(
_doc_read_csv_and_table.format(
func_name="read_table",
@@ -1221,6 +1219,7 @@ def read_table(
)
def read_table(
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
sep: str | None | lib.NoDefault = lib.no_default,
delimiter: str | None | lib.NoDefault = None,
# Column and Index Locations and Names
@@ -1307,9 +1306,9 @@ def read_table(
return _read(filepath_or_buffer, kwds)
-@deprecate_nonkeyword_arguments(version=None, allowed_args=["filepath_or_buffer"])
def read_fwf(
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
colspecs: Sequence[tuple[int, int]] | str | None = "infer",
widths: Sequence[int] | None = None,
infer_nrows: int = 100,
diff --git a/pandas/tests/frame/methods/test_drop.py b/pandas/tests/frame/methods/test_drop.py
index 6e5b97af7c297..e6db7ec8ed3d7 100644
--- a/pandas/tests/frame/methods/test_drop.py
+++ b/pandas/tests/frame/methods/test_drop.py
@@ -510,18 +510,6 @@ def test_drop_with_duplicate_columns2(self):
result = df2.drop("C", axis=1)
tm.assert_frame_equal(result, expected)
- def test_drop_pos_args_deprecation(self):
- # https://github.com/pandas-dev/pandas/issues/41485
- df = DataFrame({"a": [1, 2, 3]})
- msg = (
- r"In a future version of pandas all arguments of DataFrame\.drop "
- r"except for the argument 'labels' will be keyword-only"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.drop("a", 1)
- expected = DataFrame(index=[0, 1, 2])
- tm.assert_frame_equal(result, expected)
-
def test_drop_inplace_no_leftover_column_reference(self):
# GH 13934
df = DataFrame({"a": [1, 2, 3]})
diff --git a/pandas/tests/frame/methods/test_dropna.py b/pandas/tests/frame/methods/test_dropna.py
index 62351aa89c914..8c4d9499e3676 100644
--- a/pandas/tests/frame/methods/test_dropna.py
+++ b/pandas/tests/frame/methods/test_dropna.py
@@ -231,18 +231,6 @@ def test_dropna_with_duplicate_columns(self):
result = df.dropna(subset=["A", "C"], how="all")
tm.assert_frame_equal(result, expected)
- def test_dropna_pos_args_deprecation(self):
- # https://github.com/pandas-dev/pandas/issues/41485
- df = DataFrame({"a": [1, 2, 3]})
- msg = (
- r"In a future version of pandas all arguments of DataFrame\.dropna "
- r"will be keyword-only"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.dropna(1)
- expected = DataFrame({"a": [1, 2, 3]})
- tm.assert_frame_equal(result, expected)
-
def test_set_single_column_subset(self):
# GH 41021
df = DataFrame({"A": [1, 2, 3], "B": list("abc"), "C": [4, np.NaN, 5]})
diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py
index 37431bc291b76..30c033572335a 100644
--- a/pandas/tests/frame/methods/test_reset_index.py
+++ b/pandas/tests/frame/methods/test_reset_index.py
@@ -730,19 +730,6 @@ def test_reset_index_multiindex_nat():
tm.assert_frame_equal(result, expected)
-def test_drop_pos_args_deprecation():
- # https://github.com/pandas-dev/pandas/issues/41485
- df = DataFrame({"a": [1, 2, 3]}).set_index("a")
- msg = (
- r"In a future version of pandas all arguments of DataFrame\.reset_index "
- r"except for the argument 'level' will be keyword-only"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.reset_index("a", False)
- expected = DataFrame({"a": [1, 2, 3]})
- tm.assert_frame_equal(result, expected)
-
-
def test_reset_index_interval_columns_object_cast():
# GH 19136
df = DataFrame(
diff --git a/pandas/tests/frame/methods/test_set_index.py b/pandas/tests/frame/methods/test_set_index.py
index 4c39cf99f18ff..8e5f11840fbe5 100644
--- a/pandas/tests/frame/methods/test_set_index.py
+++ b/pandas/tests/frame/methods/test_set_index.py
@@ -704,15 +704,3 @@ def test_set_index_periodindex(self):
tm.assert_index_equal(df.index, idx1)
df = df.set_index(idx2)
tm.assert_index_equal(df.index, idx2)
-
- def test_drop_pos_args_deprecation(self):
- # https://github.com/pandas-dev/pandas/issues/41485
- df = DataFrame({"a": [1, 2, 3]})
- msg = (
- r"In a future version of pandas all arguments of DataFrame\.set_index "
- r"except for the argument 'keys' will be keyword-only"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = df.set_index("a", True)
- expected = DataFrame(index=Index([1, 2, 3], name="a"))
- tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py
index 509daff1262b4..6c31caac4b42d 100644
--- a/pandas/tests/indexes/multi/test_duplicates.py
+++ b/pandas/tests/indexes/multi/test_duplicates.py
@@ -326,19 +326,6 @@ def test_duplicated_series_complex_numbers(dtype):
tm.assert_series_equal(result, expected)
-def test_multi_drop_duplicates_pos_args_deprecation():
- # GH#41485
- idx = MultiIndex.from_arrays([[1, 2, 3, 1], [1, 2, 3, 1]])
- msg = (
- "In a future version of pandas all arguments of "
- "Index.drop_duplicates will be keyword-only"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = idx.drop_duplicates("last")
- expected = MultiIndex.from_arrays([[2, 3, 1], [2, 3, 1]])
- tm.assert_index_equal(expected, result)
-
-
def test_midx_unique_ea_dtype():
# GH#48335
vals_a = Series([1, 2, NA, NA], dtype="Int64")
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 698bec81e3630..523decba33b6e 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1590,20 +1590,6 @@ def test_construct_from_memoryview(klass, extra_kwargs):
tm.assert_index_equal(result, expected, exact=True)
-def test_drop_duplicates_pos_args_deprecation():
- # GH#41485
- idx = Index([1, 2, 3, 1])
- msg = (
- "In a future version of pandas all arguments of "
- "Index.drop_duplicates will be keyword-only"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- idx.drop_duplicates("last")
- result = idx.drop_duplicates("last")
- expected = Index([2, 3, 1])
- tm.assert_index_equal(expected, result)
-
-
def test_get_attributes_dict_deprecated():
# https://github.com/pandas-dev/pandas/pull/44028
idx = Index([1, 2, 3, 1])
diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py
index 52d8abe76ecbc..af7ebc5e9555c 100644
--- a/pandas/tests/io/parser/common/test_common_basic.py
+++ b/pandas/tests/io/parser/common/test_common_basic.py
@@ -805,17 +805,6 @@ def test_read_csv_line_break_as_separator(kwargs, all_parsers):
parser.read_csv(StringIO(data), **kwargs)
-def test_read_csv_posargs_deprecation(all_parsers):
- # GH 41485
- f = StringIO("a,b\n1,2")
- parser = all_parsers
- msg = (
- "In a future version of pandas all arguments of read_csv "
- "except for the argument 'filepath_or_buffer' will be keyword-only"
- )
- parser.read_csv_check_warnings(FutureWarning, msg, f, " ")
-
-
@pytest.mark.parametrize("delimiter", [",", "\t"])
def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter):
# GH: 35958
@@ -941,17 +930,6 @@ def test_short_multi_line(all_parsers):
tm.assert_frame_equal(result, expected)
-def test_read_table_posargs_deprecation(all_parsers):
- # https://github.com/pandas-dev/pandas/issues/41485
- data = StringIO("a\tb\n1\t2")
- parser = all_parsers
- msg = (
- "In a future version of pandas all arguments of read_table "
- "except for the argument 'filepath_or_buffer' will be keyword-only"
- )
- parser.read_table_check_warnings(FutureWarning, msg, data, " ")
-
-
def test_read_seek(all_parsers):
# GH48646
parser = all_parsers
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index d6d787df39dfa..3e451239dcd40 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -910,18 +910,6 @@ def test_skiprows_with_iterator():
tm.assert_frame_equal(result, expected_frames[i])
-def test_skiprows_passing_as_positional_deprecated():
- # GH#41485
- data = """0
-1
-2
-"""
- with tm.assert_produces_warning(FutureWarning, match="keyword-only"):
- result = read_fwf(StringIO(data), [(0, 2)])
- expected = DataFrame({"0": [1, 2]})
- tm.assert_frame_equal(result, expected)
-
-
def test_names_and_infer_colspecs():
# GH#45337
data = """X Y Z
diff --git a/pandas/tests/resample/test_deprecated.py b/pandas/tests/resample/test_deprecated.py
index 3aac7a961fa19..e1e042aae1447 100644
--- a/pandas/tests/resample/test_deprecated.py
+++ b/pandas/tests/resample/test_deprecated.py
@@ -278,30 +278,3 @@ def test_resample_base_with_timedeltaindex():
tm.assert_index_equal(without_base.index, exp_without_base)
tm.assert_index_equal(with_base.index, exp_with_base)
-
-
-def test_interpolate_posargs_deprecation():
- # GH 41485
- idx = pd.to_datetime(["1992-08-27 07:46:48", "1992-08-27 07:46:59"])
- s = Series([1, 4], index=idx)
-
- msg = (
- r"In a future version of pandas all arguments of Resampler\.interpolate "
- r"except for the argument 'method' will be keyword-only"
- )
-
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = s.resample("3s").interpolate("linear", 0)
-
- idx = pd.to_datetime(
- [
- "1992-08-27 07:46:48",
- "1992-08-27 07:46:51",
- "1992-08-27 07:46:54",
- "1992-08-27 07:46:57",
- ]
- )
- expected = Series([1.0, 1.0, 1.0, 1.0], index=idx)
-
- expected.index._data.freq = "3s"
- tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py
index c5cffa0c9fb0f..698430095b453 100644
--- a/pandas/tests/series/methods/test_drop_duplicates.py
+++ b/pandas/tests/series/methods/test_drop_duplicates.py
@@ -242,16 +242,3 @@ def test_drop_duplicates_categorical_bool_na(self, nulls_fixture):
index=[0, 1, 4],
)
tm.assert_series_equal(result, expected)
-
-
-def test_drop_duplicates_pos_args_deprecation():
- # GH#41485
- s = Series(["a", "b", "c", "b"])
- msg = (
- "In a future version of pandas all arguments of "
- "Series.drop_duplicates will be keyword-only"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = s.drop_duplicates("last")
- expected = Series(["a", "c", "b"], index=[0, 2, 3])
- tm.assert_series_equal(expected, result)
diff --git a/pandas/tests/series/methods/test_dropna.py b/pandas/tests/series/methods/test_dropna.py
index 0dab9271bfee5..5bff7306fac33 100644
--- a/pandas/tests/series/methods/test_dropna.py
+++ b/pandas/tests/series/methods/test_dropna.py
@@ -101,15 +101,3 @@ def test_datetime64_tz_dropna(self):
)
assert result.dtype == "datetime64[ns, Asia/Tokyo]"
tm.assert_series_equal(result, expected)
-
- def test_dropna_pos_args_deprecation(self):
- # https://github.com/pandas-dev/pandas/issues/41485
- ser = Series([1, 2, 3])
- msg = (
- r"In a future version of pandas all arguments of Series\.dropna "
- r"will be keyword-only"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = ser.dropna(0)
- expected = Series([1, 2, 3])
- tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_reset_index.py b/pandas/tests/series/methods/test_reset_index.py
index e7340aaf376e5..9362b0b52a698 100644
--- a/pandas/tests/series/methods/test_reset_index.py
+++ b/pandas/tests/series/methods/test_reset_index.py
@@ -148,18 +148,6 @@ def test_reset_index_with_drop(self, series_with_multilevel_index):
assert isinstance(deleveled, Series)
assert deleveled.index.name == ser.index.name
- def test_drop_pos_args_deprecation(self):
- # https://github.com/pandas-dev/pandas/issues/41485
- ser = Series([1, 2, 3], index=Index([1, 2, 3], name="a"))
- msg = (
- r"In a future version of pandas all arguments of Series\.reset_index "
- r"except for the argument 'level' will be keyword-only"
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- result = ser.reset_index("a", False)
- expected = DataFrame({"a": [1, 2, 3], 0: [1, 2, 3]})
- tm.assert_frame_equal(result, expected)
-
def test_reset_index_inplace_and_drop_ignore_name(self):
# GH#44575
ser = Series(range(2), name="old")
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/49359 | 2022-10-27T20:34:37Z | 2022-10-28T18:35:57Z | 2022-10-28T18:35:57Z | 2022-10-28T20:47:52Z |
API: stop silently ignoring parsing failures with dtype=dt64 | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 252c444b2e60c..25fe1d3d541c0 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -141,6 +141,7 @@ Other API changes
- The ``other`` argument in :meth:`DataFrame.mask` and :meth:`Series.mask` now defaults to ``no_default`` instead of ``np.nan`` consistent with :meth:`DataFrame.where` and :meth:`Series.where`. Entries will be filled with the corresponding NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension dtypes). (:issue:`49111`)
- When creating a :class:`Series` with a object-dtype :class:`Index` of datetime objects, pandas no longer silently converts the index to a :class:`DatetimeIndex` (:issue:`39307`, :issue:`23598`)
- :meth:`Series.unique` with dtype "timedelta64[ns]" or "datetime64[ns]" now returns :class:`TimedeltaArray` or :class:`DatetimeArray` instead of ``numpy.ndarray`` (:issue:`49176`)
+- Passing strings that cannot be parsed as datetimes to :class:`Series` or :class:`DataFrame` with ``dtype="datetime64[ns]"`` will raise instead of silently ignoring the keyword and returning ``object`` dtype (:issue:`24435`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 65112fc19ae56..54fa9629fecd4 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -20,7 +20,6 @@
)
import warnings
-from dateutil.parser import ParserError
import numpy as np
from pandas._libs import lib
@@ -1339,28 +1338,21 @@ def maybe_cast_to_datetime(
if value.size or not is_dtype_equal(value.dtype, dtype):
_disallow_mismatched_datetimelike(value, dtype)
- try:
- dta = sequence_to_datetimes(value)
- # GH 25843: Remove tz information since the dtype
- # didn't specify one
-
- if dta.tz is not None:
- raise ValueError(
- "Cannot convert timezone-aware data to "
- "timezone-naive dtype. Use "
- "pd.Series(values).dt.tz_localize(None) instead."
- )
-
- # TODO(2.0): Do this astype in sequence_to_datetimes to
- # avoid potential extra copy?
- dta = dta.astype(dtype, copy=False)
- value = dta
-
- except OutOfBoundsDatetime:
- raise
- except ParserError:
- # Note: this is dateutil's ParserError, not ours.
- pass
+ dta = sequence_to_datetimes(value)
+ # GH 25843: Remove tz information since the dtype
+ # didn't specify one
+
+ if dta.tz is not None:
+ raise ValueError(
+ "Cannot convert timezone-aware data to "
+ "timezone-naive dtype. Use "
+ "pd.Series(values).dt.tz_localize(None) instead."
+ )
+
+ # TODO(2.0): Do this astype in sequence_to_datetimes to
+ # avoid potential extra copy?
+ dta = dta.astype(dtype, copy=False)
+ value = dta
elif getattr(vdtype, "kind", None) in ["m", "M"]:
# we are already datetimelike and want to coerce to non-datetimelike;
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index ed9d7bced9253..57e498defccc1 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -16,7 +16,6 @@
DataFrame,
Series,
Timestamp,
- compat,
date_range,
option_context,
)
@@ -266,8 +265,8 @@ def f(dtype):
f("float64")
# 10822
- # invalid error message on dt inference
- if not compat.is_platform_windows():
+ msg = "Unknown string format: aa present at position 0"
+ with pytest.raises(ValueError, match=msg):
f("M8[ns]")
def test_pickle(self, float_string_frame, timezone_frame):
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 9817c758759d5..c492f894d4f7c 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -52,6 +52,16 @@
class TestSeriesConstructors:
+ def test_unparseable_strings_with_dt64_dtype(self):
+ # pre-2.0 these would be silently ignored and come back with object dtype
+ vals = ["aa"]
+ msg = "Unknown string format: aa present at position 0"
+ with pytest.raises(ValueError, match=msg):
+ Series(vals, dtype="datetime64[ns]")
+
+ with pytest.raises(ValueError, match=msg):
+ Series(np.array(vals, dtype=object), dtype="datetime64[ns]")
+
@pytest.mark.parametrize(
"constructor,check_index_type",
[
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
It's a bit ambiguous whether to call this a deprecation-enforcement or API change. In 1.3.0 we have
```
- Deprecated behavior of :class:`DataFrame` constructor when a ``dtype`` is passed and the data cannot be cast to that dtype. In a future version, this will raise instead of being silently ignored (:issue:`24435`)
```
but the PR that did that deprecation added that warning in a different code path, so we don't currently issue a warning for this case. (and the whatsnew didn't say anything about Series). | https://api.github.com/repos/pandas-dev/pandas/pulls/49358 | 2022-10-27T19:39:08Z | 2022-10-31T18:47:34Z | 2022-10-31T18:47:34Z | 2022-10-31T18:56:24Z |
STY: Style fix pandas/errors/__init__.py | diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 43ce8ad4abb45..ebe9a3d5bf472 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -156,19 +156,23 @@ class MergeError(ValueError):
class AccessorRegistrationWarning(Warning):
- """Warning for attribute conflicts in accessor registration."""
+ """
+ Warning for attribute conflicts in accessor registration.
+ """
class AbstractMethodError(NotImplementedError):
- """Raise this error instead of NotImplementedError for abstract methods
+ """
+ Raise this error instead of NotImplementedError for abstract methods
while keeping compatibility with Python 2 and Python 3.
"""
def __init__(self, class_instance, methodtype="method"):
types = {"method", "classmethod", "staticmethod", "property"}
if methodtype not in types:
- msg = f"methodtype must be one of {methodtype}, got {types} instead."
- raise ValueError(msg)
+ raise ValueError(
+ f"methodtype must be one of {methodtype}, got {types} instead."
+ )
self.methodtype = methodtype
self.class_instance = class_instance
@@ -177,5 +181,4 @@ def __str__(self) -> str:
name = self.class_instance.__name__
else:
name = type(self.class_instance).__name__
- msg = f"This {self.methodtype} must be defined in the concrete class {name}"
- return msg.format(methodtype=self.methodtype, name=name)
+ return f"This {self.methodtype} must be defined in the concrete class {name}"
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30066 | 2019-12-04T21:17:55Z | 2019-12-05T12:59:22Z | 2019-12-05T12:59:22Z | 2019-12-05T13:34:25Z |
CI: correct azure-36-locale slow name + move pyarrow | diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml
index 081145f846571..b5ebb79b0f1ab 100644
--- a/ci/azure/posix.yml
+++ b/ci/azure/posix.yml
@@ -20,14 +20,14 @@ jobs:
CONDA_PY: "36"
PATTERN: "not slow and not network"
py36_locale_slow_old_np:
- ENV_FILE: ci/deps/azure-36-locale.yaml
+ ENV_FILE: ci/deps/azure-36-locale_slow.yaml
CONDA_PY: "36"
PATTERN: "slow"
LOCALE_OVERRIDE: "zh_CN.UTF-8"
EXTRA_APT: "language-pack-zh-hans"
- py36_locale_slow:
- ENV_FILE: ci/deps/azure-36-locale_slow.yaml
+ py36_locale:
+ ENV_FILE: ci/deps/azure-36-locale.yaml
CONDA_PY: "36"
PATTERN: "not slow and not network"
LOCALE_OVERRIDE: "it_IT.UTF-8"
diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml
index 14cc4f2726e96..4f4c4524cb4dd 100644
--- a/ci/deps/azure-36-locale.yaml
+++ b/ci/deps/azure-36-locale.yaml
@@ -13,23 +13,27 @@ dependencies:
- pytest-azurepipelines
# pandas dependencies
- - beautifulsoup4==4.6.0
- - bottleneck=1.2.*
+ - beautifulsoup4
+ - gcsfs
+ - html5lib
+ - ipython
+ - jinja2
- lxml
- - matplotlib=2.2.2
- - numpy=1.14.*
- - openpyxl=2.4.8
- - python-dateutil
- - python-blosc
- - pytz=2017.2
- - scipy
- - sqlalchemy=1.1.4
- - xlrd=1.1.0
- - xlsxwriter=0.9.8
- - xlwt=1.2.0
+ - matplotlib=3.0.*
+ - nomkl
+ - numexpr
+ - numpy=1.15.*
+ - openpyxl
# lowest supported version of pyarrow (putting it here instead of in
# azure-36-minimum_versions because it needs numpy >= 1.14)
- pyarrow=0.12
- - pip
- - pip:
- - html5lib==1.0b2
+ - pytables
+ - python-dateutil
+ - pytz
+ - s3fs
+ - scipy
+ - xarray
+ - xlrd
+ - xlsxwriter
+ - xlwt
+ - moto
diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml
index 46ddd44931848..c3c94e365c259 100644
--- a/ci/deps/azure-36-locale_slow.yaml
+++ b/ci/deps/azure-36-locale_slow.yaml
@@ -13,24 +13,20 @@ dependencies:
- pytest-azurepipelines
# pandas dependencies
- - beautifulsoup4
- - gcsfs
- - html5lib
- - ipython
- - jinja2
+ - beautifulsoup4==4.6.0
+ - bottleneck=1.2.*
- lxml
- - matplotlib=3.0.*
- - nomkl
- - numexpr
- - numpy=1.15.*
- - openpyxl
- - pytables
+ - matplotlib=2.2.2
+ - numpy=1.14.*
+ - openpyxl=2.4.8
- python-dateutil
- - pytz
- - s3fs
+ - python-blosc
+ - pytz=2017.2
- scipy
- - xarray
- - xlrd
- - xlsxwriter
- - xlwt
- - moto
+ - sqlalchemy=1.1.4
+ - xlrd=1.1.0
+ - xlsxwriter=0.9.8
+ - xlwt=1.2.0
+ - pip
+ - pip:
+ - html5lib==1.0b2
| Follow-up on https://github.com/pandas-dev/pandas/pull/30039
I put it in the wrong file, because the file names were swapped...
Now, here, I just renamed both files to the other name (the diff apparently cannot the rename, therefore you see a lot of changes, but I can ensure I didn't change anything except for pyarrow).
But, I could also leave the files as is and switch them in the posix.yml file ? (use the envs for the different build)
I don't know if there was something specific in either environment that was specifically added for the slow tests?
cc @datapythonista | https://api.github.com/repos/pandas-dev/pandas/pulls/30065 | 2019-12-04T20:35:15Z | 2019-12-05T17:43:57Z | 2019-12-05T17:43:57Z | 2019-12-05T17:44:02Z |
CLN: NDFrame.setup_axes | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index fde3d1657b4f2..601dac3a1208b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -377,6 +377,8 @@ class DataFrame(NDFrame):
2 7 8 9
"""
+ _typ = "dataframe"
+
@property
def _constructor(self) -> Type["DataFrame"]:
return DataFrame
@@ -8143,10 +8145,6 @@ def isin(self, values):
DataFrame._setup_axes(
["index", "columns"],
- info_axis=1,
- stat_axis=0,
- axes_are_reversed=True,
- aliases={"rows": 0},
docs={
"index": "The index (row labels) of the DataFrame.",
"columns": "The column labels of the DataFrame.",
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c52b1c65ad08d..15c7b1610788f 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -177,6 +177,7 @@ class NDFrame(PandasObject, SelectionMixin):
_is_copy = None
_data: BlockManager
_attrs: Dict[Optional[Hashable], Any]
+ _typ: str
# ----------------------------------------------------------------------
# Constructors
@@ -283,71 +284,52 @@ def _constructor_expanddim(self):
# ----------------------------------------------------------------------
# Axis
+ _AXIS_ALIASES = {"rows": 0}
+ _AXIS_IALIASES = {0: "rows"}
+ _stat_axis_number = 0
+ _stat_axis_name = "index"
+ _ix = None
+ _AXIS_ORDERS: List[str]
+ _AXIS_NUMBERS: Dict[str, int]
+ _AXIS_NAMES: Dict[int, str]
+ _AXIS_REVERSED: bool
+ _info_axis_number: int
+ _info_axis_name: str
+ _AXIS_LEN: int
@classmethod
- def _setup_axes(
- cls,
- axes,
- info_axis=None,
- stat_axis=None,
- aliases=None,
- axes_are_reversed=False,
- build_axes=True,
- ns=None,
- docs=None,
- ):
+ def _setup_axes(cls, axes: List[str], docs: Dict[str, str]):
"""
Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
- info_axis_num : the axis of the selector dimension (int)
- stat_axis_num : the number of axis for the default stats (int)
- aliases : other names for a single axis (dict)
- axes_are_reversed : bool
- Whether to treat passed axes as reversed (DataFrame).
- build_axes : setup the axis properties (default True)
+ docs : docstrings for the axis properties
"""
+ info_axis = len(axes) - 1
+ axes_are_reversed = len(axes) > 1
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}
cls._AXIS_LEN = len(axes)
- cls._AXIS_ALIASES = aliases or dict()
- cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()}
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_REVERSED = axes_are_reversed
- # typ
- setattr(cls, "_typ", cls.__name__.lower())
-
- # indexing support
- cls._ix = None
-
- if info_axis is not None:
- cls._info_axis_number = info_axis
- cls._info_axis_name = axes[info_axis]
-
- if stat_axis is not None:
- cls._stat_axis_number = stat_axis
- cls._stat_axis_name = axes[stat_axis]
+ cls._info_axis_number = info_axis
+ cls._info_axis_name = axes[info_axis]
# setup the actual axis
- if build_axes:
-
- def set_axis(a, i):
- setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
- cls._internal_names_set.add(a)
-
- if axes_are_reversed:
- m = cls._AXIS_LEN - 1
- for i, a in cls._AXIS_NAMES.items():
- set_axis(a, m - i)
- else:
- for i, a in cls._AXIS_NAMES.items():
- set_axis(a, i)
+ def set_axis(a, i):
+ setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
+ cls._internal_names_set.add(a)
- assert not isinstance(ns, dict)
+ if axes_are_reversed:
+ for i, a in cls._AXIS_NAMES.items():
+ set_axis(a, 1 - i)
+ else:
+ for i, a in cls._AXIS_NAMES.items():
+ set_axis(a, i)
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
@@ -379,19 +361,6 @@ def _construct_axes_from_arguments(
args = list(args)
for a in self._AXIS_ORDERS:
- # if we have an alias for this axis
- alias = self._AXIS_IALIASES.get(a)
- if alias is not None:
- if a in kwargs:
- if alias in kwargs:
- raise TypeError(
- f"arguments are mutually exclusive for [{a},{alias}]"
- )
- continue
- if alias in kwargs:
- kwargs[a] = kwargs.pop(alias)
- continue
-
# look for a argument by position
if a not in kwargs:
try:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 410b10a69ecd5..6939571f41c9f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -153,6 +153,8 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
Copy input data.
"""
+ _typ = "series"
+
_metadata: List[str] = []
_accessors = {"dt", "cat", "str", "sparse"}
_deprecations = (
@@ -4424,11 +4426,7 @@ def to_period(self, freq=None, copy=True):
Series._setup_axes(
- ["index"],
- info_axis=0,
- stat_axis=0,
- aliases={"rows": 0},
- docs={"index": "The index (axis labels) of the Series."},
+ ["index"], docs={"index": "The index (axis labels) of the Series."},
)
Series._add_numeric_operations()
Series._add_series_only_operations()
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index e53e02ed750cb..25b8713eb0307 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -1139,12 +1139,12 @@ def test_extension_array_cross_section_converts():
(
lambda x: x.loc,
AttributeError,
- "type object 'NDFrame' has no attribute '_AXIS_ALIASES'",
+ "type object 'NDFrame' has no attribute '_AXIS_NAMES'",
),
(
lambda x: x.iloc,
AttributeError,
- "type object 'NDFrame' has no attribute '_AXIS_ALIASES'",
+ "type object 'NDFrame' has no attribute '_AXIS_NAMES'",
),
],
)
| A lot of this classmethod looks leftover from PanelND days. I think we can go even further towards getting rid of some of these attributes, but will save that for another time. | https://api.github.com/repos/pandas-dev/pandas/pulls/30064 | 2019-12-04T20:23:46Z | 2019-12-05T14:46:33Z | 2019-12-05T14:46:33Z | 2019-12-05T16:06:34Z |
API: BooleanArray any/all with NA logic | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index cfe55f1e05f71..2b9ea7dc220d7 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -281,6 +281,10 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then
pytest -q --doctest-modules pandas/core/arrays/string_.py
RET=$(($RET + $?)) ; echo $MSG "DONE"
+ MSG='Doctests arrays/boolean.py' ; echo $MSG
+ pytest -q --doctest-modules pandas/core/arrays/boolean.py
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
+
fi
### DOCSTRINGS ###
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index 31dc656eb4b25..0cdc1bed34ecb 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -6,6 +6,7 @@
from pandas._libs import lib, missing as libmissing
from pandas.compat import set_function_name
+from pandas.compat.numpy import function as nv
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import astype_nansafe
@@ -560,6 +561,143 @@ def _values_for_argsort(self) -> np.ndarray:
data[self._mask] = -1
return data
+ def any(self, skipna: bool = True, **kwargs):
+ """
+ Return whether any element is True.
+
+ Returns False unless there is at least one element that is True.
+ By default, NAs are skipped. If ``skipna=False`` is specified and
+ missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
+ is used as for logical operations.
+
+ Parameters
+ ----------
+ skipna : bool, default True
+ Exclude NA values. If the entire array is NA and `skipna` is
+ True, then the result will be False, as for an empty array.
+ If `skipna` is False, the result will still be True if there is
+ at least one element that is True, otherwise NA will be returned
+ if there are NA's present.
+ **kwargs : any, default None
+ Additional keywords have no effect but might be accepted for
+ compatibility with NumPy.
+
+ Returns
+ -------
+ bool or :attr:`pandas.NA`
+
+ See Also
+ --------
+ numpy.any : Numpy version of this method.
+ BooleanArray.all : Return whether all elements are True.
+
+ Examples
+ --------
+
+ The result indicates whether any element is True (and by default
+ skips NAs):
+
+ >>> pd.array([True, False, True]).any()
+ True
+ >>> pd.array([True, False, pd.NA]).any()
+ True
+ >>> pd.array([False, False, pd.NA]).any()
+ False
+ >>> pd.array([], dtype="boolean").any()
+ False
+ >>> pd.array([pd.NA], dtype="boolean").any()
+ False
+
+ With ``skipna=False``, the result can be NA if this is logically
+ required (whether ``pd.NA`` is True or False influences the result):
+
+ >>> pd.array([True, False, pd.NA]).any(skipna=False)
+ True
+ >>> pd.array([False, False, pd.NA]).any(skipna=False)
+ NA
+ """
+ kwargs.pop("axis", None)
+ nv.validate_any((), kwargs)
+
+ values = self._data.copy()
+ np.putmask(values, self._mask, False)
+ result = values.any()
+ if skipna:
+ return result
+ else:
+ if result or len(self) == 0:
+ return result
+ else:
+ return self.dtype.na_value
+
+ def all(self, skipna: bool = True, **kwargs):
+ """
+ Return whether all elements are True.
+
+ Returns True unless there is at least one element that is False.
+ By default, NAs are skipped. If ``skipna=False`` is specified and
+ missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
+ is used as for logical operations.
+
+ Parameters
+ ----------
+ skipna : bool, default True
+ Exclude NA values. If the entire array is NA and `skipna` is
+ True, then the result will be True, as for an empty array.
+ If `skipna` is False, the result will still be False if there is
+ at least one element that is False, otherwise NA will be returned
+ if there are NA's present.
+ **kwargs : any, default None
+ Additional keywords have no effect but might be accepted for
+ compatibility with NumPy.
+
+ Returns
+ -------
+ bool or :attr:`pandas.NA`
+
+ See Also
+ --------
+ numpy.all : Numpy version of this method.
+ BooleanArray.any : Return whether any element is True.
+
+ Examples
+ --------
+
+ The result indicates whether any element is True (and by default
+ skips NAs):
+
+ >>> pd.array([True, True, pd.NA]).all()
+ True
+ >>> pd.array([True, False, pd.NA]).all()
+ False
+ >>> pd.array([], dtype="boolean").all()
+ True
+ >>> pd.array([pd.NA], dtype="boolean").all()
+ True
+
+ With ``skipna=False``, the result can be NA if this is logically
+ required (whether ``pd.NA`` is True or False influences the result):
+
+ >>> pd.array([True, True, pd.NA]).all(skipna=False)
+ NA
+ >>> pd.array([True, False, pd.NA]).all(skipna=False)
+ False
+ """
+ kwargs.pop("axis", None)
+ nv.validate_all((), kwargs)
+
+ values = self._data.copy()
+ np.putmask(values, self._mask, True)
+ result = values.all()
+
+ if skipna:
+ return result
+ else:
+ if not result or len(self) == 0:
+ return result
+ else:
+ return self.dtype.na_value
+
@classmethod
def _create_logical_method(cls, op):
def logical_method(self, other):
@@ -656,6 +794,10 @@ def cmp_method(self, other):
return set_function_name(cmp_method, name, cls)
def _reduce(self, name, skipna=True, **kwargs):
+
+ if name in {"any", "all"}:
+ return getattr(self, name)(skipna=skipna, **kwargs)
+
data = self._data
mask = self._mask
@@ -667,12 +809,8 @@ def _reduce(self, name, skipna=True, **kwargs):
op = getattr(nanops, "nan" + name)
result = op(data, axis=0, skipna=skipna, mask=mask, **kwargs)
- # if we have a boolean op, don't coerce
- if name in ["any", "all"]:
- pass
-
# if we have numeric op that would result in an int, coerce to int if possible
- elif name in ["sum", "prod"] and notna(result):
+ if name in ["sum", "prod"] and notna(result):
int_result = np.int64(result)
if int_result == result:
result = int_result
diff --git a/pandas/tests/arrays/test_boolean.py b/pandas/tests/arrays/test_boolean.py
index d2f1a4a60ada8..7266e7f741a51 100644
--- a/pandas/tests/arrays/test_boolean.py
+++ b/pandas/tests/arrays/test_boolean.py
@@ -700,6 +700,33 @@ def test_reductions_return_types(dropna, data, all_numeric_reductions):
assert isinstance(getattr(s, op)(), np.float64)
+@pytest.mark.parametrize(
+ "values, exp_any, exp_all, exp_any_noskip, exp_all_noskip",
+ [
+ ([True, pd.NA], True, True, True, pd.NA),
+ ([False, pd.NA], False, False, pd.NA, False),
+ ([pd.NA], False, True, pd.NA, pd.NA),
+ ([], False, True, False, True),
+ ],
+)
+def test_any_all(values, exp_any, exp_all, exp_any_noskip, exp_all_noskip):
+ # the methods return numpy scalars
+ exp_any = pd.NA if exp_any is pd.NA else np.bool_(exp_any)
+ exp_all = pd.NA if exp_all is pd.NA else np.bool_(exp_all)
+ exp_any_noskip = pd.NA if exp_any_noskip is pd.NA else np.bool_(exp_any_noskip)
+ exp_all_noskip = pd.NA if exp_all_noskip is pd.NA else np.bool_(exp_all_noskip)
+
+ for con in [pd.array, pd.Series]:
+ a = con(values, dtype="boolean")
+ assert a.any() is exp_any
+ assert a.all() is exp_all
+ assert a.any(skipna=False) is exp_any_noskip
+ assert a.all(skipna=False) is exp_all_noskip
+
+ assert np.any(a.any()) is exp_any
+ assert np.all(a.all()) is exp_all
+
+
# TODO when BooleanArray coerces to object dtype numpy array, need to do conversion
# manually in the indexing code
# def test_indexing_boolean_mask():
| Closes https://github.com/pandas-dev/pandas/issues/29686
Implementation and tests for `any`/`all` with the updated logic as discussed in the linked issue. | https://api.github.com/repos/pandas-dev/pandas/pulls/30062 | 2019-12-04T19:53:01Z | 2019-12-12T13:24:24Z | 2019-12-12T13:24:24Z | 2019-12-12T13:24:29Z |
Replace custom macro with stdlib | diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c
index 578f72112d02d..9f2b26b0dea19 100644
--- a/pandas/_libs/src/parser/tokenizer.c
+++ b/pandas/_libs/src/parser/tokenizer.c
@@ -647,8 +647,6 @@ static int parser_buffer_bytes(parser_t *self, size_t nbytes) {
#define END_LINE() END_LINE_STATE(START_RECORD)
-#define IS_WHITESPACE(c) ((c == ' ' || c == '\t'))
-
#define IS_TERMINATOR(c) \
(c == line_terminator)
@@ -667,7 +665,7 @@ static int parser_buffer_bytes(parser_t *self, size_t nbytes) {
// applied when in a field
#define IS_DELIMITER(c) \
((!self->delim_whitespace && c == self->delimiter) || \
- (self->delim_whitespace && IS_WHITESPACE(c)))
+ (self->delim_whitespace && isblank(c)))
#define _TOKEN_CLEANUP() \
self->stream_len = slen; \
@@ -818,7 +816,7 @@ int tokenize_bytes(parser_t *self,
self->state = EAT_CRNL_NOP;
break;
} else if (!self->delim_whitespace) {
- if (IS_WHITESPACE(c) && c != self->delimiter) {
+ if (isblank(c) && c != self->delimiter) {
} else { // backtrack
// use i + 1 because buf has been incremented but not i
do {
@@ -848,7 +846,7 @@ int tokenize_bytes(parser_t *self,
} else if (IS_COMMENT_CHAR(c)) {
self->state = EAT_COMMENT;
break;
- } else if (!IS_WHITESPACE(c)) {
+ } else if (!isblank(c)) {
self->state = START_FIELD;
// fall through to subsequent state
} else {
@@ -892,7 +890,7 @@ int tokenize_bytes(parser_t *self,
} else if (IS_COMMENT_CHAR(c)) {
self->state = EAT_LINE_COMMENT;
break;
- } else if (IS_WHITESPACE(c)) {
+ } else if (isblank(c)) {
if (self->delim_whitespace) {
if (self->skip_empty_lines) {
self->state = WHITESPACE_LINE;
| This is part of the C99 standard so I think better to leverage builtin functionality rather than redefine | https://api.github.com/repos/pandas-dev/pandas/pulls/30061 | 2019-12-04T19:43:43Z | 2019-12-06T17:16:14Z | 2019-12-06T17:16:14Z | 2019-12-06T17:16:28Z |
STY: fstrings doc/make.py | diff --git a/doc/make.py b/doc/make.py
index cbb1fa6a5324a..cf73f44b5dd02 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -60,7 +60,7 @@ def __init__(
if single_doc and single_doc.endswith(".rst"):
self.single_doc_html = os.path.splitext(single_doc)[0] + ".html"
elif single_doc:
- self.single_doc_html = "reference/api/pandas.{}.html".format(single_doc)
+ self.single_doc_html = f"reference/api/pandas.{single_doc}.html"
def _process_single_doc(self, single_doc):
"""
@@ -76,7 +76,7 @@ def _process_single_doc(self, single_doc):
if os.path.exists(os.path.join(SOURCE_PATH, single_doc)):
return single_doc
else:
- raise FileNotFoundError("File {} not found".format(single_doc))
+ raise FileNotFoundError(f"File {single_doc} not found")
elif single_doc.startswith("pandas."):
try:
@@ -84,17 +84,15 @@ def _process_single_doc(self, single_doc):
for name in single_doc.split("."):
obj = getattr(obj, name)
except AttributeError:
- raise ImportError("Could not import {}".format(single_doc))
+ raise ImportError(f"Could not import {single_doc}")
else:
return single_doc[len("pandas.") :]
else:
raise ValueError(
- (
- "--single={} not understood. Value should be a "
- "valid path to a .rst or .ipynb file, or a "
- "valid pandas object (e.g. categorical.rst or "
- "pandas.DataFrame.head)"
- ).format(single_doc)
+ f"--single={single_doc} not understood. "
+ "Value should be a valid path to a .rst or .ipynb file, "
+ "or a valid pandas object "
+ "(e.g. categorical.rst or pandas.DataFrame.head)"
)
@staticmethod
@@ -113,7 +111,7 @@ def _run_os(*args):
"""
subprocess.check_call(args, stdout=sys.stdout, stderr=sys.stderr)
- def _sphinx_build(self, kind):
+ def _sphinx_build(self, kind: str):
"""
Call sphinx to build documentation.
@@ -128,7 +126,7 @@ def _sphinx_build(self, kind):
>>> DocBuilder(num_jobs=4)._sphinx_build('html')
"""
if kind not in ("html", "latex"):
- raise ValueError("kind must be html or latex, " "not {}".format(kind))
+ raise ValueError(f"kind must be html or latex, not {kind}")
cmd = ["sphinx-build", "-b", kind]
if self.num_jobs:
@@ -136,7 +134,7 @@ def _sphinx_build(self, kind):
if self.warnings_are_errors:
cmd += ["-W", "--keep-going"]
if self.verbosity:
- cmd.append("-{}".format("v" * self.verbosity))
+ cmd.append(f"-{'v' * self.verbosity}")
cmd += [
"-d",
os.path.join(BUILD_PATH, "doctrees"),
@@ -156,7 +154,7 @@ def _get_page_title(self, page):
"""
Open the rst file `page` and extract its title.
"""
- fname = os.path.join(SOURCE_PATH, "{}.rst".format(page))
+ fname = os.path.join(SOURCE_PATH, f"{page}.rst")
option_parser = docutils.frontend.OptionParser(
components=(docutils.parsers.rst.Parser,)
)
@@ -184,18 +182,6 @@ def _add_redirects(self):
Create in the build directory an html file with a redirect,
for every row in REDIRECTS_FILE.
"""
- html = """
- <html>
- <head>
- <meta http-equiv="refresh" content="0;URL={url}"/>
- </head>
- <body>
- <p>
- The page has been moved to <a href="{url}">{title}</a>
- </p>
- </body>
- <html>
- """
with open(REDIRECTS_FILE) as mapping_fd:
reader = csv.reader(mapping_fd)
for row in reader:
@@ -214,15 +200,23 @@ def _add_redirects(self):
if os.path.exists(path):
raise RuntimeError(
- ("Redirection would overwrite an existing file: " "{}").format(
- path
- )
+ f"Redirection would overwrite an existing file: {path}"
)
with open(path, "w") as moved_page_fd:
- moved_page_fd.write(
- html.format(url="{}.html".format(row[1]), title=title)
- )
+ html = f"""\
+<html>
+ <head>
+ <meta http-equiv="refresh" content="0;URL={row[1]}.html"/>
+ </head>
+ <body>
+ <p>
+ The page has been moved to <a href="{row[1]}.html">{title}</a>
+ </p>
+ </body>
+<html>"""
+
+ moved_page_fd.write(html)
def html(self):
"""
@@ -290,15 +284,14 @@ def zip_html(self):
def main():
cmds = [method for method in dir(DocBuilder) if not method.startswith("_")]
+ joined = ",".join(cmds)
argparser = argparse.ArgumentParser(
- description="pandas documentation builder",
- epilog="Commands: {}".format(",".join(cmds)),
+ description="pandas documentation builder", epilog=f"Commands: {joined}",
)
+
+ joined = ", ".join(cmds)
argparser.add_argument(
- "command",
- nargs="?",
- default="html",
- help="command to run: {}".format(", ".join(cmds)),
+ "command", nargs="?", default="html", help=f"command to run: {joined}",
)
argparser.add_argument(
"--num-jobs", type=int, default=0, help="number of jobs used by sphinx-build"
@@ -312,10 +305,9 @@ def main():
type=str,
default=None,
help=(
- 'filename (relative to the "source" folder)'
- " of section or method name to compile, e.g. "
- '"development/contributing.rst",'
- ' "ecosystem.rst", "pandas.DataFrame.join"'
+ "filename (relative to the 'source' folder) of section or method name to "
+ "compile, e.g. 'development/contributing.rst', "
+ "'ecosystem.rst', 'pandas.DataFrame.join'"
),
)
argparser.add_argument(
@@ -340,11 +332,8 @@ def main():
args = argparser.parse_args()
if args.command not in cmds:
- raise ValueError(
- "Unknown command {}. Available options: {}".format(
- args.command, ", ".join(cmds)
- )
- )
+ joined = ", ".join(cmds)
+ raise ValueError(f"Unknown command {args.command}. Available options: {joined}")
# Below we update both os.environ and sys.path. The former is used by
# external libraries (namely Sphinx) to compile this module and resolve
| - [x] ref #29547
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30060 | 2019-12-04T19:36:19Z | 2019-12-05T15:33:51Z | 2019-12-05T15:33:51Z | 2019-12-05T20:32:43Z |
REF: pytables do string conversion early to set attributes in fewer places | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index fca1d3265cac2..5b7bb64795d90 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2313,111 +2313,34 @@ def set_kind(self):
if self.typ is None:
self.typ = getattr(self.description, self.cname, None)
- def set_atom(
- self,
- block,
- existing_col,
- min_itemsize,
- nan_rep,
- info,
- encoding=None,
- errors="strict",
- ):
+ def set_atom(self, block, itemsize: int, data_converted, use_str: bool):
""" create and setup my atom from the block b """
# short-cut certain block types
if block.is_categorical:
self.set_atom_categorical(block)
- self.update_info(info)
- return
elif block.is_datetimetz:
self.set_atom_datetime64tz(block)
- self.update_info(info)
- return
elif block.is_datetime:
- return self.set_atom_datetime64(block)
+ self.set_atom_datetime64(block)
elif block.is_timedelta:
- return self.set_atom_timedelta64(block)
+ self.set_atom_timedelta64(block)
elif block.is_complex:
- return self.set_atom_complex(block)
-
- dtype = block.dtype.name
- inferred_type = lib.infer_dtype(block.values, skipna=False)
+ self.set_atom_complex(block)
- if inferred_type == "date":
- raise TypeError("[date] is not implemented as a table column")
- elif inferred_type == "datetime":
- # after GH#8260
- # this only would be hit for a multi-timezone dtype
- # which is an error
-
- raise TypeError(
- "too many timezones in this block, create separate data columns"
- )
- elif inferred_type == "unicode":
- raise TypeError("[unicode] is not implemented as a table column")
-
- # this is basically a catchall; if say a datetime64 has nans then will
- # end up here ###
- elif inferred_type == "string" or dtype == "object":
- self.set_atom_string(
- block, existing_col, min_itemsize, nan_rep, encoding, errors,
- )
-
- # set as a data block
+ elif use_str:
+ self.set_atom_string(itemsize, data_converted)
else:
+ # set as a data block
self.set_atom_data(block)
- def get_atom_string(self, block, itemsize):
- return _tables().StringCol(itemsize=itemsize, shape=block.shape[0])
-
- def set_atom_string(
- self, block, existing_col, min_itemsize, nan_rep, encoding, errors
- ):
- # fill nan items with myself, don't disturb the blocks by
- # trying to downcast
- block = block.fillna(nan_rep, downcast=False)
- if isinstance(block, list):
- block = block[0]
- data = block.values
-
- # see if we have a valid string type
- inferred_type = lib.infer_dtype(data.ravel(), skipna=False)
- if inferred_type != "string":
-
- # we cannot serialize this data, so report an exception on a column
- # by column basis
- for i in range(len(block.shape[0])):
-
- col = block.iget(i)
- inferred_type = lib.infer_dtype(col.ravel(), skipna=False)
- if inferred_type != "string":
- iloc = block.mgr_locs.indexer[i]
- raise TypeError(
- f"Cannot serialize the column [{iloc}] because\n"
- f"its data contents are [{inferred_type}] object dtype"
- )
-
- # itemsize is the maximum length of a string (along any dimension)
- data_converted = _convert_string_array(data, encoding, errors)
- itemsize = data_converted.itemsize
-
- # specified min_itemsize?
- if isinstance(min_itemsize, dict):
- min_itemsize = int(
- min_itemsize.get(self.name) or min_itemsize.get("values") or 0
- )
- itemsize = max(min_itemsize or 0, itemsize)
-
- # check for column in the values conflicts
- if existing_col is not None:
- eci = existing_col.validate_col(itemsize)
- if eci > itemsize:
- itemsize = eci
+ def get_atom_string(self, shape, itemsize):
+ return _tables().StringCol(itemsize=itemsize, shape=shape[0])
+ def set_atom_string(self, itemsize: int, data_converted: np.ndarray):
self.itemsize = itemsize
self.kind = "string"
- self.typ = self.get_atom_string(block, itemsize)
+ self.typ = self.get_atom_string(data_converted.shape, itemsize)
self.set_data(data_converted.astype(f"|S{itemsize}", copy=False))
def get_atom_coltype(self, kind=None):
@@ -2621,7 +2544,7 @@ def validate_names(self):
# TODO: should the message here be more specifically non-str?
raise ValueError("cannot have non-object label DataIndexableCol")
- def get_atom_string(self, block, itemsize):
+ def get_atom_string(self, shape, itemsize):
return _tables().StringCol(itemsize=itemsize)
def get_atom_data(self, block, kind=None):
@@ -3974,17 +3897,26 @@ def get_blk_items(mgr, blocks):
else:
existing_col = None
- col = klass.create_for_block(i=i, name=name, version=self.version)
- col.values = list(b_items)
- col.set_atom(
- block=b,
+ new_name = name or f"values_block_{i}"
+ itemsize, data_converted, use_str = _maybe_convert_for_string_atom(
+ new_name,
+ b,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
encoding=self.encoding,
errors=self.errors,
- info=self.info,
)
+
+ col = klass.create_for_block(i=i, name=new_name, version=self.version)
+ col.values = list(b_items)
+ col.set_atom(
+ block=b,
+ itemsize=itemsize,
+ data_converted=data_converted,
+ use_str=use_str,
+ )
+ col.update_info(self.info)
col.set_pos(j)
self.values_axes.append(col)
@@ -4842,6 +4774,74 @@ def _unconvert_index(data, kind: str, encoding=None, errors="strict"):
return index
+def _maybe_convert_for_string_atom(
+ name: str, block, existing_col, min_itemsize, nan_rep, encoding, errors
+):
+ use_str = False
+
+ if not block.is_object:
+ return block.dtype.itemsize, block.values, use_str
+
+ dtype_name = block.dtype.name
+ inferred_type = lib.infer_dtype(block.values, skipna=False)
+
+ if inferred_type == "date":
+ raise TypeError("[date] is not implemented as a table column")
+ elif inferred_type == "datetime":
+ # after GH#8260
+ # this only would be hit for a multi-timezone dtype which is an error
+ raise TypeError(
+ "too many timezones in this block, create separate data columns"
+ )
+
+ elif not (inferred_type == "string" or dtype_name == "object"):
+ return block.dtype.itemsize, block.values, use_str
+
+ use_str = True
+
+ block = block.fillna(nan_rep, downcast=False)
+ if isinstance(block, list):
+ # Note: because block is always object dtype, fillna goes
+ # through a path such that the result is always a 1-element list
+ block = block[0]
+ data = block.values
+
+ # see if we have a valid string type
+ inferred_type = lib.infer_dtype(data.ravel(), skipna=False)
+ if inferred_type != "string":
+
+ # we cannot serialize this data, so report an exception on a column
+ # by column basis
+ for i in range(len(block.shape[0])):
+
+ col = block.iget(i)
+ inferred_type = lib.infer_dtype(col.ravel(), skipna=False)
+ if inferred_type != "string":
+ iloc = block.mgr_locs.indexer[i]
+ raise TypeError(
+ f"Cannot serialize the column [{iloc}] because\n"
+ f"its data contents are [{inferred_type}] object dtype"
+ )
+
+ # itemsize is the maximum length of a string (along any dimension)
+ data_converted = _convert_string_array(data, encoding, errors).reshape(data.shape)
+ assert data_converted.shape == block.shape, (data_converted.shape, block.shape)
+ itemsize = data_converted.itemsize
+
+ # specified min_itemsize?
+ if isinstance(min_itemsize, dict):
+ min_itemsize = int(min_itemsize.get(name) or min_itemsize.get("values") or 0)
+ itemsize = max(min_itemsize or 0, itemsize)
+
+ # check for column in the values conflicts
+ if existing_col is not None:
+ eci = existing_col.validate_col(itemsize)
+ if eci > itemsize:
+ itemsize = eci
+
+ return itemsize, data_converted, use_str
+
+
def _convert_string_array(data, encoding, errors, itemsize=None):
"""
we take a string-like that is object dtype and coerce to a fixed size
| Next-step following #29979.
A lot of work is currently done in set_atom_string (a self-mutating method) that can be done in an earlier, not-mutating call. By making that move, we simplify set_atom quite a bit, and get closer to getting all the attribute-setting into the constructor | https://api.github.com/repos/pandas-dev/pandas/pulls/30058 | 2019-12-04T18:12:41Z | 2019-12-05T15:39:44Z | 2019-12-05T15:39:44Z | 2019-12-05T16:09:58Z |
STY: "{foo!r}" -> "{repr(foo)}" | diff --git a/ci/print_skipped.py b/ci/print_skipped.py
index 8fd92ab9622ba..72822fa2d3c7f 100755
--- a/ci/print_skipped.py
+++ b/ci/print_skipped.py
@@ -5,7 +5,7 @@
def main(filename):
if not os.path.isfile(filename):
- raise RuntimeError(f"Could not find junit file {filename!r}")
+ raise RuntimeError(f"Could not find junit file {repr(filename)}")
tree = et.parse(filename)
root = tree.getroot()
| - [x] ref #29886
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30055 | 2019-12-04T17:25:38Z | 2019-12-04T18:50:43Z | 2019-12-04T18:50:43Z | 2019-12-05T15:14:49Z |
CI: disable numpydev job | diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml
index a10fd402b6733..081145f846571 100644
--- a/ci/azure/posix.yml
+++ b/ci/azure/posix.yml
@@ -44,13 +44,15 @@ jobs:
PATTERN: "not slow and not network"
LOCALE_OVERRIDE: "zh_CN.UTF-8"
- py37_np_dev:
- ENV_FILE: ci/deps/azure-37-numpydev.yaml
- CONDA_PY: "37"
- PATTERN: "not slow and not network"
- TEST_ARGS: "-W error"
- PANDAS_TESTING_MODE: "deprecate"
- EXTRA_APT: "xsel"
+ # Disabled for NumPy object-dtype warning.
+ # https://github.com/pandas-dev/pandas/issues/30043
+ # py37_np_dev:
+ # ENV_FILE: ci/deps/azure-37-numpydev.yaml
+ # CONDA_PY: "37"
+ # PATTERN: "not slow and not network"
+ # TEST_ARGS: "-W error"
+ # PANDAS_TESTING_MODE: "deprecate"
+ # EXTRA_APT: "xsel"
steps:
- script: |
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ea73241e5d078..c52b1c65ad08d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9133,7 +9133,7 @@ def tshift(self, periods: int = 1, freq=None, axis=0):
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
- else:
+ elif orig_freq is not None:
msg = (
f"Given freq {freq.rule_code} does not match"
f" PeriodIndex freq {orig_freq.rule_code}"
| xref https://github.com/pandas-dev/pandas/issues/30043.
Will take some time to fix all these cases. | https://api.github.com/repos/pandas-dev/pandas/pulls/30049 | 2019-12-04T14:03:52Z | 2019-12-04T16:41:35Z | 2019-12-04T16:41:34Z | 2019-12-04T16:51:43Z |
27820 clarify that read parquet accepts a directory path | diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 1ab311516fa02..a044cfcdf6a01 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -267,6 +267,10 @@ def read_parquet(path, engine="auto", columns=None, **kwargs):
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.parquet``.
+ A file URL can also be a path to a directory that contains multiple
+ partitioned parquet files. Both pyarrow and fastparquet support
+ paths to directories as well as file URLs. A directory path could be:
+ ``file://localhost/path/to/tables``
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
| - [x] closes #27820
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30048 | 2019-12-04T13:46:58Z | 2019-12-08T17:39:41Z | 2019-12-08T17:39:41Z | 2019-12-08T17:39:44Z |
CI: Fix print skipped tests | diff --git a/ci/print_skipped.py b/ci/print_skipped.py
index 51a2460e05fab..243f390fd62f2 100755
--- a/ci/print_skipped.py
+++ b/ci/print_skipped.py
@@ -5,12 +5,12 @@
def main(filename):
if not os.path.isfile(filename):
- return
+ raise RuntimeError(f"Could not find junit file {filename!r}")
tree = et.parse(filename)
root = tree.getroot()
current_class = ""
- for el in root.findall("testcase"):
+ for el in root.iter("testcase"):
cn = el.attrib["classname"]
for sk in el.findall("skipped"):
old_class = current_class
| - [X] closes #30040
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Looks like the format of the junit file that pytest generates has changed, and now tests are inside a `testsuite` tag, which makes the script to not find any tests. Instead of looking for them as immediate children of `testcase`, I look for them in all the tree (including subchildren).
CC: @jorisvandenbossche | https://api.github.com/repos/pandas-dev/pandas/pulls/30044 | 2019-12-04T12:50:41Z | 2019-12-04T13:21:46Z | 2019-12-04T13:21:45Z | 2019-12-04T13:30:51Z |
CI: ensure proper coverage of different pyarrow versions | diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml
index c3c94e365c259..14cc4f2726e96 100644
--- a/ci/deps/azure-36-locale.yaml
+++ b/ci/deps/azure-36-locale.yaml
@@ -27,6 +27,9 @@ dependencies:
- xlrd=1.1.0
- xlsxwriter=0.9.8
- xlwt=1.2.0
+ # lowest supported version of pyarrow (putting it here instead of in
+ # azure-36-minimum_versions because it needs numpy >= 1.14)
+ - pyarrow=0.12
- pip
- pip:
- html5lib==1.0b2
diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml
index 3319afed173b5..a10fa0904a451 100644
--- a/ci/deps/azure-37-locale.yaml
+++ b/ci/deps/azure-37-locale.yaml
@@ -1,6 +1,5 @@
name: pandas-dev
channels:
- - defaults
- conda-forge
dependencies:
- python=3.7.*
@@ -33,3 +32,4 @@ dependencies:
- xlrd
- xlsxwriter
- xlwt
+ - pyarrow>=0.15
diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml
index 04ff06a99a1f4..c1403f8eb8409 100644
--- a/ci/deps/travis-36-cov.yaml
+++ b/ci/deps/travis-36-cov.yaml
@@ -33,8 +33,7 @@ dependencies:
# https://github.com/pydata/pandas-gbq/issues/271
- google-cloud-bigquery<=1.11
- psycopg2
- # pyarrow segfaults on load: https://github.com/pandas-dev/pandas/issues/26716
- # - pyarrow=0.9.0
+ - pyarrow>=0.12.0
- pymysql
- pytables
- python-snappy
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 3e687d185df84..a98c93c250070 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -504,6 +504,7 @@ def test_empty_dataframe(self, pa):
df = pd.DataFrame()
check_round_trip(df, pa)
+ @pytest.mark.skip(reason="broken test")
@td.skip_if_no("pyarrow", min_version="0.15.0")
def test_additional_extension_arrays(self, pa):
# test additional ExtensionArrays that are supported through the
| Closes https://github.com/pandas-dev/pandas/issues/29223, closes https://github.com/pandas-dev/pandas/issues/26716 | https://api.github.com/repos/pandas-dev/pandas/pulls/30039 | 2019-12-04T09:03:20Z | 2019-12-04T13:10:25Z | 2019-12-04T13:10:24Z | 2019-12-04T13:54:58Z |
Removed CParserError | diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 8f0f4e17df2f9..adc7a650b745f 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -63,11 +63,6 @@ from pandas.errors import (ParserError, DtypeWarning,
lzma = _import_lzma()
-# Import CParserError as alias of ParserError for backwards compatibility.
-# Ultimately, we want to remove this import. See gh-12665 and gh-14479.
-CParserError = ParserError
-
-
cdef:
float64_t INF = <float64_t>np.inf
float64_t NEGINF = -INF
diff --git a/pandas/io/common.py b/pandas/io/common.py
index c0eddb679c6f8..54253803c4881 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -47,9 +47,6 @@
lzma = _import_lzma()
-# gh-12665: Alias for now and remove later.
-CParserError = ParserError
-
# common NA values
# no longer excluding inf representations
# '1.#INF','-1.#INF', '1.#INF000000',
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py
index 531c511e8c02d..fa2142444ed92 100644
--- a/pandas/tests/test_errors.py
+++ b/pandas/tests/test_errors.py
@@ -39,22 +39,6 @@ def test_catch_oob():
pass
-def test_error_rename():
- # see gh-12665
- from pandas.errors import ParserError
- from pandas.io.common import CParserError
-
- try:
- raise CParserError()
- except ParserError:
- pass
-
- try:
- raise ParserError()
- except CParserError:
- pass
-
-
class Foo:
@classmethod
def classmethod(cls):
| I think this is legacy stuff @gfyoung | https://api.github.com/repos/pandas-dev/pandas/pulls/30036 | 2019-12-04T06:43:12Z | 2019-12-04T13:31:37Z | 2019-12-04T13:31:36Z | 2023-04-12T20:16:56Z |
BUG: unclosed file warning when passing invalid encoding | diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 3adf8d7bbdd11..dab1cd243a343 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -3,7 +3,6 @@
and latex files. This module also applies to display formatting.
"""
-import codecs
from contextlib import contextmanager
from datetime import tzinfo
import decimal
@@ -495,7 +494,11 @@ def get_buffer(
if hasattr(buf, "write"):
yield buf
elif isinstance(buf, str):
- with codecs.open(buf, "w", encoding=encoding) as f:
+ with open(buf, "w", encoding=encoding, newline="") as f:
+ # GH#30034 open instead of codecs.open prevents a file leak
+ # if we have an invalid encoding argument.
+ # newline="" is needed to roundtrip correctly on
+ # windows test_to_latex_filename
yield f
else:
raise TypeError("buf is not a file name and it has no write method")
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 0f4a7a33dd115..004a1d184537d 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -3259,8 +3259,9 @@ def test_filepath_or_buffer_arg(
):
getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
elif encoding == "foo":
- with pytest.raises(LookupError, match="unknown encoding"):
- getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
+ with tm.assert_produces_warning(None):
+ with pytest.raises(LookupError, match="unknown encoding"):
+ getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
else:
expected = getattr(df, method)()
getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
| This isn't directly related to #30031 but might solve it if the problem there is unclosed files piling up.
TL;DR: the actual problem is in the stdlib `codecs.open`:
```
def open(filename, mode='r', encoding=None, errors='strict', buffering=1):
if encoding is not None and 'b' not in mode:
mode = mode + 'b'
file = builtins.open(filename, mode, buffering)
if encoding is None:
return file
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
```
When we pass a bogus encoding "foo" in test_format, the `lookup` call raises, and leaves behind the file opened a few lines earlier. | https://api.github.com/repos/pandas-dev/pandas/pulls/30034 | 2019-12-04T05:05:52Z | 2019-12-05T12:57:15Z | 2019-12-05T12:57:15Z | 2019-12-05T16:08:07Z |
revert openpyxl test changes | diff --git a/pandas/tests/io/excel/test_openpyxl.py b/pandas/tests/io/excel/test_openpyxl.py
index 90e795bd5c52f..e8c60870e2a85 100644
--- a/pandas/tests/io/excel/test_openpyxl.py
+++ b/pandas/tests/io/excel/test_openpyxl.py
@@ -1,11 +1,8 @@
-from distutils.version import LooseVersion
import os
import numpy as np
import pytest
-from pandas.compat import PY37, is_platform_mac
-
import pandas as pd
from pandas import DataFrame
import pandas.util.testing as tm
@@ -16,8 +13,6 @@
pytestmark = pytest.mark.parametrize("ext", [".xlsx"])
-openpyxl_gt301 = LooseVersion(openpyxl.__version__) > LooseVersion("3.0.1")
-
def test_to_excel_styleconverter(ext):
from openpyxl import styles
@@ -86,9 +81,6 @@ def test_write_cells_merge_styled(ext):
assert xcell_a2.font == openpyxl_sty_merged
-@pytest.mark.xfail(
- openpyxl_gt301 and PY37 and is_platform_mac(), reason="broken change in openpyxl"
-)
@pytest.mark.parametrize(
"mode,expected", [("w", ["baz"]), ("a", ["foo", "bar", "baz"])]
)
@@ -115,9 +107,6 @@ def test_write_append_mode(ext, mode, expected):
assert wb2.worksheets[index]["A1"].value == cell_value
-@pytest.mark.xfail(
- openpyxl_gt301 and PY37 and is_platform_mac(), reason="broken change in openpyxl"
-)
def test_to_excel_with_openpyxl_engine(ext, tmpdir):
# GH 29854
# TODO: Fix this once newer version of openpyxl fixes the bug
| follow up to #30009 - thanks @jbrockmendel for fixing it and sorry I wasn't able to review more. I *think* the root cause though is just defusedxml being installed alongside things, which is only prevalent in our 3.6 build.
pinning openpyxl version alone I think is fine, but let's see...note that this same defusedxml issue causes a lot of warnings to be captured for xlrd in the excel tests | https://api.github.com/repos/pandas-dev/pandas/pulls/30033 | 2019-12-04T04:33:56Z | 2019-12-04T13:24:23Z | 2019-12-04T13:24:23Z | 2023-04-12T20:16:59Z |
changed "fun !r" -> "repr(fun)" | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 11e87a4eed27f..302d4b591fc1e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2944,7 +2944,7 @@ def _try_kind_sort(arr):
sortedIdx[n:] = idx[good][argsorted]
sortedIdx[:n] = idx[bad]
else:
- raise ValueError("invalid na_position: {!r}".format(na_position))
+ raise ValueError(f"invalid na_position: {repr(na_position)}")
result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 82eb93dd4c879..ed9ef23132683 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -208,7 +208,7 @@ def lexsort_indexer(keys, orders=None, na_position="last"):
cat = Categorical(key, ordered=True)
if na_position not in ["last", "first"]:
- raise ValueError("invalid na_position: {!r}".format(na_position))
+ raise ValueError(f"invalid na_position: {repr(na_position)}")
n = len(cat.categories)
codes = cat.codes.copy()
@@ -264,7 +264,7 @@ def nargsort(items, kind="quicksort", ascending: bool = True, na_position="last"
elif na_position == "first":
indexer = np.concatenate([nan_idx, indexer])
else:
- raise ValueError("invalid na_position: {!r}".format(na_position))
+ raise ValueError(f"invalid na_position: {repr(na_position)}")
return indexer
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 137c37f938dfa..04c87e264550b 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -1933,10 +1933,8 @@ def _forbid_nonstring_types(func):
def wrapper(self, *args, **kwargs):
if self._inferred_dtype not in allowed_types:
msg = (
- "Cannot use .str.{name} with values of inferred dtype "
- "{inf_type!r}.".format(
- name=func_name, inf_type=self._inferred_dtype
- )
+ f"Cannot use .str.{func_name} with values of inferred dtype "
+ f"{repr(self._inferred_dtype)}."
)
raise TypeError(msg)
return func(self, *args, **kwargs)
| As described in the following issue
, usage of !r is currently redundant and so changing to f strings in place of it.
- [x] ref https://github.com/pandas-dev/pandas/issues/29886
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30032 | 2019-12-04T03:46:10Z | 2019-12-04T13:25:03Z | 2019-12-04T13:25:03Z | 2020-03-28T14:00:02Z |
REF: collect attribute-setting at the end of create_axes | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index fca1d3265cac2..e1b3e5c83f7db 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3734,7 +3734,7 @@ def read_axes(
return True
- def get_object(self, obj):
+ def get_object(self, obj, transposed: bool):
""" return the data for this obj """
return obj
@@ -3838,15 +3838,13 @@ def create_axes(
)
# create according to the new data
- self.non_index_axes = []
- self.data_columns = []
+ new_non_index_axes: List = []
+ new_data_columns: List[Optional[str]] = []
# nan_representation
if nan_rep is None:
nan_rep = "nan"
- self.nan_rep = nan_rep
-
# create axes to index and non_index
index_axes_map = dict()
for i, a in enumerate(obj.axes):
@@ -3863,7 +3861,7 @@ def create_axes(
# necessary
append_axis = list(a)
if existing_table is not None:
- indexer = len(self.non_index_axes)
+ indexer = len(new_non_index_axes)
exist_axis = existing_table.non_index_axes[indexer][1]
if not array_equivalent(
np.array(append_axis), np.array(exist_axis)
@@ -3880,34 +3878,37 @@ def create_axes(
info["names"] = list(a.names)
info["type"] = type(a).__name__
- self.non_index_axes.append((i, append_axis))
+ new_non_index_axes.append((i, append_axis))
+
+ self.non_index_axes = new_non_index_axes
# set axis positions (based on the axes)
new_index_axes = [index_axes_map[a] for a in axes]
for j, iax in enumerate(new_index_axes):
iax.set_pos(j)
iax.update_info(self.info)
- self.index_axes = new_index_axes
- j = len(self.index_axes)
+ j = len(new_index_axes)
# check for column conflicts
- for a in self.axes:
+ for a in new_index_axes:
a.maybe_set_size(min_itemsize=min_itemsize)
# reindex by our non_index_axes & compute data_columns
- for a in self.non_index_axes:
+ for a in new_non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
def get_blk_items(mgr, blocks):
return [mgr.items.take(blk.mgr_locs) for blk in blocks]
+ transposed = new_index_axes[0].axis == 1
+
# figure out data_columns and get out blocks
- block_obj = self.get_object(obj)._consolidate()
+ block_obj = self.get_object(obj, transposed)._consolidate()
blocks = block_obj._data.blocks
blk_items = get_blk_items(block_obj._data, blocks)
- if len(self.non_index_axes):
- axis, axis_labels = self.non_index_axes[0]
+ if len(new_non_index_axes):
+ axis, axis_labels = new_non_index_axes[0]
data_columns = self.validate_data_columns(data_columns, min_itemsize)
if len(data_columns):
mgr = block_obj.reindex(
@@ -3945,7 +3946,7 @@ def get_blk_items(mgr, blocks):
blk_items = new_blk_items
# add my values
- self.values_axes = []
+ vaxes = []
for i, (b, b_items) in enumerate(zip(blocks, blk_items)):
# shape of the data column are the indexable axes
@@ -3959,7 +3960,7 @@ def get_blk_items(mgr, blocks):
if not (name is None or isinstance(name, str)):
# TODO: should the message here be more specifically non-str?
raise ValueError("cannot have non-object label DataIndexableCol")
- self.data_columns.append(name)
+ new_data_columns.append(name)
# make sure that we match up the existing columns
# if we have an existing table
@@ -3987,10 +3988,15 @@ def get_blk_items(mgr, blocks):
)
col.set_pos(j)
- self.values_axes.append(col)
+ vaxes.append(col)
j += 1
+ self.nan_rep = nan_rep
+ self.data_columns = new_data_columns
+ self.values_axes = vaxes
+ self.index_axes = new_index_axes
+
# validate our min_itemsize
self.validate_min_itemsize(min_itemsize)
@@ -4428,9 +4434,9 @@ class AppendableFrameTable(AppendableTable):
def is_transposed(self) -> bool:
return self.index_axes[0].axis == 1
- def get_object(self, obj):
+ def get_object(self, obj, transposed: bool):
""" these are written transposed """
- if self.is_transposed:
+ if transposed:
obj = obj.T
return obj
@@ -4512,7 +4518,7 @@ class AppendableSeriesTable(AppendableFrameTable):
def is_transposed(self) -> bool:
return False
- def get_object(self, obj):
+ def get_object(self, obj, transposed: bool):
return obj
def write(self, obj, data_columns=None, **kwargs):
| Ideally we wouldn't be altering `self` inplace at all, but we can make this a lot easier to reason about by collecting all of the attribute-setting at the end of the create_axes call.
`self.non_index_axes` I couldn't get all the way to the end of the call without a more invasive edit, which ill save for a separate pass to keep the diff smallish | https://api.github.com/repos/pandas-dev/pandas/pulls/30029 | 2019-12-04T01:42:28Z | 2019-12-04T18:47:30Z | 2019-12-04T18:47:30Z | 2019-12-04T19:32:52Z |
f strings in core/sorting | diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index ed9ef23132683..b99ac9cc333c6 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -208,7 +208,7 @@ def lexsort_indexer(keys, orders=None, na_position="last"):
cat = Categorical(key, ordered=True)
if na_position not in ["last", "first"]:
- raise ValueError(f"invalid na_position: {repr(na_position)}")
+ raise ValueError(f"invalid na_position: {na_position}")
n = len(cat.categories)
codes = cat.codes.copy()
@@ -264,7 +264,7 @@ def nargsort(items, kind="quicksort", ascending: bool = True, na_position="last"
elif na_position == "first":
indexer = np.concatenate([nan_idx, indexer])
else:
- raise ValueError(f"invalid na_position: {repr(na_position)}")
+ raise ValueError(f"invalid na_position: {na_position}")
return indexer
| - [ ] references #29547
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30028 | 2019-12-04T01:02:14Z | 2019-12-04T23:11:45Z | 2019-12-04T23:11:45Z | 2019-12-04T23:11:51Z |
Updated .format() to f-strings | diff --git a/pandas/io/html.py b/pandas/io/html.py
index b8cb6679a9562..1a0e0537ad325 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -587,7 +587,7 @@ def _parse_tfoot_tr(self, table):
def _setup_build_doc(self):
raw_text = _read(self.io)
if not raw_text:
- raise ValueError("No text parsed from document: {doc}".format(doc=self.io))
+ raise ValueError(f"No text parsed from document: {self.io}")
return raw_text
def _build_doc(self):
@@ -616,8 +616,8 @@ def _build_xpath_expr(attrs) -> str:
if "class_" in attrs:
attrs["class"] = attrs.pop("class_")
- s = [f"@{k}={repr(v)}" for k, v in attrs.items()]
- return "[{expr}]".format(expr=" and ".join(s))
+ s = " and ".join([f"@{k}={repr(v)}" for k, v in attrs.items()])
+ return f"[{s}]"
_re_namespace = {"re": "http://exslt.org/regular-expressions"}
@@ -846,7 +846,8 @@ def _parser_dispatch(flavor):
def _print_as_set(s) -> str:
- return "{" + "{arg}".format(arg=", ".join(pprint_thing(el) for el in s)) + "}"
+ arg = ", ".join(pprint_thing(el) for el in s)
+ return f"{{{arg}}}"
def _validate_flavor(flavor):
@@ -871,10 +872,8 @@ def _validate_flavor(flavor):
if not flavor_set & valid_flavors:
raise ValueError(
- "{invalid} is not a valid set of flavors, valid "
- "flavors are {valid}".format(
- invalid=_print_as_set(flavor_set), valid=_print_as_set(valid_flavors)
- )
+ f"{_print_as_set(flavor_set)} is not a valid set of flavors, valid "
+ f"flavors are {_print_as_set(valid_flavors)}"
)
return flavor
@@ -898,11 +897,11 @@ def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs):
elif hasattr(io, "seekable") and not io.seekable():
# if we couldn't rewind it, let the user know
raise ValueError(
- "The flavor {} failed to parse your input. "
+ f"The flavor {flav} failed to parse your input. "
"Since you passed a non-rewindable file "
"object, we can't rewind it to try "
"another parser. Try read_html() with a "
- "different flavor.".format(flav)
+ "different flavor."
)
retained = caught
| - [ ] references #29547
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30027 | 2019-12-04T00:53:10Z | 2019-12-05T16:38:12Z | 2019-12-05T16:38:12Z | 2019-12-05T16:38:41Z |
fstring in io.sql | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 684e602f06d12..50fa04a405837 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -241,7 +241,7 @@ def read_sql_table(
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError:
- raise ValueError("Table {name} not found".format(name=table_name))
+ raise ValueError(f"Table {table_name} not found")
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
@@ -256,7 +256,7 @@ def read_sql_table(
if table is not None:
return table
else:
- raise ValueError("Table {name} not found".format(name=table_name), con)
+ raise ValueError(f"Table {table_name} not found", con)
def read_sql_query(
@@ -498,7 +498,7 @@ def to_sql(
.. versionadded:: 0.24.0
"""
if if_exists not in ("fail", "replace", "append"):
- raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
+ raise ValueError(f"'{if_exists}' is not valid for if_exists")
pandas_sql = pandasSQL_builder(con, schema=schema)
@@ -625,7 +625,7 @@ def __init__(
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
- raise ValueError("Could not init table '{name}'".format(name=name))
+ raise ValueError(f"Could not init table '{name}'")
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
@@ -643,18 +643,14 @@ def _execute_create(self):
def create(self):
if self.exists():
if self.if_exists == "fail":
- raise ValueError(
- "Table '{name}' already exists.".format(name=self.name)
- )
+ raise ValueError(f"Table '{self.name}' already exists.")
elif self.if_exists == "replace":
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == "append":
pass
else:
- raise ValueError(
- "'{0}' is not valid for if_exists".format(self.if_exists)
- )
+ raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
else:
self._execute_create()
@@ -689,7 +685,7 @@ def insert_data(self):
try:
temp.reset_index(inplace=True)
except ValueError as err:
- raise ValueError("duplicate name in index/columns: {0}".format(err))
+ raise ValueError(f"duplicate name in index/columns: {err}")
else:
temp = self.frame
@@ -732,7 +728,7 @@ def insert(self, chunksize=None, method=None):
elif callable(method):
exec_insert = partial(method, self)
else:
- raise ValueError("Invalid parameter `method`: {}".format(method))
+ raise ValueError(f"Invalid parameter `method`: {method}")
keys, data_list = self.insert_data()
@@ -826,7 +822,7 @@ def _index_name(self, index, index_label):
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
- "levels, which is {0}".format(nlevels)
+ f"levels, which is {nlevels}"
)
else:
return index_label
@@ -839,7 +835,7 @@ def _index_name(self, index, index_label):
return ["index"]
else:
return [
- l if l is not None else "level_{0}".format(i)
+ l if l is not None else f"level_{i}"
for i, l in enumerate(self.frame.index.names)
]
@@ -1304,10 +1300,7 @@ def to_sql(
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
- raise ValueError(
- "The type of {column} is not a "
- "SQLAlchemy type ".format(column=col)
- )
+ raise ValueError(f"The type of {col} is not a SQLAlchemy type")
table = SQLTable(
name,
@@ -1331,11 +1324,11 @@ def to_sql(
)
if name not in table_names:
msg = (
- "The provided table name '{0}' is not found exactly as "
+ f"The provided table name '{name}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
- ).format(name)
+ )
warnings.warn(msg, UserWarning)
@property
@@ -1395,9 +1388,7 @@ def _get_unicode_name(name):
try:
uname = str(name).encode("utf-8", "strict").decode("utf-8")
except UnicodeError:
- raise ValueError(
- "Cannot convert identifier to UTF-8: '{name}'".format(name=name)
- )
+ raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'")
return uname
@@ -1461,8 +1452,8 @@ def insert_statement(self):
bracketed_names = [escape(column) for column in names]
col_names = ",".join(bracketed_names)
wildcards = ",".join([wld] * len(names))
- insert_statement = "INSERT INTO {table} ({columns}) VALUES ({wld})".format(
- table=escape(self.name), columns=col_names, wld=wildcards
+ insert_statement = (
+ f"INSERT INTO {escape(self.name)} ({col_names}) VALUES ({wildcards})"
)
return insert_statement
@@ -1496,9 +1487,7 @@ def _create_table_setup(self):
keys = self.keys
cnames_br = ", ".join(escape(c) for c in keys)
create_tbl_stmts.append(
- "CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
- tbl=self.name, cnames_br=cnames_br
- )
+ f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
)
create_stmts = [
@@ -1599,14 +1588,11 @@ def execute(self, *args, **kwargs):
self.con.rollback()
except Exception as inner_exc: # pragma: no cover
ex = DatabaseError(
- "Execution failed on sql: {sql}\n{exc}\nunable "
- "to rollback".format(sql=args[0], exc=exc)
+ f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback"
)
raise ex from inner_exc
- ex = DatabaseError(
- "Execution failed on sql '{sql}': {exc}".format(sql=args[0], exc=exc)
- )
+ ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}")
raise ex from exc
@staticmethod
@@ -1731,11 +1717,7 @@ def to_sql(
if dtype is not None:
for col, my_type in dtype.items():
if not isinstance(my_type, str):
- raise ValueError(
- "{column} ({type!s}) not a string".format(
- column=col, type=my_type
- )
- )
+ raise ValueError(f"{col} ({my_type}) not a string")
table = SQLiteTable(
name,
@@ -1755,9 +1737,7 @@ def has_table(self, name, schema=None):
# esc_name = escape(name)
wld = "?"
- query = (
- "SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
- ).format(wld=wld)
+ query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
return len(self.execute(query, [name]).fetchall()) > 0
@@ -1765,7 +1745,7 @@ def get_table(self, table_name, schema=None):
return None # not supported in fallback mode
def drop_table(self, name, schema=None):
- drop_sql = "DROP TABLE {name}".format(name=_get_valid_sqlite_name(name))
+ drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
| - [ ] ref #29547
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30026 | 2019-12-04T00:36:53Z | 2019-12-31T09:02:19Z | 2019-12-31T09:02:19Z | 2019-12-31T09:02:54Z |
f strings in core/series | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 410b10a69ecd5..5981f3f71996e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -104,9 +104,9 @@ def _coerce_method(converter):
def wrapper(self):
if len(self) == 1:
return converter(self.iloc[0])
- raise TypeError("cannot convert the series to {0}".format(str(converter)))
+ raise TypeError(f"cannot convert the series to {converter}")
- wrapper.__name__ = "__{name}__".format(name=converter.__name__)
+ wrapper.__name__ = f"__{converter.__name__}__"
return wrapper
@@ -274,8 +274,8 @@ def __init__(
try:
if len(index) != len(data):
raise ValueError(
- "Length of passed values is {val}, "
- "index implies {ind}".format(val=len(data), ind=len(index))
+ f"Length of passed values is {len(data)}, "
+ f"index implies {len(index)}."
)
except TypeError:
pass
@@ -1464,7 +1464,7 @@ def items(self):
--------
>>> s = pd.Series(['A', 'B', 'C'])
>>> for index, value in s.items():
- ... print("Index : {}, Value : {}".format(index, value))
+ ... print(f"Index : {index}, Value : {value}")
Index : 0, Value : A
Index : 1, Value : B
Index : 2, Value : C
@@ -2171,7 +2171,7 @@ def corr(self, other, method="pearson", min_periods=None):
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
- "'{method}' was supplied".format(method=method)
+ f"'{method}' was supplied"
)
def cov(self, other, min_periods=None):
@@ -2887,7 +2887,7 @@ def _try_kind_sort(arr):
sortedIdx[n:] = idx[good][argsorted]
sortedIdx[:n] = idx[bad]
else:
- raise ValueError(f"invalid na_position: {repr(na_position)}")
+ raise ValueError(f"invalid na_position: {na_position}")
result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])
@@ -3779,7 +3779,7 @@ def _reduce(
elif isinstance(delegate, np.ndarray):
if numeric_only:
raise NotImplementedError(
- "Series.{0} does not implement numeric_only.".format(name)
+ f"Series.{name} does not implement numeric_only."
)
with np.errstate(all="ignore"):
return op(delegate, skipna=skipna, **kwds)
| - [ ] references #29547
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30025 | 2019-12-04T00:21:33Z | 2019-12-05T06:31:24Z | 2019-12-05T06:31:24Z | 2019-12-05T06:31:35Z |
f-string update for core.base. | diff --git a/pandas/core/base.py b/pandas/core/base.py
index 066a7628be364..5e613849ba8d5 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -114,9 +114,7 @@ def __setattr__(self, key, value):
or key in type(self).__dict__
or getattr(self, key, None) is not None
):
- raise AttributeError(
- "You cannot add any new attribute '{key}'".format(key=key)
- )
+ raise AttributeError(f"You cannot add any new attribute '{key}'")
object.__setattr__(self, key, value)
@@ -220,28 +218,22 @@ def _obj_with_exclusions(self):
def __getitem__(self, key):
if self._selection is not None:
- raise IndexError(
- "Column(s) {selection} already selected".format(
- selection=self._selection
- )
- )
+ raise IndexError(f"Column(s) {self._selection} already selected")
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass, np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
- raise KeyError(
- "Columns not found: {missing}".format(missing=str(bad_keys)[1:-1])
- )
+ raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}")
return self._gotitem(list(key), ndim=2)
elif not getattr(self, "as_index", False):
if key not in self.obj.columns:
- raise KeyError("Column not found: {key}".format(key=key))
+ raise KeyError(f"Column not found: {key}")
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
- raise KeyError("Column not found: {key}".format(key=key))
+ raise KeyError(f"Column not found: {key}")
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
@@ -293,8 +285,7 @@ def _try_aggregate_string_function(self, arg: str, *args, **kwargs):
return f(self, *args, **kwargs)
raise AttributeError(
- "'{arg}' is not a valid function for "
- "'{cls}' object".format(arg=arg, cls=type(self).__name__)
+ f"'{arg}' is not a valid function for '{type(self).__name__}' object"
)
def _aggregate(self, arg, *args, **kwargs):
@@ -359,7 +350,7 @@ def _aggregate(self, arg, *args, **kwargs):
elif isinstance(obj, ABCSeries):
raise SpecificationError("nested renamer is not supported")
elif isinstance(obj, ABCDataFrame) and k not in obj.columns:
- raise KeyError("Column '{col}' does not exist!".format(col=k))
+ raise KeyError(f"Column '{k}' does not exist!")
arg = new_arg
@@ -1101,9 +1092,7 @@ def _reduce(
func = getattr(self, name, None)
if func is None:
raise TypeError(
- "{klass} cannot perform the operation {op}".format(
- klass=type(self).__name__, op=name
- )
+ f"{type(self).__name__} cannot perform the operation {name}"
)
return func(skipna=skipna, **kwds)
| - [ ] ref #29547
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30023 | 2019-12-03T23:42:41Z | 2019-12-04T13:29:09Z | 2019-12-04T13:29:09Z | 2019-12-04T13:29:13Z |
f string in io.common | diff --git a/pandas/io/common.py b/pandas/io/common.py
index 54253803c4881..a01011cd7d4e4 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -246,8 +246,8 @@ def get_filepath_or_buffer(
return _expand_user(filepath_or_buffer), None, compression, False
if not is_file_like(filepath_or_buffer):
- msg = "Invalid file path or buffer object type: {_type}"
- raise ValueError(msg.format(_type=type(filepath_or_buffer)))
+ msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
+ raise ValueError(msg)
return filepath_or_buffer, None, compression, False
@@ -355,9 +355,9 @@ def _infer_compression(
if compression in _compression_to_extension:
return compression
- msg = "Unrecognized compression type: {}".format(compression)
+ msg = f"Unrecognized compression type: {compression}"
valid = ["infer", None] + sorted(_compression_to_extension)
- msg += "\nValid compression types are {}".format(valid)
+ msg += f"\nValid compression types are {valid}"
raise ValueError(msg)
@@ -454,13 +454,11 @@ def _get_handle(
if len(zip_names) == 1:
f = zf.open(zip_names.pop())
elif len(zip_names) == 0:
- raise ValueError(
- "Zero files found in ZIP file {}".format(path_or_buf)
- )
+ raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
else:
raise ValueError(
"Multiple files found in ZIP file."
- " Only one file per ZIP: {}".format(zip_names)
+ f" Only one file per ZIP: {zip_names}"
)
# XZ Compression
@@ -469,7 +467,7 @@ def _get_handle(
# Unrecognized Compression
else:
- msg = "Unrecognized compression type: {}".format(compression)
+ msg = f"Unrecognized compression type: {compression}"
raise ValueError(msg)
handles.append(f)
| - [ ] ref #29547
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30022 | 2019-12-03T23:26:20Z | 2019-12-11T13:16:58Z | 2019-12-11T13:16:58Z | 2019-12-11T19:03:12Z |
added f strings and typing to frame.py | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index fde3d1657b4f2..88967b13c89b5 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -15,6 +15,7 @@
import sys
from textwrap import dedent
from typing import (
+ Any,
FrozenSet,
Hashable,
Iterable,
@@ -25,6 +26,7 @@
Tuple,
Type,
Union,
+ cast,
)
import warnings
@@ -475,7 +477,7 @@ def __init__(
except (ValueError, TypeError) as e:
exc = TypeError(
"DataFrame constructor called with "
- "incompatible data and dtype: {e}".format(e=e)
+ f"incompatible data and dtype: {e}"
)
raise exc from e
@@ -1112,8 +1114,7 @@ def dot(self, other):
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError(
- "Dot product shape mismatch, "
- "{s} vs {r}".format(s=lvals.shape, r=rvals.shape)
+ f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, DataFrame):
@@ -1129,7 +1130,7 @@ def dot(self, other):
else:
return Series(result, index=left.index)
else: # pragma: no cover
- raise TypeError("unsupported type: {oth}".format(oth=type(other)))
+ raise TypeError(f"unsupported type: {type(other)}")
def __matmul__(self, other):
"""
@@ -1417,7 +1418,7 @@ def to_dict(self, orient="dict", into=dict):
for t in self.itertuples(name=None)
)
else:
- raise ValueError("orient '{o}' not understood".format(o=orient))
+ raise ValueError(f"orient '{orient}' not understood")
def to_gbq(
self,
@@ -1836,9 +1837,7 @@ def to_records(self, index=True, column_dtypes=None, index_dtypes=None):
formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
- msg = ("Invalid dtype {dtype} specified for {element} {name}").format(
- dtype=dtype_mapping, element=element, name=name
- )
+ msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
raise ValueError(msg)
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
@@ -2307,7 +2306,7 @@ def info(
lines.append(self.index._summary())
if len(self.columns) == 0:
- lines.append("Empty {name}".format(name=type(self).__name__))
+ lines.append(f"Empty {type(self).__name__}")
fmt.buffer_put_lines(buf, lines)
return
@@ -2335,10 +2334,7 @@ def _verbose_repr():
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError(
- "Columns must equal counts "
- "({cols:d} != {counts:d})".format(
- cols=len(cols), counts=len(counts)
- )
+ f"Columns must equal counts ({len(cols)} != {len(counts)})"
)
tmpl = "{count} non-null {dtype}"
@@ -2382,7 +2378,7 @@ def _sizeof_fmt(num, size_qualifier):
counts = self._data.get_dtype_counts()
dtypes = ["{k}({kk:d})".format(k=k[0], kk=k[1]) for k in sorted(counts.items())]
- lines.append("dtypes: {types}".format(types=", ".join(dtypes)))
+ lines.append(f"dtypes: {', '.join(dtypes)}")
if memory_usage is None:
memory_usage = get_option("display.memory_usage")
@@ -2399,12 +2395,7 @@ def _sizeof_fmt(num, size_qualifier):
if "object" in counts or self.index._is_memory_usage_qualified():
size_qualifier = "+"
mem_usage = self.memory_usage(index=True, deep=deep).sum()
- lines.append(
- "memory usage: {mem}\n".format(
- mem=_sizeof_fmt(mem_usage, size_qualifier)
- )
- )
-
+ lines.append(f"memory usage: {_sizeof_fmt(mem_usage, size_qualifier)}\n")
fmt.buffer_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False):
@@ -3069,8 +3060,8 @@ def query(self, expr, inplace=False, **kwargs):
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(expr, str):
- msg = "expr must be a string to be evaluated, {0} given"
- raise ValueError(msg.format(type(expr)))
+ msg = f"expr must be a string to be evaluated, {type(expr)} given"
+ raise ValueError(msg)
kwargs["level"] = kwargs.pop("level", 0) + 1
kwargs["target"] = None
res = self.eval(expr, **kwargs)
@@ -3287,11 +3278,7 @@ def select_dtypes(self, include=None, exclude=None):
# can't both include AND exclude!
if not include.isdisjoint(exclude):
- raise ValueError(
- "include and exclude overlap on {inc_ex}".format(
- inc_ex=(include & exclude)
- )
- )
+ raise ValueError(f"include and exclude overlap on {(include & exclude)}")
# We raise when both include and exclude are empty
# Hence, we can just shrink the columns we want to keep
@@ -4128,15 +4115,13 @@ def set_index(
try:
found = col in self.columns
except TypeError:
- raise TypeError(
- err_msg + " Received column of type {}".format(type(col))
- )
+ raise TypeError(f"{err_msg}. Received column of type {type(col)}")
else:
if not found:
missing.append(col)
if missing:
- raise KeyError("None of {} are in the columns".format(missing))
+ raise KeyError(f"None of {missing} are in the columns")
if inplace:
frame = self
@@ -4180,17 +4165,15 @@ def set_index(
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError(
- "Length mismatch: Expected {len_self} rows, "
- "received array of length {len_col}".format(
- len_self=len(self), len_col=len(arrays[-1])
- )
+ f"Length mismatch: Expected {len(self)} rows, "
+ f"received array of length {len(arrays[-1])}"
)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
- raise ValueError("Index has duplicate keys: {dup}".format(dup=duplicates))
+ raise ValueError(f"Index has duplicate keys: {duplicates}")
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
@@ -4205,8 +4188,13 @@ def set_index(
return frame
def reset_index(
- self, level=None, drop=False, inplace=False, col_level=0, col_fill=""
- ):
+ self,
+ level: Optional[Union[Hashable, Sequence[Hashable]]] = None,
+ drop: bool = False,
+ inplace: bool = False,
+ col_level: Hashable = 0,
+ col_fill: Optional[Hashable] = "",
+ ) -> Optional["DataFrame"]:
"""
Reset the index, or a level of it.
@@ -4234,8 +4222,8 @@ def reset_index(
Returns
-------
- DataFrame
- DataFrame with the new index.
+ DataFrame or None
+ DataFrame with the new index or None if ``inplace=True``.
See Also
--------
@@ -4400,6 +4388,7 @@ def _maybe_casted_values(index, labels=None):
new_index = self.index.droplevel(level)
if not drop:
+ to_insert: Iterable[Tuple[Any, Optional[Any]]]
if isinstance(self.index, ABCMultiIndex):
names = [
(n if n is not None else f"level_{i}")
@@ -4422,8 +4411,7 @@ def _maybe_casted_values(index, labels=None):
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError(
"col_fill=None is incompatible "
- "with incomplete column name "
- "{}".format(name)
+ f"with incomplete column name {name}"
)
col_fill = col_name[0]
@@ -4440,6 +4428,8 @@ def _maybe_casted_values(index, labels=None):
if not inplace:
return new_obj
+ return None
+
# ----------------------------------------------------------------------
# Reindex-based selection methods
@@ -4589,7 +4579,7 @@ def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
mask = count > 0
else:
if how is not None:
- raise ValueError("invalid how option: {h}".format(h=how))
+ raise ValueError(f"invalid how option: {how}")
else:
raise TypeError("must specify how or thresh")
@@ -4600,7 +4590,12 @@ def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
else:
return result
- def drop_duplicates(self, subset=None, keep="first", inplace=False):
+ def drop_duplicates(
+ self,
+ subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
+ keep: Union[str, bool] = "first",
+ inplace: bool = False,
+ ) -> Optional["DataFrame"]:
"""
Return DataFrame with duplicate rows removed.
@@ -4623,6 +4618,7 @@ def drop_duplicates(self, subset=None, keep="first", inplace=False):
Returns
-------
DataFrame
+ DataFrame with duplicates removed or None if ``inplace=True``.
"""
if self.empty:
return self.copy()
@@ -4637,7 +4633,13 @@ def drop_duplicates(self, subset=None, keep="first", inplace=False):
else:
return self[-duplicated]
- def duplicated(self, subset=None, keep="first"):
+ return None
+
+ def duplicated(
+ self,
+ subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
+ keep: Union[str, bool] = "first",
+ ) -> "Series":
"""
Return boolean Series denoting duplicate rows.
@@ -4681,6 +4683,9 @@ def f(vals):
):
subset = (subset,)
+ # needed for mypy since can't narrow types using np.iterable
+ subset = cast(Iterable, subset)
+
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
@@ -6030,6 +6035,8 @@ def explode(self, column: Union[str, Tuple]) -> "DataFrame":
raise ValueError("columns must be unique")
df = self.reset_index(drop=True)
+ # TODO: use overload to refine return type of reset_index
+ assert df is not None # needed for mypy
result = df[column].explode()
result = df.drop([column], axis=1).join(result)
result.index = self.index.take(result.index)
@@ -7208,7 +7215,7 @@ def corr(self, method="pearson", min_periods=1):
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
- "'{method}' was supplied".format(method=method)
+ f"'{method}' was supplied"
)
return self._constructor(correl, index=idx, columns=cols)
@@ -7399,9 +7406,9 @@ def c(x):
else:
raise ValueError(
- "Invalid method {method} was passed, "
+ f"Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
- "'spearman', or callable".format(method=method)
+ "'spearman', or callable"
)
if not drop:
@@ -7531,8 +7538,7 @@ def _count_level(self, level, axis=0, numeric_only=False):
if not isinstance(count_axis, ABCMultiIndex):
raise TypeError(
- "Can only count levels on hierarchical "
- "{ax}.".format(ax=self._get_axis_name(axis))
+ f"Can only count levels on hierarchical {self._get_axis_name(axis)}."
)
if frame._is_mixed_type:
@@ -7590,8 +7596,8 @@ def _get_data(axis_matters):
data = self._get_bool_data()
else: # pragma: no cover
msg = (
- "Generating numeric_only data with filter_type {f}"
- "not supported.".format(f=filter_type)
+ f"Generating numeric_only data with filter_type {filter_type} "
+ "not supported."
)
raise NotImplementedError(msg)
return data
@@ -8000,7 +8006,7 @@ def to_timestamp(self, freq=None, how="start", axis=0, copy=True):
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
- raise AssertionError("Axis must be 0 or 1. Got {ax!s}".format(ax=axis))
+ raise AssertionError(f"Axis must be 0 or 1. Got {axis}")
return self._constructor(new_data)
@@ -8034,7 +8040,7 @@ def to_period(self, freq=None, axis=0, copy=True):
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
- raise AssertionError("Axis must be 0 or 1. Got {ax!s}".format(ax=axis))
+ raise AssertionError(f"Axis must be 0 or 1. Got {axis}")
return self._constructor(new_data)
@@ -8123,8 +8129,8 @@ def isin(self, values):
else:
if not is_list_like(values):
raise TypeError(
- f"only list-like or dict-like objects are allowed "
- f"to be passed to DataFrame.isin(), "
+ "only list-like or dict-like objects are allowed "
+ "to be passed to DataFrame.isin(), "
f"you passed a {repr(type(values).__name__)}"
)
return DataFrame(
@@ -8170,4 +8176,4 @@ def _from_nested_dict(data):
def _put_str(s, space):
- return "{s}".format(s=s)[:space].ljust(space)
+ return str(s)[:space].ljust(space)
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index d671fff568891..726a59ca8e008 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -126,7 +126,10 @@ def _groupby_and_merge(
on = [on]
if right.duplicated(by + on).any():
- right = right.drop_duplicates(by + on, keep="last")
+ _right = right.drop_duplicates(by + on, keep="last")
+ # TODO: use overload to refine return type of drop_duplicates
+ assert _right is not None # needed for mypy
+ right = _right
rby = right.groupby(by, sort=False)
except KeyError:
rby = None
| replaced old formatting with fstrings where possible
added typing hints to duplicated, drop_duplicates, reset_index | https://api.github.com/repos/pandas-dev/pandas/pulls/30021 | 2019-12-03T23:15:20Z | 2019-12-06T20:50:30Z | 2019-12-06T20:50:29Z | 2019-12-06T21:12:41Z |
Replaced !r with repr() | diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 82eb93dd4c879..b99ac9cc333c6 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -208,7 +208,7 @@ def lexsort_indexer(keys, orders=None, na_position="last"):
cat = Categorical(key, ordered=True)
if na_position not in ["last", "first"]:
- raise ValueError("invalid na_position: {!r}".format(na_position))
+ raise ValueError(f"invalid na_position: {na_position}")
n = len(cat.categories)
codes = cat.codes.copy()
@@ -264,7 +264,7 @@ def nargsort(items, kind="quicksort", ascending: bool = True, na_position="last"
elif na_position == "first":
indexer = np.concatenate([nan_idx, indexer])
else:
- raise ValueError("invalid na_position: {!r}".format(na_position))
+ raise ValueError(f"invalid na_position: {na_position}")
return indexer
| - [x] reference #29886
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Issue: https://github.com/pandas-dev/pandas/issues/29886
@WillAyd | https://api.github.com/repos/pandas-dev/pandas/pulls/30020 | 2019-12-03T23:13:39Z | 2019-12-11T10:06:07Z | 2019-12-11T10:06:07Z | 2019-12-11T10:06:22Z |
CLN: no need to broadcast axes for ndim<=2 (pytables) | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index a48d9abc3c13b..e7e8f1a74684b 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -4232,21 +4232,7 @@ def write_data(self, chunksize: Optional[int], dropna: bool = False):
# broadcast the indexes if needed
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
- bindexes = []
- for i, idx in enumerate(indexes):
-
- # broadcast to all other indexes except myself
- if i > 0 and i < nindexes:
- repeater = np.prod([indexes[bi].shape[0] for bi in range(0, i)])
- idx = np.tile(idx, repeater)
-
- if i < nindexes - 1:
- repeater = np.prod(
- [indexes[bi].shape[0] for bi in range(i + 1, nindexes)]
- )
- idx = np.repeat(idx, repeater)
-
- bindexes.append(idx)
+ assert nindexes == 1, nindexes # ensures we dont need to broadcast
# transpose the values so first dimension is last
# reshape the values if needed
@@ -4271,7 +4257,7 @@ def write_data(self, chunksize: Optional[int], dropna: bool = False):
self.write_data_chunk(
rows,
- indexes=[a[start_i:end_i] for a in bindexes],
+ indexes=[a[start_i:end_i] for a in indexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues],
)
| It takes a little bit of work to show, but `self.index_axes` is always length-1 on L4243, which means that nindexes == 1 (the assertion added in the code).
This ensures that the condition `if i > 0 and i < nindexes` on L4239 is is never satisfied, so `bindexes` is unnecessary. | https://api.github.com/repos/pandas-dev/pandas/pulls/30019 | 2019-12-03T23:06:10Z | 2019-12-04T13:32:39Z | 2019-12-04T13:32:39Z | 2019-12-04T16:55:36Z |
CLN: pytables _set_tz/_get_tz | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index fca1d3265cac2..f948cebcb28d8 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -4,7 +4,7 @@
"""
import copy
-from datetime import date
+from datetime import date, tzinfo
import itertools
import os
import re
@@ -2918,7 +2918,8 @@ def read_array(
if dtype == "datetime64":
# reconstruct a timezone if indicated
- ret = _set_tz(ret, getattr(attrs, "tz", None), coerce=True)
+ tz = getattr(attrs, "tz", None)
+ ret = _set_tz(ret, tz, coerce=True)
elif dtype == "timedelta64":
ret = np.asarray(ret, dtype="m8[ns]")
@@ -4158,7 +4159,7 @@ def read_column(
encoding=self.encoding,
errors=self.errors,
)
- return Series(_set_tz(a.take_data(), a.tz, True), name=column)
+ return Series(_set_tz(a.take_data(), a.tz), name=column)
raise KeyError(f"column [{column}] not found in the table")
@@ -4687,37 +4688,39 @@ def _get_info(info, name):
# tz to/from coercion
-def _get_tz(tz):
+def _get_tz(tz: tzinfo) -> Union[str, tzinfo]:
""" for a tz-aware type, return an encoded zone """
zone = timezones.get_timezone(tz)
- if zone is None:
- zone = tz.utcoffset().total_seconds()
return zone
-def _set_tz(values, tz, preserve_UTC: bool = False, coerce: bool = False):
+def _set_tz(
+ values: Union[np.ndarray, Index],
+ tz: Optional[Union[str, tzinfo]],
+ coerce: bool = False,
+) -> Union[np.ndarray, DatetimeIndex]:
"""
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible
Parameters
----------
- values : ndarray
- tz : string/pickled tz object
- preserve_UTC : bool,
- preserve the UTC of the result
+ values : ndarray or Index
+ tz : str or tzinfo
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
"""
+ if isinstance(values, DatetimeIndex):
+ # If values is tzaware, the tz gets dropped in the values.ravel()
+ # call below (which returns an ndarray). So we are only non-lossy
+ # if `tz` matches `values.tz`.
+ assert values.tz is None or values.tz == tz
+
if tz is not None:
name = getattr(values, "name", None)
values = values.ravel()
tz = timezones.get_timezone(_ensure_decoded(tz))
values = DatetimeIndex(values, name=name)
- if values.tz is None:
- values = values.tz_localize("UTC").tz_convert(tz)
- if preserve_UTC:
- if tz == "UTC":
- values = list(values)
+ values = values.tz_localize("UTC").tz_convert(tz)
elif coerce:
values = np.asarray(values, dtype="M8[ns]")
| removes preserve_UTC argument, which never did anything, and didnt have a clear purpose | https://api.github.com/repos/pandas-dev/pandas/pulls/30018 | 2019-12-03T22:55:06Z | 2019-12-04T18:50:04Z | 2019-12-04T18:50:04Z | 2019-12-04T19:16:26Z |
REF: make indexables cache_readonly (pytables) | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index a48d9abc3c13b..f7b98df9ba41e 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -29,6 +29,7 @@
from pandas._libs.tslibs import timezones
from pandas.compat._optional import import_optional_dependency
from pandas.errors import PerformanceWarning
+from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
ensure_object,
@@ -3522,43 +3523,39 @@ def validate_min_itemsize(self, min_itemsize):
"data_column"
)
- @property
+ @cache_readonly
def indexables(self):
""" create/cache the indexables if they don't exist """
- if self._indexables is None:
+ _indexables = []
+
+ # Note: each of the `name` kwargs below are str, ensured
+ # by the definition in index_cols.
+ # index columns
+ _indexables.extend(
+ [
+ IndexCol(name=name, axis=axis, pos=i)
+ for i, (axis, name) in enumerate(self.attrs.index_cols)
+ ]
+ )
- self._indexables = []
+ # values columns
+ dc = set(self.data_columns)
+ base_pos = len(_indexables)
- # Note: each of the `name` kwargs below are str, ensured
- # by the definition in index_cols.
- # index columns
- self._indexables.extend(
- [
- IndexCol(name=name, axis=axis, pos=i)
- for i, (axis, name) in enumerate(self.attrs.index_cols)
- ]
+ def f(i, c):
+ assert isinstance(c, str)
+ klass = DataCol
+ if c in dc:
+ klass = DataIndexableCol
+ return klass.create_for_block(
+ i=i, name=c, pos=base_pos + i, version=self.version
)
- # values columns
- dc = set(self.data_columns)
- base_pos = len(self._indexables)
-
- def f(i, c):
- assert isinstance(c, str)
- klass = DataCol
- if c in dc:
- klass = DataIndexableCol
- return klass.create_for_block(
- i=i, name=c, pos=base_pos + i, version=self.version
- )
-
- # Note: the definition of `values_cols` ensures that each
- # `c` below is a str.
- self._indexables.extend(
- [f(i, c) for i, c in enumerate(self.attrs.values_cols)]
- )
+ # Note: the definition of `values_cols` ensures that each
+ # `c` below is a str.
+ _indexables.extend([f(i, c) for i, c in enumerate(self.attrs.values_cols)])
- return self._indexables
+ return _indexables
def create_index(self, columns=None, optlevel=None, kind: Optional[str] = None):
"""
@@ -4140,7 +4137,6 @@ def write(self, **kwargs):
class AppendableTable(Table):
""" support the new appendable table formats """
- _indexables = None
table_type = "appendable"
def write(
@@ -4554,23 +4550,21 @@ def get_attrs(self):
]
self.data_columns = [a.name for a in self.values_axes]
- @property
+ @cache_readonly
def indexables(self):
""" create the indexables from the table description """
- if self._indexables is None:
-
- d = self.description
+ d = self.description
- # the index columns is just a simple index
- self._indexables = [GenericIndexCol(name="index", axis=0)]
+ # the index columns is just a simple index
+ _indexables = [GenericIndexCol(name="index", axis=0)]
- for i, n in enumerate(d._v_names):
- assert isinstance(n, str)
+ for i, n in enumerate(d._v_names):
+ assert isinstance(n, str)
- dc = GenericDataIndexableCol(name=n, pos=i, values=[n])
- self._indexables.append(dc)
+ dc = GenericDataIndexableCol(name=n, pos=i, values=[n])
+ _indexables.append(dc)
- return self._indexables
+ return _indexables
def write(self, **kwargs):
raise NotImplementedError("cannot write on an generic table")
| making these classes less stateful bit by bit | https://api.github.com/repos/pandas-dev/pandas/pulls/30016 | 2019-12-03T21:40:48Z | 2019-12-04T13:46:07Z | 2019-12-04T13:46:07Z | 2019-12-04T16:42:39Z |
DOC: Added "Raises `KeyError`" | diff --git a/pandas/compat/chainmap.py b/pandas/compat/chainmap.py
index 84824207de2a9..479eddf0c0536 100644
--- a/pandas/compat/chainmap.py
+++ b/pandas/compat/chainmap.py
@@ -10,6 +10,12 @@ def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
+ """
+ Raises
+ ------
+ KeyError
+ If `key` doesn't exist.
+ """
for mapping in self.maps:
if key in mapping:
del mapping[key]
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30014 | 2019-12-03T21:31:00Z | 2019-12-04T13:48:44Z | 2019-12-04T13:48:44Z | 2019-12-04T15:18:23Z |
CLN: annotate and de-nest write_array | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index a48d9abc3c13b..1f9e72bcf8a2f 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -39,6 +39,7 @@
is_list_like,
is_timedelta64_dtype,
)
+from pandas.core.dtypes.generic import ABCExtensionArray
from pandas.core.dtypes.missing import array_equivalent
from pandas import (
@@ -53,7 +54,7 @@
concat,
isna,
)
-from pandas._typing import FrameOrSeries
+from pandas._typing import ArrayLike, FrameOrSeries
from pandas.core.arrays.categorical import Categorical
import pandas.core.common as com
from pandas.core.computation.pytables import PyTablesExpr, maybe_expression
@@ -2959,7 +2960,7 @@ def read_index_node(
data = node[start:stop]
# If the index was an empty array write_array_empty() will
# have written a sentinel. Here we relace it with the original.
- if "shape" in node._v_attrs and self._is_empty_array(node._v_attrs.shape):
+ if "shape" in node._v_attrs and np.prod(node._v_attrs.shape) == 0:
data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type,)
kind = _ensure_decoded(node._v_attrs.kind)
name = None
@@ -3005,25 +3006,27 @@ def read_index_node(
return index
- def write_array_empty(self, key: str, value):
+ def write_array_empty(self, key: str, value: ArrayLike):
""" write a 0-len array """
# ugly hack for length 0 axes
arr = np.empty((1,) * value.ndim)
self._handle.create_array(self.group, key, arr)
- getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
- getattr(self.group, key)._v_attrs.shape = value.shape
+ node = getattr(self.group, key)
+ node._v_attrs.value_type = str(value.dtype)
+ node._v_attrs.shape = value.shape
- def _is_empty_array(self, shape) -> bool:
- """Returns true if any axis is zero length."""
- return any(x == 0 for x in shape)
+ def write_array(self, key: str, value: ArrayLike, items: Optional[Index] = None):
+ # TODO: we only have one test that gets here, the only EA
+ # that gets passed is DatetimeArray, and we never have
+ # both self._filters and EA
+ assert isinstance(value, (np.ndarray, ABCExtensionArray)), type(value)
- def write_array(self, key: str, value, items=None):
if key in self.group:
self._handle.remove_node(self.group, key)
# Transform needed to interface with pytables row/col notation
- empty_array = self._is_empty_array(value.shape)
+ empty_array = value.size == 0
transposed = False
if is_categorical_dtype(value):
@@ -3038,29 +3041,29 @@ def write_array(self, key: str, value, items=None):
value = value.T
transposed = True
+ atom = None
if self._filters is not None:
- atom = None
try:
# get the atom for this datatype
atom = _tables().Atom.from_dtype(value.dtype)
except ValueError:
pass
- if atom is not None:
- # create an empty chunked array and fill it from value
- if not empty_array:
- ca = self._handle.create_carray(
- self.group, key, atom, value.shape, filters=self._filters
- )
- ca[:] = value
- getattr(self.group, key)._v_attrs.transposed = transposed
+ if atom is not None:
+ # We only get here if self._filters is non-None and
+ # the Atom.from_dtype call succeeded
- else:
- self.write_array_empty(key, value)
+ # create an empty chunked array and fill it from value
+ if not empty_array:
+ ca = self._handle.create_carray(
+ self.group, key, atom, value.shape, filters=self._filters
+ )
+ ca[:] = value
- return
+ else:
+ self.write_array_empty(key, value)
- if value.dtype.type == np.object_:
+ elif value.dtype.type == np.object_:
# infer the type, warn if we have a non-string type here (for
# performance)
@@ -3070,35 +3073,30 @@ def write_array(self, key: str, value, items=None):
elif inferred_type == "string":
pass
else:
- try:
- items = list(items)
- except TypeError:
- pass
ws = performance_doc % (inferred_type, key, items)
warnings.warn(ws, PerformanceWarning, stacklevel=7)
vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom())
vlarr.append(value)
+
+ elif empty_array:
+ self.write_array_empty(key, value)
+ elif is_datetime64_dtype(value.dtype):
+ self._handle.create_array(self.group, key, value.view("i8"))
+ getattr(self.group, key)._v_attrs.value_type = "datetime64"
+ elif is_datetime64tz_dtype(value.dtype):
+ # store as UTC
+ # with a zone
+ self._handle.create_array(self.group, key, value.asi8)
+
+ node = getattr(self.group, key)
+ node._v_attrs.tz = _get_tz(value.tz)
+ node._v_attrs.value_type = "datetime64"
+ elif is_timedelta64_dtype(value.dtype):
+ self._handle.create_array(self.group, key, value.view("i8"))
+ getattr(self.group, key)._v_attrs.value_type = "timedelta64"
else:
- if empty_array:
- self.write_array_empty(key, value)
- else:
- if is_datetime64_dtype(value.dtype):
- self._handle.create_array(self.group, key, value.view("i8"))
- getattr(self.group, key)._v_attrs.value_type = "datetime64"
- elif is_datetime64tz_dtype(value.dtype):
- # store as UTC
- # with a zone
- self._handle.create_array(self.group, key, value.asi8)
-
- node = getattr(self.group, key)
- node._v_attrs.tz = _get_tz(value.tz)
- node._v_attrs.value_type = "datetime64"
- elif is_timedelta64_dtype(value.dtype):
- self._handle.create_array(self.group, key, value.view("i8"))
- getattr(self.group, key)._v_attrs.value_type = "timedelta64"
- else:
- self._handle.create_array(self.group, key, value)
+ self._handle.create_array(self.group, key, value)
getattr(self.group, key)._v_attrs.transposed = transposed
| https://api.github.com/repos/pandas-dev/pandas/pulls/30012 | 2019-12-03T20:50:47Z | 2019-12-04T13:47:44Z | 2019-12-04T13:47:44Z | 2019-12-04T16:32:47Z | |
DOC: Fix to lzma.LZMAFile | diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index f95dd8679308f..60cfecd5804ac 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -98,11 +98,11 @@ def is_platform_32bit() -> bool:
def _import_lzma():
"""
- Attempts to import the lzma module.
+ Importing the `lzma` module.
Warns
-----
- When the lzma module is not available.
+ When the `lzma` module is not available.
"""
try:
import lzma
@@ -119,22 +119,22 @@ def _import_lzma():
def _get_lzma_file(lzma):
"""
- Attempting to get the lzma.LZMAFile class.
+ Importing the `LZMAFile` class from the `lzma` module.
Returns
-------
class
- The lzma.LZMAFile class.
+ The `LZMAFile` class from the `lzma` module.
Raises
------
RuntimeError
- If the module lzma was not imported correctly, or didn't exist.
+ If the `lzma` module was not imported correctly, or didn't exist.
"""
if lzma is None:
raise RuntimeError(
"lzma module not available. "
- "A Python re-install with the proper "
- "dependencies might be required to solve this issue."
+ "A Python re-install with the proper dependencies, "
+ "might be required to solve this issue."
)
return lzma.LZMAFile
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/30010 | 2019-12-03T20:32:45Z | 2019-12-04T20:16:44Z | 2019-12-04T20:16:44Z | 2019-12-05T10:22:04Z |
CI: troubleshoot openpyxl failures | diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml
index ddc1ea41a08a3..04ff06a99a1f4 100644
--- a/ci/deps/travis-36-cov.yaml
+++ b/ci/deps/travis-36-cov.yaml
@@ -27,7 +27,8 @@ dependencies:
- numexpr
- numpy=1.15.*
- odfpy
- - openpyxl
+ - openpyxl<=3.0.1
+ # https://github.com/pandas-dev/pandas/pull/30009 openpyxl 3.0.2 broke
- pandas-gbq
# https://github.com/pydata/pandas-gbq/issues/271
- google-cloud-bigquery<=1.11
diff --git a/pandas/tests/io/excel/test_openpyxl.py b/pandas/tests/io/excel/test_openpyxl.py
index f00329e9c7f81..90e795bd5c52f 100644
--- a/pandas/tests/io/excel/test_openpyxl.py
+++ b/pandas/tests/io/excel/test_openpyxl.py
@@ -1,8 +1,11 @@
+from distutils.version import LooseVersion
import os
import numpy as np
import pytest
+from pandas.compat import PY37, is_platform_mac
+
import pandas as pd
from pandas import DataFrame
import pandas.util.testing as tm
@@ -13,6 +16,8 @@
pytestmark = pytest.mark.parametrize("ext", [".xlsx"])
+openpyxl_gt301 = LooseVersion(openpyxl.__version__) > LooseVersion("3.0.1")
+
def test_to_excel_styleconverter(ext):
from openpyxl import styles
@@ -81,6 +86,9 @@ def test_write_cells_merge_styled(ext):
assert xcell_a2.font == openpyxl_sty_merged
+@pytest.mark.xfail(
+ openpyxl_gt301 and PY37 and is_platform_mac(), reason="broken change in openpyxl"
+)
@pytest.mark.parametrize(
"mode,expected", [("w", ["baz"]), ("a", ["foo", "bar", "baz"])]
)
@@ -107,7 +115,9 @@ def test_write_append_mode(ext, mode, expected):
assert wb2.worksheets[index]["A1"].value == cell_value
-@pytest.mark.xfail(openpyxl.__version__ > "3.0.1", reason="broken change in openpyxl")
+@pytest.mark.xfail(
+ openpyxl_gt301 and PY37 and is_platform_mac(), reason="broken change in openpyxl"
+)
def test_to_excel_with_openpyxl_engine(ext, tmpdir):
# GH 29854
# TODO: Fix this once newer version of openpyxl fixes the bug
| tested on OSX 36 and 37, we'll see if that generalizes | https://api.github.com/repos/pandas-dev/pandas/pulls/30009 | 2019-12-03T19:53:29Z | 2019-12-04T01:46:58Z | 2019-12-04T01:46:58Z | 2020-04-05T17:35:09Z |
CLN: test code no longer shared with sparse | diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index a86e1dfe8353c..60befe5e73d37 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -6,35 +6,11 @@
import pytest
import pandas as pd
-from pandas import (
- Categorical,
- DataFrame,
- Series,
- SparseDtype,
- compat,
- date_range,
- timedelta_range,
-)
+from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range
import pandas.util.testing as tm
-class SharedWithSparse:
- """
- A collection of tests DataFrame and SparseDataFrame can share.
-
- In generic tests on this class, use ``self._assert_frame_equal()`` and
- ``self._assert_series_equal()`` which are implemented in sub-classes
- and dispatch correctly.
- """
-
- def _assert_frame_equal(self, left, right):
- """Dispatch to frame class dependent assertion"""
- raise NotImplementedError
-
- def _assert_series_equal(self, left, right):
- """Dispatch to series class dependent assertion"""
- raise NotImplementedError
-
+class TestDataFrameMisc:
def test_copy_index_name_checking(self, float_frame):
# don't want to be able to modify the index stored elsewhere after
# making a copy
@@ -141,16 +117,16 @@ def test_tab_completion(self):
def test_not_hashable(self):
empty_frame = DataFrame()
- df = self.klass([1])
- msg = "'(Sparse)?DataFrame' objects are mutable, thus they cannot be hashed"
+ df = DataFrame([1])
+ msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(df)
with pytest.raises(TypeError, match=msg):
hash(empty_frame)
def test_new_empty_index(self):
- df1 = self.klass(np.random.randn(0, 3))
- df2 = self.klass(np.random.randn(0, 3))
+ df1 = DataFrame(np.random.randn(0, 3))
+ df2 = DataFrame(np.random.randn(0, 3))
df1.index.name = "foo"
assert df2.index.name is None
@@ -161,7 +137,7 @@ def test_array_interface(self, float_frame):
assert result.index is float_frame.index
assert result.columns is float_frame.columns
- self._assert_frame_equal(result, float_frame.apply(np.sqrt))
+ tm.assert_frame_equal(result, float_frame.apply(np.sqrt))
def test_get_agg_axis(self, float_frame):
cols = float_frame._get_agg_axis(0)
@@ -187,9 +163,9 @@ def test_nonzero(self, float_frame, float_string_frame):
assert not df.empty
def test_iteritems(self):
- df = self.klass([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
+ df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
for k, v in df.items():
- assert isinstance(v, self.klass._constructor_sliced)
+ assert isinstance(v, DataFrame._constructor_sliced)
def test_items(self):
# GH 17213, GH 13918
@@ -206,15 +182,15 @@ def test_iter(self, float_frame):
def test_iterrows(self, float_frame, float_string_frame):
for k, v in float_frame.iterrows():
exp = float_frame.loc[k]
- self._assert_series_equal(v, exp)
+ tm.assert_series_equal(v, exp)
for k, v in float_string_frame.iterrows():
exp = float_string_frame.loc[k]
- self._assert_series_equal(v, exp)
+ tm.assert_series_equal(v, exp)
def test_iterrows_iso8601(self):
# GH 19671
- s = self.klass(
+ s = DataFrame(
{
"non_iso8601": ["M1701", "M1802", "M1903", "M2004"],
"iso8601": date_range("2000-01-01", periods=4, freq="M"),
@@ -222,7 +198,7 @@ def test_iterrows_iso8601(self):
)
for k, v in s.iterrows():
exp = s.loc[k]
- self._assert_series_equal(v, exp)
+ tm.assert_series_equal(v, exp)
def test_iterrows_corner(self):
# gh-12222
@@ -248,19 +224,19 @@ def test_iterrows_corner(self):
def test_itertuples(self, float_frame):
for i, tup in enumerate(float_frame.itertuples()):
- s = self.klass._constructor_sliced(tup[1:])
+ s = DataFrame._constructor_sliced(tup[1:])
s.name = tup[0]
expected = float_frame.iloc[i, :].reset_index(drop=True)
- self._assert_series_equal(s, expected)
+ tm.assert_series_equal(s, expected)
- df = self.klass(
+ df = DataFrame(
{"floats": np.random.randn(5), "ints": range(5)}, columns=["floats", "ints"]
)
for tup in df.itertuples(index=False):
assert isinstance(tup[1], int)
- df = self.klass(data={"a": [1, 2, 3], "b": [4, 5, 6]})
+ df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
dfaa = df[["a", "a"]]
assert list(dfaa.itertuples()) == [(0, 1, 1), (1, 2, 2), (2, 3, 3)]
@@ -315,7 +291,7 @@ def test_sequence_like_with_categorical(self):
def test_len(self, float_frame):
assert len(float_frame) == len(float_frame.index)
- def test_values(self, float_frame, float_string_frame):
+ def test_values_mixed_dtypes(self, float_frame, float_string_frame):
frame = float_frame
arr = frame.values
@@ -332,7 +308,7 @@ def test_values(self, float_frame, float_string_frame):
arr = float_string_frame[["foo", "A"]].values
assert arr[0, 0] == "bar"
- df = self.klass({"complex": [1j, 2j, 3j], "real": [1, 2, 3]})
+ df = DataFrame({"complex": [1j, 2j, 3j], "real": [1, 2, 3]})
arr = df.values
assert arr[0, 0] == 1j
@@ -372,17 +348,17 @@ def test_transpose(self, float_frame):
# mixed type
index, data = tm.getMixedTypeDict()
- mixed = self.klass(data, index=index)
+ mixed = DataFrame(data, index=index)
mixed_T = mixed.T
for col, s in mixed_T.items():
assert s.dtype == np.object_
def test_swapaxes(self):
- df = self.klass(np.random.randn(10, 5))
- self._assert_frame_equal(df.T, df.swapaxes(0, 1))
- self._assert_frame_equal(df.T, df.swapaxes(1, 0))
- self._assert_frame_equal(df, df.swapaxes(0, 0))
+ df = DataFrame(np.random.randn(10, 5))
+ tm.assert_frame_equal(df.T, df.swapaxes(0, 1))
+ tm.assert_frame_equal(df.T, df.swapaxes(1, 0))
+ tm.assert_frame_equal(df, df.swapaxes(0, 0))
msg = (
"No axis named 2 for object type"
r" <class 'pandas.core(.sparse)?.frame.(Sparse)?DataFrame'>"
@@ -413,7 +389,7 @@ def test_more_values(self, float_string_frame):
assert values.shape[1] == len(float_string_frame.columns)
def test_repr_with_mi_nat(self, float_string_frame):
- df = self.klass(
+ df = DataFrame(
{"X": [1, 2]}, index=[[pd.NaT, pd.Timestamp("20130101")], ["a", "b"]]
)
result = repr(df)
@@ -430,18 +406,18 @@ def test_series_put_names(self, float_string_frame):
assert v.name == k
def test_empty_nonzero(self):
- df = self.klass([1, 2, 3])
+ df = DataFrame([1, 2, 3])
assert not df.empty
- df = self.klass(index=[1], columns=[1])
+ df = DataFrame(index=[1], columns=[1])
assert not df.empty
- df = self.klass(index=["a", "b"], columns=["c", "d"]).dropna()
+ df = DataFrame(index=["a", "b"], columns=["c", "d"]).dropna()
assert df.empty
assert df.T.empty
empty_frames = [
- self.klass(),
- self.klass(index=[1]),
- self.klass(columns=[1]),
- self.klass({1: []}),
+ DataFrame(),
+ DataFrame(index=[1]),
+ DataFrame(columns=[1]),
+ DataFrame({1: []}),
]
for df in empty_frames:
assert df.empty
@@ -449,7 +425,7 @@ def test_empty_nonzero(self):
def test_with_datetimelikes(self):
- df = self.klass(
+ df = DataFrame(
{
"A": date_range("20130101", periods=10),
"B": timedelta_range("1 day", periods=10),
@@ -458,20 +434,9 @@ def test_with_datetimelikes(self):
t = df.T
result = t.dtypes.value_counts()
- if self.klass is DataFrame:
- expected = Series({np.dtype("object"): 10})
- else:
- expected = Series({SparseDtype(dtype=object): 10})
+ expected = Series({np.dtype("object"): 10})
tm.assert_series_equal(result, expected)
-
-class TestDataFrameMisc(SharedWithSparse):
-
- klass = DataFrame
- # SharedWithSparse tests use generic, klass-agnostic assertion
- _assert_frame_equal = staticmethod(tm.assert_frame_equal)
- _assert_series_equal = staticmethod(tm.assert_series_equal)
-
def test_values(self, float_frame):
float_frame.values[:, 0] = 5.0
assert (float_frame.values[:, 0] == 5).all()
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 1e4757ffecb5d..42b2c37638c76 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -24,18 +24,7 @@
import pandas.io.formats.printing as printing
-class SharedWithSparse:
- """
- A collection of tests Series and SparseSeries can share.
-
- In generic tests on this class, use ``self._assert_series_equal()``
- which is implemented in sub-classes.
- """
-
- def _assert_series_equal(self, left, right):
- """Dispatch to series class dependent assertion"""
- raise NotImplementedError
-
+class TestSeriesMisc:
def test_scalarop_preserve_name(self, datetime_series):
result = datetime_series * 2
assert result.name == datetime_series.name
@@ -132,19 +121,19 @@ def test_sort_index_name(self, datetime_series):
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
- result = self.series_klass(d)
- expected = self.series_klass(d, index=sorted(d.keys()))
- self._assert_series_equal(result, expected)
+ result = Series(d)
+ expected = Series(d, index=sorted(d.keys()))
+ tm.assert_series_equal(result, expected)
- result = self.series_klass(d, index=["b", "c", "d", "a"])
- expected = self.series_klass([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
- self._assert_series_equal(result, expected)
+ result = Series(d, index=["b", "c", "d", "a"])
+ expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
+ tm.assert_series_equal(result, expected)
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
- series = self.series_klass(data)
- expected = self.series_klass(dict(data.items()))
- self._assert_series_equal(series, expected)
+ series = Series(data)
+ expected = Series(dict(data.items()))
+ tm.assert_series_equal(series, expected)
def test_constructor_ordereddict(self):
# GH3283
@@ -152,44 +141,44 @@ def test_constructor_ordereddict(self):
("col{i}".format(i=i), np.random.random()) for i in range(12)
)
- series = self.series_klass(data)
- expected = self.series_klass(list(data.values()), list(data.keys()))
- self._assert_series_equal(series, expected)
+ series = Series(data)
+ expected = Series(list(data.values()), list(data.keys()))
+ tm.assert_series_equal(series, expected)
# Test with subclass
class A(OrderedDict):
pass
- series = self.series_klass(A(data))
- self._assert_series_equal(series, expected)
+ series = Series(A(data))
+ tm.assert_series_equal(series, expected)
def test_constructor_dict_multiindex(self):
d = {("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0}
_d = sorted(d.items())
- result = self.series_klass(d)
- expected = self.series_klass(
+ result = Series(d)
+ expected = Series(
[x[1] for x in _d], index=pd.MultiIndex.from_tuples([x[0] for x in _d])
)
- self._assert_series_equal(result, expected)
+ tm.assert_series_equal(result, expected)
d["z"] = 111.0
_d.insert(0, ("z", d["z"]))
- result = self.series_klass(d)
- expected = self.series_klass(
+ result = Series(d)
+ expected = Series(
[x[1] for x in _d], index=pd.Index([x[0] for x in _d], tupleize_cols=False)
)
result = result.reindex(index=expected.index)
- self._assert_series_equal(result, expected)
+ tm.assert_series_equal(result, expected)
def test_constructor_dict_timedelta_index(self):
# GH #12169 : Resample category data with timedelta index
# construct Series from dict as data and TimedeltaIndex as index
# will result NaN in result Series data
- expected = self.series_klass(
+ expected = Series(
data=["A", "B", "C"], index=pd.to_timedelta([0, 10, 20], unit="s")
)
- result = self.series_klass(
+ result = Series(
data={
pd.to_timedelta(0, unit="s"): "A",
pd.to_timedelta(10, unit="s"): "B",
@@ -197,20 +186,13 @@ def test_constructor_dict_timedelta_index(self):
},
index=pd.to_timedelta([0, 10, 20], unit="s"),
)
- self._assert_series_equal(result, expected)
+ tm.assert_series_equal(result, expected)
def test_sparse_accessor_updates_on_inplace(self):
s = pd.Series([1, 1, 2, 3], dtype="Sparse[int]")
s.drop([0, 1], inplace=True)
assert s.sparse.density == 1.0
-
-class TestSeriesMisc(SharedWithSparse):
-
- series_klass = Series
- # SharedWithSparse tests use generic, series_klass-agnostic assertion
- _assert_series_equal = staticmethod(tm.assert_series_equal)
-
def test_tab_completion(self):
# GH 9910
s = Series(list("abcd"))
| broken off from #29985. | https://api.github.com/repos/pandas-dev/pandas/pulls/30007 | 2019-12-03T17:25:39Z | 2019-12-03T18:54:14Z | 2019-12-03T18:38:37Z | 2019-12-03T18:57:37Z |
DOC: whatsnew notes that slipped through the cracks | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 470209a7f4a33..e533ebb0d0084 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -506,6 +506,9 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
- Removed the previously deprecated :meth:`Series.get_value`, :meth:`Series.set_value`, :meth:`DataFrame.get_value`, :meth:`DataFrame.set_value` (:issue:`17739`)
- Changed the the default value of `inplace` in :meth:`DataFrame.set_index` and :meth:`Series.set_axis`. It now defaults to ``False`` (:issue:`27600`)
- Removed the previously deprecated :attr:`Series.cat.categorical`, :attr:`Series.cat.index`, :attr:`Series.cat.name` (:issue:`24751`)
+- Removed the previously deprecated ``time_rule`` keyword from (non-public) :func:`offsets.generate_range`, which has been moved to :func:`core.arrays._ranges.generate_range` (:issue:`24157`)
+- :meth:`DataFrame.loc` or :meth:`Series.loc` with listlike indexers and missing labels will no longer reindex (:issue:`17295`)
+- :meth:`DataFrame.to_excel` and :meth:`Series.to_excel` with non-existent columns will no longer reindex (:issue:`17295`)
- Removed the previously deprecated "by" keyword from :meth:`DataFrame.sort_index`, use :meth:`DataFrame.sort_values` instead (:issue:`10726`)
- Removed support for nested renaming in :meth:`DataFrame.aggregate`, :meth:`Series.aggregate`, :meth:`DataFrameGroupBy.aggregate`, :meth:`SeriesGroupBy.aggregate`, :meth:`Rolling.aggregate` (:issue:`18529`)
- Passing ``datetime64`` data to :class:`TimedeltaIndex` or ``timedelta64`` data to ``DatetimeIndex`` now raises ``TypeError`` (:issue:`23539`, :issue:`23937`)
| split off from #29985. | https://api.github.com/repos/pandas-dev/pandas/pulls/30006 | 2019-12-03T17:24:12Z | 2019-12-03T18:36:28Z | 2019-12-03T18:36:28Z | 2019-12-03T18:54:48Z |
DEPR: get_duplicates | diff --git a/doc/redirects.csv b/doc/redirects.csv
index 4d171bc3d400d..599ad6d28a8f5 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -636,7 +636,6 @@ generated/pandas.Index.equals,../reference/api/pandas.Index.equals
generated/pandas.Index.factorize,../reference/api/pandas.Index.factorize
generated/pandas.Index.fillna,../reference/api/pandas.Index.fillna
generated/pandas.Index.format,../reference/api/pandas.Index.format
-generated/pandas.Index.get_duplicates,../reference/api/pandas.Index.get_duplicates
generated/pandas.Index.get_indexer_for,../reference/api/pandas.Index.get_indexer_for
generated/pandas.Index.get_indexer,../reference/api/pandas.Index.get_indexer
generated/pandas.Index.get_indexer_non_unique,../reference/api/pandas.Index.get_indexer_non_unique
diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst
index c155b5e3fcb37..23f8424213574 100644
--- a/doc/source/reference/indexing.rst
+++ b/doc/source/reference/indexing.rst
@@ -152,7 +152,6 @@ Selecting
Index.asof
Index.asof_locs
Index.contains
- Index.get_duplicates
Index.get_indexer
Index.get_indexer_for
Index.get_indexer_non_unique
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 470209a7f4a33..8b1d4c91f3be1 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -548,7 +548,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
- Removed the previously properties :attr:`DataFrame.is_copy`, :attr:`Series.is_copy` (:issue:`18812`)
- Removed the previously deprecated :meth:`DataFrame.get_ftype_counts`, :meth:`Series.get_ftype_counts` (:issue:`18243`)
- Removed the previously deprecated :meth:`DataFrame.ftypes`, :meth:`Series.ftypes`, :meth:`Series.ftype` (:issue:`26744`)
-- Removed the previously deprecated :meth:`Index.get_duplicated`, use ``idx[idx.duplicated()].unique()`` instead (:issue:`20239`)
+- Removed the previously deprecated :meth:`Index.get_duplicates`, use ``idx[idx.duplicated()].unique()`` instead (:issue:`20239`)
- Removed the previously deprecated :meth:`Series.clip_upper`, :meth:`Series.clip_lower`, :meth:`DataFrame.clip_upper`, :meth:`DataFrame.clip_lower` (:issue:`24203`)
- Removed the ability to alter :attr:`DatetimeIndex.freq`, :attr:`TimedeltaIndex.freq`, or :attr:`PeriodIndex.freq` (:issue:`20772`)
- Removed the previously deprecated :attr:`DatetimeIndex.offset` (:issue:`20730`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 9f96d09b0d3dd..b99e60f8c6278 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2138,68 +2138,6 @@ def duplicated(self, keep="first"):
"""
return super().duplicated(keep=keep)
- def get_duplicates(self):
- """
- Extract duplicated index elements.
-
- .. deprecated:: 0.23.0
- Use idx[idx.duplicated()].unique() instead
-
- Returns a sorted list of index elements which appear more than once in
- the index.
-
- Returns
- -------
- array-like
- List of duplicated indexes.
-
- See Also
- --------
- Index.duplicated : Return boolean array denoting duplicates.
- Index.drop_duplicates : Return Index with duplicates removed.
-
- Examples
- --------
-
- Works on different Index of types.
-
- >>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates() # doctest: +SKIP
- [2, 3]
-
- Note that for a DatetimeIndex, it does not return a list but a new
- DatetimeIndex:
-
- >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',
- ... '2018-01-03', '2018-01-04', '2018-01-04'],
- ... format='%Y-%m-%d')
- >>> pd.Index(dates).get_duplicates() # doctest: +SKIP
- DatetimeIndex(['2018-01-03', '2018-01-04'],
- dtype='datetime64[ns]', freq=None)
-
- Sorts duplicated elements even when indexes are unordered.
-
- >>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates() # doctest: +SKIP
- [2, 3]
-
- Return empty array-like structure when all elements are unique.
-
- >>> pd.Index([1, 2, 3, 4]).get_duplicates() # doctest: +SKIP
- []
- >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],
- ... format='%Y-%m-%d')
- >>> pd.Index(dates).get_duplicates() # doctest: +SKIP
- DatetimeIndex([], dtype='datetime64[ns]', freq=None)
- """
- warnings.warn(
- "'get_duplicates' is deprecated and will be removed in "
- "a future release. You can use "
- "idx[idx.duplicated()].unique() instead",
- FutureWarning,
- stacklevel=2,
- )
-
- return self[self.duplicated()].unique()
-
def _get_unique_index(self, dropna=False):
"""
Returns an index containing unique values.
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 4a38e3a146c0e..e3fa6a8321d5f 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -188,25 +188,6 @@ def test_string_index_series_name_converted(self):
result = df.T["1/3/2000"]
assert result.name == df.index[2]
- def test_get_duplicates(self):
- idx = DatetimeIndex(
- [
- "2000-01-01",
- "2000-01-02",
- "2000-01-02",
- "2000-01-03",
- "2000-01-03",
- "2000-01-04",
- ]
- )
-
- with tm.assert_produces_warning(FutureWarning):
- # Deprecated - see GH20239
- result = idx.get_duplicates()
-
- ex = DatetimeIndex(["2000-01-02", "2000-01-03"])
- tm.assert_index_equal(result, ex)
-
def test_argmin_argmax(self):
idx = DatetimeIndex(["2000-01-04", "2000-01-01", "2000-01-02"])
assert idx.argmin() == 1
diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py
index 518bd093b23b1..ee1f068b92df1 100644
--- a/pandas/tests/indexes/multi/test_duplicates.py
+++ b/pandas/tests/indexes/multi/test_duplicates.py
@@ -251,16 +251,13 @@ def test_duplicated_large(keep):
tm.assert_numpy_array_equal(result, expected)
-def test_get_duplicates():
+def test_duplicated2():
+ # TODO: more informative test name
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
- with tm.assert_produces_warning(FutureWarning):
- # Deprecated - see GH20239
- assert mi.get_duplicates().equals(MultiIndex.from_arrays([[], []]))
-
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(2, dtype="bool"))
for n in range(1, 6): # 1st level shape
@@ -274,10 +271,6 @@ def test_get_duplicates():
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
- with tm.assert_produces_warning(FutureWarning):
- # Deprecated - see GH20239
- assert mi.get_duplicates().equals(MultiIndex.from_arrays([[], []]))
-
tm.assert_numpy_array_equal(
mi.duplicated(), np.zeros(len(mi), dtype="bool")
)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 77d81a4a9566e..c0c677b076e2c 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -2394,11 +2394,6 @@ def test_cached_properties_not_settable(self):
with pytest.raises(AttributeError, match="Can't set attribute"):
index.is_unique = False
- def test_get_duplicates_deprecated(self):
- index = pd.Index([1, 2, 3])
- with tm.assert_produces_warning(FutureWarning):
- index.get_duplicates()
-
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip("IPython", minversion="6.0.0")
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index d59b6c18f6042..a62bd7f39e039 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -179,16 +179,6 @@ def test_sort_values(self):
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1]), check_dtype=False)
- def test_get_duplicates(self):
- idx = TimedeltaIndex(["1 day", "2 day", "2 day", "3 day", "3day", "4day"])
-
- with tm.assert_produces_warning(FutureWarning):
- # Deprecated - see GH20239
- result = idx.get_duplicates()
-
- ex = TimedeltaIndex(["2 day", "3day"])
- tm.assert_index_equal(result, ex)
-
def test_argmin_argmax(self):
idx = TimedeltaIndex(["1 day 00:00:05", "1 day 00:00:01", "1 day 00:00:02"])
assert idx.argmin() == 1
| the need for this looks like the result of a typo on my part | https://api.github.com/repos/pandas-dev/pandas/pulls/30004 | 2019-12-03T16:30:23Z | 2019-12-03T18:35:57Z | 2019-12-03T18:35:57Z | 2019-12-03T18:53:53Z |
DEPR: plotting deprecations | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 5c9543580be26..8e6f9ba826d82 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -499,6 +499,10 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
**Other removals**
+- Removed the previously deprecated :func:`pandas.plotting._matplotlib.tsplot`, use :meth:`Series.plot` instead (:issue:`19980`)
+- :func:`pandas.tseries.converter.register` has been moved to :func:`pandas.plotting.register_matplotlib_converters` (:issue:`18307`)
+- :meth:`Series.plot` no longer accepts positional arguments, pass keyword arguments instead (:issue:`30003`)
+- :meth:`DataFrame.hist` and :meth:`Series.hist` no longer allow ``figsize="default"``, specify figure size by passing a tuple instead (:issue:`30003`)
- Floordiv of integer-dtyped array by :class:`Timedelta` now raises ``TypeError`` (:issue:`21036`)
- :func:`pandas.api.types.infer_dtype` argument ``skipna`` defaults to ``True`` instead of ``False`` (:issue:`24050`)
- Removed the previously deprecated :meth:`Index.summary` (:issue:`18217`)
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 375e6fe2b02c7..dd907457f7c32 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -1,5 +1,4 @@
import importlib
-import warnings
from pandas._config import get_option
@@ -752,7 +751,7 @@ def _get_call_args(backend_name, data, args, kwargs):
f"Use `Series.plot({keyword_args})` instead of "
f"`Series.plot({positional_args})`."
)
- warnings.warn(msg, FutureWarning, stacklevel=3)
+ raise TypeError(msg)
pos_args = {name: value for value, (name, _) in zip(args, arg_def)}
if backend_name == "pandas.plotting._matplotlib":
diff --git a/pandas/plotting/_matplotlib/__init__.py b/pandas/plotting/_matplotlib/__init__.py
index 206600ad37acc..f9a692b0559ca 100644
--- a/pandas/plotting/_matplotlib/__init__.py
+++ b/pandas/plotting/_matplotlib/__init__.py
@@ -24,7 +24,6 @@
radviz,
scatter_matrix,
)
-from pandas.plotting._matplotlib.timeseries import tsplot
from pandas.plotting._matplotlib.tools import table
PLOT_CLASSES = {
@@ -66,7 +65,6 @@ def plot(data, kind, **kwargs):
"boxplot",
"boxplot_frame",
"boxplot_frame_groupby",
- "tsplot",
"table",
"andrews_curves",
"autocorrelation_plot",
diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py
index b60e8fa8a3f7c..dc9eede7e4d52 100644
--- a/pandas/plotting/_matplotlib/hist.py
+++ b/pandas/plotting/_matplotlib/hist.py
@@ -1,5 +1,3 @@
-import warnings
-
import numpy as np
from pandas.core.dtypes.common import is_integer, is_list_like
@@ -182,12 +180,10 @@ def _grouped_plot(
if figsize == "default":
# allowed to specify mpl default with 'default'
- warnings.warn(
- "figsize='default' is deprecated. Specify figure size by tuple instead",
- FutureWarning,
- stacklevel=5,
+ raise ValueError(
+ "figsize='default' is no longer supported. "
+ "Specify figure size by tuple instead"
)
- figsize = None
grouped = data.groupby(by)
if column is not None:
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py
index fa9585e1fc229..dd048114142f3 100644
--- a/pandas/plotting/_matplotlib/timeseries.py
+++ b/pandas/plotting/_matplotlib/timeseries.py
@@ -1,7 +1,6 @@
# TODO: Use the fact that axis can have units to simplify the process
import functools
-import warnings
import numpy as np
@@ -25,7 +24,6 @@
TimeSeries_DateFormatter,
TimeSeries_DateLocator,
TimeSeries_TimedeltaFormatter,
- register_pandas_matplotlib_converters,
)
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
@@ -34,49 +32,6 @@
# Plotting functions and monkey patches
-@register_pandas_matplotlib_converters
-def tsplot(series, plotf, ax=None, **kwargs):
- """
- Plots a Series on the given Matplotlib axes or the current axes
-
- Parameters
- ----------
- axes : Axes
- series : Series
-
- Notes
- _____
- Supports same kwargs as Axes.plot
-
-
- .. deprecated:: 0.23.0
- Use Series.plot() instead
- """
- import matplotlib.pyplot as plt
-
- warnings.warn(
- "'tsplot' is deprecated and will be removed in a "
- "future version. Please use Series.plot() instead.",
- FutureWarning,
- stacklevel=3,
- )
-
- # Used inferred freq is possible, need a test case for inferred
- if ax is None:
- ax = plt.gca()
-
- freq, series = _maybe_resample(series, ax, kwargs)
-
- # Set ax with freq info
- _decorate_axes(ax, freq, kwargs)
- ax._plot_data.append((series, plotf, kwargs))
- lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs)
-
- # set date formatter, locators and rescale limits
- format_dateaxis(ax, ax.freq, series.index)
- return lines
-
-
def _maybe_resample(series, ax, kwargs):
# resample against axes freq if necessary
freq, ax_freq = _get_freq(ax, series)
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index c2bdea39ae30d..fb9ad57626600 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -139,15 +139,6 @@ def test_registry_resets(self):
for k, v in original.items():
units.registry[k] = v
- def test_old_import_warns(self):
- with tm.assert_produces_warning(FutureWarning) as w:
- from pandas.tseries import converter
-
- converter.register()
-
- assert len(w)
- assert "pandas.plotting.register_matplotlib_converters" in str(w[0].message)
-
class TestDateTimeConverter:
def setup_method(self, method):
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index 14cb2bc9d7b62..6c1c7dfd1a4a4 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -313,7 +313,8 @@ def test_grouped_hist_legacy(self):
with pytest.raises(AttributeError):
_grouped_hist(df.A, by=df.C, foo="bar")
- with tm.assert_produces_warning(FutureWarning):
+ msg = "Specify figure size by tuple instead"
+ with pytest.raises(ValueError, match=msg):
df.hist(by="C", figsize="default")
@pytest.mark.slow
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index c51cd0e92eb3c..1e59fbf928876 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -32,14 +32,9 @@ def test_get_accessor_args():
with pytest.raises(TypeError, match=msg):
func(backend_name="", data=[], args=[], kwargs={})
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- x, y, kind, kwargs = func(
- backend_name="", data=Series(), args=["line", None], kwargs={}
- )
- assert x is None
- assert y is None
- assert kind == "line"
- assert kwargs == {"ax": None}
+ msg = "should not be called with positional arguments"
+ with pytest.raises(TypeError, match=msg):
+ func(backend_name="", data=Series(), args=["line", None], kwargs={})
x, y, kind, kwargs = func(
backend_name="",
diff --git a/pandas/tseries/converter.py b/pandas/tseries/converter.py
deleted file mode 100644
index ac80215e01ed5..0000000000000
--- a/pandas/tseries/converter.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# flake8: noqa
-import warnings
-
-# TODO `_matplotlib` module should be private, so the plotting backend
-# can be changed. Decide whether all these should be public and exposed
-# in `pandas.plotting`, or remove from here (I guess they are here for
-# legacy reasons)
-from pandas.plotting._matplotlib.converter import (
- DatetimeConverter,
- MilliSecondLocator,
- PandasAutoDateFormatter,
- PandasAutoDateLocator,
- PeriodConverter,
- TimeConverter,
- TimeFormatter,
- TimeSeries_DateFormatter,
- TimeSeries_DateLocator,
- get_datevalue,
- get_finder,
- time2num,
-)
-
-
-def register():
- from pandas.plotting import register_matplotlib_converters
-
- msg = (
- "'pandas.tseries.converter.register' has been moved and renamed to "
- "'pandas.plotting.register_matplotlib_converters'. "
- )
- warnings.warn(msg, FutureWarning, stacklevel=2)
- register_matplotlib_converters()
| Input/help needed on two outstanding questions:
1) Do we just remove `pandas.tseries.converter` after this?
2) Some of these deprecations are old enough that I couldn't track them down via the blame, so they currently have "(:issue:`????`)" in the whatsnew. | https://api.github.com/repos/pandas-dev/pandas/pulls/30003 | 2019-12-03T16:27:10Z | 2019-12-05T15:41:29Z | 2019-12-05T15:41:29Z | 2019-12-05T18:06:24Z |
TYP: some types for pandas/io/formats/csvs.py | diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index f0493036b934a..ae5d1d30bcddb 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -5,6 +5,7 @@
import csv as csvlib
from io import StringIO
import os
+from typing import Any, Dict, List
import warnings
from zipfile import ZipFile
@@ -187,7 +188,7 @@ def save(self):
close = True
try:
- writer_kwargs = dict(
+ writer_kwargs: Dict[str, Any] = dict(
lineterminator=self.line_terminator,
delimiter=self.sep,
quoting=self.quoting,
@@ -198,8 +199,7 @@ def save(self):
if self.encoding == "ascii":
self.writer = csvlib.writer(f, **writer_kwargs)
else:
- writer_kwargs["encoding"] = self.encoding
- self.writer = UnicodeWriter(f, **writer_kwargs)
+ self.writer = UnicodeWriter(f, encoding=self.encoding, **writer_kwargs)
self._save()
@@ -233,7 +233,7 @@ def _save_header(self):
cols = self.cols
has_mi_columns = self.has_mi_columns
header = self.header
- encoded_labels = []
+ encoded_labels: List[str] = []
has_aliases = isinstance(header, (tuple, list, np.ndarray, ABCIndexClass))
if not (has_aliases or self.header):
| broken off #28339
| https://api.github.com/repos/pandas-dev/pandas/pulls/30000 | 2019-12-03T15:37:08Z | 2019-12-05T15:47:31Z | 2019-12-05T15:47:31Z | 2019-12-05T15:54:48Z |
TYP: some (mainly primitive) types for tseries.frequencies | diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 898060d011372..9162e6a415b34 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -1,6 +1,6 @@
from datetime import timedelta
import re
-from typing import Dict
+from typing import Dict, Optional
import numpy as np
from pytz import AmbiguousTimeError
@@ -52,8 +52,10 @@
_offset_map: Dict[str, DateOffset] = {}
-def get_period_alias(offset_str):
- """ alias to closest period strings BQ->Q etc"""
+def get_period_alias(offset_str: str) -> Optional[str]:
+ """
+ Alias to closest period strings BQ->Q etc.
+ """
return _offset_to_period_map.get(offset_str, None)
@@ -68,7 +70,7 @@ def get_period_alias(offset_str):
}
-def to_offset(freq):
+def to_offset(freq) -> Optional[DateOffset]:
"""
Return DateOffset object from string or tuple representation
or datetime.timedelta object.
@@ -179,9 +181,9 @@ def to_offset(freq):
return delta
-def get_offset(name):
+def get_offset(name: str) -> DateOffset:
"""
- Return DateOffset object associated with rule name
+ Return DateOffset object associated with rule name.
Examples
--------
@@ -214,7 +216,7 @@ def get_offset(name):
# Period codes
-def infer_freq(index, warn=True):
+def infer_freq(index, warn: bool = True) -> Optional[str]:
"""
Infer the most likely frequency given the input index. If the frequency is
uncertain, a warning will be printed.
@@ -247,6 +249,7 @@ def infer_freq(index, warn=True):
)
index = values
+ inferer: _FrequencyInferer
if is_period_arraylike(index):
raise TypeError(
"PeriodIndex given. Check the `freq` attribute "
@@ -280,7 +283,7 @@ class _FrequencyInferer:
Not sure if I can avoid the state machine here
"""
- def __init__(self, index, warn=True):
+ def __init__(self, index, warn: bool = True):
self.index = index
self.values = index.asi8
@@ -315,7 +318,7 @@ def is_unique(self) -> bool:
def is_unique_asi8(self):
return len(self.deltas_asi8) == 1
- def get_freq(self):
+ def get_freq(self) -> Optional[str]:
"""
Find the appropriate frequency string to describe the inferred
frequency of self.values
@@ -388,7 +391,7 @@ def mdiffs(self):
def ydiffs(self):
return unique_deltas(self.fields["Y"].astype("i8"))
- def _infer_daily_rule(self):
+ def _infer_daily_rule(self) -> Optional[str]:
annual_rule = self._get_annual_rule()
if annual_rule:
nyears = self.ydiffs[0]
@@ -424,7 +427,9 @@ def _infer_daily_rule(self):
if wom_rule:
return wom_rule
- def _get_annual_rule(self):
+ return None
+
+ def _get_annual_rule(self) -> Optional[str]:
if len(self.ydiffs) > 1:
return None
@@ -434,7 +439,7 @@ def _get_annual_rule(self):
pos_check = self.month_position_check()
return {"cs": "AS", "bs": "BAS", "ce": "A", "be": "BA"}.get(pos_check)
- def _get_quarterly_rule(self):
+ def _get_quarterly_rule(self) -> Optional[str]:
if len(self.mdiffs) > 1:
return None
@@ -444,13 +449,13 @@ def _get_quarterly_rule(self):
pos_check = self.month_position_check()
return {"cs": "QS", "bs": "BQS", "ce": "Q", "be": "BQ"}.get(pos_check)
- def _get_monthly_rule(self):
+ def _get_monthly_rule(self) -> Optional[str]:
if len(self.mdiffs) > 1:
return None
pos_check = self.month_position_check()
return {"cs": "MS", "bs": "BMS", "ce": "M", "be": "BM"}.get(pos_check)
- def _is_business_daily(self):
+ def _is_business_daily(self) -> bool:
# quick check: cannot be business daily
if self.day_deltas != [1, 3]:
return False
@@ -465,7 +470,7 @@ def _is_business_daily(self):
| ((weekdays > 0) & (weekdays <= 4) & (shifts == 1))
)
- def _get_wom_rule(self):
+ def _get_wom_rule(self) -> Optional[str]:
# wdiffs = unique(np.diff(self.index.week))
# We also need -47, -49, -48 to catch index spanning year boundary
# if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all():
@@ -501,11 +506,11 @@ def _infer_daily_rule(self):
return _maybe_add_count("D", days)
-def _is_multiple(us, mult):
+def _is_multiple(us, mult: int) -> bool:
return us % mult == 0
-def _maybe_add_count(base, count):
+def _maybe_add_count(base: str, count: float) -> str:
if count != 1:
assert count == int(count)
count = int(count)
| https://api.github.com/repos/pandas-dev/pandas/pulls/29999 | 2019-12-03T14:47:13Z | 2019-12-03T16:24:45Z | 2019-12-03T16:24:45Z | 2019-12-03T20:51:58Z | |
TST: update test_nanops setup_method | diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index e5d963a307502..c207c803510ca 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -24,7 +24,7 @@ def setup_method(self, method):
np.random.seed(11235)
nanops._USE_BOTTLENECK = False
- arr_shape = (11, 7, 5)
+ arr_shape = (11, 7)
self.arr_float = np.random.randn(*arr_shape)
self.arr_float1 = np.random.randn(*arr_shape)
@@ -68,21 +68,21 @@ def setup_method(self, method):
self.arr_nan_infj = self.arr_inf * 1j
self.arr_complex_nan_infj = np.vstack([self.arr_complex, self.arr_nan_infj])
- self.arr_float_2d = self.arr_float[:, :, 0]
- self.arr_float1_2d = self.arr_float1[:, :, 0]
+ self.arr_float_2d = self.arr_float
+ self.arr_float1_2d = self.arr_float1
- self.arr_nan_2d = self.arr_nan[:, :, 0]
- self.arr_float_nan_2d = self.arr_float_nan[:, :, 0]
- self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0]
- self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0]
+ self.arr_nan_2d = self.arr_nan
+ self.arr_float_nan_2d = self.arr_float_nan
+ self.arr_float1_nan_2d = self.arr_float1_nan
+ self.arr_nan_float1_2d = self.arr_nan_float1
- self.arr_float_1d = self.arr_float[:, 0, 0]
- self.arr_float1_1d = self.arr_float1[:, 0, 0]
+ self.arr_float_1d = self.arr_float[:, 0]
+ self.arr_float1_1d = self.arr_float1[:, 0]
- self.arr_nan_1d = self.arr_nan[:, 0, 0]
- self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0]
- self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0]
- self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0]
+ self.arr_nan_1d = self.arr_nan[:, 0]
+ self.arr_float_nan_1d = self.arr_float_nan[:, 0]
+ self.arr_float1_nan_1d = self.arr_float1_nan[:, 0]
+ self.arr_nan_float1_1d = self.arr_nan_float1[:, 0]
def teardown_method(self, method):
nanops._USE_BOTTLENECK = use_bn
| broken off #28339
| https://api.github.com/repos/pandas-dev/pandas/pulls/29998 | 2019-12-03T13:56:28Z | 2019-12-03T16:26:49Z | 2019-12-03T16:26:49Z | 2019-12-03T20:52:30Z |
TST: use index_or_series fixture | diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index 1ba0930c06334..29d425039551a 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -171,9 +171,9 @@ class TestDatetime64SeriesComparison:
],
)
@pytest.mark.parametrize("reverse", [True, False])
- @pytest.mark.parametrize("box", [Series, pd.Index])
@pytest.mark.parametrize("dtype", [None, object])
- def test_nat_comparisons(self, dtype, box, reverse, pair):
+ def test_nat_comparisons(self, dtype, index_or_series, reverse, pair):
+ box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
@@ -2383,14 +2383,16 @@ def test_dti_add_series(self, tz, names):
result4 = index + ser.values
tm.assert_index_equal(result4, expected)
- @pytest.mark.parametrize("other_box", [pd.Index, Series])
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
)
- def test_dti_addsub_offset_arraylike(self, tz_naive_fixture, names, op, other_box):
+ def test_dti_addsub_offset_arraylike(
+ self, tz_naive_fixture, names, op, index_or_series
+ ):
# GH#18849, GH#19744
box = pd.Index
+ other_box = index_or_series
from .test_timedelta64 import get_upcast_box
tz = tz_naive_fixture
| broken off #28339, xref #29725 | https://api.github.com/repos/pandas-dev/pandas/pulls/29996 | 2019-12-03T13:51:56Z | 2019-12-03T16:29:15Z | 2019-12-03T16:29:14Z | 2019-12-03T20:54:03Z |
CLN: some imports in pandas/tests/frame/test_dtypes.py | diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index af5a765aa2729..6709cdcb1eebf 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -4,7 +4,7 @@
import numpy as np
import pytest
-from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
+from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype, IntervalDtype
import pandas as pd
from pandas import (
@@ -699,14 +699,7 @@ def test_astype_categorical(self, dtype):
expected = DataFrame({k: Categorical(d[k], dtype=dtype) for k in d})
tm.assert_frame_equal(result, expected)
- @pytest.mark.parametrize(
- "cls",
- [
- pd.api.types.CategoricalDtype,
- pd.api.types.DatetimeTZDtype,
- pd.api.types.IntervalDtype,
- ],
- )
+ @pytest.mark.parametrize("cls", [CategoricalDtype, DatetimeTZDtype, IntervalDtype])
def test_astype_categoricaldtype_class_raises(self, cls):
df = DataFrame({"A": ["a", "a", "b", "c"]})
xpr = "Expected an instance of {}".format(cls.__name__)
| broken off #28339
| https://api.github.com/repos/pandas-dev/pandas/pulls/29995 | 2019-12-03T13:46:14Z | 2019-12-03T16:30:00Z | 2019-12-03T16:30:00Z | 2019-12-03T20:54:56Z |
TST: add some message checks in test_query_eval | diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index abd8ef98ff871..d577ff7c71277 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -479,11 +479,13 @@ def test_query_scope(self):
tm.assert_frame_equal(res, expected)
# no local variable c
- with pytest.raises(UndefinedVariableError):
+ with pytest.raises(
+ UndefinedVariableError, match="local variable 'c' is not defined"
+ ):
df.query("@a > b > @c", engine=engine, parser=parser)
# no column named 'c'
- with pytest.raises(UndefinedVariableError):
+ with pytest.raises(UndefinedVariableError, match="name 'c' is not defined"):
df.query("@a > b > c", engine=engine, parser=parser)
def test_query_doesnt_pickup_local(self):
@@ -494,7 +496,7 @@ def test_query_doesnt_pickup_local(self):
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
# we don't pick up the local 'sin'
- with pytest.raises(UndefinedVariableError):
+ with pytest.raises(UndefinedVariableError, match="name 'sin' is not defined"):
df.query("sin > 5", engine=engine, parser=parser)
def test_query_builtin(self):
@@ -588,7 +590,7 @@ def test_nested_raises_on_local_self_reference(self):
df = DataFrame(np.random.randn(5, 3))
# can't reference ourself b/c we're a local so @ is necessary
- with pytest.raises(UndefinedVariableError):
+ with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"):
df.query("df > 0", engine=self.engine, parser=self.parser)
def test_local_syntax(self):
@@ -651,9 +653,9 @@ def test_query_undefined_local(self):
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.rand(10, 2), columns=list("ab"))
- msg = "local variable 'c' is not defined"
-
- with pytest.raises(UndefinedVariableError, match=msg):
+ with pytest.raises(
+ UndefinedVariableError, match="local variable 'c' is not defined"
+ ):
df.query("a == @c", engine=engine, parser=parser)
def test_index_resolvers_come_after_columns_with_the_same_name(self):
@@ -784,7 +786,7 @@ def test_nested_scope(self):
with pytest.raises(SyntaxError):
df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser)
- with pytest.raises(UndefinedVariableError):
+ with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"):
df.query("(df>0) & (df2>0)", engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0)]
| https://api.github.com/repos/pandas-dev/pandas/pulls/29994 | 2019-12-03T13:42:31Z | 2019-12-03T18:40:45Z | 2019-12-03T18:40:44Z | 2019-12-03T20:58:44Z | |
CI: re-enable downstream test for geopandas | diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index dc88ebe1f7f8e..ea128c8c3a422 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -102,10 +102,7 @@ def test_pandas_datareader():
# importing from pandas, Cython import warning
-@pytest.mark.filterwarnings("ignore:The 'warn':DeprecationWarning")
-@pytest.mark.filterwarnings("ignore:pandas.util:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
-@pytest.mark.skip(reason="gh-25778: geopandas stack issue")
def test_geopandas():
geopandas = import_module("geopandas") # noqa
| Closes https://github.com/pandas-dev/pandas/issues/25778 | https://api.github.com/repos/pandas-dev/pandas/pulls/29993 | 2019-12-03T12:11:02Z | 2019-12-03T13:39:55Z | 2019-12-03T13:39:55Z | 2019-12-03T14:28:24Z |
TYP: some (mainly primitive) types for core.generic | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e19bf9c1c39ea..1144a1f7da1a6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -18,6 +18,7 @@
Optional,
Sequence,
Set,
+ Tuple,
Union,
)
import warnings
@@ -66,7 +67,7 @@
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
-from pandas._typing import Dtype, FilePathOrBuffer, JSONSerializable
+from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries, JSONSerializable
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
@@ -362,7 +363,7 @@ def _construct_axes_dict_from(self, axes, **kwargs):
return d
def _construct_axes_from_arguments(
- self, args, kwargs, require_all=False, sentinel=None
+ self, args, kwargs, require_all: bool = False, sentinel=None
):
"""Construct and returns axes if supplied in args/kwargs.
@@ -509,7 +510,7 @@ def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
- def shape(self):
+ def shape(self) -> Tuple[int, ...]:
"""
Return a tuple of axis dimensions
"""
@@ -572,12 +573,12 @@ def size(self):
return np.prod(self.shape)
@property
- def _selected_obj(self):
+ def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
@property
- def _obj_with_exclusions(self):
+ def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
@@ -1395,7 +1396,7 @@ def _set_axis_name(self, name, axis=0, inplace=False):
# ----------------------------------------------------------------------
# Comparison Methods
- def _indexed_same(self, other):
+ def _indexed_same(self, other) -> bool:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
@@ -1608,7 +1609,7 @@ def _is_level_reference(self, key, axis=0):
and not self._is_label_reference(key, axis=axis)
)
- def _is_label_reference(self, key, axis=0):
+ def _is_label_reference(self, key, axis=0) -> bool_t:
"""
Test whether a key is a label reference for a given axis.
@@ -1637,7 +1638,7 @@ def _is_label_reference(self, key, axis=0):
and any(key in self.axes[ax] for ax in other_axes)
)
- def _is_label_or_level_reference(self, key, axis=0):
+ def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
"""
Test whether a key is a label or level reference for a given axis.
@@ -1661,7 +1662,7 @@ def _is_label_or_level_reference(self, key, axis=0):
key, axis=axis
)
- def _check_label_or_level_ambiguity(self, key, axis=0):
+ def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
"""
Check whether `key` is ambiguous.
@@ -1710,7 +1711,7 @@ def _check_label_or_level_ambiguity(self, key, axis=0):
)
raise ValueError(msg)
- def _get_label_or_level_values(self, key, axis=0):
+ def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
@@ -1782,7 +1783,7 @@ def _get_label_or_level_values(self, key, axis=0):
return values
- def _drop_labels_or_levels(self, keys, axis=0):
+ def _drop_labels_or_levels(self, keys, axis: int = 0):
"""
Drop labels and/or levels for the given `axis`.
@@ -1913,12 +1914,12 @@ def __len__(self) -> int:
"""Returns length of info axis"""
return len(self._info_axis)
- def __contains__(self, key):
+ def __contains__(self, key) -> bool_t:
"""True if the key is in the info axis"""
return key in self._info_axis
@property
- def empty(self):
+ def empty(self) -> bool_t:
"""
Indicator whether DataFrame is empty.
@@ -1991,7 +1992,7 @@ def __array_wrap__(self, result, context=None):
# ----------------------------------------------------------------------
# Picklability
- def __getstate__(self):
+ def __getstate__(self) -> Dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(
_data=self._data,
@@ -2582,13 +2583,13 @@ def to_sql(
name: str,
con,
schema=None,
- if_exists="fail",
- index=True,
+ if_exists: str = "fail",
+ index: bool_t = True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
- ):
+ ) -> None:
"""
Write records stored in a DataFrame to a SQL database.
@@ -2735,7 +2736,12 @@ def to_sql(
method=method,
)
- def to_pickle(self, path, compression="infer", protocol=pickle.HIGHEST_PROTOCOL):
+ def to_pickle(
+ self,
+ path,
+ compression: Optional[str] = "infer",
+ protocol: int = pickle.HIGHEST_PROTOCOL,
+ ) -> None:
"""
Pickle (serialize) object to file.
@@ -2791,7 +2797,7 @@ def to_pickle(self, path, compression="infer", protocol=pickle.HIGHEST_PROTOCOL)
to_pickle(self, path, compression=compression, protocol=protocol)
- def to_clipboard(self, excel=True, sep=None, **kwargs):
+ def to_clipboard(self, excel: bool_t = True, sep: Optional[str] = None, **kwargs):
r"""
Copy object to the system clipboard.
@@ -3276,7 +3282,7 @@ def to_csv(
# Fancy Indexing
@classmethod
- def _create_indexer(cls, name, indexer):
+ def _create_indexer(cls, name: str, indexer) -> None:
"""Create an indexer like _name in the class."""
if getattr(cls, name, None) is None:
_indexer = functools.partial(indexer, name)
@@ -3285,24 +3291,24 @@ def _create_indexer(cls, name, indexer):
# ----------------------------------------------------------------------
# Lookup Caching
- def _set_as_cached(self, item, cacher):
+ def _set_as_cached(self, item, cacher) -> None:
"""Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
- def _reset_cacher(self):
+ def _reset_cacher(self) -> None:
"""Reset the cacher."""
if hasattr(self, "_cacher"):
del self._cacher
- def _maybe_cache_changed(self, item, value):
+ def _maybe_cache_changed(self, item, value) -> None:
"""The object has called back to us saying maybe it has changed.
"""
self._data.set(item, value)
@property
- def _is_cached(self):
+ def _is_cached(self) -> bool_t:
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
@@ -3313,7 +3319,9 @@ def _get_cacher(self):
cacher = cacher[1]()
return cacher
- def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
+ def _maybe_update_cacher(
+ self, clear: bool_t = False, verify_is_copy: bool_t = True
+ ) -> None:
"""
See if we need to update our parent cacher if clear, then clear our
cache.
@@ -3350,13 +3358,13 @@ def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
if clear:
self._clear_item_cache()
- def _clear_item_cache(self):
+ def _clear_item_cache(self) -> None:
self._item_cache.clear()
# ----------------------------------------------------------------------
# Indexing Methods
- def take(self, indices, axis=0, is_copy=True, **kwargs):
+ def take(self, indices, axis=0, is_copy: bool_t = True, **kwargs):
"""
Return the elements in the given *positional* indices along an axis.
@@ -3447,7 +3455,7 @@ class max_speed
return result
- def xs(self, key, axis=0, level=None, drop_level=True):
+ def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
"""
Return cross-section from the Series/DataFrame.
@@ -3552,9 +3560,9 @@ class animal locomotion
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
- indexer = [slice(None)] * self.ndim
- indexer[axis] = loc
- indexer = tuple(indexer)
+ _indexer = [slice(None)] * self.ndim
+ _indexer[axis] = loc
+ indexer = tuple(_indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
@@ -3655,11 +3663,11 @@ def _slice(self, slobj: slice, axis=0, kind=None):
result._set_is_copy(self, copy=is_copy)
return result
- def _set_item(self, key, value):
+ def _set_item(self, key, value) -> None:
self._data.set(key, value)
self._clear_item_cache()
- def _set_is_copy(self, ref=None, copy=True):
+ def _set_is_copy(self, ref=None, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
@@ -3668,7 +3676,7 @@ def _set_is_copy(self, ref=None, copy=True):
else:
self._is_copy = None
- def _check_is_chained_assignment_possible(self):
+ def _check_is_chained_assignment_possible(self) -> bool_t:
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
@@ -3826,7 +3834,14 @@ def _is_view(self):
"""Return boolean indicating if self is view of another array """
return self._data.is_view
- def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None):
+ def reindex_like(
+ self,
+ other,
+ method: Optional[str] = None,
+ copy: bool_t = True,
+ limit=None,
+ tolerance=None,
+ ):
"""
Return an object with matching indices as other object.
@@ -3938,8 +3953,8 @@ def drop(
index=None,
columns=None,
level=None,
- inplace=False,
- errors="raise",
+ inplace: bool_t = False,
+ errors: str = "raise",
):
inplace = validate_bool_kwarg(inplace, "inplace")
@@ -3967,7 +3982,7 @@ def drop(
else:
return obj
- def _drop_axis(self, labels, axis, level=None, errors="raise"):
+ def _drop_axis(self, labels, axis, level=None, errors: str = "raise"):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
@@ -4020,7 +4035,7 @@ def _drop_axis(self, labels, axis, level=None, errors="raise"):
return result
- def _update_inplace(self, result, verify_is_copy=True):
+ def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
"""
Replace self internals with result.
@@ -4037,7 +4052,7 @@ def _update_inplace(self, result, verify_is_copy=True):
self._data = getattr(result, "_data", result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
- def add_prefix(self, prefix):
+ def add_prefix(self, prefix: str):
"""
Prefix labels with string `prefix`.
@@ -4096,7 +4111,7 @@ def add_prefix(self, prefix):
mapper = {self._info_axis_name: f}
return self.rename(**mapper)
- def add_suffix(self, suffix):
+ def add_suffix(self, suffix: str):
"""
Suffix labels with string `suffix`.
@@ -4160,9 +4175,9 @@ def sort_values(
by=None,
axis=0,
ascending=True,
- inplace=False,
- kind="quicksort",
- na_position="last",
+ inplace: bool_t = False,
+ kind: str = "quicksort",
+ na_position: str = "last",
):
"""
Sort by the values along either axis.
@@ -4257,11 +4272,11 @@ def sort_index(
self,
axis=0,
level=None,
- ascending=True,
- inplace=False,
- kind="quicksort",
- na_position="last",
- sort_remaining=True,
+ ascending: bool_t = True,
+ inplace: bool_t = False,
+ kind: str = "quicksort",
+ na_position: str = "last",
+ sort_remaining: bool_t = True,
):
"""
Sort object by labels (along an axis).
@@ -4583,7 +4598,7 @@ def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy)
return obj
- def _needs_reindex_multi(self, axes, method, level):
+ def _needs_reindex_multi(self, axes, method, level) -> bool_t:
"""Check if we do need a multi reindex."""
return (
(com.count_not_none(*axes.values()) == self._AXIS_LEN)
@@ -4596,7 +4611,11 @@ def _reindex_multi(self, axes, copy, fill_value):
raise AbstractMethodError(self)
def _reindex_with_indexers(
- self, reindexers, fill_value=None, copy=False, allow_dups=False
+ self,
+ reindexers,
+ fill_value=None,
+ copy: bool_t = False,
+ allow_dups: bool_t = False,
):
"""allow_dups indicates an internal call here """
@@ -4628,7 +4647,13 @@ def _reindex_with_indexers(
return self._constructor(new_data).__finalize__(self)
- def filter(self, items=None, like=None, regex=None, axis=None):
+ def filter(
+ self,
+ items=None,
+ like: Optional[str] = None,
+ regex: Optional[str] = None,
+ axis=None,
+ ):
"""
Subset the dataframe rows or columns according to the specified index labels.
@@ -4719,7 +4744,7 @@ def f(x):
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
- def head(self, n=5):
+ def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the first `n` rows.
@@ -4778,7 +4803,7 @@ def head(self, n=5):
return self.iloc[:n]
- def tail(self, n=5):
+ def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the last `n` rows.
@@ -5208,7 +5233,9 @@ def pipe(self, func, *args, **kwargs):
# ----------------------------------------------------------------------
# Attribute access
- def __finalize__(self, other, method=None, **kwargs):
+ def __finalize__(
+ self: FrameOrSeries, other, method=None, **kwargs
+ ) -> FrameOrSeries:
"""
Propagate metadata from other to self.
@@ -5228,7 +5255,7 @@ def __finalize__(self, other, method=None, **kwargs):
object.__setattr__(self, name, getattr(other, name, None))
return self
- def __getattr__(self, name):
+ def __getattr__(self, name: str):
"""After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
@@ -5247,7 +5274,7 @@ def __getattr__(self, name):
return self[name]
return object.__getattribute__(self, name)
- def __setattr__(self, name, value):
+ def __setattr__(self, name: str, value) -> None:
"""After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
@@ -5312,7 +5339,7 @@ def _protect_consolidate(self, f):
self._clear_item_cache()
return result
- def _consolidate_inplace(self):
+ def _consolidate_inplace(self) -> None:
"""Consolidate data in place and return None"""
def f():
@@ -5320,7 +5347,7 @@ def f():
self._protect_consolidate(f)
- def _consolidate(self, inplace=False):
+ def _consolidate(self, inplace: bool_t = False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
@@ -5357,7 +5384,7 @@ def _is_datelike_mixed_type(self):
f = lambda: self._data.is_datelike_mixed_type
return self._protect_consolidate(f)
- def _check_inplace_setting(self, value):
+ def _check_inplace_setting(self, value) -> bool_t:
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
@@ -5559,7 +5586,7 @@ def dtypes(self):
return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
- def _to_dict_of_blocks(self, copy=True):
+ def _to_dict_of_blocks(self, copy: bool_t = True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
@@ -5712,7 +5739,7 @@ def astype(self, dtype, copy: bool_t = True, errors: str = "raise"):
result.columns = self.columns
return result
- def copy(self, deep=True):
+ def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
"""
Make a copy of this object's indices and data.
@@ -5820,10 +5847,10 @@ def copy(self, deep=True):
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
- def __copy__(self, deep=True):
+ def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
return self.copy(deep=deep)
- def __deepcopy__(self, memo=None):
+ def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
"""
Parameters
----------
@@ -5835,8 +5862,13 @@ def __deepcopy__(self, memo=None):
return self.copy(deep=True)
def _convert(
- self, datetime=False, numeric=False, timedelta=False, coerce=False, copy=True
- ):
+ self: FrameOrSeries,
+ datetime: bool_t = False,
+ numeric: bool_t = False,
+ timedelta: bool_t = False,
+ coerce: bool_t = False,
+ copy: bool_t = True,
+ ) -> FrameOrSeries:
"""
Attempt to infer better dtype for object columns
@@ -5928,14 +5960,14 @@ def infer_objects(self):
# Filling NA's
def fillna(
- self,
+ self: FrameOrSeries,
value=None,
method=None,
axis=None,
- inplace=False,
+ inplace: bool_t = False,
limit=None,
downcast=None,
- ):
+ ) -> Optional[FrameOrSeries]:
"""
Fill NA/NaN values using the specified method.
@@ -5971,8 +6003,8 @@ def fillna(
Returns
-------
- %(klass)s
- Object with missing values filled.
+ %(klass)s or None
+ Object with missing values filled or None if ``inplace=True``.
See Also
--------
@@ -6112,30 +6144,43 @@ def fillna(
if inplace:
self._update_inplace(new_data)
+ return None
else:
return self._constructor(new_data).__finalize__(self)
- def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
+ def ffill(
+ self: FrameOrSeries,
+ axis=None,
+ inplace: bool_t = False,
+ limit=None,
+ downcast=None,
+ ) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
Returns
-------
- %(klass)s
- Object with missing values filled.
+ %(klass)s or None
+ Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
- def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
+ def bfill(
+ self: FrameOrSeries,
+ axis=None,
+ inplace: bool_t = False,
+ limit=None,
+ downcast=None,
+ ) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
Returns
-------
- %(klass)s
- Object with missing values filled.
+ %(klass)s or None
+ Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
@@ -7197,7 +7242,7 @@ def notna(self):
def notnull(self):
return notna(self).__finalize__(self)
- def _clip_with_scalar(self, lower, upper, inplace=False):
+ def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
@@ -7245,7 +7290,15 @@ def _clip_with_one_bound(self, threshold, method, axis, inplace):
threshold = _align_method_FRAME(self, threshold, axis)
return self.where(subset, threshold, axis=axis, inplace=inplace)
- def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs):
+ def clip(
+ self,
+ lower=None,
+ upper=None,
+ axis=None,
+ inplace: bool_t = False,
+ *args,
+ **kwargs,
+ ):
"""
Trim values at input threshold(s).
@@ -7364,11 +7417,11 @@ def groupby(
by=None,
axis=0,
level=None,
- as_index=True,
- sort=True,
- group_keys=True,
- squeeze=False,
- observed=False,
+ as_index: bool_t = True,
+ sort: bool_t = True,
+ group_keys: bool_t = True,
+ squeeze: bool_t = False,
+ observed: bool_t = False,
):
"""
Group DataFrame or Series using a mapper or by a Series of columns.
@@ -7493,7 +7546,14 @@ def groupby(
observed=observed,
)
- def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
+ def asfreq(
+ self,
+ freq,
+ method=None,
+ how: Optional[str] = None,
+ normalize: bool_t = False,
+ fill_value=None,
+ ):
"""
Convert TimeSeries to specified frequency.
@@ -7596,7 +7656,7 @@ def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
fill_value=fill_value,
)
- def at_time(self, time, asof=False, axis=None):
+ def at_time(self, time, asof: bool_t = False, axis=None):
"""
Select values at particular time of day (e.g. 9:30AM).
@@ -7653,7 +7713,12 @@ def at_time(self, time, asof=False, axis=None):
return self.take(indexer, axis=axis)
def between_time(
- self, start_time, end_time, include_start=True, include_end=True, axis=None
+ self,
+ start_time,
+ end_time,
+ include_start: bool_t = True,
+ include_end: bool_t = True,
+ axis=None,
):
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
@@ -7732,16 +7797,16 @@ def between_time(
def resample(
self,
rule,
- how=None,
+ how: Optional[str] = None,
axis=0,
- fill_method=None,
- closed=None,
- label=None,
- convention="start",
- kind=None,
+ fill_method: Optional[str] = None,
+ closed: Optional[str] = None,
+ label: Optional[str] = None,
+ convention: str = "start",
+ kind: Optional[str] = None,
loffset=None,
- limit=None,
- base=0,
+ limit: Optional[int] = None,
+ base: int = 0,
on=None,
level=None,
):
@@ -8165,14 +8230,14 @@ def last(self, offset):
return self.iloc[start:]
def rank(
- self,
+ self: FrameOrSeries,
axis=0,
- method="average",
- numeric_only=None,
- na_option="keep",
- ascending=True,
- pct=False,
- ):
+ method: str = "average",
+ numeric_only: Optional[bool_t] = None,
+ na_option: str = "keep",
+ ascending: bool_t = True,
+ pct: bool_t = False,
+ ) -> FrameOrSeries:
"""
Compute numerical data ranks (1 through n) along axis.
@@ -8424,7 +8489,7 @@ def _align_frame(
join="outer",
axis=None,
level=None,
- copy=True,
+ copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
@@ -8484,7 +8549,7 @@ def _align_series(
join="outer",
axis=None,
level=None,
- copy=True,
+ copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
@@ -8974,7 +9039,7 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None):
return self._constructor(new_data).__finalize__(self)
- def slice_shift(self, periods=1, axis=0):
+ def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
"""
Equivalent to `shift` without copying data.
@@ -9011,7 +9076,7 @@ def slice_shift(self, periods=1, axis=0):
return new_obj.__finalize__(self)
- def tshift(self, periods=1, freq=None, axis=0):
+ def tshift(self, periods: int = 1, freq=None, axis=0):
"""
Shift the time index, using the index's frequency if available.
@@ -9071,7 +9136,9 @@ def tshift(self, periods=1, freq=None, axis=0):
return self._constructor(new_data).__finalize__(self)
- def truncate(self, before=None, after=None, axis=None, copy=True):
+ def truncate(
+ self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
+ ) -> FrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
@@ -9224,7 +9291,9 @@ def truncate(self, before=None, after=None, axis=None, copy=True):
return result
- def tz_convert(self, tz, axis=0, level=None, copy=True):
+ def tz_convert(
+ self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
+ ) -> FrameOrSeries:
"""
Convert tz-aware axis to target time zone.
@@ -9280,8 +9349,14 @@ def _tz_convert(ax, tz):
return result.__finalize__(self)
def tz_localize(
- self, tz, axis=0, level=None, copy=True, ambiguous="raise", nonexistent="raise"
- ):
+ self: FrameOrSeries,
+ tz,
+ axis=0,
+ level=None,
+ copy: bool_t = True,
+ ambiguous="raise",
+ nonexistent: str = "raise",
+ ) -> FrameOrSeries:
"""
Localize tz-naive index of a Series or DataFrame to target time zone.
@@ -11099,7 +11174,7 @@ def _doc_parms(cls):
def _make_min_count_stat_function(
- cls, name, name1, name2, axis_descr, desc, f, see_also="", examples=""
+ cls, name, name1, name2, axis_descr, desc, f, see_also: str = "", examples: str = ""
):
@Substitution(
desc=desc,
@@ -11147,7 +11222,7 @@ def stat_func(
def _make_stat_function(
- cls, name, name1, name2, axis_descr, desc, f, see_also="", examples=""
+ cls, name, name1, name2, axis_descr, desc, f, see_also: str = "", examples: str = ""
):
@Substitution(
desc=desc,
| mypy 0.750 has an experimental feature for static inference of annotations see https://mypy.readthedocs.io/en/latest/mypy_daemon.html#static-inference-of-annotations
have tried it out on functions in core.generic. The annotations add here are mainly the ones that matched the docstrings.
| https://api.github.com/repos/pandas-dev/pandas/pulls/29991 | 2019-12-03T11:55:37Z | 2019-12-04T13:53:35Z | 2019-12-04T13:53:35Z | 2019-12-04T19:51:43Z |
DEPR: remove get_values, SparseArray.values | diff --git a/doc/redirects.csv b/doc/redirects.csv
index 61902f3134a4d..33ba94e2baa01 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -360,7 +360,6 @@ generated/pandas.DataFrame.from_records,../reference/api/pandas.DataFrame.from_r
generated/pandas.DataFrame.ge,../reference/api/pandas.DataFrame.ge
generated/pandas.DataFrame.get,../reference/api/pandas.DataFrame.get
generated/pandas.DataFrame.get_value,../reference/api/pandas.DataFrame.get_value
-generated/pandas.DataFrame.get_values,../reference/api/pandas.DataFrame.get_values
generated/pandas.DataFrame.groupby,../reference/api/pandas.DataFrame.groupby
generated/pandas.DataFrame.gt,../reference/api/pandas.DataFrame.gt
generated/pandas.DataFrame.head,../reference/api/pandas.DataFrame.head
@@ -643,7 +642,6 @@ generated/pandas.Index.get_level_values,../reference/api/pandas.Index.get_level_
generated/pandas.Index.get_loc,../reference/api/pandas.Index.get_loc
generated/pandas.Index.get_slice_bound,../reference/api/pandas.Index.get_slice_bound
generated/pandas.Index.get_value,../reference/api/pandas.Index.get_value
-generated/pandas.Index.get_values,../reference/api/pandas.Index.get_values
generated/pandas.Index.groupby,../reference/api/pandas.Index.groupby
generated/pandas.Index.has_duplicates,../reference/api/pandas.Index.has_duplicates
generated/pandas.Index.hasnans,../reference/api/pandas.Index.hasnans
@@ -1044,7 +1042,6 @@ generated/pandas.Series.from_csv,../reference/api/pandas.Series.from_csv
generated/pandas.Series.ge,../reference/api/pandas.Series.ge
generated/pandas.Series.get,../reference/api/pandas.Series.get
generated/pandas.Series.get_value,../reference/api/pandas.Series.get_value
-generated/pandas.Series.get_values,../reference/api/pandas.Series.get_values
generated/pandas.Series.groupby,../reference/api/pandas.Series.groupby
generated/pandas.Series.gt,../reference/api/pandas.Series.gt
generated/pandas.Series.hasnans,../reference/api/pandas.Series.hasnans
diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst
index 2604af4e33a89..4c296d74b5ef9 100644
--- a/doc/source/reference/frame.rst
+++ b/doc/source/reference/frame.rst
@@ -30,7 +30,6 @@ Attributes and underlying data
DataFrame.dtypes
DataFrame.select_dtypes
DataFrame.values
- DataFrame.get_values
DataFrame.axes
DataFrame.ndim
DataFrame.size
diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst
index 23f8424213574..a01f2bcd40612 100644
--- a/doc/source/reference/indexing.rst
+++ b/doc/source/reference/indexing.rst
@@ -159,7 +159,6 @@ Selecting
Index.get_loc
Index.get_slice_bound
Index.get_value
- Index.get_values
Index.isin
Index.slice_indexer
Index.slice_locs
diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index e13b4ed98a38b..1e17b72db2aaf 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -53,7 +53,6 @@ Conversion
Series.to_period
Series.to_timestamp
Series.to_list
- Series.get_values
Series.__array__
Indexing, iteration
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index ea54b00cf5be4..0019fc4b36d20 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -26,14 +26,6 @@ cdef _check_result_array(object obj, Py_ssize_t cnt):
raise ValueError('Function does not reduce')
-cdef bint _is_sparse_array(object obj):
- # TODO can be removed one SparseArray.values is removed (GH26421)
- if hasattr(obj, '_subtyp'):
- if obj._subtyp == 'sparse_array':
- return True
- return False
-
-
cdef class Reducer:
"""
Performs generic reduction operation on a C or Fortran-contiguous ndarray
@@ -404,8 +396,7 @@ cdef class SeriesGrouper(_BaseGrouper):
cdef inline _extract_result(object res, bint squeeze=True):
""" extract the result object, it might be a 0-dim ndarray
or a len-1 0-dim, or a scalar """
- if (not _is_sparse_array(res) and hasattr(res, 'values')
- and util.is_array(res.values)):
+ if hasattr(res, 'values') and util.is_array(res.values):
res = res.values
if util.is_array(res):
if res.ndim == 0:
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index f4a20b808292a..a2e456581cb4f 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -302,9 +302,7 @@ class Categorical(ExtensionArray, PandasObject):
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
- _deprecations = PandasObject._deprecations | frozenset(
- ["tolist", "itemsize", "get_values"]
- )
+ _deprecations = PandasObject._deprecations | frozenset(["tolist", "itemsize"])
_typ = "categorical"
def __init__(
@@ -1461,29 +1459,18 @@ def value_counts(self, dropna=True):
return Series(count, index=CategoricalIndex(ix), dtype="int64")
- def get_values(self):
+ def _internal_get_values(self):
"""
Return the values.
- .. deprecated:: 0.25.0
-
For internal compatibility with pandas formatting.
Returns
-------
- numpy.array
+ np.ndarray or Index
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods.
"""
- warn(
- "The 'get_values' method is deprecated and will be removed in a "
- "future version",
- FutureWarning,
- stacklevel=2,
- )
- return self._internal_get_values()
-
- def _internal_get_values(self):
# if we are a datetime and period index, return Index to keep metadata
if needs_i8_conversion(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 593ba7a643193..e909e92139c80 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -568,23 +568,6 @@ def npoints(self) -> int:
"""
return self.sp_index.npoints
- @property
- def values(self):
- """
- Dense values
-
- .. deprecated:: 0.25.0
-
- Use ``np.asarray(...)`` or the ``.to_dense()`` method instead.
- """
- msg = (
- "The SparseArray.values attribute is deprecated and will be "
- "removed in a future version. You can use `np.asarray(...)` or "
- "the `.to_dense()` method instead."
- )
- warnings.warn(msg, FutureWarning, stacklevel=2)
- return self.to_dense()
-
def isna(self):
# If null fill value, we want SparseDtype[bool, true]
# to preserve the same memory usage.
@@ -1137,22 +1120,6 @@ def to_dense(self):
"""
return np.asarray(self, dtype=self.sp_values.dtype)
- def get_values(self):
- """
- Convert SparseArray to a NumPy array.
-
- .. deprecated:: 0.25.0
- Use `to_dense` instead.
-
- """
- warnings.warn(
- "The 'get_values' method is deprecated and will be removed in a "
- "future version. Use the 'to_dense' method instead.",
- FutureWarning,
- stacklevel=2,
- )
- return self._internal_get_values()
-
_internal_get_values = to_dense
# ------------------------------------------------------------------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c52b1c65ad08d..de9f38e28c86a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5506,13 +5506,10 @@ def _get_values(self):
# compat
return self.values
- def get_values(self):
+ def _internal_get_values(self):
"""
Return an ndarray after converting sparse values to dense.
- .. deprecated:: 0.25.0
- Use ``np.asarray(..)`` or :meth:`DataFrame.values` instead.
-
This is the same as ``.values`` for non-sparse data. For sparse
data contained in a `SparseArray`, the data are first
converted to a dense representation.
@@ -5526,41 +5523,7 @@ def get_values(self):
--------
values : Numpy representation of DataFrame.
SparseArray : Container for sparse data.
-
- Examples
- --------
- >>> df = pd.DataFrame({'a': [1, 2], 'b': [True, False],
- ... 'c': [1.0, 2.0]})
- >>> df
- a b c
- 0 1 True 1.0
- 1 2 False 2.0
-
- >>> df.get_values()
- array([[1, True, 1.0], [2, False, 2.0]], dtype=object)
-
- >>> df = pd.DataFrame({"a": pd.SparseArray([1, None, None]),
- ... "c": [1.0, 2.0, 3.0]})
- >>> df
- a c
- 0 1.0 1.0
- 1 NaN 2.0
- 2 NaN 3.0
-
- >>> df.get_values()
- array([[ 1., 1.],
- [nan, 2.],
- [nan, 3.]])
- """
- warnings.warn(
- "The 'get_values' method is deprecated and will be removed in a "
- "future version. Use '.values' or 'np.asarray(..)' instead.",
- FutureWarning,
- stacklevel=2,
- )
- return self._internal_get_values()
-
- def _internal_get_values(self):
+ """
return self.values
@property
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 87c110f95c13a..cb6e3b172c5c9 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -215,7 +215,7 @@ class Index(IndexOpsMixin, PandasObject):
_deprecations: FrozenSet[str] = (
PandasObject._deprecations
| IndexOpsMixin._deprecations
- | frozenset(["contains", "get_values", "set_value"])
+ | frozenset(["contains", "set_value"])
)
# To hand over control to subclasses
@@ -3753,13 +3753,10 @@ def _values(self) -> Union[ExtensionArray, ABCIndexClass, np.ndarray]:
"""
return self._data
- def get_values(self):
+ def _internal_get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
- .. deprecated:: 0.25.0
- Use :meth:`Index.to_numpy` or :attr:`Index.array` instead.
-
Returns
-------
numpy.ndarray
@@ -3767,7 +3764,7 @@ def get_values(self):
See Also
--------
- Index.values : The attribute that get_values wraps.
+ Index.values : The attribute that _internal_get_values wraps.
Examples
--------
@@ -3780,33 +3777,24 @@ def get_values(self):
a 1 2 3
b 4 5 6
c 7 8 9
- >>> df.index.get_values()
+ >>> df.index._internal_get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
- >>> idx.get_values()
+ >>> idx._internal_get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
- >>> midx.get_values()
+ >>> midx._internal_get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
- >>> midx.get_values().ndim
+ >>> midx._internal_get_values().ndim
1
"""
- warnings.warn(
- "The 'get_values' method is deprecated and will be removed in a "
- "future version. Use '.to_numpy()' or '.array' instead.",
- FutureWarning,
- stacklevel=2,
- )
- return self._internal_get_values()
-
- def _internal_get_values(self):
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 410b10a69ecd5..1bfd5c047ed76 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -465,27 +465,16 @@ def _values(self):
"""
return self._data.internal_values()
- def get_values(self):
+ def _internal_get_values(self):
"""
Same as values (but handles sparseness conversions); is a view.
- .. deprecated:: 0.25.0
- Use :meth:`Series.to_numpy` or :attr:`Series.array` instead.
-
Returns
-------
numpy.ndarray
Data of the Series.
"""
- warnings.warn(
- "The 'get_values' method is deprecated and will be removed in a "
- "future version. Use '.to_numpy()' or '.array' instead.",
- FutureWarning,
- stacklevel=2,
- )
- return self._internal_get_values()
- def _internal_get_values(self):
return self._data.get_values()
# ops
diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py
index 42087b89a19b5..c80e963d5c409 100644
--- a/pandas/tests/arrays/categorical/test_api.py
+++ b/pandas/tests/arrays/categorical/test_api.py
@@ -504,9 +504,3 @@ def test_recode_to_categories_large(self):
new = Index(expected)
result = _recode_for_categories(codes, old, new)
tm.assert_numpy_array_equal(result, expected)
-
- def test_deprecated_get_values(self):
- cat = Categorical(["a", "b", "c", "a"])
- with tm.assert_produces_warning(FutureWarning):
- res = cat.get_values()
- tm.assert_numpy_array_equal(res, np.array(cat))
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index c9f96ed516dc5..75a4f683e92d2 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -617,8 +617,7 @@ def test_dense_repr(self, vals, fill_value):
res = arr.to_dense()
tm.assert_numpy_array_equal(res, vals)
- with tm.assert_produces_warning(FutureWarning):
- res2 = arr.get_values()
+ res2 = arr._internal_get_values()
tm.assert_numpy_array_equal(res2, vals)
@@ -1244,12 +1243,3 @@ def test_map_missing():
result = arr.map({0: 10, 1: 11})
tm.assert_sp_array_equal(result, expected)
-
-
-def test_deprecated_values():
- arr = SparseArray([0, 1, 2])
-
- with tm.assert_produces_warning(FutureWarning):
- result = arr.values
-
- tm.assert_numpy_array_equal(result, arr.to_dense())
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 60befe5e73d37..91fb71c9de7a4 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -528,9 +528,3 @@ def test_tab_complete_warning(self, ip):
with tm.assert_produces_warning(None):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("df.", 1))
-
- def test_get_values_deprecated(self):
- df = DataFrame({"a": [1, 2], "b": [0.1, 0.2]})
- with tm.assert_produces_warning(FutureWarning):
- res = df.get_values()
- tm.assert_numpy_array_equal(res, df.values)
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 14bf6490a706b..6a3137785e6f3 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -180,8 +180,7 @@ def test_values(self):
exp = np.array([], dtype=np.object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.to_numpy(), exp)
- with tm.assert_produces_warning(FutureWarning):
- tm.assert_numpy_array_equal(idx.get_values(), exp)
+
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx._ndarray_values, exp)
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 42b2c37638c76..8acab3fa2541d 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -498,12 +498,6 @@ def test_integer_series_size(self):
s = Series(range(9), dtype="Int64")
assert s.size == 9
- def test_get_values_deprecation(self):
- s = Series(range(9))
- with tm.assert_produces_warning(FutureWarning):
- res = s.get_values()
- tm.assert_numpy_array_equal(res, s.values)
-
class TestCategoricalSeries:
@pytest.mark.parametrize(
| The cleanups in libreduction are made possible by SparseArray.values going away. | https://api.github.com/repos/pandas-dev/pandas/pulls/29989 | 2019-12-03T05:22:01Z | 2019-12-04T22:51:48Z | 2019-12-04T22:51:48Z | 2019-12-04T22:59:22Z |
STY: F-strings | diff --git a/ci/print_skipped.py b/ci/print_skipped.py
index 51a2460e05fab..2b5cf1ab34295 100755
--- a/ci/print_skipped.py
+++ b/ci/print_skipped.py
@@ -32,8 +32,7 @@ def main(filename):
print("-" * 80)
else:
print(
- "#{i} {class_name}.{test_name}: {message}".format(
- **dict(test_data, i=i)
- )
+ f"#{i} {test_data['class_name']}."
+ f"{test_data['test_name']}: {test_data['message']}"
)
i += 1
diff --git a/doc/sphinxext/announce.py b/doc/sphinxext/announce.py
index 1a5ab99b5a94f..fdc5a6b283ba8 100755
--- a/doc/sphinxext/announce.py
+++ b/doc/sphinxext/announce.py
@@ -113,13 +113,13 @@ def build_string(revision_range, heading="Contributors"):
components["authors"] = "* " + "\n* ".join(components["authors"])
tpl = textwrap.dedent(
- """\
- {heading}
- {uline}
+ f"""\
+ {components['heading']}
+ {components['uline']}
- {author_message}
- {authors}"""
- ).format(**components)
+ {components['author_message']}
+ {components['authors']}"""
+ )
return tpl
diff --git a/doc/sphinxext/contributors.py b/doc/sphinxext/contributors.py
index 1a064f71792e9..d9ba2bb2cfb07 100644
--- a/doc/sphinxext/contributors.py
+++ b/doc/sphinxext/contributors.py
@@ -27,7 +27,7 @@ def run(self):
except git.GitCommandError as exc:
return [
self.state.document.reporter.warning(
- "Cannot find contributors for range '{}': {}".format(range_, exc),
+ f"Cannot find contributors for range {repr(range_)}: {exc}",
line=self.lineno,
)
]
| - [x] ref https://github.com/pandas-dev/pandas/issues/29547
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/29988 | 2019-12-03T04:46:21Z | 2019-12-04T13:57:06Z | 2019-12-04T13:57:06Z | 2019-12-04T15:34:34Z |
DEPR: remove real, imag, put, and str.partition kwarg | diff --git a/doc/redirects.csv b/doc/redirects.csv
index 599ad6d28a8f5..61902f3134a4d 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -1119,7 +1119,6 @@ generated/pandas.Series.pow,../reference/api/pandas.Series.pow
generated/pandas.Series.prod,../reference/api/pandas.Series.prod
generated/pandas.Series.product,../reference/api/pandas.Series.product
generated/pandas.Series.ptp,../reference/api/pandas.Series.ptp
-generated/pandas.Series.put,../reference/api/pandas.Series.put
generated/pandas.Series.quantile,../reference/api/pandas.Series.quantile
generated/pandas.Series.radd,../reference/api/pandas.Series.radd
generated/pandas.Series.rank,../reference/api/pandas.Series.rank
diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index 2485b94ab4d09..e13b4ed98a38b 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -39,7 +39,6 @@ Attributes
Series.empty
Series.dtypes
Series.name
- Series.put
Conversion
----------
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 4eb5f350cad8e..5c9543580be26 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -565,6 +565,9 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
- Passing multiple axes to :meth:`DataFrame.dropna` is no longer supported (:issue:`20995`)
- Removed previously deprecated :meth:`Series.nonzero`, use `to_numpy().nonzero()` instead (:issue:`24048`)
- Passing floating dtype ``codes`` to :meth:`Categorical.from_codes` is no longer supported, pass ``codes.astype(np.int64)`` instead (:issue:`21775`)
+- :meth:`Series.str.partition` and :meth:`Series.str.rpartition` no longer accept "pat" keyword, use "sep" instead (:issue:`23767`)
+- Removed the previously deprecated :meth:`Series.put` (:issue:`27106`)
+- Removed the previously deprecated :attr:`Series.real`, :attr:`Series.imag` (:issue:`27106`)
- Removed the previously deprecated :meth:`Series.to_dense`, :meth:`DataFrame.to_dense` (:issue:`26684`)
- Removed the previously deprecated :meth:`Index.dtype_str`, use ``str(index.dtype)`` instead (:issue:`27106`)
- :meth:`Categorical.ravel` returns a :class:`Categorical` instead of a ``ndarray`` (:issue:`27199`)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 11e87a4eed27f..3f665f0b202c1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -158,7 +158,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_deprecations = (
base.IndexOpsMixin._deprecations
| generic.NDFrame._deprecations
- | frozenset(["compress", "valid", "real", "imag", "put", "ptp", "nonzero"])
+ | frozenset(["compress", "ptp"])
)
# Override cache_readonly bc Series is mutable
@@ -528,23 +528,6 @@ def compress(self, condition, *args, **kwargs):
nv.validate_compress(args, kwargs)
return self[condition]
- def put(self, *args, **kwargs):
- """
- Apply the `put` method to its `values` attribute if it has one.
-
- .. deprecated:: 0.25.0
-
- See Also
- --------
- numpy.ndarray.put
- """
- warnings.warn(
- "`put` has been deprecated and will be removed in a future version.",
- FutureWarning,
- stacklevel=2,
- )
- self._values.put(*args, **kwargs)
-
def __len__(self) -> int:
"""
Return the length of the Series.
@@ -777,46 +760,6 @@ def __array__(self, dtype=None):
# ----------------------------------------------------------------------
# Unary Methods
- @property
- def real(self):
- """
- Return the real value of vector.
-
- .. deprecated:: 0.25.0
- """
- warnings.warn(
- "`real` is deprecated and will be removed in a future version. "
- "To eliminate this warning for a Series `ser`, use "
- "`np.real(ser.to_numpy())` or `ser.to_numpy().real`.",
- FutureWarning,
- stacklevel=2,
- )
- return self.values.real
-
- @real.setter
- def real(self, v):
- self.values.real = v
-
- @property
- def imag(self):
- """
- Return imag value of vector.
-
- .. deprecated:: 0.25.0
- """
- warnings.warn(
- "`imag` is deprecated and will be removed in a future version. "
- "To eliminate this warning for a Series `ser`, use "
- "`np.imag(ser.to_numpy())` or `ser.to_numpy().imag`.",
- FutureWarning,
- stacklevel=2,
- )
- return self.values.imag
-
- @imag.setter
- def imag(self, v):
- self.values.imag = v
-
# coercion
__float__ = _coerce_method(float)
__long__ = _coerce_method(int)
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 137c37f938dfa..2d136de8b3424 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -9,7 +9,7 @@
import pandas._libs.lib as lib
import pandas._libs.ops as libops
-from pandas.util._decorators import Appender, deprecate_kwarg
+from pandas.util._decorators import Appender
from pandas.core.dtypes.common import (
ensure_object,
@@ -2630,9 +2630,6 @@ def rsplit(self, pat=None, n=-1, expand=False):
----------
sep : str, default whitespace
String to split on.
- pat : str, default whitespace
- .. deprecated:: 0.24.0
- Use ``sep`` instead.
expand : bool, default True
If True, return DataFrame/MultiIndex expanding dimensionality.
If False, return Series/Index.
@@ -2710,7 +2707,6 @@ def rsplit(self, pat=None, n=-1, expand=False):
"also": "rpartition : Split the string at the last occurrence of `sep`.",
}
)
- @deprecate_kwarg(old_arg_name="pat", new_arg_name="sep")
@forbid_nonstring_types(["bytes"])
def partition(self, sep=" ", expand=True):
f = lambda x: x.partition(sep)
@@ -2726,7 +2722,6 @@ def partition(self, sep=" ", expand=True):
"also": "partition : Split the string at the first occurrence of `sep`.",
}
)
- @deprecate_kwarg(old_arg_name="pat", new_arg_name="sep")
@forbid_nonstring_types(["bytes"])
def rpartition(self, sep=" ", expand=True):
f = lambda x: x.rpartition(sep)
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index 0dc64651e8d58..065be966efa49 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -411,26 +411,6 @@ def test_astype_empty_constructor_equality(self, dtype):
as_type_empty = Series([]).astype(dtype)
tm.assert_series_equal(init_empty, as_type_empty)
- @pytest.mark.filterwarnings("ignore::FutureWarning")
- def test_complex(self):
- # see gh-4819: complex access for ndarray compat
- a = np.arange(5, dtype=np.float64)
- b = Series(a + 4j * a)
-
- tm.assert_numpy_array_equal(a, np.real(b))
- tm.assert_numpy_array_equal(4 * a, np.imag(b))
-
- b.real = np.arange(5) + 5
- tm.assert_numpy_array_equal(a + 5, np.real(b))
- tm.assert_numpy_array_equal(4 * a, np.imag(b))
-
- def test_real_imag_deprecated(self):
- # GH 18262
- s = pd.Series([1])
- with tm.assert_produces_warning(FutureWarning):
- s.imag
- s.real
-
def test_arg_for_errors_in_astype(self):
# see gh-14878
s = Series([1, 2, 3])
diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py
index 187c5d90407ce..efcb500a0b79f 100644
--- a/pandas/tests/series/test_internals.py
+++ b/pandas/tests/series/test_internals.py
@@ -242,10 +242,3 @@ def test_hasnans_unchached_for_series():
ser.iloc[-1] = np.nan
assert ser.hasnans is True
assert Series.hasnans.__doc__ == pd.Index.hasnans.__doc__
-
-
-def test_put_deprecated():
- # GH 18262
- s = pd.Series([1])
- with tm.assert_produces_warning(FutureWarning):
- s.put(0, 0)
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 0e2f8ee6543e1..cf52e286a47a5 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -2966,23 +2966,21 @@ def test_partition_with_name(self):
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
- def test_partition_deprecation(self):
+ def test_partition_sep_kwarg(self):
# GH 22676; depr kwarg "pat" in favor of "sep"
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"])
# str.partition
# using sep -> no warning
expected = values.str.partition(sep="_")
- with tm.assert_produces_warning(FutureWarning):
- result = values.str.partition(pat="_")
- tm.assert_frame_equal(result, expected)
+ result = values.str.partition("_")
+ tm.assert_frame_equal(result, expected)
# str.rpartition
# using sep -> no warning
expected = values.str.rpartition(sep="_")
- with tm.assert_produces_warning(FutureWarning):
- result = values.str.rpartition(pat="_")
- tm.assert_frame_equal(result, expected)
+ result = values.str.rpartition("_")
+ tm.assert_frame_equal(result, expected)
def test_pipe_failures(self):
# #2119
| https://api.github.com/repos/pandas-dev/pandas/pulls/29986 | 2019-12-03T02:31:12Z | 2019-12-04T13:58:42Z | 2019-12-04T13:58:42Z | 2019-12-04T17:37:46Z | |
CLN: assorted (non-pytables) cleanups | diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 6dc110e3f8d07..2e5a563b815b3 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -306,6 +306,9 @@ def eval(
"multi-line expressions are only valid in the "
"context of data, use DataFrame.eval"
)
+ engine = _check_engine(engine)
+ _check_parser(parser)
+ _check_resolvers(resolvers)
ret = None
first_expr = True
@@ -313,9 +316,6 @@ def eval(
for expr in exprs:
expr = _convert_expression(expr)
- engine = _check_engine(engine)
- _check_parser(parser)
- _check_resolvers(resolvers)
_check_for_locals(expr, level, parser)
# get our (possibly passed-in) scope
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 5cab0c1fe6d59..84b6d45b78fe8 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -40,8 +40,8 @@ def datetime_index(request):
"""
freqstr = request.param
# TODO: non-monotone indexes; NaTs, different start dates, timezones
- pi = pd.date_range(start=pd.Timestamp("2000-01-01"), periods=100, freq=freqstr)
- return pi
+ dti = pd.date_range(start=pd.Timestamp("2000-01-01"), periods=100, freq=freqstr)
+ return dti
@pytest.fixture
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index d08c8b1cc13e5..584550d562b0d 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -2974,14 +2974,10 @@ def test_partition_sep_kwarg(self):
# GH 22676; depr kwarg "pat" in favor of "sep"
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"])
- # str.partition
- # using sep -> no warning
expected = values.str.partition(sep="_")
result = values.str.partition("_")
tm.assert_frame_equal(result, expected)
- # str.rpartition
- # using sep -> no warning
expected = values.str.rpartition(sep="_")
result = values.str.rpartition("_")
tm.assert_frame_equal(result, expected)
| includes a couple of whatsnew notes that fell through the cracks | https://api.github.com/repos/pandas-dev/pandas/pulls/29985 | 2019-12-03T02:28:23Z | 2019-12-12T13:48:13Z | 2019-12-12T13:48:13Z | 2019-12-12T15:51:24Z |
CI: add jinja to deps | diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml
index 903a4b4a222f1..2bd11c9030325 100644
--- a/ci/deps/azure-windows-36.yaml
+++ b/ci/deps/azure-windows-36.yaml
@@ -20,6 +20,7 @@ dependencies:
- numexpr
- numpy=1.15.*
- openpyxl
+ - jinja2
- pyarrow>=0.12.0
- pytables
- python-dateutil
| i think this should fix the failing windows-py36 azure build | https://api.github.com/repos/pandas-dev/pandas/pulls/29984 | 2019-12-03T02:08:53Z | 2019-12-03T10:20:59Z | 2019-12-03T10:20:59Z | 2019-12-03T16:09:16Z |
CI: Using more appropriate GitHub Actions syntax to set path | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f68080d05bea6..a36420556ae24 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -15,12 +15,12 @@ jobs:
runs-on: ubuntu-latest
steps:
+ - name: Setting conda path
+ run: echo "::add-path::${HOME}/miniconda3/bin"
+
- name: Checkout
uses: actions/checkout@v1
- - name: Setting conda path
- run: echo "::set-env name=PATH::${HOME}/miniconda3/bin:${PATH}"
-
- name: Looking for unwanted patterns
run: ci/code_checks.sh patterns
if: true
| Looks like GitHub Actions provides a specific command to add to the PATH, which is simpler (and also works on Windows, which is not required in this case, but will be when we add the test jobs). | https://api.github.com/repos/pandas-dev/pandas/pulls/29983 | 2019-12-03T00:14:25Z | 2019-12-04T13:59:39Z | 2019-12-04T13:59:39Z | 2019-12-04T13:59:42Z |
CLN: assorted pytables cleanups, remove unwanted assertion | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 4874135430d1d..a48d9abc3c13b 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -361,9 +361,6 @@ def read_hdf(
>>> df.to_hdf('./store.h5', 'data')
>>> reread = pd.read_hdf('./store.h5')
"""
- assert not kwargs, kwargs
- # NB: in principle more kwargs could be passed to HDFStore, but in
- # tests none are.
if mode not in ["r", "r+", "a"]:
raise ValueError(
@@ -500,13 +497,14 @@ class HDFStore:
"""
_handle: Optional["File"]
+ _mode: str
_complevel: int
_fletcher32: bool
def __init__(
self,
path,
- mode=None,
+ mode: str = "a",
complevel: Optional[int] = None,
complib=None,
fletcher32: bool = False,
@@ -837,7 +835,13 @@ def select_as_coordinates(
raise TypeError("can only read_coordinates with a table")
return tbl.read_coordinates(where=where, start=start, stop=stop)
- def select_column(self, key: str, column: str, **kwargs):
+ def select_column(
+ self,
+ key: str,
+ column: str,
+ start: Optional[int] = None,
+ stop: Optional[int] = None,
+ ):
"""
return a single column from the table. This is generally only useful to
select an indexable
@@ -845,8 +849,10 @@ def select_column(self, key: str, column: str, **kwargs):
Parameters
----------
key : str
- column: str
+ column : str
The column of interest.
+ start : int or None, default None
+ stop : int or None, default None
Raises
------
@@ -859,7 +865,7 @@ def select_column(self, key: str, column: str, **kwargs):
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_column with a table")
- return tbl.read_column(column=column, **kwargs)
+ return tbl.read_column(column=column, start=start, stop=stop)
def select_as_multiple(
self,
@@ -2582,9 +2588,9 @@ class Fixed:
Parameters
----------
-
- parent : my parent HDFStore
- group : the group node where the table resides
+ parent : HDFStore
+ group : Node
+ The group node where the table resides.
"""
pandas_kind: str
@@ -2871,7 +2877,7 @@ def read_index(
return self.read_multi_index(key, start=start, stop=stop)
elif variety == "regular":
node = getattr(self.group, key)
- _, index = self.read_index_node(node, start=start, stop=stop)
+ index = self.read_index_node(node, start=start, stop=stop)
return index
else: # pragma: no cover
raise TypeError(f"unrecognized index variety: {variety}")
@@ -2931,13 +2937,13 @@ def read_multi_index(
levels = []
codes = []
- names = []
+ names: List[Optional[Hashable]] = []
for i in range(nlevels):
level_key = f"{key}_level{i}"
node = getattr(self.group, level_key)
- name, lev = self.read_index_node(node, start=start, stop=stop)
+ lev = self.read_index_node(node, start=start, stop=stop)
levels.append(lev)
- names.append(name)
+ names.append(lev.name)
label_key = f"{key}_label{i}"
level_codes = self.read_array(label_key, start=start, stop=stop)
@@ -2949,7 +2955,7 @@ def read_multi_index(
def read_index_node(
self, node: "Node", start: Optional[int] = None, stop: Optional[int] = None
- ):
+ ) -> Index:
data = node[start:stop]
# If the index was an empty array write_array_empty() will
# have written a sentinel. Here we relace it with the original.
@@ -2997,7 +3003,7 @@ def read_index_node(
index.name = name
- return name, index
+ return index
def write_array_empty(self, key: str, value):
""" write a 0-len array """
@@ -3131,7 +3137,6 @@ def write(self, obj, **kwargs):
class BlockManagerFixed(GenericFixed):
attributes = ["ndim", "nblocks"]
- is_shape_reversed = False
nblocks: int
@@ -3158,10 +3163,6 @@ def shape(self):
shape.append(items)
- # hacky - this works for frames, but is reversed for panels
- if self.is_shape_reversed:
- shape = shape[::-1]
-
return shape
except AttributeError:
return None
@@ -3259,7 +3260,6 @@ class Table(Fixed):
table_type: str
levels = 1
is_table = True
- is_shape_reversed = False
index_axes: List[IndexCol]
non_index_axes: List[Tuple[int, Any]]
@@ -3302,7 +3302,7 @@ def __repr__(self) -> str:
f"ncols->{self.ncols},indexers->[{jindex_axes}]{dc})"
)
- def __getitem__(self, c):
+ def __getitem__(self, c: str):
""" return the axis for c """
for a in self.axes:
if c == a.name:
@@ -3345,10 +3345,6 @@ def is_multi_index(self) -> bool:
"""the levels attribute is 1 or a list in the case of a multi-index"""
return isinstance(self.levels, list)
- def validate_metadata(self, existing):
- """ create / validate metadata """
- self.metadata = [c.name for c in self.values_axes if c.metadata is not None]
-
def validate_multiindex(self, obj):
"""validate that we can store the multi-index; reset and return the
new object
@@ -3651,8 +3647,8 @@ def read_axes(
Parameters
----------
where : ???
- start: int or None, default None
- stop: int or None, default None
+ start : int or None, default None
+ stop : int or None, default None
Returns
-------
@@ -3946,7 +3942,7 @@ def get_blk_items(mgr, blocks):
self.validate_min_itemsize(min_itemsize)
# validate our metadata
- self.validate_metadata(existing_table)
+ self.metadata = [c.name for c in self.values_axes if c.metadata is not None]
# validate the axes if we have an existing table
if validate:
@@ -4122,7 +4118,13 @@ class WORMTable(Table):
table_type = "worm"
- def read(self, **kwargs):
+ def read(
+ self,
+ where=None,
+ columns=None,
+ start: Optional[int] = None,
+ stop: Optional[int] = None,
+ ):
""" read the indices and the indexing array, calculate offset rows and
return """
raise NotImplementedError("WORMTable needs to implement read")
@@ -4479,8 +4481,7 @@ def write(self, obj, data_columns=None, **kwargs):
""" we are going to write this as a frame table """
if not isinstance(obj, DataFrame):
name = obj.name or "values"
- obj = DataFrame({name: obj}, index=obj.index)
- obj.columns = [name]
+ obj = obj.to_frame(name)
return super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs)
def read(
| #29977 included an assertion that I meant to remove before merging. This removes that, followed by some misc cleanup: annotations, docstrings, remove is_shape_reversed (which was for Panel compat) | https://api.github.com/repos/pandas-dev/pandas/pulls/29982 | 2019-12-02T23:55:35Z | 2019-12-03T18:53:03Z | 2019-12-03T18:53:03Z | 2020-04-05T17:35:01Z |
REF: move attribute setting towards the constructor | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 4874135430d1d..b5a329c0c0936 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2203,12 +2203,6 @@ def take_data(self):
self.data, data = None, self.data
return data
- def set_metadata(self, metadata):
- """ record the metadata """
- if metadata is not None:
- metadata = np.array(metadata, copy=False).ravel()
- self.metadata = metadata
-
def set_kind(self):
# set my kind if we can
@@ -2240,7 +2234,6 @@ def set_kind(self):
def set_atom(
self,
block,
- block_items,
existing_col,
min_itemsize,
nan_rep,
@@ -2250,13 +2243,15 @@ def set_atom(
):
""" create and setup my atom from the block b """
- self.values = list(block_items)
-
# short-cut certain block types
if block.is_categorical:
- return self.set_atom_categorical(block, items=block_items, info=info)
+ self.set_atom_categorical(block)
+ self.update_info(info)
+ return
elif block.is_datetimetz:
- return self.set_atom_datetime64tz(block, info=info)
+ self.set_atom_datetime64tz(block)
+ self.update_info(info)
+ return
elif block.is_datetime:
return self.set_atom_datetime64(block)
elif block.is_timedelta:
@@ -2284,13 +2279,7 @@ def set_atom(
# end up here ###
elif inferred_type == "string" or dtype == "object":
self.set_atom_string(
- block,
- block_items,
- existing_col,
- min_itemsize,
- nan_rep,
- encoding,
- errors,
+ block, existing_col, min_itemsize, nan_rep, encoding, errors,
)
# set as a data block
@@ -2301,7 +2290,7 @@ def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize, shape=block.shape[0])
def set_atom_string(
- self, block, block_items, existing_col, min_itemsize, nan_rep, encoding, errors
+ self, block, existing_col, min_itemsize, nan_rep, encoding, errors
):
# fill nan items with myself, don't disturb the blocks by
# trying to downcast
@@ -2316,13 +2305,14 @@ def set_atom_string(
# we cannot serialize this data, so report an exception on a column
# by column basis
- for i, item in enumerate(block_items):
+ for i in range(len(block.shape[0])):
col = block.iget(i)
inferred_type = lib.infer_dtype(col.ravel(), skipna=False)
if inferred_type != "string":
+ iloc = block.mgr_locs.indexer[i]
raise TypeError(
- f"Cannot serialize the column [{item}] because\n"
+ f"Cannot serialize the column [{iloc}] because\n"
f"its data contents are [{inferred_type}] object dtype"
)
@@ -2375,7 +2365,7 @@ def set_atom_data(self, block):
self.typ = self.get_atom_data(block)
self.set_data(block.values.astype(self.typ.type, copy=False))
- def set_atom_categorical(self, block, items, info=None):
+ def set_atom_categorical(self, block):
# currently only supports a 1-D categorical
# in a 1-D block
@@ -2385,8 +2375,6 @@ def set_atom_categorical(self, block, items, info=None):
self.dtype = codes.dtype.name
if values.ndim > 1:
raise NotImplementedError("only support 1-d categoricals")
- if len(items) > 1:
- raise NotImplementedError("only support single block categoricals")
# write the codes; must be in a block shape
self.ordered = values.ordered
@@ -2395,10 +2383,7 @@ def set_atom_categorical(self, block, items, info=None):
# write the categories
self.meta = "category"
- self.set_metadata(block.values.categories)
-
- # update the info
- self.update_info(info)
+ self.metadata = np.array(block.values.categories, copy=False).ravel()
def get_atom_datetime64(self, block):
return _tables().Int64Col(shape=block.shape[0])
@@ -2409,7 +2394,7 @@ def set_atom_datetime64(self, block):
values = block.values.view("i8")
self.set_data(values, "datetime64")
- def set_atom_datetime64tz(self, block, info):
+ def set_atom_datetime64tz(self, block):
values = block.values
@@ -2418,7 +2403,6 @@ def set_atom_datetime64tz(self, block, info):
# store a converted timezone
self.tz = _get_tz(block.values.tz)
- self.update_info(info)
self.kind = "datetime64"
self.typ = self.get_atom_datetime64(block)
@@ -3926,9 +3910,9 @@ def get_blk_items(mgr, blocks):
existing_col = None
col = klass.create_for_block(i=i, name=name, version=self.version)
+ col.values = list(b_items)
col.set_atom(
block=b,
- block_items=b_items,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
| The end goal is to minimize the amount of attribute setting that is done outside of the constructor (or create_for_block which is a classmethod and ill give it a pass).
create_for_block is called in one place, and immediately after that set_atom is called on the result. set_atom does a ton of attribute setting, so the idea here is to gradually move that attribute setting up into create_for_block.
Related but kept for a separate branch, most of the affected methods have `block` arguments, these can all be changed to operate on block.values, which will narrow the exposure to internals. | https://api.github.com/repos/pandas-dev/pandas/pulls/29979 | 2019-12-02T21:04:51Z | 2019-12-04T14:01:50Z | 2019-12-04T14:01:49Z | 2019-12-04T16:57:56Z |
CLN: explicit kwargs for select | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 5a42df92ddf84..6cbf342751469 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -284,7 +284,19 @@ def to_hdf(
f(path_or_buf)
-def read_hdf(path_or_buf, key=None, mode: str = "r", **kwargs):
+def read_hdf(
+ path_or_buf,
+ key=None,
+ mode: str = "r",
+ errors: str = "strict",
+ where=None,
+ start: Optional[int] = None,
+ stop: Optional[int] = None,
+ columns=None,
+ iterator=False,
+ chunksize: Optional[int] = None,
+ **kwargs,
+):
"""
Read from the store, close it if we opened it.
@@ -350,6 +362,9 @@ def read_hdf(path_or_buf, key=None, mode: str = "r", **kwargs):
>>> df.to_hdf('./store.h5', 'data')
>>> reread = pd.read_hdf('./store.h5')
"""
+ assert not kwargs, kwargs
+ # NB: in principle more kwargs could be passed to HDFStore, but in
+ # tests none are.
if mode not in ["r", "r+", "a"]:
raise ValueError(
@@ -357,8 +372,8 @@ def read_hdf(path_or_buf, key=None, mode: str = "r", **kwargs):
f"Allowed modes are r, r+ and a."
)
# grab the scope
- if "where" in kwargs:
- kwargs["where"] = _ensure_term(kwargs["where"], scope_level=1)
+ if where is not None:
+ where = _ensure_term(where, scope_level=1)
if isinstance(path_or_buf, HDFStore):
if not path_or_buf.is_open:
@@ -382,7 +397,7 @@ def read_hdf(path_or_buf, key=None, mode: str = "r", **kwargs):
if not exists:
raise FileNotFoundError(f"File {path_or_buf} does not exist")
- store = HDFStore(path_or_buf, mode=mode, **kwargs)
+ store = HDFStore(path_or_buf, mode=mode, errors=errors, **kwargs)
# can't auto open/close if we are using an iterator
# so delegate to the iterator
auto_close = True
@@ -405,7 +420,16 @@ def read_hdf(path_or_buf, key=None, mode: str = "r", **kwargs):
"contains multiple datasets."
)
key = candidate_only_group._v_pathname
- return store.select(key, auto_close=auto_close, **kwargs)
+ return store.select(
+ key,
+ where=where,
+ start=start,
+ stop=stop,
+ columns=columns,
+ iterator=iterator,
+ chunksize=chunksize,
+ auto_close=auto_close,
+ )
except (ValueError, TypeError, KeyError):
if not isinstance(path_or_buf, HDFStore):
# if there is an error, close the store if we opened it.
@@ -734,7 +758,6 @@ def select(
iterator=False,
chunksize=None,
auto_close: bool = False,
- **kwargs,
):
"""
Retrieve pandas object stored in file, optionally based on where criteria.
@@ -850,7 +873,6 @@ def select_as_multiple(
iterator=False,
chunksize=None,
auto_close: bool = False,
- **kwargs,
):
"""
Retrieve pandas objects from multiple tables.
@@ -888,7 +910,7 @@ def select_as_multiple(
stop=stop,
iterator=iterator,
chunksize=chunksize,
- **kwargs,
+ auto_close=auto_close,
)
if not isinstance(keys, (list, tuple)):
| Questions for @jreback on follow-ups:
1) Note in `read_hdf` kwargs is still present in the signature, but i put an assertion that they are empty. In principle we could pass more junk to HDFStore, but we never do in tests. Should we allow it, or can those kwargs be deleted altogether?
2) the current signature for `select_column` is `(self, key: str, column: str, **kwargs)`. These kwargs are passed to `read_column`, which would accept "where", "start", "stop". In tests we pass start and stop, but never where. Can we rule out "where"?
| https://api.github.com/repos/pandas-dev/pandas/pulls/29977 | 2019-12-02T17:17:57Z | 2019-12-02T23:23:30Z | 2019-12-02T23:23:30Z | 2019-12-02T23:44:14Z |
TYP: add some types to pandas/io/parquet.py | diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index edbf60cc91d0b..1ab311516fa02 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -1,5 +1,6 @@
""" parquet compat """
+from typing import Any, Dict, Optional
from warnings import catch_warnings
from pandas.compat._optional import import_optional_dependency
@@ -10,7 +11,7 @@
from pandas.io.common import get_filepath_or_buffer, is_gcs_url, is_s3_url
-def get_engine(engine):
+def get_engine(engine: str) -> "BaseImpl":
""" return our implementation """
if engine == "auto":
@@ -35,19 +36,15 @@ def get_engine(engine):
"support"
)
- if engine not in ["pyarrow", "fastparquet"]:
- raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
-
if engine == "pyarrow":
return PyArrowImpl()
elif engine == "fastparquet":
return FastParquetImpl()
+ raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
-class BaseImpl:
-
- api = None # module
+class BaseImpl:
@staticmethod
def validate_dataframe(df):
@@ -74,7 +71,7 @@ def read(self, path, columns=None, **kwargs):
class PyArrowImpl(BaseImpl):
def __init__(self):
- pyarrow = import_optional_dependency(
+ import_optional_dependency(
"pyarrow", extra="pyarrow is required for parquet support."
)
import pyarrow.parquet
@@ -87,13 +84,14 @@ def write(
path,
compression="snappy",
coerce_timestamps="ms",
- index=None,
+ index: Optional[bool] = None,
partition_cols=None,
**kwargs,
):
self.validate_dataframe(df)
path, _, _, _ = get_filepath_or_buffer(path, mode="wb")
+ from_pandas_kwargs: Dict[str, Any]
if index is None:
from_pandas_kwargs = {}
else:
@@ -203,7 +201,7 @@ def to_parquet(
path,
engine="auto",
compression="snappy",
- index=None,
+ index: Optional[bool] = None,
partition_cols=None,
**kwargs,
):
| broken off #28339
| https://api.github.com/repos/pandas-dev/pandas/pulls/29972 | 2019-12-02T16:38:57Z | 2019-12-02T23:15:50Z | 2019-12-02T23:15:50Z | 2019-12-03T12:41:35Z |
CLN: explicit kwargs and docstring for create_index | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 39e9d467b652f..df377bb4e1e7c 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1191,13 +1191,31 @@ def append_to_multiple(
self.append(k, val, data_columns=dc, **kwargs)
- def create_table_index(self, key: str, **kwargs):
+ def create_table_index(
+ self,
+ key: str,
+ columns=None,
+ optlevel: Optional[int] = None,
+ kind: Optional[str] = None,
+ ):
"""
Create a pytables index on the table.
Parameters
----------
key : str
+ columns : None, bool, or listlike[str]
+ Indicate which columns to create an index on.
+
+ * False : Do not create any indexes.
+ * True : Create indexes on all columns.
+ * None : Create indexes on all columns.
+ * listlike : Create indexes on the given columns.
+
+ optlevel : int or None, default None
+ Optimization level, if None, pytables defaults to 6.
+ kind : str or None, default None
+ Kind of index, if None, pytables defaults to "medium"
Raises
------
@@ -1212,7 +1230,7 @@ def create_table_index(self, key: str, **kwargs):
if not isinstance(s, Table):
raise TypeError("cannot create table index on a Fixed format store")
- s.create_index(**kwargs)
+ s.create_index(columns=columns, optlevel=optlevel, kind=kind)
def groups(self):
"""
@@ -3526,7 +3544,7 @@ def f(i, c):
return self._indexables
- def create_index(self, columns=None, optlevel=None, kind=None):
+ def create_index(self, columns=None, optlevel=None, kind: Optional[str] = None):
"""
Create a pytables index on the specified columns
note: cannot index Time64Col() or ComplexCol currently;
@@ -3534,10 +3552,18 @@ def create_index(self, columns=None, optlevel=None, kind=None):
Parameters
----------
- columns : False (don't create an index), True (create all columns
- index), None or list_like (the indexers to index)
- optlevel: optimization level (defaults to 6)
- kind : kind of index (defaults to 'medium')
+ columns : None, bool, or listlike[str]
+ Indicate which columns to create an index on.
+
+ * False : Do not create any indexes.
+ * True : Create indexes on all columns.
+ * None : Create indexes on all columns.
+ * listlike : Create indexes on the given columns.
+
+ optlevel : int or None, default None
+ Optimization level, if None, pytables defaults to 6.
+ kind : str or None, default None
+ Kind of index, if None, pytables defaults to "medium"
Raises
------
| Side-note, in `create_index`, none of our tests hit the `v.remove_index()` calls. If we could rule those out, this function could be simplified quite a bit. | https://api.github.com/repos/pandas-dev/pandas/pulls/29971 | 2019-12-02T16:34:11Z | 2019-12-02T23:16:38Z | 2019-12-02T23:16:38Z | 2019-12-02T23:19:09Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.