title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
CLN: consistent signatures for equals methods | diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx
index 321d7c374d8ec..0c3d8915b749b 100644
--- a/pandas/_libs/sparse.pyx
+++ b/pandas/_libs/sparse.pyx
@@ -103,7 +103,7 @@ cdef class IntIndex(SparseIndex):
if not monotonic:
raise ValueError("Indices must be strictly increasing")
- def equals(self, other) -> bool:
+ def equals(self, other: object) -> bool:
if not isinstance(other, IntIndex):
return False
@@ -399,7 +399,7 @@ cdef class BlockIndex(SparseIndex):
if blengths[i] == 0:
raise ValueError(f'Zero-length block {i}')
- def equals(self, other) -> bool:
+ def equals(self, other: object) -> bool:
if not isinstance(other, BlockIndex):
return False
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 921927325a144..d85647edc3b81 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -7,7 +7,7 @@
without warning.
"""
import operator
-from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
+from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union, cast
import numpy as np
@@ -20,7 +20,12 @@
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.cast import maybe_cast_to_extension_array
-from pandas.core.dtypes.common import is_array_like, is_list_like, pandas_dtype
+from pandas.core.dtypes.common import (
+ is_array_like,
+ is_dtype_equal,
+ is_list_like,
+ pandas_dtype,
+)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
@@ -742,7 +747,7 @@ def searchsorted(self, value, side="left", sorter=None):
arr = self.astype(object)
return arr.searchsorted(value, side=side, sorter=sorter)
- def equals(self, other: "ExtensionArray") -> bool:
+ def equals(self, other: object) -> bool:
"""
Return if another array is equivalent to this array.
@@ -762,7 +767,8 @@ def equals(self, other: "ExtensionArray") -> bool:
"""
if not type(self) == type(other):
return False
- elif not self.dtype == other.dtype:
+ other = cast(ExtensionArray, other)
+ if not is_dtype_equal(self.dtype, other.dtype):
return False
elif not len(self) == len(other):
return False
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 6e5c7bc699962..a28b341669918 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2242,7 +2242,7 @@ def _from_factorized(cls, uniques, original):
original.categories.take(uniques), dtype=original.dtype
)
- def equals(self, other):
+ def equals(self, other: object) -> bool:
"""
Returns True if categorical arrays are equal.
@@ -2254,7 +2254,9 @@ def equals(self, other):
-------
bool
"""
- if self.is_dtype_equal(other):
+ if not isinstance(other, Categorical):
+ return False
+ elif self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index fcb7e2a949205..651c079ecc08e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -22,6 +22,7 @@
Tuple,
Type,
Union,
+ cast,
)
import warnings
import weakref
@@ -1195,7 +1196,7 @@ def _indexed_same(self, other) -> bool:
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
- def equals(self, other):
+ def equals(self, other: object) -> bool:
"""
Test whether two objects contain the same elements.
@@ -1275,6 +1276,7 @@ def equals(self, other):
"""
if not (isinstance(other, type(self)) or isinstance(self, type(other))):
return False
+ other = cast(NDFrame, other)
return self._mgr.equals(other._mgr)
# -------------------------------------------------------------------------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index ecd3670e724a1..623ce68201492 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4176,7 +4176,7 @@ def putmask(self, mask, value):
# coerces to object
return self.astype(object).putmask(mask, value)
- def equals(self, other: Any) -> bool:
+ def equals(self, other: object) -> bool:
"""
Determine if two Index object are equal.
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index fb283cbe02954..4990e6a8e20e9 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -290,7 +290,7 @@ def _is_dtype_compat(self, other) -> bool:
return other
- def equals(self, other) -> bool:
+ def equals(self, other: object) -> bool:
"""
Determine if two CategoricalIndex objects contain the same elements.
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 8ccdab21339df..6d9d75a69e91d 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -24,7 +24,7 @@
is_scalar,
)
from pandas.core.dtypes.concat import concat_compat
-from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
+from pandas.core.dtypes.generic import ABCIndex, ABCSeries
from pandas.core import algorithms
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
@@ -130,14 +130,14 @@ def __array_wrap__(self, result, context=None):
# ------------------------------------------------------------------------
- def equals(self, other) -> bool:
+ def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
- if not isinstance(other, ABCIndexClass):
+ if not isinstance(other, Index):
return False
elif not isinstance(other, type(self)):
try:
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 9548ebbd9c3b2..e8d0a44324cc5 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1005,19 +1005,20 @@ def _format_space(self) -> str:
def argsort(self, *args, **kwargs) -> np.ndarray:
return np.lexsort((self.right, self.left))
- def equals(self, other) -> bool:
+ def equals(self, other: object) -> bool:
"""
Determines if two IntervalIndex objects contain the same elements.
"""
if self.is_(other):
return True
- # if we can coerce to an II
- # then we can compare
+ # if we can coerce to an IntervalIndex then we can compare
if not isinstance(other, IntervalIndex):
if not is_interval_dtype(other):
return False
other = Index(other)
+ if not isinstance(other, IntervalIndex):
+ return False
return (
self.left.equals(other.left)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 13927dede5542..9ca3075d2962c 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3227,7 +3227,7 @@ def truncate(self, before=None, after=None):
verify_integrity=False,
)
- def equals(self, other) -> bool:
+ def equals(self, other: object) -> bool:
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
@@ -3270,11 +3270,10 @@ def equals(self, other) -> bool:
np.asarray(other.levels[i]._values), other_codes, allow_fill=False
)
- # since we use NaT both datetime64 and timedelta64
- # we can have a situation where a level is typed say
- # timedelta64 in self (IOW it has other values than NaT)
- # but types datetime64 in other (where its all NaT)
- # but these are equivalent
+ # since we use NaT both datetime64 and timedelta64 we can have a
+ # situation where a level is typed say timedelta64 in self (IOW it
+ # has other values than NaT) but types datetime64 in other (where
+ # its all NaT) but these are equivalent
if len(self_values) == 0 and len(other_values) == 0:
continue
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 3577a7aacc008..6080c32052266 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -433,7 +433,7 @@ def argsort(self, *args, **kwargs) -> np.ndarray:
else:
return np.arange(len(self) - 1, -1, -1)
- def equals(self, other) -> bool:
+ def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index aa74d173d69b3..371b721f08b27 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1437,7 +1437,10 @@ def take(self, indexer, axis: int = 1, verify: bool = True, convert: bool = True
new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True
)
- def equals(self, other: "BlockManager") -> bool:
+ def equals(self, other: object) -> bool:
+ if not isinstance(other, BlockManager):
+ return False
+
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
| https://api.github.com/repos/pandas-dev/pandas/pulls/35636 | 2020-08-08T19:28:27Z | 2020-08-12T12:22:55Z | 2020-08-12T12:22:55Z | 2020-08-14T11:13:43Z | |
BUG: DataFrame.apply with func altering row in-place | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index f0ad9d1ca3b0f..a2e9a2feaeeab 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -20,6 +20,7 @@ Fixed regressions
- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`)
- Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`)
- Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`)
+- Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 6b8d7dc35fe95..6d44cf917a07a 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -389,6 +389,8 @@ def series_generator(self):
blk = mgr.blocks[0]
for (arr, name) in zip(values, self.index):
+ # GH#35462 re-pin mgr in case setitem changed it
+ ser._mgr = mgr
blk.values = arr
ser.name = name
yield ser
diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py
index 3a32278e2a4b1..538978358c8e7 100644
--- a/pandas/tests/frame/apply/test_frame_apply.py
+++ b/pandas/tests/frame/apply/test_frame_apply.py
@@ -1522,3 +1522,22 @@ def test_apply_dtype(self, col):
expected = df.dtypes
tm.assert_series_equal(result, expected)
+
+
+def test_apply_mutating():
+ # GH#35462 case where applied func pins a new BlockManager to a row
+ df = pd.DataFrame({"a": range(100), "b": range(100, 200)})
+
+ def func(row):
+ mgr = row._mgr
+ row.loc["a"] += 1
+ assert row._mgr is not mgr
+ return row
+
+ expected = df.copy()
+ expected["a"] += 1
+
+ result = df.apply(func, axis=1)
+
+ tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(df, result)
| closes #35462
closes #35634
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35633 | 2020-08-08T17:55:50Z | 2020-08-11T00:01:58Z | 2020-08-11T00:01:58Z | 2020-08-11T08:40:19Z |
REF: _cython_agg_blocks follow patterns similar to _apply_blockwise | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 53242c0332a8c..b7280a9f7db3c 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1026,8 +1026,7 @@ def _cython_agg_blocks(
if numeric_only:
data = data.get_numeric_data(copy=False)
- agg_blocks: List[Block] = []
- new_items: List[np.ndarray] = []
+ agg_blocks: List["Block"] = []
deleted_items: List[np.ndarray] = []
no_result = object()
@@ -1056,11 +1055,12 @@ def cast_result_block(result, block: "Block", how: str) -> "Block":
# reshape to be valid for non-Extension Block
result = result.reshape(1, -1)
- agg_block: Block = block.make_block(result)
+ agg_block: "Block" = block.make_block(result)
return agg_block
- for block in data.blocks:
- # Avoid inheriting result from earlier in the loop
+ def blk_func(block: "Block") -> List["Block"]:
+ new_blocks: List["Block"] = []
+
result = no_result
locs = block.mgr_locs.as_array
try:
@@ -1076,8 +1076,7 @@ def cast_result_block(result, block: "Block", how: str) -> "Block":
# we cannot perform the operation
# in an alternate way, exclude the block
assert how == "ohlc"
- deleted_items.append(locs)
- continue
+ raise
# call our grouper again with only this block
obj = self.obj[data.items[locs]]
@@ -1096,8 +1095,7 @@ def cast_result_block(result, block: "Block", how: str) -> "Block":
except TypeError:
# we may have an exception in trying to aggregate
# continue and exclude the block
- deleted_items.append(locs)
- continue
+ raise
else:
result = cast(DataFrame, result)
# unwrap DataFrame to get array
@@ -1108,20 +1106,33 @@ def cast_result_block(result, block: "Block", how: str) -> "Block":
# clean, we choose to clean up this mess later on.
assert len(locs) == result.shape[1]
for i, loc in enumerate(locs):
- new_items.append(np.array([loc], dtype=locs.dtype))
agg_block = result.iloc[:, [i]]._mgr.blocks[0]
- agg_blocks.append(agg_block)
+ new_blocks.append(agg_block)
else:
result = result._mgr.blocks[0].values
if isinstance(result, np.ndarray) and result.ndim == 1:
result = result.reshape(1, -1)
agg_block = cast_result_block(result, block, how)
- new_items.append(locs)
- agg_blocks.append(agg_block)
+ new_blocks = [agg_block]
else:
agg_block = cast_result_block(result, block, how)
- new_items.append(locs)
- agg_blocks.append(agg_block)
+ new_blocks = [agg_block]
+ return new_blocks
+
+ skipped: List[int] = []
+ new_items: List[np.ndarray] = []
+ for i, block in enumerate(data.blocks):
+ try:
+ nbs = blk_func(block)
+ except (NotImplementedError, TypeError):
+ # TypeError -> we may have an exception in trying to aggregate
+ # continue and exclude the block
+ # NotImplementedError -> "ohlc" with wrong dtype
+ skipped.append(i)
+ deleted_items.append(block.mgr_locs.as_array)
+ else:
+ agg_blocks.extend(nbs)
+ new_items.append(block.mgr_locs.as_array)
if not agg_blocks:
raise DataError("No numeric types to aggregate")
| Follows #35535
This isn't yet identical to apply_blockwise, because both this and apply_blockwise have some odd behaviors that need to get standardized before they get combined into a BlockManager method, but this is a move in that direction. | https://api.github.com/repos/pandas-dev/pandas/pulls/35632 | 2020-08-08T17:48:52Z | 2020-08-12T22:15:36Z | 2020-08-12T22:15:36Z | 2020-08-12T22:49:04Z |
TST: add test for agg on ordered categorical cols | diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index ce9d4b892d775..8fe450fe6abfc 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -1063,6 +1063,85 @@ def test_groupby_get_by_index():
pd.testing.assert_frame_equal(res, expected)
+@pytest.mark.parametrize(
+ "grp_col_dict, exp_data",
+ [
+ ({"nr": "min", "cat_ord": "min"}, {"nr": [1, 5], "cat_ord": ["a", "c"]}),
+ ({"cat_ord": "min"}, {"cat_ord": ["a", "c"]}),
+ ({"nr": "min"}, {"nr": [1, 5]}),
+ ],
+)
+def test_groupby_single_agg_cat_cols(grp_col_dict, exp_data):
+ # test single aggregations on ordered categorical cols GHGH27800
+
+ # create the result dataframe
+ input_df = pd.DataFrame(
+ {
+ "nr": [1, 2, 3, 4, 5, 6, 7, 8],
+ "cat_ord": list("aabbccdd"),
+ "cat": list("aaaabbbb"),
+ }
+ )
+
+ input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
+ input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
+ result_df = input_df.groupby("cat").agg(grp_col_dict)
+
+ # create expected dataframe
+ cat_index = pd.CategoricalIndex(
+ ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
+ )
+
+ expected_df = pd.DataFrame(data=exp_data, index=cat_index)
+
+ tm.assert_frame_equal(result_df, expected_df)
+
+
+@pytest.mark.parametrize(
+ "grp_col_dict, exp_data",
+ [
+ ({"nr": ["min", "max"], "cat_ord": "min"}, [(1, 4, "a"), (5, 8, "c")]),
+ ({"nr": "min", "cat_ord": ["min", "max"]}, [(1, "a", "b"), (5, "c", "d")]),
+ ({"cat_ord": ["min", "max"]}, [("a", "b"), ("c", "d")]),
+ ],
+)
+def test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data):
+ # test combined aggregations on ordered categorical cols GH27800
+
+ # create the result dataframe
+ input_df = pd.DataFrame(
+ {
+ "nr": [1, 2, 3, 4, 5, 6, 7, 8],
+ "cat_ord": list("aabbccdd"),
+ "cat": list("aaaabbbb"),
+ }
+ )
+
+ input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
+ input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
+ result_df = input_df.groupby("cat").agg(grp_col_dict)
+
+ # create expected dataframe
+ cat_index = pd.CategoricalIndex(
+ ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
+ )
+
+ # unpack the grp_col_dict to create the multi-index tuple
+ # this tuple will be used to create the expected dataframe index
+ multi_index_list = []
+ for k, v in grp_col_dict.items():
+ if isinstance(v, list):
+ for value in v:
+ multi_index_list.append([k, value])
+ else:
+ multi_index_list.append([k, v])
+ multi_index = pd.MultiIndex.from_tuples(tuple(multi_index_list))
+
+ expected_df = pd.DataFrame(data=exp_data, columns=multi_index, index=cat_index)
+
+ tm.assert_frame_equal(result_df, expected_df)
+
+
def test_nonagg_agg():
# GH 35490 - Single/Multiple agg of non-agg function give same results
# TODO: agg should raise for functions that don't aggregate
| - [x] closes #27800
- [x] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35630 | 2020-08-08T14:44:51Z | 2020-08-21T22:34:52Z | 2020-08-21T22:34:52Z | 2020-08-21T22:34:57Z |
DOC: docstrings for __array_wrap__ | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 834cd992f5650..fcb7e2a949205 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1772,7 +1772,28 @@ def empty(self) -> bool_t:
def __array__(self, dtype=None) -> np.ndarray:
return np.asarray(self._values, dtype=dtype)
- def __array_wrap__(self, result, context=None):
+ def __array_wrap__(
+ self,
+ result: np.ndarray,
+ context: Optional[Tuple[Callable, Tuple[Any, ...], int]] = None,
+ ):
+ """
+ Gets called after a ufunc and other functions.
+
+ Parameters
+ ----------
+ result: np.ndarray
+ The result of the ufunc or other function called on the NumPy array
+ returned by __array__
+ context: tuple of (func, tuple, int)
+ This parameter is returned by ufuncs as a 3-element tuple: (name of the
+ ufunc, arguments of the ufunc, domain of the ufunc), but is not set by
+ other numpy functions.q
+
+ Notes
+ -----
+ Series implements __array_ufunc_ so this not called for ufunc on Series.
+ """
result = lib.item_from_zerodim(result)
if is_scalar(result):
# e.g. we get here with np.ptp(series)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index bfdfbd35f27ad..bd75a064b483e 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -574,7 +574,7 @@ def __array__(self, dtype=None) -> np.ndarray:
def __array_wrap__(self, result, context=None):
"""
- Gets called after a ufunc.
+ Gets called after a ufunc and other functions.
"""
result = lib.item_from_zerodim(result)
if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1:
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 0ce057d6e764a..8ccdab21339df 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -116,7 +116,7 @@ def values(self):
def __array_wrap__(self, result, context=None):
"""
- Gets called after a ufunc.
+ Gets called after a ufunc and other functions.
"""
result = lib.item_from_zerodim(result)
if is_bool_dtype(result) or lib.is_scalar(result):
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index c7199e4a28a17..11334803d4583 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -345,10 +345,13 @@ def _int64index(self) -> Int64Index:
def __array_wrap__(self, result, context=None):
"""
- Gets called after a ufunc. Needs additional handling as
- PeriodIndex stores internal data as int dtype
+ Gets called after a ufunc and other functions.
- Replace this to __numpy_ufunc__ in future version
+ Needs additional handling as PeriodIndex stores internal data as int
+ dtype
+
+ Replace this to __numpy_ufunc__ in future version and implement
+ __array_function__ for Indexes
"""
if isinstance(context, tuple) and len(context) > 0:
func = context[0]
| non-code changes broken-off #35334 | https://api.github.com/repos/pandas-dev/pandas/pulls/35629 | 2020-08-08T14:08:27Z | 2020-08-08T14:52:21Z | 2020-08-08T14:52:21Z | 2020-08-08T16:33:26Z |
TYP: update setup.cfg | diff --git a/setup.cfg b/setup.cfg
index 84c281b756395..e4c0b3dcf37ef 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -175,9 +175,6 @@ check_untyped_defs=False
[mypy-pandas.core.groupby.ops]
check_untyped_defs=False
-[mypy-pandas.core.indexes.base]
-check_untyped_defs=False
-
[mypy-pandas.core.indexes.datetimes]
check_untyped_defs=False
@@ -214,9 +211,6 @@ check_untyped_defs=False
[mypy-pandas.core.window.common]
check_untyped_defs=False
-[mypy-pandas.core.window.ewm]
-check_untyped_defs=False
-
[mypy-pandas.core.window.expanding]
check_untyped_defs=False
| https://api.github.com/repos/pandas-dev/pandas/pulls/35628 | 2020-08-08T13:52:21Z | 2020-08-08T17:49:25Z | 2020-08-08T17:49:25Z | 2020-08-08T18:03:00Z | |
DEPR: Deprecate inplace param in MultiIndex.set_codes and MultiIndex.set_levels | diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index 6843dd1eadc81..cac18f5bf39cd 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -1532,12 +1532,8 @@ Setting metadata
~~~~~~~~~~~~~~~~
Indexes are "mostly immutable", but it is possible to set and change their
-metadata, like the index ``name`` (or, for ``MultiIndex``, ``levels`` and
-``codes``).
-
-You can use the ``rename``, ``set_names``, ``set_levels``, and ``set_codes``
-to set these attributes directly. They default to returning a copy; however,
-you can specify ``inplace=True`` to have the data change in place.
+``name`` attribute. You can use the ``rename``, ``set_names`` to set these attributes
+directly, and they default to returning a copy.
See :ref:`Advanced Indexing <advanced>` for usage of MultiIndexes.
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 33e70daa55e66..d3bccada09c29 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -47,7 +47,7 @@ Other enhancements
Deprecations
~~~~~~~~~~~~
-
+- Deprecated parameter ``inplace`` in :meth:`MultiIndex.set_codes` and :meth:`MultiIndex.set_levels` (:issue:`35626`)
-
-
diff --git a/pandas/conftest.py b/pandas/conftest.py
index e0adb37e7d2f5..c1925b4f5ca3b 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -359,7 +359,7 @@ def multiindex_year_month_day_dataframe_random_data():
tdf = tm.makeTimeDataFrame(100)
ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
# use Int64Index, to make sure things work
- ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels], inplace=True)
+ ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels])
ymd.index.set_names(["year", "month", "day"], inplace=True)
return ymd
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index a6e8ec0707de7..13927dede5542 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -740,7 +740,7 @@ def _set_levels(
self._tuples = None
self._reset_cache()
- def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
+ def set_levels(self, levels, level=None, inplace=None, verify_integrity=True):
"""
Set new levels on MultiIndex. Defaults to returning new index.
@@ -752,6 +752,8 @@ def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
+
+ .. deprecated:: 1.2.0
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
@@ -822,6 +824,15 @@ def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels
FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])
"""
+ if inplace is not None:
+ warnings.warn(
+ "inplace is deprecated and will be removed in a future version.",
+ FutureWarning,
+ stacklevel=2,
+ )
+ else:
+ inplace = False
+
if is_list_like(levels) and not isinstance(levels, Index):
levels = list(levels)
@@ -898,7 +909,7 @@ def _set_codes(
self._tuples = None
self._reset_cache()
- def set_codes(self, codes, level=None, inplace=False, verify_integrity=True):
+ def set_codes(self, codes, level=None, inplace=None, verify_integrity=True):
"""
Set new codes on MultiIndex. Defaults to returning new index.
@@ -914,6 +925,8 @@ def set_codes(self, codes, level=None, inplace=False, verify_integrity=True):
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
+
+ .. deprecated:: 1.2.0
verify_integrity : bool (default True)
If True, checks that levels and codes are compatible.
@@ -958,6 +971,15 @@ def set_codes(self, codes, level=None, inplace=False, verify_integrity=True):
(1, 'two')],
names=['foo', 'bar'])
"""
+ if inplace is not None:
+ warnings.warn(
+ "inplace is deprecated and will be removed in a future version.",
+ FutureWarning,
+ stacklevel=2,
+ )
+ else:
+ inplace = False
+
if level is not None and not is_list_like(level):
if not is_list_like(codes):
raise TypeError("Codes must be list-like")
diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py
index 5216c3be116e0..dcc33428d18a5 100644
--- a/pandas/tests/frame/methods/test_sort_index.py
+++ b/pandas/tests/frame/methods/test_sort_index.py
@@ -555,8 +555,8 @@ def test_sort_index_and_reconstruction(self):
),
)
- df.columns.set_levels(
- pd.to_datetime(df.columns.levels[1]), level=1, inplace=True
+ df.columns = df.columns.set_levels(
+ pd.to_datetime(df.columns.levels[1]), level=1
)
assert not df.columns.is_lexsorted()
assert not df.columns.is_monotonic
diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py
index d1f66af4a8e83..b2500efef9e03 100644
--- a/pandas/tests/indexes/multi/test_compat.py
+++ b/pandas/tests/indexes/multi/test_compat.py
@@ -84,7 +84,8 @@ def test_inplace_mutation_resets_values():
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
- mi1.set_levels(levels2, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
@@ -103,7 +104,8 @@ def test_inplace_mutation_resets_values():
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
- mi2.set_codes(codes2, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ mi2.set_codes(codes2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py
index e48731b9c8099..9add4b478da47 100644
--- a/pandas/tests/indexes/multi/test_duplicates.py
+++ b/pandas/tests/indexes/multi/test_duplicates.py
@@ -91,7 +91,8 @@ def test_duplicate_multiindex_codes():
mi = MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]])
msg = r"Level values must be unique: \[[AB', ]+\] on level 0"
with pytest.raises(ValueError, match=msg):
- mi.set_levels([["A", "B", "A", "A", "B"], [2, 1, 3, -2, 5]], inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ mi.set_levels([["A", "B", "A", "A", "B"], [2, 1, 3, -2, 5]], inplace=True)
@pytest.mark.parametrize("names", [["a", "b", "a"], [1, 1, 2], [1, "a", 1]])
diff --git a/pandas/tests/indexes/multi/test_equivalence.py b/pandas/tests/indexes/multi/test_equivalence.py
index 063ede028add7..b48f09457b96c 100644
--- a/pandas/tests/indexes/multi/test_equivalence.py
+++ b/pandas/tests/indexes/multi/test_equivalence.py
@@ -192,10 +192,12 @@ def test_is_():
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
- mi4.set_levels([list(range(10)), list(range(10))], inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ mi4.set_levels([list(range(10)), list(range(10))], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
- mi5.set_levels(mi5.levels, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py
index 8a3deca0236e4..b9132f429905d 100644
--- a/pandas/tests/indexes/multi/test_get_set.py
+++ b/pandas/tests/indexes/multi/test_get_set.py
@@ -93,7 +93,8 @@ def test_set_levels(idx):
# level changing [w/ mutation]
ind2 = idx.copy()
- inplace_return = ind2.set_levels(new_levels, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
@@ -113,20 +114,23 @@ def test_set_levels(idx):
# level changing specific level [w/ mutation]
ind2 = idx.copy()
- inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(idx.levels, levels)
ind2 = idx.copy()
- inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(idx.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = idx.copy()
- inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(idx.levels, levels)
@@ -136,19 +140,23 @@ def test_set_levels(idx):
original_index = idx.copy()
for inplace in [True, False]:
with pytest.raises(ValueError, match="^On"):
- idx.set_levels(["c"], level=0, inplace=inplace)
+ with tm.assert_produces_warning(FutureWarning):
+ idx.set_levels(["c"], level=0, inplace=inplace)
assert_matching(idx.levels, original_index.levels, check_dtype=True)
with pytest.raises(ValueError, match="^On"):
- idx.set_codes([0, 1, 2, 3, 4, 5], level=0, inplace=inplace)
+ with tm.assert_produces_warning(FutureWarning):
+ idx.set_codes([0, 1, 2, 3, 4, 5], level=0, inplace=inplace)
assert_matching(idx.codes, original_index.codes, check_dtype=True)
with pytest.raises(TypeError, match="^Levels"):
- idx.set_levels("c", level=0, inplace=inplace)
+ with tm.assert_produces_warning(FutureWarning):
+ idx.set_levels("c", level=0, inplace=inplace)
assert_matching(idx.levels, original_index.levels, check_dtype=True)
with pytest.raises(TypeError, match="^Codes"):
- idx.set_codes(1, level=0, inplace=inplace)
+ with tm.assert_produces_warning(FutureWarning):
+ idx.set_codes(1, level=0, inplace=inplace)
assert_matching(idx.codes, original_index.codes, check_dtype=True)
@@ -168,7 +176,8 @@ def test_set_codes(idx):
# changing label w/ mutation
ind2 = idx.copy()
- inplace_return = ind2.set_codes(new_codes, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ inplace_return = ind2.set_codes(new_codes, inplace=True)
assert inplace_return is None
assert_matching(ind2.codes, new_codes)
@@ -188,20 +197,23 @@ def test_set_codes(idx):
# label changing specific level w/ mutation
ind2 = idx.copy()
- inplace_return = ind2.set_codes(new_codes[0], level=0, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ inplace_return = ind2.set_codes(new_codes[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.codes, [new_codes[0], codes[1]])
assert_matching(idx.codes, codes)
ind2 = idx.copy()
- inplace_return = ind2.set_codes(new_codes[1], level=1, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ inplace_return = ind2.set_codes(new_codes[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.codes, [codes[0], new_codes[1]])
assert_matching(idx.codes, codes)
# codes changing multiple levels [w/ mutation]
ind2 = idx.copy()
- inplace_return = ind2.set_codes(new_codes, level=[0, 1], inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ inplace_return = ind2.set_codes(new_codes, level=[0, 1], inplace=True)
assert inplace_return is None
assert_matching(ind2.codes, new_codes)
assert_matching(idx.codes, codes)
@@ -217,7 +229,8 @@ def test_set_codes(idx):
# [w/ mutation]
result = ind.copy()
- result.set_codes(codes=new_codes, level=1, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ result.set_codes(codes=new_codes, level=1, inplace=True)
assert result.equals(expected)
@@ -329,3 +342,19 @@ def test_set_levels_with_iterable():
[expected_sizes, colors], names=["size", "color"]
)
tm.assert_index_equal(result, expected)
+
+
+@pytest.mark.parametrize("inplace", [True, False])
+def test_set_codes_inplace_deprecated(idx, inplace):
+ new_codes = idx.codes[1][::-1]
+
+ with tm.assert_produces_warning(FutureWarning):
+ idx.set_codes(codes=new_codes, level=1, inplace=inplace)
+
+
+@pytest.mark.parametrize("inplace", [True, False])
+def test_set_levels_inplace_deprecated(idx, inplace):
+ new_level = idx.levels[1].copy()
+
+ with tm.assert_produces_warning(FutureWarning):
+ idx.set_levels(levels=new_level, level=1, inplace=inplace)
diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py
index fd150bb4d57a2..c776a33717ccd 100644
--- a/pandas/tests/indexes/multi/test_integrity.py
+++ b/pandas/tests/indexes/multi/test_integrity.py
@@ -220,7 +220,8 @@ def test_metadata_immutable(idx):
def test_level_setting_resets_attributes():
ind = pd.MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]])
assert ind.is_monotonic
- ind.set_levels([["A", "B"], [1, 3, 2]], inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ ind.set_levels([["A", "B"], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
diff --git a/pandas/tests/indexing/multiindex/test_sorted.py b/pandas/tests/indexing/multiindex/test_sorted.py
index 572cb9da405d1..bafe5068e1418 100644
--- a/pandas/tests/indexing/multiindex/test_sorted.py
+++ b/pandas/tests/indexing/multiindex/test_sorted.py
@@ -43,9 +43,13 @@ def test_frame_getitem_not_sorted2(self, key):
df2 = df.set_index(["col1", "col2"])
df2_original = df2.copy()
- return_value = df2.index.set_levels(["b", "d", "a"], level="col1", inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ return_value = df2.index.set_levels(
+ ["b", "d", "a"], level="col1", inplace=True
+ )
assert return_value is None
- return_value = df2.index.set_codes([0, 1, 0, 2], level="col1", inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ return_value = df2.index.set_codes([0, 1, 0, 2], level="col1", inplace=True)
assert return_value is None
assert not df2.index.is_lexsorted()
assert not df2.index.is_monotonic
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index 2b75a1ec6ca6e..79879ef346f53 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -799,7 +799,7 @@ def test_invalid_separator(self):
expected = expected.set_index(["id", "year"])[
["X", "A2010", "A2011", "B2010", "A", "B"]
]
- expected.index.set_levels([0, 1], level=0, inplace=True)
+ expected.index = expected.index.set_levels([0, 1], level=0)
result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=sep)
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
@@ -861,7 +861,7 @@ def test_invalid_suffixtype(self):
expected = pd.DataFrame(exp_data).astype({"year": "int"})
expected = expected.set_index(["id", "year"])
- expected.index.set_levels([0, 1], level=0, inplace=True)
+ expected.index = expected.index.set_levels([0, 1], level=0)
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 1ba73292dc0b4..724558bd49ea2 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -63,8 +63,8 @@ def setup_method(self, method):
).sum()
# use Int64Index, to make sure things work
- self.ymd.index.set_levels(
- [lev.astype("i8") for lev in self.ymd.index.levels], inplace=True
+ self.ymd.index = self.ymd.index.set_levels(
+ [lev.astype("i8") for lev in self.ymd.index.levels]
)
self.ymd.index.set_names(["year", "month", "day"], inplace=True)
| Deprecates ``inplace`` parameter in ``MultiIndex.set_codes`` and ``MultiIndex.set_levels``.
Allowing inplace ops in indexes makes them more complicated wtr. caching etc. After ``inplace`` is removed, ``MultiIndex`` will be immutable, except the ``names`` attribute, similarly to other index classes. | https://api.github.com/repos/pandas-dev/pandas/pulls/35626 | 2020-08-08T12:13:30Z | 2020-08-10T13:18:54Z | 2020-08-10T13:18:54Z | 2020-08-10T17:52:24Z |
DOC: Fixed docstring for mode() | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 9e70120f67969..b571502f52127 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1786,7 +1786,9 @@ def count(self, level=None):
def mode(self, dropna=True) -> "Series":
"""
- Return the mode(s) of the dataset.
+ Return the mode(s) of the Series.
+
+ The mode is the value that appears most often. There can be multiple modes.
Always returns Series even if only one value is returned.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35624 | 2020-08-08T10:14:59Z | 2020-08-22T16:03:21Z | 2020-08-22T16:03:21Z | 2020-08-24T08:07:17Z |
Backport PR #35621 on branch 1.1.x (CI: Linux py36_locale failures with pytest DeprecationWarning) | diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml
index a9b9a5a47ccf5..3034ed3dc43af 100644
--- a/ci/deps/azure-36-locale.yaml
+++ b/ci/deps/azure-36-locale.yaml
@@ -7,7 +7,7 @@ dependencies:
# tools
- cython>=0.29.16
- - pytest>=5.0.1
+ - pytest>=5.0.1,<6.0.0 # https://github.com/pandas-dev/pandas/issues/35620
- pytest-xdist>=1.21
- pytest-asyncio
- hypothesis>=3.58.0
| Backport PR #35621: CI: Linux py36_locale failures with pytest DeprecationWarning | https://api.github.com/repos/pandas-dev/pandas/pulls/35623 | 2020-08-08T09:50:46Z | 2020-08-08T12:23:06Z | 2020-08-08T12:23:06Z | 2020-08-08T12:23:06Z |
CI: Linux py36_locale failures with pytest DeprecationWarning | diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml
index a9b9a5a47ccf5..3034ed3dc43af 100644
--- a/ci/deps/azure-36-locale.yaml
+++ b/ci/deps/azure-36-locale.yaml
@@ -7,7 +7,7 @@ dependencies:
# tools
- cython>=0.29.16
- - pytest>=5.0.1
+ - pytest>=5.0.1,<6.0.0 # https://github.com/pandas-dev/pandas/issues/35620
- pytest-xdist>=1.21
- pytest-asyncio
- hypothesis>=3.58.0
| xref #35620 | https://api.github.com/repos/pandas-dev/pandas/pulls/35621 | 2020-08-08T09:11:12Z | 2020-08-08T09:47:44Z | 2020-08-08T09:47:44Z | 2020-08-08T09:50:57Z |
Backport PR #35588 on branch 1.1.x (BUG: fix styler cell_ids arg so that blank style is ignored on False) | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index a044a4aab284e..ade88a6127014 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -27,6 +27,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
+- Bug in ``Styler`` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`).
Categorical
^^^^^^^^^^^
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index fd1efa2d1b668..584f42a6cab12 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -390,7 +390,7 @@ def format_attr(pair):
"is_visible": (c not in hidden_columns),
}
# only add an id if the cell has a style
- if self.cell_ids or not (len(ctx[r, c]) == 1 and ctx[r, c][0] == ""):
+ if self.cell_ids or (r, c) in ctx:
row_dict["id"] = "_".join(cs[1:])
row_es.append(row_dict)
props = []
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index 9c6910637fa7e..3ef5157655e78 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -1682,6 +1682,12 @@ def f(a, b, styler):
result = styler.pipe((f, "styler"), a=1, b=2)
assert result == (1, 2, styler)
+ def test_no_cell_ids(self):
+ # GH 35588
+ df = pd.DataFrame(data=[[0]])
+ s = Styler(df, uuid="_", cell_ids=False).render()
+ assert s.find('<td class="data row0 col0" >') != -1
+
@td.skip_if_no_mpl
class TestStylerMatplotlibDep:
| Backport PR #35588: BUG: fix styler cell_ids arg so that blank style is ignored on False | https://api.github.com/repos/pandas-dev/pandas/pulls/35619 | 2020-08-08T08:13:08Z | 2020-08-08T09:21:19Z | 2020-08-08T09:21:19Z | 2020-08-08T09:21:19Z |
Backport PR #35473 on branch 1.1.x (REGR: Fix conversion of mixed dtype DataFrame to numpy str) | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index a044a4aab284e..c3d0104ad9df2 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
+- Fixed regression where :meth:`DataFrame.to_numpy` would raise a ``RuntimeError`` for mixed dtypes when converting to ``str`` (:issue:`35455`)
- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`)
- Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3e4c9393f74de..b7286ce86d24e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1371,6 +1371,8 @@ def to_numpy(
result = self._mgr.as_array(
transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value
)
+ if result.dtype is not dtype:
+ result = np.array(result, dtype=dtype, copy=False)
return result
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 4693cc193c27c..e6e2b06e1873e 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -867,6 +867,8 @@ def _interleave(self, dtype=None, na_value=lib.no_default) -> np.ndarray:
dtype = dtype.subtype
elif is_extension_array_dtype(dtype):
dtype = "object"
+ elif is_dtype_equal(dtype, str):
+ dtype = "object"
result = np.empty(self.shape, dtype=dtype)
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 2b79fc8cd3406..cc57a3970d18b 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -367,6 +367,13 @@ def test_to_numpy_copy(self):
assert df.to_numpy(copy=False).base is arr
assert df.to_numpy(copy=True).base is not arr
+ def test_to_numpy_mixed_dtype_to_str(self):
+ # https://github.com/pandas-dev/pandas/issues/35455
+ df = pd.DataFrame([[pd.Timestamp("2020-01-01 00:00:00"), 100.0]])
+ result = df.to_numpy(dtype=str)
+ expected = np.array([["2020-01-01 00:00:00", "100.0"]], dtype=str)
+ tm.assert_numpy_array_equal(result, expected)
+
def test_swapaxes(self):
df = DataFrame(np.random.randn(10, 5))
tm.assert_frame_equal(df.T, df.swapaxes(0, 1))
| Backport PR #35473: REGR: Fix conversion of mixed dtype DataFrame to numpy str | https://api.github.com/repos/pandas-dev/pandas/pulls/35617 | 2020-08-07T23:17:28Z | 2020-08-08T09:46:38Z | 2020-08-08T09:46:38Z | 2020-08-08T09:46:39Z |
DOC: compression support for file objects in to_csv | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 87f25f578c3c6..834cd992f5650 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3088,7 +3088,7 @@ def to_csv(
.. versionchanged:: 1.2.0
- Compression is supported for non-binary file objects.
+ Compression is supported for binary file objects.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I made a mistake in #35129: `to_csv` supports the compression argument for file objects in binary mode (not for file objects in text/non-binary mode). | https://api.github.com/repos/pandas-dev/pandas/pulls/35615 | 2020-08-07T22:45:08Z | 2020-08-08T01:27:40Z | 2020-08-08T01:27:39Z | 2020-08-08T01:27:40Z |
REF: use consistent pattern in tslibs.vectorized | diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx
index b23f8255a76ac..c3c78ca54885a 100644
--- a/pandas/_libs/tslibs/vectorized.pyx
+++ b/pandas/_libs/tslibs/vectorized.pyx
@@ -275,44 +275,38 @@ cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo t
int64_t[:] deltas
str typ
Py_ssize_t[:] pos
- int64_t delta, local_val
-
- if tz is None or is_utc(tz):
- with nogil:
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- local_val = stamps[i]
- result[i] = normalize_i8_stamp(local_val)
+ int64_t local_val, delta = NPY_NAT
+ bint use_utc = False, use_tzlocal = False, use_fixed = False
+
+ if is_utc(tz) or tz is None:
+ use_utc = True
elif is_tzlocal(tz):
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- local_val = tz_convert_utc_to_tzlocal(stamps[i], tz)
- result[i] = normalize_i8_stamp(local_val)
+ use_tzlocal = True
else:
- # Adjust datetime64 timestamp, recompute datetimestruct
trans, deltas, typ = get_dst_info(tz)
-
if typ not in ["pytz", "dateutil"]:
# static/fixed; in this case we know that len(delta) == 1
+ use_fixed = True
delta = deltas[0]
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- local_val = stamps[i] + delta
- result[i] = normalize_i8_stamp(local_val)
else:
pos = trans.searchsorted(stamps, side="right") - 1
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- local_val = stamps[i] + deltas[pos[i]]
- result[i] = normalize_i8_stamp(local_val)
+
+ for i in range(n):
+ # TODO: reinstate nogil for use_utc case?
+ if stamps[i] == NPY_NAT:
+ result[i] = NPY_NAT
+ continue
+
+ if use_utc:
+ local_val = stamps[i]
+ elif use_tzlocal:
+ local_val = tz_convert_utc_to_tzlocal(stamps[i], tz)
+ elif use_fixed:
+ local_val = stamps[i] + delta
+ else:
+ local_val = stamps[i] + deltas[pos[i]]
+
+ result[i] = normalize_i8_stamp(local_val)
return result.base # `.base` to access underlying ndarray
@@ -339,40 +333,36 @@ def is_date_array_normalized(const int64_t[:] stamps, tzinfo tz=None):
ndarray[int64_t] trans
int64_t[:] deltas
intp_t[:] pos
- int64_t local_val, delta
+ int64_t local_val, delta = NPY_NAT
str typ
int64_t day_nanos = 24 * 3600 * 1_000_000_000
+ bint use_utc = False, use_tzlocal = False, use_fixed = False
- if tz is None or is_utc(tz):
- for i in range(n):
- local_val = stamps[i]
- if local_val % day_nanos != 0:
- return False
-
+ if is_utc(tz) or tz is None:
+ use_utc = True
elif is_tzlocal(tz):
- for i in range(n):
- local_val = tz_convert_utc_to_tzlocal(stamps[i], tz)
- if local_val % day_nanos != 0:
- return False
+ use_tzlocal = True
else:
trans, deltas, typ = get_dst_info(tz)
-
if typ not in ["pytz", "dateutil"]:
# static/fixed; in this case we know that len(delta) == 1
+ use_fixed = True
delta = deltas[0]
- for i in range(n):
- # Adjust datetime64 timestamp, recompute datetimestruct
- local_val = stamps[i] + delta
- if local_val % day_nanos != 0:
- return False
+ else:
+ pos = trans.searchsorted(stamps, side="right") - 1
+ for i in range(n):
+ if use_utc:
+ local_val = stamps[i]
+ elif use_tzlocal:
+ local_val = tz_convert_utc_to_tzlocal(stamps[i], tz)
+ elif use_fixed:
+ local_val = stamps[i] + delta
else:
- pos = trans.searchsorted(stamps) - 1
- for i in range(n):
- # Adjust datetime64 timestamp, recompute datetimestruct
- local_val = stamps[i] + deltas[pos[i]]
- if local_val % day_nanos != 0:
- return False
+ local_val = stamps[i] + deltas[pos[i]]
+
+ if local_val % day_nanos != 0:
+ return False
return True
@@ -390,45 +380,38 @@ def dt64arr_to_periodarr(const int64_t[:] stamps, int freq, tzinfo tz):
int64_t[:] deltas
Py_ssize_t[:] pos
npy_datetimestruct dts
- int64_t local_val
+ int64_t local_val, delta = NPY_NAT
+ bint use_utc = False, use_tzlocal = False, use_fixed = False
if is_utc(tz) or tz is None:
- with nogil:
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- dt64_to_dtstruct(stamps[i], &dts)
- result[i] = get_period_ordinal(&dts, freq)
-
+ use_utc = True
elif is_tzlocal(tz):
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- local_val = tz_convert_utc_to_tzlocal(stamps[i], tz)
- dt64_to_dtstruct(local_val, &dts)
- result[i] = get_period_ordinal(&dts, freq)
+ use_tzlocal = True
else:
- # Adjust datetime64 timestamp, recompute datetimestruct
trans, deltas, typ = get_dst_info(tz)
-
if typ not in ["pytz", "dateutil"]:
# static/fixed; in this case we know that len(delta) == 1
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- dt64_to_dtstruct(stamps[i] + deltas[0], &dts)
- result[i] = get_period_ordinal(&dts, freq)
+ use_fixed = True
+ delta = deltas[0]
else:
pos = trans.searchsorted(stamps, side="right") - 1
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts)
- result[i] = get_period_ordinal(&dts, freq)
+ for i in range(n):
+ # TODO: reinstate nogil for use_utc case?
+ if stamps[i] == NPY_NAT:
+ result[i] = NPY_NAT
+ continue
+
+ if use_utc:
+ local_val = stamps[i]
+ elif use_tzlocal:
+ local_val = tz_convert_utc_to_tzlocal(stamps[i], tz)
+ elif use_fixed:
+ local_val = stamps[i] + delta
+ else:
+ local_val = stamps[i] + deltas[pos[i]]
+
+ dt64_to_dtstruct(local_val, &dts)
+ result[i] = get_period_ordinal(&dts, freq)
return result.base # .base to get underlying ndarray
| DO NOT MERGE - Performance takes a hit
We have edited some of the functions in `tslibs.vectorized to do some pre-processing in order to de-duplicate some code. This new pattern is conducive to refactoring out helper functions for further de-duplication.
So far, moving to the new pattern has been performance-neutral or better. But for the functions which this branch moves to the new pattern, performance takes a hit, particularly for dt64arr_to_periodarray. I am at a loss as to why this would be, hoping more eyeballs will help. cc @WillAyd
```
asv continuous -E virtualenv -f 1.01 master HEAD -b tslibs
[...]
before after ratio
[d82e5403] [9f5d9ef6]
<master~4> <ref-vectorized>
+ 21.4±0.3ms 53.7±6ms 2.51 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 1011, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 248±4μs 613±50μs 2.48 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 3000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 21.9±0.3ms 53.8±6ms 2.46 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 1000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 238±8μs 574±60μs 2.41 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 1011, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 268±3μs 617±70μs 2.30 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 7000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 246±5μs 562±70μs 2.28 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 2011, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 282±10μs 636±70μs 2.26 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 12000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 274±20μs 616±70μs 2.24 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 11000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 270±6μs 599±60μs 2.22 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 6000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 248±4μs 544±40μs 2.20 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 2000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 23.4±0.4ms 51.3±2ms 2.19 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 2000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 278±8μs 605±70μs 2.18 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 8000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 290±9μs 620±100μs 2.14 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 9000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 286±40μs 611±60μs 2.14 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 10000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 259±20μs 538±60μs 2.08 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 1000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 35.5±10ms 73.5±20ms 2.07 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 9000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 300±5μs 610±60μs 2.03 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 4006, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 289±8μs 586±60μs 2.03 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 4000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 38.3±10ms 77.3±20ms 2.02 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 10000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 36.6±10ms 70.9±20ms 1.94 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 11000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 26.1±1ms 49.9±1ms 1.91 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 2011, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 38.8±10ms 72.4±20ms 1.87 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 12000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 27.0±3ms 50.2±0.5ms 1.86 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 3000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 35.3±10ms 64.6±10ms 1.83 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 7000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 35.2±9ms 64.3±10ms 1.83 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 6000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 38.0±1ms 67.7±10ms 1.78 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 4006, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 358±7μs 637±70μs 1.78 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(10000, 5000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 9.43±0.2μs 16.7±2μs 1.77 tslibs.resolution.TimeResolution.time_get_resolution('s', 100, tzfile('/usr/share/zoneinfo/Asia/Tokyo'))
+ 3.10±0.04μs 5.49±1μs 1.77 tslibs.fields.TimeGetStartEndField.time_get_start_end_field(1, 'start', 'month', None, 5)
+ 37.7±10ms 66.5±10ms 1.76 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 8000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 33.5±5ms 57.0±2ms 1.70 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 4000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 26.5±0.1μs 42.8±10μs 1.61 tslibs.tz_convert.TimeTZConvert.time_tz_localize_to_utc(100, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 45.3±8ms 70.5±10ms 1.56 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 5000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 24.7±0.2μs 38.2±10μs 1.54 tslibs.tz_convert.TimeTZConvert.time_tz_localize_to_utc(100, <DstTzInfo 'US/Pacific' LMT-1 day, 16:07:00 STD>)
+ 1.55±0.08ms 2.36±0.2ms 1.52 tslibs.normalize.Normalize.time_is_date_array_normalized(1000000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 327±4ms 491±100ms 1.50 tslibs.tslib.TimeIntsToPydatetime.time_ints_to_pydatetime('time', 1000000, None)
+ 24.7±0.9ms 36.6±10ms 1.48 tslibs.fields.TimeGetDateField.time_get_date_field(1000000, 'dow')
+ 3.29±0.06μs 4.87±1μs 1.48 tslibs.fields.TimeGetStartEndField.time_get_start_end_field(1, 'start', 'month', None, 12)
+ 338±2ms 497±100ms 1.47 tslibs.tslib.TimeIntsToPydatetime.time_ints_to_pydatetime('time', 1000000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 22.7±0.3μs 32.0±3μs 1.41 tslibs.normalize.Normalize.time_is_date_array_normalized(10000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 12.2±0.5μs 17.1±4μs 1.41 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(100, 2000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 12.0±0.4μs 16.5±4μs 1.38 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(100, 2011, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 12.1±0.3μs 16.8±2μs 1.38 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(100, 12000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 12.2±0.2μs 16.6±2μs 1.36 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(100, 11000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 11.7±0.3μs 15.9±1μs 1.35 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(100, 3000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 12.6±0.3μs 16.5±2μs 1.31 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(100, 4000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 8.89±0.6μs 11.3±1μs 1.27 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(0, 2000, tzfile('/usr/share/zoneinfo/Asia/Tokyo'))
+ 115±2μs 143±20μs 1.25 tslibs.normalize.Normalize.time_is_date_array_normalized(10000, <DstTzInfo 'US/Pacific' LMT-1 day, 16:07:00 STD>)
+ 12.8±0.3μs 15.9±2μs 1.24 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(100, 6000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 61.7±2μs 75.0±7μs 1.22 tslibs.normalize.Normalize.time_is_date_array_normalized(10000, tzfile('/usr/share/zoneinfo/Asia/Tokyo'))
+ 7.58±0.08ms 8.61±0.1ms 1.14 tslibs.normalize.Normalize.time_is_date_array_normalized(1000000, tzfile('/usr/share/zoneinfo/Asia/Tokyo'))
+ 8.80±0.07ms 9.55±0.4ms 1.09 tslibs.normalize.Normalize.time_normalize_i8_timestamps(1000000, tzfile('/usr/share/zoneinfo/Asia/Tokyo'))
- 3.46±0.05μs 3.06±0.1μs 0.89 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1, 3000, tzlocal())
- 25.2±0.9ms 21.2±0.5ms 0.84 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 2011, datetime.timezone.utc)
- 2.39±0.1ms 2.01±0.3ms 0.84 tslibs.normalize.Normalize.time_normalize_i8_timestamps(1000000, datetime.timezone(datetime.timedelta(seconds=3600)))
- 47.0±0.5ms 38.7±0.8ms 0.82 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 2011, <DstTzInfo 'US/Pacific' LMT-1 day, 16:07:00 STD>)
- 38.5±3ms 31.2±0.4ms 0.81 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 3000, tzfile('/usr/share/zoneinfo/Asia/Tokyo'))
- 36.4±1ms 29.3±0.7ms 0.81 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 2011, tzfile('/usr/share/zoneinfo/Asia/Tokyo'))
- 25.7±3ms 20.7±0.2ms 0.80 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 3000, datetime.timezone.utc)
- 25.7±3ms 20.5±0.2ms 0.80 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1000000, 3000, None)
- 1.88±0.02ms 607±200μs 0.32 tslibs.tz_convert.TimeTZConvert.time_tz_convert_from_utc(1000000, datetime.timezone.utc)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/35613 | 2020-08-07T21:31:30Z | 2020-08-11T00:35:39Z | 2020-08-11T00:35:39Z | 2020-08-11T15:30:43Z |
Backport PR #35562 on branch 1.1.x (BUG: Ensure rolling groupby doesn't segfault with center=True) | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 6b315e0a9d016..a044a4aab284e 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -18,6 +18,7 @@ Fixed regressions
- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`)
- Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`)
+- Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py
index 0898836ed2e0e..bc36bdca982e8 100644
--- a/pandas/core/window/indexers.py
+++ b/pandas/core/window/indexers.py
@@ -319,4 +319,10 @@ def get_window_bounds(
end_arrays.append(window_indicies.take(end))
start = np.concatenate(start_arrays)
end = np.concatenate(end_arrays)
+ # GH 35552: Need to adjust start and end based on the nans appended to values
+ # when center=True
+ if num_values > len(start):
+ offset = num_values - len(start)
+ start = np.concatenate([start, np.array([end[-1]] * offset)])
+ end = np.concatenate([end, np.array([end[-1]] * offset)])
return start, end
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index ca5a9eccea4f5..5241b9548a442 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -215,6 +215,71 @@ def foo(x):
)
tm.assert_series_equal(result, expected)
+ def test_groupby_rolling_center_center(self):
+ # GH 35552
+ series = Series(range(1, 6))
+ result = series.groupby(series).rolling(center=True, window=3).mean()
+ expected = Series(
+ [np.nan] * 5,
+ index=pd.MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3), (5, 4))),
+ )
+ tm.assert_series_equal(result, expected)
+
+ series = Series(range(1, 5))
+ result = series.groupby(series).rolling(center=True, window=3).mean()
+ expected = Series(
+ [np.nan] * 4,
+ index=pd.MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3))),
+ )
+ tm.assert_series_equal(result, expected)
+
+ df = pd.DataFrame({"a": ["a"] * 5 + ["b"] * 6, "b": range(11)})
+ result = df.groupby("a").rolling(center=True, window=3).mean()
+ expected = pd.DataFrame(
+ [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, 9, np.nan],
+ index=pd.MultiIndex.from_tuples(
+ (
+ ("a", 0),
+ ("a", 1),
+ ("a", 2),
+ ("a", 3),
+ ("a", 4),
+ ("b", 5),
+ ("b", 6),
+ ("b", 7),
+ ("b", 8),
+ ("b", 9),
+ ("b", 10),
+ ),
+ names=["a", None],
+ ),
+ columns=["b"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+ df = pd.DataFrame({"a": ["a"] * 5 + ["b"] * 5, "b": range(10)})
+ result = df.groupby("a").rolling(center=True, window=3).mean()
+ expected = pd.DataFrame(
+ [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, np.nan],
+ index=pd.MultiIndex.from_tuples(
+ (
+ ("a", 0),
+ ("a", 1),
+ ("a", 2),
+ ("a", 3),
+ ("a", 4),
+ ("b", 5),
+ ("b", 6),
+ ("b", 7),
+ ("b", 8),
+ ("b", 9),
+ ),
+ names=["a", None],
+ ),
+ columns=["b"],
+ )
+ tm.assert_frame_equal(result, expected)
+
def test_groupby_subselect_rolling(self):
# GH 35486
df = DataFrame(
| Backport PR #35562: BUG: Ensure rolling groupby doesn't segfault with center=True | https://api.github.com/repos/pandas-dev/pandas/pulls/35610 | 2020-08-07T18:19:30Z | 2020-08-07T19:00:47Z | 2020-08-07T19:00:47Z | 2020-08-07T19:00:47Z |
Doc notes for core team members | diff --git a/doc/source/development/maintaining.rst b/doc/source/development/maintaining.rst
index 9f9e9dc2631f3..cd084ab263477 100644
--- a/doc/source/development/maintaining.rst
+++ b/doc/source/development/maintaining.rst
@@ -132,17 +132,24 @@ respond or self-close their issue if it's determined that the behavior is not a
or the feature is out of scope. Sometimes reporters just go away though, and
we'll close the issue after the conversation has died.
+.. _maintaining.reviewing:
+
Reviewing pull requests
-----------------------
Anybody can review a pull request: regular contributors, triagers, or core-team
-members. Here are some guidelines to check.
+members. But only core-team members can merge pull requets when they're ready.
+
+Here are some things to check when reviewing a pull request.
-* Tests should be in a sensible location.
+* Tests should be in a sensible location: in the same file as closely related tests.
* New public APIs should be included somewhere in ``doc/source/reference/``.
* New / changed API should use the ``versionadded`` or ``versionchanged`` directives in the docstring.
* User-facing changes should have a whatsnew in the appropriate file.
* Regression tests should reference the original GitHub issue number like ``# GH-1234``.
+* The pull request should be labeled and assigned the appropriate milestone (the next patch release
+ for regression fixes and small bug fixes, the next minor milestone otherwise)
+* Changes should comply with our :ref:`policies.version`.
Cleaning up old issues
----------------------
@@ -189,5 +196,34 @@ being helpful on the issue tracker.
The current list of core-team members is at
https://github.com/pandas-dev/pandas-governance/blob/master/people.md
+
+.. _maintaining.merging:
+
+Merging pull requests
+---------------------
+
+Only core team members can merge pull requests. We have a few guidelines.
+
+1. You should typically not self-merge your own pull requests. Exceptions include
+ things like small changes to fix CI (e.g. pinning a package version).
+2. You should not merge pull requests that have an active discussion, or pull
+ requests that has any ``-1`` votes from a core maintainer. Pandas operates
+ by consensus.
+3. For larger changes, it's good to have a +1 from at least two core team members.
+
+In addition to the items listed in :ref:`maintaining.closing`, you should verify
+that the pull request is assigned the correct milestone.
+
+Pull requests merged with a patch-release milestone will typically be backported
+by our bot. Verify that the bot noticed the merge (it will leave a comment within
+a minute typically). If a manual backport is needed please do that, and remove
+the "Needs backport" label once you've done it manually. If you forget to assign
+a milestone before tagging, you can request the bot to backport it with:
+
+.. code-block:: console
+
+ @Meeseeksdev backport <branch>
+
+
.. _governance documents: https://github.com/pandas-dev/pandas-governance
-.. _list of permissions: https://help.github.com/en/github/setting-up-and-managing-organizations-and-teams/repository-permission-levels-for-an-organization
\ No newline at end of file
+.. _list of permissions: https://help.github.com/en/github/setting-up-and-managing-organizations-and-teams/repository-permission-levels-for-an-organization
| https://api.github.com/repos/pandas-dev/pandas/pulls/35608 | 2020-08-07T14:07:24Z | 2020-08-10T23:02:12Z | 2020-08-10T23:02:12Z | 2020-08-10T23:02:17Z | |
ENH: Styler column and row styles | diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 12dd72f761408..24f344488d1ca 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -793,7 +793,8 @@
"source": [
"The next option you have are \"table styles\".\n",
"These are styles that apply to the table as a whole, but don't look at the data.\n",
- "Certain stylings, including pseudo-selectors like `:hover` can only be used this way."
+ "Certain stylings, including pseudo-selectors like `:hover` can only be used this way.\n",
+ "These can also be used to set specific row or column based class selectors, as will be shown."
]
},
{
@@ -831,9 +832,32 @@
"The value for `props` should be a list of tuples of `('attribute', 'value')`.\n",
"\n",
"`table_styles` are extremely flexible, but not as fun to type out by hand.\n",
- "We hope to collect some useful ones either in pandas, or preferable in a new package that [builds on top](#Extensibility) the tools here."
+ "We hope to collect some useful ones either in pandas, or preferable in a new package that [builds on top](#Extensibility) the tools here.\n",
+ "\n",
+ "`table_styles` can be used to add column and row based class descriptors. For large tables this can increase performance by avoiding repetitive individual css for each cell, and it can also simplify style construction in some cases.\n",
+ "If `table_styles` is given as a dictionary each key should be a specified column or index value and this will map to specific class CSS selectors of the given column or row.\n",
+ "\n",
+ "Note that `Styler.set_table_styles` will overwrite existing styles but can be chained by setting the `overwrite` argument to `False`."
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "html = html.set_table_styles({\n",
+ " 'B': [dict(selector='', props=[('color', 'green')])],\n",
+ " 'C': [dict(selector='td', props=[('color', 'red')])], \n",
+ " }, overwrite=False)\n",
+ "html"
+ ],
+ "metadata": {
+ "collapsed": false,
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ }
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -922,10 +946,12 @@
"- DataFrame only `(use Series.to_frame().style)`\n",
"- The index and columns must be unique\n",
"- No large repr, and performance isn't great; this is intended for summary DataFrames\n",
- "- You can only style the *values*, not the index or columns\n",
+ "- You can only style the *values*, not the index or columns (except with `table_styles` above)\n",
"- You can only apply styles, you can't insert new HTML entities\n",
"\n",
- "Some of these will be addressed in the future.\n"
+ "Some of these will be addressed in the future.\n",
+ "Performance can suffer when adding styles to each cell in a large DataFrame.\n",
+ "It is recommended to apply table or column based styles where possible to limit overall HTML length, as well as setting a shorter UUID to avoid unnecessary repeated data transmission. \n"
]
},
{
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 727b5ec92bdc4..ce0245036aded 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -232,6 +232,7 @@ Other enhancements
- :class:`Index` with object dtype supports division and multiplication (:issue:`34160`)
- :meth:`DataFrame.explode` and :meth:`Series.explode` now support exploding of sets (:issue:`35614`)
- :meth:`DataFrame.hist` now supports time series (datetime) data (:issue:`32590`)
+- :meth:`Styler.set_table_styles` now allows the direct styling of rows and columns and can be chained (:issue:`35607`)
- ``Styler`` now allows direct CSS class name addition to individual data cells (:issue:`36159`)
- :meth:`Rolling.mean()` and :meth:`Rolling.sum()` use Kahan summation to calculate the mean to avoid numerical problems (:issue:`10319`, :issue:`11645`, :issue:`13254`, :issue:`32761`, :issue:`36031`)
- :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with datetimelike dtypes will now try to cast string arguments (listlike and scalar) to the matching datetimelike type (:issue:`36346`)
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 4b7a5e76cb475..638b4b1aed774 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -991,20 +991,46 @@ def set_caption(self, caption: str) -> "Styler":
self.caption = caption
return self
- def set_table_styles(self, table_styles) -> "Styler":
+ def set_table_styles(self, table_styles, axis=0, overwrite=True) -> "Styler":
"""
Set the table styles on a Styler.
These are placed in a ``<style>`` tag before the generated HTML table.
+ This function can be used to style the entire table, columns, rows or
+ specific HTML selectors.
+
Parameters
----------
- table_styles : list
- Each individual table_style should be a dictionary with
- ``selector`` and ``props`` keys. ``selector`` should be a CSS
- selector that the style will be applied to (automatically
- prefixed by the table's UUID) and ``props`` should be a list of
- tuples with ``(attribute, value)``.
+ table_styles : list or dict
+ If supplying a list, each individual table_style should be a
+ dictionary with ``selector`` and ``props`` keys. ``selector``
+ should be a CSS selector that the style will be applied to
+ (automatically prefixed by the table's UUID) and ``props``
+ should be a list of tuples with ``(attribute, value)``.
+ If supplying a dict, the dict keys should correspond to
+ column names or index values, depending upon the specified
+ `axis` argument. These will be mapped to row or col CSS
+ selectors. MultiIndex values as dict keys should be
+ in their respective tuple form. The dict values should be
+ a list as specified in the form with CSS selectors and
+ props that will be applied to the specified row or column.
+
+ .. versionchanged:: 1.2.0
+
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
+ Apply to each column (``axis=0`` or ``'index'``), to each row
+ (``axis=1`` or ``'columns'``). Only used if `table_styles` is
+ dict.
+
+ .. versionadded:: 1.2.0
+
+ overwrite : boolean, default True
+ Styles are replaced if `True`, or extended if `False`. CSS
+ rules are preserved so most recent styles set will dominate
+ if selectors intersect.
+
+ .. versionadded:: 1.2.0
Returns
-------
@@ -1012,13 +1038,48 @@ def set_table_styles(self, table_styles) -> "Styler":
Examples
--------
- >>> df = pd.DataFrame(np.random.randn(10, 4))
+ >>> df = pd.DataFrame(np.random.randn(10, 4),
+ ... columns=['A', 'B', 'C', 'D'])
>>> df.style.set_table_styles(
... [{'selector': 'tr:hover',
... 'props': [('background-color', 'yellow')]}]
... )
+
+ Adding column styling by name
+
+ >>> df.style.set_table_styles({
+ ... 'A': [{'selector': '',
+ ... 'props': [('color', 'red')]}],
+ ... 'B': [{'selector': 'td',
+ ... 'props': [('color', 'blue')]}]
+ ... }, overwrite=False)
+
+ Adding row styling
+
+ >>> df.style.set_table_styles({
+ ... 0: [{'selector': 'td:hover',
+ ... 'props': [('font-size', '25px')]}]
+ ... }, axis=1, overwrite=False)
"""
- self.table_styles = table_styles
+ if is_dict_like(table_styles):
+ if axis in [0, "index"]:
+ obj, idf = self.data.columns, ".col"
+ else:
+ obj, idf = self.data.index, ".row"
+
+ table_styles = [
+ {
+ "selector": s["selector"] + idf + str(obj.get_loc(key)),
+ "props": s["props"],
+ }
+ for key, styles in table_styles.items()
+ for s in styles
+ ]
+
+ if not overwrite and self.table_styles is not None:
+ self.table_styles.extend(table_styles)
+ else:
+ self.table_styles = table_styles
return self
def set_na_rep(self, na_rep: str) -> "Styler":
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index 79f9bbace000e..64fe8a7730ae2 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -1712,6 +1712,28 @@ def test_set_data_classes(self, classes):
assert '<td class="data row1 col0" >2</td>' in s
assert '<td class="data row1 col1" >3</td>' in s
+ def test_chaining_table_styles(self):
+ # GH 35607
+ df = DataFrame(data=[[0, 1], [1, 2]], columns=["A", "B"])
+ styler = df.style.set_table_styles(
+ [{"selector": "", "props": [("background-color", "yellow")]}]
+ ).set_table_styles(
+ [{"selector": ".col0", "props": [("background-color", "blue")]}],
+ overwrite=False,
+ )
+ assert len(styler.table_styles) == 2
+
+ def test_column_and_row_styling(self):
+ # GH 35607
+ df = DataFrame(data=[[0, 1], [1, 2]], columns=["A", "B"])
+ s = Styler(df, uuid_len=0)
+ s = s.set_table_styles({"A": [{"selector": "", "props": [("color", "blue")]}]})
+ assert "#T__ .col0 {\n color: blue;\n }" in s.render()
+ s = s.set_table_styles(
+ {0: [{"selector": "", "props": [("color", "blue")]}]}, axis=1
+ )
+ assert "#T__ .row0 {\n color: blue;\n }" in s.render()
+
def test_colspan_w3(self):
# GH 36223
df = DataFrame(data=[[1, 2]], columns=[["l0", "l0"], ["l1a", "l1b"]])
| - [x] closes #35605
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/35607 | 2020-08-07T13:05:09Z | 2020-11-24T14:01:40Z | 2020-11-24T14:01:40Z | 2021-01-26T08:10:55Z |
BUG: allow missing values in Index when calling Index.sort_values | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index b07351d05defb..d09d3c6832526 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -267,8 +267,9 @@ Interval
Indexing
^^^^^^^^
+
- Bug in :meth:`PeriodIndex.get_loc` incorrectly raising ``ValueError`` on non-datelike strings instead of ``KeyError``, causing similar errors in :meth:`Series.__geitem__`, :meth:`Series.__contains__`, and :meth:`Series.loc.__getitem__` (:issue:`34240`)
--
+- Bug in :meth:`Index.sort_values` where, when empty values were passed, the method would break by trying to compare missing values instead of pushing them to the end of the sort order. (:issue:`35584`)
-
Missing
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 0878380d00837..5474005a63b8e 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -437,6 +437,29 @@ def index(request):
index_fixture2 = index
+@pytest.fixture(params=indices_dict.keys())
+def index_with_missing(request):
+ """
+ Fixture for indices with missing values
+ """
+ if request.param in ["int", "uint", "range", "empty", "repeats"]:
+ pytest.xfail("missing values not supported")
+ # GH 35538. Use deep copy to avoid illusive bug on np-dev
+ # Azure pipeline that writes into indices_dict despite copy
+ ind = indices_dict[request.param].copy(deep=True)
+ vals = ind.values
+ if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]:
+ # For setting missing values in the top level of MultiIndex
+ vals = ind.tolist()
+ vals[0] = tuple([None]) + vals[0][1:]
+ vals[-1] = tuple([None]) + vals[-1][1:]
+ return MultiIndex.from_tuples(vals)
+ else:
+ vals[0] = None
+ vals[-1] = None
+ return type(ind)(vals)
+
+
# ----------------------------------------------------------------
# Series'
# ----------------------------------------------------------------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 65b5dfb6df911..a1bc8a4659b24 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -88,7 +88,7 @@
import pandas.core.missing as missing
from pandas.core.ops import get_op_result_name
from pandas.core.ops.invalid import make_invalid_op
-from pandas.core.sorting import ensure_key_mapped
+from pandas.core.sorting import ensure_key_mapped, nargsort
from pandas.core.strings import StringMethods
from pandas.io.formats.printing import (
@@ -4443,7 +4443,11 @@ def asof_locs(self, where, mask):
return result
def sort_values(
- self, return_indexer=False, ascending=True, key: Optional[Callable] = None
+ self,
+ return_indexer=False,
+ ascending=True,
+ na_position: str_t = "last",
+ key: Optional[Callable] = None,
):
"""
Return a sorted copy of the index.
@@ -4457,6 +4461,12 @@ def sort_values(
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
+ na_position : {'first' or 'last'}, default 'last'
+ Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
+ the end.
+
+ .. versionadded:: 1.2.0
+
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
@@ -4497,9 +4507,16 @@ def sort_values(
"""
idx = ensure_key_mapped(self, key)
- _as = idx.argsort()
- if not ascending:
- _as = _as[::-1]
+ # GH 35584. Sort missing values according to na_position kwarg
+ # ignore na_position for MutiIndex
+ if not isinstance(self, ABCMultiIndex):
+ _as = nargsort(
+ items=idx, ascending=ascending, na_position=na_position, key=key
+ )
+ else:
+ _as = idx.argsort()
+ if not ascending:
+ _as = _as[::-1]
sorted_index = self.take(_as)
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 2755b186f3eae..b5e150f25f543 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -618,7 +618,7 @@ def test_sort_values(self, closed):
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
- result = index.sort_values(ascending=False)
+ result = index.sort_values(ascending=False, na_position="first")
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index e7dd76584d780..d1b34c315b682 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -174,9 +174,6 @@ def _check_freq(index, expected_index):
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
-
- exp = np.array([2, 1, 3, 4, 0])
- tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
pidx = PeriodIndex(["2011", "2013", "NaT", "2011"], name="pidx", freq="D")
@@ -333,3 +330,16 @@ def test_freq_setter_deprecated(self):
# warning for setter
with pytest.raises(AttributeError, match="can't set attribute"):
idx.freq = pd.offsets.Day()
+
+
+@pytest.mark.xfail(reason="Datetime-like sort_values currently unstable (GH 35922)")
+def test_order_stability_compat():
+ # GH 35584. The new implementation of sort_values for Index.sort_values
+ # is stable when sorting in descending order. Datetime-like sort_values
+ # currently aren't stable. xfail should be removed after
+ # the implementations' behavior is synchronized (xref GH 35922)
+ pidx = PeriodIndex(["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="A")
+ iidx = Index([2011, 2013, 2015, 2012, 2011], name="idx")
+ ordered1, indexer1 = pidx.sort_values(return_indexer=True, ascending=False)
+ ordered2, indexer2 = iidx.sort_values(return_indexer=True, ascending=False)
+ tm.assert_numpy_array_equal(indexer1, indexer2)
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index db260b71e7186..aa6b395176b06 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -13,7 +13,14 @@
from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion
import pandas as pd
-from pandas import CategoricalIndex, MultiIndex, RangeIndex
+from pandas import (
+ CategoricalIndex,
+ DatetimeIndex,
+ MultiIndex,
+ PeriodIndex,
+ RangeIndex,
+ TimedeltaIndex,
+)
import pandas._testing as tm
@@ -391,3 +398,46 @@ def test_astype_preserves_name(self, index, dtype):
assert result.names == index.names
else:
assert result.name == index.name
+
+
+@pytest.mark.parametrize("na_position", [None, "middle"])
+def test_sort_values_invalid_na_position(index_with_missing, na_position):
+ if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
+ # datetime-like indices will get na_position kwarg as part of
+ # synchronizing duplicate-sorting behavior, because we currently expect
+ # them, other indices, and Series to sort differently (xref 35922)
+ pytest.xfail("sort_values does not support na_position kwarg")
+ elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
+ pytest.xfail("missing value sorting order not defined for index type")
+
+ if na_position not in ["first", "last"]:
+ with pytest.raises(
+ ValueError, match=f"invalid na_position: {na_position}",
+ ):
+ index_with_missing.sort_values(na_position=na_position)
+
+
+@pytest.mark.parametrize("na_position", ["first", "last"])
+def test_sort_values_with_missing(index_with_missing, na_position):
+ # GH 35584. Test that sort_values works with missing values,
+ # sort non-missing and place missing according to na_position
+
+ if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
+ # datetime-like indices will get na_position kwarg as part of
+ # synchronizing duplicate-sorting behavior, because we currently expect
+ # them, other indices, and Series to sort differently (xref 35922)
+ pytest.xfail("sort_values does not support na_position kwarg")
+ elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
+ pytest.xfail("missing value sorting order not defined for index type")
+
+ missing_count = np.sum(index_with_missing.isna())
+ not_na_vals = index_with_missing[index_with_missing.notna()].values
+ sorted_values = np.sort(not_na_vals)
+ if na_position == "first":
+ sorted_values = np.concatenate([[None] * missing_count, sorted_values])
+ else:
+ sorted_values = np.concatenate([sorted_values, [None] * missing_count])
+ expected = type(index_with_missing)(sorted_values)
+
+ result = index_with_missing.sort_values(na_position=na_position)
+ tm.assert_index_equal(result, expected)
| - [X] closes #35584, xref #35922
- [X] tests added 1 / passed 1
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
### Problem
`Index.sort_values` breaks when `Index` includes `None`. The OP points out that a `Series` doesn't break in a similar case. In my opinion, there is a number of ways missing values can creep into an Index, and `sort_values` shoudn't be breaking.
### Details
When we do `Series.sort_values` we shunt the missing values to the end or the beginning of the Series depending on `na_position`, and then `argsort` the rest. When we currently do `sort_values` on an `Index`, we try to `argsort` the whole thing, and it expectedly breaks.
### Solution and alternatives
I propose we do the same stuff for `Index` that `Series` does, that is send the missing values to the end or the beginning. Added the `na_position` kwarg that works similar to how it works in `Series`.
Another possible solution is to explicitly forbid Indices with missing values and raise a clear error. I don't think it's advisable, since there isn't much of a practical difference between non-unique indices, which we allow, and this case.
| https://api.github.com/repos/pandas-dev/pandas/pulls/35604 | 2020-08-07T12:15:38Z | 2020-09-06T17:47:41Z | 2020-09-06T17:47:41Z | 2022-01-22T10:07:39Z |
Backport PR #35582 on branch 1.1.x (BUG: to_timedelta fails on Int64 Series with null values) | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 7db609fba5d68..2ec4bc8eafb0c 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -37,6 +37,11 @@ Categorical
-
-
+**Timedelta**
+
+- Bug in :meth:`to_timedelta` fails when arg is a :class:`Series` with `Int64` dtype containing null values (:issue:`35574`)
+
+
**Numeric**
-
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index a378423df788b..a30e1060c64f1 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -29,7 +29,7 @@
from pandas.core import nanops
from pandas.core.algorithms import checked_add_with_arr
-from pandas.core.arrays import datetimelike as dtl
+from pandas.core.arrays import IntegerArray, datetimelike as dtl
from pandas.core.arrays._ranges import generate_regular_range
import pandas.core.common as com
from pandas.core.construction import extract_array
@@ -921,6 +921,8 @@ def sequence_to_td64ns(data, copy=False, unit=None, errors="raise"):
elif isinstance(data, (ABCTimedeltaIndex, TimedeltaArray)):
inferred_freq = data.freq
data = data._data
+ elif isinstance(data, IntegerArray):
+ data = data.to_numpy("int64", na_value=tslibs.iNaT)
# Convert whatever we have into timedelta64[ns] dtype
if is_object_dtype(data.dtype) or is_string_dtype(data.dtype):
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index 1e193f22a6698..f68d83f7f4d58 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -166,3 +166,16 @@ def test_to_timedelta_ignore_strings_unit(self):
arr = np.array([1, 2, "error"], dtype=object)
result = pd.to_timedelta(arr, unit="ns", errors="ignore")
tm.assert_numpy_array_equal(result, arr)
+
+ def test_to_timedelta_nullable_int64_dtype(self):
+ # GH 35574
+ expected = Series([timedelta(days=1), timedelta(days=2)])
+ result = to_timedelta(Series([1, 2], dtype="Int64"), unit="days")
+
+ tm.assert_series_equal(result, expected)
+
+ # IntegerArray Series with nulls
+ expected = Series([timedelta(days=1), None])
+ result = to_timedelta(Series([1, None], dtype="Int64"), unit="days")
+
+ tm.assert_series_equal(result, expected)
| Backport PR #35582: BUG: to_timedelta fails on Int64 Series with null values | https://api.github.com/repos/pandas-dev/pandas/pulls/35602 | 2020-08-07T11:50:37Z | 2020-08-07T12:52:48Z | 2020-08-07T12:52:48Z | 2020-08-07T12:52:48Z |
Backport PR #35578 on branch 1.1.x (BUG: df.shift(n, axis=1) with multiple blocks) | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 7db609fba5d68..77ac362593e2a 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -17,6 +17,7 @@ Fixed regressions
- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`)
+- Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 0ce2408eb003e..4693cc193c27c 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -551,6 +551,24 @@ def interpolate(self, **kwargs) -> "BlockManager":
return self.apply("interpolate", **kwargs)
def shift(self, periods: int, axis: int, fill_value) -> "BlockManager":
+ if axis == 0 and self.ndim == 2 and self.nblocks > 1:
+ # GH#35488 we need to watch out for multi-block cases
+ ncols = self.shape[0]
+ if periods > 0:
+ indexer = [-1] * periods + list(range(ncols - periods))
+ else:
+ nper = abs(periods)
+ indexer = list(range(nper, ncols)) + [-1] * nper
+ result = self.reindex_indexer(
+ self.items,
+ indexer,
+ axis=0,
+ fill_value=fill_value,
+ allow_dups=True,
+ consolidate=False,
+ )
+ return result
+
return self.apply("shift", periods=periods, axis=axis, fill_value=fill_value)
def fillna(self, value, limit, inplace: bool, downcast) -> "BlockManager":
@@ -1213,6 +1231,7 @@ def reindex_indexer(
fill_value=None,
allow_dups: bool = False,
copy: bool = True,
+ consolidate: bool = True,
) -> T:
"""
Parameters
@@ -1223,7 +1242,8 @@ def reindex_indexer(
fill_value : object, default None
allow_dups : bool, default False
copy : bool, default True
-
+ consolidate: bool, default True
+ Whether to consolidate inplace before reindexing.
pandas-indexer with -1's only.
"""
@@ -1236,7 +1256,8 @@ def reindex_indexer(
result.axes[axis] = new_axis
return result
- self._consolidate_inplace()
+ if consolidate:
+ self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py
index 9ec029a6c4304..8f6902eca816f 100644
--- a/pandas/tests/frame/methods/test_shift.py
+++ b/pandas/tests/frame/methods/test_shift.py
@@ -145,6 +145,33 @@ def test_shift_duplicate_columns(self):
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
+ def test_shift_axis1_multiple_blocks(self):
+ # GH#35488
+ df1 = pd.DataFrame(np.random.randint(1000, size=(5, 3)))
+ df2 = pd.DataFrame(np.random.randint(1000, size=(5, 2)))
+ df3 = pd.concat([df1, df2], axis=1)
+ assert len(df3._mgr.blocks) == 2
+
+ result = df3.shift(2, axis=1)
+
+ expected = df3.take([-1, -1, 0, 1, 2], axis=1)
+ expected.iloc[:, :2] = np.nan
+ expected.columns = df3.columns
+
+ tm.assert_frame_equal(result, expected)
+
+ # Case with periods < 0
+ # rebuild df3 because `take` call above consolidated
+ df3 = pd.concat([df1, df2], axis=1)
+ assert len(df3._mgr.blocks) == 2
+ result = df3.shift(-2, axis=1)
+
+ expected = df3.take([2, 3, 4, -1, -1], axis=1)
+ expected.iloc[:, -2:] = np.nan
+ expected.columns = df3.columns
+
+ tm.assert_frame_equal(result, expected)
+
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_frame):
# TODO: remove this test when tshift deprecation is enforced
| Backport PR #35578: BUG: df.shift(n, axis=1) with multiple blocks | https://api.github.com/repos/pandas-dev/pandas/pulls/35601 | 2020-08-07T11:47:28Z | 2020-08-07T13:34:24Z | 2020-08-07T13:34:24Z | 2020-08-07T13:34:25Z |
Backport PR #35547 on branch 1.1.x (Bug fix one element series truncate) | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 7db609fba5d68..45f1015a8e7bd 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -48,7 +48,7 @@ Categorical
**Indexing**
--
+- Bug in :meth:`Series.truncate` when trying to truncate a single-element series (:issue:`35544`)
**DataFrame**
- Bug in :class:`DataFrame` constructor failing to raise ``ValueError`` in some cases when data and index have mismatched lengths (:issue:`33437`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e46fde1f59f16..a11ee6b5d9846 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9397,7 +9397,7 @@ def truncate(
if before > after:
raise ValueError(f"Truncate: {after} must be after {before}")
- if ax.is_monotonic_decreasing:
+ if len(ax) > 1 and ax.is_monotonic_decreasing:
before, after = after, before
slicer = [slice(None, None)] * self._AXIS_LEN
diff --git a/pandas/tests/series/methods/test_truncate.py b/pandas/tests/series/methods/test_truncate.py
index 7c82edbaec177..45592f8d99b93 100644
--- a/pandas/tests/series/methods/test_truncate.py
+++ b/pandas/tests/series/methods/test_truncate.py
@@ -141,3 +141,14 @@ def test_truncate_multiindex(self):
expected = df.col
tm.assert_series_equal(result, expected)
+
+ def test_truncate_one_element_series(self):
+ # GH 35544
+ series = pd.Series([0.1], index=pd.DatetimeIndex(["2020-08-04"]))
+ before = pd.Timestamp("2020-08-02")
+ after = pd.Timestamp("2020-08-04")
+
+ result = series.truncate(before=before, after=after)
+
+ # the input Series and the expected Series are the same
+ tm.assert_series_equal(result, series)
| Backport PR #35547: Bug fix one element series truncate | https://api.github.com/repos/pandas-dev/pandas/pulls/35600 | 2020-08-07T11:39:43Z | 2020-08-07T12:29:44Z | 2020-08-07T12:29:44Z | 2020-08-07T12:29:44Z |
Backport PR #35590 on branch 1.1.x (BUG: validate index/data length match in DataFrame construction) | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 5e36bfe6b6307..7db609fba5d68 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -50,6 +50,10 @@ Categorical
-
+**DataFrame**
+- Bug in :class:`DataFrame` constructor failing to raise ``ValueError`` in some cases when data and index have mismatched lengths (:issue:`33437`)
+-
+
.. ---------------------------------------------------------------------------
.. _whatsnew_111.contributors:
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 6ca6eca1ff829..f4f4a3666a84e 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -105,7 +105,6 @@ class Block(PandasObject):
is_extension = False
_can_hold_na = False
_can_consolidate = True
- _verify_integrity = True
_validate_ndim = True
@classmethod
@@ -1525,7 +1524,6 @@ class ExtensionBlock(Block):
"""
_can_consolidate = False
- _verify_integrity = False
_validate_ndim = False
is_extension = True
@@ -2613,7 +2611,6 @@ def _replace_coerce(
class CategoricalBlock(ExtensionBlock):
__slots__ = ()
is_categorical = True
- _verify_integrity = True
_can_hold_na = True
should_store = Block.should_store
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 895385b170c91..0ce2408eb003e 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -312,7 +312,7 @@ def _verify_integrity(self) -> None:
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
- if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
+ if block.shape[1:] != mgr_shape[1:]:
raise construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError(
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index a4ed548264d39..b78bb1c492ef4 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2619,6 +2619,12 @@ class DatetimeSubclass(datetime):
data = pd.DataFrame({"datetime": [DatetimeSubclass(2020, 1, 1, 1, 1)]})
assert data.datetime.dtype == "datetime64[ns]"
+ def test_with_mismatched_index_length_raises(self):
+ # GH#33437
+ dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
+ with pytest.raises(ValueError, match="Shape of passed values"):
+ DataFrame(dti, index=range(4))
+
class TestDataFrameConstructorWithDatetimeTZ:
def test_from_dict(self):
| Backport PR #35590: BUG: validate index/data length match in DataFrame construction | https://api.github.com/repos/pandas-dev/pandas/pulls/35597 | 2020-08-07T07:45:37Z | 2020-08-07T08:32:55Z | 2020-08-07T08:32:55Z | 2020-08-07T08:32:55Z |
Backport PR #35554 on branch 1.1.x (DOC: Document that read_hdf can use pickle) | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index d4be9d802d697..cc42f952b1733 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -3441,10 +3441,11 @@ for some advanced strategies
.. warning::
- pandas requires ``PyTables`` >= 3.0.0.
- There is a indexing bug in ``PyTables`` < 3.2 which may appear when querying stores using an index.
- If you see a subset of results being returned, upgrade to ``PyTables`` >= 3.2.
- Stores created previously will need to be rewritten using the updated version.
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle. Loading pickled data received from
+ untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
.. ipython:: python
:suppress:
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index e0df4c29e543e..6497067e3930c 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -289,7 +289,15 @@ def read_hdf(
Read from the store, close it if we opened it.
Retrieve pandas object stored in file, optionally based on where
- criteria
+ criteria.
+
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
@@ -445,6 +453,14 @@ class HDFStore:
Either Fixed or Table format.
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
Parameters
----------
path : str
@@ -789,6 +805,14 @@ def select(
"""
Retrieve pandas object stored in file, optionally based on where criteria.
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
Parameters
----------
key : str
@@ -852,6 +876,15 @@ def select_as_coordinates(
"""
return the selection as an Index
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
+
Parameters
----------
key : str
@@ -876,6 +909,14 @@ def select_column(
return a single column from the table. This is generally only useful to
select an indexable
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
Parameters
----------
key : str
@@ -912,6 +953,14 @@ def select_as_multiple(
"""
Retrieve pandas objects from multiple tables.
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
Parameters
----------
keys : a list of the tables
| Backport PR #35554: DOC: Document that read_hdf can use pickle | https://api.github.com/repos/pandas-dev/pandas/pulls/35593 | 2020-08-06T22:00:39Z | 2020-08-06T23:19:31Z | 2020-08-06T23:19:30Z | 2020-08-06T23:19:31Z |
REF: Simplify Index.copy | diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 73109020b1b54..1e70ff90fcd44 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -9,7 +9,7 @@
from pandas._libs import Interval, Period, algos
from pandas._libs.tslibs import conversion
-from pandas._typing import ArrayLike, DtypeObj
+from pandas._typing import ArrayLike, DtypeObj, Optional
from pandas.core.dtypes.base import registry
from pandas.core.dtypes.dtypes import (
@@ -1732,6 +1732,32 @@ def _validate_date_like_dtype(dtype) -> None:
)
+def validate_all_hashable(*args, error_name: Optional[str] = None) -> None:
+ """
+ Return None if all args are hashable, else raise a TypeError.
+
+ Parameters
+ ----------
+ *args
+ Arguments to validate.
+ error_name : str, optional
+ The name to use if error
+
+ Raises
+ ------
+ TypeError : If an argument is not hashable
+
+ Returns
+ -------
+ None
+ """
+ if not all(is_hashable(arg) for arg in args):
+ if error_name:
+ raise TypeError(f"{error_name} must be a hashable type")
+ else:
+ raise TypeError("All elements must be hashable")
+
+
def pandas_dtype(dtype) -> DtypeObj:
"""
Convert input into a pandas only dtype object or a numpy dtype object.
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index bfdfbd35f27ad..38ac869276ba4 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -58,6 +58,7 @@
is_timedelta64_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
+ validate_all_hashable,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import (
@@ -812,13 +813,11 @@ def copy(self, name=None, deep=False, dtype=None, names=None):
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
+ name = self._validate_names(name=name, names=names, deep=deep)[0]
if deep:
- new_index = self._shallow_copy(self._data.copy())
+ new_index = self._shallow_copy(self._data.copy(), name=name)
else:
- new_index = self._shallow_copy()
-
- names = self._validate_names(name=name, names=names, deep=deep)
- new_index = new_index.set_names(names)
+ new_index = self._shallow_copy(name=name)
if dtype:
new_index = new_index.astype(dtype)
@@ -1186,7 +1185,7 @@ def name(self, value):
maybe_extract_name(value, None, type(self))
self._name = value
- def _validate_names(self, name=None, names=None, deep: bool = False):
+ def _validate_names(self, name=None, names=None, deep: bool = False) -> List[Label]:
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
@@ -1196,15 +1195,25 @@ def _validate_names(self, name=None, names=None, deep: bool = False):
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
- return deepcopy(self.names) if deep else self.names
+ new_names = deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
- return names
+ new_names = names
+ elif not is_list_like(name):
+ new_names = [name]
else:
- if not is_list_like(name):
- return [name]
- return name
+ new_names = name
+
+ if len(new_names) != len(self.names):
+ raise ValueError(
+ f"Length of new names must be {len(self.names)}, got {len(new_names)}"
+ )
+
+ # All items in 'new_names' need to be hashable
+ validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name")
+
+ return new_names
def _get_names(self):
return FrozenList((self.name,))
@@ -1232,9 +1241,8 @@ def _set_names(self, values, level=None):
# GH 20527
# All items in 'name' need to be hashable:
- for name in values:
- if not is_hashable(name):
- raise TypeError(f"{type(self).__name__}.name must be a hashable type")
+ validate_all_hashable(*values, error_name=f"{type(self).__name__}.name")
+
self._name = values[0]
names = property(fset=_set_names, fget=_get_names)
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index e9c4c301f4dca..3577a7aacc008 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -388,9 +388,8 @@ def _shallow_copy(self, values=None, name: Label = no_default):
def copy(self, name=None, deep=False, dtype=None, names=None):
self._validate_dtype(dtype)
- new_index = self._shallow_copy()
- names = self._validate_names(name=name, names=names, deep=deep)
- new_index = new_index.set_names(names)
+ name = self._validate_names(name=name, names=names, deep=deep)[0]
+ new_index = self._shallow_copy(name=name)
return new_index
def _minmax(self, meth: str):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 9e70120f67969..93368ea1e515f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -54,6 +54,7 @@
is_list_like,
is_object_dtype,
is_scalar,
+ validate_all_hashable,
)
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.dtypes.inference import is_hashable
@@ -491,8 +492,7 @@ def name(self) -> Label:
@name.setter
def name(self, value: Label) -> None:
- if not is_hashable(value):
- raise TypeError("Series.name must be a hashable type")
+ validate_all_hashable(value, error_name=f"{type(self).__name__}.name")
object.__setattr__(self, "_name", value)
@property
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index ce12718e48d0d..a6c526fcb008a 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -746,3 +746,13 @@ def test_astype_object_preserves_datetime_na(from_type):
result = astype_nansafe(arr, dtype="object")
assert isna(result)[0]
+
+
+def test_validate_allhashable():
+ assert com.validate_all_hashable(1, "a") is None
+
+ with pytest.raises(TypeError, match="All elements must be hashable"):
+ com.validate_all_hashable([])
+
+ with pytest.raises(TypeError, match="list must be a hashable type"):
+ com.validate_all_hashable([], error_name="list")
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 238ee8d304d05..98f7c0eadb4bb 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -270,6 +270,20 @@ def test_copy_name(self, index):
s3 = s1 * s2
assert s3.index.name == "mario"
+ def test_name2(self, index):
+ # gh-35592
+ if isinstance(index, MultiIndex):
+ return
+
+ assert index.copy(name="mario").name == "mario"
+
+ with pytest.raises(ValueError, match="Length of new names must be 1, got 2"):
+ index.copy(name=["mario", "luigi"])
+
+ msg = f"{type(index).__name__}.name must be a hashable type"
+ with pytest.raises(TypeError, match=msg):
+ index.copy(name=[["mario"]])
+
def test_ensure_copied_data(self, index):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
diff --git a/pandas/tests/indexes/multi/test_names.py b/pandas/tests/indexes/multi/test_names.py
index 479b5ef0211a0..f38da7ad2ae1c 100644
--- a/pandas/tests/indexes/multi/test_names.py
+++ b/pandas/tests/indexes/multi/test_names.py
@@ -75,6 +75,13 @@ def test_copy_names():
assert multi_idx.names == ["MyName1", "MyName2"]
assert multi_idx3.names == ["NewName1", "NewName2"]
+ # gh-35592
+ with pytest.raises(ValueError, match="Length of new names must be 2, got 1"):
+ multi_idx.copy(names=["mario"])
+
+ with pytest.raises(TypeError, match="MultiIndex.name must be a hashable type"):
+ multi_idx.copy(names=[["mario"], ["luigi"]])
+
def test_names(idx, index_names):
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 1ba73292dc0b4..724558bd49ea2 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -63,8 +63,8 @@ def setup_method(self, method):
).sum()
# use Int64Index, to make sure things work
- self.ymd.index.set_levels(
- [lev.astype("i8") for lev in self.ymd.index.levels], inplace=True
+ self.ymd.index = self.ymd.index.set_levels(
+ [lev.astype("i8") for lev in self.ymd.index.levels]
)
self.ymd.index.set_names(["year", "month", "day"], inplace=True)
| Avoid the use of the ``Index.set_names`` in ``Index.copy``, which is a slightly awkward method to use here (it calls ``_shallow_copy`` + is longwinded).
This is archieved by tightening up the checks in ``_validate_names``. | https://api.github.com/repos/pandas-dev/pandas/pulls/35592 | 2020-08-06T21:22:05Z | 2020-08-10T13:24:30Z | 2020-08-10T13:24:30Z | 2020-08-10T17:52:34Z |
Backport PR #35513 on branch 1.1.x (BUG: RollingGroupby respects __getitem__) | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 6a327a4fc732f..5e36bfe6b6307 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -16,8 +16,7 @@ Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
--
--
+- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 445f179248226..87bcaa7d9512f 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -2220,6 +2220,10 @@ def _apply(
def _constructor(self):
return Rolling
+ @cache_readonly
+ def _selected_obj(self):
+ return self._groupby._selected_obj
+
def _create_blocks(self, obj: FrameOrSeries):
"""
Split data into blocks & return conformed data.
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index 744ca264e91d9..ca5a9eccea4f5 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -214,3 +214,28 @@ def foo(x):
name="value",
)
tm.assert_series_equal(result, expected)
+
+ def test_groupby_subselect_rolling(self):
+ # GH 35486
+ df = DataFrame(
+ {"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [10, 20, 30, 20]}
+ )
+ result = df.groupby("a")[["b"]].rolling(2).max()
+ expected = DataFrame(
+ [np.nan, np.nan, 2.0, np.nan],
+ columns=["b"],
+ index=pd.MultiIndex.from_tuples(
+ ((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+ result = df.groupby("a")["b"].rolling(2).max()
+ expected = Series(
+ [np.nan, np.nan, 2.0, np.nan],
+ index=pd.MultiIndex.from_tuples(
+ ((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None]
+ ),
+ name="b",
+ )
+ tm.assert_series_equal(result, expected)
| Backport PR #35513: BUG: RollingGroupby respects __getitem__ | https://api.github.com/repos/pandas-dev/pandas/pulls/35591 | 2020-08-06T19:18:42Z | 2020-08-06T21:50:03Z | 2020-08-06T21:50:03Z | 2020-08-06T21:50:03Z |
BUG: validate index/data length match in DataFrame construction | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 5e36bfe6b6307..7db609fba5d68 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -50,6 +50,10 @@ Categorical
-
+**DataFrame**
+- Bug in :class:`DataFrame` constructor failing to raise ``ValueError`` in some cases when data and index have mismatched lengths (:issue:`33437`)
+-
+
.. ---------------------------------------------------------------------------
.. _whatsnew_111.contributors:
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 3186c555b7ae1..f3286b3c20965 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -105,7 +105,6 @@ class Block(PandasObject):
is_extension = False
_can_hold_na = False
_can_consolidate = True
- _verify_integrity = True
_validate_ndim = True
@classmethod
@@ -1525,7 +1524,6 @@ class ExtensionBlock(Block):
"""
_can_consolidate = False
- _verify_integrity = False
_validate_ndim = False
is_extension = True
@@ -2613,7 +2611,6 @@ def _replace_coerce(
class CategoricalBlock(ExtensionBlock):
__slots__ = ()
is_categorical = True
- _verify_integrity = True
_can_hold_na = True
should_store = Block.should_store
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 895385b170c91..0ce2408eb003e 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -312,7 +312,7 @@ def _verify_integrity(self) -> None:
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
- if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
+ if block.shape[1:] != mgr_shape[1:]:
raise construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError(
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index a4ed548264d39..b78bb1c492ef4 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2619,6 +2619,12 @@ class DatetimeSubclass(datetime):
data = pd.DataFrame({"datetime": [DatetimeSubclass(2020, 1, 1, 1, 1)]})
assert data.datetime.dtype == "datetime64[ns]"
+ def test_with_mismatched_index_length_raises(self):
+ # GH#33437
+ dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
+ with pytest.raises(ValueError, match="Shape of passed values"):
+ DataFrame(dti, index=range(4))
+
class TestDataFrameConstructorWithDatetimeTZ:
def test_from_dict(self):
| - [x] closes #33437
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35590 | 2020-08-06T18:56:01Z | 2020-08-07T02:20:04Z | 2020-08-07T02:20:03Z | 2020-08-07T07:45:04Z |
BUG: fix styler cell_ids arg so that blank style is ignored on False | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 7db609fba5d68..ddc7fba7a5d10 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -25,6 +25,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
+- Bug in ``Styler`` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`).
Categorical
^^^^^^^^^^^
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index fd1efa2d1b668..584f42a6cab12 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -390,7 +390,7 @@ def format_attr(pair):
"is_visible": (c not in hidden_columns),
}
# only add an id if the cell has a style
- if self.cell_ids or not (len(ctx[r, c]) == 1 and ctx[r, c][0] == ""):
+ if self.cell_ids or (r, c) in ctx:
row_dict["id"] = "_".join(cs[1:])
row_es.append(row_dict)
props = []
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index 9c6910637fa7e..3ef5157655e78 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -1682,6 +1682,12 @@ def f(a, b, styler):
result = styler.pipe((f, "styler"), a=1, b=2)
assert result == (1, 2, styler)
+ def test_no_cell_ids(self):
+ # GH 35588
+ df = pd.DataFrame(data=[[0]])
+ s = Styler(df, uuid="_", cell_ids=False).render()
+ assert s.find('<td class="data row0 col0" >') != -1
+
@td.skip_if_no_mpl
class TestStylerMatplotlibDep:
| - [x] closes #35586
- [ ] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35588 | 2020-08-06T14:47:19Z | 2020-08-07T21:35:22Z | 2020-08-07T21:35:22Z | 2020-08-08T09:44:21Z |
Ensure file is closed promptly in case of error | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 8b28a4439e1da..bd4ee9b46db67 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -289,6 +289,7 @@ MultiIndex
I/O
^^^
+- :func:`read_sas` no longer leaks resources on failure (:issue:`35566`)
- Bug in :meth:`to_csv` caused a ``ValueError`` when it was called with a filename in combination with ``mode`` containing a ``b`` (:issue:`35058`)
- In :meth:`read_csv` `float_precision='round_trip'` now handles `decimal` and `thousands` parameters (:issue:`35365`)
- :meth:`to_pickle` and :meth:`read_pickle` were closing user-provided file objects (:issue:`35679`)
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index 76dac39d1889f..f2ee642d8fd42 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -142,8 +142,12 @@ def __init__(
self._path_or_buf = open(self._path_or_buf, "rb")
self.handle = self._path_or_buf
- self._get_properties()
- self._parse_metadata()
+ try:
+ self._get_properties()
+ self._parse_metadata()
+ except Exception:
+ self.close()
+ raise
def column_data_lengths(self):
"""Return a numpy int64 array of the column data lengths"""
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 1a4ba544f5d59..9727ec930119b 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -264,7 +264,11 @@ def __init__(
# should already be opened in binary mode in Python 3.
self.filepath_or_buffer = filepath_or_buffer
- self._read_header()
+ try:
+ self._read_header()
+ except Exception:
+ self.close()
+ raise
def close(self):
self.filepath_or_buffer.close()
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index ae9457a8e3147..31d1a6ad471ea 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -136,8 +136,8 @@ def read_sas(
if iterator or chunksize:
return reader
- data = reader.read()
-
- if ioargs.should_close:
- reader.close()
- return data
+ try:
+ return reader.read()
+ finally:
+ if ioargs.should_close:
+ reader.close()
diff --git a/pandas/tests/io/sas/data/corrupt.sas7bdat b/pandas/tests/io/sas/data/corrupt.sas7bdat
new file mode 100644
index 0000000000000..2941ffe3ecdf5
Binary files /dev/null and b/pandas/tests/io/sas/data/corrupt.sas7bdat differ
diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py
index 8c14f9de9f61c..9de6ca75fd4d9 100644
--- a/pandas/tests/io/sas/test_sas7bdat.py
+++ b/pandas/tests/io/sas/test_sas7bdat.py
@@ -217,6 +217,14 @@ def test_zero_variables(datapath):
pd.read_sas(fname)
+def test_corrupt_read(datapath):
+ # We don't really care about the exact failure, the important thing is
+ # that the resource should be cleaned up afterwards (BUG #35566)
+ fname = datapath("io", "sas", "data", "corrupt.sas7bdat")
+ with pytest.raises(AttributeError):
+ pd.read_sas(fname)
+
+
def round_datetime_to_ms(ts):
if isinstance(ts, datetime):
return ts.replace(microsecond=int(round(ts.microsecond, -3) / 1000) * 1000)
| Fixes #35566. Replaces #35567. | https://api.github.com/repos/pandas-dev/pandas/pulls/35587 | 2020-08-06T13:10:10Z | 2020-09-13T22:28:13Z | 2020-09-13T22:28:12Z | 2020-09-13T22:28:13Z |
BUG: NaT.__cmp__(invalid) should raise TypeError | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 304897edbb75e..4f715d4627a61 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -60,6 +60,7 @@ Categorical
Datetimelike
^^^^^^^^^^^^
- Bug in :attr:`DatetimeArray.date` where a ``ValueError`` would be raised with a read-only backing array (:issue:`33530`)
+- Bug in ``NaT`` comparisons failing to raise ``TypeError`` on invalid inequality comparisons (:issue:`35046`)
-
Timedelta
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 73df51832d700..79f50c7261905 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -107,30 +107,25 @@ cdef class _NaT(datetime):
__array_priority__ = 100
def __richcmp__(_NaT self, object other, int op):
- cdef:
- int ndim = getattr(other, "ndim", -1)
+ if util.is_datetime64_object(other) or PyDateTime_Check(other):
+ # We treat NaT as datetime-like for this comparison
+ return _nat_scalar_rules[op]
- if ndim == -1:
+ elif util.is_timedelta64_object(other) or PyDelta_Check(other):
+ # We treat NaT as timedelta-like for this comparison
return _nat_scalar_rules[op]
elif util.is_array(other):
- result = np.empty(other.shape, dtype=np.bool_)
- result.fill(_nat_scalar_rules[op])
+ if other.dtype.kind in "mM":
+ result = np.empty(other.shape, dtype=np.bool_)
+ result.fill(_nat_scalar_rules[op])
+ elif other.dtype.kind == "O":
+ result = np.array([PyObject_RichCompare(self, x, op) for x in other])
+ else:
+ return NotImplemented
return result
- elif ndim == 0:
- if util.is_datetime64_object(other):
- return _nat_scalar_rules[op]
- else:
- raise TypeError(
- f"Cannot compare type {type(self).__name__} "
- f"with type {type(other).__name__}"
- )
-
- # Note: instead of passing "other, self, _reverse_ops[op]", we observe
- # that `_nat_scalar_rules` is invariant under `_reverse_ops`,
- # rendering it unnecessary.
- return PyObject_RichCompare(other, self, op)
+ return NotImplemented
def __add__(self, other):
if self is not c_NaT:
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index e1e2ea1a5cec8..03830019affa1 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -513,11 +513,67 @@ def test_to_numpy_alias():
assert isna(expected) and isna(result)
-@pytest.mark.parametrize("other", [Timedelta(0), Timestamp(0)])
+@pytest.mark.parametrize(
+ "other",
+ [
+ Timedelta(0),
+ Timedelta(0).to_pytimedelta(),
+ pytest.param(
+ Timedelta(0).to_timedelta64(),
+ marks=pytest.mark.xfail(
+ reason="td64 doesnt return NotImplemented, see numpy#17017"
+ ),
+ ),
+ Timestamp(0),
+ Timestamp(0).to_pydatetime(),
+ pytest.param(
+ Timestamp(0).to_datetime64(),
+ marks=pytest.mark.xfail(
+ reason="dt64 doesnt return NotImplemented, see numpy#17017"
+ ),
+ ),
+ Timestamp(0).tz_localize("UTC"),
+ NaT,
+ ],
+)
def test_nat_comparisons(compare_operators_no_eq_ne, other):
# GH 26039
- assert getattr(NaT, compare_operators_no_eq_ne)(other) is False
- assert getattr(other, compare_operators_no_eq_ne)(NaT) is False
+ opname = compare_operators_no_eq_ne
+
+ assert getattr(NaT, opname)(other) is False
+
+ op = getattr(operator, opname.strip("_"))
+ assert op(NaT, other) is False
+ assert op(other, NaT) is False
+
+
+@pytest.mark.parametrize("other", [np.timedelta64(0, "ns"), np.datetime64("now", "ns")])
+def test_nat_comparisons_numpy(other):
+ # Once numpy#17017 is fixed and the xfailed cases in test_nat_comparisons
+ # pass, this test can be removed
+ assert not NaT == other
+ assert NaT != other
+ assert not NaT < other
+ assert not NaT > other
+ assert not NaT <= other
+ assert not NaT >= other
+
+
+@pytest.mark.parametrize("other", ["foo", 2, 2.0])
+@pytest.mark.parametrize("op", [operator.le, operator.lt, operator.ge, operator.gt])
+def test_nat_comparisons_invalid(other, op):
+ # GH#35585
+ assert not NaT == other
+ assert not other == NaT
+
+ assert NaT != other
+ assert other != NaT
+
+ with pytest.raises(TypeError):
+ op(NaT, other)
+
+ with pytest.raises(TypeError):
+ op(other, NaT)
@pytest.mark.parametrize(
| - [x] closes #35046
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35585 | 2020-08-06T03:42:48Z | 2020-08-06T22:06:47Z | 2020-08-06T22:06:47Z | 2020-08-06T23:28:01Z |
BUG: TDA.__floordiv__ with NaT | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index b16ca0a80c5b4..0cb316a829c8c 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -64,7 +64,7 @@ Datetimelike
Timedelta
^^^^^^^^^
-
+- Bug in :class:`TimedeltaIndex`, :class:`Series`, and :class:`DataFrame` floor-division with ``timedelta64`` dtypes and ``NaT`` in the denominator (:issue:`35529`)
-
-
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index a378423df788b..99a4725c2d806 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -628,7 +628,7 @@ def __floordiv__(self, other):
result = self.asi8 // other.asi8
mask = self._isnan | other._isnan
if mask.any():
- result = result.astype(np.int64)
+ result = result.astype(np.float64)
result[mask] = np.nan
return result
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index f94408d657ae5..64d3d5b6d684d 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -1733,6 +1733,23 @@ def test_tdarr_div_length_mismatch(self, box_with_array):
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
+ def test_td64arr_floordiv_td64arr_with_nat(self, box_with_array):
+ # GH#35529
+ box = box_with_array
+
+ left = pd.Series([1000, 222330, 30], dtype="timedelta64[ns]")
+ right = pd.Series([1000, 222330, None], dtype="timedelta64[ns]")
+
+ left = tm.box_expected(left, box)
+ right = tm.box_expected(right, box)
+
+ expected = np.array([1.0, 1.0, np.nan], dtype=np.float64)
+ expected = tm.box_expected(expected, box)
+
+ result = left // right
+
+ tm.assert_equal(result, expected)
+
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
| - [x] closes #35529
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35583 | 2020-08-06T01:42:45Z | 2020-08-06T16:20:30Z | 2020-08-06T16:20:29Z | 2020-08-06T17:33:26Z |
BUG: to_timedelta fails on Int64 Series with null values | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 6a327a4fc732f..e9bac816b5abc 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -38,6 +38,11 @@ Categorical
-
-
+**Timedelta**
+
+- Bug in :meth:`to_timedelta` fails when arg is a :class:`Series` with `Int64` dtype containing null values (:issue:`35574`)
+
+
**Numeric**
-
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index a378423df788b..a30e1060c64f1 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -29,7 +29,7 @@
from pandas.core import nanops
from pandas.core.algorithms import checked_add_with_arr
-from pandas.core.arrays import datetimelike as dtl
+from pandas.core.arrays import IntegerArray, datetimelike as dtl
from pandas.core.arrays._ranges import generate_regular_range
import pandas.core.common as com
from pandas.core.construction import extract_array
@@ -921,6 +921,8 @@ def sequence_to_td64ns(data, copy=False, unit=None, errors="raise"):
elif isinstance(data, (ABCTimedeltaIndex, TimedeltaArray)):
inferred_freq = data.freq
data = data._data
+ elif isinstance(data, IntegerArray):
+ data = data.to_numpy("int64", na_value=tslibs.iNaT)
# Convert whatever we have into timedelta64[ns] dtype
if is_object_dtype(data.dtype) or is_string_dtype(data.dtype):
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index 1e193f22a6698..f68d83f7f4d58 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -166,3 +166,16 @@ def test_to_timedelta_ignore_strings_unit(self):
arr = np.array([1, 2, "error"], dtype=object)
result = pd.to_timedelta(arr, unit="ns", errors="ignore")
tm.assert_numpy_array_equal(result, arr)
+
+ def test_to_timedelta_nullable_int64_dtype(self):
+ # GH 35574
+ expected = Series([timedelta(days=1), timedelta(days=2)])
+ result = to_timedelta(Series([1, 2], dtype="Int64"), unit="days")
+
+ tm.assert_series_equal(result, expected)
+
+ # IntegerArray Series with nulls
+ expected = Series([timedelta(days=1), None])
+ result = to_timedelta(Series([1, None], dtype="Int64"), unit="days")
+
+ tm.assert_series_equal(result, expected)
| - [X] closes #35574
- [X] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35582 | 2020-08-06T01:42:06Z | 2020-08-07T11:49:53Z | 2020-08-07T11:49:52Z | 2020-08-07T11:50:28Z |
Adding function to calculate years since a reference timestamp | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 304897edbb75e..2379350b29f43 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -59,6 +59,7 @@ Categorical
Datetimelike
^^^^^^^^^^^^
+- Added :func:`age_in_years` to calculate the years since a reference time (such as years of age since date of birth).
- Bug in :attr:`DatetimeArray.date` where a ``ValueError`` would be raised with a read-only backing array (:issue:`33530`)
-
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index bddfc30d86a53..56fd6d4e45121 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -1246,6 +1246,35 @@ timedelta}, default 'raise'
"""
return getattr(self.freq, 'freqstr', self.freq)
+ def age_in_years(self, on=None) -> int:
+ """
+ Return the age in years of the timestamp. If either age is NaT, will
+ return np.nan.
+
+ Parameters
+ ----------
+ on : Timestamp
+ Date at which the age should be valid. Defaults to today.
+
+ * If None, defaults to today
+
+ Returns
+ -------
+ age_in_years : int
+ """
+ from pandas import to_datetime
+
+ if on is None:
+ on = self.today()
+
+ # if NaT is input, there is no age. Return NaN.
+ if self is NaT or on is NaT:
+ return np.nan
+
+ day_this_year = to_datetime(datetime(on.year, self.month, self.day))
+ age_in_years = on.year - self.year - (on < day_this_year)
+ return age_in_years
+
def tz_localize(self, tz, ambiguous='raise', nonexistent='raise'):
"""
Convert naive Timestamp to local time zone, or remove
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index e1e2ea1a5cec8..c96b15b735680 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -186,7 +186,17 @@ def test_nat_iso_format(get_nat):
@pytest.mark.parametrize(
"klass,expected",
[
- (Timestamp, ["freqstr", "normalize", "to_julian_date", "to_period", "tz"]),
+ (
+ Timestamp,
+ [
+ "age_in_years",
+ "freqstr",
+ "normalize",
+ "to_julian_date",
+ "to_period",
+ "tz",
+ ],
+ ),
(
Timedelta,
[
diff --git a/pandas/tests/scalar/timestamp/test_arithmetic.py b/pandas/tests/scalar/timestamp/test_arithmetic.py
index 954301b979074..81fc1f7007e34 100644
--- a/pandas/tests/scalar/timestamp/test_arithmetic.py
+++ b/pandas/tests/scalar/timestamp/test_arithmetic.py
@@ -106,6 +106,13 @@ def test_timestamp_sub_datetime(self):
assert (ts - dt).days == 1
assert (dt - ts).days == -1
+ def test_age_in_years(self):
+ # test individual values
+ birthday = Timestamp(datetime(1983, 2, 4))
+ assert birthday.age_in_years(on=Timestamp(datetime(2020, 2, 3))) == 36
+ assert birthday.age_in_years(on=Timestamp(datetime(2020, 2, 4))) == 37
+ assert np.isnan(birthday.age_in_years(on=Timestamp("NaT")))
+
def test_addition_subtraction_types(self):
# Assert on the types resulting from Timestamp +/- various date/time
# objects
| - [ ] closes #xxxx
- [X] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35581 | 2020-08-06T01:32:07Z | 2020-08-07T15:30:47Z | null | 2020-08-07T15:30:47Z |
BUG: df.shift(n, axis=1) with multiple blocks | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 7db609fba5d68..77ac362593e2a 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -17,6 +17,7 @@ Fixed regressions
- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`)
+- Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 0ce2408eb003e..4693cc193c27c 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -551,6 +551,24 @@ def interpolate(self, **kwargs) -> "BlockManager":
return self.apply("interpolate", **kwargs)
def shift(self, periods: int, axis: int, fill_value) -> "BlockManager":
+ if axis == 0 and self.ndim == 2 and self.nblocks > 1:
+ # GH#35488 we need to watch out for multi-block cases
+ ncols = self.shape[0]
+ if periods > 0:
+ indexer = [-1] * periods + list(range(ncols - periods))
+ else:
+ nper = abs(periods)
+ indexer = list(range(nper, ncols)) + [-1] * nper
+ result = self.reindex_indexer(
+ self.items,
+ indexer,
+ axis=0,
+ fill_value=fill_value,
+ allow_dups=True,
+ consolidate=False,
+ )
+ return result
+
return self.apply("shift", periods=periods, axis=axis, fill_value=fill_value)
def fillna(self, value, limit, inplace: bool, downcast) -> "BlockManager":
@@ -1213,6 +1231,7 @@ def reindex_indexer(
fill_value=None,
allow_dups: bool = False,
copy: bool = True,
+ consolidate: bool = True,
) -> T:
"""
Parameters
@@ -1223,7 +1242,8 @@ def reindex_indexer(
fill_value : object, default None
allow_dups : bool, default False
copy : bool, default True
-
+ consolidate: bool, default True
+ Whether to consolidate inplace before reindexing.
pandas-indexer with -1's only.
"""
@@ -1236,7 +1256,8 @@ def reindex_indexer(
result.axes[axis] = new_axis
return result
- self._consolidate_inplace()
+ if consolidate:
+ self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py
index 9ec029a6c4304..8f6902eca816f 100644
--- a/pandas/tests/frame/methods/test_shift.py
+++ b/pandas/tests/frame/methods/test_shift.py
@@ -145,6 +145,33 @@ def test_shift_duplicate_columns(self):
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
+ def test_shift_axis1_multiple_blocks(self):
+ # GH#35488
+ df1 = pd.DataFrame(np.random.randint(1000, size=(5, 3)))
+ df2 = pd.DataFrame(np.random.randint(1000, size=(5, 2)))
+ df3 = pd.concat([df1, df2], axis=1)
+ assert len(df3._mgr.blocks) == 2
+
+ result = df3.shift(2, axis=1)
+
+ expected = df3.take([-1, -1, 0, 1, 2], axis=1)
+ expected.iloc[:, :2] = np.nan
+ expected.columns = df3.columns
+
+ tm.assert_frame_equal(result, expected)
+
+ # Case with periods < 0
+ # rebuild df3 because `take` call above consolidated
+ df3 = pd.concat([df1, df2], axis=1)
+ assert len(df3._mgr.blocks) == 2
+ result = df3.shift(-2, axis=1)
+
+ expected = df3.take([2, 3, 4, -1, -1], axis=1)
+ expected.iloc[:, -2:] = np.nan
+ expected.columns = df3.columns
+
+ tm.assert_frame_equal(result, expected)
+
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_frame):
# TODO: remove this test when tshift deprecation is enforced
| - [x] closes #35488
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35578 | 2020-08-05T23:14:29Z | 2020-08-07T11:45:49Z | 2020-08-07T11:45:49Z | 2020-08-07T20:49:51Z |
CLN: remove kwargs from RangeIndex.copy | diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 1dc4fc1e91462..e9c4c301f4dca 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -385,11 +385,13 @@ def _shallow_copy(self, values=None, name: Label = no_default):
return Int64Index._simple_new(values, name=name)
@doc(Int64Index.copy)
- def copy(self, name=None, deep=False, dtype=None, **kwargs):
+ def copy(self, name=None, deep=False, dtype=None, names=None):
self._validate_dtype(dtype)
- if name is None:
- name = self.name
- return self.from_range(self._range, name=name)
+
+ new_index = self._shallow_copy()
+ names = self._validate_names(name=name, names=names, deep=deep)
+ new_index = new_index.set_names(names)
+ return new_index
def _minmax(self, meth: str):
no_steps = len(self) - 1
| xref #31669.
| https://api.github.com/repos/pandas-dev/pandas/pulls/35575 | 2020-08-05T19:33:14Z | 2020-08-06T15:48:00Z | 2020-08-06T15:48:00Z | 2020-08-06T20:23:31Z |
Ensure _group_selection_context is always reset | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index ac45222625569..6c8a780859939 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -463,8 +463,10 @@ def _group_selection_context(groupby):
Set / reset the _group_selection_context.
"""
groupby._set_group_selection()
- yield groupby
- groupby._reset_group_selection()
+ try:
+ yield groupby
+ finally:
+ groupby._reset_group_selection()
_KeysArgType = Union[
| Context managers will resume with an exception if the with block calling them fails.
This happening is not an excuse to not clean up.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Found by inspecting the code, not by actually finding a failing example.
| https://api.github.com/repos/pandas-dev/pandas/pulls/35572 | 2020-08-05T14:39:09Z | 2020-08-06T15:14:44Z | 2020-08-06T15:14:44Z | 2020-08-06T15:14:51Z |
Change check_freq default to False | diff --git a/pandas/_testing.py b/pandas/_testing.py
index a020fbff3553a..caf2dbbf31b4f 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -1226,7 +1226,7 @@ def assert_series_equal(
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
- check_freq=True,
+ check_freq=False,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
@@ -1271,7 +1271,7 @@ def assert_series_equal(
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
- check_freq : bool, default True
+ check_freq : bool, default False
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
@@ -1426,7 +1426,7 @@ def assert_frame_equal(
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
- check_freq=True,
+ check_freq=False,
rtol=1.0e-5,
atol=1.0e-8,
obj="DataFrame",
@@ -1486,7 +1486,7 @@ def assert_frame_equal(
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
- check_freq : bool, default True
+ check_freq : bool, default False
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
| - [x] closes #35570
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35571 | 2020-08-05T14:34:01Z | 2020-08-14T13:03:24Z | null | 2020-08-14T14:29:33Z |
DOC: add type BinaryIO to path param #35505 | diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index b1bbda4a4b7e0..e31acbbb1614a 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -541,7 +541,7 @@ class ExcelWriter(metaclass=abc.ABCMeta):
Parameters
----------
- path : str
+ path : str or typing.BinaryIO
Path to xls or xlsx or ods file.
engine : str (optional)
Engine to use for writing. If None, defaults to
@@ -596,6 +596,21 @@ class ExcelWriter(metaclass=abc.ABCMeta):
>>> with ExcelWriter('path_to_file.xlsx', mode='a') as writer:
... df.to_excel(writer, sheet_name='Sheet3')
+
+ You can store Excel file in RAM:
+
+ >>> import io
+ >>> buffer = io.BytesIO()
+ >>> with pd.ExcelWriter(buffer) as writer:
+ ... df.to_excel(writer)
+
+ You can pack Excel file into zip archive:
+
+ >>> import zipfile
+ >>> with zipfile.ZipFile('path_to_file.zip', 'w') as zf:
+ ... with zf.open('filename.xlsx', 'w') as buffer:
+ ... with pd.ExcelWriter(buffer) as writer:
+ ... df.to_excel(writer)
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
| - [ ] closes #35505
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35568 | 2020-08-05T12:57:36Z | 2020-09-13T23:07:21Z | 2020-09-13T23:07:21Z | 2020-09-13T23:07:41Z |
Ensure file is closed promptly in case of error | diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index 291c9d1ee7f0c..d6f083385c7e0 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -128,6 +128,7 @@ def read_sas(
if iterator or chunksize:
return reader
- data = reader.read()
- reader.close()
- return data
+ try:
+ return reader.read()
+ finally:
+ reader.close()
| Fixes #35566 | https://api.github.com/repos/pandas-dev/pandas/pulls/35567 | 2020-08-05T12:46:05Z | 2020-08-06T13:19:39Z | null | 2020-08-07T07:33:59Z |
BUG: Ensure rolling groupby doesn't segfault with center=True | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 6b315e0a9d016..a044a4aab284e 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -18,6 +18,7 @@ Fixed regressions
- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`)
- Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`)
+- Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py
index 0898836ed2e0e..bc36bdca982e8 100644
--- a/pandas/core/window/indexers.py
+++ b/pandas/core/window/indexers.py
@@ -319,4 +319,10 @@ def get_window_bounds(
end_arrays.append(window_indicies.take(end))
start = np.concatenate(start_arrays)
end = np.concatenate(end_arrays)
+ # GH 35552: Need to adjust start and end based on the nans appended to values
+ # when center=True
+ if num_values > len(start):
+ offset = num_values - len(start)
+ start = np.concatenate([start, np.array([end[-1]] * offset)])
+ end = np.concatenate([end, np.array([end[-1]] * offset)])
return start, end
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index ca5a9eccea4f5..5241b9548a442 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -215,6 +215,71 @@ def foo(x):
)
tm.assert_series_equal(result, expected)
+ def test_groupby_rolling_center_center(self):
+ # GH 35552
+ series = Series(range(1, 6))
+ result = series.groupby(series).rolling(center=True, window=3).mean()
+ expected = Series(
+ [np.nan] * 5,
+ index=pd.MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3), (5, 4))),
+ )
+ tm.assert_series_equal(result, expected)
+
+ series = Series(range(1, 5))
+ result = series.groupby(series).rolling(center=True, window=3).mean()
+ expected = Series(
+ [np.nan] * 4,
+ index=pd.MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3))),
+ )
+ tm.assert_series_equal(result, expected)
+
+ df = pd.DataFrame({"a": ["a"] * 5 + ["b"] * 6, "b": range(11)})
+ result = df.groupby("a").rolling(center=True, window=3).mean()
+ expected = pd.DataFrame(
+ [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, 9, np.nan],
+ index=pd.MultiIndex.from_tuples(
+ (
+ ("a", 0),
+ ("a", 1),
+ ("a", 2),
+ ("a", 3),
+ ("a", 4),
+ ("b", 5),
+ ("b", 6),
+ ("b", 7),
+ ("b", 8),
+ ("b", 9),
+ ("b", 10),
+ ),
+ names=["a", None],
+ ),
+ columns=["b"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+ df = pd.DataFrame({"a": ["a"] * 5 + ["b"] * 5, "b": range(10)})
+ result = df.groupby("a").rolling(center=True, window=3).mean()
+ expected = pd.DataFrame(
+ [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, np.nan],
+ index=pd.MultiIndex.from_tuples(
+ (
+ ("a", 0),
+ ("a", 1),
+ ("a", 2),
+ ("a", 3),
+ ("a", 4),
+ ("b", 5),
+ ("b", 6),
+ ("b", 7),
+ ("b", 8),
+ ("b", 9),
+ ),
+ names=["a", None],
+ ),
+ columns=["b"],
+ )
+ tm.assert_frame_equal(result, expected)
+
def test_groupby_subselect_rolling(self):
# GH 35486
df = DataFrame(
| - [x] closes #35552
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35562 | 2020-08-05T06:31:03Z | 2020-08-07T18:09:31Z | 2020-08-07T18:09:30Z | 2020-08-07T19:29:31Z |
Merge pull request #1 from pandas-dev/master - #34498 improve the .equals() docstring | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e46fde1f59f16..5c1ae1bc1e025 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1199,11 +1199,9 @@ def equals(self, other):
"""
Test whether two objects contain the same elements.
- This function allows two Series or DataFrames to be compared against
- each other to see if they have the same shape and elements. NaNs in
- the same location are considered equal. The column headers do not
- need to have the same type, but the elements within the columns must
- be the same dtype.
+ This function compares two Series or DataFrames.
+ checks if both have the same shape and elements.
+ columns must be the same dtype.
Parameters
----------
@@ -1234,10 +1232,8 @@ def equals(self, other):
Notes
-----
- This function requires that the elements have the same dtype as their
- respective elements in the other Series or DataFrame. However, the
- column labels do not need to have the same type, as long as they are
- still considered equal.
+ Header or index values may be of a different dtype.
+ NaN values at same positions are considered equal.
Examples
--------
|
#34498 improve the .equals() docstring
| https://api.github.com/repos/pandas-dev/pandas/pulls/35555 | 2020-08-04T21:06:02Z | 2020-08-07T19:19:12Z | null | 2020-08-07T19:19:12Z |
DOC: Document that read_hdf can use pickle | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index d4be9d802d697..cc42f952b1733 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -3441,10 +3441,11 @@ for some advanced strategies
.. warning::
- pandas requires ``PyTables`` >= 3.0.0.
- There is a indexing bug in ``PyTables`` < 3.2 which may appear when querying stores using an index.
- If you see a subset of results being returned, upgrade to ``PyTables`` >= 3.2.
- Stores created previously will need to be rewritten using the updated version.
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle. Loading pickled data received from
+ untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
.. ipython:: python
:suppress:
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index e0df4c29e543e..6497067e3930c 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -289,7 +289,15 @@ def read_hdf(
Read from the store, close it if we opened it.
Retrieve pandas object stored in file, optionally based on where
- criteria
+ criteria.
+
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
@@ -445,6 +453,14 @@ class HDFStore:
Either Fixed or Table format.
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
Parameters
----------
path : str
@@ -789,6 +805,14 @@ def select(
"""
Retrieve pandas object stored in file, optionally based on where criteria.
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
Parameters
----------
key : str
@@ -852,6 +876,15 @@ def select_as_coordinates(
"""
return the selection as an Index
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
+
Parameters
----------
key : str
@@ -876,6 +909,14 @@ def select_column(
return a single column from the table. This is generally only useful to
select an indexable
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
Parameters
----------
key : str
@@ -912,6 +953,14 @@ def select_as_multiple(
"""
Retrieve pandas objects from multiple tables.
+ .. warning::
+
+ Pandas uses PyTables for reading and writing HDF5 files, which allows
+ serializing object-dtype data with pickle when using the "fixed" format.
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: https://docs.python.org/3/library/pickle.html for more.
+
Parameters
----------
keys : a list of the tables
| This documents that `read_hdf`, which uses PyTables, might invoke pickle to deserialize arrays that were serialize with pickle.
PyTables clearly documents this at http://www.pytables.org/usersguide/libref/declarative_classes.html#tables.ObjectAtom, but pandas users may not realize that if they aren't aware that pandas uses pytables under the hood.
I've tried to include the warning in all the methods that eventually read data, but it's a bit hard to say if I've gotten them all.
Ideally we would also like to provide an `allow_pickle=False` keyword, but that would be best implemented in PyTables, and used by us. I opened https://github.com/PyTables/PyTables/issues/813 for discussion. | https://api.github.com/repos/pandas-dev/pandas/pulls/35554 | 2020-08-04T20:16:58Z | 2020-08-06T22:00:27Z | 2020-08-06T22:00:26Z | 2020-08-06T22:00:31Z |
[MINOR] Fix unnecessary pluralization | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 79627e43d78c2..d229cd5a9d7ec 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4193,7 +4193,7 @@ def rename(
Parameters
----------
mapper : dict-like or function
- Dict-like or functions transformations to apply to
+ Dict-like or function transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35553 | 2020-08-04T19:33:30Z | 2020-08-04T23:00:15Z | 2020-08-04T23:00:15Z | 2020-09-15T10:55:35Z |
Bug fix one element series truncate | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 7db609fba5d68..45f1015a8e7bd 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -48,7 +48,7 @@ Categorical
**Indexing**
--
+- Bug in :meth:`Series.truncate` when trying to truncate a single-element series (:issue:`35544`)
**DataFrame**
- Bug in :class:`DataFrame` constructor failing to raise ``ValueError`` in some cases when data and index have mismatched lengths (:issue:`33437`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 42d02f37508fc..aaf23cc198d95 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9405,7 +9405,7 @@ def truncate(
if before > after:
raise ValueError(f"Truncate: {after} must be after {before}")
- if ax.is_monotonic_decreasing:
+ if len(ax) > 1 and ax.is_monotonic_decreasing:
before, after = after, before
slicer = [slice(None, None)] * self._AXIS_LEN
diff --git a/pandas/tests/series/methods/test_truncate.py b/pandas/tests/series/methods/test_truncate.py
index 7c82edbaec177..45592f8d99b93 100644
--- a/pandas/tests/series/methods/test_truncate.py
+++ b/pandas/tests/series/methods/test_truncate.py
@@ -141,3 +141,14 @@ def test_truncate_multiindex(self):
expected = df.col
tm.assert_series_equal(result, expected)
+
+ def test_truncate_one_element_series(self):
+ # GH 35544
+ series = pd.Series([0.1], index=pd.DatetimeIndex(["2020-08-04"]))
+ before = pd.Timestamp("2020-08-02")
+ after = pd.Timestamp("2020-08-04")
+
+ result = series.truncate(before=before, after=after)
+
+ # the input Series and the expected Series are the same
+ tm.assert_series_equal(result, series)
| - [Y] closes #35544
- [Y] tests added / passed
- [Y] passes `black pandas`
- [Y] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [Y] whatsnew entry
Fix when trying to truncate a one-element series within a correct date range returned an empty series instead of the originally passed-in series. | https://api.github.com/repos/pandas-dev/pandas/pulls/35547 | 2020-08-04T15:07:30Z | 2020-08-07T11:39:32Z | 2020-08-07T11:39:32Z | 2020-08-07T12:48:06Z |
REGR: Fix interpolation on empty dataframe | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index a044a4aab284e..c815f87e8e703 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -17,6 +17,7 @@ Fixed regressions
- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`)
+- Fixed regression where :meth:`DataFrame.interpolate` would raise a ``TypeError`` when the :class:`DataFrame` was empty (:issue:`35598`).
- Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`)
- Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 87f25f578c3c6..6a773dc91e534 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6820,6 +6820,9 @@ def interpolate(
obj = self.T if should_transpose else self
+ if obj.empty:
+ return self
+
if method not in fillna_methods:
axis = self._info_axis_number
diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py
index ddb5723e7bd3e..3c9d79397e4bd 100644
--- a/pandas/tests/frame/methods/test_interpolate.py
+++ b/pandas/tests/frame/methods/test_interpolate.py
@@ -34,6 +34,13 @@ def test_interp_basic(self):
expected.loc[5, "B"] = 9
tm.assert_frame_equal(result, expected)
+ def test_interp_empty(self):
+ # https://github.com/pandas-dev/pandas/issues/35598
+ df = DataFrame()
+ result = df.interpolate()
+ expected = df
+ tm.assert_frame_equal(result, expected)
+
def test_interp_bad_method(self):
df = DataFrame(
{
| Interpolation on an empty dataframe broke in 1.1 due to a change in how 'all columns are objects' is checked (specifically all(empty set) is True, while before dtype count object = None was checked against size = 0).
This is a complex function and I'm not sure what the proper fix is, suggesting to keep the empty check out of the rest of the logic.
Example code that broke:
```python
import pandas as pd
df = pd.DataFrame([1,2])
df[[]].interpolate(limit_area='inside')
```
- [x] closes #35598
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35543 | 2020-08-04T12:51:33Z | 2020-08-17T10:22:35Z | 2020-08-17T10:22:34Z | 2020-08-24T14:08:52Z |
Backport PR #35510 on branch 1.1.x (REGR: Check for float in isnaobj_old) | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 443589308ad4c..f8f655ce7b866 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -15,7 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
--
+- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
-
-
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index 760fab3781fd4..771e8053ac9be 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -155,7 +155,10 @@ def isnaobj_old(arr: ndarray) -> ndarray:
result = np.zeros(n, dtype=np.uint8)
for i in range(n):
val = arr[i]
- result[i] = checknull(val) or val == INF or val == NEGINF
+ result[i] = (
+ checknull(val)
+ or util.is_float_object(val) and (val == INF or val == NEGINF)
+ )
return result.view(np.bool_)
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index 12e73bae40eac..5154a9ba6fdf0 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -18,7 +18,7 @@
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
import pandas.util._test_decorators as td
-from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
+from pandas import DataFrame, Index, MultiIndex, Series, compat, concat, option_context
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
@@ -2179,3 +2179,13 @@ def test_read_csv_names_not_accepting_sets(all_parsers):
parser = all_parsers
with pytest.raises(ValueError, match="Names should be an ordered collection."):
parser.read_csv(StringIO(data), names=set("QAZ"))
+
+
+def test_read_csv_with_use_inf_as_na(all_parsers):
+ # https://github.com/pandas-dev/pandas/issues/35493
+ parser = all_parsers
+ data = "1.0\nNaN\n3.0"
+ with option_context("use_inf_as_na", True):
+ result = parser.read_csv(StringIO(data), header=None)
+ expected = DataFrame([1.0, np.nan, 3.0])
+ tm.assert_frame_equal(result, expected)
| Backport PR #35510: REGR: Check for float in isnaobj_old | https://api.github.com/repos/pandas-dev/pandas/pulls/35540 | 2020-08-04T09:55:42Z | 2020-08-04T10:56:33Z | 2020-08-04T10:56:33Z | 2020-08-04T10:56:33Z |
Backport PR #35440 on branch 1.1.x (BUG: CategoricalIndex.format) | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 443589308ad4c..815ce2c4c2905 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -26,6 +26,13 @@ Fixed regressions
Bug fixes
~~~~~~~~~
+
+Categorical
+^^^^^^^^^^^
+
+- Bug in :meth:`CategoricalIndex.format` where, when stringified scalars had different lengths, the shorter string would be right-filled with spaces, so it had the same length as the longest string (:issue:`35439`)
+
+
**Datetimelike**
-
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index b0b008de69a94..74b235655e345 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -20,7 +20,7 @@
pandas_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
-from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
+from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna
from pandas.core import accessor
from pandas.core.algorithms import take_1d
@@ -348,12 +348,12 @@ def _format_attrs(self):
return attrs
def _format_with_header(self, header, na_rep="NaN") -> List[str]:
- from pandas.io.formats.format import format_array
+ from pandas.io.formats.printing import pprint_thing
- formatted_values = format_array(
- self._values, formatter=None, na_rep=na_rep, justify="left"
- )
- result = ibase.trim_front(formatted_values)
+ result = [
+ pprint_thing(x, escape_chars=("\t", "\r", "\n")) if notna(x) else na_rep
+ for x in self._values
+ ]
return header + result
# --------------------------------------------------------------------
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index e5e98039ff77b..eee610681087d 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -1,7 +1,7 @@
from datetime import timedelta
import operator
from sys import getsizeof
-from typing import Any, List, Optional
+from typing import Any, Optional
import warnings
import numpy as np
@@ -33,8 +33,6 @@
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops.common import unpack_zerodim_and_defer
-from pandas.io.formats.printing import pprint_thing
-
_empty_range = range(0)
@@ -197,9 +195,6 @@ def _format_data(self, name=None):
# we are formatting thru the attributes
return None
- def _format_with_header(self, header, na_rep="NaN") -> List[str]:
- return header + [pprint_thing(x) for x in self._range]
-
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index 7f30a77872bc1..8af26eef504fc 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -478,3 +478,9 @@ def test_reindex_base(self):
def test_map_str(self):
# See test_map.py
pass
+
+ def test_format_different_scalar_lengths(self):
+ # GH35439
+ idx = CategoricalIndex(["aaaaaaaaa", "b"])
+ expected = ["aaaaaaaaa", "b"]
+ assert idx.format() == expected
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index f5b9f4a401e60..3b41c4bfacf73 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -642,6 +642,12 @@ def test_equals_op(self):
tm.assert_numpy_array_equal(index_a == item, expected3)
tm.assert_series_equal(series_a == item, Series(expected3))
+ def test_format(self):
+ # GH35439
+ idx = self.create_index()
+ expected = [str(x) for x in idx]
+ assert idx.format() == expected
+
def test_hasnans_isnans(self, index):
# GH 11343, added tests for hasnans / isnans
if isinstance(index, MultiIndex):
diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py
index 7345ae3032463..a5abf2946feda 100644
--- a/pandas/tests/indexes/datetimes/test_datetimelike.py
+++ b/pandas/tests/indexes/datetimes/test_datetimelike.py
@@ -20,6 +20,12 @@ def index(self, request):
def create_index(self) -> DatetimeIndex:
return date_range("20130101", periods=5)
+ def test_format(self):
+ # GH35439
+ idx = self.create_index()
+ expected = [f"{x:%Y-%m-%d}" for x in idx]
+ assert idx.format() == expected
+
def test_shift(self):
pass # handled in test_ops
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index eaf48421dc071..59ee88117a984 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1171,8 +1171,11 @@ def test_summary_bug(self):
assert "~:{range}:0" in result
assert "{other}%s" in result
- def test_format(self, index):
- self._check_method_works(Index.format, index)
+ def test_format_different_scalar_lengths(self):
+ # GH35439
+ idx = Index(["aaaaaaaaa", "b"])
+ expected = ["aaaaaaaaa", "b"]
+ assert idx.format() == expected
def test_format_bug(self):
# GH 14626
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index a7c5734ef9b02..bfcac5d433d2c 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -21,6 +21,13 @@ def test_can_hold_identifiers(self):
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is False
+ def test_format(self):
+ # GH35439
+ idx = self.create_index()
+ max_width = max(len(str(x)) for x in idx)
+ expected = [str(x).ljust(max_width) for x in idx]
+ assert idx.format() == expected
+
def test_numeric_compat(self):
pass # override Base method
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index e236b3da73c69..84805d06df4a8 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -2141,6 +2141,15 @@ def test_dict_entries(self):
assert "'a': 1" in val
assert "'b': 2" in val
+ def test_categorical_columns(self):
+ # GH35439
+ data = [[4, 2], [3, 2], [4, 3]]
+ cols = ["aaaaaaaaa", "b"]
+ df = pd.DataFrame(data, columns=cols)
+ df_cat_cols = pd.DataFrame(data, columns=pd.CategoricalIndex(cols))
+
+ assert df.to_string() == df_cat_cols.to_string()
+
def test_period(self):
# GH 12615
df = pd.DataFrame(
| Backport PR #35440: BUG: CategoricalIndex.format | https://api.github.com/repos/pandas-dev/pandas/pulls/35539 | 2020-08-04T09:54:49Z | 2020-08-04T10:38:00Z | 2020-08-04T10:38:00Z | 2020-08-04T10:38:00Z |
Backport PR #35502 on branch 1.1.x (CI: xfail numpy-dev) | diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index c8b780455f862..f5b9f4a401e60 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -5,6 +5,7 @@
import pytest
from pandas._libs import iNaT
+from pandas.compat.numpy import _is_numpy_dev
from pandas.errors import InvalidIndexError
from pandas.core.dtypes.common import is_datetime64tz_dtype
@@ -417,7 +418,7 @@ def test_set_ops_error_cases(self, case, method, index):
with pytest.raises(TypeError, match=msg):
getattr(index, method)(case)
- def test_intersection_base(self, index):
+ def test_intersection_base(self, index, request):
if isinstance(index, CategoricalIndex):
return
@@ -434,6 +435,15 @@ def test_intersection_base(self, index):
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
+ # https://github.com/pandas-dev/pandas/issues/35481
+ if (
+ _is_numpy_dev
+ and isinstance(case, Series)
+ and isinstance(index, UInt64Index)
+ ):
+ mark = pytest.mark.xfail(reason="gh-35481")
+ request.node.add_marker(mark)
+
result = first.intersection(case)
assert tm.equalContents(result, second)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 30b13b6ea9fce..193800fae751f 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -5,6 +5,8 @@
import numpy as np
import pytest
+from pandas.compat.numpy import _is_numpy_dev
+
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range
import pandas._testing as tm
@@ -945,6 +947,7 @@ def test_loc_setitem_empty_append(self):
df.loc[0, "x"] = expected.loc[0, "x"]
tm.assert_frame_equal(df, expected)
+ @pytest.mark.xfail(_is_numpy_dev, reason="gh-35481")
def test_loc_setitem_empty_append_raises(self):
# GH6173, various appends to an empty dataframe
| Backport PR #35502: CI: xfail numpy-dev | https://api.github.com/repos/pandas-dev/pandas/pulls/35537 | 2020-08-04T09:03:21Z | 2020-08-04T09:53:42Z | 2020-08-04T09:53:42Z | 2020-08-04T09:53:42Z |
Backport PR #35468 on branch 1.1.x (CI: activate azure pipelines on 1.1.x) | diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index e45cafc02cb61..113ad3e338952 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -1,9 +1,11 @@
# Adapted from https://github.com/numba/numba/blob/master/azure-pipelines.yml
trigger:
- master
+- 1.1.x
pr:
- master
+- 1.1.x
variables:
PYTEST_WORKERS: auto
| Backport PR #35468: CI: activate azure pipelines on 1.1.x | https://api.github.com/repos/pandas-dev/pandas/pulls/35536 | 2020-08-04T08:23:08Z | 2020-08-04T09:01:47Z | 2020-08-04T09:01:47Z | 2020-08-04T09:01:47Z |
Backport PR #35467 on branch 1.1.x (CI: activate github actions on 1.1.x (PR only)) | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index db1fc30111a2d..149acef72db26 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -4,7 +4,9 @@ on:
push:
branches: master
pull_request:
- branches: master
+ branches:
+ - master
+ - 1.1.x
env:
ENV_FILE: environment.yml
| Backport PR #35467: CI: activate github actions on 1.1.x (PR only) | https://api.github.com/repos/pandas-dev/pandas/pulls/35535 | 2020-08-04T08:22:31Z | 2020-08-04T09:00:50Z | 2020-08-04T09:00:50Z | 2020-08-04T09:00:50Z |
Check if NPY_NAT is NA for int64 in rank() (#32859) | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index b16ca0a80c5b4..aaec5da8af66f 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -157,7 +157,7 @@ ExtensionArray
Other
^^^^^
--
+- Bug in :meth:`Series.rank` incorrectly treating int64 min value as NaN (:issue:`32859`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 7e90a8cc681ef..958cbd050ed61 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -840,7 +840,7 @@ def rank_1d(
elif rank_t is float64_t:
mask = np.isnan(values)
elif rank_t is int64_t:
- mask = values == NPY_NAT
+ mask = missing.isnaobj(values)
# create copy in case of NPY_NAT
# values are mutated inplace
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 6c6bdb6b1b2bd..6e656eca5e152 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1769,6 +1769,19 @@ def test_basic(self):
s = Series([1, 100], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
+ @pytest.mark.parametrize("dtype", ["int32", "int64"])
+ def test_negative_min_rank(self, dtype):
+ # GH#32859
+ # Check that nan is respected on float64
+ s = pd.Series(np.array([np.inf, np.nan, -np.inf]))
+ expected = pd.Series(np.array([2.0, np.nan, 1.0]))
+ tm.assert_series_equal(s.rank(na_option="keep"), expected)
+
+ # Rank works if coverted to most negative value
+ s = pd.Series(np.array([np.inf, np.nan, -np.inf]).astype(dtype))
+ expected = pd.Series(np.array([2.0, 2.0, 2.0]))
+ tm.assert_series_equal(s.rank(na_option="keep"), expected)
+
def test_uint64_overflow(self):
exp = np.array([1, 2], dtype=np.float64)
| - [x] closes #32859
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Used `missing.isnaobj()` to try to handle the suggestion from @jbrockmendel. Lmk if this might cause other issues.
| https://api.github.com/repos/pandas-dev/pandas/pulls/35533 | 2020-08-04T01:04:37Z | 2020-08-18T01:03:30Z | null | 2020-08-18T01:03:40Z |
BUG: handle immutable arrays in tz_convert_from_utc (#35530) | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index b16ca0a80c5b4..304897edbb75e 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -59,7 +59,7 @@ Categorical
Datetimelike
^^^^^^^^^^^^
--
+- Bug in :attr:`DatetimeArray.date` where a ``ValueError`` would be raised with a read-only backing array (:issue:`33530`)
-
Timedelta
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index 2b148cd8849f1..4c62b16d430bd 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -410,7 +410,7 @@ cpdef int64_t tz_convert_from_utc_single(int64_t val, tzinfo tz):
return val + deltas[pos]
-def tz_convert_from_utc(int64_t[:] vals, tzinfo tz):
+def tz_convert_from_utc(const int64_t[:] vals, tzinfo tz):
"""
Convert the values (in i8) from UTC to tz
@@ -435,7 +435,7 @@ def tz_convert_from_utc(int64_t[:] vals, tzinfo tz):
@cython.boundscheck(False)
@cython.wraparound(False)
-cdef int64_t[:] _tz_convert_from_utc(int64_t[:] vals, tzinfo tz):
+cdef int64_t[:] _tz_convert_from_utc(const int64_t[:] vals, tzinfo tz):
"""
Convert the given values (in i8) either to UTC or from UTC.
@@ -457,7 +457,7 @@ cdef int64_t[:] _tz_convert_from_utc(int64_t[:] vals, tzinfo tz):
str typ
if is_utc(tz):
- converted = vals
+ converted = vals.copy()
elif is_tzlocal(tz):
converted = np.empty(n, dtype=np.int64)
for i in range(n):
diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py
index 4f184b78f34a1..87cd97f853f4d 100644
--- a/pandas/tests/tslibs/test_conversion.py
+++ b/pandas/tests/tslibs/test_conversion.py
@@ -78,6 +78,14 @@ def test_tz_convert_corner(arr):
tm.assert_numpy_array_equal(result, arr)
+def test_tz_convert_readonly():
+ # GH#35530
+ arr = np.array([0], dtype=np.int64)
+ arr.setflags(write=False)
+ result = tzconversion.tz_convert_from_utc(arr, UTC)
+ tm.assert_numpy_array_equal(result, arr)
+
+
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("dtype", ["M8[ns]", "M8[s]"])
def test_length_zero_copy(dtype, copy):
| - [x] closes #35530
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/35532 | 2020-08-03T22:49:18Z | 2020-08-06T15:11:23Z | 2020-08-06T15:11:22Z | 2020-08-06T18:17:25Z |
BUG: Fix assert_equal when check_exact=True for non-numeric dtypes #3… | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index f0ad9d1ca3b0f..2fa4c12d24172 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -17,6 +17,7 @@ Fixed regressions
- Fixed regression where :meth:`DataFrame.to_numpy` would raise a ``RuntimeError`` for mixed dtypes when converting to ``str`` (:issue:`35455`)
- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
+- Fixed regression where :func:`pandas.testing.assert_series_equal` would raise an error when non-numeric dtypes were passed with ``check_exact=True`` (:issue:`35446`)
- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`)
- Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`)
- Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`)
diff --git a/pandas/_testing.py b/pandas/_testing.py
index a020fbff3553a..713f29466f097 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -1339,10 +1339,8 @@ def assert_series_equal(
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
- if check_exact:
- if not is_numeric_dtype(left.dtype):
- raise AssertionError("check_exact may only be used with numeric Series")
-
+ if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
+ # Only check exact if dtype is numeric
assert_numpy_array_equal(
left._values,
right._values,
diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py
index 1284cc9d4f49b..a7b5aeac560e4 100644
--- a/pandas/tests/util/test_assert_series_equal.py
+++ b/pandas/tests/util/test_assert_series_equal.py
@@ -281,3 +281,18 @@ class MySeries(Series):
with pytest.raises(AssertionError, match="Series classes are different"):
tm.assert_series_equal(s3, s1, check_series_type=True)
+
+
+def test_series_equal_exact_for_nonnumeric():
+ # https://github.com/pandas-dev/pandas/issues/35446
+ s1 = Series(["a", "b"])
+ s2 = Series(["a", "b"])
+ s3 = Series(["b", "a"])
+
+ tm.assert_series_equal(s1, s2, check_exact=True)
+ tm.assert_series_equal(s2, s1, check_exact=True)
+
+ with pytest.raises(AssertionError):
+ tm.assert_series_equal(s1, s3, check_exact=True)
+ with pytest.raises(AssertionError):
+ tm.assert_series_equal(s3, s1, check_exact=True)
| `assert_series_equal(..., check_exact=True)` no longer raises an error when series has a non-numeric dtype. This fixes a bug/ regression introduced in `1.1.0`.
- [x] closes #35446
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I've added a very simple test, but could add more. For instance, there could be a check on `assert_frame_equal`, but I'm not sure what the desired strategy is for that.
I've left off the `whatsnew` entry for now since I figure that's the most likely to generate conflicts while this gets reviewed. | https://api.github.com/repos/pandas-dev/pandas/pulls/35522 | 2020-08-03T07:20:45Z | 2020-08-10T13:30:14Z | 2020-08-10T13:30:13Z | 2020-08-10T13:31:13Z |
REF: StringArray._from_sequence, use less memory | diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py
index d7fb2775376c0..2023858181baa 100644
--- a/asv_bench/benchmarks/strings.py
+++ b/asv_bench/benchmarks/strings.py
@@ -7,6 +7,21 @@
from .pandas_vb_common import tm
+class Construction:
+
+ params = ["str", "string"]
+ param_names = ["dtype"]
+
+ def setup(self, dtype):
+ self.data = tm.rands_array(nchars=10 ** 5, size=10)
+
+ def time_construction(self, dtype):
+ Series(self.data, dtype=dtype)
+
+ def peakmem_construction(self, dtype):
+ Series(self.data, dtype=dtype)
+
+
class Methods:
def setup(self):
self.s = Series(tm.makeStringIndex(10 ** 5))
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 565b4a014bd0c..f1a2f3be5dca3 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -74,6 +74,11 @@ Categorical
- Bug in :class:`DataFrame` constructor failing to raise ``ValueError`` in some cases when data and index have mismatched lengths (:issue:`33437`)
-
+**Strings**
+
+- fix memory usage issue when instantiating large :class:`pandas.arrays.StringArray` (:issue:`35499`)
+
+
.. ---------------------------------------------------------------------------
.. _whatsnew_111.contributors:
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 5fa91ffee8ea8..eadfcefaac73d 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -618,35 +618,52 @@ def astype_intsafe(ndarray[object] arr, new_dtype):
@cython.wraparound(False)
@cython.boundscheck(False)
-def astype_str(arr: ndarray, skipna: bool=False) -> ndarray[object]:
- """
- Convert all elements in an array to string.
+cpdef ndarray[object] ensure_string_array(
+ arr,
+ object na_value=np.nan,
+ bint convert_na_value=True,
+ bint copy=True,
+ bint skipna=True,
+):
+ """Returns a new numpy array with object dtype and only strings and na values.
Parameters
----------
- arr : ndarray
- The array whose elements we are casting.
- skipna : bool, default False
+ arr : array-like
+ The values to be converted to str, if needed.
+ na_value : Any
+ The value to use for na. For example, np.nan or pd.NA.
+ convert_na_value : bool, default True
+ If False, existing na values will be used unchanged in the new array.
+ copy : bool, default True
+ Whether to ensure that a new array is returned.
+ skipna : bool, default True
Whether or not to coerce nulls to their stringified form
- (e.g. NaN becomes 'nan').
+ (e.g. if False, NaN becomes 'nan').
Returns
-------
ndarray
- A new array with the input array's elements casted.
+ An array with the input array's elements casted to str or nan-like.
"""
cdef:
- object arr_i
- Py_ssize_t i, n = arr.size
- ndarray[object] result = np.empty(n, dtype=object)
-
- for i in range(n):
- arr_i = arr[i]
+ Py_ssize_t i = 0, n = len(arr)
- if not (skipna and checknull(arr_i)):
- arr_i = str(arr_i)
+ result = np.asarray(arr, dtype="object")
+ if copy and result is arr:
+ result = result.copy()
- result[i] = arr_i
+ for i in range(n):
+ val = result[i]
+ if not checknull(val):
+ result[i] = str(val)
+ else:
+ if convert_na_value:
+ val = na_value
+ if skipna:
+ result[i] = val
+ else:
+ result[i] = str(val)
return result
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index bb55c3cdea45c..381968f9724b6 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -177,11 +177,10 @@ class StringArray(PandasArray):
def __init__(self, values, copy=False):
values = extract_array(values)
- skip_validation = isinstance(values, type(self))
super().__init__(values, copy=copy)
self._dtype = StringDtype()
- if not skip_validation:
+ if not isinstance(values, type(self)):
self._validate()
def _validate(self):
@@ -200,23 +199,11 @@ def _from_sequence(cls, scalars, dtype=None, copy=False):
assert dtype == "string"
result = np.asarray(scalars, dtype="object")
- if copy and result is scalars:
- result = result.copy()
-
- # Standardize all missing-like values to NA
- # TODO: it would be nice to do this in _validate / lib.is_string_array
- # We are already doing a scan over the values there.
- na_values = isna(result)
- has_nans = na_values.any()
- if has_nans and result is scalars:
- # force a copy now, if we haven't already
- result = result.copy()
-
- # convert to str, then to object to avoid dtype like '<U3', then insert na_value
- result = np.asarray(result, dtype=str)
- result = np.asarray(result, dtype="object")
- if has_nans:
- result[na_values] = StringDtype.na_value
+
+ # convert non-na-likes to str, and nan-likes to StringDtype.na_value
+ result = lib.ensure_string_array(
+ result, na_value=StringDtype.na_value, copy=copy
+ )
return cls(result)
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 228329898b6a4..2697f42eb05a4 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -916,7 +916,7 @@ def astype_nansafe(arr, dtype, copy: bool = True, skipna: bool = False):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, str):
- return lib.astype_str(arr.ravel(), skipna=skipna).reshape(arr.shape)
+ return lib.ensure_string_array(arr.ravel(), skipna=skipna).reshape(arr.shape)
elif is_datetime64_dtype(arr):
if is_object_dtype(dtype):
@@ -1608,19 +1608,11 @@ def construct_1d_ndarray_preserving_na(
>>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype=np.dtype('str'))
array(['1.0', '2.0', None], dtype=object)
"""
- subarr = np.array(values, dtype=dtype, copy=copy)
if dtype is not None and dtype.kind == "U":
- # GH-21083
- # We can't just return np.array(subarr, dtype='str') since
- # NumPy will convert the non-string objects into strings
- # Including NA values. Se we have to go
- # string -> object -> update NA, which requires an
- # additional pass over the data.
- na_values = isna(values)
- subarr2 = subarr.astype(object)
- subarr2[na_values] = np.asarray(values, dtype=object)[na_values]
- subarr = subarr2
+ subarr = lib.ensure_string_array(values, convert_na_value=False, copy=copy)
+ else:
+ subarr = np.array(values, dtype=dtype, copy=copy)
return subarr
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index 6f9a1a5be4c43..efd5d29ae0717 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -206,12 +206,16 @@ def test_constructor_raises():
@pytest.mark.parametrize("copy", [True, False])
def test_from_sequence_no_mutate(copy):
- a = np.array(["a", np.nan], dtype=object)
- original = a.copy()
- result = pd.arrays.StringArray._from_sequence(a, copy=copy)
- expected = pd.arrays.StringArray(np.array(["a", pd.NA], dtype=object))
+ nan_arr = np.array(["a", np.nan], dtype=object)
+ na_arr = np.array(["a", pd.NA], dtype=object)
+
+ result = pd.arrays.StringArray._from_sequence(nan_arr, copy=copy)
+ expected = pd.arrays.StringArray(na_arr)
+
tm.assert_extension_array_equal(result, expected)
- tm.assert_numpy_array_equal(a, original)
+
+ expected = nan_arr if copy else na_arr
+ tm.assert_numpy_array_equal(nan_arr, expected)
def test_astype_int():
| - [x] closes #35499
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
@ldacey, can you try if this fixes your problem?
CC @simonjayhawkins | https://api.github.com/repos/pandas-dev/pandas/pulls/35519 | 2020-08-02T19:15:46Z | 2020-08-17T14:35:00Z | 2020-08-17T14:35:00Z | 2020-09-19T08:19:56Z |
CLN: get_flattened_iterator | diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 3aaeef3b63760..c6b0732b04c09 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -50,7 +50,7 @@
from pandas.core.sorting import (
compress_group_index,
decons_obs_group_ids,
- get_flattened_iterator,
+ get_flattened_list,
get_group_index,
get_group_index_sorter,
get_indexer_dict,
@@ -153,7 +153,7 @@ def _get_group_keys(self):
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
- return get_flattened_iterator(comp_ids, ngroups, self.levels, self.codes)
+ return get_flattened_list(comp_ids, ngroups, self.levels, self.codes)
def apply(self, f: F, data: FrameOrSeries, axis: int = 0):
mutated = self.mutated
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index ee73aa42701b0..8bdd466ae6f33 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -1,5 +1,6 @@
""" miscellaneous sorting / groupby utilities """
-from typing import Callable, Optional
+from collections import defaultdict
+from typing import TYPE_CHECKING, Callable, DefaultDict, Iterable, List, Optional, Tuple
import numpy as np
@@ -18,6 +19,9 @@
import pandas.core.algorithms as algorithms
from pandas.core.construction import extract_array
+if TYPE_CHECKING:
+ from pandas.core.indexes.base import Index # noqa:F401
+
_INT64_MAX = np.iinfo(np.int64).max
@@ -409,7 +413,7 @@ def ensure_key_mapped(values, key: Optional[Callable], levels=None):
levels : Optional[List], if values is a MultiIndex, list of levels to
apply the key to.
"""
- from pandas.core.indexes.api import Index
+ from pandas.core.indexes.api import Index # noqa:F811
if not key:
return values
@@ -440,36 +444,21 @@ def ensure_key_mapped(values, key: Optional[Callable], levels=None):
return result
-class _KeyMapper:
- """
- Map compressed group id -> key tuple.
- """
-
- def __init__(self, comp_ids, ngroups: int, levels, labels):
- self.levels = levels
- self.labels = labels
- self.comp_ids = comp_ids.astype(np.int64)
-
- self.k = len(labels)
- self.tables = [hashtable.Int64HashTable(ngroups) for _ in range(self.k)]
-
- self._populate_tables()
-
- def _populate_tables(self):
- for labs, table in zip(self.labels, self.tables):
- table.map(self.comp_ids, labs.astype(np.int64))
-
- def get_key(self, comp_id):
- return tuple(
- level[table.get_item(comp_id)]
- for table, level in zip(self.tables, self.levels)
- )
-
-
-def get_flattened_iterator(comp_ids, ngroups, levels, labels):
- # provide "flattened" iterator for multi-group setting
- mapper = _KeyMapper(comp_ids, ngroups, levels, labels)
- return [mapper.get_key(i) for i in range(ngroups)]
+def get_flattened_list(
+ comp_ids: np.ndarray,
+ ngroups: int,
+ levels: Iterable["Index"],
+ labels: Iterable[np.ndarray],
+) -> List[Tuple]:
+ """Map compressed group id -> key tuple."""
+ comp_ids = comp_ids.astype(np.int64, copy=False)
+ arrays: DefaultDict[int, List[int]] = defaultdict(list)
+ for labs, level in zip(labels, levels):
+ table = hashtable.Int64HashTable(ngroups)
+ table.map(comp_ids, labs.astype(np.int64, copy=False))
+ for i in range(ngroups):
+ arrays[i].append(level[table.get_item(i)])
+ return [tuple(array) for array in arrays.values()]
def get_indexer_dict(label_list, keys):
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Refactor to just a single function
| https://api.github.com/repos/pandas-dev/pandas/pulls/35515 | 2020-08-02T07:07:45Z | 2020-08-06T21:51:06Z | 2020-08-06T21:51:06Z | 2020-08-06T23:00:09Z |
BUG: fix combine_first converting timestamp to int | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index a269580bc4453..11195e448ed77 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -301,6 +301,7 @@ Categorical
Datetimelike
^^^^^^^^^^^^
+- Bug in :meth:`DataFrame.combine_first` that would convert datetime-like column on other :class:`DataFrame` to integer when the column is not present in original :class:`DataFrame` (:issue:`28481`)
- Bug in :attr:`DatetimeArray.date` where a ``ValueError`` would be raised with a read-only backing array (:issue:`33530`)
- Bug in ``NaT`` comparisons failing to raise ``TypeError`` on invalid inequality comparisons (:issue:`35046`)
- Bug in :class:`DateOffset` where attributes reconstructed from pickle files differ from original objects when input values exceed normal ranges (e.g months=12) (:issue:`34511`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1f9987d9d3f5b..4267a563fc50e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6169,7 +6169,7 @@ def combine(
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
- arr = maybe_downcast_to_dtype(arr, this_dtype)
+ arr = maybe_downcast_to_dtype(arr, new_dtype)
result[col] = arr
diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py
index 78f265d32f8df..6c1531d182767 100644
--- a/pandas/tests/frame/methods/test_combine_first.py
+++ b/pandas/tests/frame/methods/test_combine_first.py
@@ -103,6 +103,7 @@ def test_combine_first_mixed_bug(self):
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
+ def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
@@ -118,6 +119,7 @@ def test_combine_first_mixed_bug(self):
df.loc[0, "A"] = 45
tm.assert_frame_equal(result, df)
+ def test_combine_first_doc_example(self):
# doc example
df1 = DataFrame(
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
@@ -134,16 +136,23 @@ def test_combine_first_mixed_bug(self):
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
tm.assert_frame_equal(result, expected)
+ def test_combine_first_return_obj_type_with_bools(self):
# GH3552, return object dtype with bools
df1 = DataFrame(
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
)
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
- result = df1.combine_first(df2)[2]
- expected = Series([True, True, False], name=2)
- tm.assert_series_equal(result, expected)
+ expected1 = pd.Series([True, True, False], name=2, dtype=object)
+ expected2 = pd.Series([True, True, False], name=2, dtype=object)
+ result1 = df1.combine_first(df2)[2]
+ result2 = df2.combine_first(df1)[2]
+
+ tm.assert_series_equal(result1, expected1)
+ tm.assert_series_equal(result2, expected2)
+
+ def test_combine_first_convert_datatime_correctly(self):
# GH 3593, converting datetime64[ns] incorrectly
df0 = DataFrame(
{"a": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]}
@@ -339,9 +348,14 @@ def test_combine_first_int(self):
df1 = pd.DataFrame({"a": [0, 1, 3, 5]}, dtype="int64")
df2 = pd.DataFrame({"a": [1, 4]}, dtype="int64")
- res = df1.combine_first(df2)
- tm.assert_frame_equal(res, df1)
- assert res["a"].dtype == "int64"
+ exp1 = pd.DataFrame({"a": [0, 1, 3, 5]}, dtype="float64")
+ exp2 = pd.DataFrame({"a": [1, 4, 3, 5]}, dtype="float64")
+
+ res1 = df1.combine_first(df2)
+ res2 = df2.combine_first(df1)
+
+ tm.assert_frame_equal(res1, exp1)
+ tm.assert_frame_equal(res2, exp2)
@pytest.mark.parametrize("val", [1, 1.0])
def test_combine_first_with_asymmetric_other(self, val):
@@ -353,3 +367,22 @@ def test_combine_first_with_asymmetric_other(self, val):
exp = pd.DataFrame({"isBool": [True], "isNum": [val]})
tm.assert_frame_equal(res, exp)
+
+
+@pytest.mark.parametrize(
+ "val1, val2",
+ [
+ (datetime(2020, 1, 1), datetime(2020, 1, 2)),
+ (pd.Period("2020-01-01", "D"), pd.Period("2020-01-02", "D")),
+ (pd.Timedelta("89 days"), pd.Timedelta("60 min")),
+ ],
+)
+def test_combine_first_timestamp_bug(val1, val2, nulls_fixture):
+
+ df1 = pd.DataFrame([[nulls_fixture, nulls_fixture]], columns=["a", "b"])
+ df2 = pd.DataFrame([[val1, val2]], columns=["b", "c"])
+
+ res = df1.combine_first(df2)
+ exp = pd.DataFrame([[nulls_fixture, val1, val2]], columns=["a", "b", "c"])
+
+ tm.assert_frame_equal(res, exp)
| - [x] closes #28481
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This fix introduced two regression, but it appears like the fix only made the API consistent. Previously the failing regressions where inconsistent say `df1.combine_first(df2)` would not return the same result as `df2.combine_first(df1)` for the failing cases, more on these in the code comments.
Let me know if there is a better way to handle this. | https://api.github.com/repos/pandas-dev/pandas/pulls/35514 | 2020-08-02T06:35:41Z | 2020-11-29T04:31:40Z | null | 2020-11-29T04:31:40Z |
BUG: RollingGroupby respects __getitem__ | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 6a327a4fc732f..5e36bfe6b6307 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -16,8 +16,7 @@ Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
--
--
+- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 445f179248226..87bcaa7d9512f 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -2220,6 +2220,10 @@ def _apply(
def _constructor(self):
return Rolling
+ @cache_readonly
+ def _selected_obj(self):
+ return self._groupby._selected_obj
+
def _create_blocks(self, obj: FrameOrSeries):
"""
Split data into blocks & return conformed data.
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index 744ca264e91d9..ca5a9eccea4f5 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -214,3 +214,28 @@ def foo(x):
name="value",
)
tm.assert_series_equal(result, expected)
+
+ def test_groupby_subselect_rolling(self):
+ # GH 35486
+ df = DataFrame(
+ {"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [10, 20, 30, 20]}
+ )
+ result = df.groupby("a")[["b"]].rolling(2).max()
+ expected = DataFrame(
+ [np.nan, np.nan, 2.0, np.nan],
+ columns=["b"],
+ index=pd.MultiIndex.from_tuples(
+ ((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+ result = df.groupby("a")["b"].rolling(2).max()
+ expected = Series(
+ [np.nan, np.nan, 2.0, np.nan],
+ index=pd.MultiIndex.from_tuples(
+ ((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None]
+ ),
+ name="b",
+ )
+ tm.assert_series_equal(result, expected)
| - [x] closes #35486
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35513 | 2020-08-02T02:23:26Z | 2020-08-06T17:45:20Z | 2020-08-06T17:45:20Z | 2020-08-06T19:18:11Z |
REGR: Check for float in isnaobj_old | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 443589308ad4c..f8f655ce7b866 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -15,7 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
--
+- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
-
-
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index 760fab3781fd4..771e8053ac9be 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -155,7 +155,10 @@ def isnaobj_old(arr: ndarray) -> ndarray:
result = np.zeros(n, dtype=np.uint8)
for i in range(n):
val = arr[i]
- result[i] = checknull(val) or val == INF or val == NEGINF
+ result[i] = (
+ checknull(val)
+ or util.is_float_object(val) and (val == INF or val == NEGINF)
+ )
return result.view(np.bool_)
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index 12e73bae40eac..5154a9ba6fdf0 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -18,7 +18,7 @@
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
import pandas.util._test_decorators as td
-from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
+from pandas import DataFrame, Index, MultiIndex, Series, compat, concat, option_context
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
@@ -2179,3 +2179,13 @@ def test_read_csv_names_not_accepting_sets(all_parsers):
parser = all_parsers
with pytest.raises(ValueError, match="Names should be an ordered collection."):
parser.read_csv(StringIO(data), names=set("QAZ"))
+
+
+def test_read_csv_with_use_inf_as_na(all_parsers):
+ # https://github.com/pandas-dev/pandas/issues/35493
+ parser = all_parsers
+ data = "1.0\nNaN\n3.0"
+ with option_context("use_inf_as_na", True):
+ result = parser.read_csv(StringIO(data), header=None)
+ expected = DataFrame([1.0, np.nan, 3.0])
+ tm.assert_frame_equal(result, expected)
| - [x] closes #35493
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Looks like we lost an isinstance check that caused this regression. | https://api.github.com/repos/pandas-dev/pandas/pulls/35510 | 2020-08-01T21:55:55Z | 2020-08-03T23:29:32Z | 2020-08-03T23:29:32Z | 2020-08-04T09:55:53Z |
Added paragraph on creating DataFrame from list of namedtuples | diff --git a/doc/source/user_guide/dsintro.rst b/doc/source/user_guide/dsintro.rst
index 360a14998b227..23bd44c1969a5 100644
--- a/doc/source/user_guide/dsintro.rst
+++ b/doc/source/user_guide/dsintro.rst
@@ -397,6 +397,32 @@ The result will be a DataFrame with the same index as the input Series, and
with one column whose name is the original name of the Series (only if no other
column name provided).
+
+.. _basics.dataframe.from_list_namedtuples:
+
+From a list of namedtuples
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The field names of the first ``namedtuple`` in the list determine the columns
+of the ``DataFrame``. The remaining namedtuples (or tuples) are simply unpacked
+and their values are fed into the rows of the ``DataFrame``. If any of those
+tuples is shorter than the first ``namedtuple`` then the later columns in the
+corresponding row are marked as missing values. If any are longer than the
+first ``namedtuple``, a ``ValueError`` is raised.
+
+.. ipython:: python
+
+ from collections import namedtuple
+
+ Point = namedtuple('Point', 'x y')
+
+ pd.DataFrame([Point(0, 0), Point(0, 3), (2, 3)])
+
+ Point3D = namedtuple('Point3D', 'x y z')
+
+ pd.DataFrame([Point3D(0, 0, 0), Point3D(0, 3, 5), Point(2, 3)])
+
+
.. _basics.dataframe.from_list_dataclasses:
From a list of dataclasses
| - [x] closes #35438
Is a whatsnew entry necessary for this PR? | https://api.github.com/repos/pandas-dev/pandas/pulls/35507 | 2020-08-01T18:53:17Z | 2020-08-05T02:27:23Z | 2020-08-05T02:27:23Z | 2020-08-05T09:38:25Z |
Added alignable boolean series and its example to `.loc` docs. | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 04d1dbceb3342..dd81823055390 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -255,6 +255,8 @@ def loc(self) -> "_LocIndexer":
- A boolean array of the same length as the axis being sliced,
e.g. ``[True, False, True]``.
+ - An alignable boolean Series. The index of the key will be aligned before
+ masking.
- A ``callable`` function with one argument (the calling Series or
DataFrame) and that returns valid output for indexing (one of the above)
@@ -264,6 +266,8 @@ def loc(self) -> "_LocIndexer":
------
KeyError
If any items are not found.
+ IndexingError
+ If an indexed key is passed and its index is unalignable to the frame index.
See Also
--------
@@ -319,6 +323,13 @@ def loc(self) -> "_LocIndexer":
max_speed shield
sidewinder 7 8
+ Alignable boolean Series:
+
+ >>> df.loc[pd.Series([False, True, False],
+ ... index=['viper', 'sidewinder', 'cobra'])]
+ max_speed shield
+ sidewinder 7 8
+
Conditional that returns a boolean Series
>>> df.loc[df['shield'] > 6]
| - [x] closes #31054
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I'm just not sure if I needed to add `IndexingError` to raises section. Added it to be safe, will remove if it's unnecessary.
Also, when someone uses `df.loc[df['col'] > 0]` it really is the same under the hood, `df['col'] > 0` is a boolean series with an alignable index. So there might be some overlap? | https://api.github.com/repos/pandas-dev/pandas/pulls/35506 | 2020-08-01T16:37:53Z | 2020-08-02T10:18:02Z | 2020-08-02T10:18:01Z | 2020-08-02T10:18:22Z |
TST: ensure that DataFrameGroupBy.apply does not convert datetime.date to pd.Timestamp | diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 5a1268bfb03db..525a6fe2637c3 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import date, datetime
from io import StringIO
import numpy as np
@@ -1014,3 +1014,33 @@ def test_apply_with_timezones_aware():
result2 = df2.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy())
tm.assert_frame_equal(result1, result2)
+
+
+def test_apply_with_date_in_multiindex_does_not_convert_to_timestamp():
+ # GH 29617
+
+ df = pd.DataFrame(
+ {
+ "A": ["a", "a", "a", "b"],
+ "B": [
+ date(2020, 1, 10),
+ date(2020, 1, 10),
+ date(2020, 2, 10),
+ date(2020, 2, 10),
+ ],
+ "C": [1, 2, 3, 4],
+ },
+ index=pd.Index([100, 101, 102, 103], name="idx"),
+ )
+
+ grp = df.groupby(["A", "B"])
+ result = grp.apply(lambda x: x.head(1))
+
+ expected = df.iloc[[0, 2, 3]]
+ expected = expected.reset_index()
+ expected.index = pd.MultiIndex.from_frame(expected[["A", "B", "idx"]])
+ expected = expected.drop(columns="idx")
+
+ tm.assert_frame_equal(result, expected)
+ for val in result.index.levels[1]:
+ assert type(val) is date
| - [x] closes #29617
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Previously, there was a bug in `DataFrameGroupBy.apply` where a column of `datetime.date` would raise a `ValueError` if it was included as column in a multi-column grouping. Somewhere in the grouping the values would be converted to `pd.Timestamp` and then when the `pd.Timestamp` couldn't be found in the index of `datetime.date`s it would throw an error.
This bug persisted until 1.0.5 but was fixed in 1.1.0 (not sure which change fixed it). In 1.1.0 the `datetime.date` are not converted to `pd.Timestamp`, so they are treated like any other dtype=object and left unchanged.
This PR adds a test to enforce this behaviour to make sure the bug does not arise again. | https://api.github.com/repos/pandas-dev/pandas/pulls/35504 | 2020-08-01T13:01:04Z | 2020-08-03T23:19:27Z | 2020-08-03T23:19:27Z | 2020-08-03T23:19:31Z |
DOC: add note on str cons to read_sql | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index c87391eaa62b1..51888e5021d80 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -439,7 +439,8 @@ def read_sql(
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
- for engine disposal and connection closure for the SQLAlchemy connectable. See
+ for engine disposal and connection closure for the SQLAlchemy connectable; str
+ connections are closed automatically. See
`here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
| - [x] closes #35495
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35503 | 2020-08-01T12:50:57Z | 2020-08-01T17:05:55Z | 2020-08-01T17:05:55Z | 2020-08-01T17:05:55Z |
CI: xfail numpy-dev | diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index c8b780455f862..f5b9f4a401e60 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -5,6 +5,7 @@
import pytest
from pandas._libs import iNaT
+from pandas.compat.numpy import _is_numpy_dev
from pandas.errors import InvalidIndexError
from pandas.core.dtypes.common import is_datetime64tz_dtype
@@ -417,7 +418,7 @@ def test_set_ops_error_cases(self, case, method, index):
with pytest.raises(TypeError, match=msg):
getattr(index, method)(case)
- def test_intersection_base(self, index):
+ def test_intersection_base(self, index, request):
if isinstance(index, CategoricalIndex):
return
@@ -434,6 +435,15 @@ def test_intersection_base(self, index):
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
+ # https://github.com/pandas-dev/pandas/issues/35481
+ if (
+ _is_numpy_dev
+ and isinstance(case, Series)
+ and isinstance(index, UInt64Index)
+ ):
+ mark = pytest.mark.xfail(reason="gh-35481")
+ request.node.add_marker(mark)
+
result = first.intersection(case)
assert tm.equalContents(result, second)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 30b13b6ea9fce..193800fae751f 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -5,6 +5,8 @@
import numpy as np
import pytest
+from pandas.compat.numpy import _is_numpy_dev
+
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range
import pandas._testing as tm
@@ -945,6 +947,7 @@ def test_loc_setitem_empty_append(self):
df.loc[0, "x"] = expected.loc[0, "x"]
tm.assert_frame_equal(df, expected)
+ @pytest.mark.xfail(_is_numpy_dev, reason="gh-35481")
def test_loc_setitem_empty_append_raises(self):
# GH6173, various appends to an empty dataframe
| xref #35481 | https://api.github.com/repos/pandas-dev/pandas/pulls/35502 | 2020-08-01T11:39:06Z | 2020-08-01T12:46:27Z | 2020-08-01T12:46:27Z | 2020-08-04T09:03:32Z |
Backport PR #35470 on branch 1.1.x (CI: unpin isort 5 (#35134)) | diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index dc6f45f810f3d..e0a2257b0ca1f 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -6,7 +6,7 @@
from .pandas_vb_common import tm
try:
- from pandas.tseries.offsets import Nano, Hour
+ from pandas.tseries.offsets import Hour, Nano
except ImportError:
# For compatibility with older versions
from pandas.core.datetools import * # noqa
diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index e266d871f5bc6..5d9070de92ec7 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -7,14 +7,14 @@
try:
from pandas import (
- rolling_median,
+ rolling_kurt,
+ rolling_max,
rolling_mean,
+ rolling_median,
rolling_min,
- rolling_max,
- rolling_var,
rolling_skew,
- rolling_kurt,
rolling_std,
+ rolling_var,
)
have_rolling_methods = True
diff --git a/asv_bench/benchmarks/io/parsers.py b/asv_bench/benchmarks/io/parsers.py
index ec3eddfff7184..5390056ba36f2 100644
--- a/asv_bench/benchmarks/io/parsers.py
+++ b/asv_bench/benchmarks/io/parsers.py
@@ -2,8 +2,8 @@
try:
from pandas._libs.tslibs.parsing import (
- concat_date_cols,
_does_string_look_like_datetime,
+ concat_date_cols,
)
except ImportError:
# Avoid whole benchmark suite import failure on asv (currently 0.4)
diff --git a/asv_bench/benchmarks/tslibs/normalize.py b/asv_bench/benchmarks/tslibs/normalize.py
index 7d4e0556f4d96..9a206410d8775 100644
--- a/asv_bench/benchmarks/tslibs/normalize.py
+++ b/asv_bench/benchmarks/tslibs/normalize.py
@@ -1,5 +1,5 @@
try:
- from pandas._libs.tslibs import normalize_i8_timestamps, is_date_array_normalized
+ from pandas._libs.tslibs import is_date_array_normalized, normalize_i8_timestamps
except ImportError:
from pandas._libs.tslibs.conversion import (
normalize_i8_timestamps,
diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 7b12de387d648..69ce0f1adce22 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -121,7 +121,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then
# Imports - Check formatting using isort see setup.cfg for settings
MSG='Check import format using isort' ; echo $MSG
- ISORT_CMD="isort --quiet --recursive --check-only pandas asv_bench scripts"
+ ISORT_CMD="isort --quiet --check-only pandas asv_bench scripts"
if [[ "$GITHUB_ACTIONS" == "true" ]]; then
eval $ISORT_CMD | awk '{print "##[error]" $0}'; RET=$(($RET + ${PIPESTATUS[0]}))
else
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index b85e9403038ab..1b0e36e7b6933 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -751,7 +751,7 @@ Imports are alphabetically sorted within these sections.
As part of :ref:`Continuous Integration <contributing.ci>` checks we run::
- isort --recursive --check-only pandas
+ isort --check-only pandas
to check that imports are correctly formatted as per the `setup.cfg`.
@@ -770,8 +770,6 @@ You should run::
to automatically format imports correctly. This will modify your local copy of the files.
-The `--recursive` flag can be passed to sort all files in a directory.
-
Alternatively, you can run a command similar to what was suggested for ``black`` and ``flake8`` :ref:`right above <contributing.code-formatting>`::
git diff upstream/master --name-only -- "*.py" | xargs -r isort
diff --git a/environment.yml b/environment.yml
index 3b088ca511be9..9efb995e29497 100644
--- a/environment.yml
+++ b/environment.yml
@@ -21,7 +21,7 @@ dependencies:
- flake8<3.8.0 # temporary pin, GH#34150
- flake8-comprehensions>=3.1.0 # used by flake8, linting of unnecessary comprehensions
- flake8-rst>=0.6.0,<=0.7.0 # linting of code blocks in rst files
- - isort=4.3.21 # check that imports are in the right order
+ - isort>=5.2.1 # check that imports are in the right order
- mypy=0.730
- pycodestyle # used by flake8
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index f5e16cddeb04c..d7b73a0a685d3 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -442,8 +442,8 @@ def register_option(
ValueError if `validator` is specified and `defval` is not a valid value.
"""
- import tokenize
import keyword
+ import tokenize
key = key.lower()
@@ -660,8 +660,8 @@ def _build_option_description(k: str) -> str:
def pp_options_list(keys: Iterable[str], width=80, _print: bool = False):
""" Builds a concise listing of available options, grouped by prefix """
- from textwrap import wrap
from itertools import groupby
+ from textwrap import wrap
def pp(name: str, ks: Iterable[str]) -> List[str]:
pfx = "- " + name + ".[" if name else ""
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 6b6ead795584f..7e90a8cc681ef 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -1,11 +1,12 @@
import cython
from cython import Py_ssize_t
-from libc.stdlib cimport malloc, free
-from libc.string cimport memmove
from libc.math cimport fabs, sqrt
+from libc.stdlib cimport free, malloc
+from libc.string cimport memmove
import numpy as np
+
cimport numpy as cnp
from numpy cimport (
NPY_FLOAT32,
@@ -31,12 +32,11 @@ from numpy cimport (
uint32_t,
uint64_t,
)
+
cnp.import_array()
cimport pandas._libs.util as util
-from pandas._libs.util cimport numeric, get_nat
-
from pandas._libs.khash cimport (
kh_destroy_int64,
kh_get_int64,
@@ -46,7 +46,7 @@ from pandas._libs.khash cimport (
kh_resize_int64,
khiter_t,
)
-
+from pandas._libs.util cimport get_nat, numeric
import pandas._libs.missing as missing
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 7c57e6ee9dbfd..38cb973d6dde9 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -1,27 +1,51 @@
import cython
from cython import Py_ssize_t
-from cython cimport floating
-from libc.stdlib cimport malloc, free
+from cython cimport floating
+from libc.stdlib cimport free, malloc
import numpy as np
+
cimport numpy as cnp
-from numpy cimport (ndarray,
- int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t,
- uint32_t, uint64_t, float32_t, float64_t, complex64_t, complex128_t)
+from numpy cimport (
+ complex64_t,
+ complex128_t,
+ float32_t,
+ float64_t,
+ int8_t,
+ int16_t,
+ int32_t,
+ int64_t,
+ ndarray,
+ uint8_t,
+ uint16_t,
+ uint32_t,
+ uint64_t,
+)
from numpy.math cimport NAN
-cnp.import_array()
-from pandas._libs.util cimport numeric, get_nat
+cnp.import_array()
-from pandas._libs.algos cimport (swap, TiebreakEnumType, TIEBREAK_AVERAGE,
- TIEBREAK_MIN, TIEBREAK_MAX, TIEBREAK_FIRST,
- TIEBREAK_DENSE)
-from pandas._libs.algos import (take_2d_axis1_float64_float64,
- groupsort_indexer, tiebreakers)
+from pandas._libs.algos cimport (
+ TIEBREAK_AVERAGE,
+ TIEBREAK_DENSE,
+ TIEBREAK_FIRST,
+ TIEBREAK_MAX,
+ TIEBREAK_MIN,
+ TiebreakEnumType,
+ swap,
+)
+from pandas._libs.util cimport get_nat, numeric
+
+from pandas._libs.algos import (
+ groupsort_indexer,
+ take_2d_axis1_float64_float64,
+ tiebreakers,
+)
from pandas._libs.missing cimport checknull
+
cdef int64_t NPY_NAT = get_nat()
_int64_max = np.iinfo(np.int64).max
diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx
index a98820ca57895..f2af04d91a3e3 100644
--- a/pandas/_libs/hashing.pyx
+++ b/pandas/_libs/hashing.pyx
@@ -2,10 +2,13 @@
# at https://github.com/veorq/SipHash
import cython
-from libc.stdlib cimport malloc, free
+
+from libc.stdlib cimport free, malloc
import numpy as np
-from numpy cimport ndarray, uint8_t, uint32_t, uint64_t, import_array
+
+from numpy cimport import_array, ndarray, uint8_t, uint32_t, uint64_t
+
import_array()
from pandas._libs.util cimport is_nan
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx
index c3dcbb942d7fe..ffaf6d6505955 100644
--- a/pandas/_libs/hashtable.pyx
+++ b/pandas/_libs/hashtable.pyx
@@ -1,60 +1,57 @@
cimport cython
-
-from cpython.ref cimport PyObject, Py_INCREF
-from cpython.mem cimport PyMem_Malloc, PyMem_Free
-
-from libc.stdlib cimport malloc, free
+from cpython.mem cimport PyMem_Free, PyMem_Malloc
+from cpython.ref cimport Py_INCREF, PyObject
+from libc.stdlib cimport free, malloc
import numpy as np
+
cimport numpy as cnp
-from numpy cimport ndarray, uint8_t, uint32_t, float64_t
+from numpy cimport float64_t, ndarray, uint8_t, uint32_t
from numpy.math cimport NAN
+
cnp.import_array()
+from pandas._libs cimport util
from pandas._libs.khash cimport (
- khiter_t,
- kh_str_t,
- kh_init_str,
- kh_put_str,
- kh_exist_str,
- kh_get_str,
- kh_destroy_str,
- kh_resize_str,
- kh_put_strbox,
- kh_get_strbox,
- kh_init_strbox,
- kh_int64_t,
- kh_init_int64,
- kh_resize_int64,
+ kh_destroy_float64,
kh_destroy_int64,
- kh_get_int64,
+ kh_destroy_pymap,
+ kh_destroy_str,
+ kh_destroy_uint64,
+ kh_exist_float64,
kh_exist_int64,
- kh_put_int64,
+ kh_exist_pymap,
+ kh_exist_str,
+ kh_exist_uint64,
kh_float64_t,
- kh_exist_float64,
- kh_put_float64,
- kh_init_float64,
kh_get_float64,
- kh_destroy_float64,
- kh_resize_float64,
- kh_resize_uint64,
- kh_exist_uint64,
- kh_destroy_uint64,
- kh_put_uint64,
+ kh_get_int64,
+ kh_get_pymap,
+ kh_get_str,
+ kh_get_strbox,
kh_get_uint64,
- kh_init_uint64,
- kh_destroy_pymap,
- kh_exist_pymap,
+ kh_init_float64,
+ kh_init_int64,
kh_init_pymap,
- kh_get_pymap,
+ kh_init_str,
+ kh_init_strbox,
+ kh_init_uint64,
+ kh_int64_t,
+ kh_put_float64,
+ kh_put_int64,
kh_put_pymap,
+ kh_put_str,
+ kh_put_strbox,
+ kh_put_uint64,
+ kh_resize_float64,
+ kh_resize_int64,
kh_resize_pymap,
+ kh_resize_str,
+ kh_resize_uint64,
+ kh_str_t,
+ khiter_t,
)
-
-
-from pandas._libs cimport util
-
from pandas._libs.missing cimport checknull
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 35c4b73b47695..d6659cc1895b1 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -1,6 +1,7 @@
import warnings
import numpy as np
+
cimport numpy as cnp
from numpy cimport (
float32_t,
@@ -16,17 +17,16 @@ from numpy cimport (
uint32_t,
uint64_t,
)
+
cnp.import_array()
from pandas._libs cimport util
-
+from pandas._libs.hashtable cimport HashTable
from pandas._libs.tslibs.nattype cimport c_NaT as NaT
from pandas._libs.tslibs.period cimport is_period_object
-from pandas._libs.tslibs.timestamps cimport _Timestamp
from pandas._libs.tslibs.timedeltas cimport _Timedelta
-
-from pandas._libs.hashtable cimport HashTable
+from pandas._libs.tslibs.timestamps cimport _Timestamp
from pandas._libs import algos, hashtable as _hash
from pandas._libs.missing import checknull
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 8b4b490f49b12..4f27fde52414a 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -5,12 +5,15 @@ from cython import Py_ssize_t
from cpython.slice cimport PySlice_GetIndicesEx
+
cdef extern from "Python.h":
Py_ssize_t PY_SSIZE_T_MAX
import numpy as np
+
cimport numpy as cnp
from numpy cimport NPY_INT64, int64_t
+
cnp.import_array()
from pandas._libs.algos import ensure_int64
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 95881ebf1385c..6867e8aba7411 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -1,7 +1,8 @@
import numbers
from operator import le, lt
-from cpython.datetime cimport PyDelta_Check, PyDateTime_IMPORT
+from cpython.datetime cimport PyDateTime_IMPORT, PyDelta_Check
+
PyDateTime_IMPORT
from cpython.object cimport (
@@ -16,8 +17,8 @@ from cpython.object cimport (
import cython
from cython import Py_ssize_t
-
import numpy as np
+
cimport numpy as cnp
from numpy cimport (
NPY_QUICKSORT,
@@ -30,22 +31,21 @@ from numpy cimport (
ndarray,
uint64_t,
)
+
cnp.import_array()
from pandas._libs cimport util
-
from pandas._libs.hashtable cimport Int64Vector
+from pandas._libs.tslibs.timedeltas cimport _Timedelta
+from pandas._libs.tslibs.timestamps cimport _Timestamp
+from pandas._libs.tslibs.timezones cimport tz_compare
from pandas._libs.tslibs.util cimport (
- is_integer_object,
is_float_object,
+ is_integer_object,
is_timedelta64_object,
)
-from pandas._libs.tslibs.timezones cimport tz_compare
-from pandas._libs.tslibs.timestamps cimport _Timestamp
-from pandas._libs.tslibs.timedeltas cimport _Timedelta
-
_VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither'])
diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx
index 54892a7e4bc77..13c7187923473 100644
--- a/pandas/_libs/join.pyx
+++ b/pandas/_libs/join.pyx
@@ -1,7 +1,7 @@
import cython
from cython import Py_ssize_t
-
import numpy as np
+
cimport numpy as cnp
from numpy cimport (
float32_t,
@@ -16,6 +16,7 @@ from numpy cimport (
uint32_t,
uint64_t,
)
+
cnp.import_array()
from pandas._libs.algos import (
@@ -640,7 +641,11 @@ def outer_join_indexer(ndarray[join_t] left, ndarray[join_t] right):
# ----------------------------------------------------------------------
from pandas._libs.hashtable cimport (
- HashTable, PyObjectHashTable, UInt64HashTable, Int64HashTable)
+ HashTable,
+ Int64HashTable,
+ PyObjectHashTable,
+ UInt64HashTable,
+)
ctypedef fused asof_t:
uint8_t
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 5ecbb2c3ffd35..5fa91ffee8ea8 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -5,23 +5,24 @@ import warnings
import cython
from cython import Py_ssize_t
-from cpython.object cimport PyObject_RichCompareBool, Py_EQ
-from cpython.ref cimport Py_INCREF
-from cpython.tuple cimport PyTuple_SET_ITEM, PyTuple_New
-from cpython.iterator cimport PyIter_Check
-from cpython.sequence cimport PySequence_Check
-from cpython.number cimport PyNumber_Check
-
from cpython.datetime cimport (
- PyDateTime_Check,
PyDate_Check,
- PyTime_Check,
- PyDelta_Check,
+ PyDateTime_Check,
PyDateTime_IMPORT,
+ PyDelta_Check,
+ PyTime_Check,
)
+from cpython.iterator cimport PyIter_Check
+from cpython.number cimport PyNumber_Check
+from cpython.object cimport Py_EQ, PyObject_RichCompareBool
+from cpython.ref cimport Py_INCREF
+from cpython.sequence cimport PySequence_Check
+from cpython.tuple cimport PyTuple_New, PyTuple_SET_ITEM
+
PyDateTime_IMPORT
import numpy as np
+
cimport numpy as cnp
from numpy cimport (
NPY_OBJECT,
@@ -39,6 +40,7 @@ from numpy cimport (
uint8_t,
uint64_t,
)
+
cnp.import_array()
cdef extern from "numpy/arrayobject.h":
@@ -63,28 +65,23 @@ cdef extern from "src/parse_helper.h":
int floatify(object, float64_t *result, int *maybe_int) except -1
from pandas._libs cimport util
-from pandas._libs.util cimport is_nan, UINT64_MAX, INT64_MAX, INT64_MIN
+from pandas._libs.util cimport INT64_MAX, INT64_MIN, UINT64_MAX, is_nan
from pandas._libs.tslib import array_to_datetime
-from pandas._libs.tslibs.nattype cimport (
- NPY_NAT,
- c_NaT as NaT,
- checknull_with_nat,
-)
-from pandas._libs.tslibs.conversion cimport convert_to_tsobject
-from pandas._libs.tslibs.timedeltas cimport convert_to_timedelta64
-from pandas._libs.tslibs.timezones cimport tz_compare
-from pandas._libs.tslibs.period cimport is_period_object
-from pandas._libs.tslibs.offsets cimport is_offset_object
from pandas._libs.missing cimport (
+ C_NA,
checknull,
- isnaobj,
is_null_datetime64,
is_null_timedelta64,
- C_NA,
+ isnaobj,
)
-
+from pandas._libs.tslibs.conversion cimport convert_to_tsobject
+from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT, checknull_with_nat
+from pandas._libs.tslibs.offsets cimport is_offset_object
+from pandas._libs.tslibs.period cimport is_period_object
+from pandas._libs.tslibs.timedeltas cimport convert_to_timedelta64
+from pandas._libs.tslibs.timezones cimport tz_compare
# constants that will be compared to potentially arbitrarily large
# python int
@@ -1317,8 +1314,7 @@ def infer_dtype(value: object, skipna: bool = True) -> str:
if not isinstance(value, list):
value = list(value)
- from pandas.core.dtypes.cast import (
- construct_1d_object_array_from_listlike)
+ from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
values = construct_1d_object_array_from_listlike(value)
# make contiguous
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index fdd06fe631b97..760fab3781fd4 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -1,27 +1,25 @@
-import cython
-from cython import Py_ssize_t
-
import numbers
+import cython
+from cython import Py_ssize_t
import numpy as np
+
cimport numpy as cnp
-from numpy cimport ndarray, int64_t, uint8_t, float64_t
+from numpy cimport float64_t, int64_t, ndarray, uint8_t
+
cnp.import_array()
from pandas._libs cimport util
-
-
-from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value
from pandas._libs.tslibs.nattype cimport (
c_NaT as NaT,
checknull_with_nat,
is_null_datetimelike,
)
-from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
+from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value
+from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
from pandas.compat import is_platform_32bit
-
cdef:
float64_t INF = <float64_t>np.inf
float64_t NEGINF = -INF
diff --git a/pandas/_libs/ops.pyx b/pandas/_libs/ops.pyx
index 658600cdfbe6c..d1f897d237c1b 100644
--- a/pandas/_libs/ops.pyx
+++ b/pandas/_libs/ops.pyx
@@ -10,18 +10,17 @@ from cpython.object cimport (
PyObject_RichCompareBool,
)
-
import cython
from cython import Py_ssize_t
-
import numpy as np
-from numpy cimport ndarray, uint8_t, import_array
-import_array()
+from numpy cimport import_array, ndarray, uint8_t
+
+import_array()
-from pandas._libs.util cimport UINT8_MAX, is_nan
from pandas._libs.missing cimport checknull
+from pandas._libs.util cimport UINT8_MAX, is_nan
@cython.wraparound(False)
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 6ffb036e01595..fa77af6bd5a25 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -1,6 +1,8 @@
# Copyright (c) 2012, Lambda Foundry, Inc.
# See LICENSE for the license
import bz2
+from csv import QUOTE_MINIMAL, QUOTE_NONE, QUOTE_NONNUMERIC
+from errno import ENOENT
import gzip
import io
import os
@@ -9,17 +11,14 @@ import time
import warnings
import zipfile
-from csv import QUOTE_MINIMAL, QUOTE_NONNUMERIC, QUOTE_NONE
-from errno import ENOENT
-
from libc.stdlib cimport free
-from libc.string cimport strncpy, strlen, strcasecmp
+from libc.string cimport strcasecmp, strlen, strncpy
import cython
from cython import Py_ssize_t
from cpython.bytes cimport PyBytes_AsString, PyBytes_FromString
-from cpython.exc cimport PyErr_Occurred, PyErr_Fetch
+from cpython.exc cimport PyErr_Fetch, PyErr_Occurred
from cpython.object cimport PyObject
from cpython.ref cimport Py_XDECREF
from cpython.unicode cimport PyUnicode_AsUTF8String, PyUnicode_Decode
@@ -30,37 +29,59 @@ cdef extern from "Python.h":
import numpy as np
+
cimport numpy as cnp
-from numpy cimport ndarray, uint8_t, uint64_t, int64_t, float64_t
+from numpy cimport float64_t, int64_t, ndarray, uint8_t, uint64_t
+
cnp.import_array()
from pandas._libs cimport util
-from pandas._libs.util cimport UINT64_MAX, INT64_MAX, INT64_MIN
+from pandas._libs.util cimport INT64_MAX, INT64_MIN, UINT64_MAX
+
import pandas._libs.lib as lib
from pandas._libs.khash cimport (
- khiter_t,
- kh_str_t, kh_init_str, kh_put_str, kh_exist_str,
- kh_get_str, kh_destroy_str,
- kh_float64_t, kh_get_float64, kh_destroy_float64,
- kh_put_float64, kh_init_float64, kh_resize_float64,
- kh_strbox_t, kh_put_strbox, kh_get_strbox, kh_init_strbox,
+ kh_destroy_float64,
+ kh_destroy_str,
+ kh_destroy_str_starts,
kh_destroy_strbox,
- kh_str_starts_t, kh_put_str_starts_item, kh_init_str_starts,
- kh_get_str_starts_item, kh_destroy_str_starts, kh_resize_str_starts)
+ kh_exist_str,
+ kh_float64_t,
+ kh_get_float64,
+ kh_get_str,
+ kh_get_str_starts_item,
+ kh_get_strbox,
+ kh_init_float64,
+ kh_init_str,
+ kh_init_str_starts,
+ kh_init_strbox,
+ kh_put_float64,
+ kh_put_str,
+ kh_put_str_starts_item,
+ kh_put_strbox,
+ kh_resize_float64,
+ kh_resize_str_starts,
+ kh_str_starts_t,
+ kh_str_t,
+ kh_strbox_t,
+ khiter_t,
+)
+
+from pandas.compat import _get_lzma_file, _import_lzma
+from pandas.errors import DtypeWarning, EmptyDataError, ParserError, ParserWarning
from pandas.core.dtypes.common import (
+ is_bool_dtype,
is_categorical_dtype,
- is_integer_dtype, is_float_dtype,
- is_bool_dtype, is_object_dtype,
is_datetime64_dtype,
- pandas_dtype, is_extension_array_dtype)
+ is_extension_array_dtype,
+ is_float_dtype,
+ is_integer_dtype,
+ is_object_dtype,
+ pandas_dtype,
+)
from pandas.core.dtypes.concat import union_categoricals
-from pandas.compat import _import_lzma, _get_lzma_file
-from pandas.errors import (ParserError, DtypeWarning,
- EmptyDataError, ParserWarning)
-
lzma = _import_lzma()
cdef:
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index a01e0c5705dcf..7b36bc8baf891 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -2,15 +2,18 @@ from copy import copy
from cython import Py_ssize_t
-from libc.stdlib cimport malloc, free
+from libc.stdlib cimport free, malloc
import numpy as np
+
cimport numpy as cnp
-from numpy cimport ndarray, int64_t
+from numpy cimport int64_t, ndarray
+
cnp.import_array()
from pandas._libs cimport util
-from pandas._libs.lib import maybe_convert_objects, is_scalar
+
+from pandas._libs.lib import is_scalar, maybe_convert_objects
cdef _check_result_array(object obj, Py_ssize_t cnt):
diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx
index da4dd00027395..5c6c15fb50fed 100644
--- a/pandas/_libs/reshape.pyx
+++ b/pandas/_libs/reshape.pyx
@@ -16,7 +16,9 @@ from numpy cimport (
)
import numpy as np
+
cimport numpy as cnp
+
cnp.import_array()
from pandas._libs.lib cimport c_is_list_like
diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx
index 7c9575d921dc9..321d7c374d8ec 100644
--- a/pandas/_libs/sparse.pyx
+++ b/pandas/_libs/sparse.pyx
@@ -1,9 +1,18 @@
import cython
-
import numpy as np
+
cimport numpy as cnp
-from numpy cimport (ndarray, uint8_t, int64_t, int32_t, int16_t, int8_t,
- float64_t, float32_t)
+from numpy cimport (
+ float32_t,
+ float64_t,
+ int8_t,
+ int16_t,
+ int32_t,
+ int64_t,
+ ndarray,
+ uint8_t,
+)
+
cnp.import_array()
diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx
index 785a4d1f8b923..64fc8d615ea9c 100644
--- a/pandas/_libs/testing.pyx
+++ b/pandas/_libs/testing.pyx
@@ -1,13 +1,16 @@
import math
import numpy as np
+
from numpy cimport import_array
+
import_array()
from pandas._libs.util cimport is_array
-from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import is_dtype_equal
+from pandas.core.dtypes.missing import array_equivalent, isna
+
cdef NUMERIC_TYPES = (
bool,
@@ -129,6 +132,7 @@ cpdef assert_almost_equal(a, b,
if not isiterable(b):
from pandas._testing import assert_class_equal
+
# classes can't be the same, to raise error
assert_class_equal(a, b, obj=obj)
@@ -181,6 +185,7 @@ cpdef assert_almost_equal(a, b,
elif isiterable(b):
from pandas._testing import assert_class_equal
+
# classes can't be the same, to raise error
assert_class_equal(a, b, obj=obj)
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 35d5cd8f1e275..e4128af62d06d 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -7,23 +7,20 @@ from cpython.datetime cimport (
datetime,
tzinfo,
)
+
# import datetime C API
PyDateTime_IMPORT
cimport numpy as cnp
from numpy cimport float64_t, int64_t, ndarray
+
import numpy as np
+
cnp.import_array()
import pytz
-from pandas._libs.util cimport (
- is_datetime64_object,
- is_float_object,
- is_integer_object,
-)
-
from pandas._libs.tslibs.np_datetime cimport (
_string_to_dts,
check_dts_bounds,
@@ -34,9 +31,9 @@ from pandas._libs.tslibs.np_datetime cimport (
pydate_to_dt64,
pydatetime_to_dt64,
)
+from pandas._libs.util cimport is_datetime64_object, is_float_object, is_integer_object
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
-
from pandas._libs.tslibs.parsing import parse_datetime_string
from pandas._libs.tslibs.conversion cimport (
@@ -45,22 +42,18 @@ from pandas._libs.tslibs.conversion cimport (
convert_datetime_to_tsobject,
get_datetime64_nanos,
)
-
from pandas._libs.tslibs.nattype cimport (
NPY_NAT,
c_NaT as NaT,
c_nat_strings as nat_strings,
)
-
from pandas._libs.tslibs.timestamps cimport _Timestamp
-from pandas._libs.tslibs.timestamps import Timestamp
-from pandas._libs.tslibs.tzconversion cimport (
- tz_localize_to_utc_single,
-)
+from pandas._libs.tslibs.timestamps import Timestamp
# Note: this is the only non-tslibs intra-pandas dependency here
from pandas._libs.missing cimport checknull_with_nat_and_na
+from pandas._libs.tslibs.tzconversion cimport tz_localize_to_utc_single
def _test_parse_iso8601(ts: str):
diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx
index 00cecd25e5225..6cce2f5e1fd95 100644
--- a/pandas/_libs/tslibs/ccalendar.pyx
+++ b/pandas/_libs/tslibs/ccalendar.pyx
@@ -5,7 +5,7 @@ Cython implementations of functions resembling the stdlib calendar module
import cython
-from numpy cimport int64_t, int32_t
+from numpy cimport int32_t, int64_t
# ----------------------------------------------------------------------
# Constants
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 8cc3d25e86340..adf1dfbc1ac72 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -1,44 +1,68 @@
import cython
-
import numpy as np
+
cimport numpy as cnp
-from numpy cimport int64_t, int32_t, intp_t, ndarray
+from numpy cimport int32_t, int64_t, intp_t, ndarray
+
cnp.import_array()
import pytz
# stdlib datetime imports
-from cpython.datetime cimport (datetime, time, tzinfo,
- PyDateTime_Check, PyDate_Check,
- PyDateTime_IMPORT)
+
+from cpython.datetime cimport (
+ PyDate_Check,
+ PyDateTime_Check,
+ PyDateTime_IMPORT,
+ datetime,
+ time,
+ tzinfo,
+)
+
PyDateTime_IMPORT
from pandas._libs.tslibs.base cimport ABCTimestamp
-
from pandas._libs.tslibs.np_datetime cimport (
- check_dts_bounds, npy_datetimestruct, pandas_datetime_to_datetimestruct,
- _string_to_dts, npy_datetime, dt64_to_dtstruct, dtstruct_to_dt64,
- get_datetime64_unit, get_datetime64_value, pydatetime_to_dt64,
- NPY_DATETIMEUNIT, NPY_FR_ns)
-from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
+ NPY_DATETIMEUNIT,
+ NPY_FR_ns,
+ _string_to_dts,
+ check_dts_bounds,
+ dt64_to_dtstruct,
+ dtstruct_to_dt64,
+ get_datetime64_unit,
+ get_datetime64_value,
+ npy_datetime,
+ npy_datetimestruct,
+ pandas_datetime_to_datetimestruct,
+ pydatetime_to_dt64,
+)
-from pandas._libs.tslibs.util cimport (
- is_datetime64_object, is_integer_object, is_float_object)
+from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
from pandas._libs.tslibs.timezones cimport (
- is_utc, is_tzlocal, is_fixed_offset, get_utcoffset, get_dst_info,
- maybe_get_tz, tz_compare,
+ get_dst_info,
+ get_utcoffset,
+ is_fixed_offset,
+ is_tzlocal,
+ is_utc,
+ maybe_get_tz,
+ tz_compare,
utc_pytz as UTC,
)
+from pandas._libs.tslibs.util cimport (
+ is_datetime64_object,
+ is_float_object,
+ is_integer_object,
+)
+
from pandas._libs.tslibs.parsing import parse_datetime_string
from pandas._libs.tslibs.nattype cimport (
NPY_NAT,
- checknull_with_nat,
c_NaT as NaT,
c_nat_strings as nat_strings,
+ checknull_with_nat,
)
-
from pandas._libs.tslibs.tzconversion cimport (
tz_convert_utc_to_tzlocal,
tz_localize_to_utc_single,
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 1d1f900bc18b3..16fa05c3801c6 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -6,26 +6,37 @@ from locale import LC_TIME
import cython
from cython import Py_ssize_t
-
import numpy as np
+
cimport numpy as cnp
-from numpy cimport ndarray, int64_t, int32_t, int8_t, uint32_t
+from numpy cimport int8_t, int32_t, int64_t, ndarray, uint32_t
+
cnp.import_array()
from pandas._config.localization import set_locale
-from pandas._libs.tslibs.ccalendar import MONTHS_FULL, DAYS_FULL
+from pandas._libs.tslibs.ccalendar import DAYS_FULL, MONTHS_FULL
+
from pandas._libs.tslibs.ccalendar cimport (
- get_days_in_month, is_leapyear, dayofweek, get_week_of_year,
- get_day_of_year, get_iso_calendar, iso_calendar_t,
- month_offset,
+ dayofweek,
+ get_day_of_year,
+ get_days_in_month,
get_firstbday,
+ get_iso_calendar,
get_lastbday,
+ get_week_of_year,
+ is_leapyear,
+ iso_calendar_t,
+ month_offset,
)
-from pandas._libs.tslibs.np_datetime cimport (
- npy_datetimestruct, pandas_timedeltastruct, dt64_to_dtstruct,
- td64_to_tdstruct)
from pandas._libs.tslibs.nattype cimport NPY_NAT
+from pandas._libs.tslibs.np_datetime cimport (
+ dt64_to_dtstruct,
+ npy_datetimestruct,
+ pandas_timedeltastruct,
+ td64_to_tdstruct,
+)
+
from pandas._libs.tslibs.strptime import LocaleTime
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 264013f928d22..73df51832d700 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -1,3 +1,10 @@
+from cpython.datetime cimport (
+ PyDateTime_Check,
+ PyDateTime_IMPORT,
+ PyDelta_Check,
+ datetime,
+ timedelta,
+)
from cpython.object cimport (
Py_EQ,
Py_GE,
@@ -8,28 +15,19 @@ from cpython.object cimport (
PyObject_RichCompare,
)
-from cpython.datetime cimport (
- PyDateTime_Check,
- PyDateTime_IMPORT,
- PyDelta_Check,
- datetime,
- timedelta,
-)
PyDateTime_IMPORT
from cpython.version cimport PY_MINOR_VERSION
import numpy as np
+
cimport numpy as cnp
from numpy cimport int64_t
+
cnp.import_array()
-from pandas._libs.tslibs.np_datetime cimport (
- get_datetime64_value,
- get_timedelta64_value,
-)
cimport pandas._libs.tslibs.util as util
-
+from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value
# ----------------------------------------------------------------------
# Constants
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index 31cc55ad981bb..12aaaf4ce3977 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -1,5 +1,3 @@
-from cpython.object cimport Py_EQ, Py_NE, Py_GE, Py_GT, Py_LT, Py_LE
-
from cpython.datetime cimport (
PyDateTime_DATE_GET_HOUR,
PyDateTime_DATE_GET_MICROSECOND,
@@ -10,11 +8,15 @@ from cpython.datetime cimport (
PyDateTime_GET_YEAR,
PyDateTime_IMPORT,
)
+from cpython.object cimport Py_EQ, Py_GE, Py_GT, Py_LE, Py_LT, Py_NE
+
PyDateTime_IMPORT
from numpy cimport int64_t
+
from pandas._libs.tslibs.util cimport get_c_string_buf_and_size
+
cdef extern from "src/datetime/np_datetime.h":
int cmp_npy_datetimestruct(npy_datetimestruct *a,
npy_datetimestruct *b)
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 9a7ca15a2a1c2..ac2725fc58aee 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -1,39 +1,51 @@
-import cython
-
import operator
import re
import time
from typing import Any
import warnings
-from cpython.datetime cimport (PyDateTime_IMPORT,
- PyDateTime_Check,
- PyDate_Check,
- PyDelta_Check,
- datetime, timedelta, date,
- time as dt_time)
+
+import cython
+
+from cpython.datetime cimport (
+ PyDate_Check,
+ PyDateTime_Check,
+ PyDateTime_IMPORT,
+ PyDelta_Check,
+ date,
+ datetime,
+ time as dt_time,
+ timedelta,
+)
+
PyDateTime_IMPORT
-from dateutil.relativedelta import relativedelta
from dateutil.easter import easter
-
+from dateutil.relativedelta import relativedelta
import numpy as np
+
cimport numpy as cnp
from numpy cimport int64_t, ndarray
+
cnp.import_array()
# TODO: formalize having _libs.properties "above" tslibs in the dependency structure
+
from pandas._libs.properties import cache_readonly
from pandas._libs.tslibs cimport util
from pandas._libs.tslibs.util cimport (
- is_integer_object,
is_datetime64_object,
is_float_object,
+ is_integer_object,
)
from pandas._libs.tslibs.ccalendar import (
- MONTH_ALIASES, MONTH_TO_CAL_NUM, weekday_to_int, int_to_weekday,
+ MONTH_ALIASES,
+ MONTH_TO_CAL_NUM,
+ int_to_weekday,
+ weekday_to_int,
)
+
from pandas._libs.tslibs.ccalendar cimport (
DAY_NANOS,
dayofweek,
@@ -47,17 +59,20 @@ from pandas._libs.tslibs.conversion cimport (
)
from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT
from pandas._libs.tslibs.np_datetime cimport (
- npy_datetimestruct,
- dtstruct_to_dt64,
dt64_to_dtstruct,
+ dtstruct_to_dt64,
+ npy_datetimestruct,
pydate_to_dtstruct,
)
from pandas._libs.tslibs.tzconversion cimport tz_convert_from_utc_single
from .dtypes cimport PeriodDtypeCode
from .timedeltas cimport delta_to_nanoseconds
+
from .timedeltas import Timedelta
+
from .timestamps cimport _Timestamp
+
from .timestamps import Timestamp
# ---------------------------------------------------------------------
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index c4f369d0d3b3f..8429aebbd85b8 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -9,39 +9,44 @@ from libc.string cimport strchr
import cython
from cython import Py_ssize_t
-from cpython.object cimport PyObject_Str
-
from cpython.datetime cimport datetime, datetime_new, import_datetime, tzinfo
+from cpython.object cimport PyObject_Str
from cpython.version cimport PY_VERSION_HEX
+
import_datetime()
import numpy as np
+
cimport numpy as cnp
-from numpy cimport (PyArray_GETITEM, PyArray_ITER_DATA, PyArray_ITER_NEXT,
- PyArray_IterNew, flatiter, float64_t)
+from numpy cimport (
+ PyArray_GETITEM,
+ PyArray_ITER_DATA,
+ PyArray_ITER_NEXT,
+ PyArray_IterNew,
+ flatiter,
+ float64_t,
+)
+
cnp.import_array()
# dateutil compat
-from dateutil.tz import (tzoffset,
- tzlocal as _dateutil_tzlocal,
- tzutc as _dateutil_tzutc,
- tzstr as _dateutil_tzstr)
+
+from dateutil.parser import DEFAULTPARSER, parse as du_parse
from dateutil.relativedelta import relativedelta
-from dateutil.parser import DEFAULTPARSER
-from dateutil.parser import parse as du_parse
+from dateutil.tz import (
+ tzlocal as _dateutil_tzlocal,
+ tzoffset,
+ tzstr as _dateutil_tzstr,
+ tzutc as _dateutil_tzutc,
+)
from pandas._config import get_option
from pandas._libs.tslibs.ccalendar cimport c_MONTH_NUMBERS
-from pandas._libs.tslibs.nattype cimport (
- c_nat_strings as nat_strings,
- c_NaT as NaT,
-)
-from pandas._libs.tslibs.util cimport (
- is_array,
- get_c_string_buf_and_size,
-)
+from pandas._libs.tslibs.nattype cimport c_NaT as NaT, c_nat_strings as nat_strings
from pandas._libs.tslibs.offsets cimport is_offset_object
+from pandas._libs.tslibs.util cimport get_c_string_buf_and_size, is_array
+
cdef extern from "../src/headers/portable.h":
int getdigit_ascii(char c, int default) nogil
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 20961c6da56bd..86b6533f5caf5 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1,96 +1,98 @@
import warnings
-from cpython.object cimport PyObject_RichCompareBool, Py_EQ, Py_NE
+from cpython.object cimport Py_EQ, Py_NE, PyObject_RichCompareBool
+from numpy cimport import_array, int64_t, ndarray
-from numpy cimport int64_t, import_array, ndarray
import numpy as np
+
import_array()
from libc.stdlib cimport free, malloc
+from libc.string cimport memset, strlen
from libc.time cimport strftime, tm
-from libc.string cimport strlen, memset
import cython
from cpython.datetime cimport (
- datetime,
PyDate_Check,
PyDateTime_Check,
PyDateTime_IMPORT,
PyDelta_Check,
+ datetime,
)
+
# import datetime C API
PyDateTime_IMPORT
from pandas._libs.tslibs.np_datetime cimport (
- npy_datetimestruct,
- dtstruct_to_dt64,
- dt64_to_dtstruct,
- pandas_datetime_to_datetimestruct,
- check_dts_bounds,
NPY_DATETIMEUNIT,
NPY_FR_D,
NPY_FR_us,
+ check_dts_bounds,
+ dt64_to_dtstruct,
+ dtstruct_to_dt64,
+ npy_datetimestruct,
+ pandas_datetime_to_datetimestruct,
)
+
cdef extern from "src/datetime/np_datetime.h":
int64_t npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr,
npy_datetimestruct *d) nogil
cimport pandas._libs.tslibs.util as util
-from pandas._libs.tslibs.timestamps import Timestamp
from pandas._libs.tslibs.timedeltas import Timedelta
-from pandas._libs.tslibs.timedeltas cimport (
- delta_to_nanoseconds,
- is_any_td_scalar,
-)
+from pandas._libs.tslibs.timestamps import Timestamp
from pandas._libs.tslibs.ccalendar cimport (
+ c_MONTH_NUMBERS,
dayofweek,
get_day_of_year,
- is_leapyear,
- get_week_of_year,
get_days_in_month,
+ get_week_of_year,
+ is_leapyear,
)
-from pandas._libs.tslibs.ccalendar cimport c_MONTH_NUMBERS
+from pandas._libs.tslibs.timedeltas cimport delta_to_nanoseconds, is_any_td_scalar
+
from pandas._libs.tslibs.conversion import ensure_datetime64ns
from pandas._libs.tslibs.dtypes cimport (
- PeriodDtypeBase,
- FR_UND,
FR_ANN,
- FR_QTR,
- FR_MTH,
- FR_WK,
FR_BUS,
FR_DAY,
FR_HR,
FR_MIN,
- FR_SEC,
FR_MS,
- FR_US,
+ FR_MTH,
FR_NS,
+ FR_QTR,
+ FR_SEC,
+ FR_UND,
+ FR_US,
+ FR_WK,
+ PeriodDtypeBase,
attrname_to_abbrevs,
)
-
from pandas._libs.tslibs.parsing cimport get_rule_month
+
from pandas._libs.tslibs.parsing import parse_time_string
+
from pandas._libs.tslibs.nattype cimport (
- _nat_scalar_rules,
NPY_NAT,
- is_null_datetimelike,
+ _nat_scalar_rules,
c_NaT as NaT,
c_nat_strings as nat_strings,
+ is_null_datetimelike,
)
from pandas._libs.tslibs.offsets cimport (
BaseOffset,
- to_offset,
- is_tick_object,
is_offset_object,
+ is_tick_object,
+ to_offset,
)
-from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG
+from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG
cdef:
enum:
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index 660b582f73e6e..d2690be905a68 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -1,27 +1,30 @@
"""Strptime-related classes and functions.
"""
-import time
-import locale
import calendar
+import locale
import re
+import time
from cpython.datetime cimport date, tzinfo
from _thread import allocate_lock as _thread_allocate_lock
+import numpy as np
import pytz
-import numpy as np
from numpy cimport int64_t
-from pandas._libs.tslibs.np_datetime cimport (
- check_dts_bounds, dtstruct_to_dt64, npy_datetimestruct)
-
from pandas._libs.tslibs.nattype cimport (
- checknull_with_nat,
NPY_NAT,
c_nat_strings as nat_strings,
+ checknull_with_nat,
)
+from pandas._libs.tslibs.np_datetime cimport (
+ check_dts_bounds,
+ dtstruct_to_dt64,
+ npy_datetimestruct,
+)
+
cdef dict _parse_code_table = {'y': 0,
'Y': 1,
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 8f3a599bf107c..ee32ed53a908b 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -2,39 +2,47 @@ import collections
import cython
-from cpython.object cimport Py_NE, Py_EQ, PyObject_RichCompare
+from cpython.object cimport Py_EQ, Py_NE, PyObject_RichCompare
import numpy as np
+
cimport numpy as cnp
from numpy cimport int64_t, ndarray
+
cnp.import_array()
-from cpython.datetime cimport (timedelta,
- PyDateTime_Check, PyDelta_Check,
- PyDateTime_IMPORT)
+from cpython.datetime cimport (
+ PyDateTime_Check,
+ PyDateTime_IMPORT,
+ PyDelta_Check,
+ timedelta,
+)
+
PyDateTime_IMPORT
cimport pandas._libs.tslibs.util as util
-from pandas._libs.tslibs.util cimport (
- is_timedelta64_object, is_datetime64_object, is_integer_object,
- is_float_object, is_array
-)
-
from pandas._libs.tslibs.base cimport ABCTimestamp
-
from pandas._libs.tslibs.conversion cimport cast_from_unit
-
-from pandas._libs.tslibs.np_datetime cimport (
- cmp_scalar, td64_to_tdstruct, pandas_timedeltastruct)
-
from pandas._libs.tslibs.nattype cimport (
- checknull_with_nat,
NPY_NAT,
c_NaT as NaT,
c_nat_strings as nat_strings,
+ checknull_with_nat,
+)
+from pandas._libs.tslibs.np_datetime cimport (
+ cmp_scalar,
+ pandas_timedeltastruct,
+ td64_to_tdstruct,
)
from pandas._libs.tslibs.offsets cimport is_tick_object
+from pandas._libs.tslibs.util cimport (
+ is_array,
+ is_datetime64_object,
+ is_float_object,
+ is_integer_object,
+ is_timedelta64_object,
+)
# ----------------------------------------------------------------------
# Constants
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 8cef685933863..bddfc30d86a53 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -9,54 +9,66 @@ shadows the python class, where we do any heavy lifting.
import warnings
import numpy as np
+
cimport numpy as cnp
-from numpy cimport int64_t, int8_t, uint8_t, ndarray
-cnp.import_array()
+from numpy cimport int8_t, int64_t, ndarray, uint8_t
-from cpython.object cimport (PyObject_RichCompareBool, PyObject_RichCompare,
- Py_EQ, Py_NE)
+cnp.import_array()
-from cpython.datetime cimport (
- datetime,
- time,
- tzinfo,
- tzinfo as tzinfo_type, # alias bc `tzinfo` is a kwarg below
+from cpython.datetime cimport ( # alias bc `tzinfo` is a kwarg below
PyDateTime_Check,
+ PyDateTime_IMPORT,
PyDelta_Check,
PyTZInfo_Check,
- PyDateTime_IMPORT,
-)
-PyDateTime_IMPORT
-
-from pandas._libs.tslibs.util cimport (
- is_datetime64_object, is_float_object, is_integer_object,
- is_timedelta64_object, is_array,
+ datetime,
+ time,
+ tzinfo as tzinfo_type,
)
+from cpython.object cimport Py_EQ, Py_NE, PyObject_RichCompare, PyObject_RichCompareBool
-from pandas._libs.tslibs.base cimport ABCTimestamp
+PyDateTime_IMPORT
from pandas._libs.tslibs cimport ccalendar
-
+from pandas._libs.tslibs.base cimport ABCTimestamp
from pandas._libs.tslibs.conversion cimport (
_TSObject,
- convert_to_tsobject,
convert_datetime_to_tsobject,
+ convert_to_tsobject,
normalize_i8_stamp,
)
-from pandas._libs.tslibs.fields import get_start_end_field, get_date_name_field
+from pandas._libs.tslibs.util cimport (
+ is_array,
+ is_datetime64_object,
+ is_float_object,
+ is_integer_object,
+ is_timedelta64_object,
+)
+
+from pandas._libs.tslibs.fields import get_date_name_field, get_start_end_field
+
from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT
from pandas._libs.tslibs.np_datetime cimport (
- check_dts_bounds, npy_datetimestruct, dt64_to_dtstruct,
+ check_dts_bounds,
cmp_scalar,
+ dt64_to_dtstruct,
+ npy_datetimestruct,
pydatetime_to_dt64,
)
+
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
-from pandas._libs.tslibs.offsets cimport to_offset, is_offset_object
-from pandas._libs.tslibs.timedeltas cimport is_any_td_scalar, delta_to_nanoseconds
+
+from pandas._libs.tslibs.offsets cimport is_offset_object, to_offset
+from pandas._libs.tslibs.timedeltas cimport delta_to_nanoseconds, is_any_td_scalar
+
from pandas._libs.tslibs.timedeltas import Timedelta
+
from pandas._libs.tslibs.timezones cimport (
- is_utc, maybe_get_tz, treat_tz_as_pytz, utc_pytz as UTC,
- get_timezone, tz_compare,
+ get_timezone,
+ is_utc,
+ maybe_get_tz,
+ treat_tz_as_pytz,
+ tz_compare,
+ utc_pytz as UTC,
)
from pandas._libs.tslibs.tzconversion cimport (
tz_convert_from_utc_single,
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index a8c785704d8e8..b82291a71057e 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -1,27 +1,31 @@
from datetime import timezone
+
from cpython.datetime cimport datetime, timedelta, tzinfo
# dateutil compat
+
from dateutil.tz import (
gettz as dateutil_gettz,
tzfile as _dateutil_tzfile,
tzlocal as _dateutil_tzlocal,
tzutc as _dateutil_tzutc,
)
-
-
-from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
import pytz
+from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
+
UTC = pytz.utc
import numpy as np
+
cimport numpy as cnp
from numpy cimport int64_t
+
cnp.import_array()
# ----------------------------------------------------------------------
-from pandas._libs.tslibs.util cimport is_integer_object, get_nat
+from pandas._libs.tslibs.util cimport get_nat, is_integer_object
+
cdef int64_t NPY_NAT = get_nat()
cdef tzinfo utc_stdlib = timezone.utc
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index 606639af16a18..2b148cd8849f1 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -5,21 +5,27 @@ import cython
from cython import Py_ssize_t
from cpython.datetime cimport (
- PyDateTime_IMPORT, PyDelta_Check, datetime, timedelta, tzinfo)
+ PyDateTime_IMPORT,
+ PyDelta_Check,
+ datetime,
+ timedelta,
+ tzinfo,
+)
+
PyDateTime_IMPORT
-import pytz
from dateutil.tz import tzutc
-
import numpy as np
+import pytz
+
cimport numpy as cnp
-from numpy cimport ndarray, int64_t, uint8_t, intp_t
+from numpy cimport int64_t, intp_t, ndarray, uint8_t
+
cnp.import_array()
from pandas._libs.tslibs.ccalendar cimport DAY_NANOS, HOUR_NANOS
from pandas._libs.tslibs.nattype cimport NPY_NAT
-from pandas._libs.tslibs.np_datetime cimport (
- npy_datetimestruct, dt64_to_dtstruct)
+from pandas._libs.tslibs.np_datetime cimport dt64_to_dtstruct, npy_datetimestruct
from pandas._libs.tslibs.timezones cimport (
get_dst_info,
get_utcoffset,
diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx
index c8f8daf6724c2..bdc00f6c6e21a 100644
--- a/pandas/_libs/tslibs/vectorized.pyx
+++ b/pandas/_libs/tslibs/vectorized.pyx
@@ -1,18 +1,21 @@
import cython
-from cpython.datetime cimport datetime, date, time, tzinfo
+from cpython.datetime cimport date, datetime, time, tzinfo
import numpy as np
+
from numpy cimport int64_t, intp_t, ndarray
from .conversion cimport normalize_i8_stamp
+
from .dtypes import Resolution
+
from .nattype cimport NPY_NAT, c_NaT as NaT
-from .np_datetime cimport npy_datetimestruct, dt64_to_dtstruct
+from .np_datetime cimport dt64_to_dtstruct, npy_datetimestruct
from .offsets cimport to_offset
from .period cimport get_period_ordinal
from .timestamps cimport create_timestamp_from_ts
-from .timezones cimport is_utc, is_tzlocal, get_dst_info
+from .timezones cimport get_dst_info, is_tzlocal, is_utc
from .tzconversion cimport tz_convert_utc_to_tzlocal
# -------------------------------------------------------------------------
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 362d0e6263697..3ec4547d223ce 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -2,13 +2,15 @@
import cython
from cython import Py_ssize_t
-from libcpp.deque cimport deque
-from libc.stdlib cimport malloc, free
+from libc.stdlib cimport free, malloc
+from libcpp.deque cimport deque
import numpy as np
+
cimport numpy as cnp
-from numpy cimport ndarray, int64_t, float64_t, float32_t, uint8_t
+from numpy cimport float32_t, float64_t, int64_t, ndarray, uint8_t
+
cnp.import_array()
@@ -22,6 +24,7 @@ from pandas._libs.algos import is_monotonic
from pandas._libs.util cimport numeric
+
cdef extern from "../src/skiplist.h":
ctypedef struct node_t:
node_t **next
diff --git a/pandas/_libs/window/indexers.pyx b/pandas/_libs/window/indexers.pyx
index 8a1e7feb57ace..9af1159a805ec 100644
--- a/pandas/_libs/window/indexers.pyx
+++ b/pandas/_libs/window/indexers.pyx
@@ -1,7 +1,8 @@
# cython: boundscheck=False, wraparound=False, cdivision=True
import numpy as np
-from numpy cimport ndarray, int64_t
+
+from numpy cimport int64_t, ndarray
# Cython routines for window indexers
diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx
index 2d5b31d7ccbcf..40c39aabb7a7a 100644
--- a/pandas/_libs/writers.pyx
+++ b/pandas/_libs/writers.pyx
@@ -5,8 +5,8 @@ from cpython.bytes cimport PyBytes_GET_SIZE
from cpython.unicode cimport PyUnicode_GET_SIZE
import numpy as np
-from numpy cimport ndarray, uint8_t
+from numpy cimport ndarray, uint8_t
ctypedef fused pandas_string:
str
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 1cf9304ed2715..a020fbff3553a 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -535,7 +535,7 @@ def rands(nchars):
def close(fignum=None):
- from matplotlib.pyplot import get_fignums, close as _close
+ from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
diff --git a/pandas/_typing.py b/pandas/_typing.py
index 8e98833ad37f7..76ec527e6e258 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -24,13 +24,15 @@
# https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
if TYPE_CHECKING:
from pandas._libs import Period, Timedelta, Timestamp # noqa: F401
- from pandas.core.arrays.base import ExtensionArray # noqa: F401
+
from pandas.core.dtypes.dtypes import ExtensionDtype # noqa: F401
- from pandas.core.indexes.base import Index # noqa: F401
- from pandas.core.generic import NDFrame # noqa: F401
+
from pandas import Interval # noqa: F401
- from pandas.core.series import Series # noqa: F401
+ from pandas.core.arrays.base import ExtensionArray # noqa: F401
from pandas.core.frame import DataFrame # noqa: F401
+ from pandas.core.generic import NDFrame # noqa: F401
+ from pandas.core.indexes.base import Index # noqa: F401
+ from pandas.core.series import Series # noqa: F401
# array-like
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 0484de3fa165d..015b203a60256 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -14,7 +14,7 @@
from pandas import Index
if TYPE_CHECKING:
- from pandas import Series, DataFrame
+ from pandas import DataFrame, Series
def load_reduce(self):
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 733dbeed34b72..6b8d7dc35fe95 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -15,7 +15,7 @@
from pandas.core.construction import create_series_with_explicit_dtype
if TYPE_CHECKING:
- from pandas import DataFrame, Series, Index
+ from pandas import DataFrame, Index, Series
ResType = Dict[int, Any]
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index db9cfd9d7fc59..6e5c7bc699962 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -520,7 +520,7 @@ def _from_inferred_categories(
-------
Categorical
"""
- from pandas import Index, to_numeric, to_datetime, to_timedelta
+ from pandas import Index, to_datetime, to_numeric, to_timedelta
cats = Index(inferred_categories)
known_categories = (
@@ -1403,7 +1403,7 @@ def value_counts(self, dropna=True):
--------
Series.value_counts
"""
- from pandas import Series, CategoricalIndex
+ from pandas import CategoricalIndex, Series
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index ee4d43fdb3bc2..c6945e2f78b5a 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -959,7 +959,7 @@ def value_counts(self, dropna=False):
-------
Series
"""
- from pandas import Series, Index
+ from pandas import Index, Series
if dropna:
values = self[~self.isna()]._data
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index b0958af41158c..57df067c7b16e 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -116,6 +116,7 @@ def __from_arrow__(
Construct IntegerArray from pyarrow Array/ChunkedArray.
"""
import pyarrow # noqa: F811
+
from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask
pyarrow_type = pyarrow.from_numpy_dtype(self.type)
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index c861d25afd13f..ed2437cc061bd 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1105,6 +1105,7 @@ def __arrow_array__(self, type=None):
Convert myself into a pyarrow Array.
"""
import pyarrow
+
from pandas.core.arrays._arrow_utils import ArrowIntervalType
try:
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 8d5cb12d60e4d..fe78481d99d30 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -300,6 +300,7 @@ def __arrow_array__(self, type=None):
Convert myself into a pyarrow Array.
"""
import pyarrow
+
from pandas.core.arrays._arrow_utils import ArrowPeriodType
if type is not None:
diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py
index 8a30d2b954b55..da8d695c59b9e 100644
--- a/pandas/core/arrays/sparse/accessor.py
+++ b/pandas/core/arrays/sparse/accessor.py
@@ -87,8 +87,8 @@ def from_coo(cls, A, dense_index=False):
1 0 3.0
dtype: Sparse[float64, nan]
"""
- from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series
from pandas import Series
+ from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series
result = _coo_to_sparse_series(A, dense_index=dense_index)
result = Series(result.array, index=result.index, copy=False)
@@ -253,9 +253,10 @@ def from_spmatrix(cls, data, index=None, columns=None):
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
- from pandas import DataFrame
from pandas._libs.sparse import IntIndex
+ from pandas import DataFrame
+
data = data.tocsc()
index, columns = cls._prep_index(data, index, columns)
n_rows, n_columns = data.shape
@@ -354,8 +355,8 @@ def density(self) -> float:
@staticmethod
def _prep_index(data, index, columns):
- import pandas.core.indexes.base as ibase
from pandas.core.indexes.api import ensure_index
+ import pandas.core.indexes.base as ibase
N, K = data.shape
if index is None:
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 86f6be77bc505..2b2431149e230 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -662,8 +662,10 @@ def register_plotting_backend_cb(key):
def register_converter_cb(key):
- from pandas.plotting import register_matplotlib_converters
- from pandas.plotting import deregister_matplotlib_converters
+ from pandas.plotting import (
+ deregister_matplotlib_converters,
+ register_matplotlib_converters,
+ )
if cf.get_option(key):
register_matplotlib_converters()
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 6c58698989e96..47f10f1f65f4a 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -48,9 +48,9 @@
import pandas.core.common as com
if TYPE_CHECKING:
- from pandas.core.series import Series # noqa: F401
- from pandas.core.indexes.api import Index # noqa: F401
from pandas.core.arrays import ExtensionArray # noqa: F401
+ from pandas.core.indexes.api import Index # noqa: F401
+ from pandas.core.series import Series # noqa: F401
def array(
@@ -255,14 +255,14 @@ def array(
ValueError: Cannot pass scalar '1' to 'pandas.array'.
"""
from pandas.core.arrays import (
- period_array,
BooleanArray,
+ DatetimeArray,
IntegerArray,
IntervalArray,
PandasArray,
- DatetimeArray,
- TimedeltaArray,
StringArray,
+ TimedeltaArray,
+ period_array,
)
if lib.is_scalar(data):
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 6b84f0e81f48b..228329898b6a4 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1244,6 +1244,7 @@ def try_datetime(v):
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype, xref GH19671
from pandas._libs.tslibs import conversion
+
from pandas import DatetimeIndex
try:
@@ -1303,8 +1304,8 @@ def maybe_cast_to_datetime(value, dtype, errors: str = "raise"):
try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
- from pandas.core.tools.timedeltas import to_timedelta
from pandas.core.tools.datetimes import to_datetime
+ from pandas.core.tools.timedeltas import to_timedelta
if dtype is not None:
if isinstance(dtype, str):
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 22480fbc47508..8350e136417b1 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -30,12 +30,13 @@
if TYPE_CHECKING:
import pyarrow # noqa: F401
+
+ from pandas import Categorical # noqa: F401
from pandas.core.arrays import ( # noqa: F401
+ DatetimeArray,
IntervalArray,
PeriodArray,
- DatetimeArray,
)
- from pandas import Categorical # noqa: F401
str_type = str
@@ -391,12 +392,13 @@ def __repr__(self) -> str_type:
@staticmethod
def _hash_categories(categories, ordered: Ordered = True) -> int:
+ from pandas.core.dtypes.common import DT64NS_DTYPE, is_datetime64tz_dtype
+
from pandas.core.util.hashing import (
- hash_array,
_combine_hash_arrays,
+ hash_array,
hash_tuples,
)
- from pandas.core.dtypes.common import is_datetime64tz_dtype, DT64NS_DTYPE
if len(categories) and isinstance(categories[0], tuple):
# assumes if any individual category is a tuple, then all our. ATM
@@ -939,6 +941,7 @@ def __from_arrow__(
Construct PeriodArray from pyarrow Array/ChunkedArray.
"""
import pyarrow # noqa: F811
+
from pandas.core.arrays import PeriodArray
from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask
@@ -1136,6 +1139,7 @@ def __from_arrow__(
Construct IntervalArray from pyarrow Array/ChunkedArray.
"""
import pyarrow # noqa: F811
+
from pandas.core.arrays import IntervalArray
if isinstance(array, pyarrow.Array):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f52341ed782d8..3e4c9393f74de 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -150,6 +150,7 @@
if TYPE_CHECKING:
from pandas.core.groupby.generic import DataFrameGroupBy
+
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
@@ -5204,8 +5205,9 @@ def duplicated(
4 True
dtype: bool
"""
+ from pandas._libs.hashtable import _SIZE_HINT_LIMIT, duplicated_int64
+
from pandas.core.sorting import get_group_index
- from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
if self.empty:
return self._constructor_sliced(dtype=bool)
@@ -7867,8 +7869,8 @@ def join(
def _join_compat(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
):
- from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
+ from pandas.core.reshape.merge import merge
if isinstance(other, Series):
if other.name is None:
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index ec7b14f27c5a1..c50b753cf3293 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -681,8 +681,8 @@ def value_counts(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
):
- from pandas.core.reshape.tile import cut
from pandas.core.reshape.merge import _get_join_indexers
+ from pandas.core.reshape.tile import cut
if bins is not None and not np.iterable(bins):
# scalar bins cannot be done at top level
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 67003dffb90bb..8239a792c65dd 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -237,7 +237,6 @@ def __new__(cls, *args, **kwargs):
# core/groupby/grouper.py::Grouper
# raising these warnings from TimeGrouper directly would fail the test:
# tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base
-
# hacky way to set the stacklevel: if cls is TimeGrouper it means
# that the call comes from a pandas internal call of resample,
# otherwise it comes from pd.Grouper
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 986d6323e704e..1be381e38b157 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5731,9 +5731,9 @@ def _maybe_cast_data_without_dtype(subarr):
"""
# Runtime import needed bc IntervalArray imports Index
from pandas.core.arrays import (
+ DatetimeArray,
IntervalArray,
PeriodArray,
- DatetimeArray,
TimedeltaArray,
)
diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py
index fd9a9a5ef6c93..6eedf72726acb 100644
--- a/pandas/core/internals/ops.py
+++ b/pandas/core/internals/ops.py
@@ -5,8 +5,8 @@
from pandas._typing import ArrayLike
if TYPE_CHECKING:
- from pandas.core.internals.managers import BlockManager # noqa:F401
from pandas.core.internals.blocks import Block # noqa:F401
+ from pandas.core.internals.managers import BlockManager # noqa:F401
def operate_blockwise(
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index a1db7742916de..6702bf519c52e 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -155,7 +155,7 @@ def _map_stringarray(
an ndarray.
"""
- from pandas.arrays import IntegerArray, StringArray, BooleanArray
+ from pandas.arrays import BooleanArray, IntegerArray, StringArray
mask = isna(arr)
@@ -2186,7 +2186,7 @@ def _wrap_result(
returns_string=True,
):
- from pandas import Index, Series, MultiIndex
+ from pandas import Index, MultiIndex, Series
# for category, we do the stuff on the categories, so blow it up
# to the full series again
@@ -2292,7 +2292,7 @@ def _get_series_list(self, others):
list of Series
Others transformed into list of Series.
"""
- from pandas import Series, DataFrame
+ from pandas import DataFrame, Series
# self._orig is either Series or Index
idx = self._orig if isinstance(self._orig, ABCIndexClass) else self._orig.index
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 0adab143f6052..7aac2f793f61a 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -53,9 +53,10 @@
from pandas.core.indexes.datetimes import DatetimeIndex
if TYPE_CHECKING:
- from pandas import Series # noqa:F401
from pandas._libs.tslibs.nattype import NaTType # noqa:F401
+ from pandas import Series # noqa:F401
+
# ---------------------------------------------------------------------
# types used in annotations
@@ -876,7 +877,7 @@ def _assemble_from_unit_mappings(arg, errors, tz):
-------
Series
"""
- from pandas import to_timedelta, to_numeric, DataFrame
+ from pandas import DataFrame, to_numeric, to_timedelta
arg = DataFrame(arg)
if not arg.columns.is_unique:
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index 1b56b6d5a46fa..d79b9f4092325 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -275,7 +275,7 @@ def hash_array(
# then hash and rename categories. We allow skipping the categorization
# when the values are known/likely to be unique.
if categorize:
- from pandas import factorize, Categorical, Index
+ from pandas import Categorical, Index, factorize
codes, categories = factorize(vals, sort=False)
cat = Categorical(codes, Index(categories), ordered=False, fastpath=True)
diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py
index 40bff5a75709b..d16955a98b62f 100644
--- a/pandas/io/clipboard/__init__.py
+++ b/pandas/io/clipboard/__init__.py
@@ -311,17 +311,17 @@ def init_windows_clipboard():
global HGLOBAL, LPVOID, DWORD, LPCSTR, INT
global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE
from ctypes.wintypes import (
- HGLOBAL,
- LPVOID,
+ BOOL,
DWORD,
- LPCSTR,
- INT,
- HWND,
+ HANDLE,
+ HGLOBAL,
HINSTANCE,
HMENU,
- BOOL,
+ HWND,
+ INT,
+ LPCSTR,
+ LPVOID,
UINT,
- HANDLE,
)
windll = ctypes.windll
@@ -528,8 +528,8 @@ def determine_clipboard():
# Setup for the MAC OS X platform:
if os.name == "mac" or platform.system() == "Darwin":
try:
- import Foundation # check if pyobjc is installed
import AppKit
+ import Foundation # check if pyobjc is installed
except ImportError:
return init_osx_pbcopy_clipboard()
else:
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 2a12f779230b2..b1bbda4a4b7e0 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -834,8 +834,8 @@ class ExcelFile:
from pandas.io.excel._odfreader import _ODFReader
from pandas.io.excel._openpyxl import _OpenpyxlReader
- from pandas.io.excel._xlrd import _XlrdReader
from pandas.io.excel._pyxlsb import _PyxlsbReader
+ from pandas.io.excel._xlrd import _XlrdReader
_engines = {
"xlrd": _XlrdReader,
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 85ec9afaaec25..44abaf5d3b3c9 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -191,9 +191,9 @@ def _get_cell_string_value(self, cell) -> str:
Find and decode OpenDocument text:s tags that represent
a run length encoded sequence of space characters.
"""
- from odf.element import Text, Element
- from odf.text import S, P
+ from odf.element import Element, Text
from odf.namespaces import TEXTNS
+ from odf.text import P, S
text_p = P().qname
text_s = S().qname
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index 0696d82e51f34..03a30cbd62f9a 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -225,7 +225,7 @@ def _convert_to_fill(cls, fill_dict):
-------
fill : openpyxl.styles.Fill
"""
- from openpyxl.styles import PatternFill, GradientFill
+ from openpyxl.styles import GradientFill, PatternFill
_pattern_fill_key_map = {
"patternType": "fill_type",
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index 8f7d3b1368fc7..af82c15fd6b66 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -48,11 +48,11 @@ def get_sheet_by_index(self, index):
def get_sheet_data(self, sheet, convert_float):
from xlrd import (
- xldate,
+ XL_CELL_BOOLEAN,
XL_CELL_DATE,
XL_CELL_ERROR,
- XL_CELL_BOOLEAN,
XL_CELL_NUMBER,
+ xldate,
)
epoch1904 = self.book.datemode
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index fe85eab4bfbf5..c05f79f935548 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -72,7 +72,7 @@
from pandas.io.formats.printing import adjoin, justify, pprint_thing
if TYPE_CHECKING:
- from pandas import Series, DataFrame, Categorical
+ from pandas import Categorical, DataFrame, Series
FormattersType = Union[
List[Callable], Tuple[Callable, ...], Mapping[Union[str, int], Callable]
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index d11144938eb26..fd1efa2d1b668 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -42,8 +42,8 @@
try:
- import matplotlib.pyplot as plt
from matplotlib import colors
+ import matplotlib.pyplot as plt
has_mpl = True
except ImportError:
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 3193f52d239f1..8354cf413814e 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -707,8 +707,8 @@ def _build_doc(self):
--------
pandas.io.html._HtmlFrameParser._build_doc
"""
- from lxml.html import parse, fromstring, HTMLParser
from lxml.etree import XMLSyntaxError
+ from lxml.html import HTMLParser, fromstring, parse
parser = HTMLParser(recover=True, encoding=self.encoding)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index b67a1c5781d91..e0df4c29e543e 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -57,7 +57,7 @@
from pandas.io.formats.printing import adjoin, pprint_thing
if TYPE_CHECKING:
- from tables import File, Node, Col # noqa:F401
+ from tables import Col, File, Node # noqa:F401
# versioning attribute
diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx
index 0038e39e2ffcc..17b41fd2b4379 100644
--- a/pandas/io/sas/sas.pyx
+++ b/pandas/io/sas/sas.pyx
@@ -1,8 +1,8 @@
# cython: profile=False
# cython: boundscheck=False, initializedcheck=False
from cython import Py_ssize_t
-
import numpy as np
+
import pandas.io.sas.sas_constants as const
ctypedef signed long long int64_t
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 9177696ca13d6..c87391eaa62b1 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -937,7 +937,7 @@ def _get_column_names_and_types(self, dtype_mapper):
return column_names_and_types
def _create_table_setup(self):
- from sqlalchemy import Table, Column, PrimaryKeyConstraint
+ from sqlalchemy import Column, PrimaryKeyConstraint, Table
column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)
@@ -1026,15 +1026,15 @@ def _sqlalchemy_type(self, col):
col_type = lib.infer_dtype(col, skipna=True)
from sqlalchemy.types import (
+ TIMESTAMP,
BigInteger,
- Integer,
- Float,
- Text,
Boolean,
- DateTime,
Date,
+ DateTime,
+ Float,
+ Integer,
+ Text,
Time,
- TIMESTAMP,
)
if col_type == "datetime64" or col_type == "datetime":
@@ -1079,7 +1079,7 @@ def _sqlalchemy_type(self, col):
return Text
def _get_dtype(self, sqltype):
- from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date, TIMESTAMP
+ from sqlalchemy.types import TIMESTAMP, Boolean, Date, DateTime, Float, Integer
if isinstance(sqltype, Float):
return float
@@ -1374,7 +1374,7 @@ def to_sql(
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
- from sqlalchemy.types import to_instance, TypeEngine
+ from sqlalchemy.types import TypeEngine, to_instance
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 353bc8a8936a5..b490e07e43753 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -1149,8 +1149,8 @@ def _plot(cls, ax, x, y, style=None, column_num=None, stacking_id=None, **kwds):
@classmethod
def _ts_plot(cls, ax, x, data, style=None, **kwds):
from pandas.plotting._matplotlib.timeseries import (
- _maybe_resample,
_decorate_axes,
+ _maybe_resample,
format_dateaxis,
)
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py
index 8f3571cf13cbc..95f9fbf3995ed 100644
--- a/pandas/plotting/_matplotlib/timeseries.py
+++ b/pandas/plotting/_matplotlib/timeseries.py
@@ -24,7 +24,7 @@
from pandas.tseries.frequencies import get_period_alias, is_subperiod, is_superperiod
if TYPE_CHECKING:
- from pandas import Series, Index # noqa:F401
+ from pandas import Index, Series # noqa:F401
# ---------------------------------------------------------------------
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index ecd20796b6f21..caa348d3a1fb9 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -267,9 +267,10 @@ def test_sparsearray():
def test_np():
- import numpy as np
import warnings
+ import numpy as np
+
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
assert (pd.np.arange(0, 10) == np.arange(0, 10)).all()
diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
index d517eaaec68d2..0176755b54dd1 100644
--- a/pandas/tests/arrays/interval/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -142,6 +142,7 @@ def test_repr():
@pyarrow_skip
def test_arrow_extension_type():
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowIntervalType
p1 = ArrowIntervalType(pa.int64(), "left")
@@ -158,6 +159,7 @@ def test_arrow_extension_type():
@pyarrow_skip
def test_arrow_array():
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowIntervalType
intervals = pd.interval_range(1, 5, freq=1).array
@@ -187,6 +189,7 @@ def test_arrow_array():
@pyarrow_skip
def test_arrow_array_missing():
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowIntervalType
arr = IntervalArray.from_breaks([0.0, 1.0, 2.0, 3.0])
@@ -221,6 +224,7 @@ def test_arrow_array_missing():
)
def test_arrow_table_roundtrip(breaks):
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowIntervalType
arr = IntervalArray.from_breaks(breaks)
diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py
index 8887dd0278afe..0d81e8e733842 100644
--- a/pandas/tests/arrays/test_period.py
+++ b/pandas/tests/arrays/test_period.py
@@ -359,6 +359,7 @@ def test_arrow_extension_type():
)
def test_arrow_array(data, freq):
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowPeriodType
periods = period_array(data, freq=freq)
@@ -384,6 +385,7 @@ def test_arrow_array(data, freq):
@pyarrow_skip
def test_arrow_array_missing():
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowPeriodType
arr = PeriodArray([1, 2, 3], freq="D")
@@ -399,6 +401,7 @@ def test_arrow_array_missing():
@pyarrow_skip
def test_arrow_table_roundtrip():
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowPeriodType
arr = PeriodArray([1, 2, 3], freq="D")
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 9d6b9f39a0578..52a1e3aae9058 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -287,7 +287,7 @@ def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api("median", float_frame, float_string_frame)
try:
- from scipy.stats import skew, kurtosis # noqa:F401
+ from scipy.stats import kurtosis, skew # noqa:F401
assert_stat_op_api("skew", float_frame, float_string_frame)
assert_stat_op_api("kurt", float_frame, float_string_frame)
@@ -370,7 +370,7 @@ def kurt(x):
)
try:
- from scipy import skew, kurtosis # noqa:F401
+ from scipy import kurtosis, skew # noqa:F401
assert_stat_op_calc("skew", skewness, float_frame_with_na)
assert_stat_op_calc("kurt", kurt, float_frame_with_na)
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index ec4162f87010f..7bb1d98086a91 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -59,6 +59,7 @@ def test_reindex_with_same_tz(self):
def test_time_loc(self): # GH8667
from datetime import time
+
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
diff --git a/pandas/tests/indexing/multiindex/test_indexing_slow.py b/pandas/tests/indexing/multiindex/test_indexing_slow.py
index be193e0854d8d..d8e56661b7d61 100644
--- a/pandas/tests/indexing/multiindex/test_indexing_slow.py
+++ b/pandas/tests/indexing/multiindex/test_indexing_slow.py
@@ -15,7 +15,7 @@ def test_multiindex_get_loc(): # GH7724, GH2646
with warnings.catch_warnings(record=True):
# test indexing into a multi-index before & past the lexsort depth
- from numpy.random import randint, choice, randn
+ from numpy.random import choice, randint, randn
cols = ["jim", "joe", "jolie", "joline", "jolia"]
diff --git a/pandas/tests/io/test_fsspec.py b/pandas/tests/io/test_fsspec.py
index c397a61616c1c..d64e2d1933ace 100644
--- a/pandas/tests/io/test_fsspec.py
+++ b/pandas/tests/io/test_fsspec.py
@@ -37,8 +37,8 @@ def test_read_csv(cleared_fs):
def test_reasonable_error(monkeypatch, cleared_fs):
- from fsspec.registry import known_implementations
from fsspec import registry
+ from fsspec.registry import known_implementations
registry.target.clear()
with pytest.raises(ValueError) as e:
diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py
index 4d93119ffa3f5..eacf4fa08545d 100644
--- a/pandas/tests/io/test_gcs.py
+++ b/pandas/tests/io/test_gcs.py
@@ -11,8 +11,7 @@
@td.skip_if_no("gcsfs")
def test_read_csv_gcs(monkeypatch):
- from fsspec import AbstractFileSystem
- from fsspec import registry
+ from fsspec import AbstractFileSystem, registry
registry.target.clear() # noqa # remove state
@@ -37,8 +36,7 @@ def open(*args, **kwargs):
@td.skip_if_no("gcsfs")
def test_to_csv_gcs(monkeypatch):
- from fsspec import AbstractFileSystem
- from fsspec import registry
+ from fsspec import AbstractFileSystem, registry
registry.target.clear() # noqa # remove state
df1 = DataFrame(
@@ -76,8 +74,7 @@ def mock_get_filepath_or_buffer(*args, **kwargs):
@td.skip_if_no("gcsfs")
def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
- from fsspec import AbstractFileSystem
- from fsspec import registry
+ from fsspec import AbstractFileSystem, registry
registry.target.clear() # noqa # remove state
df1 = DataFrame(
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 0991fae39138e..29b787d39c09d 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -48,10 +48,10 @@
try:
import sqlalchemy
- import sqlalchemy.schema
- import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
+ import sqlalchemy.schema
+ import sqlalchemy.sql.sqltypes as sqltypes
SQLALCHEMY_INSTALLED = True
except ImportError:
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 896d3278cdde1..3b1ff233c5ec1 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -13,7 +13,6 @@
from pandas import DataFrame, Series
import pandas._testing as tm
-
"""
This is a common base class used for various plotting tests
"""
@@ -24,6 +23,7 @@ class TestPlotBase:
def setup_method(self, method):
import matplotlib as mpl
+
from pandas.plotting._matplotlib import compat
mpl.rcdefaults()
@@ -187,8 +187,8 @@ def _check_colors(
Series used for color grouping key
used for andrew_curves, parallel_coordinates, radviz test
"""
+ from matplotlib.collections import Collection, LineCollection, PolyCollection
from matplotlib.lines import Line2D
- from matplotlib.collections import Collection, PolyCollection, LineCollection
conv = self.colorconverter
if linecolors is not None:
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 317a994bd9a32..ee43e5d7072fe 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -2408,8 +2408,8 @@ def test_specified_props_kwd_plot_box(self, props, expected):
assert result[expected][0].get_color() == "C1"
def test_default_color_cycle(self):
- import matplotlib.pyplot as plt
import cycler
+ import matplotlib.pyplot as plt
colors = list("rgbk")
plt.rcParams["axes.prop_cycle"] = cycler.cycler("color", colors)
@@ -2953,8 +2953,8 @@ def _check(axes):
@td.skip_if_no_scipy
def test_memory_leak(self):
""" Check that every plot type gets properly collected. """
- import weakref
import gc
+ import weakref
results = {}
for kind in plotting.PlotAccessor._all_kinds:
@@ -3032,8 +3032,8 @@ def test_df_subplots_patterns_minorticks(self):
@pytest.mark.slow
def test_df_gridspec_patterns(self):
# GH 10819
- import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
+ import matplotlib.pyplot as plt
ts = Series(np.random.randn(10), index=date_range("1/1/2000", periods=10))
@@ -3422,9 +3422,9 @@ def test_xlabel_ylabel_dataframe_subplots(
def _generate_4_axes_via_gridspec():
- import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec # noqa
+ import matplotlib.pyplot as plt
gs = mpl.gridspec.GridSpec(2, 2)
ax_tl = plt.subplot(gs[0, 0])
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index b6a6c326c3df3..34c881855d16a 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -101,7 +101,7 @@ def test_hist_layout_with_by(self):
@pytest.mark.slow
def test_hist_no_overlap(self):
- from matplotlib.pyplot import subplot, gcf
+ from matplotlib.pyplot import gcf, subplot
x = Series(randn(2))
y = Series(randn(2))
@@ -352,6 +352,7 @@ class TestDataFrameGroupByPlots(TestPlotBase):
@pytest.mark.slow
def test_grouped_hist_legacy(self):
from matplotlib.patches import Rectangle
+
from pandas.plotting._matplotlib.hist import _grouped_hist
df = DataFrame(randn(500, 2), columns=["A", "B"])
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 75eeede472fe9..f5c1c58f3f7ed 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -131,9 +131,10 @@ def test_scatter_matrix_axis(self):
@pytest.mark.slow
def test_andrews_curves(self, iris):
- from pandas.plotting import andrews_curves
from matplotlib import cm
+ from pandas.plotting import andrews_curves
+
df = iris
_check_plot_works(andrews_curves, frame=df, class_column="Name")
@@ -206,9 +207,10 @@ def test_andrews_curves(self, iris):
@pytest.mark.slow
def test_parallel_coordinates(self, iris):
- from pandas.plotting import parallel_coordinates
from matplotlib import cm
+ from pandas.plotting import parallel_coordinates
+
df = iris
ax = _check_plot_works(parallel_coordinates, frame=df, class_column="Name")
@@ -279,9 +281,10 @@ def test_parallel_coordinates_with_sorted_labels(self):
@pytest.mark.slow
def test_radviz(self, iris):
- from pandas.plotting import radviz
from matplotlib import cm
+ from pandas.plotting import radviz
+
df = iris
_check_plot_works(radviz, frame=df, class_column="Name")
@@ -397,6 +400,7 @@ def test_get_standard_colors_no_appending(self):
# Make sure not to add more colors so that matplotlib can cycle
# correctly.
from matplotlib import cm
+
from pandas.plotting._matplotlib.style import _get_standard_colors
color_before = cm.gnuplot(range(5))
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 151bb3bed7207..cc00626e992f3 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -452,7 +452,7 @@ def test_hist_layout_with_by(self):
@pytest.mark.slow
def test_hist_no_overlap(self):
- from matplotlib.pyplot import subplot, gcf
+ from matplotlib.pyplot import gcf, subplot
x = Series(randn(2))
y = Series(randn(2))
@@ -827,6 +827,7 @@ def test_standard_colors(self):
@pytest.mark.slow
def test_standard_colors_all(self):
import matplotlib.colors as colors
+
from pandas.plotting._matplotlib.style import _get_standard_colors
# multiple colors like mediumaquamarine
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index 0b34fab7b80b1..088f8681feb99 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -11,7 +11,6 @@
from pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range
import pandas._testing as tm
-
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
@@ -166,6 +165,7 @@ def test_getitem_setitem_datetime_tz_pytz():
def test_getitem_setitem_datetime_tz_dateutil():
from dateutil.tz import tzutc
+
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
tz = (
diff --git a/pandas/tests/series/methods/test_asof.py b/pandas/tests/series/methods/test_asof.py
index 19caf4eccf748..4b4ef5ea046be 100644
--- a/pandas/tests/series/methods/test_asof.py
+++ b/pandas/tests/series/methods/test_asof.py
@@ -90,7 +90,7 @@ def test_with_nan(self):
tm.assert_series_equal(result, expected)
def test_periodindex(self):
- from pandas import period_range, PeriodIndex
+ from pandas import PeriodIndex, period_range
# array or list or dates
N = 50
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 5c8a0d224c4f9..ef2bafd4ea2ad 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -195,8 +195,8 @@ def test_add_with_duplicate_index(self):
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
- from decimal import Decimal
from datetime import date
+ from decimal import Decimal
s = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index e718a6b759963..b32c5e91af295 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -90,7 +90,7 @@ def test_statsmodels():
def test_scikit_learn(df):
sklearn = import_module("sklearn") # noqa
- from sklearn import svm, datasets
+ from sklearn import datasets, svm
digits = datasets.load_digits()
clf = svm.SVC(gamma=0.001, C=100.0)
diff --git a/pandas/util/_doctools.py b/pandas/util/_doctools.py
index f413490764124..3a8a1a3144269 100644
--- a/pandas/util/_doctools.py
+++ b/pandas/util/_doctools.py
@@ -53,8 +53,8 @@ def plot(self, left, right, labels=None, vertical: bool = True):
vertical : bool, default True
If True, use vertical layout. If False, use horizontal layout.
"""
- import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
+ import matplotlib.pyplot as plt
if not isinstance(left, list):
left = [left]
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 7bf3df176b378..c0dd77cd73ddc 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -11,7 +11,7 @@ cpplint
flake8<3.8.0
flake8-comprehensions>=3.1.0
flake8-rst>=0.6.0,<=0.7.0
-isort==4.3.21
+isort>=5.2.1
mypy==0.730
pycodestyle
gitpython
| Backport PR #35470: CI: unpin isort 5 (#35134) | https://api.github.com/repos/pandas-dev/pandas/pulls/35501 | 2020-08-01T08:47:01Z | 2020-08-01T09:39:28Z | 2020-08-01T09:39:28Z | 2020-08-01T09:39:28Z |
Backport PR #35477 on branch 1.1.x (MAINT: Use float arange when required or intended) | diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
index 4a0212e890d3a..2300d8dd5529b 100644
--- a/pandas/tests/window/test_base_indexer.py
+++ b/pandas/tests/window/test_base_indexer.py
@@ -140,7 +140,7 @@ def get_window_bounds(self, num_values, min_periods, center, closed):
)
def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs):
# GH 32865
- values = np.arange(10)
+ values = np.arange(10.0)
values[5] = 100.0
indexer = FixedForwardWindowIndexer(window_size=3)
@@ -177,7 +177,7 @@ def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs)
@pytest.mark.parametrize("constructor", [Series, DataFrame])
def test_rolling_forward_skewness(constructor):
- values = np.arange(10)
+ values = np.arange(10.0)
values[5] = 100.0
indexer = FixedForwardWindowIndexer(window_size=5)
diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py
index 12c314d5e9ec9..69cd1d1ba069c 100644
--- a/pandas/tests/window/test_ewm.py
+++ b/pandas/tests/window/test_ewm.py
@@ -108,7 +108,7 @@ def test_ewma_halflife_without_times(halflife_with_times):
@pytest.mark.parametrize("min_periods", [0, 2])
def test_ewma_with_times_equal_spacing(halflife_with_times, times, min_periods):
halflife = halflife_with_times
- data = np.arange(10)
+ data = np.arange(10.0)
data[::2] = np.nan
df = DataFrame({"A": data, "time_col": date_range("2000", freq="D", periods=10)})
result = df.ewm(halflife=halflife, min_periods=min_periods, times=times).mean()
| Backport PR #35477: MAINT: Use float arange when required or intended | https://api.github.com/repos/pandas-dev/pandas/pulls/35500 | 2020-08-01T08:46:48Z | 2020-08-01T09:23:36Z | 2020-08-01T09:23:36Z | 2020-08-01T09:23:36Z |
BUG: Index.get_indexer_non_unique misbehaves with multiple nan | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 16474dd83a1f5..b4d62befc4075 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -234,6 +234,7 @@ Indexing
- Bug in indexing on a :class:`MultiIndex` failing to drop scalar levels when the indexer is a tuple containing a datetime-like string (:issue:`42476`)
- Bug in :meth:`DataFrame.sort_values` and :meth:`Series.sort_values` when passing an ascending value, failed to raise or incorrectly raising ``ValueError`` (:issue:`41634`)
- Bug in updating values of :class:`pandas.Series` using boolean index, created by using :meth:`pandas.DataFrame.pop` (:issue:`42530`)
+- Bug in :meth:`Index.get_indexer_non_unique` when index contains multiple ``np.nan`` (:issue:`35392`)
- Bug in :meth:`DataFrame.query` did not handle the degree sign in a backticked column name, such as \`Temp(°C)\`, used in an expression to query a dataframe (:issue:`42826`)
-
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 3351bb7cac7d6..f2e2abd16b985 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -288,10 +288,12 @@ cdef class IndexEngine:
object val
int count = 0, count_missing = 0
Py_ssize_t i, j, n, n_t, n_alloc
+ bint d_has_nan = False, stargets_has_nan = False, need_nan_check = True
self._ensure_mapping_populated()
values = np.array(self._get_index_values(), copy=False)
stargets = set(targets)
+
n = len(values)
n_t = len(targets)
if n > 10_000:
@@ -321,6 +323,7 @@ cdef class IndexEngine:
if stargets:
# otherwise, map by iterating through all items in the index
+
for i in range(n):
val = values[i]
if val in stargets:
@@ -328,12 +331,27 @@ cdef class IndexEngine:
d[val] = []
d[val].append(i)
+ elif util.is_nan(val):
+ # GH#35392
+ if need_nan_check:
+ # Do this check only once
+ stargets_has_nan = any(util.is_nan(val) for x in stargets)
+ need_nan_check = False
+
+ if stargets_has_nan:
+ if not d_has_nan:
+ # use a canonical nan object
+ d[np.nan] = []
+ d_has_nan = True
+ d[np.nan].append(i)
+
for i in range(n_t):
val = targets[i]
# found
- if val in d:
- for j in d[val]:
+ if val in d or (d_has_nan and util.is_nan(val)):
+ key = val if not util.is_nan(val) else np.nan
+ for j in d[key]:
# realloc if needed
if count >= n_alloc:
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 54271f0f9b492..4df39c61daccb 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5383,6 +5383,12 @@ def get_indexer_for(self, target) -> npt.NDArray[np.intp]:
-------
np.ndarray[np.intp]
List of indices.
+
+ Examples
+ --------
+ >>> idx = pd.Index([np.nan, 'var1', np.nan])
+ >>> idx.get_indexer_for([np.nan])
+ array([0, 2])
"""
if self._index_as_unique:
return self.get_indexer(target)
diff --git a/pandas/tests/indexes/object/test_indexing.py b/pandas/tests/indexes/object/test_indexing.py
index b26676a0d83cf..039483cc948df 100644
--- a/pandas/tests/indexes/object/test_indexing.py
+++ b/pandas/tests/indexes/object/test_indexing.py
@@ -1,6 +1,8 @@
import numpy as np
import pytest
+from pandas._libs.missing import is_matching_na
+
import pandas as pd
from pandas import Index
import pandas._testing as tm
@@ -66,6 +68,35 @@ def test_get_indexer_with_NA_values(
tm.assert_numpy_array_equal(result, expected)
+class TestGetIndexerNonUnique:
+ def test_get_indexer_non_unique_nas(self, nulls_fixture):
+ # even though this isn't non-unique, this should still work
+ index = Index(["a", "b", nulls_fixture])
+ indexer, missing = index.get_indexer_non_unique([nulls_fixture])
+
+ expected_indexer = np.array([2], dtype=np.intp)
+ expected_missing = np.array([], dtype=np.intp)
+ tm.assert_numpy_array_equal(indexer, expected_indexer)
+ tm.assert_numpy_array_equal(missing, expected_missing)
+
+ # actually non-unique
+ index = Index(["a", nulls_fixture, "b", nulls_fixture])
+ indexer, missing = index.get_indexer_non_unique([nulls_fixture])
+
+ expected_indexer = np.array([1, 3], dtype=np.intp)
+ tm.assert_numpy_array_equal(indexer, expected_indexer)
+ tm.assert_numpy_array_equal(missing, expected_missing)
+
+ # matching-but-not-identical nans
+ if is_matching_na(nulls_fixture, float("NaN")):
+ index = Index(["a", float("NaN"), "b", float("NaN")])
+ indexer, missing = index.get_indexer_non_unique([nulls_fixture])
+
+ expected_indexer = np.array([1, 3], dtype=np.intp)
+ tm.assert_numpy_array_equal(indexer, expected_indexer)
+ tm.assert_numpy_array_equal(missing, expected_missing)
+
+
class TestSliceLocs:
@pytest.mark.parametrize(
"in_slice,expected",
diff --git a/pandas/tests/indexes/test_indexing.py b/pandas/tests/indexes/test_indexing.py
index 5f6d0155ae6cf..80237baeb9594 100644
--- a/pandas/tests/indexes/test_indexing.py
+++ b/pandas/tests/indexes/test_indexing.py
@@ -7,6 +7,7 @@
take
where
get_indexer
+ get_indexer_for
slice_locs
asof_locs
@@ -25,6 +26,7 @@
Int64Index,
IntervalIndex,
MultiIndex,
+ NaT,
PeriodIndex,
RangeIndex,
Series,
@@ -294,3 +296,32 @@ def test_maybe_cast_slice_bound_kind_deprecated(index):
with tm.assert_produces_warning(FutureWarning):
# pass as positional
index._maybe_cast_slice_bound(index[0], "left", "loc")
+
+
+@pytest.mark.parametrize(
+ "idx,target,expected",
+ [
+ ([np.nan, "var1", np.nan], [np.nan], np.array([0, 2], dtype=np.intp)),
+ (
+ [np.nan, "var1", np.nan],
+ [np.nan, "var1"],
+ np.array([0, 2, 1], dtype=np.intp),
+ ),
+ (
+ np.array([np.nan, "var1", np.nan], dtype=object),
+ [np.nan],
+ np.array([0, 2], dtype=np.intp),
+ ),
+ (
+ DatetimeIndex(["2020-08-05", NaT, NaT]),
+ [NaT],
+ np.array([1, 2], dtype=np.intp),
+ ),
+ (["a", "b", "a", np.nan], [np.nan], np.array([3], dtype=np.intp)),
+ ],
+)
+def test_get_indexer_non_unique_multiple_nans(idx, target, expected):
+ # GH 35392
+ axis = Index(idx)
+ actual = axis.get_indexer_for(target)
+ tm.assert_numpy_array_equal(actual, expected)
| - [x] closes #35392
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Looking at the implementation in index.pyx, I notice that when `[np.nan]` is passed to `get_indexer_non_unique`, the code was not able to get past the `__contains__` method for stargets (line 314).
Further testing (python=3.7.7, numpy=1.18.5):
```
import numpy as np
# Case 1: Does not work -> prints nothing
# nan dtype: np.float64, ndarray dtype: np.float64
targets = np.array([np.nan])
# Case 2: Works -> prints 0, 1, 2
# nan dtype: U3, ndarray dtype: <U32
targets = np.array([np.nan, 'var1'])
values = np.array([np.nan, 'var1', np.nan])
stargets = set(targets)
for i, v in enumerate(values):
if v in stargets:
print(i)
```
Case 1 and 2 results differ because of the dtype of nan (U3 vs float64).
Upon further research, I figured out that np.nan != np.nan as per IEEE (when it is a float) and creating a set from a np.array could lead to some bizarre results (https://github.com/numpy/numpy/issues/9358). Also, since a dictionary is the main data structure in this method to keep track of the targets indices, I don't think it is ideal to use nans as keys (https://stackoverflow.com/questions/6441857/nans-as-key-in-dictionaries).
I thought it would be appropriate to replace nans (with 0s) in the targets and values arrays in order to avoid the problems stated above. When considering where to replace the nans, I thought of two places where it could potentially happen:
1. In `get_indexer_non_unique` (/pandas/core/indexes/base.py)
2. In `get_indexer_non_unique` (/pandas/_libs/index.pyx)
Including the changes in 1. would mean overwriting the Index object's properties, so I decided to include the changes in 2.
FYI -- I wasn't sure if the test I included was in the correct file. Please let me know if you would like this test to be in another file.
| https://api.github.com/repos/pandas-dev/pandas/pulls/35498 | 2020-07-31T19:24:51Z | 2021-08-05T11:51:18Z | 2021-08-05T11:51:18Z | 2021-08-05T13:07:53Z |
Fix numpydev CI failure | diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml
index 5cb58756a6ac1..7248b1740058f 100644
--- a/ci/deps/azure-37-numpydev.yaml
+++ b/ci/deps/azure-37-numpydev.yaml
@@ -14,7 +14,7 @@ dependencies:
- pytz
- pip
- pip:
- - cython==0.29.16 # GH#34014
+ - cython==0.29.21 # GH#34014
- "git+git://github.com/dateutil/dateutil.git"
- "--extra-index-url https://pypi.anaconda.org/scipy-wheels-nightly/simple"
- "--pre"
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35496 | 2020-07-31T18:58:27Z | 2020-08-03T19:01:34Z | null | 2023-04-12T20:17:11Z |
TST: GroupBy on 2 rows of MultiIndex returns correct group indices | diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index ebce5b0ef0a66..8c51ebf89f5c0 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2055,3 +2055,17 @@ def test_groups_repr_truncates(max_seq_items, expected):
result = df.groupby(np.array(df.a)).groups.__repr__()
assert result == expected
+
+
+def test_group_on_two_row_multiindex_returns_one_tuple_key():
+ # GH 18451
+ df = pd.DataFrame([{"a": 1, "b": 2, "c": 99}, {"a": 1, "b": 2, "c": 88}])
+ df = df.set_index(["a", "b"])
+
+ grp = df.groupby(["a", "b"])
+ result = grp.indices
+ expected = {(1, 2): np.array([0, 1], dtype=np.int64)}
+
+ assert len(result) == 1
+ key = (1, 2)
+ assert (result[key] == expected[key]).all()
| - [x] closes #18451
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
There was previously a bug where grouping on precisely two rows of a MultiIndex would return a wrongly formatted index. This PR is adding a test to enforce this correct behaviour. The new test fails on 0.20.3 but passes on >=1.0.5.
| https://api.github.com/repos/pandas-dev/pandas/pulls/35494 | 2020-07-31T10:42:08Z | 2020-08-03T23:46:23Z | 2020-08-03T23:46:23Z | 2020-08-03T23:46:31Z |
DOC: Add note to docstring DataFrame.compare about identical labels | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4668f264000e7..c1cb24cb9bffa 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5932,14 +5932,23 @@ def _construct_result(self, result) -> "DataFrame":
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
+Raises
+------
+ValueError
+ When the two DataFrames don't have identical labels or shape.
+
See Also
--------
Series.compare : Compare with another Series and show differences.
+DataFrame.equals : Test whether two objects contain the same elements.
Notes
-----
Matching NaNs will not appear as a difference.
+Can only compare identically-labeled
+(i.e. same shape, identical row and column labels) DataFrames
+
Examples
--------
>>> df = pd.DataFrame(
| - [x] closes #35491
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/35492 | 2020-07-31T09:57:01Z | 2020-09-24T06:49:10Z | 2020-09-24T06:49:09Z | 2020-09-24T08:07:01Z |
MAINT: Fix broadcasting of arrays | diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index 2d4163e0dee89..5c8d2cccfa5f3 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -266,8 +266,11 @@ def init_dict(data: Dict, index, columns, dtype: Optional[DtypeObj] = None):
else:
nan_dtype = dtype
val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
- arrays.loc[missing] = [val] * missing.sum()
-
+ if val.size == 0:
+ for iloc in np.where(missing)[0]:
+ arrays.iloc[iloc] = val
+ else:
+ arrays.loc[missing] = [val] * missing.sum()
else:
keys = list(data.keys())
columns = data_names = Index(keys)
| Prevent empty object arrays from being broadcast
- [X] closes #35481
- [X] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35485 | 2020-07-30T22:29:36Z | 2020-08-03T15:12:40Z | null | 2020-08-03T15:12:40Z |
DatetimeIndex get_loc validates date object | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 905242bfdd8ad..247fae02d348f 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -1,4 +1,4 @@
-from datetime import datetime, time, timedelta, tzinfo
+from datetime import date, datetime, time, timedelta, tzinfo
from typing import Optional, Union
import warnings
@@ -152,7 +152,7 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
_typ = "datetimearray"
_scalar_type = Timestamp
- _recognized_scalars = (datetime, np.datetime64)
+ _recognized_scalars = (datetime, np.datetime64, date)
_is_recognized_dtype = is_datetime64_any_dtype
# define my properties & methods for delegation
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 24b00199611bf..d3cefa07e373e 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1,4 +1,4 @@
-from datetime import datetime, timedelta
+from datetime import date, datetime, timedelta
import inspect
import re
from typing import TYPE_CHECKING, Any, List, Optional, Type, Union, cast
@@ -2226,6 +2226,8 @@ def _can_hold_element(self, element: Any) -> bool:
return is_datetime64_dtype(tipo)
elif element is NaT:
return True
+ elif isinstance(element, date):
+ return True
elif isinstance(element, datetime):
if self.is_datetimetz:
return tz_compare(element.tzinfo, self.dtype.tz)
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index cefd2ae7a9ddb..b270e806b84ca 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -70,10 +70,6 @@ def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
4.0,
object(),
timedelta(days=2),
- # GH#19800, GH#19301 datetime.date comparison raises to
- # match DatetimeIndex/Timestamp. This also matches the behavior
- # of stdlib datetime.datetime
- datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index b801f750718ac..933b24ac0fa8f 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -403,3 +403,10 @@ def test_split_non_utc(self):
result = np.split(indices, indices_or_sections=[])[0]
expected = indices._with_freq(None)
tm.assert_index_equal(result, expected)
+
+ def test_in_contains_date_object(self):
+ # GH#35466
+ d1 = date(2002, 9, 1)
+ idx1 = DatetimeIndex([d1])
+ assert d1 in idx1
+ assert "2002-09-01" in idx1
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index bddc50a3cbcc1..27207b313d723 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -1067,7 +1067,7 @@ def test_datetime_block_can_hold_element(self):
arr[0] = val
val = date(2010, 10, 10)
- assert not block._can_hold_element(val)
+ assert block._can_hold_element(val)
msg = (
"value should be a 'Timestamp', 'NaT', "
| - [ ] closes #35466
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Adding the date object try cast in get_loc. | https://api.github.com/repos/pandas-dev/pandas/pulls/35478 | 2020-07-30T15:17:43Z | 2020-11-04T16:55:11Z | null | 2020-11-04T16:55:11Z |
MAINT: Use float arange when required or intended | diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
index 4a0212e890d3a..2300d8dd5529b 100644
--- a/pandas/tests/window/test_base_indexer.py
+++ b/pandas/tests/window/test_base_indexer.py
@@ -140,7 +140,7 @@ def get_window_bounds(self, num_values, min_periods, center, closed):
)
def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs):
# GH 32865
- values = np.arange(10)
+ values = np.arange(10.0)
values[5] = 100.0
indexer = FixedForwardWindowIndexer(window_size=3)
@@ -177,7 +177,7 @@ def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs)
@pytest.mark.parametrize("constructor", [Series, DataFrame])
def test_rolling_forward_skewness(constructor):
- values = np.arange(10)
+ values = np.arange(10.0)
values[5] = 100.0
indexer = FixedForwardWindowIndexer(window_size=5)
diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py
index 12c314d5e9ec9..69cd1d1ba069c 100644
--- a/pandas/tests/window/test_ewm.py
+++ b/pandas/tests/window/test_ewm.py
@@ -108,7 +108,7 @@ def test_ewma_halflife_without_times(halflife_with_times):
@pytest.mark.parametrize("min_periods", [0, 2])
def test_ewma_with_times_equal_spacing(halflife_with_times, times, min_periods):
halflife = halflife_with_times
- data = np.arange(10)
+ data = np.arange(10.0)
data[::2] = np.nan
df = DataFrame({"A": data, "time_col": date_range("2000", freq="D", periods=10)})
result = df.ewm(halflife=halflife, min_periods=min_periods, times=times).mean()
Ensure arange is float when intended or required by NumPy
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35477 | 2020-07-30T14:05:09Z | 2020-07-30T18:45:46Z | 2020-07-30T18:45:46Z | 2020-08-01T08:46:01Z |
REGR: Fix conversion of mixed dtype DataFrame to numpy str | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index ade88a6127014..f0ad9d1ca3b0f 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
+- Fixed regression where :meth:`DataFrame.to_numpy` would raise a ``RuntimeError`` for mixed dtypes when converting to ``str`` (:issue:`35455`)
- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`)
- Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index aabdac16e9a1a..b66b6b92336f2 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1371,6 +1371,8 @@ def to_numpy(
result = self._mgr.as_array(
transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value
)
+ if result.dtype is not dtype:
+ result = np.array(result, dtype=dtype, copy=False)
return result
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 4b85f92391dce..aa74d173d69b3 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -19,6 +19,7 @@
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
is_datetimelike_v_numeric,
+ is_dtype_equal,
is_extension_array_dtype,
is_list_like,
is_numeric_v_string_like,
@@ -865,6 +866,8 @@ def _interleave(self, dtype=None, na_value=lib.no_default) -> np.ndarray:
dtype = dtype.subtype
elif is_extension_array_dtype(dtype):
dtype = "object"
+ elif is_dtype_equal(dtype, str):
+ dtype = "object"
result = np.empty(self.shape, dtype=dtype)
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 2b79fc8cd3406..cc57a3970d18b 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -367,6 +367,13 @@ def test_to_numpy_copy(self):
assert df.to_numpy(copy=False).base is arr
assert df.to_numpy(copy=True).base is not arr
+ def test_to_numpy_mixed_dtype_to_str(self):
+ # https://github.com/pandas-dev/pandas/issues/35455
+ df = pd.DataFrame([[pd.Timestamp("2020-01-01 00:00:00"), 100.0]])
+ result = df.to_numpy(dtype=str)
+ expected = np.array([["2020-01-01 00:00:00", "100.0"]], dtype=str)
+ tm.assert_numpy_array_equal(result, expected)
+
def test_swapaxes(self):
df = DataFrame(np.random.randn(10, 5))
tm.assert_frame_equal(df.T, df.swapaxes(0, 1))
| - [x] closes https://github.com/pandas-dev/pandas/issues/35455
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35473 | 2020-07-30T01:20:16Z | 2020-08-07T23:17:15Z | 2020-08-07T23:17:15Z | 2020-08-08T01:14:35Z |
CI: unpin isort 5 (#35134) | diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index dc6f45f810f3d..e0a2257b0ca1f 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -6,7 +6,7 @@
from .pandas_vb_common import tm
try:
- from pandas.tseries.offsets import Nano, Hour
+ from pandas.tseries.offsets import Hour, Nano
except ImportError:
# For compatibility with older versions
from pandas.core.datetools import * # noqa
diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index e266d871f5bc6..5d9070de92ec7 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -7,14 +7,14 @@
try:
from pandas import (
- rolling_median,
+ rolling_kurt,
+ rolling_max,
rolling_mean,
+ rolling_median,
rolling_min,
- rolling_max,
- rolling_var,
rolling_skew,
- rolling_kurt,
rolling_std,
+ rolling_var,
)
have_rolling_methods = True
diff --git a/asv_bench/benchmarks/io/parsers.py b/asv_bench/benchmarks/io/parsers.py
index ec3eddfff7184..5390056ba36f2 100644
--- a/asv_bench/benchmarks/io/parsers.py
+++ b/asv_bench/benchmarks/io/parsers.py
@@ -2,8 +2,8 @@
try:
from pandas._libs.tslibs.parsing import (
- concat_date_cols,
_does_string_look_like_datetime,
+ concat_date_cols,
)
except ImportError:
# Avoid whole benchmark suite import failure on asv (currently 0.4)
diff --git a/asv_bench/benchmarks/tslibs/normalize.py b/asv_bench/benchmarks/tslibs/normalize.py
index 7d4e0556f4d96..9a206410d8775 100644
--- a/asv_bench/benchmarks/tslibs/normalize.py
+++ b/asv_bench/benchmarks/tslibs/normalize.py
@@ -1,5 +1,5 @@
try:
- from pandas._libs.tslibs import normalize_i8_timestamps, is_date_array_normalized
+ from pandas._libs.tslibs import is_date_array_normalized, normalize_i8_timestamps
except ImportError:
from pandas._libs.tslibs.conversion import (
normalize_i8_timestamps,
diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 7b12de387d648..69ce0f1adce22 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -121,7 +121,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then
# Imports - Check formatting using isort see setup.cfg for settings
MSG='Check import format using isort' ; echo $MSG
- ISORT_CMD="isort --quiet --recursive --check-only pandas asv_bench scripts"
+ ISORT_CMD="isort --quiet --check-only pandas asv_bench scripts"
if [[ "$GITHUB_ACTIONS" == "true" ]]; then
eval $ISORT_CMD | awk '{print "##[error]" $0}'; RET=$(($RET + ${PIPESTATUS[0]}))
else
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index b85e9403038ab..1b0e36e7b6933 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -751,7 +751,7 @@ Imports are alphabetically sorted within these sections.
As part of :ref:`Continuous Integration <contributing.ci>` checks we run::
- isort --recursive --check-only pandas
+ isort --check-only pandas
to check that imports are correctly formatted as per the `setup.cfg`.
@@ -770,8 +770,6 @@ You should run::
to automatically format imports correctly. This will modify your local copy of the files.
-The `--recursive` flag can be passed to sort all files in a directory.
-
Alternatively, you can run a command similar to what was suggested for ``black`` and ``flake8`` :ref:`right above <contributing.code-formatting>`::
git diff upstream/master --name-only -- "*.py" | xargs -r isort
diff --git a/environment.yml b/environment.yml
index 3b088ca511be9..9efb995e29497 100644
--- a/environment.yml
+++ b/environment.yml
@@ -21,7 +21,7 @@ dependencies:
- flake8<3.8.0 # temporary pin, GH#34150
- flake8-comprehensions>=3.1.0 # used by flake8, linting of unnecessary comprehensions
- flake8-rst>=0.6.0,<=0.7.0 # linting of code blocks in rst files
- - isort=4.3.21 # check that imports are in the right order
+ - isort>=5.2.1 # check that imports are in the right order
- mypy=0.730
- pycodestyle # used by flake8
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index f5e16cddeb04c..d7b73a0a685d3 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -442,8 +442,8 @@ def register_option(
ValueError if `validator` is specified and `defval` is not a valid value.
"""
- import tokenize
import keyword
+ import tokenize
key = key.lower()
@@ -660,8 +660,8 @@ def _build_option_description(k: str) -> str:
def pp_options_list(keys: Iterable[str], width=80, _print: bool = False):
""" Builds a concise listing of available options, grouped by prefix """
- from textwrap import wrap
from itertools import groupby
+ from textwrap import wrap
def pp(name: str, ks: Iterable[str]) -> List[str]:
pfx = "- " + name + ".[" if name else ""
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 6b6ead795584f..7e90a8cc681ef 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -1,11 +1,12 @@
import cython
from cython import Py_ssize_t
-from libc.stdlib cimport malloc, free
-from libc.string cimport memmove
from libc.math cimport fabs, sqrt
+from libc.stdlib cimport free, malloc
+from libc.string cimport memmove
import numpy as np
+
cimport numpy as cnp
from numpy cimport (
NPY_FLOAT32,
@@ -31,12 +32,11 @@ from numpy cimport (
uint32_t,
uint64_t,
)
+
cnp.import_array()
cimport pandas._libs.util as util
-from pandas._libs.util cimport numeric, get_nat
-
from pandas._libs.khash cimport (
kh_destroy_int64,
kh_get_int64,
@@ -46,7 +46,7 @@ from pandas._libs.khash cimport (
kh_resize_int64,
khiter_t,
)
-
+from pandas._libs.util cimport get_nat, numeric
import pandas._libs.missing as missing
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 7c57e6ee9dbfd..38cb973d6dde9 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -1,27 +1,51 @@
import cython
from cython import Py_ssize_t
-from cython cimport floating
-from libc.stdlib cimport malloc, free
+from cython cimport floating
+from libc.stdlib cimport free, malloc
import numpy as np
+
cimport numpy as cnp
-from numpy cimport (ndarray,
- int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t,
- uint32_t, uint64_t, float32_t, float64_t, complex64_t, complex128_t)
+from numpy cimport (
+ complex64_t,
+ complex128_t,
+ float32_t,
+ float64_t,
+ int8_t,
+ int16_t,
+ int32_t,
+ int64_t,
+ ndarray,
+ uint8_t,
+ uint16_t,
+ uint32_t,
+ uint64_t,
+)
from numpy.math cimport NAN
-cnp.import_array()
-from pandas._libs.util cimport numeric, get_nat
+cnp.import_array()
-from pandas._libs.algos cimport (swap, TiebreakEnumType, TIEBREAK_AVERAGE,
- TIEBREAK_MIN, TIEBREAK_MAX, TIEBREAK_FIRST,
- TIEBREAK_DENSE)
-from pandas._libs.algos import (take_2d_axis1_float64_float64,
- groupsort_indexer, tiebreakers)
+from pandas._libs.algos cimport (
+ TIEBREAK_AVERAGE,
+ TIEBREAK_DENSE,
+ TIEBREAK_FIRST,
+ TIEBREAK_MAX,
+ TIEBREAK_MIN,
+ TiebreakEnumType,
+ swap,
+)
+from pandas._libs.util cimport get_nat, numeric
+
+from pandas._libs.algos import (
+ groupsort_indexer,
+ take_2d_axis1_float64_float64,
+ tiebreakers,
+)
from pandas._libs.missing cimport checknull
+
cdef int64_t NPY_NAT = get_nat()
_int64_max = np.iinfo(np.int64).max
diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx
index a98820ca57895..f2af04d91a3e3 100644
--- a/pandas/_libs/hashing.pyx
+++ b/pandas/_libs/hashing.pyx
@@ -2,10 +2,13 @@
# at https://github.com/veorq/SipHash
import cython
-from libc.stdlib cimport malloc, free
+
+from libc.stdlib cimport free, malloc
import numpy as np
-from numpy cimport ndarray, uint8_t, uint32_t, uint64_t, import_array
+
+from numpy cimport import_array, ndarray, uint8_t, uint32_t, uint64_t
+
import_array()
from pandas._libs.util cimport is_nan
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx
index c3dcbb942d7fe..ffaf6d6505955 100644
--- a/pandas/_libs/hashtable.pyx
+++ b/pandas/_libs/hashtable.pyx
@@ -1,60 +1,57 @@
cimport cython
-
-from cpython.ref cimport PyObject, Py_INCREF
-from cpython.mem cimport PyMem_Malloc, PyMem_Free
-
-from libc.stdlib cimport malloc, free
+from cpython.mem cimport PyMem_Free, PyMem_Malloc
+from cpython.ref cimport Py_INCREF, PyObject
+from libc.stdlib cimport free, malloc
import numpy as np
+
cimport numpy as cnp
-from numpy cimport ndarray, uint8_t, uint32_t, float64_t
+from numpy cimport float64_t, ndarray, uint8_t, uint32_t
from numpy.math cimport NAN
+
cnp.import_array()
+from pandas._libs cimport util
from pandas._libs.khash cimport (
- khiter_t,
- kh_str_t,
- kh_init_str,
- kh_put_str,
- kh_exist_str,
- kh_get_str,
- kh_destroy_str,
- kh_resize_str,
- kh_put_strbox,
- kh_get_strbox,
- kh_init_strbox,
- kh_int64_t,
- kh_init_int64,
- kh_resize_int64,
+ kh_destroy_float64,
kh_destroy_int64,
- kh_get_int64,
+ kh_destroy_pymap,
+ kh_destroy_str,
+ kh_destroy_uint64,
+ kh_exist_float64,
kh_exist_int64,
- kh_put_int64,
+ kh_exist_pymap,
+ kh_exist_str,
+ kh_exist_uint64,
kh_float64_t,
- kh_exist_float64,
- kh_put_float64,
- kh_init_float64,
kh_get_float64,
- kh_destroy_float64,
- kh_resize_float64,
- kh_resize_uint64,
- kh_exist_uint64,
- kh_destroy_uint64,
- kh_put_uint64,
+ kh_get_int64,
+ kh_get_pymap,
+ kh_get_str,
+ kh_get_strbox,
kh_get_uint64,
- kh_init_uint64,
- kh_destroy_pymap,
- kh_exist_pymap,
+ kh_init_float64,
+ kh_init_int64,
kh_init_pymap,
- kh_get_pymap,
+ kh_init_str,
+ kh_init_strbox,
+ kh_init_uint64,
+ kh_int64_t,
+ kh_put_float64,
+ kh_put_int64,
kh_put_pymap,
+ kh_put_str,
+ kh_put_strbox,
+ kh_put_uint64,
+ kh_resize_float64,
+ kh_resize_int64,
kh_resize_pymap,
+ kh_resize_str,
+ kh_resize_uint64,
+ kh_str_t,
+ khiter_t,
)
-
-
-from pandas._libs cimport util
-
from pandas._libs.missing cimport checknull
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 35c4b73b47695..d6659cc1895b1 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -1,6 +1,7 @@
import warnings
import numpy as np
+
cimport numpy as cnp
from numpy cimport (
float32_t,
@@ -16,17 +17,16 @@ from numpy cimport (
uint32_t,
uint64_t,
)
+
cnp.import_array()
from pandas._libs cimport util
-
+from pandas._libs.hashtable cimport HashTable
from pandas._libs.tslibs.nattype cimport c_NaT as NaT
from pandas._libs.tslibs.period cimport is_period_object
-from pandas._libs.tslibs.timestamps cimport _Timestamp
from pandas._libs.tslibs.timedeltas cimport _Timedelta
-
-from pandas._libs.hashtable cimport HashTable
+from pandas._libs.tslibs.timestamps cimport _Timestamp
from pandas._libs import algos, hashtable as _hash
from pandas._libs.missing import checknull
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 8b4b490f49b12..4f27fde52414a 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -5,12 +5,15 @@ from cython import Py_ssize_t
from cpython.slice cimport PySlice_GetIndicesEx
+
cdef extern from "Python.h":
Py_ssize_t PY_SSIZE_T_MAX
import numpy as np
+
cimport numpy as cnp
from numpy cimport NPY_INT64, int64_t
+
cnp.import_array()
from pandas._libs.algos import ensure_int64
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 95881ebf1385c..6867e8aba7411 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -1,7 +1,8 @@
import numbers
from operator import le, lt
-from cpython.datetime cimport PyDelta_Check, PyDateTime_IMPORT
+from cpython.datetime cimport PyDateTime_IMPORT, PyDelta_Check
+
PyDateTime_IMPORT
from cpython.object cimport (
@@ -16,8 +17,8 @@ from cpython.object cimport (
import cython
from cython import Py_ssize_t
-
import numpy as np
+
cimport numpy as cnp
from numpy cimport (
NPY_QUICKSORT,
@@ -30,22 +31,21 @@ from numpy cimport (
ndarray,
uint64_t,
)
+
cnp.import_array()
from pandas._libs cimport util
-
from pandas._libs.hashtable cimport Int64Vector
+from pandas._libs.tslibs.timedeltas cimport _Timedelta
+from pandas._libs.tslibs.timestamps cimport _Timestamp
+from pandas._libs.tslibs.timezones cimport tz_compare
from pandas._libs.tslibs.util cimport (
- is_integer_object,
is_float_object,
+ is_integer_object,
is_timedelta64_object,
)
-from pandas._libs.tslibs.timezones cimport tz_compare
-from pandas._libs.tslibs.timestamps cimport _Timestamp
-from pandas._libs.tslibs.timedeltas cimport _Timedelta
-
_VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither'])
diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx
index 54892a7e4bc77..13c7187923473 100644
--- a/pandas/_libs/join.pyx
+++ b/pandas/_libs/join.pyx
@@ -1,7 +1,7 @@
import cython
from cython import Py_ssize_t
-
import numpy as np
+
cimport numpy as cnp
from numpy cimport (
float32_t,
@@ -16,6 +16,7 @@ from numpy cimport (
uint32_t,
uint64_t,
)
+
cnp.import_array()
from pandas._libs.algos import (
@@ -640,7 +641,11 @@ def outer_join_indexer(ndarray[join_t] left, ndarray[join_t] right):
# ----------------------------------------------------------------------
from pandas._libs.hashtable cimport (
- HashTable, PyObjectHashTable, UInt64HashTable, Int64HashTable)
+ HashTable,
+ Int64HashTable,
+ PyObjectHashTable,
+ UInt64HashTable,
+)
ctypedef fused asof_t:
uint8_t
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 5ecbb2c3ffd35..5fa91ffee8ea8 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -5,23 +5,24 @@ import warnings
import cython
from cython import Py_ssize_t
-from cpython.object cimport PyObject_RichCompareBool, Py_EQ
-from cpython.ref cimport Py_INCREF
-from cpython.tuple cimport PyTuple_SET_ITEM, PyTuple_New
-from cpython.iterator cimport PyIter_Check
-from cpython.sequence cimport PySequence_Check
-from cpython.number cimport PyNumber_Check
-
from cpython.datetime cimport (
- PyDateTime_Check,
PyDate_Check,
- PyTime_Check,
- PyDelta_Check,
+ PyDateTime_Check,
PyDateTime_IMPORT,
+ PyDelta_Check,
+ PyTime_Check,
)
+from cpython.iterator cimport PyIter_Check
+from cpython.number cimport PyNumber_Check
+from cpython.object cimport Py_EQ, PyObject_RichCompareBool
+from cpython.ref cimport Py_INCREF
+from cpython.sequence cimport PySequence_Check
+from cpython.tuple cimport PyTuple_New, PyTuple_SET_ITEM
+
PyDateTime_IMPORT
import numpy as np
+
cimport numpy as cnp
from numpy cimport (
NPY_OBJECT,
@@ -39,6 +40,7 @@ from numpy cimport (
uint8_t,
uint64_t,
)
+
cnp.import_array()
cdef extern from "numpy/arrayobject.h":
@@ -63,28 +65,23 @@ cdef extern from "src/parse_helper.h":
int floatify(object, float64_t *result, int *maybe_int) except -1
from pandas._libs cimport util
-from pandas._libs.util cimport is_nan, UINT64_MAX, INT64_MAX, INT64_MIN
+from pandas._libs.util cimport INT64_MAX, INT64_MIN, UINT64_MAX, is_nan
from pandas._libs.tslib import array_to_datetime
-from pandas._libs.tslibs.nattype cimport (
- NPY_NAT,
- c_NaT as NaT,
- checknull_with_nat,
-)
-from pandas._libs.tslibs.conversion cimport convert_to_tsobject
-from pandas._libs.tslibs.timedeltas cimport convert_to_timedelta64
-from pandas._libs.tslibs.timezones cimport tz_compare
-from pandas._libs.tslibs.period cimport is_period_object
-from pandas._libs.tslibs.offsets cimport is_offset_object
from pandas._libs.missing cimport (
+ C_NA,
checknull,
- isnaobj,
is_null_datetime64,
is_null_timedelta64,
- C_NA,
+ isnaobj,
)
-
+from pandas._libs.tslibs.conversion cimport convert_to_tsobject
+from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT, checknull_with_nat
+from pandas._libs.tslibs.offsets cimport is_offset_object
+from pandas._libs.tslibs.period cimport is_period_object
+from pandas._libs.tslibs.timedeltas cimport convert_to_timedelta64
+from pandas._libs.tslibs.timezones cimport tz_compare
# constants that will be compared to potentially arbitrarily large
# python int
@@ -1317,8 +1314,7 @@ def infer_dtype(value: object, skipna: bool = True) -> str:
if not isinstance(value, list):
value = list(value)
- from pandas.core.dtypes.cast import (
- construct_1d_object_array_from_listlike)
+ from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
values = construct_1d_object_array_from_listlike(value)
# make contiguous
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index fdd06fe631b97..760fab3781fd4 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -1,27 +1,25 @@
-import cython
-from cython import Py_ssize_t
-
import numbers
+import cython
+from cython import Py_ssize_t
import numpy as np
+
cimport numpy as cnp
-from numpy cimport ndarray, int64_t, uint8_t, float64_t
+from numpy cimport float64_t, int64_t, ndarray, uint8_t
+
cnp.import_array()
from pandas._libs cimport util
-
-
-from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value
from pandas._libs.tslibs.nattype cimport (
c_NaT as NaT,
checknull_with_nat,
is_null_datetimelike,
)
-from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
+from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value
+from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
from pandas.compat import is_platform_32bit
-
cdef:
float64_t INF = <float64_t>np.inf
float64_t NEGINF = -INF
diff --git a/pandas/_libs/ops.pyx b/pandas/_libs/ops.pyx
index 658600cdfbe6c..d1f897d237c1b 100644
--- a/pandas/_libs/ops.pyx
+++ b/pandas/_libs/ops.pyx
@@ -10,18 +10,17 @@ from cpython.object cimport (
PyObject_RichCompareBool,
)
-
import cython
from cython import Py_ssize_t
-
import numpy as np
-from numpy cimport ndarray, uint8_t, import_array
-import_array()
+from numpy cimport import_array, ndarray, uint8_t
+
+import_array()
-from pandas._libs.util cimport UINT8_MAX, is_nan
from pandas._libs.missing cimport checknull
+from pandas._libs.util cimport UINT8_MAX, is_nan
@cython.wraparound(False)
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 6ffb036e01595..fa77af6bd5a25 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -1,6 +1,8 @@
# Copyright (c) 2012, Lambda Foundry, Inc.
# See LICENSE for the license
import bz2
+from csv import QUOTE_MINIMAL, QUOTE_NONE, QUOTE_NONNUMERIC
+from errno import ENOENT
import gzip
import io
import os
@@ -9,17 +11,14 @@ import time
import warnings
import zipfile
-from csv import QUOTE_MINIMAL, QUOTE_NONNUMERIC, QUOTE_NONE
-from errno import ENOENT
-
from libc.stdlib cimport free
-from libc.string cimport strncpy, strlen, strcasecmp
+from libc.string cimport strcasecmp, strlen, strncpy
import cython
from cython import Py_ssize_t
from cpython.bytes cimport PyBytes_AsString, PyBytes_FromString
-from cpython.exc cimport PyErr_Occurred, PyErr_Fetch
+from cpython.exc cimport PyErr_Fetch, PyErr_Occurred
from cpython.object cimport PyObject
from cpython.ref cimport Py_XDECREF
from cpython.unicode cimport PyUnicode_AsUTF8String, PyUnicode_Decode
@@ -30,37 +29,59 @@ cdef extern from "Python.h":
import numpy as np
+
cimport numpy as cnp
-from numpy cimport ndarray, uint8_t, uint64_t, int64_t, float64_t
+from numpy cimport float64_t, int64_t, ndarray, uint8_t, uint64_t
+
cnp.import_array()
from pandas._libs cimport util
-from pandas._libs.util cimport UINT64_MAX, INT64_MAX, INT64_MIN
+from pandas._libs.util cimport INT64_MAX, INT64_MIN, UINT64_MAX
+
import pandas._libs.lib as lib
from pandas._libs.khash cimport (
- khiter_t,
- kh_str_t, kh_init_str, kh_put_str, kh_exist_str,
- kh_get_str, kh_destroy_str,
- kh_float64_t, kh_get_float64, kh_destroy_float64,
- kh_put_float64, kh_init_float64, kh_resize_float64,
- kh_strbox_t, kh_put_strbox, kh_get_strbox, kh_init_strbox,
+ kh_destroy_float64,
+ kh_destroy_str,
+ kh_destroy_str_starts,
kh_destroy_strbox,
- kh_str_starts_t, kh_put_str_starts_item, kh_init_str_starts,
- kh_get_str_starts_item, kh_destroy_str_starts, kh_resize_str_starts)
+ kh_exist_str,
+ kh_float64_t,
+ kh_get_float64,
+ kh_get_str,
+ kh_get_str_starts_item,
+ kh_get_strbox,
+ kh_init_float64,
+ kh_init_str,
+ kh_init_str_starts,
+ kh_init_strbox,
+ kh_put_float64,
+ kh_put_str,
+ kh_put_str_starts_item,
+ kh_put_strbox,
+ kh_resize_float64,
+ kh_resize_str_starts,
+ kh_str_starts_t,
+ kh_str_t,
+ kh_strbox_t,
+ khiter_t,
+)
+
+from pandas.compat import _get_lzma_file, _import_lzma
+from pandas.errors import DtypeWarning, EmptyDataError, ParserError, ParserWarning
from pandas.core.dtypes.common import (
+ is_bool_dtype,
is_categorical_dtype,
- is_integer_dtype, is_float_dtype,
- is_bool_dtype, is_object_dtype,
is_datetime64_dtype,
- pandas_dtype, is_extension_array_dtype)
+ is_extension_array_dtype,
+ is_float_dtype,
+ is_integer_dtype,
+ is_object_dtype,
+ pandas_dtype,
+)
from pandas.core.dtypes.concat import union_categoricals
-from pandas.compat import _import_lzma, _get_lzma_file
-from pandas.errors import (ParserError, DtypeWarning,
- EmptyDataError, ParserWarning)
-
lzma = _import_lzma()
cdef:
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index a01e0c5705dcf..7b36bc8baf891 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -2,15 +2,18 @@ from copy import copy
from cython import Py_ssize_t
-from libc.stdlib cimport malloc, free
+from libc.stdlib cimport free, malloc
import numpy as np
+
cimport numpy as cnp
-from numpy cimport ndarray, int64_t
+from numpy cimport int64_t, ndarray
+
cnp.import_array()
from pandas._libs cimport util
-from pandas._libs.lib import maybe_convert_objects, is_scalar
+
+from pandas._libs.lib import is_scalar, maybe_convert_objects
cdef _check_result_array(object obj, Py_ssize_t cnt):
diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx
index da4dd00027395..5c6c15fb50fed 100644
--- a/pandas/_libs/reshape.pyx
+++ b/pandas/_libs/reshape.pyx
@@ -16,7 +16,9 @@ from numpy cimport (
)
import numpy as np
+
cimport numpy as cnp
+
cnp.import_array()
from pandas._libs.lib cimport c_is_list_like
diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx
index 7c9575d921dc9..321d7c374d8ec 100644
--- a/pandas/_libs/sparse.pyx
+++ b/pandas/_libs/sparse.pyx
@@ -1,9 +1,18 @@
import cython
-
import numpy as np
+
cimport numpy as cnp
-from numpy cimport (ndarray, uint8_t, int64_t, int32_t, int16_t, int8_t,
- float64_t, float32_t)
+from numpy cimport (
+ float32_t,
+ float64_t,
+ int8_t,
+ int16_t,
+ int32_t,
+ int64_t,
+ ndarray,
+ uint8_t,
+)
+
cnp.import_array()
diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx
index 785a4d1f8b923..64fc8d615ea9c 100644
--- a/pandas/_libs/testing.pyx
+++ b/pandas/_libs/testing.pyx
@@ -1,13 +1,16 @@
import math
import numpy as np
+
from numpy cimport import_array
+
import_array()
from pandas._libs.util cimport is_array
-from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import is_dtype_equal
+from pandas.core.dtypes.missing import array_equivalent, isna
+
cdef NUMERIC_TYPES = (
bool,
@@ -129,6 +132,7 @@ cpdef assert_almost_equal(a, b,
if not isiterable(b):
from pandas._testing import assert_class_equal
+
# classes can't be the same, to raise error
assert_class_equal(a, b, obj=obj)
@@ -181,6 +185,7 @@ cpdef assert_almost_equal(a, b,
elif isiterable(b):
from pandas._testing import assert_class_equal
+
# classes can't be the same, to raise error
assert_class_equal(a, b, obj=obj)
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 35d5cd8f1e275..e4128af62d06d 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -7,23 +7,20 @@ from cpython.datetime cimport (
datetime,
tzinfo,
)
+
# import datetime C API
PyDateTime_IMPORT
cimport numpy as cnp
from numpy cimport float64_t, int64_t, ndarray
+
import numpy as np
+
cnp.import_array()
import pytz
-from pandas._libs.util cimport (
- is_datetime64_object,
- is_float_object,
- is_integer_object,
-)
-
from pandas._libs.tslibs.np_datetime cimport (
_string_to_dts,
check_dts_bounds,
@@ -34,9 +31,9 @@ from pandas._libs.tslibs.np_datetime cimport (
pydate_to_dt64,
pydatetime_to_dt64,
)
+from pandas._libs.util cimport is_datetime64_object, is_float_object, is_integer_object
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
-
from pandas._libs.tslibs.parsing import parse_datetime_string
from pandas._libs.tslibs.conversion cimport (
@@ -45,22 +42,18 @@ from pandas._libs.tslibs.conversion cimport (
convert_datetime_to_tsobject,
get_datetime64_nanos,
)
-
from pandas._libs.tslibs.nattype cimport (
NPY_NAT,
c_NaT as NaT,
c_nat_strings as nat_strings,
)
-
from pandas._libs.tslibs.timestamps cimport _Timestamp
-from pandas._libs.tslibs.timestamps import Timestamp
-from pandas._libs.tslibs.tzconversion cimport (
- tz_localize_to_utc_single,
-)
+from pandas._libs.tslibs.timestamps import Timestamp
# Note: this is the only non-tslibs intra-pandas dependency here
from pandas._libs.missing cimport checknull_with_nat_and_na
+from pandas._libs.tslibs.tzconversion cimport tz_localize_to_utc_single
def _test_parse_iso8601(ts: str):
diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx
index 00cecd25e5225..6cce2f5e1fd95 100644
--- a/pandas/_libs/tslibs/ccalendar.pyx
+++ b/pandas/_libs/tslibs/ccalendar.pyx
@@ -5,7 +5,7 @@ Cython implementations of functions resembling the stdlib calendar module
import cython
-from numpy cimport int64_t, int32_t
+from numpy cimport int32_t, int64_t
# ----------------------------------------------------------------------
# Constants
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 8cc3d25e86340..adf1dfbc1ac72 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -1,44 +1,68 @@
import cython
-
import numpy as np
+
cimport numpy as cnp
-from numpy cimport int64_t, int32_t, intp_t, ndarray
+from numpy cimport int32_t, int64_t, intp_t, ndarray
+
cnp.import_array()
import pytz
# stdlib datetime imports
-from cpython.datetime cimport (datetime, time, tzinfo,
- PyDateTime_Check, PyDate_Check,
- PyDateTime_IMPORT)
+
+from cpython.datetime cimport (
+ PyDate_Check,
+ PyDateTime_Check,
+ PyDateTime_IMPORT,
+ datetime,
+ time,
+ tzinfo,
+)
+
PyDateTime_IMPORT
from pandas._libs.tslibs.base cimport ABCTimestamp
-
from pandas._libs.tslibs.np_datetime cimport (
- check_dts_bounds, npy_datetimestruct, pandas_datetime_to_datetimestruct,
- _string_to_dts, npy_datetime, dt64_to_dtstruct, dtstruct_to_dt64,
- get_datetime64_unit, get_datetime64_value, pydatetime_to_dt64,
- NPY_DATETIMEUNIT, NPY_FR_ns)
-from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
+ NPY_DATETIMEUNIT,
+ NPY_FR_ns,
+ _string_to_dts,
+ check_dts_bounds,
+ dt64_to_dtstruct,
+ dtstruct_to_dt64,
+ get_datetime64_unit,
+ get_datetime64_value,
+ npy_datetime,
+ npy_datetimestruct,
+ pandas_datetime_to_datetimestruct,
+ pydatetime_to_dt64,
+)
-from pandas._libs.tslibs.util cimport (
- is_datetime64_object, is_integer_object, is_float_object)
+from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
from pandas._libs.tslibs.timezones cimport (
- is_utc, is_tzlocal, is_fixed_offset, get_utcoffset, get_dst_info,
- maybe_get_tz, tz_compare,
+ get_dst_info,
+ get_utcoffset,
+ is_fixed_offset,
+ is_tzlocal,
+ is_utc,
+ maybe_get_tz,
+ tz_compare,
utc_pytz as UTC,
)
+from pandas._libs.tslibs.util cimport (
+ is_datetime64_object,
+ is_float_object,
+ is_integer_object,
+)
+
from pandas._libs.tslibs.parsing import parse_datetime_string
from pandas._libs.tslibs.nattype cimport (
NPY_NAT,
- checknull_with_nat,
c_NaT as NaT,
c_nat_strings as nat_strings,
+ checknull_with_nat,
)
-
from pandas._libs.tslibs.tzconversion cimport (
tz_convert_utc_to_tzlocal,
tz_localize_to_utc_single,
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 1d1f900bc18b3..16fa05c3801c6 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -6,26 +6,37 @@ from locale import LC_TIME
import cython
from cython import Py_ssize_t
-
import numpy as np
+
cimport numpy as cnp
-from numpy cimport ndarray, int64_t, int32_t, int8_t, uint32_t
+from numpy cimport int8_t, int32_t, int64_t, ndarray, uint32_t
+
cnp.import_array()
from pandas._config.localization import set_locale
-from pandas._libs.tslibs.ccalendar import MONTHS_FULL, DAYS_FULL
+from pandas._libs.tslibs.ccalendar import DAYS_FULL, MONTHS_FULL
+
from pandas._libs.tslibs.ccalendar cimport (
- get_days_in_month, is_leapyear, dayofweek, get_week_of_year,
- get_day_of_year, get_iso_calendar, iso_calendar_t,
- month_offset,
+ dayofweek,
+ get_day_of_year,
+ get_days_in_month,
get_firstbday,
+ get_iso_calendar,
get_lastbday,
+ get_week_of_year,
+ is_leapyear,
+ iso_calendar_t,
+ month_offset,
)
-from pandas._libs.tslibs.np_datetime cimport (
- npy_datetimestruct, pandas_timedeltastruct, dt64_to_dtstruct,
- td64_to_tdstruct)
from pandas._libs.tslibs.nattype cimport NPY_NAT
+from pandas._libs.tslibs.np_datetime cimport (
+ dt64_to_dtstruct,
+ npy_datetimestruct,
+ pandas_timedeltastruct,
+ td64_to_tdstruct,
+)
+
from pandas._libs.tslibs.strptime import LocaleTime
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 264013f928d22..73df51832d700 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -1,3 +1,10 @@
+from cpython.datetime cimport (
+ PyDateTime_Check,
+ PyDateTime_IMPORT,
+ PyDelta_Check,
+ datetime,
+ timedelta,
+)
from cpython.object cimport (
Py_EQ,
Py_GE,
@@ -8,28 +15,19 @@ from cpython.object cimport (
PyObject_RichCompare,
)
-from cpython.datetime cimport (
- PyDateTime_Check,
- PyDateTime_IMPORT,
- PyDelta_Check,
- datetime,
- timedelta,
-)
PyDateTime_IMPORT
from cpython.version cimport PY_MINOR_VERSION
import numpy as np
+
cimport numpy as cnp
from numpy cimport int64_t
+
cnp.import_array()
-from pandas._libs.tslibs.np_datetime cimport (
- get_datetime64_value,
- get_timedelta64_value,
-)
cimport pandas._libs.tslibs.util as util
-
+from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value
# ----------------------------------------------------------------------
# Constants
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index 31cc55ad981bb..12aaaf4ce3977 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -1,5 +1,3 @@
-from cpython.object cimport Py_EQ, Py_NE, Py_GE, Py_GT, Py_LT, Py_LE
-
from cpython.datetime cimport (
PyDateTime_DATE_GET_HOUR,
PyDateTime_DATE_GET_MICROSECOND,
@@ -10,11 +8,15 @@ from cpython.datetime cimport (
PyDateTime_GET_YEAR,
PyDateTime_IMPORT,
)
+from cpython.object cimport Py_EQ, Py_GE, Py_GT, Py_LE, Py_LT, Py_NE
+
PyDateTime_IMPORT
from numpy cimport int64_t
+
from pandas._libs.tslibs.util cimport get_c_string_buf_and_size
+
cdef extern from "src/datetime/np_datetime.h":
int cmp_npy_datetimestruct(npy_datetimestruct *a,
npy_datetimestruct *b)
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 9a7ca15a2a1c2..ac2725fc58aee 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -1,39 +1,51 @@
-import cython
-
import operator
import re
import time
from typing import Any
import warnings
-from cpython.datetime cimport (PyDateTime_IMPORT,
- PyDateTime_Check,
- PyDate_Check,
- PyDelta_Check,
- datetime, timedelta, date,
- time as dt_time)
+
+import cython
+
+from cpython.datetime cimport (
+ PyDate_Check,
+ PyDateTime_Check,
+ PyDateTime_IMPORT,
+ PyDelta_Check,
+ date,
+ datetime,
+ time as dt_time,
+ timedelta,
+)
+
PyDateTime_IMPORT
-from dateutil.relativedelta import relativedelta
from dateutil.easter import easter
-
+from dateutil.relativedelta import relativedelta
import numpy as np
+
cimport numpy as cnp
from numpy cimport int64_t, ndarray
+
cnp.import_array()
# TODO: formalize having _libs.properties "above" tslibs in the dependency structure
+
from pandas._libs.properties import cache_readonly
from pandas._libs.tslibs cimport util
from pandas._libs.tslibs.util cimport (
- is_integer_object,
is_datetime64_object,
is_float_object,
+ is_integer_object,
)
from pandas._libs.tslibs.ccalendar import (
- MONTH_ALIASES, MONTH_TO_CAL_NUM, weekday_to_int, int_to_weekday,
+ MONTH_ALIASES,
+ MONTH_TO_CAL_NUM,
+ int_to_weekday,
+ weekday_to_int,
)
+
from pandas._libs.tslibs.ccalendar cimport (
DAY_NANOS,
dayofweek,
@@ -47,17 +59,20 @@ from pandas._libs.tslibs.conversion cimport (
)
from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT
from pandas._libs.tslibs.np_datetime cimport (
- npy_datetimestruct,
- dtstruct_to_dt64,
dt64_to_dtstruct,
+ dtstruct_to_dt64,
+ npy_datetimestruct,
pydate_to_dtstruct,
)
from pandas._libs.tslibs.tzconversion cimport tz_convert_from_utc_single
from .dtypes cimport PeriodDtypeCode
from .timedeltas cimport delta_to_nanoseconds
+
from .timedeltas import Timedelta
+
from .timestamps cimport _Timestamp
+
from .timestamps import Timestamp
# ---------------------------------------------------------------------
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index c4f369d0d3b3f..8429aebbd85b8 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -9,39 +9,44 @@ from libc.string cimport strchr
import cython
from cython import Py_ssize_t
-from cpython.object cimport PyObject_Str
-
from cpython.datetime cimport datetime, datetime_new, import_datetime, tzinfo
+from cpython.object cimport PyObject_Str
from cpython.version cimport PY_VERSION_HEX
+
import_datetime()
import numpy as np
+
cimport numpy as cnp
-from numpy cimport (PyArray_GETITEM, PyArray_ITER_DATA, PyArray_ITER_NEXT,
- PyArray_IterNew, flatiter, float64_t)
+from numpy cimport (
+ PyArray_GETITEM,
+ PyArray_ITER_DATA,
+ PyArray_ITER_NEXT,
+ PyArray_IterNew,
+ flatiter,
+ float64_t,
+)
+
cnp.import_array()
# dateutil compat
-from dateutil.tz import (tzoffset,
- tzlocal as _dateutil_tzlocal,
- tzutc as _dateutil_tzutc,
- tzstr as _dateutil_tzstr)
+
+from dateutil.parser import DEFAULTPARSER, parse as du_parse
from dateutil.relativedelta import relativedelta
-from dateutil.parser import DEFAULTPARSER
-from dateutil.parser import parse as du_parse
+from dateutil.tz import (
+ tzlocal as _dateutil_tzlocal,
+ tzoffset,
+ tzstr as _dateutil_tzstr,
+ tzutc as _dateutil_tzutc,
+)
from pandas._config import get_option
from pandas._libs.tslibs.ccalendar cimport c_MONTH_NUMBERS
-from pandas._libs.tslibs.nattype cimport (
- c_nat_strings as nat_strings,
- c_NaT as NaT,
-)
-from pandas._libs.tslibs.util cimport (
- is_array,
- get_c_string_buf_and_size,
-)
+from pandas._libs.tslibs.nattype cimport c_NaT as NaT, c_nat_strings as nat_strings
from pandas._libs.tslibs.offsets cimport is_offset_object
+from pandas._libs.tslibs.util cimport get_c_string_buf_and_size, is_array
+
cdef extern from "../src/headers/portable.h":
int getdigit_ascii(char c, int default) nogil
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 20961c6da56bd..86b6533f5caf5 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1,96 +1,98 @@
import warnings
-from cpython.object cimport PyObject_RichCompareBool, Py_EQ, Py_NE
+from cpython.object cimport Py_EQ, Py_NE, PyObject_RichCompareBool
+from numpy cimport import_array, int64_t, ndarray
-from numpy cimport int64_t, import_array, ndarray
import numpy as np
+
import_array()
from libc.stdlib cimport free, malloc
+from libc.string cimport memset, strlen
from libc.time cimport strftime, tm
-from libc.string cimport strlen, memset
import cython
from cpython.datetime cimport (
- datetime,
PyDate_Check,
PyDateTime_Check,
PyDateTime_IMPORT,
PyDelta_Check,
+ datetime,
)
+
# import datetime C API
PyDateTime_IMPORT
from pandas._libs.tslibs.np_datetime cimport (
- npy_datetimestruct,
- dtstruct_to_dt64,
- dt64_to_dtstruct,
- pandas_datetime_to_datetimestruct,
- check_dts_bounds,
NPY_DATETIMEUNIT,
NPY_FR_D,
NPY_FR_us,
+ check_dts_bounds,
+ dt64_to_dtstruct,
+ dtstruct_to_dt64,
+ npy_datetimestruct,
+ pandas_datetime_to_datetimestruct,
)
+
cdef extern from "src/datetime/np_datetime.h":
int64_t npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr,
npy_datetimestruct *d) nogil
cimport pandas._libs.tslibs.util as util
-from pandas._libs.tslibs.timestamps import Timestamp
from pandas._libs.tslibs.timedeltas import Timedelta
-from pandas._libs.tslibs.timedeltas cimport (
- delta_to_nanoseconds,
- is_any_td_scalar,
-)
+from pandas._libs.tslibs.timestamps import Timestamp
from pandas._libs.tslibs.ccalendar cimport (
+ c_MONTH_NUMBERS,
dayofweek,
get_day_of_year,
- is_leapyear,
- get_week_of_year,
get_days_in_month,
+ get_week_of_year,
+ is_leapyear,
)
-from pandas._libs.tslibs.ccalendar cimport c_MONTH_NUMBERS
+from pandas._libs.tslibs.timedeltas cimport delta_to_nanoseconds, is_any_td_scalar
+
from pandas._libs.tslibs.conversion import ensure_datetime64ns
from pandas._libs.tslibs.dtypes cimport (
- PeriodDtypeBase,
- FR_UND,
FR_ANN,
- FR_QTR,
- FR_MTH,
- FR_WK,
FR_BUS,
FR_DAY,
FR_HR,
FR_MIN,
- FR_SEC,
FR_MS,
- FR_US,
+ FR_MTH,
FR_NS,
+ FR_QTR,
+ FR_SEC,
+ FR_UND,
+ FR_US,
+ FR_WK,
+ PeriodDtypeBase,
attrname_to_abbrevs,
)
-
from pandas._libs.tslibs.parsing cimport get_rule_month
+
from pandas._libs.tslibs.parsing import parse_time_string
+
from pandas._libs.tslibs.nattype cimport (
- _nat_scalar_rules,
NPY_NAT,
- is_null_datetimelike,
+ _nat_scalar_rules,
c_NaT as NaT,
c_nat_strings as nat_strings,
+ is_null_datetimelike,
)
from pandas._libs.tslibs.offsets cimport (
BaseOffset,
- to_offset,
- is_tick_object,
is_offset_object,
+ is_tick_object,
+ to_offset,
)
-from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG
+from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG
cdef:
enum:
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index 660b582f73e6e..d2690be905a68 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -1,27 +1,30 @@
"""Strptime-related classes and functions.
"""
-import time
-import locale
import calendar
+import locale
import re
+import time
from cpython.datetime cimport date, tzinfo
from _thread import allocate_lock as _thread_allocate_lock
+import numpy as np
import pytz
-import numpy as np
from numpy cimport int64_t
-from pandas._libs.tslibs.np_datetime cimport (
- check_dts_bounds, dtstruct_to_dt64, npy_datetimestruct)
-
from pandas._libs.tslibs.nattype cimport (
- checknull_with_nat,
NPY_NAT,
c_nat_strings as nat_strings,
+ checknull_with_nat,
)
+from pandas._libs.tslibs.np_datetime cimport (
+ check_dts_bounds,
+ dtstruct_to_dt64,
+ npy_datetimestruct,
+)
+
cdef dict _parse_code_table = {'y': 0,
'Y': 1,
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 8f3a599bf107c..ee32ed53a908b 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -2,39 +2,47 @@ import collections
import cython
-from cpython.object cimport Py_NE, Py_EQ, PyObject_RichCompare
+from cpython.object cimport Py_EQ, Py_NE, PyObject_RichCompare
import numpy as np
+
cimport numpy as cnp
from numpy cimport int64_t, ndarray
+
cnp.import_array()
-from cpython.datetime cimport (timedelta,
- PyDateTime_Check, PyDelta_Check,
- PyDateTime_IMPORT)
+from cpython.datetime cimport (
+ PyDateTime_Check,
+ PyDateTime_IMPORT,
+ PyDelta_Check,
+ timedelta,
+)
+
PyDateTime_IMPORT
cimport pandas._libs.tslibs.util as util
-from pandas._libs.tslibs.util cimport (
- is_timedelta64_object, is_datetime64_object, is_integer_object,
- is_float_object, is_array
-)
-
from pandas._libs.tslibs.base cimport ABCTimestamp
-
from pandas._libs.tslibs.conversion cimport cast_from_unit
-
-from pandas._libs.tslibs.np_datetime cimport (
- cmp_scalar, td64_to_tdstruct, pandas_timedeltastruct)
-
from pandas._libs.tslibs.nattype cimport (
- checknull_with_nat,
NPY_NAT,
c_NaT as NaT,
c_nat_strings as nat_strings,
+ checknull_with_nat,
+)
+from pandas._libs.tslibs.np_datetime cimport (
+ cmp_scalar,
+ pandas_timedeltastruct,
+ td64_to_tdstruct,
)
from pandas._libs.tslibs.offsets cimport is_tick_object
+from pandas._libs.tslibs.util cimport (
+ is_array,
+ is_datetime64_object,
+ is_float_object,
+ is_integer_object,
+ is_timedelta64_object,
+)
# ----------------------------------------------------------------------
# Constants
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 8cef685933863..bddfc30d86a53 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -9,54 +9,66 @@ shadows the python class, where we do any heavy lifting.
import warnings
import numpy as np
+
cimport numpy as cnp
-from numpy cimport int64_t, int8_t, uint8_t, ndarray
-cnp.import_array()
+from numpy cimport int8_t, int64_t, ndarray, uint8_t
-from cpython.object cimport (PyObject_RichCompareBool, PyObject_RichCompare,
- Py_EQ, Py_NE)
+cnp.import_array()
-from cpython.datetime cimport (
- datetime,
- time,
- tzinfo,
- tzinfo as tzinfo_type, # alias bc `tzinfo` is a kwarg below
+from cpython.datetime cimport ( # alias bc `tzinfo` is a kwarg below
PyDateTime_Check,
+ PyDateTime_IMPORT,
PyDelta_Check,
PyTZInfo_Check,
- PyDateTime_IMPORT,
-)
-PyDateTime_IMPORT
-
-from pandas._libs.tslibs.util cimport (
- is_datetime64_object, is_float_object, is_integer_object,
- is_timedelta64_object, is_array,
+ datetime,
+ time,
+ tzinfo as tzinfo_type,
)
+from cpython.object cimport Py_EQ, Py_NE, PyObject_RichCompare, PyObject_RichCompareBool
-from pandas._libs.tslibs.base cimport ABCTimestamp
+PyDateTime_IMPORT
from pandas._libs.tslibs cimport ccalendar
-
+from pandas._libs.tslibs.base cimport ABCTimestamp
from pandas._libs.tslibs.conversion cimport (
_TSObject,
- convert_to_tsobject,
convert_datetime_to_tsobject,
+ convert_to_tsobject,
normalize_i8_stamp,
)
-from pandas._libs.tslibs.fields import get_start_end_field, get_date_name_field
+from pandas._libs.tslibs.util cimport (
+ is_array,
+ is_datetime64_object,
+ is_float_object,
+ is_integer_object,
+ is_timedelta64_object,
+)
+
+from pandas._libs.tslibs.fields import get_date_name_field, get_start_end_field
+
from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT
from pandas._libs.tslibs.np_datetime cimport (
- check_dts_bounds, npy_datetimestruct, dt64_to_dtstruct,
+ check_dts_bounds,
cmp_scalar,
+ dt64_to_dtstruct,
+ npy_datetimestruct,
pydatetime_to_dt64,
)
+
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
-from pandas._libs.tslibs.offsets cimport to_offset, is_offset_object
-from pandas._libs.tslibs.timedeltas cimport is_any_td_scalar, delta_to_nanoseconds
+
+from pandas._libs.tslibs.offsets cimport is_offset_object, to_offset
+from pandas._libs.tslibs.timedeltas cimport delta_to_nanoseconds, is_any_td_scalar
+
from pandas._libs.tslibs.timedeltas import Timedelta
+
from pandas._libs.tslibs.timezones cimport (
- is_utc, maybe_get_tz, treat_tz_as_pytz, utc_pytz as UTC,
- get_timezone, tz_compare,
+ get_timezone,
+ is_utc,
+ maybe_get_tz,
+ treat_tz_as_pytz,
+ tz_compare,
+ utc_pytz as UTC,
)
from pandas._libs.tslibs.tzconversion cimport (
tz_convert_from_utc_single,
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index a8c785704d8e8..b82291a71057e 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -1,27 +1,31 @@
from datetime import timezone
+
from cpython.datetime cimport datetime, timedelta, tzinfo
# dateutil compat
+
from dateutil.tz import (
gettz as dateutil_gettz,
tzfile as _dateutil_tzfile,
tzlocal as _dateutil_tzlocal,
tzutc as _dateutil_tzutc,
)
-
-
-from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
import pytz
+from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
+
UTC = pytz.utc
import numpy as np
+
cimport numpy as cnp
from numpy cimport int64_t
+
cnp.import_array()
# ----------------------------------------------------------------------
-from pandas._libs.tslibs.util cimport is_integer_object, get_nat
+from pandas._libs.tslibs.util cimport get_nat, is_integer_object
+
cdef int64_t NPY_NAT = get_nat()
cdef tzinfo utc_stdlib = timezone.utc
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index 606639af16a18..2b148cd8849f1 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -5,21 +5,27 @@ import cython
from cython import Py_ssize_t
from cpython.datetime cimport (
- PyDateTime_IMPORT, PyDelta_Check, datetime, timedelta, tzinfo)
+ PyDateTime_IMPORT,
+ PyDelta_Check,
+ datetime,
+ timedelta,
+ tzinfo,
+)
+
PyDateTime_IMPORT
-import pytz
from dateutil.tz import tzutc
-
import numpy as np
+import pytz
+
cimport numpy as cnp
-from numpy cimport ndarray, int64_t, uint8_t, intp_t
+from numpy cimport int64_t, intp_t, ndarray, uint8_t
+
cnp.import_array()
from pandas._libs.tslibs.ccalendar cimport DAY_NANOS, HOUR_NANOS
from pandas._libs.tslibs.nattype cimport NPY_NAT
-from pandas._libs.tslibs.np_datetime cimport (
- npy_datetimestruct, dt64_to_dtstruct)
+from pandas._libs.tslibs.np_datetime cimport dt64_to_dtstruct, npy_datetimestruct
from pandas._libs.tslibs.timezones cimport (
get_dst_info,
get_utcoffset,
diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx
index c8f8daf6724c2..bdc00f6c6e21a 100644
--- a/pandas/_libs/tslibs/vectorized.pyx
+++ b/pandas/_libs/tslibs/vectorized.pyx
@@ -1,18 +1,21 @@
import cython
-from cpython.datetime cimport datetime, date, time, tzinfo
+from cpython.datetime cimport date, datetime, time, tzinfo
import numpy as np
+
from numpy cimport int64_t, intp_t, ndarray
from .conversion cimport normalize_i8_stamp
+
from .dtypes import Resolution
+
from .nattype cimport NPY_NAT, c_NaT as NaT
-from .np_datetime cimport npy_datetimestruct, dt64_to_dtstruct
+from .np_datetime cimport dt64_to_dtstruct, npy_datetimestruct
from .offsets cimport to_offset
from .period cimport get_period_ordinal
from .timestamps cimport create_timestamp_from_ts
-from .timezones cimport is_utc, is_tzlocal, get_dst_info
+from .timezones cimport get_dst_info, is_tzlocal, is_utc
from .tzconversion cimport tz_convert_utc_to_tzlocal
# -------------------------------------------------------------------------
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 362d0e6263697..3ec4547d223ce 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -2,13 +2,15 @@
import cython
from cython import Py_ssize_t
-from libcpp.deque cimport deque
-from libc.stdlib cimport malloc, free
+from libc.stdlib cimport free, malloc
+from libcpp.deque cimport deque
import numpy as np
+
cimport numpy as cnp
-from numpy cimport ndarray, int64_t, float64_t, float32_t, uint8_t
+from numpy cimport float32_t, float64_t, int64_t, ndarray, uint8_t
+
cnp.import_array()
@@ -22,6 +24,7 @@ from pandas._libs.algos import is_monotonic
from pandas._libs.util cimport numeric
+
cdef extern from "../src/skiplist.h":
ctypedef struct node_t:
node_t **next
diff --git a/pandas/_libs/window/indexers.pyx b/pandas/_libs/window/indexers.pyx
index 8a1e7feb57ace..9af1159a805ec 100644
--- a/pandas/_libs/window/indexers.pyx
+++ b/pandas/_libs/window/indexers.pyx
@@ -1,7 +1,8 @@
# cython: boundscheck=False, wraparound=False, cdivision=True
import numpy as np
-from numpy cimport ndarray, int64_t
+
+from numpy cimport int64_t, ndarray
# Cython routines for window indexers
diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx
index 2d5b31d7ccbcf..40c39aabb7a7a 100644
--- a/pandas/_libs/writers.pyx
+++ b/pandas/_libs/writers.pyx
@@ -5,8 +5,8 @@ from cpython.bytes cimport PyBytes_GET_SIZE
from cpython.unicode cimport PyUnicode_GET_SIZE
import numpy as np
-from numpy cimport ndarray, uint8_t
+from numpy cimport ndarray, uint8_t
ctypedef fused pandas_string:
str
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 1cf9304ed2715..a020fbff3553a 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -535,7 +535,7 @@ def rands(nchars):
def close(fignum=None):
- from matplotlib.pyplot import get_fignums, close as _close
+ from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
diff --git a/pandas/_typing.py b/pandas/_typing.py
index 8e98833ad37f7..76ec527e6e258 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -24,13 +24,15 @@
# https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
if TYPE_CHECKING:
from pandas._libs import Period, Timedelta, Timestamp # noqa: F401
- from pandas.core.arrays.base import ExtensionArray # noqa: F401
+
from pandas.core.dtypes.dtypes import ExtensionDtype # noqa: F401
- from pandas.core.indexes.base import Index # noqa: F401
- from pandas.core.generic import NDFrame # noqa: F401
+
from pandas import Interval # noqa: F401
- from pandas.core.series import Series # noqa: F401
+ from pandas.core.arrays.base import ExtensionArray # noqa: F401
from pandas.core.frame import DataFrame # noqa: F401
+ from pandas.core.generic import NDFrame # noqa: F401
+ from pandas.core.indexes.base import Index # noqa: F401
+ from pandas.core.series import Series # noqa: F401
# array-like
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 0484de3fa165d..015b203a60256 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -14,7 +14,7 @@
from pandas import Index
if TYPE_CHECKING:
- from pandas import Series, DataFrame
+ from pandas import DataFrame, Series
def load_reduce(self):
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 733dbeed34b72..6b8d7dc35fe95 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -15,7 +15,7 @@
from pandas.core.construction import create_series_with_explicit_dtype
if TYPE_CHECKING:
- from pandas import DataFrame, Series, Index
+ from pandas import DataFrame, Index, Series
ResType = Dict[int, Any]
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index db9cfd9d7fc59..6e5c7bc699962 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -520,7 +520,7 @@ def _from_inferred_categories(
-------
Categorical
"""
- from pandas import Index, to_numeric, to_datetime, to_timedelta
+ from pandas import Index, to_datetime, to_numeric, to_timedelta
cats = Index(inferred_categories)
known_categories = (
@@ -1403,7 +1403,7 @@ def value_counts(self, dropna=True):
--------
Series.value_counts
"""
- from pandas import Series, CategoricalIndex
+ from pandas import CategoricalIndex, Series
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index ee4d43fdb3bc2..c6945e2f78b5a 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -959,7 +959,7 @@ def value_counts(self, dropna=False):
-------
Series
"""
- from pandas import Series, Index
+ from pandas import Index, Series
if dropna:
values = self[~self.isna()]._data
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index b0958af41158c..57df067c7b16e 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -116,6 +116,7 @@ def __from_arrow__(
Construct IntegerArray from pyarrow Array/ChunkedArray.
"""
import pyarrow # noqa: F811
+
from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask
pyarrow_type = pyarrow.from_numpy_dtype(self.type)
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index c861d25afd13f..ed2437cc061bd 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1105,6 +1105,7 @@ def __arrow_array__(self, type=None):
Convert myself into a pyarrow Array.
"""
import pyarrow
+
from pandas.core.arrays._arrow_utils import ArrowIntervalType
try:
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 8d5cb12d60e4d..fe78481d99d30 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -300,6 +300,7 @@ def __arrow_array__(self, type=None):
Convert myself into a pyarrow Array.
"""
import pyarrow
+
from pandas.core.arrays._arrow_utils import ArrowPeriodType
if type is not None:
diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py
index 8a30d2b954b55..da8d695c59b9e 100644
--- a/pandas/core/arrays/sparse/accessor.py
+++ b/pandas/core/arrays/sparse/accessor.py
@@ -87,8 +87,8 @@ def from_coo(cls, A, dense_index=False):
1 0 3.0
dtype: Sparse[float64, nan]
"""
- from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series
from pandas import Series
+ from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series
result = _coo_to_sparse_series(A, dense_index=dense_index)
result = Series(result.array, index=result.index, copy=False)
@@ -253,9 +253,10 @@ def from_spmatrix(cls, data, index=None, columns=None):
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
- from pandas import DataFrame
from pandas._libs.sparse import IntIndex
+ from pandas import DataFrame
+
data = data.tocsc()
index, columns = cls._prep_index(data, index, columns)
n_rows, n_columns = data.shape
@@ -354,8 +355,8 @@ def density(self) -> float:
@staticmethod
def _prep_index(data, index, columns):
- import pandas.core.indexes.base as ibase
from pandas.core.indexes.api import ensure_index
+ import pandas.core.indexes.base as ibase
N, K = data.shape
if index is None:
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 86f6be77bc505..2b2431149e230 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -662,8 +662,10 @@ def register_plotting_backend_cb(key):
def register_converter_cb(key):
- from pandas.plotting import register_matplotlib_converters
- from pandas.plotting import deregister_matplotlib_converters
+ from pandas.plotting import (
+ deregister_matplotlib_converters,
+ register_matplotlib_converters,
+ )
if cf.get_option(key):
register_matplotlib_converters()
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 6c58698989e96..47f10f1f65f4a 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -48,9 +48,9 @@
import pandas.core.common as com
if TYPE_CHECKING:
- from pandas.core.series import Series # noqa: F401
- from pandas.core.indexes.api import Index # noqa: F401
from pandas.core.arrays import ExtensionArray # noqa: F401
+ from pandas.core.indexes.api import Index # noqa: F401
+ from pandas.core.series import Series # noqa: F401
def array(
@@ -255,14 +255,14 @@ def array(
ValueError: Cannot pass scalar '1' to 'pandas.array'.
"""
from pandas.core.arrays import (
- period_array,
BooleanArray,
+ DatetimeArray,
IntegerArray,
IntervalArray,
PandasArray,
- DatetimeArray,
- TimedeltaArray,
StringArray,
+ TimedeltaArray,
+ period_array,
)
if lib.is_scalar(data):
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 6b84f0e81f48b..228329898b6a4 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1244,6 +1244,7 @@ def try_datetime(v):
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype, xref GH19671
from pandas._libs.tslibs import conversion
+
from pandas import DatetimeIndex
try:
@@ -1303,8 +1304,8 @@ def maybe_cast_to_datetime(value, dtype, errors: str = "raise"):
try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
- from pandas.core.tools.timedeltas import to_timedelta
from pandas.core.tools.datetimes import to_datetime
+ from pandas.core.tools.timedeltas import to_timedelta
if dtype is not None:
if isinstance(dtype, str):
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 22480fbc47508..8350e136417b1 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -30,12 +30,13 @@
if TYPE_CHECKING:
import pyarrow # noqa: F401
+
+ from pandas import Categorical # noqa: F401
from pandas.core.arrays import ( # noqa: F401
+ DatetimeArray,
IntervalArray,
PeriodArray,
- DatetimeArray,
)
- from pandas import Categorical # noqa: F401
str_type = str
@@ -391,12 +392,13 @@ def __repr__(self) -> str_type:
@staticmethod
def _hash_categories(categories, ordered: Ordered = True) -> int:
+ from pandas.core.dtypes.common import DT64NS_DTYPE, is_datetime64tz_dtype
+
from pandas.core.util.hashing import (
- hash_array,
_combine_hash_arrays,
+ hash_array,
hash_tuples,
)
- from pandas.core.dtypes.common import is_datetime64tz_dtype, DT64NS_DTYPE
if len(categories) and isinstance(categories[0], tuple):
# assumes if any individual category is a tuple, then all our. ATM
@@ -939,6 +941,7 @@ def __from_arrow__(
Construct PeriodArray from pyarrow Array/ChunkedArray.
"""
import pyarrow # noqa: F811
+
from pandas.core.arrays import PeriodArray
from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask
@@ -1136,6 +1139,7 @@ def __from_arrow__(
Construct IntervalArray from pyarrow Array/ChunkedArray.
"""
import pyarrow # noqa: F811
+
from pandas.core.arrays import IntervalArray
if isinstance(array, pyarrow.Array):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3f634c1e6e1ff..79627e43d78c2 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -150,6 +150,7 @@
if TYPE_CHECKING:
from pandas.core.groupby.generic import DataFrameGroupBy
+
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
@@ -5205,8 +5206,9 @@ def duplicated(
4 True
dtype: bool
"""
+ from pandas._libs.hashtable import _SIZE_HINT_LIMIT, duplicated_int64
+
from pandas.core.sorting import get_group_index
- from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
if self.empty:
return self._constructor_sliced(dtype=bool)
@@ -7868,8 +7870,8 @@ def join(
def _join_compat(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
):
- from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
+ from pandas.core.reshape.merge import merge
if isinstance(other, Series):
if other.name is None:
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index ec7b14f27c5a1..c50b753cf3293 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -681,8 +681,8 @@ def value_counts(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
):
- from pandas.core.reshape.tile import cut
from pandas.core.reshape.merge import _get_join_indexers
+ from pandas.core.reshape.tile import cut
if bins is not None and not np.iterable(bins):
# scalar bins cannot be done at top level
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 67003dffb90bb..8239a792c65dd 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -237,7 +237,6 @@ def __new__(cls, *args, **kwargs):
# core/groupby/grouper.py::Grouper
# raising these warnings from TimeGrouper directly would fail the test:
# tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base
-
# hacky way to set the stacklevel: if cls is TimeGrouper it means
# that the call comes from a pandas internal call of resample,
# otherwise it comes from pd.Grouper
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 986d6323e704e..1be381e38b157 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5731,9 +5731,9 @@ def _maybe_cast_data_without_dtype(subarr):
"""
# Runtime import needed bc IntervalArray imports Index
from pandas.core.arrays import (
+ DatetimeArray,
IntervalArray,
PeriodArray,
- DatetimeArray,
TimedeltaArray,
)
diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py
index fd9a9a5ef6c93..6eedf72726acb 100644
--- a/pandas/core/internals/ops.py
+++ b/pandas/core/internals/ops.py
@@ -5,8 +5,8 @@
from pandas._typing import ArrayLike
if TYPE_CHECKING:
- from pandas.core.internals.managers import BlockManager # noqa:F401
from pandas.core.internals.blocks import Block # noqa:F401
+ from pandas.core.internals.managers import BlockManager # noqa:F401
def operate_blockwise(
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index a1db7742916de..6702bf519c52e 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -155,7 +155,7 @@ def _map_stringarray(
an ndarray.
"""
- from pandas.arrays import IntegerArray, StringArray, BooleanArray
+ from pandas.arrays import BooleanArray, IntegerArray, StringArray
mask = isna(arr)
@@ -2186,7 +2186,7 @@ def _wrap_result(
returns_string=True,
):
- from pandas import Index, Series, MultiIndex
+ from pandas import Index, MultiIndex, Series
# for category, we do the stuff on the categories, so blow it up
# to the full series again
@@ -2292,7 +2292,7 @@ def _get_series_list(self, others):
list of Series
Others transformed into list of Series.
"""
- from pandas import Series, DataFrame
+ from pandas import DataFrame, Series
# self._orig is either Series or Index
idx = self._orig if isinstance(self._orig, ABCIndexClass) else self._orig.index
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 0adab143f6052..7aac2f793f61a 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -53,9 +53,10 @@
from pandas.core.indexes.datetimes import DatetimeIndex
if TYPE_CHECKING:
- from pandas import Series # noqa:F401
from pandas._libs.tslibs.nattype import NaTType # noqa:F401
+ from pandas import Series # noqa:F401
+
# ---------------------------------------------------------------------
# types used in annotations
@@ -876,7 +877,7 @@ def _assemble_from_unit_mappings(arg, errors, tz):
-------
Series
"""
- from pandas import to_timedelta, to_numeric, DataFrame
+ from pandas import DataFrame, to_numeric, to_timedelta
arg = DataFrame(arg)
if not arg.columns.is_unique:
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index 1b56b6d5a46fa..d79b9f4092325 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -275,7 +275,7 @@ def hash_array(
# then hash and rename categories. We allow skipping the categorization
# when the values are known/likely to be unique.
if categorize:
- from pandas import factorize, Categorical, Index
+ from pandas import Categorical, Index, factorize
codes, categories = factorize(vals, sort=False)
cat = Categorical(codes, Index(categories), ordered=False, fastpath=True)
diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py
index 40bff5a75709b..d16955a98b62f 100644
--- a/pandas/io/clipboard/__init__.py
+++ b/pandas/io/clipboard/__init__.py
@@ -311,17 +311,17 @@ def init_windows_clipboard():
global HGLOBAL, LPVOID, DWORD, LPCSTR, INT
global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE
from ctypes.wintypes import (
- HGLOBAL,
- LPVOID,
+ BOOL,
DWORD,
- LPCSTR,
- INT,
- HWND,
+ HANDLE,
+ HGLOBAL,
HINSTANCE,
HMENU,
- BOOL,
+ HWND,
+ INT,
+ LPCSTR,
+ LPVOID,
UINT,
- HANDLE,
)
windll = ctypes.windll
@@ -528,8 +528,8 @@ def determine_clipboard():
# Setup for the MAC OS X platform:
if os.name == "mac" or platform.system() == "Darwin":
try:
- import Foundation # check if pyobjc is installed
import AppKit
+ import Foundation # check if pyobjc is installed
except ImportError:
return init_osx_pbcopy_clipboard()
else:
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 2a12f779230b2..b1bbda4a4b7e0 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -834,8 +834,8 @@ class ExcelFile:
from pandas.io.excel._odfreader import _ODFReader
from pandas.io.excel._openpyxl import _OpenpyxlReader
- from pandas.io.excel._xlrd import _XlrdReader
from pandas.io.excel._pyxlsb import _PyxlsbReader
+ from pandas.io.excel._xlrd import _XlrdReader
_engines = {
"xlrd": _XlrdReader,
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 85ec9afaaec25..44abaf5d3b3c9 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -191,9 +191,9 @@ def _get_cell_string_value(self, cell) -> str:
Find and decode OpenDocument text:s tags that represent
a run length encoded sequence of space characters.
"""
- from odf.element import Text, Element
- from odf.text import S, P
+ from odf.element import Element, Text
from odf.namespaces import TEXTNS
+ from odf.text import P, S
text_p = P().qname
text_s = S().qname
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index 0696d82e51f34..03a30cbd62f9a 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -225,7 +225,7 @@ def _convert_to_fill(cls, fill_dict):
-------
fill : openpyxl.styles.Fill
"""
- from openpyxl.styles import PatternFill, GradientFill
+ from openpyxl.styles import GradientFill, PatternFill
_pattern_fill_key_map = {
"patternType": "fill_type",
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index 8f7d3b1368fc7..af82c15fd6b66 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -48,11 +48,11 @@ def get_sheet_by_index(self, index):
def get_sheet_data(self, sheet, convert_float):
from xlrd import (
- xldate,
+ XL_CELL_BOOLEAN,
XL_CELL_DATE,
XL_CELL_ERROR,
- XL_CELL_BOOLEAN,
XL_CELL_NUMBER,
+ xldate,
)
epoch1904 = self.book.datemode
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index fe85eab4bfbf5..c05f79f935548 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -72,7 +72,7 @@
from pandas.io.formats.printing import adjoin, justify, pprint_thing
if TYPE_CHECKING:
- from pandas import Series, DataFrame, Categorical
+ from pandas import Categorical, DataFrame, Series
FormattersType = Union[
List[Callable], Tuple[Callable, ...], Mapping[Union[str, int], Callable]
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index d11144938eb26..fd1efa2d1b668 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -42,8 +42,8 @@
try:
- import matplotlib.pyplot as plt
from matplotlib import colors
+ import matplotlib.pyplot as plt
has_mpl = True
except ImportError:
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 3193f52d239f1..8354cf413814e 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -707,8 +707,8 @@ def _build_doc(self):
--------
pandas.io.html._HtmlFrameParser._build_doc
"""
- from lxml.html import parse, fromstring, HTMLParser
from lxml.etree import XMLSyntaxError
+ from lxml.html import HTMLParser, fromstring, parse
parser = HTMLParser(recover=True, encoding=self.encoding)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index b67a1c5781d91..e0df4c29e543e 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -57,7 +57,7 @@
from pandas.io.formats.printing import adjoin, pprint_thing
if TYPE_CHECKING:
- from tables import File, Node, Col # noqa:F401
+ from tables import Col, File, Node # noqa:F401
# versioning attribute
diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx
index 0038e39e2ffcc..17b41fd2b4379 100644
--- a/pandas/io/sas/sas.pyx
+++ b/pandas/io/sas/sas.pyx
@@ -1,8 +1,8 @@
# cython: profile=False
# cython: boundscheck=False, initializedcheck=False
from cython import Py_ssize_t
-
import numpy as np
+
import pandas.io.sas.sas_constants as const
ctypedef signed long long int64_t
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 9177696ca13d6..c87391eaa62b1 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -937,7 +937,7 @@ def _get_column_names_and_types(self, dtype_mapper):
return column_names_and_types
def _create_table_setup(self):
- from sqlalchemy import Table, Column, PrimaryKeyConstraint
+ from sqlalchemy import Column, PrimaryKeyConstraint, Table
column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)
@@ -1026,15 +1026,15 @@ def _sqlalchemy_type(self, col):
col_type = lib.infer_dtype(col, skipna=True)
from sqlalchemy.types import (
+ TIMESTAMP,
BigInteger,
- Integer,
- Float,
- Text,
Boolean,
- DateTime,
Date,
+ DateTime,
+ Float,
+ Integer,
+ Text,
Time,
- TIMESTAMP,
)
if col_type == "datetime64" or col_type == "datetime":
@@ -1079,7 +1079,7 @@ def _sqlalchemy_type(self, col):
return Text
def _get_dtype(self, sqltype):
- from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date, TIMESTAMP
+ from sqlalchemy.types import TIMESTAMP, Boolean, Date, DateTime, Float, Integer
if isinstance(sqltype, Float):
return float
@@ -1374,7 +1374,7 @@ def to_sql(
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
- from sqlalchemy.types import to_instance, TypeEngine
+ from sqlalchemy.types import TypeEngine, to_instance
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 353bc8a8936a5..b490e07e43753 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -1149,8 +1149,8 @@ def _plot(cls, ax, x, y, style=None, column_num=None, stacking_id=None, **kwds):
@classmethod
def _ts_plot(cls, ax, x, data, style=None, **kwds):
from pandas.plotting._matplotlib.timeseries import (
- _maybe_resample,
_decorate_axes,
+ _maybe_resample,
format_dateaxis,
)
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py
index 8f3571cf13cbc..95f9fbf3995ed 100644
--- a/pandas/plotting/_matplotlib/timeseries.py
+++ b/pandas/plotting/_matplotlib/timeseries.py
@@ -24,7 +24,7 @@
from pandas.tseries.frequencies import get_period_alias, is_subperiod, is_superperiod
if TYPE_CHECKING:
- from pandas import Series, Index # noqa:F401
+ from pandas import Index, Series # noqa:F401
# ---------------------------------------------------------------------
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index ecd20796b6f21..caa348d3a1fb9 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -267,9 +267,10 @@ def test_sparsearray():
def test_np():
- import numpy as np
import warnings
+ import numpy as np
+
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
assert (pd.np.arange(0, 10) == np.arange(0, 10)).all()
diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
index d517eaaec68d2..0176755b54dd1 100644
--- a/pandas/tests/arrays/interval/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -142,6 +142,7 @@ def test_repr():
@pyarrow_skip
def test_arrow_extension_type():
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowIntervalType
p1 = ArrowIntervalType(pa.int64(), "left")
@@ -158,6 +159,7 @@ def test_arrow_extension_type():
@pyarrow_skip
def test_arrow_array():
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowIntervalType
intervals = pd.interval_range(1, 5, freq=1).array
@@ -187,6 +189,7 @@ def test_arrow_array():
@pyarrow_skip
def test_arrow_array_missing():
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowIntervalType
arr = IntervalArray.from_breaks([0.0, 1.0, 2.0, 3.0])
@@ -221,6 +224,7 @@ def test_arrow_array_missing():
)
def test_arrow_table_roundtrip(breaks):
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowIntervalType
arr = IntervalArray.from_breaks(breaks)
diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py
index 8887dd0278afe..0d81e8e733842 100644
--- a/pandas/tests/arrays/test_period.py
+++ b/pandas/tests/arrays/test_period.py
@@ -359,6 +359,7 @@ def test_arrow_extension_type():
)
def test_arrow_array(data, freq):
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowPeriodType
periods = period_array(data, freq=freq)
@@ -384,6 +385,7 @@ def test_arrow_array(data, freq):
@pyarrow_skip
def test_arrow_array_missing():
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowPeriodType
arr = PeriodArray([1, 2, 3], freq="D")
@@ -399,6 +401,7 @@ def test_arrow_array_missing():
@pyarrow_skip
def test_arrow_table_roundtrip():
import pyarrow as pa
+
from pandas.core.arrays._arrow_utils import ArrowPeriodType
arr = PeriodArray([1, 2, 3], freq="D")
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 9d6b9f39a0578..52a1e3aae9058 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -287,7 +287,7 @@ def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api("median", float_frame, float_string_frame)
try:
- from scipy.stats import skew, kurtosis # noqa:F401
+ from scipy.stats import kurtosis, skew # noqa:F401
assert_stat_op_api("skew", float_frame, float_string_frame)
assert_stat_op_api("kurt", float_frame, float_string_frame)
@@ -370,7 +370,7 @@ def kurt(x):
)
try:
- from scipy import skew, kurtosis # noqa:F401
+ from scipy import kurtosis, skew # noqa:F401
assert_stat_op_calc("skew", skewness, float_frame_with_na)
assert_stat_op_calc("kurt", kurt, float_frame_with_na)
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index ec4162f87010f..7bb1d98086a91 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -59,6 +59,7 @@ def test_reindex_with_same_tz(self):
def test_time_loc(self): # GH8667
from datetime import time
+
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
diff --git a/pandas/tests/indexing/multiindex/test_indexing_slow.py b/pandas/tests/indexing/multiindex/test_indexing_slow.py
index be193e0854d8d..d8e56661b7d61 100644
--- a/pandas/tests/indexing/multiindex/test_indexing_slow.py
+++ b/pandas/tests/indexing/multiindex/test_indexing_slow.py
@@ -15,7 +15,7 @@ def test_multiindex_get_loc(): # GH7724, GH2646
with warnings.catch_warnings(record=True):
# test indexing into a multi-index before & past the lexsort depth
- from numpy.random import randint, choice, randn
+ from numpy.random import choice, randint, randn
cols = ["jim", "joe", "jolie", "joline", "jolia"]
diff --git a/pandas/tests/io/test_fsspec.py b/pandas/tests/io/test_fsspec.py
index c397a61616c1c..d64e2d1933ace 100644
--- a/pandas/tests/io/test_fsspec.py
+++ b/pandas/tests/io/test_fsspec.py
@@ -37,8 +37,8 @@ def test_read_csv(cleared_fs):
def test_reasonable_error(monkeypatch, cleared_fs):
- from fsspec.registry import known_implementations
from fsspec import registry
+ from fsspec.registry import known_implementations
registry.target.clear()
with pytest.raises(ValueError) as e:
diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py
index 4d93119ffa3f5..eacf4fa08545d 100644
--- a/pandas/tests/io/test_gcs.py
+++ b/pandas/tests/io/test_gcs.py
@@ -11,8 +11,7 @@
@td.skip_if_no("gcsfs")
def test_read_csv_gcs(monkeypatch):
- from fsspec import AbstractFileSystem
- from fsspec import registry
+ from fsspec import AbstractFileSystem, registry
registry.target.clear() # noqa # remove state
@@ -37,8 +36,7 @@ def open(*args, **kwargs):
@td.skip_if_no("gcsfs")
def test_to_csv_gcs(monkeypatch):
- from fsspec import AbstractFileSystem
- from fsspec import registry
+ from fsspec import AbstractFileSystem, registry
registry.target.clear() # noqa # remove state
df1 = DataFrame(
@@ -76,8 +74,7 @@ def mock_get_filepath_or_buffer(*args, **kwargs):
@td.skip_if_no("gcsfs")
def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
- from fsspec import AbstractFileSystem
- from fsspec import registry
+ from fsspec import AbstractFileSystem, registry
registry.target.clear() # noqa # remove state
df1 = DataFrame(
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 0991fae39138e..29b787d39c09d 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -48,10 +48,10 @@
try:
import sqlalchemy
- import sqlalchemy.schema
- import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
+ import sqlalchemy.schema
+ import sqlalchemy.sql.sqltypes as sqltypes
SQLALCHEMY_INSTALLED = True
except ImportError:
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 896d3278cdde1..3b1ff233c5ec1 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -13,7 +13,6 @@
from pandas import DataFrame, Series
import pandas._testing as tm
-
"""
This is a common base class used for various plotting tests
"""
@@ -24,6 +23,7 @@ class TestPlotBase:
def setup_method(self, method):
import matplotlib as mpl
+
from pandas.plotting._matplotlib import compat
mpl.rcdefaults()
@@ -187,8 +187,8 @@ def _check_colors(
Series used for color grouping key
used for andrew_curves, parallel_coordinates, radviz test
"""
+ from matplotlib.collections import Collection, LineCollection, PolyCollection
from matplotlib.lines import Line2D
- from matplotlib.collections import Collection, PolyCollection, LineCollection
conv = self.colorconverter
if linecolors is not None:
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 317a994bd9a32..ee43e5d7072fe 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -2408,8 +2408,8 @@ def test_specified_props_kwd_plot_box(self, props, expected):
assert result[expected][0].get_color() == "C1"
def test_default_color_cycle(self):
- import matplotlib.pyplot as plt
import cycler
+ import matplotlib.pyplot as plt
colors = list("rgbk")
plt.rcParams["axes.prop_cycle"] = cycler.cycler("color", colors)
@@ -2953,8 +2953,8 @@ def _check(axes):
@td.skip_if_no_scipy
def test_memory_leak(self):
""" Check that every plot type gets properly collected. """
- import weakref
import gc
+ import weakref
results = {}
for kind in plotting.PlotAccessor._all_kinds:
@@ -3032,8 +3032,8 @@ def test_df_subplots_patterns_minorticks(self):
@pytest.mark.slow
def test_df_gridspec_patterns(self):
# GH 10819
- import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
+ import matplotlib.pyplot as plt
ts = Series(np.random.randn(10), index=date_range("1/1/2000", periods=10))
@@ -3422,9 +3422,9 @@ def test_xlabel_ylabel_dataframe_subplots(
def _generate_4_axes_via_gridspec():
- import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec # noqa
+ import matplotlib.pyplot as plt
gs = mpl.gridspec.GridSpec(2, 2)
ax_tl = plt.subplot(gs[0, 0])
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index b6a6c326c3df3..34c881855d16a 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -101,7 +101,7 @@ def test_hist_layout_with_by(self):
@pytest.mark.slow
def test_hist_no_overlap(self):
- from matplotlib.pyplot import subplot, gcf
+ from matplotlib.pyplot import gcf, subplot
x = Series(randn(2))
y = Series(randn(2))
@@ -352,6 +352,7 @@ class TestDataFrameGroupByPlots(TestPlotBase):
@pytest.mark.slow
def test_grouped_hist_legacy(self):
from matplotlib.patches import Rectangle
+
from pandas.plotting._matplotlib.hist import _grouped_hist
df = DataFrame(randn(500, 2), columns=["A", "B"])
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 75eeede472fe9..f5c1c58f3f7ed 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -131,9 +131,10 @@ def test_scatter_matrix_axis(self):
@pytest.mark.slow
def test_andrews_curves(self, iris):
- from pandas.plotting import andrews_curves
from matplotlib import cm
+ from pandas.plotting import andrews_curves
+
df = iris
_check_plot_works(andrews_curves, frame=df, class_column="Name")
@@ -206,9 +207,10 @@ def test_andrews_curves(self, iris):
@pytest.mark.slow
def test_parallel_coordinates(self, iris):
- from pandas.plotting import parallel_coordinates
from matplotlib import cm
+ from pandas.plotting import parallel_coordinates
+
df = iris
ax = _check_plot_works(parallel_coordinates, frame=df, class_column="Name")
@@ -279,9 +281,10 @@ def test_parallel_coordinates_with_sorted_labels(self):
@pytest.mark.slow
def test_radviz(self, iris):
- from pandas.plotting import radviz
from matplotlib import cm
+ from pandas.plotting import radviz
+
df = iris
_check_plot_works(radviz, frame=df, class_column="Name")
@@ -397,6 +400,7 @@ def test_get_standard_colors_no_appending(self):
# Make sure not to add more colors so that matplotlib can cycle
# correctly.
from matplotlib import cm
+
from pandas.plotting._matplotlib.style import _get_standard_colors
color_before = cm.gnuplot(range(5))
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 151bb3bed7207..cc00626e992f3 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -452,7 +452,7 @@ def test_hist_layout_with_by(self):
@pytest.mark.slow
def test_hist_no_overlap(self):
- from matplotlib.pyplot import subplot, gcf
+ from matplotlib.pyplot import gcf, subplot
x = Series(randn(2))
y = Series(randn(2))
@@ -827,6 +827,7 @@ def test_standard_colors(self):
@pytest.mark.slow
def test_standard_colors_all(self):
import matplotlib.colors as colors
+
from pandas.plotting._matplotlib.style import _get_standard_colors
# multiple colors like mediumaquamarine
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index 0b34fab7b80b1..088f8681feb99 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -11,7 +11,6 @@
from pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range
import pandas._testing as tm
-
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
@@ -166,6 +165,7 @@ def test_getitem_setitem_datetime_tz_pytz():
def test_getitem_setitem_datetime_tz_dateutil():
from dateutil.tz import tzutc
+
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
tz = (
diff --git a/pandas/tests/series/methods/test_asof.py b/pandas/tests/series/methods/test_asof.py
index 19caf4eccf748..4b4ef5ea046be 100644
--- a/pandas/tests/series/methods/test_asof.py
+++ b/pandas/tests/series/methods/test_asof.py
@@ -90,7 +90,7 @@ def test_with_nan(self):
tm.assert_series_equal(result, expected)
def test_periodindex(self):
- from pandas import period_range, PeriodIndex
+ from pandas import PeriodIndex, period_range
# array or list or dates
N = 50
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 5c8a0d224c4f9..ef2bafd4ea2ad 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -195,8 +195,8 @@ def test_add_with_duplicate_index(self):
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
- from decimal import Decimal
from datetime import date
+ from decimal import Decimal
s = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index e718a6b759963..b32c5e91af295 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -90,7 +90,7 @@ def test_statsmodels():
def test_scikit_learn(df):
sklearn = import_module("sklearn") # noqa
- from sklearn import svm, datasets
+ from sklearn import datasets, svm
digits = datasets.load_digits()
clf = svm.SVC(gamma=0.001, C=100.0)
diff --git a/pandas/util/_doctools.py b/pandas/util/_doctools.py
index f413490764124..3a8a1a3144269 100644
--- a/pandas/util/_doctools.py
+++ b/pandas/util/_doctools.py
@@ -53,8 +53,8 @@ def plot(self, left, right, labels=None, vertical: bool = True):
vertical : bool, default True
If True, use vertical layout. If False, use horizontal layout.
"""
- import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
+ import matplotlib.pyplot as plt
if not isinstance(left, list):
left = [left]
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 7bf3df176b378..c0dd77cd73ddc 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -11,7 +11,7 @@ cpplint
flake8<3.8.0
flake8-comprehensions>=3.1.0
flake8-rst>=0.6.0,<=0.7.0
-isort==4.3.21
+isort>=5.2.1
mypy==0.730
pycodestyle
gitpython
| - [x] closes #35134
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/35470 | 2020-07-29T19:49:12Z | 2020-08-01T08:43:02Z | 2020-08-01T08:43:01Z | 2020-08-01T13:24:54Z |
Backport PR #35272 on branch 1.1.x (CI: Unpin pytest) | diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-36-32bit.yaml
index 2dc53f8181ac4..15704cf0d5427 100644
--- a/ci/deps/azure-36-32bit.yaml
+++ b/ci/deps/azure-36-32bit.yaml
@@ -23,4 +23,4 @@ dependencies:
- pip
- pip:
- cython>=0.29.16
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml
index d31015fde4741..a9b9a5a47ccf5 100644
--- a/ci/deps/azure-36-locale.yaml
+++ b/ci/deps/azure-36-locale.yaml
@@ -7,7 +7,7 @@ dependencies:
# tools
- cython>=0.29.16
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-xdist>=1.21
- pytest-asyncio
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml
index 23121b985492e..c086b3651afc3 100644
--- a/ci/deps/azure-36-locale_slow.yaml
+++ b/ci/deps/azure-36-locale_slow.yaml
@@ -7,7 +7,7 @@ dependencies:
# tools
- cython>=0.29.16
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-xdist>=1.21
- hypothesis>=3.58.0
- pytest-azurepipelines
diff --git a/ci/deps/azure-36-slow.yaml b/ci/deps/azure-36-slow.yaml
index 0a6d1d13c8549..87bad59fa4873 100644
--- a/ci/deps/azure-36-slow.yaml
+++ b/ci/deps/azure-36-slow.yaml
@@ -7,7 +7,7 @@ dependencies:
# tools
- cython>=0.29.16
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-xdist>=1.21
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml
index 4dbb6a5344976..6f64c81f299d1 100644
--- a/ci/deps/azure-37-locale.yaml
+++ b/ci/deps/azure-37-locale.yaml
@@ -6,7 +6,7 @@ dependencies:
# tools
- cython>=0.29.16
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-xdist>=1.21
- pytest-asyncio
- hypothesis>=3.58.0
diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml
index 451fb5884a4af..5cb58756a6ac1 100644
--- a/ci/deps/azure-37-numpydev.yaml
+++ b/ci/deps/azure-37-numpydev.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.7.*
# tools
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-xdist>=1.21
- hypothesis>=3.58.0
- pytest-azurepipelines
diff --git a/ci/deps/azure-macos-36.yaml b/ci/deps/azure-macos-36.yaml
index 81a27465f9e61..eeea249a19ca1 100644
--- a/ci/deps/azure-macos-36.yaml
+++ b/ci/deps/azure-macos-36.yaml
@@ -5,7 +5,7 @@ dependencies:
- python=3.6.*
# tools
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-xdist>=1.21
- hypothesis>=3.58.0
- pytest-azurepipelines
diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml
index 4d7e1d821037b..548660cabaa67 100644
--- a/ci/deps/azure-windows-36.yaml
+++ b/ci/deps/azure-windows-36.yaml
@@ -7,7 +7,7 @@ dependencies:
# tools
- cython>=0.29.16
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-xdist>=1.21
- hypothesis>=3.58.0
- pytest-azurepipelines
diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml
index 34fca631df6c1..5bbd0e2795d7e 100644
--- a/ci/deps/azure-windows-37.yaml
+++ b/ci/deps/azure-windows-37.yaml
@@ -7,7 +7,7 @@ dependencies:
# tools
- cython>=0.29.16
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-xdist>=1.21
- hypothesis>=3.58.0
- pytest-azurepipelines
diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml
index 5f5ea8034cddf..177e0d3f4c0af 100644
--- a/ci/deps/travis-36-cov.yaml
+++ b/ci/deps/travis-36-cov.yaml
@@ -7,7 +7,7 @@ dependencies:
# tools
- cython>=0.29.16
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-xdist>=1.21
- hypothesis>=3.58.0
- pytest-cov # this is only needed in the coverage build
diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml
index 6bc4aba733ee5..03a1e751b6a86 100644
--- a/ci/deps/travis-36-locale.yaml
+++ b/ci/deps/travis-36-locale.yaml
@@ -7,7 +7,7 @@ dependencies:
# tools
- cython>=0.29.16
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-xdist>=1.21
- hypothesis>=3.58.0
diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/travis-37-arm64.yaml
index f434a03609b26..5cb53489be225 100644
--- a/ci/deps/travis-37-arm64.yaml
+++ b/ci/deps/travis-37-arm64.yaml
@@ -7,7 +7,7 @@ dependencies:
# tools
- cython>=0.29.13
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-xdist>=1.21
- hypothesis>=3.58.0
diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml
index aaf706d61fe5c..e896233aac63c 100644
--- a/ci/deps/travis-37.yaml
+++ b/ci/deps/travis-37.yaml
@@ -7,7 +7,7 @@ dependencies:
# tools
- cython>=0.29.16
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-xdist>=1.21
- hypothesis>=3.58.0
diff --git a/ci/deps/travis-38.yaml b/ci/deps/travis-38.yaml
index ac39a223cd086..b879c0f81dab2 100644
--- a/ci/deps/travis-38.yaml
+++ b/ci/deps/travis-38.yaml
@@ -7,7 +7,7 @@ dependencies:
# tools
- cython>=0.29.16
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-xdist>=1.21
- hypothesis>=3.58.0
diff --git a/environment.yml b/environment.yml
index 53222624619de..3b088ca511be9 100644
--- a/environment.yml
+++ b/environment.yml
@@ -52,7 +52,7 @@ dependencies:
- botocore>=1.11
- hypothesis>=3.82
- moto # mock S3
- - pytest>=5.0.1,<6.0.0rc0
+ - pytest>=5.0.1
- pytest-cov
- pytest-xdist>=1.21
- pytest-asyncio
diff --git a/pandas/_testing.py b/pandas/_testing.py
index fc6df7a95e348..1cf9304ed2715 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -9,7 +9,7 @@
from shutil import rmtree
import string
import tempfile
-from typing import Any, Callable, List, Optional, Type, Union, cast
+from typing import Any, Callable, ContextManager, List, Optional, Type, Union, cast
import warnings
import zipfile
@@ -2880,9 +2880,7 @@ def convert_rows_list_to_csv_str(rows_list: List[str]):
return expected
-def external_error_raised(
- expected_exception: Type[Exception],
-) -> Callable[[Type[Exception], None], None]:
+def external_error_raised(expected_exception: Type[Exception],) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 7e4513da37dc9..0d447a70b540d 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -1294,9 +1294,7 @@ def test_get_nonexistent_category():
)
-def test_series_groupby_on_2_categoricals_unobserved(
- reduction_func: str, observed: bool, request
-):
+def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed, request):
# GH 17605
if reduction_func == "ngroup":
pytest.skip("ngroup is not truly a reduction")
@@ -1326,7 +1324,7 @@ def test_series_groupby_on_2_categoricals_unobserved(
def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
- reduction_func: str, request
+ reduction_func, request
):
# GH 17605
# Tests whether the unobserved categories in the result contain 0 or NaN
@@ -1374,7 +1372,7 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
assert np.issubdtype(result.dtype, np.integer)
-def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func: str):
+def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func):
# GH 23865
# GH 27075
# Ensure that df.groupby, when 'by' is two pd.Categorical variables,
@@ -1402,7 +1400,7 @@ def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_fun
@pytest.mark.parametrize("observed", [False, None])
def test_dataframe_groupby_on_2_categoricals_when_observed_is_false(
- reduction_func: str, observed: bool, request
+ reduction_func, observed, request
):
# GH 23865
# GH 27075
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index a4a1d83177c50..bdf633839b2cd 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -120,7 +120,9 @@ def _skip_if_no_scipy() -> bool:
)
-def skip_if_installed(package: str) -> Callable:
+# TODO: return type, _pytest.mark.structures.MarkDecorator is not public
+# https://github.com/pytest-dev/pytest/issues/7469
+def skip_if_installed(package: str):
"""
Skip a test if a package is installed.
@@ -134,7 +136,9 @@ def skip_if_installed(package: str) -> Callable:
)
-def skip_if_no(package: str, min_version: Optional[str] = None) -> Callable:
+# TODO: return type, _pytest.mark.structures.MarkDecorator is not public
+# https://github.com/pytest-dev/pytest/issues/7469
+def skip_if_no(package: str, min_version: Optional[str] = None):
"""
Generic function to help skip tests when required packages are not
present on the testing system.
@@ -196,14 +200,12 @@ def skip_if_no(package: str, min_version: Optional[str] = None) -> Callable:
)
-def skip_if_np_lt(
- ver_str: str, reason: Optional[str] = None, *args, **kwds
-) -> Callable:
+# TODO: return type, _pytest.mark.structures.MarkDecorator is not public
+# https://github.com/pytest-dev/pytest/issues/7469
+def skip_if_np_lt(ver_str: str, *args, reason: Optional[str] = None):
if reason is None:
reason = f"NumPy {ver_str} or greater required"
- return pytest.mark.skipif(
- _np_version < LooseVersion(ver_str), reason=reason, *args, **kwds
- )
+ return pytest.mark.skipif(_np_version < LooseVersion(ver_str), *args, reason=reason)
def parametrize_fixture_doc(*args):
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 0c024d1b54637..7bf3df176b378 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -32,7 +32,7 @@ boto3
botocore>=1.11
hypothesis>=3.82
moto
-pytest>=5.0.1,<6.0.0rc0
+pytest>=5.0.1
pytest-cov
pytest-xdist>=1.21
pytest-asyncio
diff --git a/setup.cfg b/setup.cfg
index 00af7f6f1b79a..ee5725e36d193 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -105,7 +105,7 @@ known_dtypes = pandas.core.dtypes
known_post_core = pandas.tseries,pandas.io,pandas.plotting
sections = FUTURE,STDLIB,THIRDPARTY,PRE_LIBS,PRE_CORE,DTYPES,FIRSTPARTY,POST_CORE,LOCALFOLDER
known_first_party = pandas
-known_third_party = _pytest,announce,dateutil,docutils,flake8,git,hypothesis,jinja2,lxml,matplotlib,numpy,numpydoc,pkg_resources,pyarrow,pytest,pytz,requests,scipy,setuptools,sphinx,sqlalchemy,validate_docstrings,validate_unwanted_patterns,yaml,odf
+known_third_party = announce,dateutil,docutils,flake8,git,hypothesis,jinja2,lxml,matplotlib,numpy,numpydoc,pkg_resources,pyarrow,pytest,pytz,requests,scipy,setuptools,sphinx,sqlalchemy,validate_docstrings,validate_unwanted_patterns,yaml,odf
multi_line_output = 3
include_trailing_comma = True
force_grid_wrap = 0
| Backport PR #35272: CI: Unpin pytest | https://api.github.com/repos/pandas-dev/pandas/pulls/35469 | 2020-07-29T18:29:47Z | 2020-07-30T11:56:38Z | 2020-07-30T11:56:38Z | 2020-07-30T11:56:38Z |
CI: activate azure pipelines on 1.1.x | diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index e45cafc02cb61..113ad3e338952 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -1,9 +1,11 @@
# Adapted from https://github.com/numba/numba/blob/master/azure-pipelines.yml
trigger:
- master
+- 1.1.x
pr:
- master
+- 1.1.x
variables:
PYTEST_WORKERS: auto
| same as #35461 against master. so should be OK when backported.
| https://api.github.com/repos/pandas-dev/pandas/pulls/35468 | 2020-07-29T18:22:16Z | 2020-08-03T23:40:27Z | 2020-08-03T23:40:27Z | 2020-08-04T08:26:58Z |
CI: activate github actions on 1.1.x (PR only) | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index db1fc30111a2d..149acef72db26 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -4,7 +4,9 @@ on:
push:
branches: master
pull_request:
- branches: master
+ branches:
+ - master
+ - 1.1.x
env:
ENV_FILE: environment.yml
| same as #35459 against master. so should be OK when backported. | https://api.github.com/repos/pandas-dev/pandas/pulls/35467 | 2020-07-29T18:13:16Z | 2020-08-04T08:24:29Z | 2020-08-04T08:24:29Z | 2020-08-04T08:28:34Z |
CI: activate azure pipelines on 1.1.x - DO NOT MERGE | diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index e45cafc02cb61..113ad3e338952 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -1,9 +1,11 @@
# Adapted from https://github.com/numba/numba/blob/master/azure-pipelines.yml
trigger:
- master
+- 1.1.x
pr:
- master
+- 1.1.x
variables:
PYTEST_WORKERS: auto
| Azure pipelines worked for 1.0.x since #32706 which specified which branches to run on was not backported
If works. will open PR against master instead. and backport. | https://api.github.com/repos/pandas-dev/pandas/pulls/35461 | 2020-07-29T15:21:32Z | 2020-07-29T18:19:26Z | null | 2020-07-29T18:19:27Z |
CI: activate github actions on 1.1.x (PR only) - DO NOT MERGE | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index db1fc30111a2d..149acef72db26 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -4,7 +4,9 @@ on:
push:
branches: master
pull_request:
- branches: master
+ branches:
+ - master
+ - 1.1.x
env:
ENV_FILE: environment.yml
| IIUC without changes to other scripts, activating GitHub actions for merge would push the docs. so this is only for PRs
If works. will open PR against master instead. and backport.
xref https://github.com/pandas-dev/pandas/pull/34800#issuecomment-644669496 | https://api.github.com/repos/pandas-dev/pandas/pulls/35459 | 2020-07-29T14:54:36Z | 2020-07-29T18:14:29Z | null | 2020-07-29T18:16:00Z |
Backport PR #35452 on branch 1.1.x (DOC: Start 1.1.1) | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index ad5bb5a5b2d72..8ce10136dd2bb 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -16,6 +16,7 @@ Version 1.1
.. toctree::
:maxdepth: 2
+ v1.1.1
v1.1.0
Version 1.0
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
new file mode 100644
index 0000000000000..443589308ad4c
--- /dev/null
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -0,0 +1,54 @@
+.. _whatsnew_111:
+
+What's new in 1.1.1 (?)
+-----------------------
+
+These are the changes in pandas 1.1.1. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_111.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+
+-
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_111.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+
+**Datetimelike**
+
+-
+-
+
+**Numeric**
+
+-
+-
+
+**Plotting**
+
+-
+
+**Indexing**
+
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_111.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.1.0..v1.1.1|HEAD
| Backport PR #35452: DOC: Start 1.1.1 | https://api.github.com/repos/pandas-dev/pandas/pulls/35458 | 2020-07-29T13:46:11Z | 2020-07-29T14:35:14Z | 2020-07-29T14:35:13Z | 2020-07-29T14:46:57Z |
Add note about limited propagation of attrs | diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index 3b595ba5ab206..6bd4abe257cce 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -536,6 +536,10 @@ Metadata
.. warning:: ``Series.attrs`` is considered experimental and may change without warning.
+.. note:: ``Series.attrs`` is retained for the ``Series`` object alone. If the series
+ is placed into a ``DataFrame`` the ``attrs`` dictionary will not be propagated through
+ or retained in the ``DataFrame``.
+
.. autosummary::
:toctree: api/
| Explicitly mention that `attrs` are not retained once a series is placed into a dataframe, related to issue #35425 | https://api.github.com/repos/pandas-dev/pandas/pulls/35456 | 2020-07-29T13:09:59Z | 2020-11-30T05:57:05Z | null | 2020-11-30T05:57:06Z |
DOC: update Python support policy | diff --git a/doc/source/development/policies.rst b/doc/source/development/policies.rst
index 1031bbfc46457..a564afc408df9 100644
--- a/doc/source/development/policies.rst
+++ b/doc/source/development/policies.rst
@@ -52,6 +52,6 @@ Python support
~~~~~~~~~~~~~~
pandas will only drop support for specific Python versions (e.g. 3.6.x, 3.7.x) in
-pandas **major** releases.
+pandas **major** or **minor** releases.
.. _SemVer: https://semver.org
| xref https://github.com/pandas-dev/pandas/issues/34472#issuecomment-660695759 | https://api.github.com/repos/pandas-dev/pandas/pulls/35454 | 2020-07-29T12:28:46Z | 2020-07-29T15:43:11Z | 2020-07-29T15:43:11Z | 2020-07-29T18:06:02Z |
DOC: Add 1.1.1 whatsnew | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 17043b83f03df..a280a981c789b 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -24,6 +24,7 @@ Version 1.1
.. toctree::
:maxdepth: 2
+ v1.1.1
v1.1.0
Version 1.0
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
new file mode 100644
index 0000000000000..e8d313d99dcf3
--- /dev/null
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -0,0 +1,39 @@
+.. _whatsnew_111:
+
+What's new in 1.1.1 (August XX, 2020)
+--------------------------------------
+
+These are the changes in pandas 1.1.1. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_111.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_111.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+
+
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_111.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.1.0..v1.1.1|HEAD
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35453 | 2020-07-29T11:50:48Z | 2020-07-29T11:51:07Z | null | 2020-07-29T14:11:54Z |
DOC: Start 1.1.1 | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 17043b83f03df..a280a981c789b 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -24,6 +24,7 @@ Version 1.1
.. toctree::
:maxdepth: 2
+ v1.1.1
v1.1.0
Version 1.0
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
new file mode 100644
index 0000000000000..443589308ad4c
--- /dev/null
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -0,0 +1,54 @@
+.. _whatsnew_111:
+
+What's new in 1.1.1 (?)
+-----------------------
+
+These are the changes in pandas 1.1.1. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_111.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+
+-
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_111.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+
+**Datetimelike**
+
+-
+-
+
+**Numeric**
+
+-
+-
+
+**Plotting**
+
+-
+
+**Indexing**
+
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_111.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.1.0..v1.1.1|HEAD
| https://api.github.com/repos/pandas-dev/pandas/pulls/35452 | 2020-07-29T11:48:02Z | 2020-07-29T13:45:33Z | 2020-07-29T13:45:33Z | 2020-07-29T13:45:52Z | |
WEB: Fixing whatsnew link in the home page (version was hardcoded) | diff --git a/web/pandas/index.html b/web/pandas/index.html
index 83d0f48197033..75c797d6dd93d 100644
--- a/web/pandas/index.html
+++ b/web/pandas/index.html
@@ -63,7 +63,7 @@ <h5>With the support of:</h5>
{% if releases %}
<h4>Latest version: {{ releases[0].name }}</h4>
<ul>
- <li><a href="docs/whatsnew/v1.0.0.html">What's new in {{ releases[0].name }}</a></li>
+ <li><a href="docs/whatsnew/v{{ releases[0].name }}.html">What's new in {{ releases[0].name }}</a></li>
<li>Release date:<br/>{{ releases[0].published.strftime("%b %d, %Y") }}</li>
<li><a href="{{ base_url}}/docs/">Documentation (web)</a></li>
<li><a href="{{ base_url }}/docs/pandas.pdf">Documentation (pdf)</a></li>
| https://api.github.com/repos/pandas-dev/pandas/pulls/35451 | 2020-07-29T11:14:06Z | 2020-08-04T15:20:56Z | 2020-08-04T15:20:56Z | 2020-08-04T15:21:03Z | |
Updated chunksize docstring for DataFrame.to_csv() | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e46fde1f59f16..93a375182b7d0 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3052,8 +3052,8 @@ def to_csv(
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
- mode : str
- Python write mode, default 'w'.
+ mode : str, default 'w'
+ Python write mode.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
@@ -3092,8 +3092,9 @@ def to_csv(
this method is called ('\n' for linux, '\r\n' for Windows, i.e.).
.. versionchanged:: 0.24.0
- chunksize : int or None
+ chunksize : int or None, default None
Rows to write at a time.
+ If None then it's set to `100000 // (len(self.cols) or 1)) or 1`.
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
| Added the documentation for what happens if chunksize is left as None.
Is there a good way to make this DRY somehow? | https://api.github.com/repos/pandas-dev/pandas/pulls/35447 | 2020-07-29T08:31:46Z | 2020-08-08T11:37:08Z | null | 2020-08-08T11:37:08Z |
WIP: ENH: Add numba engine to groupby apply | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index ac45222625569..e8010f1216dd4 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -34,7 +34,7 @@ class providing the base-class of operations.
from pandas._config.config import option_context
-from pandas._libs import Timestamp
+from pandas._libs import Timestamp, lib
import pandas._libs.groupby as libgroupby
from pandas._typing import F, FrameOrSeries, FrameOrSeriesUnion, Scalar
from pandas.compat.numpy import function as nv
@@ -61,11 +61,11 @@ class providing the base-class of operations.
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
-from pandas.core.groupby import base, ops
+from pandas.core.groupby import base, numba_, ops
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
-from pandas.core.util.numba_ import maybe_use_numba
+from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, maybe_use_numba
_common_see_also = """
See Also
@@ -827,7 +827,12 @@ def __iter__(self):
input="dataframe", examples=_apply_docs["dataframe_examples"]
)
)
- def apply(self, func, *args, **kwargs):
+ def apply(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
+
+ if maybe_use_numba(engine):
+ return self._apply_with_numba(
+ func, *args, engine_kwargs=engine_kwargs, **kwargs
+ )
func = self._is_builtin_func(func)
@@ -871,6 +876,35 @@ def f(g):
return result
+ def _apply_with_numba(self, func, *args, engine_kwargs=None, **kwargs):
+ group_keys = self.grouper._get_group_keys()
+
+ with _group_selection_context(self):
+ # We always drop the column with the groupby key
+ data = self._selected_obj
+ labels, _, n_groups = self.grouper.group_info
+ sorted_index = get_group_index_sorter(labels, n_groups)
+ sorted_labels = algorithms.take_nd(labels, sorted_index, allow_fill=False)
+ sorted_data = data.take(sorted_index, axis=self.axis)
+ starts, ends = lib.generate_slices(sorted_labels, n_groups)
+ cache_key = (func, "groupby_apply")
+ if cache_key in NUMBA_FUNC_CACHE:
+ # Return an already compiled version of roll_apply if available
+ apply_func = NUMBA_FUNC_CACHE[cache_key]
+ else:
+ apply_func = numba_.generate_numba_apply_func(
+ tuple(args), kwargs, func, engine_kwargs
+ )
+ result = apply_func(
+ sorted_data.to_numpy(), starts, ends, len(group_keys), len(data.columns)
+ )
+
+ if self.grouper.nkeys > 1:
+ index = MultiIndex.from_tuples(group_keys, names=self.grouper.names)
+ else:
+ index = Index(group_keys, name=self.grouper.names[0])
+ return self.obj._constructor(result, index=index, columns=data.columns)
+
def _python_apply_general(
self, f: F, data: FrameOrSeriesUnion
) -> FrameOrSeriesUnion:
diff --git a/pandas/core/groupby/numba_.py b/pandas/core/groupby/numba_.py
new file mode 100644
index 0000000000000..6ba3659985f6b
--- /dev/null
+++ b/pandas/core/groupby/numba_.py
@@ -0,0 +1,73 @@
+from typing import Any, Callable, Dict, Optional, Tuple
+
+import numpy as np
+
+from pandas._typing import Scalar
+from pandas.compat._optional import import_optional_dependency
+
+from pandas.core.util.numba_ import (
+ check_kwargs_and_nopython,
+ get_jit_arguments,
+ jit_user_function,
+)
+
+
+def generate_numba_apply_func(
+ args: Tuple,
+ kwargs: Dict[str, Any],
+ func: Callable[..., Scalar],
+ engine_kwargs: Optional[Dict[str, bool]],
+):
+ """
+ Generate a numba jitted apply function specified by values from engine_kwargs.
+
+ 1. jit the user's function
+ 2. Return a rolling apply function with the jitted function inline
+
+ Configurations specified in engine_kwargs apply to both the user's
+ function _AND_ the rolling apply function.
+
+ Parameters
+ ----------
+ args : tuple
+ *args to be passed into the function
+ kwargs : dict
+ **kwargs to be passed into the function
+ func : function
+ function to be applied to each window and will be JITed
+ engine_kwargs : dict
+ dictionary of arguments to be passed into numba.jit
+
+ Returns
+ -------
+ Numba function
+ """
+ nopython, nogil, parallel = get_jit_arguments(engine_kwargs)
+
+ check_kwargs_and_nopython(kwargs, nopython)
+
+ numba_func = jit_user_function(func, nopython, nogil, parallel)
+
+ numba = import_optional_dependency("numba")
+
+ if parallel:
+ loop_range = numba.prange
+ else:
+ loop_range = range
+
+ @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
+ def group_apply(
+ values: np.ndarray,
+ begin: np.ndarray,
+ end: np.ndarray,
+ num_groups: int,
+ num_columns: int,
+ ) -> np.ndarray:
+ result = np.empty((num_groups, num_columns))
+ for i in loop_range(num_groups):
+ for j in loop_range(num_columns):
+ group = values[begin[i] : end[i], j]
+ result[i, j] = numba_func(group, *args)
+ return result
+
+ return group_apply
| - [x] closes #31845
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Some notes:
- The passed function must be a reduction op
- The numba engine will drop the grouping column by default
- Can only operate on numeric data and will return float64
Preliminary default timing:
```
(pandas-dev) matthewroeschke:pandas-mroeschke matthewroeschke$ ipython
Python 3.8.3 | packaged by conda-forge | (default, Jun 1 2020, 17:21:09)
Type 'copyright', 'credits' or 'license' for more information
IPython 7.16.1 -- An enhanced Interactive Python. Type '?' for help.
In [1]: df_g = pd.DataFrame({'a': range(10**4), 'b': range(10**4), 'c': range(10**4)})
In [2]: def f(x):
...: return np.sum(x) + 1
...:
In [3]: df_g.groupby('a').apply(f)
Out[3]:
a b c
a
0 1 1 1
1 2 2 2
2 3 3 3
3 4 4 4
4 5 5 5
... ... ... ...
9995 9996 9996 9996
9996 9997 9997 9997
9997 9998 9998 9998
9998 9999 9999 9999
9999 10000 10000 10000
[10000 rows x 3 columns]
In [4]: %timeit df_g.groupby('a').apply(f)
3.07 s ± 57.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
In [5]: df_g.groupby('a').apply(f, engine='numba', engine_kwargs={'parallel': True})
Out[5]:
0 1
0 1.0 1.0
1 2.0 2.0
2 3.0 3.0
3 4.0 4.0
4 5.0 5.0
... ... ...
9995 9996.0 9996.0
9996 9997.0 9997.0
9997 9998.0 9998.0
9998 9999.0 9999.0
9999 10000.0 10000.0
[10000 rows x 2 columns]
In [6]: %timeit df_g.groupby('a').apply(f, engine='numba', engine_kwargs={'parallel': True})
510 ms ± 3.46 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/35445 | 2020-07-29T04:42:05Z | 2020-08-16T07:05:30Z | null | 2020-11-12T05:15:20Z |
BUG: Attributes are lost when subsetting columns in groupby | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 55570341cf4e8..1617bf66c4f04 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -255,6 +255,8 @@ Groupby/resample/rolling
- Bug when combining methods :meth:`DataFrame.groupby` with :meth:`DataFrame.resample` and :meth:`DataFrame.interpolate` raising an ``TypeError`` (:issue:`35325`)
- Bug in :meth:`DataFrameGroupBy.apply` where a non-nuisance grouping column would be dropped from the output columns if another groupby method was called before ``.apply()`` (:issue:`34656`)
- Bug in :meth:`DataFrameGroupby.apply` would drop a :class:`CategoricalIndex` when grouped on. (:issue:`35792`)
+- Bug when subsetting columns on a :class:`~pandas.core.groupby.DataFrameGroupBy` (e.g. ``df.groupby('a')[['b']])``) would reset the attributes ``axis``, ``dropna``, ``group_keys``, ``level``, ``mutated``, ``sort``, and ``squeeze`` to their default values. (:issue:`9959`)
+-
Reshaping
^^^^^^^^^
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index e39464628ccaa..7b45a114e548b 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1603,17 +1603,32 @@ def _gotitem(self, key, ndim: int, subset=None):
return DataFrameGroupBy(
subset,
self.grouper,
- selection=key,
+ axis=self.axis,
+ level=self.level,
grouper=self.grouper,
exclusions=self.exclusions,
+ selection=key,
as_index=self.as_index,
+ sort=self.sort,
+ group_keys=self.group_keys,
+ squeeze=self.squeeze,
observed=self.observed,
+ mutated=self.mutated,
+ dropna=self.dropna,
)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(
- subset, selection=key, grouper=self.grouper, observed=self.observed
+ subset,
+ level=self.level,
+ grouper=self.grouper,
+ selection=key,
+ sort=self.sort,
+ group_keys=self.group_keys,
+ squeeze=self.squeeze,
+ observed=self.observed,
+ dropna=self.dropna,
)
raise AssertionError("invalid ndim for _gotitem")
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 8c51ebf89f5c0..c743058c988b4 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2069,3 +2069,45 @@ def test_group_on_two_row_multiindex_returns_one_tuple_key():
assert len(result) == 1
key = (1, 2)
assert (result[key] == expected[key]).all()
+
+
+@pytest.mark.parametrize(
+ "klass, attr, value",
+ [
+ (DataFrame, "axis", 1),
+ (DataFrame, "level", "a"),
+ (DataFrame, "as_index", False),
+ (DataFrame, "sort", False),
+ (DataFrame, "group_keys", False),
+ (DataFrame, "squeeze", True),
+ (DataFrame, "observed", True),
+ (DataFrame, "dropna", False),
+ pytest.param(
+ Series,
+ "axis",
+ 1,
+ marks=pytest.mark.xfail(
+ reason="GH 35443: Attribute currently not passed on to series"
+ ),
+ ),
+ (Series, "level", "a"),
+ (Series, "as_index", False),
+ (Series, "sort", False),
+ (Series, "group_keys", False),
+ (Series, "squeeze", True),
+ (Series, "observed", True),
+ (Series, "dropna", False),
+ ],
+)
+@pytest.mark.filterwarnings(
+ "ignore:The `squeeze` parameter is deprecated:FutureWarning"
+)
+def test_subsetting_columns_keeps_attrs(klass, attr, value):
+ # GH 9959 - When subsetting columns, don't drop attributes
+ df = pd.DataFrame({"a": [1], "b": [2], "c": [3]})
+ if attr != "axis":
+ df = df.set_index("a")
+
+ expected = df.groupby("a", **{attr: value})
+ result = expected[["b"]] if klass is DataFrame else expected["b"]
+ assert getattr(result, attr) == getattr(expected, attr)
| - [x] closes #9959
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Avoiding the behavior of Series here, e.g. `df.groupby('a')['b']` as I think that will involve some API changes. Will follow up: ref #35443 | https://api.github.com/repos/pandas-dev/pandas/pulls/35444 | 2020-07-29T00:46:49Z | 2020-08-31T22:31:08Z | 2020-08-31T22:31:07Z | 2020-08-31T22:37:50Z |
BUG: GroupBy.apply() throws erroneous ValueError with duplicate axes | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index b16ca0a80c5b4..74c683638c654 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -132,6 +132,7 @@ Plotting
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
+- Bug in :meth:`DataFrameGroupBy.apply` that would some times throw an erroneous ``ValueError`` if the grouping axis had duplicate entries (:issue:`16646`)
-
-
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 3aaeef3b63760..2cb4674b2e33a 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -211,7 +211,7 @@ def apply(self, f: F, data: FrameOrSeries, axis: int = 0):
# group might be modified
group_axes = group.axes
res = f(group)
- if not _is_indexed_like(res, group_axes):
+ if not _is_indexed_like(res, group_axes, axis):
mutated = True
result_values.append(res)
@@ -897,13 +897,13 @@ def agg_series(
return grouper.get_result()
-def _is_indexed_like(obj, axes) -> bool:
+def _is_indexed_like(obj, axes, axis: int) -> bool:
if isinstance(obj, Series):
if len(axes) > 1:
return False
- return obj.index.equals(axes[0])
+ return obj.axes[axis].equals(axes[axis])
elif isinstance(obj, DataFrame):
- return obj.index.equals(axes[0])
+ return obj.axes[axis].equals(axes[axis])
return False
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 525a6fe2637c3..665cd12225ad7 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -63,15 +63,8 @@ def test_apply_trivial():
tm.assert_frame_equal(result, expected)
-@pytest.mark.xfail(
- reason="GH#20066; function passed into apply "
- "returns a DataFrame with the same index "
- "as the one to create GroupBy object."
-)
def test_apply_trivial_fail():
# GH 20066
- # trivial apply fails if the constant dataframe has the same index
- # with the one used to create GroupBy object.
df = pd.DataFrame(
{"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
columns=["key", "data"],
@@ -1044,3 +1037,23 @@ def test_apply_with_date_in_multiindex_does_not_convert_to_timestamp():
tm.assert_frame_equal(result, expected)
for val in result.index.levels[1]:
assert type(val) is date
+
+
+def test_apply_by_cols_equals_apply_by_rows_transposed():
+ # GH 16646
+ # Operating on the columns, or transposing and operating on the rows
+ # should give the same result. There was previously a bug where the
+ # by_rows operation would work fine, but by_cols would throw a ValueError
+
+ df = pd.DataFrame(
+ np.random.random([6, 4]),
+ columns=pd.MultiIndex.from_product([["A", "B"], [1, 2]]),
+ )
+
+ by_rows = df.T.groupby(axis=0, level=0).apply(
+ lambda x: x.droplevel(axis=0, level=0)
+ )
+ by_cols = df.groupby(axis=1, level=0).apply(lambda x: x.droplevel(axis=1, level=0))
+
+ tm.assert_frame_equal(by_cols, by_rows.T)
+ tm.assert_frame_equal(by_cols, df)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index cbfba16223f74..42945be923fa0 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -940,10 +940,6 @@ def test_frame_describe_multikey(tsframe):
groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1)
result = groupedT.describe()
expected = tsframe.describe().T
- expected.index = pd.MultiIndex(
- levels=[[0, 1], expected.index],
- codes=[[0, 0, 1, 1], range(len(expected.index))],
- )
tm.assert_frame_equal(result, expected)
| - [x] closes #16646
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
*Behavioural Change*
`GroupBy.apply` would sometimes throw an erroneous `ValueError: cannot reindex from duplicate axis` because when it checked if the output was mutated from its original shape it would only check the index, when sometimes it should check the columns.
*Tests*
One new test added on `test_apply.py` which fails on master because of the bug; passes with this PR. One existing test was marked *xfail* but the *xfail* is now removed. One existing test previously had to manipulate the expected index, but that is no longer necessary.
All of the copy-pasteable examples from #16646 are fixed with this PR. | https://api.github.com/repos/pandas-dev/pandas/pulls/35441 | 2020-07-28T23:02:37Z | 2020-08-06T23:29:22Z | 2020-08-06T23:29:22Z | 2020-08-07T23:29:39Z |
BUG: CategoricalIndex.format | diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 443589308ad4c..815ce2c4c2905 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -26,6 +26,13 @@ Fixed regressions
Bug fixes
~~~~~~~~~
+
+Categorical
+^^^^^^^^^^^
+
+- Bug in :meth:`CategoricalIndex.format` where, when stringified scalars had different lengths, the shorter string would be right-filled with spaces, so it had the same length as the longest string (:issue:`35439`)
+
+
**Datetimelike**
-
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index b0b008de69a94..74b235655e345 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -20,7 +20,7 @@
pandas_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
-from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
+from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna
from pandas.core import accessor
from pandas.core.algorithms import take_1d
@@ -348,12 +348,12 @@ def _format_attrs(self):
return attrs
def _format_with_header(self, header, na_rep="NaN") -> List[str]:
- from pandas.io.formats.format import format_array
+ from pandas.io.formats.printing import pprint_thing
- formatted_values = format_array(
- self._values, formatter=None, na_rep=na_rep, justify="left"
- )
- result = ibase.trim_front(formatted_values)
+ result = [
+ pprint_thing(x, escape_chars=("\t", "\r", "\n")) if notna(x) else na_rep
+ for x in self._values
+ ]
return header + result
# --------------------------------------------------------------------
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index e5e98039ff77b..eee610681087d 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -1,7 +1,7 @@
from datetime import timedelta
import operator
from sys import getsizeof
-from typing import Any, List, Optional
+from typing import Any, Optional
import warnings
import numpy as np
@@ -33,8 +33,6 @@
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops.common import unpack_zerodim_and_defer
-from pandas.io.formats.printing import pprint_thing
-
_empty_range = range(0)
@@ -197,9 +195,6 @@ def _format_data(self, name=None):
# we are formatting thru the attributes
return None
- def _format_with_header(self, header, na_rep="NaN") -> List[str]:
- return header + [pprint_thing(x) for x in self._range]
-
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index 7f30a77872bc1..8af26eef504fc 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -478,3 +478,9 @@ def test_reindex_base(self):
def test_map_str(self):
# See test_map.py
pass
+
+ def test_format_different_scalar_lengths(self):
+ # GH35439
+ idx = CategoricalIndex(["aaaaaaaaa", "b"])
+ expected = ["aaaaaaaaa", "b"]
+ assert idx.format() == expected
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index f5b9f4a401e60..3b41c4bfacf73 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -642,6 +642,12 @@ def test_equals_op(self):
tm.assert_numpy_array_equal(index_a == item, expected3)
tm.assert_series_equal(series_a == item, Series(expected3))
+ def test_format(self):
+ # GH35439
+ idx = self.create_index()
+ expected = [str(x) for x in idx]
+ assert idx.format() == expected
+
def test_hasnans_isnans(self, index):
# GH 11343, added tests for hasnans / isnans
if isinstance(index, MultiIndex):
diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py
index 7345ae3032463..a5abf2946feda 100644
--- a/pandas/tests/indexes/datetimes/test_datetimelike.py
+++ b/pandas/tests/indexes/datetimes/test_datetimelike.py
@@ -20,6 +20,12 @@ def index(self, request):
def create_index(self) -> DatetimeIndex:
return date_range("20130101", periods=5)
+ def test_format(self):
+ # GH35439
+ idx = self.create_index()
+ expected = [f"{x:%Y-%m-%d}" for x in idx]
+ assert idx.format() == expected
+
def test_shift(self):
pass # handled in test_ops
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index eaf48421dc071..59ee88117a984 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1171,8 +1171,11 @@ def test_summary_bug(self):
assert "~:{range}:0" in result
assert "{other}%s" in result
- def test_format(self, index):
- self._check_method_works(Index.format, index)
+ def test_format_different_scalar_lengths(self):
+ # GH35439
+ idx = Index(["aaaaaaaaa", "b"])
+ expected = ["aaaaaaaaa", "b"]
+ assert idx.format() == expected
def test_format_bug(self):
# GH 14626
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index a7c5734ef9b02..bfcac5d433d2c 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -21,6 +21,13 @@ def test_can_hold_identifiers(self):
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is False
+ def test_format(self):
+ # GH35439
+ idx = self.create_index()
+ max_width = max(len(str(x)) for x in idx)
+ expected = [str(x).ljust(max_width) for x in idx]
+ assert idx.format() == expected
+
def test_numeric_compat(self):
pass # override Base method
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index e236b3da73c69..84805d06df4a8 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -2141,6 +2141,15 @@ def test_dict_entries(self):
assert "'a': 1" in val
assert "'b': 2" in val
+ def test_categorical_columns(self):
+ # GH35439
+ data = [[4, 2], [3, 2], [4, 3]]
+ cols = ["aaaaaaaaa", "b"]
+ df = pd.DataFrame(data, columns=cols)
+ df_cat_cols = pd.DataFrame(data, columns=pd.CategoricalIndex(cols))
+
+ assert df.to_string() == df_cat_cols.to_string()
+
def test_period(self):
# GH 12615
df = pd.DataFrame(
| - [x] closes #35439
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I've temporarily put the whatsnewentry in the v.1.1.0 release note, because there isn't a v.1.1.1 version yet. I'll move it, before this is merged. | https://api.github.com/repos/pandas-dev/pandas/pulls/35440 | 2020-07-28T22:28:20Z | 2020-08-03T23:48:55Z | 2020-08-03T23:48:55Z | 2020-08-04T09:55:01Z |
BUG: tuple-of-tuples indexing results in NumPy VisibleDeprecationWarning | diff --git a/pandas/core/common.py b/pandas/core/common.py
index e7260a9923ee0..1d150f5d744df 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -138,9 +138,12 @@ def is_bool_indexer(key: Any) -> bool:
return True
elif isinstance(key, list):
try:
- arr = np.asarray(key)
+ # https://github.com/pandas-dev/pandas/issues/35434
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", category=np.VisibleDeprecationWarning)
+ arr = np.asarray(key)
return arr.dtype == np.bool_ and len(arr) == len(key)
- except TypeError: # pragma: no cover
+ except (TypeError, ValueError): # pragma: no cover
return False
return False
@@ -225,7 +228,15 @@ def asarray_tuplesafe(values, dtype=None):
if isinstance(values, list) and dtype in [np.object_, object]:
return construct_1d_object_array_from_listlike(values)
- result = np.asarray(values, dtype=dtype)
+ try:
+ # https://github.com/pandas-dev/pandas/issues/35434
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", category=np.VisibleDeprecationWarning)
+ result = np.asarray(values, dtype=dtype)
+ except ValueError:
+ # we get here with a list-like of nested values if dtype=None
+ # for numpy < 1.18
+ return construct_1d_object_array_from_listlike(values)
if issubclass(result.dtype.type, str):
result = np.asarray(values, dtype=object)
diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py
index d9aa02db3e42a..8fa89ef2f475a 100644
--- a/pandas/core/indexers.py
+++ b/pandas/core/indexers.py
@@ -17,6 +17,41 @@
)
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
+from pandas.core.dtypes.common import is_scalar, is_iterator
+import pandas.core.common as com
+
+
+class Indexer:
+ _is_iterator = None
+ _is_bool_indexer = None
+
+ def __init__(self, key):
+ if isinstance(key, (list, tuple)):
+ key = unpack_1tuple(key)
+ self.key = key
+
+ @property
+ def is_scalar(self):
+ return is_scalar(self.key)
+
+ @property
+ def is_bool_indexer(self):
+ is_bool_indexer = self._is_bool_indexer
+ if is_bool_indexer is not None:
+ return is_bool_indexer
+
+ key = self.key
+ if self._is_iterator is None:
+ if is_iterator(key):
+ key = list(key)
+ self.key = key
+ self._is_iterator = False
+
+ is_bool_indexer = com.is_bool_indexer(key)
+ self._is_bool_indexer = is_bool_indexer
+ return is_bool_indexer
+
+
# -----------------------------------------------------------
# Indexer Identification
diff --git a/pandas/core/series.py b/pandas/core/series.py
index ef3be854bc3bb..c5a7506611287 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -50,7 +50,6 @@
is_dict_like,
is_extension_array_dtype,
is_integer,
- is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
@@ -78,7 +77,7 @@
sanitize_array,
)
from pandas.core.generic import NDFrame
-from pandas.core.indexers import deprecate_ndim_indexing, unpack_1tuple
+from pandas.core.indexers import deprecate_ndim_indexing, unpack_1tuple, Indexer
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
from pandas.core.indexes.api import Float64Index, Index, MultiIndex, ensure_index
import pandas.core.indexes.base as ibase
@@ -897,17 +896,23 @@ def __getitem__(self, key):
# in the first level of our MultiIndex
return self._get_values_tuple(key)
- if is_iterator(key):
- key = list(key)
+ # if is_iterator(key):
+ # key = list(key)
+
+ _key = Indexer(key)
- if com.is_bool_indexer(key):
+ if _key.is_bool_indexer:
key = check_bool_indexer(self.index, key)
key = np.asarray(key, dtype=bool)
return self._get_values(key)
- return self._get_with(key)
+ return self._get_with(_key)
def _get_with(self, key):
+ # breakpoint()
+ _key = key
+ key = _key.key
+
# other: fancy integer or otherwise
if isinstance(key, slice):
# _convert_slice_indexer to determine if this slice is positional
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 5b7f013d5de31..9e87faadf446b 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -1110,3 +1110,12 @@ def test_setitem_categorical():
{"h": pd.Categorical(["m", "n"]).reorder_categories(["n", "m"])}
)
tm.assert_frame_equal(df, expected)
+
+
+def test_nested_tuple_no_warning():
+ # https://github.com/pandas-dev/pandas/issues/35434
+ tup = "A", ("B", 2)
+ ser = pd.Series([42], index=[tup])
+ with tm.assert_produces_warning(None):
+ result = ser[[tup]]
+ tm.assert_series_equal(result, ser)
| - [ ] closes #35434
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35437 | 2020-07-28T14:53:24Z | 2020-07-29T19:35:06Z | null | 2020-07-29T19:35:07Z |
DOC: 1.1.0 release date | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 04a816b50103c..a49b29d691692 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1,7 +1,7 @@
.. _whatsnew_110:
-What's new in 1.1.0 (??)
-------------------------
+What's new in 1.1.0 (July 28, 2020)
+-----------------------------------
These are the changes in pandas 1.1.0. See :ref:`release` for a full changelog
including other versions of pandas.
| https://api.github.com/repos/pandas-dev/pandas/pulls/35435 | 2020-07-28T11:56:29Z | 2020-07-28T13:11:27Z | 2020-07-28T13:11:27Z | 2020-07-28T13:11:31Z | |
CLN/PERF: move RangeIndex._cached_data to RangeIndex._cache | diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index eee610681087d..1dc4fc1e91462 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -1,7 +1,7 @@
from datetime import timedelta
import operator
from sys import getsizeof
-from typing import Any, Optional
+from typing import Any
import warnings
import numpy as np
@@ -78,8 +78,6 @@ class RangeIndex(Int64Index):
_engine_type = libindex.Int64Engine
_range: range
- # check whether self._data has been called
- _cached_data: Optional[np.ndarray] = None
# --------------------------------------------------------------------
# Constructors
@@ -150,20 +148,14 @@ def _constructor(self):
""" return the class to use for construction """
return Int64Index
- @property
+ @cache_readonly
def _data(self):
"""
An int array that for performance reasons is created only when needed.
- The constructed array is saved in ``_cached_data``. This allows us to
- check if the array has been created without accessing ``_data`` and
- triggering the construction.
+ The constructed array is saved in ``_cache``.
"""
- if self._cached_data is None:
- self._cached_data = np.arange(
- self.start, self.stop, self.step, dtype=np.int64
- )
- return self._cached_data
+ return np.arange(self.start, self.stop, self.step, dtype=np.int64)
@cache_readonly
def _int64index(self) -> Int64Index:
diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py
index 5b6f9cb358b7d..ef4bb9a0869b0 100644
--- a/pandas/tests/indexes/ranges/test_range.py
+++ b/pandas/tests/indexes/ranges/test_range.py
@@ -137,53 +137,58 @@ def test_dtype(self):
index = self.create_index()
assert index.dtype == np.int64
- def test_cached_data(self):
- # GH 26565, GH26617
- # Calling RangeIndex._data caches an int64 array of the same length at
- # self._cached_data. This test checks whether _cached_data has been set
+ def test_cache(self):
+ # GH 26565, GH26617, GH35432
+ # This test checks whether _cache has been set.
+ # Calling RangeIndex._cache["_data"] creates an int64 array of the same length
+ # as the RangeIndex and stores it in _cache.
idx = RangeIndex(0, 100, 10)
- assert idx._cached_data is None
+ assert idx._cache == {}
repr(idx)
- assert idx._cached_data is None
+ assert idx._cache == {}
str(idx)
- assert idx._cached_data is None
+ assert idx._cache == {}
idx.get_loc(20)
- assert idx._cached_data is None
+ assert idx._cache == {}
- 90 in idx
- assert idx._cached_data is None
+ 90 in idx # True
+ assert idx._cache == {}
- 91 in idx
- assert idx._cached_data is None
+ 91 in idx # False
+ assert idx._cache == {}
idx.all()
- assert idx._cached_data is None
+ assert idx._cache == {}
idx.any()
- assert idx._cached_data is None
+ assert idx._cache == {}
df = pd.DataFrame({"a": range(10)}, index=idx)
df.loc[50]
- assert idx._cached_data is None
+ assert idx._cache == {}
with pytest.raises(KeyError, match="51"):
df.loc[51]
- assert idx._cached_data is None
+ assert idx._cache == {}
df.loc[10:50]
- assert idx._cached_data is None
+ assert idx._cache == {}
df.iloc[5:10]
- assert idx._cached_data is None
+ assert idx._cache == {}
- # actually calling idx._data
+ # idx._cache should contain a _data entry after call to idx._data
+ idx._data
assert isinstance(idx._data, np.ndarray)
- assert isinstance(idx._cached_data, np.ndarray)
+ assert idx._data is idx._data # check cached value is reused
+ assert len(idx._cache) == 4
+ expected = np.arange(0, 100, 10, dtype="int64")
+ tm.assert_numpy_array_equal(idx._cache["_data"], expected)
def test_is_monotonic(self):
index = RangeIndex(0, 20, 2)
| The ``._cached_data`` attribute is not necessary. It was originally added to allow a check to see if the ``._data`` ndarray had been created, but that's also possible to do by a ``"_data" in _cache`` check in the new implemention, which IMO would be more idiomatic.
The new implementation has the benefit that the ``_data`` will be available to new copies of a RangeIndex, saving the need to create a new ndarray for each new copy of the RangeIndex.
```python
>>> idx = pd.RangeIndex(1_000_000)
>>> idx[[1, 4]] # this accesses ._data and saves it in cached_data (master) or _cache["_data"](this PR)
>> %timeit idx._shallow_copy()[[1, 4]]
2.55 ms ± 69.3 µs per loop # master
17.7 µs ± 405 ns per loop # this PR
```
xref #26565.
| https://api.github.com/repos/pandas-dev/pandas/pulls/35432 | 2020-07-28T06:27:36Z | 2020-08-04T07:51:52Z | 2020-08-04T07:51:52Z | 2020-08-04T10:20:54Z |
WIP BUG: Inconsistent date parsing of to_datetime | diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 8429aebbd85b8..c01017f6fbd47 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -3,6 +3,7 @@ Parsing functions for datetime and datetime-like strings.
"""
import re
import time
+import warnings
from libc.string cimport strchr
@@ -154,14 +155,28 @@ cdef inline object _parse_delimited_date(str date_string, bint dayfirst):
# date_string can't be converted to date, above format
return None, None
+ swapped_day_and_month = False
if 1 <= month <= MAX_DAYS_IN_MONTH and 1 <= day <= MAX_DAYS_IN_MONTH \
and (month <= MAX_MONTH or day <= MAX_MONTH):
if (month > MAX_MONTH or (day <= MAX_MONTH and dayfirst)) and can_swap:
day, month = month, day
+ swapped_day_and_month = True
if PY_VERSION_HEX >= 0x03060100:
# In Python <= 3.6.0 there is no range checking for invalid dates
# in C api, thus we call faster C version for 3.6.1 or newer
+
+ if dayfirst and not swapped_day_and_month:
+ warnings.warn(f"Parsing '{date_string}' in MM/DD/YYYY format.")
+ elif not dayfirst and swapped_day_and_month:
+ warnings.warn(f"Parsing '{date_string}' in DD/MM/YYYY format.")
+
return datetime_new(year, month, day, 0, 0, 0, 0, None), reso
+
+ if dayfirst and not swapped_day_and_month:
+ warnings.warn(f"Parsing '{date_string}' in MM/DD/YYYY format.")
+ elif not dayfirst and swapped_day_and_month:
+ warnings.warn(f"Parsing '{date_string}' in DD/MM/YYYY format.")
+
return datetime(year, month, day, 0, 0, 0, 0, None), reso
raise DateParseError(f"Invalid date specified ({month}/{day})")
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index d2049892705ea..471e83b11b035 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -1752,6 +1752,60 @@ def test_dayfirst(self, cache):
tm.assert_index_equal(expected, idx5)
tm.assert_index_equal(expected, idx6)
+ def test_dayfirst_warnings(self):
+ # GH 12585
+
+ # CASE 1: valid input
+ arr = ["31/12/2014", "10/03/2011"]
+ expected = DatetimeIndex(
+ ["2014-12-31", "2011-03-10"], dtype="datetime64[ns]", freq=None
+ )
+
+ # A. dayfirst arg correct, no warning
+ res1 = to_datetime(arr, dayfirst=True)
+ tm.assert_index_equal(expected, res1)
+
+ # B. dayfirst arg incorrect, warning + incorrect output
+ res2 = to_datetime(arr, dayfirst=False)
+ with pytest.raises(AssertionError):
+ tm.assert_index_equal(expected, res2)
+
+ # C. dayfirst default arg, same as B
+ res3 = to_datetime(arr, dayfirst=False)
+ with pytest.raises(AssertionError):
+ tm.assert_index_equal(expected, res3)
+
+ # D. infer_datetime_format=True overrides dayfirst default
+ # no warning + correct result
+ res4 = to_datetime(arr, infer_datetime_format=True)
+ tm.assert_index_equal(expected, res4)
+
+ # CASE 2: invalid input
+ # cannot consistently process with single format
+ # warnings *always* raised
+
+ arr = ["31/12/2014", "03/30/2011"]
+ # first in DD/MM/YYYY, second in MM/DD/YYYY
+ expected = DatetimeIndex(
+ ["2014-12-31", "2011-03-30"], dtype="datetime64[ns]", freq=None
+ )
+
+ # A. use dayfirst=True
+ res5 = to_datetime(arr, dayfirst=True)
+ tm.assert_index_equal(expected, res5)
+
+ # B. use dayfirst=False
+ res6 = to_datetime(arr, dayfirst=False)
+ tm.assert_index_equal(expected, res6)
+
+ # C. use dayfirst default arg, same as B
+ res7 = to_datetime(arr, dayfirst=False)
+ tm.assert_index_equal(expected, res7)
+
+ # D. use infer_datetime_format=True
+ res8 = to_datetime(arr, infer_datetime_format=True)
+ tm.assert_index_equal(expected, res8)
+
@pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray])
def test_to_datetime_dta_tz(self, klass):
# GH#27733
| - [x] closes #12585
- [x] tests added / passed
- [ ] documentation updated
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry (pending 1.2)
This PR is an attempt to address concerns about datetime parsing. I'll update the docs if people approve this change.
#### Original problem
From #12585:
``` python
In [3]: pd.to_datetime(["31/12/2014", "10/03/2011"])
...:
Out[3]: DatetimeIndex(['2014-12-31', '2011-03-10'], dtype='datetime64[ns]', freq=None)
```
The issue is that the first item is parsed as `DD\MM\YYYY` whereas for the second the format is `MM\DD\YYYY`, and there is no message to alert the user of inconsistency in the output.
The problem lies with the `dayfirst` argument that selects between `DD\MM\YYYY` and `MM\DD\YYYY`. In the parser:
https://github.com/pandas-dev/pandas/blob/04e9e0afd476b1b8bed930e47bf60ee20fa78f38/pandas/_libs/tslibs/parsing.pyx#L94-L163
we try `DD\MM\YYYY` or `MM\DD\YYYY` first depending on `dayfirst` but if the result is invalid we try the other.
#### Proposed change
I'd like to raise warnings whenever we parse in contradiction to the directive of `dayfirst`. The OP, where `dayfirst` defaults to `False`, now raises a Warning:
``` python
In [4]: pd.to_datetime(["31/12/2014", "10/03/2011"], dayfirst=False)
...:
/workspaces/pandas-arw2019/pandas/core/arrays/datetimes.py:2044: UserWarning: Parsing 31/12/2014 DD/MM format.
result, tz_parsed = tslib.array_to_datetime(
Out[4]: DatetimeIndex(['2014-12-31', '2011-10-03'], dtype='datetime64[ns]', freq=None)
```
A downside of this change is that sometimes a consistent output produces unnecessary warnings (due to mismatch with the default value of `dayfirst`):
``` python
In [11]: pd.to_datetime(["31/12/2014", "30/04/2011", "31/05/2016"] )
/workspaces/pandas-arw2019/pandas/core/arrays/datetimes.py:2044: UserWarning: Parsing 31/12/2014 DD/MM format.
result, tz_parsed = tslib.array_to_datetime(
/workspaces/pandas-arw2019/pandas/core/arrays/datetimes.py:2044: UserWarning: Parsing 30/04/2011 DD/MM format.
result, tz_parsed = tslib.array_to_datetime(
/workspaces/pandas-arw2019/pandas/core/arrays/datetimes.py:2044: UserWarning: Parsing 31/05/2016 DD/MM format.
result, tz_parsed = tslib.array_to_datetime(
Out[11]: DatetimeIndex(['2014-12-31', '2011-04-30', '2016-05-31'], dtype='datetime64[ns]', freq=None)
```
but these can be silenced with `infer_datetime_format`:
``` python
In [12]: pd.to_datetime(["31/12/2014", "30/04/2011", "31/05/2016"], infer_datetime_format=True )
Out[12]: DatetimeIndex(['2014-12-31', '2011-04-30', '2016-05-31'], dtype='datetime64[ns]', freq=None)
```
For invalid input (if it cannot be parsed according to a single format) we will *always* raise a warning, even with `infer_datetime_format=True`:
``` python
In [13]: pd.to_datetime(["31/12/2014", "03/31/2011"], infer_datetime_format=True)
/workspaces/pandas-arw2019/pandas/core/arrays/datetimes.py:2044: UserWarning: Parsing 31/12/2014 DD/MM format.
result, tz_parsed = tslib.array_to_datetime(
Out[13]: DatetimeIndex(['2014-12-31', '2011-03-31'], dtype='datetime64[ns]', freq=None)
```
#### Remaining issues
Personally I can see a case for raising an error with
``` python
In [13]: pd.to_datetime(["31/12/2014", "03/31/2011"], infer_datetime_format=True)
```
since there's no valid way to process that. I also think that
``` python
In [11]: pd.to_datetime(["31/12/2014", "30/04/2011", "31/05/2016"])
```
shouldn't be raising warnings, but I feel much less strongly about that. That said, changing this behaviour will require more heavy-handed alterations to the codebase (and maybe performance implications - not sure).
I think that the warnings I added here will allow users to figure out the origin of these problems when they turn up and that should do away with most of the headaches. | https://api.github.com/repos/pandas-dev/pandas/pulls/35428 | 2020-07-27T17:25:40Z | 2020-09-18T15:34:56Z | null | 2020-09-18T15:34:56Z |
MAINT: Fix issue in StataReader due to upstream changes | diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 7677d8a94d521..3717a2025cf51 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1643,8 +1643,7 @@ def read(
data = self._insert_strls(data)
- cols_ = np.where(self.dtyplist)[0]
-
+ cols_ = np.where([dtyp is not None for dtyp in self.dtyplist])[0]
# Convert columns (if needed) to match input type
ix = data.index
requires_type_conversion = False
| Avoid creating an array of dtypes to workaround NumPy future change
closes #35426
- [X] closes #35426
- [ ] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35427 | 2020-07-27T10:57:26Z | 2020-08-06T12:42:57Z | 2020-08-06T12:42:57Z | 2020-08-06T12:43:13Z |
TST: adding test for .describe() with duplicate columns | diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index e693962e57ac3..cbfba16223f74 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -992,6 +992,68 @@ def test_frame_describe_unstacked_format():
tm.assert_frame_equal(result, expected)
+@pytest.mark.filterwarnings(
+ "ignore:"
+ "indexing past lexsort depth may impact performance:"
+ "pandas.errors.PerformanceWarning"
+)
+@pytest.mark.parametrize("as_index", [True, False])
+def test_describe_with_duplicate_output_column_names(as_index):
+ # GH 35314
+ df = pd.DataFrame(
+ {
+ "a": [99, 99, 99, 88, 88, 88],
+ "b": [1, 2, 3, 4, 5, 6],
+ "c": [10, 20, 30, 40, 50, 60],
+ },
+ columns=["a", "b", "b"],
+ )
+
+ expected = (
+ pd.DataFrame.from_records(
+ [
+ ("a", "count", 3.0, 3.0),
+ ("a", "mean", 88.0, 99.0),
+ ("a", "std", 0.0, 0.0),
+ ("a", "min", 88.0, 99.0),
+ ("a", "25%", 88.0, 99.0),
+ ("a", "50%", 88.0, 99.0),
+ ("a", "75%", 88.0, 99.0),
+ ("a", "max", 88.0, 99.0),
+ ("b", "count", 3.0, 3.0),
+ ("b", "mean", 5.0, 2.0),
+ ("b", "std", 1.0, 1.0),
+ ("b", "min", 4.0, 1.0),
+ ("b", "25%", 4.5, 1.5),
+ ("b", "50%", 5.0, 2.0),
+ ("b", "75%", 5.5, 2.5),
+ ("b", "max", 6.0, 3.0),
+ ("b", "count", 3.0, 3.0),
+ ("b", "mean", 5.0, 2.0),
+ ("b", "std", 1.0, 1.0),
+ ("b", "min", 4.0, 1.0),
+ ("b", "25%", 4.5, 1.5),
+ ("b", "50%", 5.0, 2.0),
+ ("b", "75%", 5.5, 2.5),
+ ("b", "max", 6.0, 3.0),
+ ],
+ )
+ .set_index([0, 1])
+ .T
+ )
+ expected.columns.names = [None, None]
+ expected.index = pd.Index([88, 99], name="a")
+
+ if as_index:
+ expected = expected.drop(columns=["a"], level=0)
+ else:
+ expected = expected.reset_index(drop=True)
+
+ result = df.groupby("a", as_index=as_index).describe()
+
+ tm.assert_frame_equal(result, expected)
+
+
def test_groupby_mean_no_overflow():
# Regression test for (#22487)
df = pd.DataFrame(
| - [x] closes #35423
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
While working on #35314 I made a change that broke `GroupBy.apply` if you called it on a DataFrame with duplicate columns; however, this break was not caught by any test. Was just caught on inpsection: https://github.com/pandas-dev/pandas/pull/35314#discussion_r460000612
This PR is just to add a test for this case to ensure it works and doesn't break again in the future. | https://api.github.com/repos/pandas-dev/pandas/pulls/35424 | 2020-07-27T09:47:09Z | 2020-08-03T23:13:38Z | 2020-08-03T23:13:38Z | 2020-08-03T23:13:45Z |
BUG: df[col] = arr should not overwrite data in df[col] | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 41512ff9be82f..b91c627e86bd0 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1455,17 +1455,9 @@ def iget(self, col):
return self.values
def set_inplace(self, locs, values) -> None:
- # NB: This is a misnomer, is supposed to be inplace but is not,
- # see GH#33457
# When an ndarray, we should have locs.tolist() == [0]
# When a BlockPlacement we should have list(locs) == [0]
- self.values = values
- try:
- # TODO(GH33457) this can be removed
- self._cache.clear()
- except AttributeError:
- # _cache not yet initialized
- pass
+ self.values[:] = values
def _maybe_squeeze_arg(self, arg):
"""
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 3e0b62da64f42..2cce60110fbd5 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -660,7 +660,6 @@ def reindex_indexer(
use_na_proxy : bool, default False
Whether to use a np.void ndarray for newly introduced columns.
- pandas-indexer with -1's only.
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index bee8ccb125315..36df1ddcdea74 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -545,6 +545,9 @@ def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
# setting it triggers setting with copy
sliced = float_frame.iloc[:, -3:]
+ # check that the setitem below is not a no-op
+ assert not (float_frame["C"] == 4).all()
+
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
@@ -553,6 +556,12 @@ def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
assert (float_frame["C"] == 4).all()
+ # GH#35417 setting with setitem creates a new array, so we get the warning
+ # but do not modify the original
+ with pytest.raises(com.SettingWithCopyError, match=msg):
+ sliced["C"] = 5.0
+ assert (float_frame["C"] == 4).all()
+
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py
index 33fb191027c27..6c09f543f877d 100644
--- a/pandas/tests/frame/methods/test_rename.py
+++ b/pandas/tests/frame/methods/test_rename.py
@@ -4,8 +4,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
from pandas import (
DataFrame,
Index,
@@ -170,9 +168,9 @@ def test_rename_multiindex(self):
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(renamed.index, new_index)
- @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) setitem copy/view
def test_rename_nocopy(self, float_frame):
renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
+ renamed["foo"][:] = 1.0
assert np.shares_memory(renamed["foo"]._values, float_frame["C"]._values)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 17a990a6c7a38..f80beaf19effe 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -841,6 +841,22 @@ def test_identity_slice_returns_new_object(self, using_array_manager, request):
# should be a shallow copy
assert np.shares_memory(original_df["a"], sliced_df["a"])
+ original_df.loc[:, "a"] = [4, 4, 4]
+ if not using_array_manager:
+ assert (sliced_df["a"] == 4).all()
+ else:
+ # FIXME: what is the expected/desired behavior here? test it!
+ pass
+
+ # GH#35417 but setting with setitem creates a new array, so
+ # sliced_df is not changed
+ original_df["a"] = [5, 5, 5]
+ if using_array_manager:
+ # TODO(ArrayManager) verify it is expected that the original didn't change
+ # setitem is replacing full column, so doesn't update "viewing" dataframe
+ assert not (sliced_df["a"] == 4).all()
+ else:
+ assert (sliced_df["a"] == 4).all()
# Setting using .loc[:, "a"] sets inplace so alters both sliced and orig
original_df.loc[:, "a"] = [4, 4, 4]
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index b215aee4ea1c6..ffbcf1a5a7ad8 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1033,13 +1033,8 @@ def test_loc_empty_list_indexer_is_ok(self):
df.loc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True
)
- def test_identity_slice_returns_new_object(self, using_array_manager, request):
+ def test_identity_slice_returns_new_object(self, request):
# GH13873
- if using_array_manager:
- mark = pytest.mark.xfail(
- reason="setting with .loc[:, 'a'] does not alter inplace"
- )
- request.node.add_marker(mark)
original_df = DataFrame({"a": [1, 2, 3]})
sliced_df = original_df.loc[:]
@@ -1047,6 +1042,8 @@ def test_identity_slice_returns_new_object(self, using_array_manager, request):
assert original_df[:] is not original_df
# should be a shallow copy
+ original_df["a"][:] = [4, 4, 4]
+
assert np.shares_memory(original_df["a"]._values, sliced_df["a"]._values)
# Setting using .loc[:, "a"] sets inplace so alters both sliced and orig
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index a5ba684a37edf..e012dbabdd793 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -744,35 +744,51 @@ def test_get_numeric_data(self):
item_shape=(3,),
)
mgr.iset(5, np.array([1, 2, 3], dtype=np.object_))
-
numeric = mgr.get_numeric_data()
tm.assert_index_equal(numeric.items, Index(["int", "float", "complex", "bool"]))
+ mgr_idx = mgr.items.get_loc("float")
+ num_idx = numeric.items.get_loc("float")
+
tm.assert_almost_equal(
- mgr.iget(mgr.items.get_loc("float")).internal_values(),
- numeric.iget(numeric.items.get_loc("float")).internal_values(),
+ mgr.iget(mgr_idx).internal_values(),
+ numeric.iget(num_idx).internal_values(),
)
# Check sharing
+ numeric.iget(num_idx).internal_values()[:] = [100.0, 200.0, 300.0]
numeric.iset(
numeric.items.get_loc("float"),
np.array([100.0, 200.0, 300.0]),
inplace=True,
)
+
tm.assert_almost_equal(
- mgr.iget(mgr.items.get_loc("float")).internal_values(),
+ mgr.iget(mgr_idx).internal_values(),
np.array([100.0, 200.0, 300.0]),
)
+ def test_get_numeric_data_copy(self):
+ mgr = create_mgr(
+ "int: int; float: float; complex: complex;"
+ "str: object; bool: bool; obj: object; dt: datetime",
+ item_shape=(3,),
+ )
+ mgr.iset(5, np.array([1, 2, 3], dtype=np.object_))
+ numeric = mgr.get_numeric_data()
+ mgr_idx = mgr.items.get_loc("float")
+ num_idx = numeric.items.get_loc("float")
+
numeric2 = mgr.get_numeric_data(copy=True)
tm.assert_index_equal(numeric.items, Index(["int", "float", "complex", "bool"]))
+ numeric2.iget(num_idx).internal_values()[:] = [1000.0, 2000.0, 3000.0]
numeric2.iset(
numeric2.items.get_loc("float"),
np.array([1000.0, 2000.0, 3000.0]),
inplace=True,
)
tm.assert_almost_equal(
- mgr.iget(mgr.items.get_loc("float")).internal_values(),
- np.array([100.0, 200.0, 300.0]),
+ mgr.iget(mgr_idx).internal_values(),
+ np.array([1.0, 1.0, 1.0]),
)
def test_get_bool_data(self):
@@ -790,18 +806,28 @@ def test_get_bool_data(self):
bools.iget(bools.items.get_loc("bool")).internal_values(),
)
- bools.iset(0, np.array([True, False, True]), inplace=True)
+ # GH#33457 setting a new array on bools does _not_ alter the original
+ # array in-place, so mgr is unchanged
+ bools.iset(0, np.array([True, False, True]))
tm.assert_numpy_array_equal(
mgr.iget(mgr.items.get_loc("bool")).internal_values(),
- np.array([True, False, True]),
+ np.array([True, True, True]),
)
- # Check sharing
+ def test_get_bool_data_copy(self):
+ # GH#35417
+ mgr = create_mgr(
+ "int: int; float: float; complex: complex;"
+ "str: object; bool: bool; obj: object; dt: datetime",
+ item_shape=(3,),
+ )
+ mgr.iset(6, np.array([True, False, True], dtype=np.object_))
+
bools2 = mgr.get_bool_data(copy=True)
- bools2.iset(0, np.array([False, True, False]))
+ bools2.blocks[0].values[:] = [[False, True, False]]
tm.assert_numpy_array_equal(
mgr.iget(mgr.items.get_loc("bool")).internal_values(),
- np.array([True, False, True]),
+ np.array([True, True, True]),
)
def test_unicode_repr_doesnt_raise(self):
@@ -1346,23 +1372,6 @@ def check_frame_setitem(self, elem, index: Index, inplace: bool):
assert df.dtypes[0] == object
-class TestShouldStore:
- def test_should_store_categorical(self):
- cat = Categorical(["A", "B", "C"])
- df = DataFrame(cat)
- blk = df._mgr.blocks[0]
-
- # matching dtype
- assert blk.should_store(cat)
- assert blk.should_store(cat[:-1])
-
- # different dtype
- assert not blk.should_store(cat.as_ordered())
-
- # ndarray instead of Categorical
- assert not blk.should_store(np.asarray(cat))
-
-
def test_validate_ndim(block_maker):
values = np.array([1.0, 2.0])
placement = slice(2)
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index bf0a10fa702a5..3f970dfb19818 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -1310,7 +1310,7 @@ def test_index_with_nan(self):
# all-nan in mi
df2 = df.copy()
- df2.loc[:, "id2"] = np.nan
+ df2["id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
| - [x] closes #33457
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
cc @jorisvandenbossche this still fails 7 tests locally and there's one more (commented in-line) test that looks fishy. Extra eyeballs would be welcome.
xref #35271, #35266 | https://api.github.com/repos/pandas-dev/pandas/pulls/35417 | 2020-07-26T22:39:51Z | 2022-01-26T17:47:02Z | null | 2022-01-26T17:47:06Z |
CLN: _wrap_applied_output | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index b7280a9f7db3c..449099e5ce073 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1219,171 +1219,107 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
return self.obj._constructor(index=keys)
- key_names = self.grouper.names
-
- # GH12824
+ # GH12824 - If first value is None, can't assume all are None
first_not_none = next(com.not_none(*values), None)
if first_not_none is None:
- # GH9684. If all values are None, then this will throw an error.
- # We'd prefer it return an empty dataframe.
+ # GH9684 - All values are None, return an empty frame.
return self.obj._constructor()
- elif isinstance(first_not_none, DataFrame):
+
+ if isinstance(first_not_none, DataFrame):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
- else:
- if len(self.grouper.groupings) > 1:
- key_index = self.grouper.result_index
+ key_index = self.grouper.result_index if self.as_index else None
+
+ if not isinstance(first_not_none, (Series, np.ndarray, Index)):
+
+ # values are not series or array-like but scalars
+ # self._selection_name not passed through to Series as the
+ # result should not take the name of original selection
+ # of columns
+ if self.as_index:
+ return self.obj._constructor_sliced(values, index=key_index)
else:
- ping = self.grouper.groupings[0]
- if len(keys) == ping.ngroups:
- key_index = ping.group_index
- key_index.name = key_names[0]
+ result = DataFrame(values, index=key_index, columns=[self._selection])
+ self._insert_inaxis_grouper_inplace(result)
+ return result
- key_lookup = Index(keys)
- indexer = key_lookup.get_indexer(key_index)
+ elif not isinstance(first_not_none, Series):
- # reorder the values
- values = [values[i] for i in indexer]
+ # GH1738: values is list of arrays of unequal lengths
+ # TODO: sure this is right? we used to do this
+ # after raising AttributeError above
+ return self.obj._constructor_sliced(
+ values, index=key_index, name=self._selection_name
+ )
- # update due to the potential reorder
- first_not_none = next(com.not_none(*values), None)
- else:
+ # this is to silence a DeprecationWarning
+ # TODO: Replace when default dtype of empty Series is object
+ # with backup = first_not_none._constructor(**kwargs)
+ kwargs = first_not_none._construct_axes_dict()
+ backup = create_series_with_explicit_dtype(**kwargs, dtype_if_empty=object)
+ values = [x if (x is not None) else backup for x in values]
+
+ v = values[0]
+ all_indexed_same = all_indexes_same((x.index for x in values))
+
+ # GH3596 - provide a reduction (Frame -> Series) if groups are unique
+ if self.squeeze:
+ # assign the name to this series
+ applied_index = self._selected_obj._get_axis(self.axis)
+ if len(values) == 1 and applied_index.nlevels == 1:
+ v.name = keys[0]
+
+ # GH2893
+ # we have series in the values array, we want to
+ # produce a series:
+ # if any of the sub-series are not indexed the same
+ # OR we don't have a multi-index and we have only a
+ # single values
+ return self._concat_objects(
+ keys, values, not_indexed_same=not_indexed_same
+ )
- key_index = Index(keys, name=key_names[0])
+ # still a series
+ # path added as of GH 5545
+ elif all_indexed_same:
+ from pandas.core.reshape.concat import concat
- # don't use the key indexer
- if not self.as_index:
- key_index = None
+ return concat(values)
- # make Nones an empty object
- if first_not_none is None:
- return self.obj._constructor()
- elif isinstance(first_not_none, NDFrame):
+ if not all_indexed_same:
+ # GH 8467
+ return self._concat_objects(keys, values, not_indexed_same=True)
- # this is to silence a DeprecationWarning
- # TODO: Remove when default dtype of empty Series is object
- kwargs = first_not_none._construct_axes_dict()
- if isinstance(first_not_none, Series):
- backup = create_series_with_explicit_dtype(
- **kwargs, dtype_if_empty=object
- )
- else:
- backup = first_not_none._constructor(**kwargs)
-
- values = [x if (x is not None) else backup for x in values]
-
- v = values[0]
-
- if isinstance(v, (np.ndarray, Index, Series)) or not self.as_index:
- if isinstance(v, Series):
- applied_index = self._selected_obj._get_axis(self.axis)
- all_indexed_same = all_indexes_same([x.index for x in values])
- singular_series = len(values) == 1 and applied_index.nlevels == 1
-
- # GH3596
- # provide a reduction (Frame -> Series) if groups are
- # unique
- if self.squeeze:
- # assign the name to this series
- if singular_series:
- values[0].name = keys[0]
-
- # GH2893
- # we have series in the values array, we want to
- # produce a series:
- # if any of the sub-series are not indexed the same
- # OR we don't have a multi-index and we have only a
- # single values
- return self._concat_objects(
- keys, values, not_indexed_same=not_indexed_same
- )
-
- # still a series
- # path added as of GH 5545
- elif all_indexed_same:
- from pandas.core.reshape.concat import concat
-
- return concat(values)
-
- if not all_indexed_same:
- # GH 8467
- return self._concat_objects(keys, values, not_indexed_same=True)
-
- if self.axis == 0 and isinstance(v, ABCSeries):
- # GH6124 if the list of Series have a consistent name,
- # then propagate that name to the result.
- index = v.index.copy()
- if index.name is None:
- # Only propagate the series name to the result
- # if all series have a consistent name. If the
- # series do not have a consistent name, do
- # nothing.
- names = {v.name for v in values}
- if len(names) == 1:
- index.name = list(names)[0]
-
- # normally use vstack as its faster than concat
- # and if we have mi-columns
- if (
- isinstance(v.index, MultiIndex)
- or key_index is None
- or isinstance(key_index, MultiIndex)
- ):
- stacked_values = np.vstack([np.asarray(v) for v in values])
- result = self.obj._constructor(
- stacked_values, index=key_index, columns=index
- )
- else:
- # GH5788 instead of stacking; concat gets the
- # dtypes correct
- from pandas.core.reshape.concat import concat
-
- result = concat(
- values,
- keys=key_index,
- names=key_index.names,
- axis=self.axis,
- ).unstack()
- result.columns = index
- elif isinstance(v, ABCSeries):
- stacked_values = np.vstack([np.asarray(v) for v in values])
- result = self.obj._constructor(
- stacked_values.T, index=v.index, columns=key_index
- )
- elif not self.as_index:
- # We add grouping column below, so create a frame here
- result = DataFrame(
- values, index=key_index, columns=[self._selection]
- )
- else:
- # GH#1738: values is list of arrays of unequal lengths
- # fall through to the outer else clause
- # TODO: sure this is right? we used to do this
- # after raising AttributeError above
- return self.obj._constructor_sliced(
- values, index=key_index, name=self._selection_name
- )
+ stacked_values = np.vstack([np.asarray(v) for v in values])
- # if we have date/time like in the original, then coerce dates
- # as we are stacking can easily have object dtypes here
- so = self._selected_obj
- if so.ndim == 2 and so.dtypes.apply(needs_i8_conversion).any():
- result = _recast_datetimelike_result(result)
- else:
- result = result._convert(datetime=True)
+ if self.axis == 0:
+ index = key_index
+ columns = v.index.copy()
+ if columns.name is None:
+ # GH6124 - propagate name of Series when it's consistent
+ names = {v.name for v in values}
+ if len(names) == 1:
+ columns.name = list(names)[0]
+ else:
+ index = v.index
+ columns = key_index
+ stacked_values = stacked_values.T
+
+ result = self.obj._constructor(stacked_values, index=index, columns=columns)
- if not self.as_index:
- self._insert_inaxis_grouper_inplace(result)
+ # if we have date/time like in the original, then coerce dates
+ # as we are stacking can easily have object dtypes here
+ so = self._selected_obj
+ if so.ndim == 2 and so.dtypes.apply(needs_i8_conversion).any():
+ result = _recast_datetimelike_result(result)
+ else:
+ result = result._convert(datetime=True)
- return self._reindex_output(result)
+ if not self.as_index:
+ self._insert_inaxis_grouper_inplace(result)
- # values are not series or array-like but scalars
- else:
- # self._selection_name not passed through to Series as the
- # result should not take the name of original selection
- # of columns
- return self.obj._constructor_sliced(values, index=key_index)
+ return self._reindex_output(result)
def _transform_general(
self, func, *args, engine="cython", engine_kwargs=None, **kwargs
diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index 30cc8cf480dcf..d352b001f5d2a 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -297,15 +297,16 @@ def all_indexes_same(indexes):
Parameters
----------
- indexes : list of Index objects
+ indexes : iterable of Index objects
Returns
-------
bool
True if all indexes contain the same elements, False otherwise.
"""
- first = indexes[0]
- for index in indexes[1:]:
+ itr = iter(indexes)
+ first = next(itr)
+ for index in itr:
if not first.equals(index):
return False
return True
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index ee38722ffb8ce..6b3ad9844ad76 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -861,13 +861,15 @@ def test_apply_multi_level_name(category):
b = [1, 2] * 5
if category:
b = pd.Categorical(b, categories=[1, 2, 3])
+ expected_index = pd.CategoricalIndex([1, 2], categories=[1, 2, 3], name="B")
+ else:
+ expected_index = pd.Index([1, 2], name="B")
df = pd.DataFrame(
{"A": np.arange(10), "B": b, "C": list(range(10)), "D": list(range(10))}
).set_index(["A", "B"])
result = df.groupby("B").apply(lambda x: x.sum())
- expected = pd.DataFrame(
- {"C": [20, 25], "D": [20, 25]}, index=pd.Index([1, 2], name="B")
- )
+
+ expected = pd.DataFrame({"C": [20, 25], "D": [20, 25]}, index=expected_index)
tm.assert_frame_equal(result, expected)
assert df.index.names == ["A", "B"]
| The majority of this change is the result of three operations:
1. Move code that returns up top, to avoid the heavily nested structure.
2. Move variable creation as close as possible to where they are used. Previously, in certain cases computations were being done and then going unused.
3. Combine duplicated code.
There are two sections that I was able to entirely remove:
````
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
# update due to the potential reorder
first_not_none = next(com.not_none(*values), None)
````
and
````
# GH5788 instead of stacking; concat gets the
# dtypes correct
from pandas.core.reshape.concat import concat
result = concat(
values,
keys=key_index,
names=key_index.names,
axis=self.axis,
).unstack()
result.columns = index
````
For the single test that I touched, it was checking that the categorical dtype of an index was being dropped after a groupby. I don't believe that is the correct behavior - that the categorical dtype should remain. I checked groupby/categorical bugs and didn't find any issues this closes. | https://api.github.com/repos/pandas-dev/pandas/pulls/35412 | 2020-07-25T20:03:20Z | 2020-08-18T21:47:16Z | null | 2020-09-10T00:07:45Z |
CLN: clarify TypeError for IndexSlice argument to pd.xs | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 260b92b5989c1..044226e7379bb 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -134,7 +134,7 @@ Missing
MultiIndex
^^^^^^^^^^
--
+- Bug in :meth:`DataFrame.xs` when used with :class:`IndexSlice` raises ``TypeError`` with message `Expected label or tuple of labels` (:issue:`35301`)
-
I/O
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6fd55c58ece40..843b602a12823 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3492,7 +3492,10 @@ class animal locomotion
index = self.index
if isinstance(index, MultiIndex):
- loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
+ try:
+ loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
+ except TypeError as e:
+ raise TypeError(f"Expected label or tuple of labels, got {key}") from e
else:
loc = self.index.get_loc(key)
diff --git a/pandas/tests/indexing/multiindex/test_xs.py b/pandas/tests/indexing/multiindex/test_xs.py
index b807795b9c309..91be1d913001b 100644
--- a/pandas/tests/indexing/multiindex/test_xs.py
+++ b/pandas/tests/indexing/multiindex/test_xs.py
@@ -1,7 +1,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, Index, MultiIndex, Series, concat, date_range
+from pandas import DataFrame, Index, IndexSlice, MultiIndex, Series, concat, date_range
import pandas._testing as tm
import pandas.core.common as com
@@ -220,6 +220,27 @@ def test_xs_level_series_slice_not_implemented(
s[2000, 3:4]
+def test_xs_IndexSlice_argument_not_implemented():
+ # GH 35301
+
+ index = MultiIndex(
+ levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]],
+ codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
+ )
+
+ series = Series(np.random.randn(6), index=index)
+ frame = DataFrame(np.random.randn(6, 4), index=index)
+
+ msg = (
+ "Expected label or tuple of labels, got "
+ r"\(\('foo', 'qux', 0\), slice\(None, None, None\)\)"
+ )
+ with pytest.raises(TypeError, match=msg):
+ frame.xs(IndexSlice[("foo", "qux", 0), :])
+ with pytest.raises(TypeError, match=msg):
+ series.xs(IndexSlice[("foo", "qux", 0), :])
+
+
def test_series_getitem_multiindex_xs():
# GH6258
dt = list(date_range("20130903", periods=3))
| - [x] closes #35301
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
As per discussion in #35301 we do not support `IndexSlice` arguments to `xs`. This PR aims to clarify the `TypeError` thrown when that happens.
I created a separate issue to house the discussion re: functionality of `loc` vs `xs` (#35418). | https://api.github.com/repos/pandas-dev/pandas/pulls/35411 | 2020-07-25T14:20:48Z | 2020-08-07T16:58:25Z | 2020-08-07T16:58:24Z | 2020-08-07T17:19:12Z |
DOC: update DataFrame.to_feather docstring | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e1a889bf79d95..3f3c77f9fdeb5 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2216,14 +2216,14 @@ def to_stata(
writer.write_file()
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
- def to_feather(self, path, **kwargs) -> None:
+ def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None:
"""
Write a DataFrame to the binary Feather format.
Parameters
----------
- path : str
- String file path.
+ path : str or file-like object
+ If a string, it will be used as Root Directory path.
**kwargs :
Additional keywords passed to :func:`pyarrow.feather.write_feather`.
Starting with pyarrow 0.17, this includes the `compression`,
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index a98eebe1c6a2a..ed3cd3cefe96e 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -1,6 +1,8 @@
""" feather-format compat """
-from pandas._typing import StorageOptions
+from typing import AnyStr
+
+from pandas._typing import FilePathOrBuffer, StorageOptions
from pandas.compat._optional import import_optional_dependency
from pandas import DataFrame, Int64Index, RangeIndex
@@ -8,7 +10,12 @@
from pandas.io.common import get_filepath_or_buffer
-def to_feather(df: DataFrame, path, storage_options: StorageOptions = None, **kwargs):
+def to_feather(
+ df: DataFrame,
+ path: FilePathOrBuffer[AnyStr],
+ storage_options: StorageOptions = None,
+ **kwargs,
+):
"""
Write a DataFrame to the binary Feather format.
| - [x] closes #29532
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
The issue was that `DataFrame.to_feather` had an `fname` argument which in fact could be either a filename or a buffer. Most of the work to close #29532 was done in #30338 where `fname` argument was deprecated in favor of `path`. Here I update the docstring to let users know that they can use a buffer. | https://api.github.com/repos/pandas-dev/pandas/pulls/35408 | 2020-07-25T05:27:28Z | 2020-09-13T23:10:57Z | 2020-09-13T23:10:56Z | 2020-09-13T23:11:01Z |
DOC: Add compose ecosystem docs | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index b02d4abd3ddf8..de231e43918f8 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -80,6 +80,11 @@ ML pipeline.
Featuretools is a Python library for automated feature engineering built on top of pandas. It excels at transforming temporal and relational datasets into feature matrices for machine learning using reusable feature engineering "primitives". Users can contribute their own primitives in Python and share them with the rest of the community.
+`Compose <https://github.com/FeatureLabs/compose>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Compose is a machine learning tool for labeling data and prediction engineering. It allows you to structure the labeling process by parameterizing prediction problems and transforming time-driven relational data into target values with cutoff times that can be used for supervised learning.
+
.. _ecosystem.visualization:
Visualization
@@ -445,6 +450,7 @@ Library Accessor Classes Description
`pdvega`_ ``vgplot`` ``Series``, ``DataFrame`` Provides plotting functions from the Altair_ library.
`pandas_path`_ ``path`` ``Index``, ``Series`` Provides `pathlib.Path`_ functions for Series.
`pint-pandas`_ ``pint`` ``Series``, ``DataFrame`` Provides units support for numeric Series and DataFrames.
+`composeml`_ ``slice`` ``DataFrame`` Provides a generator for enhanced data slicing.
=============== ========== ========================= ===============================================================
.. _cyberpandas: https://cyberpandas.readthedocs.io/en/latest
@@ -453,3 +459,4 @@ Library Accessor Classes Description
.. _pandas_path: https://github.com/drivendataorg/pandas-path/
.. _pathlib.Path: https://docs.python.org/3/library/pathlib.html
.. _pint-pandas: https://github.com/hgrecco/pint-pandas
+.. _composeml: https://github.com/FeatureLabs/compose
diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md
index be109ea53eb7d..515d23afb93ec 100644
--- a/web/pandas/community/ecosystem.md
+++ b/web/pandas/community/ecosystem.md
@@ -42,6 +42,13 @@ datasets into feature matrices for machine learning using reusable
feature engineering "primitives". Users can contribute their own
primitives in Python and share them with the rest of the community.
+### [Compose](https://github.com/FeatureLabs/compose)
+
+Compose is a machine learning tool for labeling data and prediction engineering.
+It allows you to structure the labeling process by parameterizing
+prediction problems and transforming time-driven relational data into
+target values with cutoff times that can be used for supervised learning.
+
## Visualization
### [Altair](https://altair-viz.github.io/)
@@ -372,3 +379,4 @@ authors to coordinate on the namespace.
| [pdvega](https://altair-viz.github.io/pdvega/) | `vgplot` | `Series`, `DataFrame` |
| [pandas_path](https://github.com/drivendataorg/pandas-path/) | `path` | `Index`, `Series` |
| [pint-pandas](https://github.com/hgrecco/pint-pandas) | `pint` | `Series`, `DataFrame` |
+ | [composeml](https://github.com/FeatureLabs/compose) | `slice` | `DataFrame` |
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35405 | 2020-07-24T16:14:25Z | 2020-08-06T23:37:25Z | 2020-08-06T23:37:25Z | 2020-08-06T23:37:30Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.