title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
ENH: Raise when subsetting columns on groupby with axis=1 | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index e488ca52be8a0..b1e43485cc44e 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -233,6 +233,7 @@ Other enhancements
- :func:`read_csv` supports memory-mapping for compressed files (:issue:`37621`)
- Improve error reporting for :meth:`DataFrame.merge()` when invalid merge column definitions were given (:issue:`16228`)
- Improve numerical stability for :meth:`Rolling.skew()`, :meth:`Rolling.kurt()`, :meth:`Expanding.skew()` and :meth:`Expanding.kurt()` through implementation of Kahan summation (:issue:`6929`)
+- Improved error reporting for subsetting columns of a :class:`DataFrameGroupBy` with ``axis=1`` (:issue:`37725`)
.. _whatsnew_120.api_breaking.python:
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index d0b58e8abc4ee..6f86819303537 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1530,6 +1530,9 @@ def filter(self, func, dropna=True, *args, **kwargs):
return self._apply_filter(indices, dropna)
def __getitem__(self, key):
+ if self.axis == 1:
+ # GH 37725
+ raise ValueError("Cannot subset columns when using axis=1")
# per GH 23566
if isinstance(key, tuple) and len(key) > 1:
# if len == 1, then it becomes a SeriesGroupBy and this is actually
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index a0c228200e73a..3a4abd58f0d39 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2081,7 +2081,6 @@ def test_group_on_two_row_multiindex_returns_one_tuple_key():
@pytest.mark.parametrize(
"klass, attr, value",
[
- (DataFrame, "axis", 1),
(DataFrame, "level", "a"),
(DataFrame, "as_index", False),
(DataFrame, "sort", False),
@@ -2120,6 +2119,14 @@ def test_subsetting_columns_keeps_attrs(klass, attr, value):
assert getattr(result, attr) == getattr(expected, attr)
+def test_subsetting_columns_axis_1():
+ # GH 37725
+ g = DataFrame({"A": [1], "B": [2], "C": [3]}).groupby([0, 0, 1], axis=1)
+ match = "Cannot subset columns when using axis=1"
+ with pytest.raises(ValueError, match=match):
+ g[["A", "B"]].sum()
+
+
@pytest.mark.parametrize("func", ["sum", "any", "shift"])
def test_groupby_column_index_name_lost(func):
# GH: 29764 groupby loses index sometimes
| - [x] closes #37725
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37727 | 2020-11-09T22:29:32Z | 2020-11-11T02:36:21Z | 2020-11-11T02:36:21Z | 2020-12-06T14:04:54Z |
TST: Add test for filling new rows through reindexing MultiIndex | diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py
index bc3750a196c5f..113a80f1c5c4e 100644
--- a/pandas/tests/frame/methods/test_reindex.py
+++ b/pandas/tests/frame/methods/test_reindex.py
@@ -864,3 +864,13 @@ def test_reindex_signature(self):
"fill_value",
"tolerance",
}
+
+ def test_reindex_multiindex_ffill_added_rows(self):
+ # GH#23693
+ # reindex added rows with nan values even when fill method was specified
+ mi = MultiIndex.from_tuples([("a", "b"), ("d", "e")])
+ df = DataFrame([[0, 7], [3, 4]], index=mi, columns=["x", "y"])
+ mi2 = MultiIndex.from_tuples([("a", "b"), ("d", "e"), ("h", "i")])
+ result = df.reindex(mi2, axis=0, method="ffill")
+ expected = DataFrame([[0, 7], [3, 4], [3, 4]], index=mi2, columns=["x", "y"])
+ tm.assert_frame_equal(result, expected)
| - [x] closes #23693
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Issue was fixed previously | https://api.github.com/repos/pandas-dev/pandas/pulls/37726 | 2020-11-09T22:29:15Z | 2020-11-13T05:11:10Z | 2020-11-13T05:11:10Z | 2020-11-13T13:16:08Z |
REF: remove Categorical._validate_listlike, unbox_listlike | diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 87a049c77dc32..9f011bc9d2651 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1680,26 +1680,6 @@ def _box_func(self, i: int):
return np.NaN
return self.categories[i]
- def _validate_listlike(self, target: ArrayLike) -> np.ndarray:
- """
- Extract integer codes we can use for comparison.
-
- Notes
- -----
- If a value in target is not present, it gets coded as -1.
- """
-
- if isinstance(target, Categorical):
- # Indexing on codes is more efficient if categories are the same,
- # so we can apply some optimizations based on the degree of
- # dtype-matching.
- cat = self._encode_with_my_categories(target)
- codes = cat._codes
- else:
- codes = self.categories.get_indexer(target)
-
- return codes
-
def _unbox_scalar(self, key) -> int:
# searchsorted is very performance sensitive. By converting codes
# to same dtype as self.codes, we get much faster performance.
@@ -1707,10 +1687,6 @@ def _unbox_scalar(self, key) -> int:
code = self._codes.dtype.type(code)
return code
- def _unbox_listlike(self, value):
- unboxed = self.categories.get_indexer(value)
- return unboxed.astype(self._ndarray.dtype, copy=False)
-
# ------------------------------------------------------------------
def take_nd(self, indexer, allow_fill: bool = False, fill_value=None):
@@ -1884,7 +1860,8 @@ def _validate_setitem_value(self, value):
"category, set the categories first"
)
- return self._unbox_listlike(rvalue)
+ codes = self.categories.get_indexer(rvalue)
+ return codes.astype(self._ndarray.dtype, copy=False)
def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]:
"""
diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py
index 3f04339803bf6..64037f5757a38 100644
--- a/pandas/core/groupby/categorical.py
+++ b/pandas/core/groupby/categorical.py
@@ -48,6 +48,9 @@ def recode_for_groupby(
"""
# we only care about observed values
if observed:
+ # In cases with c.ordered, this is equivalent to
+ # return c.remove_unused_categories(), c
+
unique_codes = unique1d(c.codes)
take_codes = unique_codes[unique_codes != -1]
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 859c26a40e50d..24bd60a7356dd 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -8,7 +8,7 @@
from pandas._libs import index as libindex
from pandas._libs.hashtable import duplicated_int64
from pandas._libs.lib import no_default
-from pandas._typing import Label
+from pandas._typing import ArrayLike, Label
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
@@ -542,7 +542,10 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
"method='nearest' not implemented yet for CategoricalIndex"
)
- codes = self._values._validate_listlike(target._values)
+ # Note: we use engine.get_indexer_non_unique below because, even if
+ # `target` is unique, any non-category entries in it will be encoded
+ # as -1 by _get_codes_for_get_indexer, so `codes` may not be unique.
+ codes = self._get_codes_for_get_indexer(target._values)
indexer, _ = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer)
@@ -550,10 +553,30 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
def get_indexer_non_unique(self, target):
target = ibase.ensure_index(target)
- codes = self._values._validate_listlike(target._values)
+ codes = self._get_codes_for_get_indexer(target._values)
indexer, missing = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer), missing
+ def _get_codes_for_get_indexer(self, target: ArrayLike) -> np.ndarray:
+ """
+ Extract integer codes we can use for comparison.
+
+ Notes
+ -----
+ If a value in target is not present, it gets coded as -1.
+ """
+
+ if isinstance(target, Categorical):
+ # Indexing on codes is more efficient if categories are the same,
+ # so we can apply some optimizations based on the degree of
+ # dtype-matching.
+ cat = self._data._encode_with_my_categories(target)
+ codes = cat._codes
+ else:
+ codes = self.categories.get_indexer(target)
+
+ return codes
+
@doc(Index._convert_list_indexer)
def _convert_list_indexer(self, keyarr):
# Return our indexer or raise if all of the values are not included in
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index d49e834fedb2d..dd45a00155721 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1951,21 +1951,23 @@ def _factorize_keys(
rk, _ = rk._values_for_factorize()
elif (
- is_categorical_dtype(lk) and is_categorical_dtype(rk) and is_dtype_equal(lk, rk)
+ is_categorical_dtype(lk.dtype)
+ and is_categorical_dtype(rk.dtype)
+ and is_dtype_equal(lk.dtype, rk.dtype)
):
assert isinstance(lk, Categorical)
assert isinstance(rk, Categorical)
# Cast rk to encoding so we can compare codes with lk
- rk = lk._validate_listlike(rk)
+ rk = lk._encode_with_my_categories(rk)
lk = ensure_int64(lk.codes)
- rk = ensure_int64(rk)
+ rk = ensure_int64(rk.codes)
elif is_extension_array_dtype(lk.dtype) and is_dtype_equal(lk.dtype, rk.dtype):
lk, _ = lk._values_for_factorize()
rk, _ = rk._values_for_factorize()
- if is_integer_dtype(lk) and is_integer_dtype(rk):
+ if is_integer_dtype(lk.dtype) and is_integer_dtype(rk.dtype):
# GH#23917 TODO: needs tests for case where lk is integer-dtype
# and rk is datetime-dtype
klass = libhashtable.Int64Factorizer
| Getting closer to having these standardized. | https://api.github.com/repos/pandas-dev/pandas/pulls/37724 | 2020-11-09T21:14:28Z | 2020-11-10T03:36:13Z | 2020-11-10T03:36:13Z | 2020-11-10T03:43:13Z |
REF: collect boilerplate in _datetimelike_compat | diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index cfb02e5b1e987..d9e51810f1445 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -367,6 +367,32 @@ def _wrap_results(result, dtype: np.dtype, fill_value=None):
return result
+def _datetimelike_compat(func):
+ """
+ If we have datetime64 or timedelta64 values, ensure we have a correct
+ mask before calling the wrapped function, then cast back afterwards.
+ """
+
+ @functools.wraps(func)
+ def new_func(values, *, axis=None, skipna=True, mask=None, **kwargs):
+ orig_values = values
+
+ datetimelike = values.dtype.kind in ["m", "M"]
+ if datetimelike and mask is None:
+ mask = isna(values)
+
+ result = func(values, axis=axis, skipna=skipna, mask=mask, **kwargs)
+
+ if datetimelike:
+ result = _wrap_results(result, orig_values.dtype, fill_value=iNaT)
+ if not skipna:
+ result = _mask_datetimelike_result(result, axis, mask, orig_values)
+
+ return result
+
+ return new_func
+
+
def _na_for_min_count(
values: np.ndarray, axis: Optional[int]
) -> Union[Scalar, np.ndarray]:
@@ -480,6 +506,7 @@ def nanall(
@disallow("M8")
+@_datetimelike_compat
def nansum(
values: np.ndarray,
*,
@@ -511,25 +538,18 @@ def nansum(
>>> nanops.nansum(s)
3.0
"""
- orig_values = values
-
values, mask, dtype, dtype_max, _ = _get_values(
values, skipna, fill_value=0, mask=mask
)
dtype_sum = dtype_max
- datetimelike = False
if is_float_dtype(dtype):
dtype_sum = dtype
elif is_timedelta64_dtype(dtype):
- datetimelike = True
dtype_sum = np.float64
the_sum = values.sum(axis, dtype=dtype_sum)
the_sum = _maybe_null_out(the_sum, axis, mask, values.shape, min_count=min_count)
- the_sum = _wrap_results(the_sum, dtype)
- if datetimelike and not skipna:
- the_sum = _mask_datetimelike_result(the_sum, axis, mask, orig_values)
return the_sum
@@ -552,6 +572,7 @@ def _mask_datetimelike_result(
@disallow(PeriodDtype)
@bottleneck_switch()
+@_datetimelike_compat
def nanmean(
values: np.ndarray,
*,
@@ -583,8 +604,6 @@ def nanmean(
>>> nanops.nanmean(s)
1.5
"""
- orig_values = values
-
values, mask, dtype, dtype_max, _ = _get_values(
values, skipna, fill_value=0, mask=mask
)
@@ -592,9 +611,7 @@ def nanmean(
dtype_count = np.float64
# not using needs_i8_conversion because that includes period
- datetimelike = False
if dtype.kind in ["m", "M"]:
- datetimelike = True
dtype_sum = np.float64
elif is_integer_dtype(dtype):
dtype_sum = np.float64
@@ -616,9 +633,6 @@ def nanmean(
else:
the_mean = the_sum / count if count > 0 else np.nan
- the_mean = _wrap_results(the_mean, dtype)
- if datetimelike and not skipna:
- the_mean = _mask_datetimelike_result(the_mean, axis, mask, orig_values)
return the_mean
@@ -875,7 +889,7 @@ def nanvar(values, *, axis=None, skipna=True, ddof=1, mask=None):
# precision as the original values array.
if is_float_dtype(dtype):
result = result.astype(dtype)
- return _wrap_results(result, values.dtype)
+ return result
@disallow("M8", "m8")
@@ -930,6 +944,7 @@ def nansem(
def _nanminmax(meth, fill_value_typ):
@bottleneck_switch(name="nan" + meth)
+ @_datetimelike_compat
def reduction(
values: np.ndarray,
*,
@@ -938,13 +953,10 @@ def reduction(
mask: Optional[np.ndarray] = None,
) -> Dtype:
- orig_values = values
values, mask, dtype, dtype_max, fill_value = _get_values(
values, skipna, fill_value_typ=fill_value_typ, mask=mask
)
- datetimelike = orig_values.dtype.kind in ["m", "M"]
-
if (axis is not None and values.shape[axis] == 0) or values.size == 0:
try:
result = getattr(values, meth)(axis, dtype=dtype_max)
@@ -954,12 +966,7 @@ def reduction(
else:
result = getattr(values, meth)(axis)
- result = _wrap_results(result, dtype, fill_value)
result = _maybe_null_out(result, axis, mask, values.shape)
-
- if datetimelike and not skipna:
- result = _mask_datetimelike_result(result, axis, mask, orig_values)
-
return result
return reduction
| https://api.github.com/repos/pandas-dev/pandas/pulls/37723 | 2020-11-09T21:02:07Z | 2020-11-10T03:21:39Z | 2020-11-10T03:21:39Z | 2020-11-10T19:30:14Z | |
BUG: loc.__getitem__[[na_value]] with CategoricalIndex containing NAs | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index e488ca52be8a0..77d55de5d3170 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -469,6 +469,7 @@ Indexing
- Bug in :meth:`Index.where` incorrectly casting numeric values to strings (:issue:`37591`)
- Bug in :meth:`Series.loc` and :meth:`DataFrame.loc` raises when numeric label was given for object :class:`Index` although label was in :class:`Index` (:issue:`26491`)
- Bug in :meth:`DataFrame.loc` returned requested key plus missing values when ``loc`` was applied to single level from :class:`MultiIndex` (:issue:`27104`)
+- Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`CategoricalIndex` using a listlike indexer containing NA values (:issue:`37722`)
Missing
^^^^^^^
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 9b6133d2f7627..d5c078b817ca0 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -113,7 +113,9 @@ def is_bool_indexer(key: Any) -> bool:
if not lib.is_bool_array(key):
na_msg = "Cannot mask with non-boolean array containing NA / NaN values"
- if isna(key).any():
+ if lib.infer_dtype(key) == "boolean" and isna(key).any():
+ # Don't raise on e.g. ["A", "B", np.nan], see
+ # test_loc_getitem_list_of_labels_categoricalindex_with_na
raise ValueError(na_msg)
return False
return True
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 24bd60a7356dd..06df8f85cded7 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -586,11 +586,18 @@ def _convert_list_indexer(self, keyarr):
indexer = self.categories._convert_list_indexer(keyarr)
return Index(self.codes).get_indexer_for(indexer)
- indexer = self.categories.get_indexer(np.asarray(keyarr))
- if (indexer == -1).any():
- raise KeyError(
- "a list-indexer must only include values that are in the categories"
- )
+ msg = "a list-indexer must only include values that are in the categories"
+ if self.hasnans:
+ msg += " or NA"
+ try:
+ codes = self._data._validate_setitem_value(keyarr)
+ except (ValueError, TypeError) as err:
+ if "Index data must be 1-dimensional" in str(err):
+ # e.g. test_setitem_ndarray_3d
+ raise
+ raise KeyError(msg)
+ if not self.hasnans and (codes == -1).any():
+ raise KeyError(msg)
return self.get_indexer(keyarr)
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 20d7662855ab3..fdbdbb5f0c710 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -285,12 +285,8 @@ def test_loc_listlike(self):
tm.assert_frame_equal(result, expected, check_index_type=True)
# not all labels in the categories
- with pytest.raises(
- KeyError,
- match=(
- "'a list-indexer must only include values that are in the categories'"
- ),
- ):
+ msg = "a list-indexer must only include values that are in the categories"
+ with pytest.raises(KeyError, match=msg):
self.df2.loc[["a", "d"]]
def test_loc_listlike_dtypes(self):
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 0d40b5f38e48a..9aab867df4b17 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -12,6 +12,7 @@
import pandas as pd
from pandas import (
+ CategoricalIndex,
DataFrame,
Index,
MultiIndex,
@@ -1645,6 +1646,36 @@ def test_loc_setitem_mask_td64_series_value(self):
tm.assert_frame_equal(df, df_copy)
+class TestLocListlike:
+ @pytest.mark.parametrize("box", [lambda x: x, np.asarray, list])
+ def test_loc_getitem_list_of_labels_categoricalindex_with_na(self, box):
+ # passing a list can include valid categories _or_ NA values
+ ci = CategoricalIndex(["A", "B", np.nan])
+ ser = Series(range(3), index=ci)
+
+ result = ser.loc[box(ci)]
+ tm.assert_series_equal(result, ser)
+
+ result = ser[box(ci)]
+ tm.assert_series_equal(result, ser)
+
+ result = ser.to_frame().loc[box(ci)]
+ tm.assert_frame_equal(result, ser.to_frame())
+
+ ser2 = ser[:-1]
+ ci2 = ci[1:]
+ # but if there are no NAs present, this should raise KeyError
+ msg = "a list-indexer must only include values that are in the categories"
+ with pytest.raises(KeyError, match=msg):
+ ser2.loc[box(ci2)]
+
+ with pytest.raises(KeyError, match=msg):
+ ser2[box(ci2)]
+
+ with pytest.raises(KeyError, match=msg):
+ ser2.to_frame().loc[box(ci2)]
+
+
def test_series_loc_getitem_label_list_missing_values():
# gh-11428
key = np.array(
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 81d866ba63bc0..8e1186b790e3d 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -158,3 +158,11 @@ def test_serializable(obj):
# GH 35611
unpickled = tm.round_trip_pickle(obj)
assert type(obj) == type(unpickled)
+
+
+class TestIsBoolIndexer:
+ def test_non_bool_array_with_na(self):
+ # in particular, this should not raise
+ arr = np.array(["A", "B", np.nan])
+
+ assert not com.is_bool_indexer(arr)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37722 | 2020-11-09T20:41:42Z | 2020-11-13T05:11:47Z | 2020-11-13T05:11:47Z | 2020-11-13T05:16:23Z |
CLN: clean categorical indexes tests | diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 9885765bf53e4..20d7662855ab3 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -59,10 +59,6 @@ def test_loc_scalar(self):
with pytest.raises(TypeError, match=msg):
df.loc["d"] = 10
- msg = (
- "cannot insert an item into a CategoricalIndex that is not "
- "already an existing category"
- )
msg = "'fill_value=d' is not present in this Categorical's categories"
with pytest.raises(ValueError, match=msg):
df.loc["d", "A"] = 10
@@ -74,9 +70,9 @@ def test_loc_scalar(self):
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
- reversed = cat[::-1]
+ reverse = cat[::-1]
exp = np.array([4, 3, 2, 1], dtype=np.int64)
- tm.assert_numpy_array_equal(reversed.__array__(), exp)
+ tm.assert_numpy_array_equal(reverse.__array__(), exp)
df = DataFrame({"value": (np.arange(100) + 1).astype("int64")})
df["D"] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
@@ -170,23 +166,6 @@ def test_slicing_and_getting_ops(self):
res_val = df.loc["j", "cats"]
assert res_val == exp_val
- # ix
- # frame
- # res_df = df.loc["j":"k",[0,1]] # doesn't work?
- res_df = df.loc["j":"k", :]
- tm.assert_frame_equal(res_df, exp_df)
- assert is_categorical_dtype(res_df["cats"].dtype)
-
- # row
- res_row = df.loc["j", :]
- tm.assert_series_equal(res_row, exp_row)
- assert isinstance(res_row["cats"], str)
-
- # col
- res_col = df.loc[:, "cats"]
- tm.assert_series_equal(res_col, exp_col)
- assert is_categorical_dtype(res_col.dtype)
-
# single value
res_val = df.loc["j", df.columns[0]]
assert res_val == exp_val
| Found this while searching for other tests. Deleted code fragement was probably for ix previously. | https://api.github.com/repos/pandas-dev/pandas/pulls/37721 | 2020-11-09T20:05:19Z | 2020-11-09T22:16:02Z | 2020-11-09T22:16:02Z | 2020-11-09T22:16:17Z |
TST: Add test for missing fillvalue in categorical dtype | diff --git a/pandas/tests/indexes/categorical/test_reindex.py b/pandas/tests/indexes/categorical/test_reindex.py
index f59ddc42ce4e4..189582ea635d2 100644
--- a/pandas/tests/indexes/categorical/test_reindex.py
+++ b/pandas/tests/indexes/categorical/test_reindex.py
@@ -1,6 +1,7 @@
import numpy as np
+import pytest
-from pandas import Categorical, CategoricalIndex, Index
+from pandas import Categorical, CategoricalIndex, Index, Series
import pandas._testing as tm
@@ -51,3 +52,10 @@ def test_reindex_empty_index(self):
res, indexer = c.reindex(["a", "b"])
tm.assert_index_equal(res, Index(["a", "b"]), exact=True)
tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp))
+
+ def test_reindex_missing_category(self):
+ # GH: 18185
+ ser = Series([1, 2, 3, 1], dtype="category")
+ msg = "'fill_value=-1' is not present in this Categorical's categories"
+ with pytest.raises(ValueError, match=msg):
+ ser.reindex([1, 2, 3, 4, 5], fill_value=-1)
| - [x] closes #18185
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
cc @jreback Could not find a test for something similar with reindex. | https://api.github.com/repos/pandas-dev/pandas/pulls/37720 | 2020-11-09T19:48:40Z | 2020-11-09T21:24:32Z | 2020-11-09T21:24:31Z | 2020-11-09T21:36:10Z |
TST: Add test for KeyError with MultiIndex | diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 5a9da262dc54d..646520a9ac54f 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -614,3 +614,12 @@ def test_loc_with_nan():
result = df.loc["a"]
expected = DataFrame({"col": [1]}, index=Index([1], name="ind2"))
tm.assert_frame_equal(result, expected)
+
+
+def test_getitem_non_found_tuple():
+ # GH: 25236
+ df = DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"]).set_index(
+ ["a", "b", "c"]
+ )
+ with pytest.raises(KeyError, match=r"\(2\.0, 2\.0, 3\.0\)"):
+ df.loc[(2.0, 2.0, 3.0)]
| - [x] closes #25236
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
cc @jreback Added a test | https://api.github.com/repos/pandas-dev/pandas/pulls/37719 | 2020-11-09T19:32:07Z | 2020-11-10T03:20:06Z | 2020-11-10T03:20:06Z | 2020-11-10T07:33:45Z |
TST: improve readability using dedent in test_to_string.py | diff --git a/pandas/tests/io/formats/test_to_string.py b/pandas/tests/io/formats/test_to_string.py
index a1ed1ba2e8bf5..5d7b4b417006a 100644
--- a/pandas/tests/io/formats/test_to_string.py
+++ b/pandas/tests/io/formats/test_to_string.py
@@ -1,5 +1,6 @@
from datetime import datetime
from io import StringIO
+from textwrap import dedent
import numpy as np
import pytest
@@ -169,12 +170,16 @@ def format_func(x):
return x.strftime("%Y-%m")
result = x.to_string(formatters={"months": format_func})
- expected = "months\n0 2016-01\n1 2016-02"
+ expected = dedent(
+ """\
+ months
+ 0 2016-01
+ 1 2016-02"""
+ )
assert result.strip() == expected
def test_to_string_with_datetime64_hourformatter():
-
x = DataFrame(
{"hod": to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")}
)
@@ -183,20 +188,37 @@ def format_func(x):
return x.strftime("%H:%M")
result = x.to_string(formatters={"hod": format_func})
- expected = "hod\n0 10:10\n1 12:12"
+ expected = dedent(
+ """\
+ hod
+ 0 10:10
+ 1 12:12"""
+ )
assert result.strip() == expected
def test_to_string_with_formatters_unicode():
df = DataFrame({"c/\u03c3": [1, 2, 3]})
result = df.to_string(formatters={"c/\u03c3": str})
- assert result == " c/\u03c3\n" + "0 1\n1 2\n2 3"
+ expected = dedent(
+ """\
+ c/\u03c3
+ 0 1
+ 1 2
+ 2 3"""
+ )
+ assert result == expected
def test_to_string_complex_number_trims_zeros():
s = Series([1.000000 + 1.000000j, 1.0 + 1.0j, 1.05 + 1.0j])
result = s.to_string()
- expected = "0 1.00+1.00j\n1 1.00+1.00j\n2 1.05+1.00j"
+ expected = dedent(
+ """\
+ 0 1.00+1.00j
+ 1 1.00+1.00j
+ 2 1.05+1.00j"""
+ )
assert result == expected
@@ -205,9 +227,12 @@ def test_nullable_float_to_string(float_ea_dtype):
dtype = float_ea_dtype
s = Series([0.0, 1.0, None], dtype=dtype)
result = s.to_string()
- expected = """0 0.0
-1 1.0
-2 <NA>"""
+ expected = dedent(
+ """\
+ 0 0.0
+ 1 1.0
+ 2 <NA>"""
+ )
assert result == expected
@@ -216,9 +241,12 @@ def test_nullable_int_to_string(any_nullable_int_dtype):
dtype = any_nullable_int_dtype
s = Series([0, 1, None], dtype=dtype)
result = s.to_string()
- expected = """0 0
-1 1
-2 <NA>"""
+ expected = dedent(
+ """\
+ 0 0
+ 1 1
+ 2 <NA>"""
+ )
assert result == expected
@@ -227,7 +255,10 @@ def test_to_string_na_rep_and_float_format(na_rep):
# GH 13828
df = DataFrame([["A", 1.2225], ["A", None]], columns=["Group", "Data"])
result = df.to_string(na_rep=na_rep, float_format="{:.2f}".format)
- expected = f""" Group Data
-0 A 1.22
-1 A {na_rep}"""
+ expected = dedent(
+ f"""\
+ Group Data
+ 0 A 1.22
+ 1 A {na_rep}"""
+ )
assert result == expected
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Improve readability of expected outputs using dedent in ``pandas/tests/io/formats/test_to_string.py`` | https://api.github.com/repos/pandas-dev/pandas/pulls/37718 | 2020-11-09T14:21:22Z | 2020-11-09T18:57:10Z | 2020-11-09T18:57:09Z | 2020-11-10T04:43:51Z |
TST: move and reformat latex_na_rep test | diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index 7cf7ed3f77609..81e8e0bd2b526 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -972,6 +972,30 @@ def test_to_latex_float_format_no_fixed_width_integer(self):
)
assert result == expected
+ @pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
+ def test_to_latex_na_rep_and_float_format(self, na_rep):
+ df = DataFrame(
+ [
+ ["A", 1.2225],
+ ["A", None],
+ ],
+ columns=["Group", "Data"],
+ )
+ result = df.to_latex(na_rep=na_rep, float_format="{:.2f}".format)
+ expected = _dedent(
+ fr"""
+ \begin{{tabular}}{{llr}}
+ \toprule
+ {{}} & Group & Data \\
+ \midrule
+ 0 & A & 1.22 \\
+ 1 & A & {na_rep} \\
+ \bottomrule
+ \end{{tabular}}
+ """
+ )
+ assert result == expected
+
class TestToLatexMultiindex:
@pytest.fixture
@@ -1431,24 +1455,3 @@ def test_get_strrow_multindex_multicolumn(self, row_num, expected):
)
assert row_string_converter.get_strrow(row_num=row_num) == expected
-
- @pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
- def test_to_latex_na_rep_and_float_format(self, na_rep):
- df = DataFrame(
- [
- ["A", 1.2225],
- ["A", None],
- ],
- columns=["Group", "Data"],
- )
- result = df.to_latex(na_rep=na_rep, float_format="{:.2f}".format)
- expected = f"""\\begin{{tabular}}{{llr}}
-\\toprule
-{{}} & Group & Data \\\\
-\\midrule
-0 & A & 1.22 \\\\
-1 & A & {na_rep} \\\\
-\\bottomrule
-\\end{{tabular}}
-"""
- assert result == expected
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Move test to a more suitable location (``class TestToLatexFormatters``)
and reformat using ``_dedent`` and raw string for better readability. | https://api.github.com/repos/pandas-dev/pandas/pulls/37717 | 2020-11-09T14:07:30Z | 2020-11-09T18:56:08Z | 2020-11-09T18:56:08Z | 2020-11-10T04:44:18Z |
CLN: cleanup, deduplicate plotting/test_frame | diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 2c635856cd255..11a46858ba281 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -2047,7 +2047,6 @@ def test_no_legend(self):
df = DataFrame(np.random.rand(3, 3), columns=["a", "b", "c"])
for kind in kinds:
-
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
@@ -2067,8 +2066,8 @@ def test_style_by_column(self):
fig.clf()
fig.add_subplot(111)
ax = df.plot(style=markers)
- for i, l in enumerate(ax.get_lines()[: len(markers)]):
- assert l.get_marker() == markers[i]
+ for idx, line in enumerate(ax.get_lines()[: len(markers)]):
+ assert line.get_marker() == markers[idx]
@pytest.mark.slow
def test_line_label_none(self):
@@ -2141,8 +2140,7 @@ def test_line_colors_and_styles_subplots(self):
axes = df.plot(subplots=True)
for ax, c in zip(axes, list(default_colors)):
- c = [c]
- self._check_colors(ax.get_lines(), linecolors=c)
+ self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# single color char
@@ -2584,32 +2582,30 @@ def test_hexbin_with_c(self):
assert len(ax.collections) == 1
@pytest.mark.slow
- def test_hexbin_cmap(self):
+ @pytest.mark.parametrize(
+ "kwargs, expected",
+ [
+ ({}, "BuGn"), # default cmap
+ ({"colormap": "cubehelix"}, "cubehelix"),
+ ({"cmap": "YlGn"}, "YlGn"),
+ ],
+ )
+ def test_hexbin_cmap(self, kwargs, expected):
df = self.hexbin_df
-
- # Default to BuGn
- ax = df.plot.hexbin(x="A", y="B")
- assert ax.collections[0].cmap.name == "BuGn"
-
- cm = "cubehelix"
- ax = df.plot.hexbin(x="A", y="B", colormap=cm)
- assert ax.collections[0].cmap.name == cm
+ ax = df.plot.hexbin(x="A", y="B", **kwargs)
+ assert ax.collections[0].cmap.name == expected
@pytest.mark.slow
def test_no_color_bar(self):
df = self.hexbin_df
-
ax = df.plot.hexbin(x="A", y="B", colorbar=None)
assert ax.collections[0].colorbar is None
@pytest.mark.slow
- def test_allow_cmap(self):
+ def test_mixing_cmap_and_colormap_raises(self):
df = self.hexbin_df
-
- ax = df.plot.hexbin(x="A", y="B", cmap="YlGn")
- assert ax.collections[0].cmap.name == "YlGn"
-
- with pytest.raises(TypeError):
+ msg = "Only specify one of `cmap` and `colormap`"
+ with pytest.raises(TypeError, match=msg):
df.plot.hexbin(x="A", y="B", cmap="YlGn", colormap="BuGn")
@pytest.mark.slow
@@ -2671,12 +2667,13 @@ def test_pie_df_nan(self):
expected[i] = ""
result = [x.get_text() for x in ax.texts]
assert result == expected
+
# legend labels
# NaN's not included in legend with subplots
# see https://github.com/pandas-dev/pandas/issues/8390
- assert [x.get_text() for x in ax.get_legend().get_texts()] == base_expected[
- :i
- ] + base_expected[i + 1 :]
+ result_labels = [x.get_text() for x in ax.get_legend().get_texts()]
+ expected_labels = base_expected[:i] + base_expected[i + 1 :]
+ assert result_labels == expected_labels
@pytest.mark.slow
def test_errorbar_plot(self):
@@ -2839,7 +2836,6 @@ def test_errorbar_timeseries(self, kind):
self._check_has_errorbars(axes, xerr=0, yerr=1)
def test_errorbar_asymmetrical(self):
-
np.random.seed(0)
err = np.random.rand(3, 2, 5)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
1. Make more reasonable var names
2. Get rid of one test in favor of parametrization in another one | https://api.github.com/repos/pandas-dev/pandas/pulls/37714 | 2020-11-09T12:23:17Z | 2020-11-09T18:57:51Z | 2020-11-09T18:57:51Z | 2020-11-10T04:44:38Z |
Backport PR #37657 on branch 1.1.x: BUG: unpickling modifies Block.ndim | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index a122154904996..e0fa68e3b9f80 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -24,6 +24,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
- Bug in metadata propagation for ``groupby`` iterator (:issue:`37343`)
+- Bug in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index c9ac9cb0f140a..4c52343d08513 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -284,14 +284,17 @@ def __getstate__(self):
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
- def unpickle_block(values, mgr_locs):
- return make_block(values, placement=mgr_locs)
+ def unpickle_block(values, mgr_locs, ndim: int):
+ # TODO(EA2D): ndim would be unnecessary with 2D EAs
+ return make_block(values, placement=mgr_locs, ndim=ndim)
if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
state = state[3]["0.14.1"]
self.axes = [ensure_index(ax) for ax in state["axes"]]
+ ndim = len(self.axes)
self.blocks = tuple(
- unpickle_block(b["values"], b["mgr_locs"]) for b in state["blocks"]
+ unpickle_block(b["values"], b["mgr_locs"], ndim=ndim)
+ for b in state["blocks"]
)
else:
raise NotImplementedError("pre-0.14.1 pickles are no longer supported")
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index e4d43db7834e3..376091c62619b 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -477,3 +477,15 @@ def test_read_pickle_with_subclass():
tm.assert_series_equal(result[0], expected[0])
assert isinstance(result[1], MyTz)
+
+
+def test_pickle_preserves_block_ndim():
+ # GH#37631
+ ser = pd.Series(list("abc")).astype("category").iloc[[0]]
+ res = tm.round_trip_pickle(ser)
+
+ assert res._mgr.blocks[0].ndim == 1
+ assert res._mgr.blocks[0].shape == (1,)
+
+ # GH#37631 OP issue was about indexing, underlying problem was pickle
+ tm.assert_series_equal(res[[True]], ser)
| Backport PR #37657 | https://api.github.com/repos/pandas-dev/pandas/pulls/37713 | 2020-11-09T11:19:25Z | 2020-11-09T12:49:09Z | 2020-11-09T12:49:09Z | 2020-11-09T12:49:28Z |
REF: remove ObjectBlock._replace_single | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index fd23b89365496..ed77a210b6913 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -813,17 +813,50 @@ def replace(
)
return blocks
- def _replace_single(
+ def _replace_regex(
self,
to_replace,
value,
inplace: bool = False,
- regex: bool = False,
convert: bool = True,
mask=None,
) -> List["Block"]:
- """ no-op on a non-ObjectBlock """
- return [self] if inplace else [self.copy()]
+ """
+ Replace elements by the given value.
+
+ Parameters
+ ----------
+ to_replace : object or pattern
+ Scalar to replace or regular expression to match.
+ value : object
+ Replacement object.
+ inplace : bool, default False
+ Perform inplace modification.
+ convert : bool, default True
+ If true, try to coerce any object types to better types.
+ mask : array-like of bool, optional
+ True indicate corresponding element is ignored.
+
+ Returns
+ -------
+ List[Block]
+ """
+ if not self._can_hold_element(to_replace):
+ # i.e. only ObjectBlock, but could in principle include a
+ # String ExtensionBlock
+ return [self] if inplace else [self.copy()]
+
+ rx = re.compile(to_replace)
+
+ new_values = self.values if inplace else self.values.copy()
+ replace_regex(new_values, rx, value, mask)
+
+ block = self.make_block(new_values)
+ if convert:
+ nbs = block.convert(numeric=False)
+ else:
+ nbs = [block]
+ return nbs
def _replace_list(
self,
@@ -1598,14 +1631,16 @@ def _replace_coerce(
self = self.coerce_to_target_dtype(value)
return self.putmask(mask, value, inplace=inplace)
else:
- return self._replace_single(
- to_replace,
- value,
- inplace=inplace,
- regex=regex,
- convert=False,
- mask=mask,
- )
+ regex = _should_use_regex(regex, to_replace)
+ if regex:
+ return self._replace_regex(
+ to_replace,
+ value,
+ inplace=inplace,
+ convert=False,
+ mask=mask,
+ )
+ return self.replace(to_replace, value, inplace=inplace, regex=False)
return [self]
@@ -2506,72 +2541,26 @@ def replace(
# here with listlike to_replace or value, as those cases
# go through _replace_list
- if is_re(to_replace) or regex:
- return self._replace_single(to_replace, value, inplace=inplace, regex=True)
- else:
- return super().replace(to_replace, value, inplace=inplace, regex=regex)
-
- def _replace_single(
- self,
- to_replace,
- value,
- inplace: bool = False,
- regex: bool = False,
- convert: bool = True,
- mask=None,
- ) -> List["Block"]:
- """
- Replace elements by the given value.
-
- Parameters
- ----------
- to_replace : object or pattern
- Scalar to replace or regular expression to match.
- value : object
- Replacement object.
- inplace : bool, default False
- Perform inplace modification.
- regex : bool, default False
- If true, perform regular expression substitution.
- convert : bool, default True
- If true, try to coerce any object types to better types.
- mask : array-like of bool, optional
- True indicate corresponding element is ignored.
-
- Returns
- -------
- List[Block]
- """
- inplace = validate_bool_kwarg(inplace, "inplace")
-
- # to_replace is regex compilable
- regex = regex and is_re_compilable(to_replace)
+ regex = _should_use_regex(regex, to_replace)
- # try to get the pattern attribute (compiled re) or it's a string
- if is_re(to_replace):
- pattern = to_replace.pattern
+ if regex:
+ return self._replace_regex(to_replace, value, inplace=inplace)
else:
- pattern = to_replace
+ return super().replace(to_replace, value, inplace=inplace, regex=False)
- # if the pattern is not empty and to_replace is either a string or a
- # regex
- if regex and pattern:
- rx = re.compile(to_replace)
- else:
- # if the thing to replace is not a string or compiled regex call
- # the superclass method -> to_replace is some kind of object
- return super().replace(to_replace, value, inplace=inplace, regex=regex)
- new_values = self.values if inplace else self.values.copy()
- replace_regex(new_values, rx, value, mask)
+def _should_use_regex(regex: bool, to_replace: Any) -> bool:
+ """
+ Decide whether to treat `to_replace` as a regular expression.
+ """
+ if is_re(to_replace):
+ regex = True
- # convert
- block = self.make_block(new_values)
- if convert:
- nbs = block.convert(numeric=False)
- else:
- nbs = [block]
- return nbs
+ regex = regex and is_re_compilable(to_replace)
+
+ # Don't use regex if the pattern is empty.
+ regex = regex and re.compile(to_replace).pattern != ""
+ return regex
class CategoricalBlock(ExtensionBlock):
| https://api.github.com/repos/pandas-dev/pandas/pulls/37710 | 2020-11-09T04:07:19Z | 2020-11-09T12:52:49Z | 2020-11-09T12:52:49Z | 2020-11-09T15:53:13Z | |
Bug in loc raised Error when non-integer slice was given for MultiIndex | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 08c528fb484c8..48561b50f66ae 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -620,6 +620,7 @@ Indexing
- Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`CategoricalIndex` using listlike indexer that contains elements that are in the index's ``categories`` but not in the index itself failing to raise ``KeyError`` (:issue:`37901`)
- Bug in :meth:`DataFrame.iloc` and :meth:`Series.iloc` aligning objects in ``__setitem__`` (:issue:`22046`)
- Bug in :meth:`DataFrame.loc` did not raise ``KeyError`` when missing combination was given with ``slice(None)`` for remaining levels (:issue:`19556`)
+- Bug in :meth:`DataFrame.loc` raising ``TypeError`` when non-integer slice was given to select values from :class:`MultiIndex` (:issue:`25165`, :issue:`24263`)
Missing
^^^^^^^
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 11dd3598b4864..be0b0c5208b1c 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2764,9 +2764,17 @@ def _partial_tup_index(self, tup, side="left"):
return start + section.searchsorted(loc, side=side)
idx = self._get_loc_single_level_index(lev, lab)
- if k < n - 1:
+ if isinstance(idx, slice) and k < n - 1:
+ # Get start and end value from slice, necessary when a non-integer
+ # interval is given as input GH#37707
+ start = idx.start
+ end = idx.stop
+ elif k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
+ elif isinstance(idx, slice):
+ idx = idx.start
+ return start + section.searchsorted(idx, side=side)
else:
return start + section.searchsorted(idx, side=side)
@@ -3102,6 +3110,8 @@ def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
+ elif isinstance(start, slice):
+ stop = len(level_index)
else:
stop = len(level_index) - 1
step = key.step
@@ -3136,22 +3146,27 @@ def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
else:
- code = self._get_loc_single_level_index(level_index, key)
+ idx = self._get_loc_single_level_index(level_index, key)
if level > 0 or self.lexsort_depth == 0:
# Desired level is not sorted
- locs = np.array(level_codes == code, dtype=bool, copy=False)
+ locs = np.array(level_codes == idx, dtype=bool, copy=False)
if not locs.any():
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return locs
- i = level_codes.searchsorted(code, side="left")
- j = level_codes.searchsorted(code, side="right")
- if i == j:
+ if isinstance(idx, slice):
+ start = idx.start
+ end = idx.stop
+ else:
+ start = level_codes.searchsorted(idx, side="left")
+ end = level_codes.searchsorted(idx, side="right")
+
+ if start == end:
# The label is present in self.levels[level] but unused:
raise KeyError(key)
- return slice(i, j)
+ return slice(start, end)
def get_locs(self, seq):
"""
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index 2b7a6ee304891..e0241c2c5eadd 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -768,7 +768,7 @@ def test_timestamp_multiindex_indexer():
[
pd.date_range(
start="2019-01-02T00:15:33",
- end="2019-01-05T02:15:33",
+ end="2019-01-05T03:15:33",
freq="H",
name="date",
),
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 22ffca46a4829..cd6176722245b 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -659,3 +659,12 @@ def test_getitem_non_found_tuple():
)
with pytest.raises(KeyError, match=r"\(2\.0, 2\.0, 3\.0\)"):
df.loc[(2.0, 2.0, 3.0)]
+
+
+def test_get_loc_datetime_index():
+ # GH#24263
+ index = pd.date_range("2001-01-01", periods=100)
+ mi = MultiIndex.from_arrays([index])
+ # Check if get_loc matches for Index and MultiIndex
+ assert mi.get_loc("2001-01") == slice(0, 31, None)
+ assert index.get_loc("2001-01") == slice(0, 31, None)
diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py
index 538aa1d3a1164..9c356b81b85db 100644
--- a/pandas/tests/indexing/multiindex/test_partial.py
+++ b/pandas/tests/indexing/multiindex/test_partial.py
@@ -1,7 +1,14 @@
import numpy as np
import pytest
-from pandas import DataFrame, Float64Index, Int64Index, MultiIndex
+from pandas import (
+ DataFrame,
+ Float64Index,
+ Int64Index,
+ MultiIndex,
+ date_range,
+ to_datetime,
+)
import pandas._testing as tm
@@ -208,6 +215,45 @@ def test_setitem_multiple_partial(self, multiindex_dataframe_random_data):
expected.loc["bar"] = 0
tm.assert_series_equal(result, expected)
+ @pytest.mark.parametrize(
+ "indexer, exp_idx, exp_values",
+ [
+ (slice("2019-2", None), [to_datetime("2019-02-01")], [2, 3]),
+ (
+ slice(None, "2019-2"),
+ date_range("2019", periods=2, freq="MS"),
+ [0, 1, 2, 3],
+ ),
+ ],
+ )
+ def test_partial_getitem_loc_datetime(self, indexer, exp_idx, exp_values):
+ # GH: 25165
+ date_idx = date_range("2019", periods=2, freq="MS")
+ df = DataFrame(
+ list(range(4)),
+ index=MultiIndex.from_product([date_idx, [0, 1]], names=["x", "y"]),
+ )
+ expected = DataFrame(
+ exp_values,
+ index=MultiIndex.from_product([exp_idx, [0, 1]], names=["x", "y"]),
+ )
+ result = df[indexer]
+ tm.assert_frame_equal(result, expected)
+ result = df.loc[indexer]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.loc(axis=0)[indexer]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.loc[indexer, :]
+ tm.assert_frame_equal(result, expected)
+
+ df2 = df.swaplevel(0, 1).sort_index()
+ expected = expected.swaplevel(0, 1).sort_index()
+
+ result = df2.loc[:, indexer, :]
+ tm.assert_frame_equal(result, expected)
+
def test_loc_getitem_partial_both_axis():
# gh-12660
| - [x] closes #25165
- [x] closes #24263
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
A slice return caused issues everywhere, when this function was used. So I unpacked it right in there. | https://api.github.com/repos/pandas-dev/pandas/pulls/37707 | 2020-11-09T01:05:41Z | 2020-11-24T21:59:58Z | 2020-11-24T21:59:57Z | 2020-11-24T22:01:54Z |
BUG: Fix return of missing values when applying loc to single level of MultiIndex | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index d4d28dde52d58..ea995a4327ee8 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -467,6 +467,7 @@ Indexing
- Bug in :meth:`Series.__getitem__` when using an unsigned integer array as an indexer giving incorrect results or segfaulting instead of raising ``KeyError`` (:issue:`37218`)
- Bug in :meth:`Index.where` incorrectly casting numeric values to strings (:issue:`37591`)
- Bug in :meth:`Series.loc` and :meth:`DataFrame.loc` raises when numeric label was given for object :class:`Index` although label was in :class:`Index` (:issue:`26491`)
+- Bug in :meth:`DataFrame.loc` returned requested key plus missing values when ``loc`` was applied to single level from :class:`MultiIndex` (:issue:`27104`)
Missing
^^^^^^^
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index fd402ef27a3a9..cb5641a74e60b 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3842,9 +3842,9 @@ def _get_leaf_sorter(labels):
else:
left_lev_indexer = ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level))
-
+ old_codes = left.codes[level]
new_lev_codes = algos.take_nd(
- rev_indexer, left.codes[level], allow_fill=False
+ rev_indexer, old_codes[old_codes != -1], allow_fill=False
)
new_codes = list(left.codes)
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index d79af1ea6b804..5a9da262dc54d 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -598,3 +598,19 @@ def test_getitem_loc_commutability(multiindex_year_month_day_dataframe_random_da
result = ser[2000, 5]
expected = df.loc[2000, 5]["A"]
tm.assert_series_equal(result, expected)
+
+
+def test_loc_with_nan():
+ # GH: 27104
+ df = DataFrame(
+ {"col": [1, 2, 5], "ind1": ["a", "d", np.nan], "ind2": [1, 4, 5]}
+ ).set_index(["ind1", "ind2"])
+ result = df.loc[["a"]]
+ expected = DataFrame(
+ {"col": [1]}, index=MultiIndex.from_tuples([("a", 1)], names=["ind1", "ind2"])
+ )
+ tm.assert_frame_equal(result, expected)
+
+ result = df.loc["a"]
+ expected = DataFrame({"col": [1]}, index=Index([1], name="ind2"))
+ tm.assert_frame_equal(result, expected)
| - [x] closes #27104
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This confused ``-1`` representing missing values in input with the ``-1`` signaling which components to exclude from output. | https://api.github.com/repos/pandas-dev/pandas/pulls/37706 | 2020-11-08T22:31:11Z | 2020-11-09T20:27:35Z | 2020-11-09T20:27:35Z | 2020-11-09T20:29:14Z |
REF: simplify NDFrame.replace, ObjectBlock.replace | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index bea650c1b50fd..02fa7308e7ee8 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6744,25 +6744,25 @@ def replace(
else:
raise TypeError("value argument must be scalar, dict, or Series")
- elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
- if is_list_like(value):
- if len(to_replace) != len(value):
- raise ValueError(
- f"Replacement lists must match in length. "
- f"Expecting {len(to_replace)} got {len(value)} "
- )
- self._consolidate_inplace()
- new_data = self._mgr.replace_list(
- src_list=to_replace,
- dest_list=value,
- inplace=inplace,
- regex=regex,
+ elif is_list_like(to_replace):
+ if not is_list_like(value):
+ # e.g. to_replace = [NA, ''] and value is 0,
+ # so we replace NA with 0 and then replace '' with 0
+ value = [value] * len(to_replace)
+
+ # e.g. we have to_replace = [NA, ''] and value = [0, 'missing']
+ if len(to_replace) != len(value):
+ raise ValueError(
+ f"Replacement lists must match in length. "
+ f"Expecting {len(to_replace)} got {len(value)} "
)
+ new_data = self._mgr.replace_list(
+ src_list=to_replace,
+ dest_list=value,
+ inplace=inplace,
+ regex=regex,
+ )
- else: # [NA, ''] -> 0
- new_data = self._mgr.replace(
- to_replace=to_replace, value=value, inplace=inplace, regex=regex
- )
elif to_replace is None:
if not (
is_re_compilable(regex)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 8e01aaa396265..9e6480dd709f0 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -2502,39 +2502,14 @@ def replace(
inplace: bool = False,
regex: bool = False,
) -> List["Block"]:
- to_rep_is_list = is_list_like(to_replace)
- value_is_list = is_list_like(value)
- both_lists = to_rep_is_list and value_is_list
- either_list = to_rep_is_list or value_is_list
+ # Note: the checks we do in NDFrame.replace ensure we never get
+ # here with listlike to_replace or value, as those cases
+ # go through _replace_list
- result_blocks: List["Block"] = []
- blocks: List["Block"] = [self]
-
- if not either_list and is_re(to_replace):
+ if is_re(to_replace) or regex:
return self._replace_single(to_replace, value, inplace=inplace, regex=True)
- elif not (either_list or regex):
+ else:
return super().replace(to_replace, value, inplace=inplace, regex=regex)
- elif both_lists:
- for to_rep, v in zip(to_replace, value):
- result_blocks = []
- for b in blocks:
- result = b._replace_single(to_rep, v, inplace=inplace, regex=regex)
- result_blocks.extend(result)
- blocks = result_blocks
- return result_blocks
-
- elif to_rep_is_list and regex:
- for to_rep in to_replace:
- result_blocks = []
- for b in blocks:
- result = b._replace_single(
- to_rep, value, inplace=inplace, regex=regex
- )
- result_blocks.extend(result)
- blocks = result_blocks
- return result_blocks
-
- return self._replace_single(to_replace, value, inplace=inplace, regex=regex)
def _replace_single(
self,
@@ -2627,6 +2602,19 @@ def re_replacer(s):
class CategoricalBlock(ExtensionBlock):
__slots__ = ()
+ def _replace_list(
+ self,
+ src_list: List[Any],
+ dest_list: List[Any],
+ inplace: bool = False,
+ regex: bool = False,
+ ) -> List["Block"]:
+ if len(algos.unique(dest_list)) == 1:
+ # We got likely here by tiling value inside NDFrame.replace,
+ # so un-tile here
+ return self.replace(src_list, dest_list[0], inplace, regex)
+ return super()._replace_list(src_list, dest_list, inplace, regex)
+
def replace(
self,
to_replace,
diff --git a/pandas/tests/arrays/categorical/test_replace.py b/pandas/tests/arrays/categorical/test_replace.py
index 5889195ad68db..007c4bdea17f8 100644
--- a/pandas/tests/arrays/categorical/test_replace.py
+++ b/pandas/tests/arrays/categorical/test_replace.py
@@ -21,6 +21,7 @@
((1, 2, 4), 5, [5, 5, 3], False),
((5, 6), 2, [1, 2, 3], False),
# many-to-many, handled outside of Categorical and results in separate dtype
+ # except for cases with only 1 unique entry in `value`
([1], [2], [2, 2, 3], True),
([1, 4], [5, 2], [5, 2, 3], True),
# check_categorical sorts categories, which crashes on mixed dtypes
@@ -30,7 +31,7 @@
)
def test_replace(to_replace, value, expected, flip_categories):
# GH 31720
- stays_categorical = not isinstance(value, list)
+ stays_categorical = not isinstance(value, list) or len(pd.unique(value)) == 1
s = pd.Series([1, 2, 3], dtype="category")
result = s.replace(to_replace, value)
| This makes it so that we only call BlockManager.replace from one spot, so we can rule out more cases when reasoning about the arguments in ObjectBlock.replace.
This will have a merge conflict with #37696; doesnt matter which one goes in first. One both are in, there are further simplifications available.
I'm hopeful that we'll be able to get rid of Categorical.replace altogether (and with it, CategoricalBlock.replace, CategoricalBlock._replace_list), but that's a few steps away at best.
| https://api.github.com/repos/pandas-dev/pandas/pulls/37704 | 2020-11-08T18:16:28Z | 2020-11-09T00:37:00Z | 2020-11-09T00:37:00Z | 2020-11-09T00:39:32Z |
remove unnecessary use of Appender | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 02fa7308e7ee8..32dad8f9e3801 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10724,6 +10724,7 @@ def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
cls.all = all
@doc(
+ NDFrame.mad,
desc="Return the mean absolute deviation of the values "
"over the requested axis.",
name1=name1,
@@ -10732,7 +10733,6 @@ def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
see_also="",
examples="",
)
- @Appender(NDFrame.mad.__doc__)
def mad(self, axis=None, skipna=None, level=None):
return NDFrame.mad(self, axis, skipna, level)
| - [x] ref #31942
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] removes the last use of Appender() in NDFrame
| https://api.github.com/repos/pandas-dev/pandas/pulls/37703 | 2020-11-08T15:44:52Z | 2020-11-11T20:30:48Z | 2020-11-11T20:30:48Z | 2020-11-11T20:31:02Z |
CLN: Simplify groupby head/tail tests | diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py
index df1d7819a1894..10394ea997775 100644
--- a/pandas/tests/groupby/test_nth.py
+++ b/pandas/tests/groupby/test_nth.py
@@ -487,53 +487,30 @@ def test_nth_multi_index_as_expected():
tm.assert_frame_equal(result, expected)
-def test_groupby_head_tail():
+@pytest.mark.parametrize(
+ "op, n, expected_rows",
+ [
+ ("head", -1, []),
+ ("head", 0, []),
+ ("head", 1, [0, 2]),
+ ("head", 7, [0, 1, 2]),
+ ("tail", -1, []),
+ ("tail", 0, []),
+ ("tail", 1, [1, 2]),
+ ("tail", 7, [0, 1, 2]),
+ ],
+)
+@pytest.mark.parametrize("columns", [None, [], ["A"], ["B"], ["A", "B"]])
+@pytest.mark.parametrize("as_index", [True, False])
+def test_groupby_head_tail(op, n, expected_rows, columns, as_index):
df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
- g_as = df.groupby("A", as_index=True)
- g_not_as = df.groupby("A", as_index=False)
-
- # as_index= False, much easier
- tm.assert_frame_equal(df.loc[[0, 2]], g_not_as.head(1))
- tm.assert_frame_equal(df.loc[[1, 2]], g_not_as.tail(1))
-
- empty_not_as = DataFrame(columns=df.columns, index=Index([], dtype=df.index.dtype))
- empty_not_as["A"] = empty_not_as["A"].astype(df.A.dtype)
- empty_not_as["B"] = empty_not_as["B"].astype(df.B.dtype)
- tm.assert_frame_equal(empty_not_as, g_not_as.head(0))
- tm.assert_frame_equal(empty_not_as, g_not_as.tail(0))
- tm.assert_frame_equal(empty_not_as, g_not_as.head(-1))
- tm.assert_frame_equal(empty_not_as, g_not_as.tail(-1))
-
- tm.assert_frame_equal(df, g_not_as.head(7)) # contains all
- tm.assert_frame_equal(df, g_not_as.tail(7))
-
- # as_index=True, (used to be different)
- df_as = df
-
- tm.assert_frame_equal(df_as.loc[[0, 2]], g_as.head(1))
- tm.assert_frame_equal(df_as.loc[[1, 2]], g_as.tail(1))
-
- empty_as = DataFrame(index=df_as.index[:0], columns=df.columns)
- empty_as["A"] = empty_not_as["A"].astype(df.A.dtype)
- empty_as["B"] = empty_not_as["B"].astype(df.B.dtype)
- tm.assert_frame_equal(empty_as, g_as.head(0))
- tm.assert_frame_equal(empty_as, g_as.tail(0))
- tm.assert_frame_equal(empty_as, g_as.head(-1))
- tm.assert_frame_equal(empty_as, g_as.tail(-1))
-
- tm.assert_frame_equal(df_as, g_as.head(7)) # contains all
- tm.assert_frame_equal(df_as, g_as.tail(7))
-
- # test with selection
- tm.assert_frame_equal(g_as[[]].head(1), df_as.loc[[0, 2], []])
- tm.assert_frame_equal(g_as[["A"]].head(1), df_as.loc[[0, 2], ["A"]])
- tm.assert_frame_equal(g_as[["B"]].head(1), df_as.loc[[0, 2], ["B"]])
- tm.assert_frame_equal(g_as[["A", "B"]].head(1), df_as.loc[[0, 2]])
-
- tm.assert_frame_equal(g_not_as[[]].head(1), df_as.loc[[0, 2], []])
- tm.assert_frame_equal(g_not_as[["A"]].head(1), df_as.loc[[0, 2], ["A"]])
- tm.assert_frame_equal(g_not_as[["B"]].head(1), df_as.loc[[0, 2], ["B"]])
- tm.assert_frame_equal(g_not_as[["A", "B"]].head(1), df_as.loc[[0, 2]])
+ g = df.groupby("A", as_index=as_index)
+ expected = df.iloc[expected_rows]
+ if columns is not None:
+ g = g[columns]
+ expected = expected[columns]
+ result = getattr(g, op)(n)
+ tm.assert_frame_equal(result, expected)
def test_group_selection_cache():
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Going to add axis=1 tests as part of resolution of #9772, simplifying test first. | https://api.github.com/repos/pandas-dev/pandas/pulls/37702 | 2020-11-08T15:08:24Z | 2020-11-09T00:39:05Z | 2020-11-09T00:39:05Z | 2020-12-06T14:04:53Z |
DOC: Highlight pre-commit section | diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst
index 5aa1c1099d6e0..9477a9ac79dd6 100644
--- a/doc/source/development/code_style.rst
+++ b/doc/source/development/code_style.rst
@@ -12,8 +12,9 @@ pandas code style guide
pandas follows the `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_
standard and uses `Black <https://black.readthedocs.io/en/stable/>`_
and `Flake8 <https://flake8.pycqa.org/en/latest/>`_ to ensure a
-consistent code format throughout the project. For details see the
-:ref:`contributing guide to pandas<contributing.code-formatting>`.
+consistent code format throughout the project. We encourage you to use
+:ref:`pre-commit <contributing.pre-commit>` to automatically run ``black``,
+``flake8``, ``isort``, and related code checks when you make a git commit.
Patterns
========
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index 08d8451127732..4261d79a5e3f5 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -638,7 +638,46 @@ In addition to ``./ci/code_checks.sh``, some extra checks are run by
``pre-commit`` - see :ref:`here <contributing.pre-commit>` for how to
run them.
-Additional standards are outlined on the :ref:`pandas code style guide <code_style>`
+Additional standards are outlined on the :ref:`pandas code style guide <code_style>`.
+
+.. _contributing.pre-commit:
+
+Pre-commit
+----------
+
+You can run many of these styling checks manually as we have described above. However,
+we encourage you to use `pre-commit hooks <https://pre-commit.com/>`_ instead
+to automatically run ``black``, ``flake8``, ``isort`` when you make a git commit. This
+can be done by installing ``pre-commit``::
+
+ pip install pre-commit
+
+and then running::
+
+ pre-commit install
+
+from the root of the pandas repository. Now all of the styling checks will be
+run each time you commit changes without your needing to run each one manually.
+In addition, using ``pre-commit`` will also allow you to more easily
+remain up-to-date with our code checks as they change.
+
+Note that if needed, you can skip these checks with ``git commit --no-verify``.
+
+If you don't want to use ``pre-commit`` as part of your workflow, you can still use it
+to run its checks with::
+
+ pre-commit run --files <files you have modified>
+
+without needing to have done ``pre-commit install`` beforehand.
+
+.. note::
+
+ If you have conflicting installations of ``virtualenv``, then you may get an
+ error - see `here <https://github.com/pypa/virtualenv/issues/1875>`_.
+
+ Also, due to a `bug in virtualenv <https://github.com/pypa/virtualenv/issues/1986>`_,
+ you may run into issues if you're using conda. To solve this, you can downgrade
+ ``virtualenv`` to version ``20.0.33``.
Optional dependencies
---------------------
@@ -712,7 +751,7 @@ Python (PEP8 / black)
pandas follows the `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_ standard
and uses `Black <https://black.readthedocs.io/en/stable/>`_ and
`Flake8 <http://flake8.pycqa.org/en/latest/>`_ to ensure a consistent code
-format throughout the project.
+format throughout the project. We encourage you to use :ref:`pre-commit <contributing.pre-commit>`.
:ref:`Continuous Integration <contributing.ci>` will run those tools and
report any stylistic errors in your code. Therefore, it is helpful before
@@ -727,9 +766,6 @@ apply ``black`` as you edit files.
You should use a ``black`` version 20.8b1 as previous versions are not compatible
with the pandas codebase.
-If you wish to run these checks automatically, we encourage you to use
-:ref:`pre-commits <contributing.pre-commit>` instead.
-
One caveat about ``git diff upstream/master -u -- "*.py" | flake8 --diff``: this
command will catch any stylistic errors in your changes specifically, but
be beware it may not catch all of them. For example, if you delete the only
@@ -807,45 +843,6 @@ Where similar caveats apply if you are on OSX or Windows.
You can then verify the changes look ok, then git :ref:`commit <contributing.commit-code>` and :ref:`push <contributing.push-code>`.
-.. _contributing.pre-commit:
-
-Pre-commit
-~~~~~~~~~~
-
-You can run many of these styling checks manually as we have described above. However,
-we encourage you to use `pre-commit hooks <https://pre-commit.com/>`_ instead
-to automatically run ``black``, ``flake8``, ``isort`` when you make a git commit. This
-can be done by installing ``pre-commit``::
-
- pip install pre-commit
-
-and then running::
-
- pre-commit install
-
-from the root of the pandas repository. Now all of the styling checks will be
-run each time you commit changes without your needing to run each one manually.
-In addition, using this pre-commit hook will also allow you to more easily
-remain up-to-date with our code checks as they change.
-
-Note that if needed, you can skip these checks with ``git commit --no-verify``.
-
-If you don't want to use ``pre-commit`` as part of your workflow, you can still use it
-to run its checks by running::
-
- pre-commit run --files <files you have modified>
-
-without having to have done ``pre-commit install`` beforehand.
-
-.. note::
-
- If you have conflicting installations of ``virtualenv``, then you may get an
- error - see `here <https://github.com/pypa/virtualenv/issues/1875>`_.
-
- Also, due to a `bug in virtualenv <https://github.com/pypa/virtualenv/issues/1986>`_,
- you may run into issues if you're using conda. To solve this, you can downgrade
- ``virtualenv`` to version ``20.0.33``.
-
Backwards compatibility
~~~~~~~~~~~~~~~~~~~~~~~
| - [x] closes #37358
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/37701 | 2020-11-08T12:36:07Z | 2020-11-10T09:42:08Z | 2020-11-10T09:42:07Z | 2020-11-10T09:43:17Z |
DOC: Add caveat to to_stata with version=119 | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index bfb633ae55095..76ae900d26a68 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2131,6 +2131,11 @@ def to_stata(
support Unicode characters, and version 119 supports more than
32,767 variables.
+ Version 119 should usually only be used when the number of
+ variables exceeds the capacity of dta format 118. Exporting
+ smaller datasets in format 119 may have unintended consequences,
+ and, as of November 2020, Stata SE cannot read version 119 files.
+
.. versionchanged:: 1.0.0
Added support for formats 118 and 119.
| - [x] closes #37490
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/37700 | 2020-11-08T11:20:49Z | 2020-11-10T03:38:45Z | 2020-11-10T03:38:45Z | 2020-11-10T04:08:07Z |
DOC: capitalize Gregorian and fix word order | diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index b817d80c64ccd..d83138528a6f9 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -2316,7 +2316,7 @@ class Period(_Period):
freq : str, default None
One of pandas period strings or corresponding objects.
ordinal : int, default None
- The period offset from the gregorian proleptic epoch.
+ The period offset from the proleptic Gregorian epoch.
year : int, default None
Year value of the period.
month : int, default 1
| Gregorian is a proper noun
(https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar). Also,
"proleptic" is modifying the compound noun "Gregorian epoch" rather than
"Gregorian" modifying a "proleptic epoch".
- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37697 | 2020-11-08T05:28:39Z | 2020-11-09T21:10:46Z | 2020-11-09T21:10:46Z | 2020-11-10T02:03:22Z |
REF: implement replace_regex, remove unreachable branch in ObjectBlock.replace | diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py
index 9eaa265adab2b..76d723beac7e6 100644
--- a/pandas/core/array_algos/replace.py
+++ b/pandas/core/array_algos/replace.py
@@ -3,7 +3,7 @@
"""
import operator
import re
-from typing import Pattern, Union
+from typing import Optional, Pattern, Union
import numpy as np
@@ -12,8 +12,10 @@
from pandas.core.dtypes.common import (
is_datetimelike_v_numeric,
is_numeric_v_string_like,
+ is_re,
is_scalar,
)
+from pandas.core.dtypes.missing import isna
def compare_or_regex_search(
@@ -87,3 +89,45 @@ def _check_comparison_types(
_check_comparison_types(result, a, b)
return result
+
+
+def replace_regex(values: ArrayLike, rx: re.Pattern, value, mask: Optional[np.ndarray]):
+ """
+ Parameters
+ ----------
+ values : ArrayLike
+ Object dtype.
+ rx : re.Pattern
+ value : Any
+ mask : np.ndarray[bool], optional
+
+ Notes
+ -----
+ Alters values in-place.
+ """
+
+ # deal with replacing values with objects (strings) that match but
+ # whose replacement is not a string (numeric, nan, object)
+ if isna(value) or not isinstance(value, str):
+
+ def re_replacer(s):
+ if is_re(rx) and isinstance(s, str):
+ return value if rx.search(s) is not None else s
+ else:
+ return s
+
+ else:
+ # value is guaranteed to be a string here, s can be either a string
+ # or null if it's null it gets returned
+ def re_replacer(s):
+ if is_re(rx) and isinstance(s, str):
+ return rx.sub(value, s)
+ else:
+ return s
+
+ f = np.vectorize(re_replacer, otypes=[values.dtype])
+
+ if mask is None:
+ values[:] = f(values)
+ else:
+ values[mask] = f(values[mask])
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 9e6480dd709f0..fd23b89365496 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -60,7 +60,7 @@
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, isna_compat
import pandas.core.algorithms as algos
-from pandas.core.array_algos.replace import compare_or_regex_search
+from pandas.core.array_algos.replace import compare_or_regex_search, replace_regex
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays import (
Categorical,
@@ -2563,32 +2563,7 @@ def _replace_single(
return super().replace(to_replace, value, inplace=inplace, regex=regex)
new_values = self.values if inplace else self.values.copy()
-
- # deal with replacing values with objects (strings) that match but
- # whose replacement is not a string (numeric, nan, object)
- if isna(value) or not isinstance(value, str):
-
- def re_replacer(s):
- if is_re(rx) and isinstance(s, str):
- return value if rx.search(s) is not None else s
- else:
- return s
-
- else:
- # value is guaranteed to be a string here, s can be either a string
- # or null if it's null it gets returned
- def re_replacer(s):
- if is_re(rx) and isinstance(s, str):
- return rx.sub(value, s)
- else:
- return s
-
- f = np.vectorize(re_replacer, otypes=[self.dtype])
-
- if mask is None:
- new_values[:] = f(new_values)
- else:
- new_values[mask] = f(new_values[mask])
+ replace_regex(new_values, rx, value, mask)
# convert
block = self.make_block(new_values)
| https://api.github.com/repos/pandas-dev/pandas/pulls/37696 | 2020-11-08T04:12:17Z | 2020-11-09T03:30:12Z | 2020-11-09T03:30:12Z | 2020-11-09T03:32:20Z | |
TYP: @final for NDFrame methods | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 8470ef7ba5efb..255c45d5a45aa 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -49,6 +49,7 @@
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
ValueKeyFunc,
+ final,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
@@ -240,6 +241,7 @@ def attrs(self) -> Dict[Optional[Hashable], Any]:
def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:
self._attrs = dict(value)
+ @final
@property
def flags(self) -> Flags:
"""
@@ -280,6 +282,7 @@ def flags(self) -> Flags:
"""
return self._flags
+ @final
def set_flags(
self: FrameOrSeries,
*,
@@ -330,6 +333,7 @@ def set_flags(
df.flags["allows_duplicate_labels"] = allows_duplicate_labels
return df
+ @final
@classmethod
def _validate_dtype(cls, dtype):
""" validate the passed dtype """
@@ -375,6 +379,7 @@ def _constructor_expanddim(self):
# ----------------------------------------------------------------------
# Internals
+ @final
@property
def _data(self):
# GH#33054 retained because some downstream packages uses this,
@@ -405,12 +410,14 @@ def _AXIS_NAMES(self) -> Dict[int, str]:
warnings.warn("_AXIS_NAMES has been deprecated.", FutureWarning, stacklevel=3)
return {0: "index"}
+ @final
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
+ @final
@classmethod
def _construct_axes_from_arguments(
cls, args, kwargs, require_all: bool = False, sentinel=None
@@ -442,6 +449,7 @@ def _construct_axes_from_arguments(
axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}
return axes, kwargs
+ @final
@classmethod
def _get_axis_number(cls, axis: Axis) -> int:
try:
@@ -449,16 +457,19 @@ def _get_axis_number(cls, axis: Axis) -> int:
except KeyError:
raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
+ @final
@classmethod
def _get_axis_name(cls, axis: Axis) -> str:
axis_number = cls._get_axis_number(axis)
return cls._AXIS_ORDERS[axis_number]
+ @final
def _get_axis(self, axis: Axis) -> Index:
axis_number = self._get_axis_number(axis)
assert axis_number in {0, 1}
return self.index if axis_number == 0 else self.columns
+ @final
@classmethod
def _get_block_manager_axis(cls, axis: Axis) -> int:
"""Map the axis to the block_manager axis."""
@@ -468,6 +479,7 @@ def _get_block_manager_axis(cls, axis: Axis) -> int:
return m - axis
return axis
+ @final
def _get_axis_resolvers(self, axis: str) -> Dict[str, Union[Series, MultiIndex]]:
# index or columns
axis_index = getattr(self, axis)
@@ -498,6 +510,7 @@ def _get_axis_resolvers(self, axis: str) -> Dict[str, Union[Series, MultiIndex]]
d[axis] = dindex
return d
+ @final
def _get_index_resolvers(self) -> Dict[str, Union[Series, MultiIndex]]:
from pandas.core.computation.parsing import clean_column_name
@@ -507,6 +520,7 @@ def _get_index_resolvers(self) -> Dict[str, Union[Series, MultiIndex]]:
return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
+ @final
def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:
"""
Return the special character free column resolvers of a dataframe.
@@ -596,11 +610,13 @@ def size(self) -> int:
"""
return np.prod(self.shape)
+ @final
@property
def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
+ @final
@property
def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
@@ -636,6 +652,7 @@ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
self._check_inplace_and_allows_duplicate_labels(inplace)
return self._set_axis_nocheck(labels, axis, inplace)
+ @final
def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool):
# NDFrame.rename with inplace=False calls set_axis(inplace=True) on a copy.
if inplace:
@@ -650,6 +667,7 @@ def _set_axis(self, axis: int, labels: Index) -> None:
self._mgr.set_axis(axis, labels)
self._clear_item_cache()
+ @final
def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
"""
Interchange axes and swap values axes appropriately.
@@ -679,6 +697,7 @@ def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
new_values, *new_axes # type: ignore[arg-type]
).__finalize__(self, method="swapaxes")
+ @final
def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
"""
Return DataFrame with requested index / column level(s) removed.
@@ -751,6 +770,7 @@ def pop(self, item: Label) -> Union[Series, Any]:
return result
+ @final
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
@@ -1215,6 +1235,7 @@ class name
if not inplace:
return result
+ @final
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Set the name(s) of the axis.
@@ -1277,11 +1298,13 @@ def _set_axis_name(self, name, axis=0, inplace=False):
# ----------------------------------------------------------------------
# Comparison Methods
+ @final
def _indexed_same(self, other) -> bool:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
+ @final
def equals(self, other: object) -> bool:
"""
Test whether two objects contain the same elements.
@@ -1368,6 +1391,7 @@ def equals(self, other: object) -> bool:
# -------------------------------------------------------------------------
# Unary Methods
+ @final
def __neg__(self):
values = self._values
if is_bool_dtype(values):
@@ -1382,6 +1406,7 @@ def __neg__(self):
raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
+ @final
def __pos__(self):
values = self._values
if is_bool_dtype(values):
@@ -1399,6 +1424,7 @@ def __pos__(self):
)
return self.__array_wrap__(arr)
+ @final
def __invert__(self):
if not self.size:
# inv fails with 0 len
@@ -1408,6 +1434,7 @@ def __invert__(self):
result = self._constructor(new_data).__finalize__(self, method="__invert__")
return result
+ @final
def __nonzero__(self):
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
@@ -1416,6 +1443,7 @@ def __nonzero__(self):
__bool__ = __nonzero__
+ @final
def bool(self):
"""
Return the bool of a single element Series or DataFrame.
@@ -1460,9 +1488,11 @@ def bool(self):
self.__nonzero__()
+ @final
def __abs__(self: FrameOrSeries) -> FrameOrSeries:
return self.abs()
+ @final
def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
return self.round(decimals)
@@ -1474,6 +1504,7 @@ def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
+ @final
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
@@ -1504,6 +1535,7 @@ def _is_level_reference(self, key, axis=0):
and not self._is_label_reference(key, axis=axis)
)
+ @final
def _is_label_reference(self, key, axis=0) -> bool_t:
"""
Test whether a key is a label reference for a given axis.
@@ -1533,6 +1565,7 @@ def _is_label_reference(self, key, axis=0) -> bool_t:
and any(key in self.axes[ax] for ax in other_axes)
)
+ @final
def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
"""
Test whether a key is a label or level reference for a given axis.
@@ -1557,6 +1590,7 @@ def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
key, axis=axis
)
+ @final
def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
"""
Check whether `key` is ambiguous.
@@ -1600,6 +1634,7 @@ def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
)
raise ValueError(msg)
+ @final
def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
"""
Return a 1-D array of values associated with `key`, a label or level
@@ -1664,6 +1699,7 @@ def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
return values
+ @final
def _drop_labels_or_levels(self, keys, axis: int = 0):
"""
Drop labels and/or levels for the given `axis`.
@@ -1794,6 +1830,7 @@ def __len__(self) -> int:
"""Returns length of info axis"""
return len(self._info_axis)
+ @final
def __contains__(self, key) -> bool_t:
"""True if the key is in the info axis"""
return key in self._info_axis
@@ -1901,6 +1938,7 @@ def __array_wrap__(
# ----------------------------------------------------------------------
# Picklability
+ @final
def __getstate__(self) -> Dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(
@@ -1912,6 +1950,7 @@ def __getstate__(self) -> Dict[str, Any]:
**meta,
)
+ @final
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._mgr = state
@@ -1956,6 +1995,7 @@ def __repr__(self) -> str:
prepr = f"[{','.join(map(pprint_thing, self))}]"
return f"{type(self).__name__}({prepr})"
+ @final
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
@@ -1966,6 +2006,7 @@ def _repr_latex_(self):
else:
return None
+ @final
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
@@ -1982,6 +2023,7 @@ def _repr_data_resource_(self):
# ----------------------------------------------------------------------
# I/O Methods
+ @final
@doc(klass="object")
def to_excel(
self,
@@ -2134,6 +2176,7 @@ def to_excel(
engine=engine,
)
+ @final
def to_json(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
@@ -2421,6 +2464,7 @@ def to_json(
storage_options=storage_options,
)
+ @final
def to_hdf(
self,
path_or_buf,
@@ -2562,6 +2606,7 @@ def to_hdf(
encoding=encoding,
)
+ @final
def to_sql(
self,
name: str,
@@ -2729,6 +2774,7 @@ def to_sql(
method=method,
)
+ @final
def to_pickle(
self,
path,
@@ -2806,6 +2852,7 @@ def to_pickle(
storage_options=storage_options,
)
+ @final
def to_clipboard(
self, excel: bool_t = True, sep: Optional[str] = None, **kwargs
) -> None:
@@ -2867,6 +2914,7 @@ def to_clipboard(
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
+ @final
def to_xarray(self):
"""
Return an xarray object from the pandas object.
@@ -2950,6 +2998,7 @@ class (index) object 'bird' 'bird' 'mammal' 'mammal'
else:
return xarray.Dataset.from_dataframe(self)
+ @final
@doc(returns=fmt.return_docstring)
def to_latex(
self,
@@ -3135,6 +3184,7 @@ def to_latex(
position=position,
)
+ @final
def to_csv(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
@@ -3344,6 +3394,7 @@ def to_csv(
# ----------------------------------------------------------------------
# Lookup Caching
+ @final
def _set_as_cached(self, item, cacher) -> None:
"""
Set the _cacher attribute on the calling object with a weakref to
@@ -3351,6 +3402,7 @@ def _set_as_cached(self, item, cacher) -> None:
"""
self._cacher = (item, weakref.ref(cacher))
+ @final
def _reset_cacher(self) -> None:
"""
Reset the cacher.
@@ -3358,6 +3410,7 @@ def _reset_cacher(self) -> None:
if hasattr(self, "_cacher"):
del self._cacher
+ @final
def _maybe_cache_changed(self, item, value) -> None:
"""
The object has called back to us saying maybe it has changed.
@@ -3365,11 +3418,13 @@ def _maybe_cache_changed(self, item, value) -> None:
loc = self._info_axis.get_loc(item)
self._mgr.iset(loc, value)
+ @final
@property
def _is_cached(self) -> bool_t:
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
+ @final
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
@@ -3377,6 +3432,7 @@ def _get_cacher(self):
cacher = cacher[1]()
return cacher
+ @final
def _maybe_update_cacher(
self, clear: bool_t = False, verify_is_copy: bool_t = True
) -> None:
@@ -3414,6 +3470,7 @@ def _maybe_update_cacher(
if clear:
self._clear_item_cache()
+ @final
def _clear_item_cache(self) -> None:
self._item_cache.clear()
@@ -3519,6 +3576,7 @@ class max_speed
)
return self._constructor(new_data).__finalize__(self, method="take")
+ @final
def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
"""
Internal version of the `take` method that sets the `_is_copy`
@@ -3533,6 +3591,7 @@ def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
result._set_is_copy(self)
return result
+ @final
def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
"""
Return cross-section from the Series/DataFrame.
@@ -3701,6 +3760,7 @@ class animal locomotion
def __getitem__(self, item):
raise AbstractMethodError(self)
+ @final
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
@@ -3751,6 +3811,7 @@ def _set_item(self, key, value) -> None:
NDFrame._iset_item(self, loc, value)
+ @final
def _set_is_copy(self, ref, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
@@ -3758,6 +3819,7 @@ def _set_is_copy(self, ref, copy: bool_t = True) -> None:
assert ref is not None
self._is_copy = weakref.ref(ref)
+ @final
def _check_is_chained_assignment_possible(self) -> bool_t:
"""
Check if we are a view, have a cacher, and are of mixed type.
@@ -3778,6 +3840,7 @@ def _check_is_chained_assignment_possible(self) -> bool_t:
self._check_setitem_copy(stacklevel=4, t="referent")
return False
+ @final
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
"""
@@ -3892,6 +3955,7 @@ def __delitem__(self, key) -> None:
# ----------------------------------------------------------------------
# Unsorted
+ @final
def _check_inplace_and_allows_duplicate_labels(self, inplace):
if inplace and not self.flags.allows_duplicate_labels:
raise ValueError(
@@ -3899,6 +3963,7 @@ def _check_inplace_and_allows_duplicate_labels(self, inplace):
"'self.flags.allows_duplicate_labels' is False."
)
+ @final
def get(self, key, default=None):
"""
Get item from object for given key (ex: DataFrame column).
@@ -3918,11 +3983,13 @@ def get(self, key, default=None):
except (KeyError, ValueError, IndexError):
return default
+ @final
@property
def _is_view(self) -> bool_t:
"""Return boolean indicating if self is view of another array """
return self._mgr.is_view
+ @final
def reindex_like(
self: FrameOrSeries,
other,
@@ -4070,6 +4137,7 @@ def drop(
else:
return obj
+ @final
def _drop_axis(
self: FrameOrSeries, labels, axis, level=None, errors: str = "raise"
) -> FrameOrSeries:
@@ -4125,6 +4193,7 @@ def _drop_axis(
return result
+ @final
def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
"""
Replace self internals with result.
@@ -4142,6 +4211,7 @@ def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
self._mgr = result._mgr
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
+ @final
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
"""
Prefix labels with string `prefix`.
@@ -4205,6 +4275,7 @@ def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
# "**Dict[str, partial[str]]"; expected "Union[str, int, None]"
return self.rename(**mapper) # type: ignore[return-value, arg-type]
+ @final
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
"""
Suffix labels with string `suffix`.
@@ -4719,6 +4790,7 @@ def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
axes, level, limit, tolerance, method, fill_value, copy
).__finalize__(self, method="reindex")
+ @final
def _reindex_axes(
self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy
) -> FrameOrSeries:
@@ -4744,6 +4816,7 @@ def _reindex_axes(
return obj
+ @final
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
"""Check if we do need a multi reindex."""
return (
@@ -4756,6 +4829,7 @@ def _needs_reindex_multi(self, axes, method, level) -> bool_t:
def _reindex_multi(self, axes, copy, fill_value):
raise AbstractMethodError(self)
+ @final
def _reindex_with_indexers(
self: FrameOrSeries,
reindexers,
@@ -4897,6 +4971,7 @@ def f(x) -> bool:
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
+ @final
def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the first `n` rows.
@@ -4969,6 +5044,7 @@ def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
return self.iloc[:n]
+ @final
def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the last `n` rows.
@@ -5043,6 +5119,7 @@ def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
return self.iloc[0:0]
return self.iloc[-n:]
+ @final
def sample(
self: FrameOrSeries,
n=None,
@@ -5251,6 +5328,7 @@ def sample(
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis)
+ @final
@doc(klass=_shared_doc_kwargs["klass"])
def pipe(self, func, *args, **kwargs):
r"""
@@ -5308,6 +5386,7 @@ def pipe(self, func, *args, **kwargs):
# ----------------------------------------------------------------------
# Attribute access
+ @final
def __finalize__(
self: FrameOrSeries, other, method: Optional[str] = None, **kwargs
) -> FrameOrSeries:
@@ -5404,6 +5483,7 @@ def __setattr__(self, name: str, value) -> None:
)
object.__setattr__(self, name, value)
+ @final
def _dir_additions(self) -> Set[str]:
"""
add the string-like attributes from the info_axis.
@@ -5417,6 +5497,7 @@ def _dir_additions(self) -> Set[str]:
# ----------------------------------------------------------------------
# Consolidation of internals
+ @final
def _protect_consolidate(self, f):
"""
Consolidate _mgr -- if the blocks have changed, then clear the
@@ -5428,6 +5509,7 @@ def _protect_consolidate(self, f):
self._clear_item_cache()
return result
+ @final
def _consolidate_inplace(self) -> None:
"""Consolidate data in place and return None"""
@@ -5436,6 +5518,7 @@ def f():
self._protect_consolidate(f)
+ @final
def _consolidate(self):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
@@ -5449,6 +5532,7 @@ def _consolidate(self):
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
+ @final
@property
def _is_mixed_type(self) -> bool_t:
if self._mgr.is_single_block:
@@ -5461,6 +5545,7 @@ def _is_mixed_type(self) -> bool_t:
return self.dtypes.nunique() > 1
+ @final
def _check_inplace_setting(self, value) -> bool_t:
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
@@ -5477,9 +5562,11 @@ def _check_inplace_setting(self, value) -> bool_t:
return True
+ @final
def _get_numeric_data(self):
return self._constructor(self._mgr.get_numeric_data()).__finalize__(self)
+ @final
def _get_bool_data(self):
return self._constructor(self._mgr.get_bool_data()).__finalize__(self)
@@ -5599,6 +5686,7 @@ def dtypes(self):
data = self._mgr.get_dtypes()
return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_)
+ @final
def _to_dict_of_blocks(self, copy: bool_t = True):
"""
Return a dict of dtype -> Constructor Types that
@@ -5776,6 +5864,7 @@ def astype(
result.columns = self.columns
return result
+ @final
def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
"""
Make a copy of this object's indices and data.
@@ -5885,9 +5974,11 @@ def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
self._clear_item_cache()
return self._constructor(data).__finalize__(self, method="copy")
+ @final
def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
return self.copy(deep=deep)
+ @final
def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
"""
Parameters
@@ -5897,6 +5988,7 @@ def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
"""
return self.copy(deep=True)
+ @final
def _convert(
self: FrameOrSeries,
datetime: bool_t = False,
@@ -5938,6 +6030,7 @@ def _convert(
)
).__finalize__(self)
+ @final
def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
"""
Attempt to infer better dtypes for object columns.
@@ -5985,6 +6078,7 @@ def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
)
).__finalize__(self, method="infer_objects")
+ @final
def convert_dtypes(
self: FrameOrSeries,
infer_objects: bool_t = True,
@@ -6309,6 +6403,7 @@ def fillna(
else:
return result.__finalize__(self, method="fillna")
+ @final
def ffill(
self: FrameOrSeries,
axis=None,
@@ -6330,6 +6425,7 @@ def ffill(
pad = ffill
+ @final
def bfill(
self: FrameOrSeries,
axis=None,
@@ -6805,6 +6901,7 @@ def replace(
else:
return result.__finalize__(self, method="replace")
+ @final
def interpolate(
self: FrameOrSeries,
method: str = "linear",
@@ -7103,6 +7200,7 @@ def interpolate(
# ----------------------------------------------------------------------
# Timeseries methods Methods
+ @final
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaNs before `where`.
@@ -7407,6 +7505,7 @@ def notna(self: FrameOrSeries) -> FrameOrSeries:
def notnull(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self, method="notnull")
+ @final
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
@@ -7432,6 +7531,7 @@ def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
else:
return result
+ @final
def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
@@ -7455,6 +7555,7 @@ def _clip_with_one_bound(self, threshold, method, axis, inplace):
threshold = align_method_FRAME(self, threshold, axis, flex=None)[1]
return self.where(subset, threshold, axis=axis, inplace=inplace)
+ @final
def clip(
self: FrameOrSeries,
lower=None,
@@ -7581,6 +7682,7 @@ def clip(
return result
+ @final
def asfreq(
self: FrameOrSeries,
freq,
@@ -7692,6 +7794,7 @@ def asfreq(
fill_value=fill_value,
)
+ @final
def at_time(
self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
@@ -7750,6 +7853,7 @@ def at_time(
indexer = index.indexer_at_time(time, asof=asof)
return self._take_with_is_copy(indexer, axis=axis)
+ @final
def between_time(
self: FrameOrSeries,
start_time,
@@ -7834,6 +7938,7 @@ def between_time(
)
return self._take_with_is_copy(indexer, axis=axis)
+ @final
def resample(
self,
rule,
@@ -8235,6 +8340,7 @@ def resample(
offset=offset,
)
+ @final
def first(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Select initial periods of time series data based on a date offset.
@@ -8303,6 +8409,7 @@ def first(self: FrameOrSeries, offset) -> FrameOrSeries:
return self.loc[:end]
+ @final
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Select final periods of time series data based on a date offset.
@@ -8366,6 +8473,7 @@ def last(self: FrameOrSeries, offset) -> FrameOrSeries:
start = self.index.searchsorted(start_date, side="right")
return self.iloc[start:]
+ @final
def rank(
self: FrameOrSeries,
axis=0,
@@ -8686,6 +8794,7 @@ def align(
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
+ @final
def _align_frame(
self,
other,
@@ -8755,6 +8864,7 @@ def _align_frame(
right.__finalize__(other),
)
+ @final
def _align_series(
self,
other,
@@ -8846,6 +8956,7 @@ def _align_series(
right.__finalize__(other),
)
+ @final
def _where(
self,
cond,
@@ -8993,6 +9104,7 @@ def _where(
result = self._constructor(new_data)
return result.__finalize__(self)
+ @final
@doc(
klass=_shared_doc_kwargs["klass"],
cond="True",
@@ -9135,6 +9247,7 @@ def where(
cond, other, inplace, axis, level, errors=errors, try_cast=try_cast
)
+ @final
@doc(
where,
klass=_shared_doc_kwargs["klass"],
@@ -9317,6 +9430,7 @@ def shift(
result = self.set_axis(new_ax, axis)
return result.__finalize__(self, method="shift")
+ @final
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
"""
Equivalent to `shift` without copying data.
@@ -9365,6 +9479,7 @@ def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
return new_obj.__finalize__(self, method="slice_shift")
+ @final
def tshift(
self: FrameOrSeries, periods: int = 1, freq=None, axis: Axis = 0
) -> FrameOrSeries:
@@ -9571,6 +9686,7 @@ def truncate(
return result
+ @final
def tz_convert(
self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
) -> FrameOrSeries:
@@ -9628,6 +9744,7 @@ def _tz_convert(ax, tz):
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self, method="tz_convert")
+ @final
def tz_localize(
self: FrameOrSeries,
tz,
@@ -9801,6 +9918,7 @@ def _tz_localize(ax, tz, ambiguous, nonexistent):
# ----------------------------------------------------------------------
# Numeric Methods
+ @final
def abs(self: FrameOrSeries) -> FrameOrSeries:
"""
Return a Series/DataFrame with absolute numeric value of each element.
@@ -9870,6 +9988,7 @@ def abs(self: FrameOrSeries) -> FrameOrSeries:
"""
return np.abs(self)
+ @final
def describe(
self: FrameOrSeries,
percentiles=None,
@@ -10143,7 +10262,7 @@ def describe(
formatted_percentiles = format_percentiles(percentiles)
- def describe_numeric_1d(series):
+ def describe_numeric_1d(series) -> "Series":
stat_index = (
["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
)
@@ -10154,7 +10273,7 @@ def describe_numeric_1d(series):
)
return pd.Series(d, index=stat_index, name=series.name)
- def describe_categorical_1d(data):
+ def describe_categorical_1d(data) -> "Series":
names = ["count", "unique"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
@@ -10203,7 +10322,7 @@ def describe_categorical_1d(data):
return pd.Series(result, index=names, name=data.name, dtype=dtype)
- def describe_timestamp_1d(data):
+ def describe_timestamp_1d(data) -> "Series":
# GH-30164
stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
d = (
@@ -10213,7 +10332,7 @@ def describe_timestamp_1d(data):
)
return pd.Series(d, index=stat_index, name=data.name)
- def describe_1d(data):
+ def describe_1d(data) -> "Series":
if is_bool_dtype(data.dtype):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
@@ -10226,7 +10345,9 @@ def describe_1d(data):
return describe_categorical_1d(data)
if self.ndim == 1:
- return describe_1d(self)
+ # Incompatible return value type
+ # (got "Series", expected "FrameOrSeries") [return-value]
+ return describe_1d(self) # type:ignore[return-value]
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
default_include = [np.number]
@@ -10256,6 +10377,7 @@ def describe_1d(data):
d.columns = data.columns.copy()
return d
+ @final
def pct_change(
self: FrameOrSeries,
periods=1,
@@ -10394,6 +10516,7 @@ def pct_change(
rs = rs.reindex_like(data)
return rs
+ @final
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
@@ -10405,6 +10528,7 @@ def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
+ @final
def _logical_func(
self, name: str, func, axis=0, bool_only=None, skipna=True, level=None, **kwargs
):
@@ -10442,6 +10566,7 @@ def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
"all", nanops.nanall, axis, bool_only, skipna, level, **kwargs
)
+ @final
def _accum_func(self, name: str, func, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
@@ -10482,6 +10607,7 @@ def cumsum(self, axis=None, skipna=True, *args, **kwargs):
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs)
+ @final
def _stat_function_ddof(
self,
name: str,
@@ -10527,6 +10653,7 @@ def std(
"std", nanops.nanstd, axis, skipna, level, ddof, numeric_only, **kwargs
)
+ @final
def _stat_function(
self,
name: str,
@@ -10583,6 +10710,7 @@ def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
kurtosis = kurt
+ @final
def _min_count_stat_function(
self,
name: str,
@@ -11052,6 +11180,7 @@ def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
# [assignment]
cls.min = min # type: ignore[assignment]
+ @final
@doc(Rolling)
def rolling(
self,
@@ -11088,6 +11217,7 @@ def rolling(
closed=closed,
)
+ @final
@doc(Expanding)
def expanding(
self, min_periods: int = 1, center: Optional[bool_t] = None, axis: Axis = 0
@@ -11104,6 +11234,7 @@ def expanding(
return Expanding(self, min_periods=min_periods, center=center, axis=axis)
+ @final
@doc(ExponentialMovingWindow)
def ewm(
self,
@@ -11134,6 +11265,7 @@ def ewm(
# ----------------------------------------------------------------------
# Arithmetic Methods
+ @final
def _inplace_method(self, other, op):
"""
Wrap arithmetic method to operate inplace.
@@ -11192,6 +11324,7 @@ def __ixor__(self, other):
# ----------------------------------------------------------------------
# Misc methods
+ @final
def _find_valid_index(self, how: str):
"""
Retrieves the index of the first valid value.
@@ -11210,6 +11343,7 @@ def _find_valid_index(self, how: str):
return None
return self.index[idxpos]
+ @final
@doc(position="first", klass=_shared_doc_kwargs["klass"])
def first_valid_index(self):
"""
@@ -11226,6 +11360,7 @@ def first_valid_index(self):
"""
return self._find_valid_index("first")
+ @final
@doc(first_valid_index, position="last", klass=_shared_doc_kwargs["klass"])
def last_valid_index(self):
return self._find_valid_index("last")
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 2739806a0f338..9744eb0ecbb88 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -802,7 +802,7 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None):
if (start is None or isinstance(start, str)) and (
end is None or isinstance(end, str)
):
- mask = True
+ mask = np.array(True)
if start is not None:
start_casted = self._maybe_cast_slice_bound(start, "left", kind)
mask = start_casted <= self
@@ -811,9 +811,7 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None):
end_casted = self._maybe_cast_slice_bound(end, "right", kind)
mask = (self <= end_casted) & mask
- # pandas\core\indexes\datetimes.py:764: error: "bool" has no
- # attribute "nonzero" [attr-defined]
- indexer = mask.nonzero()[0][::step] # type: ignore[attr-defined]
+ indexer = mask.nonzero()[0][::step]
if len(indexer) == len(self):
return slice(None)
else:
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 9793f7a1e4613..0916494d8ab60 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -594,13 +594,13 @@ def _format_header_regular(self):
# has incompatible type "Union[Sequence[Optional[Hashable]],
# bool]"; expected "Sized" [arg-type]
if len(self.header) != len(self.columns): # type: ignore[arg-type]
- # pandas\io\formats\excel.py:595: error: Argument 1 to
+ # pandas\io\formats\excel.py:602: error: Argument 1 to
# "len" has incompatible type
# "Union[Sequence[Optional[Hashable]], bool]"; expected
# "Sized" [arg-type]
raise ValueError(
- f"Writing {len(self.columns)} " # type: ignore[arg-type]
- f"cols but got {len(self.header)} aliases"
+ f"Writing {len(self.columns)} cols " # type: ignore[arg-type]
+ f"but got {len(self.header)} aliases"
)
else:
colnames = self.header
| attempt 2 | https://api.github.com/repos/pandas-dev/pandas/pulls/37694 | 2020-11-08T02:52:02Z | 2020-11-10T23:16:46Z | 2020-11-10T23:16:46Z | 2020-11-10T23:28:45Z |
REF: make Series._replace_single a regular method | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 36ce2c4776bd0..bea650c1b50fd 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -137,36 +137,6 @@
)
-def _single_replace(self: "Series", to_replace, method, inplace, limit):
- """
- Replaces values in a Series using the fill method specified when no
- replacement value is given in the replace method
- """
- if self.ndim != 1:
- raise TypeError(
- f"cannot replace {to_replace} with method {method} on a "
- f"{type(self).__name__}"
- )
-
- orig_dtype = self.dtype
- result = self if inplace else self.copy()
- fill_f = missing.get_fill_func(method)
-
- mask = missing.mask_missing(result.values, to_replace)
- values = fill_f(result.values, limit=limit, mask=mask)
-
- if values.dtype == orig_dtype and inplace:
- return
-
- result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
-
- if inplace:
- self._update_inplace(result)
- return
-
- return result
-
-
bool_t = bool # Need alias because NDFrame has def bool:
@@ -6690,11 +6660,14 @@ def replace(
if isinstance(to_replace, (tuple, list)):
if isinstance(self, ABCDataFrame):
+ from pandas import Series
+
return self.apply(
- _single_replace, args=(to_replace, method, inplace, limit)
+ Series._replace_single,
+ args=(to_replace, method, inplace, limit),
)
self = cast("Series", self)
- return _single_replace(self, to_replace, method, inplace, limit)
+ return self._replace_single(to_replace, method, inplace, limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index e4a805a18bcdb..41adc1fc1ae93 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -67,7 +67,7 @@
remove_na_arraylike,
)
-from pandas.core import algorithms, base, generic, nanops, ops
+from pandas.core import algorithms, base, generic, missing, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.aggregation import aggregate, transform
from pandas.core.arrays import ExtensionArray
@@ -4550,6 +4550,31 @@ def replace(
method=method,
)
+ def _replace_single(self, to_replace, method, inplace, limit):
+ """
+ Replaces values in a Series using the fill method specified when no
+ replacement value is given in the replace method
+ """
+
+ orig_dtype = self.dtype
+ result = self if inplace else self.copy()
+ fill_f = missing.get_fill_func(method)
+
+ mask = missing.mask_missing(result.values, to_replace)
+ values = fill_f(result.values, limit=limit, mask=mask)
+
+ if values.dtype == orig_dtype and inplace:
+ return
+
+ result = self._constructor(values, index=self.index, dtype=self.dtype)
+ result = result.__finalize__(self)
+
+ if inplace:
+ self._update_inplace(result)
+ return
+
+ return result
+
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])
def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> "Series":
return super().shift(
| https://api.github.com/repos/pandas-dev/pandas/pulls/37691 | 2020-11-07T23:44:47Z | 2020-11-08T02:10:13Z | 2020-11-08T02:10:13Z | 2020-11-08T02:41:15Z | |
DOC: add examples to insert and update generally | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f710660d6ad8e..2f5736be8f604 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3752,6 +3752,28 @@ def insert(self, loc, column, value, allow_duplicates=False) -> None:
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional
+
+ See Also
+ --------
+ Index.insert : Insert new item by index.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
+ >>> df
+ col1 col2
+ 0 1 3
+ 1 2 4
+ >>> df.insert(1, "newcol", [99, 99])
+ >>> df
+ col1 newcol col2
+ 0 1 99 3
+ 1 2 99 4
+ >>> df.insert(0, "col1", [100, 100], allow_duplicates=True)
+ >>> df
+ col1 col1 newcol col2
+ 0 100 1 99 3
+ 1 100 2 99 4
"""
if allow_duplicates and not self.flags.allows_duplicate_labels:
raise ValueError(
| This commit mainly adds examples on usage. This commit also fills in
information suggested by `validate_docstrings.py` script.
Ran
```
python scripts/validate_docstrings.py pandas.DataFrame.insert
```
to check docs and added in suggested information (e.g., See Also section).
- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37690 | 2020-11-07T23:10:53Z | 2020-12-08T18:32:45Z | 2020-12-08T18:32:45Z | 2020-12-08T18:45:16Z |
CLN: Clean indexing tests | diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 6c80354610a78..5bb7258329653 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -615,7 +615,6 @@ def test_iloc_mask(self):
# UserWarnings from reindex of a boolean mask
with catch_warnings(record=True):
simplefilter("ignore", UserWarning)
- result = dict()
for idx in [None, "index", "locs"]:
mask = (df.nums > 2).values
if idx:
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 614e424e8aca2..06bd8a5f300bb 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -843,6 +843,7 @@ def test_maybe_numeric_slice(self):
result = maybe_numeric_slice(df, None, include_bool=True)
expected = pd.IndexSlice[:, ["A", "C"]]
+ assert all(result[1] == expected[1])
result = maybe_numeric_slice(df, [1])
expected = [1]
assert result == expected
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 3b0fae537a6e5..47ab80696494e 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1191,7 +1191,8 @@ def test_loc_setitem_multiindex_slice(self):
def test_loc_getitem_slice_datetime_objs_with_datetimeindex(self):
times = date_range("2000-01-01", freq="10min", periods=100000)
ser = Series(range(100000), times)
- ser.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)]
+ result = ser.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)]
+ tm.assert_series_equal(result, ser)
class TestLocSetitemWithExpansion:
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 01db937153b3a..3bf37f4cade8b 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -185,14 +185,14 @@ def test_series_partial_set(self):
# loc equiv to .reindex
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
with pytest.raises(KeyError, match="with any missing labels"):
- result = ser.loc[[3, 2, 3]]
+ ser.loc[[3, 2, 3]]
result = ser.reindex([3, 2, 3])
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, "x"])
with pytest.raises(KeyError, match="with any missing labels"):
- result = ser.loc[[3, 2, 3, "x"]]
+ ser.loc[[3, 2, 3, "x"]]
result = ser.reindex([3, 2, 3, "x"])
tm.assert_series_equal(result, expected, check_index_type=True)
@@ -203,7 +203,7 @@ def test_series_partial_set(self):
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, "x", 1])
with pytest.raises(KeyError, match="with any missing labels"):
- result = ser.loc[[2, 2, "x", 1]]
+ ser.loc[[2, 2, "x", 1]]
result = ser.reindex([2, 2, "x", 1])
tm.assert_series_equal(result, expected, check_index_type=True)
| - [x] tests passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Found a few unuesed statements | https://api.github.com/repos/pandas-dev/pandas/pulls/37689 | 2020-11-07T23:01:09Z | 2020-11-08T02:59:32Z | 2020-11-08T02:59:31Z | 2020-11-08T12:07:04Z |
TYP: make some internal funcs keyword-only | diff --git a/pandas/core/array_algos/masked_reductions.py b/pandas/core/array_algos/masked_reductions.py
index 3f4625e2b712a..bce6f1aafb2c5 100644
--- a/pandas/core/array_algos/masked_reductions.py
+++ b/pandas/core/array_algos/masked_reductions.py
@@ -17,6 +17,7 @@ def _sumprod(
func: Callable,
values: np.ndarray,
mask: np.ndarray,
+ *,
skipna: bool = True,
min_count: int = 0,
):
@@ -52,19 +53,25 @@ def _sumprod(
return func(values, where=~mask)
-def sum(values: np.ndarray, mask: np.ndarray, skipna: bool = True, min_count: int = 0):
+def sum(
+ values: np.ndarray, mask: np.ndarray, *, skipna: bool = True, min_count: int = 0
+):
return _sumprod(
np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count
)
-def prod(values: np.ndarray, mask: np.ndarray, skipna: bool = True, min_count: int = 0):
+def prod(
+ values: np.ndarray, mask: np.ndarray, *, skipna: bool = True, min_count: int = 0
+):
return _sumprod(
np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count
)
-def _minmax(func: Callable, values: np.ndarray, mask: np.ndarray, skipna: bool = True):
+def _minmax(
+ func: Callable, values: np.ndarray, mask: np.ndarray, *, skipna: bool = True
+):
"""
Reduction for 1D masked array.
@@ -94,9 +101,9 @@ def _minmax(func: Callable, values: np.ndarray, mask: np.ndarray, skipna: bool =
return libmissing.NA
-def min(values: np.ndarray, mask: np.ndarray, skipna: bool = True):
+def min(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
return _minmax(np.min, values=values, mask=mask, skipna=skipna)
-def max(values: np.ndarray, mask: np.ndarray, skipna: bool = True):
+def max(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
return _minmax(np.max, values=values, mask=mask, skipna=skipna)
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 63c414d96c8de..a8c0e77270dfc 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -54,6 +54,7 @@ def _validate_scalar(self, value):
def take(
self: _T,
indices: Sequence[int],
+ *,
allow_fill: bool = False,
fill_value: Any = None,
axis: int = 0,
@@ -246,7 +247,7 @@ def fillna(self: _T, value=None, method=None, limit=None) -> _T:
# ------------------------------------------------------------------------
# Reductions
- def _reduce(self, name: str, skipna: bool = True, **kwargs):
+ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
meth = getattr(self, name, None)
if meth:
return meth(skipna=skipna, **kwargs)
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index be105fd1f2a46..afbddc53804ac 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -173,7 +173,7 @@ class ExtensionArray:
# ------------------------------------------------------------------------
@classmethod
- def _from_sequence(cls, scalars, dtype=None, copy=False):
+ def _from_sequence(cls, scalars, *, dtype=None, copy=False):
"""
Construct a new ExtensionArray from a sequence of scalars.
@@ -195,7 +195,7 @@ def _from_sequence(cls, scalars, dtype=None, copy=False):
raise AbstractMethodError(cls)
@classmethod
- def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
+ def _from_sequence_of_strings(cls, strings, *, dtype=None, copy=False):
"""
Construct a new ExtensionArray from a sequence of strings.
@@ -922,7 +922,11 @@ def repeat(self, repeats, axis=None):
# ------------------------------------------------------------------------
def take(
- self, indices: Sequence[int], allow_fill: bool = False, fill_value: Any = None
+ self,
+ indices: Sequence[int],
+ *,
+ allow_fill: bool = False,
+ fill_value: Any = None,
) -> "ExtensionArray":
"""
Take elements from an array.
@@ -1153,7 +1157,7 @@ def _concat_same_type(
# of objects
_can_hold_na = True
- def _reduce(self, name: str, skipna: bool = True, **kwargs):
+ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
"""
Return a scalar result of performing the reduction operation.
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index 21306455573b8..c6c7396a980b0 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -273,7 +273,9 @@ def dtype(self) -> BooleanDtype:
return self._dtype
@classmethod
- def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "BooleanArray":
+ def _from_sequence(
+ cls, scalars, *, dtype=None, copy: bool = False
+ ) -> "BooleanArray":
if dtype:
assert dtype == "boolean"
values, mask = coerce_to_array(scalars, copy=copy)
@@ -281,7 +283,7 @@ def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "BooleanArra
@classmethod
def _from_sequence_of_strings(
- cls, strings: List[str], dtype=None, copy: bool = False
+ cls, strings: List[str], *, dtype=None, copy: bool = False
) -> "BooleanArray":
def map_string(s):
if isna(s):
@@ -294,7 +296,7 @@ def map_string(s):
raise ValueError(f"{s} cannot be cast to bool")
scalars = [map_string(x) for x in strings]
- return cls._from_sequence(scalars, dtype, copy)
+ return cls._from_sequence(scalars, dtype=dtype, copy=copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)
@@ -682,12 +684,12 @@ def _arith_method(self, other, op):
return self._maybe_mask_result(result, mask, other, op_name)
- def _reduce(self, name: str, skipna: bool = True, **kwargs):
+ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if name in {"any", "all"}:
return getattr(self, name)(skipna=skipna, **kwargs)
- return super()._reduce(name, skipna, **kwargs)
+ return super()._reduce(name, skipna=skipna, **kwargs)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index edbf24ca87f5c..51f3c16f3f467 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -385,7 +385,7 @@ def _constructor(self) -> Type["Categorical"]:
return Categorical
@classmethod
- def _from_sequence(cls, scalars, dtype=None, copy=False):
+ def _from_sequence(cls, scalars, *, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 905242bfdd8ad..a05dc717f83c1 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -301,7 +301,7 @@ def _simple_new(
return result
@classmethod
- def _from_sequence(cls, scalars, dtype=None, copy: bool = False):
+ def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)
@classmethod
diff --git a/pandas/core/arrays/floating.py b/pandas/core/arrays/floating.py
index 4cfaae23e4389..a5ebdd8d963e2 100644
--- a/pandas/core/arrays/floating.py
+++ b/pandas/core/arrays/floating.py
@@ -275,16 +275,18 @@ def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
super().__init__(values, mask, copy=copy)
@classmethod
- def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "FloatingArray":
+ def _from_sequence(
+ cls, scalars, *, dtype=None, copy: bool = False
+ ) -> "FloatingArray":
values, mask = coerce_to_array(scalars, dtype=dtype, copy=copy)
return FloatingArray(values, mask)
@classmethod
def _from_sequence_of_strings(
- cls, strings, dtype=None, copy: bool = False
+ cls, strings, *, dtype=None, copy: bool = False
) -> "FloatingArray":
scalars = to_numeric(strings, errors="raise")
- return cls._from_sequence(scalars, dtype, copy)
+ return cls._from_sequence(scalars, dtype=dtype, copy=copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index e3d19e53e4517..c9d7632e39228 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -358,15 +358,17 @@ def __abs__(self):
return type(self)(np.abs(self._data), self._mask)
@classmethod
- def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "IntegerArray":
+ def _from_sequence(
+ cls, scalars, *, dtype=None, copy: bool = False
+ ) -> "IntegerArray":
return integer_array(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_of_strings(
- cls, strings, dtype=None, copy: bool = False
+ cls, strings, *, dtype=None, copy: bool = False
) -> "IntegerArray":
scalars = to_numeric(strings, errors="raise")
- return cls._from_sequence(scalars, dtype, copy)
+ return cls._from_sequence(scalars, dtype=dtype, copy=copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 7b10334804ef9..a2eb506c6747a 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -227,7 +227,7 @@ def _simple_new(cls, data, closed="right"):
return result
@classmethod
- def _from_sequence(cls, scalars, dtype=None, copy=False):
+ def _from_sequence(cls, scalars, *, dtype=None, copy=False):
return cls(scalars, dtype=dtype, copy=copy)
@classmethod
@@ -788,7 +788,7 @@ def shift(self, periods: int = 1, fill_value: object = None) -> "IntervalArray":
b = empty
return self._concat_same_type([a, b])
- def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs):
+ def take(self, indices, *, allow_fill=False, fill_value=None, axis=None, **kwargs):
"""
Take elements from the IntervalArray.
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index b633f268049e5..9cc4cc72e4c8e 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -269,6 +269,7 @@ def _concat_same_type(cls: Type[BaseMaskedArrayT], to_concat) -> BaseMaskedArray
def take(
self: BaseMaskedArrayT,
indexer,
+ *,
allow_fill: bool = False,
fill_value: Optional[Scalar] = None,
) -> BaseMaskedArrayT:
@@ -357,7 +358,7 @@ def value_counts(self, dropna: bool = True) -> "Series":
return Series(counts, index=index)
- def _reduce(self, name: str, skipna: bool = True, **kwargs):
+ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
data = self._data
mask = self._mask
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index e1a424b719a4a..9419f111cc869 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -172,7 +172,9 @@ def __init__(self, values: Union[np.ndarray, "PandasArray"], copy: bool = False)
self._dtype = PandasDtype(values.dtype)
@classmethod
- def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "PandasArray":
+ def _from_sequence(
+ cls, scalars, *, dtype=None, copy: bool = False
+ ) -> "PandasArray":
if isinstance(dtype, PandasDtype):
dtype = dtype._dtype
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 8de84a0187e95..80882acceb56a 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -192,6 +192,7 @@ def _simple_new(
def _from_sequence(
cls: Type["PeriodArray"],
scalars: Union[Sequence[Optional[Period]], AnyArrayLike],
+ *,
dtype: Optional[PeriodDtype] = None,
copy: bool = False,
) -> "PeriodArray":
@@ -214,9 +215,9 @@ def _from_sequence(
@classmethod
def _from_sequence_of_strings(
- cls, strings, dtype=None, copy=False
+ cls, strings, *, dtype=None, copy=False
) -> "PeriodArray":
- return cls._from_sequence(strings, dtype, copy)
+ return cls._from_sequence(strings, dtype=dtype, copy=copy)
@classmethod
def _from_datetime64(cls, data, freq, tz=None) -> "PeriodArray":
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 9152ce72d75aa..d976526955ac2 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -484,7 +484,7 @@ def __setitem__(self, key, value):
raise TypeError(msg)
@classmethod
- def _from_sequence(cls, scalars, dtype=None, copy=False):
+ def _from_sequence(cls, scalars, *, dtype=None, copy=False):
return cls(scalars, dtype=dtype)
@classmethod
@@ -809,7 +809,7 @@ def _get_val_at(self, loc):
val = maybe_box_datetimelike(val, self.sp_values.dtype)
return val
- def take(self, indices, allow_fill=False, fill_value=None) -> "SparseArray":
+ def take(self, indices, *, allow_fill=False, fill_value=None) -> "SparseArray":
if is_scalar(indices):
raise ValueError(f"'indices' must be an array, not a scalar '{indices}'.")
indices = np.asarray(indices, dtype=np.int32)
@@ -1156,7 +1156,7 @@ def nonzero(self):
# Reductions
# ------------------------------------------------------------------------
- def _reduce(self, name: str, skipna: bool = True, **kwargs):
+ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
method = getattr(self, name, None)
if method is None:
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 8231a5fa0509b..b17481c8e5f88 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -198,7 +198,7 @@ def _validate(self):
)
@classmethod
- def _from_sequence(cls, scalars, dtype=None, copy=False):
+ def _from_sequence(cls, scalars, *, dtype=None, copy=False):
if dtype:
assert dtype == "string"
@@ -226,7 +226,7 @@ def _from_sequence(cls, scalars, dtype=None, copy=False):
return new_string_array
@classmethod
- def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
+ def _from_sequence_of_strings(cls, strings, *, dtype=None, copy=False):
return cls._from_sequence(strings, dtype=dtype, copy=copy)
def __arrow_array__(self, type=None):
@@ -295,7 +295,7 @@ def astype(self, dtype, copy=True):
return super().astype(dtype, copy)
- def _reduce(self, name: str, skipna: bool = True, **kwargs):
+ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if name in ["min", "max"]:
return getattr(self, name)(skipna=skipna)
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 8a87df18b6adb..a75d411b4a40c 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -219,7 +219,7 @@ def _simple_new(
@classmethod
def _from_sequence(
- cls, data, dtype=TD64NS_DTYPE, copy: bool = False
+ cls, data, *, dtype=TD64NS_DTYPE, copy: bool = False
) -> "TimedeltaArray":
if dtype:
_validate_td64_dtype(dtype)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index c91e4db004f2a..b979298fa53f6 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -785,6 +785,7 @@ def _reduce(
self,
op,
name: str,
+ *,
axis=0,
skipna=True,
numeric_only=None,
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 049d2c4888a69..11b83a393dcc0 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8719,6 +8719,7 @@ def _reduce(
self,
op,
name: str,
+ *,
axis=0,
skipna=True,
numeric_only=None,
diff --git a/pandas/core/series.py b/pandas/core/series.py
index e4a805a18bcdb..237c1c9a85575 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4190,7 +4190,15 @@ def f(x):
)
def _reduce(
- self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds
+ self,
+ op,
+ name: str,
+ *,
+ axis=0,
+ skipna=True,
+ numeric_only=None,
+ filter_type=None,
+ **kwds,
):
"""
Perform a reduction operation.
diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py
index 04ce705690cf3..65c5102e22997 100644
--- a/pandas/tests/extension/arrow/arrays.py
+++ b/pandas/tests/extension/arrow/arrays.py
@@ -159,7 +159,7 @@ def _concat_same_type(cls, to_concat):
def __invert__(self):
return type(self).from_scalars(~self._data.to_pandas())
- def _reduce(self, name: str, skipna: bool = True, **kwargs):
+ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if skipna:
arr = self[~self.isna()]
else:
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 3d1ebb01d632f..9ede9c7fbd0fd 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -178,7 +178,7 @@ def _formatter(self, boxed=False):
def _concat_same_type(cls, to_concat):
return cls(np.concatenate([x._data for x in to_concat]))
- def _reduce(self, name: str, skipna: bool = True, **kwargs):
+ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if skipna:
# If we don't have any NAs, we can ignore skipna
| https://api.github.com/repos/pandas-dev/pandas/pulls/37688 | 2020-11-07T22:54:51Z | 2020-11-08T02:04:59Z | 2020-11-08T02:04:59Z | 2020-11-08T02:40:21Z | |
pd.Series.loc.__getitem__ promotes to float64 instead of raising KeyError | diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 3b0fae537a6e5..0faa784634fd2 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1512,6 +1512,16 @@ def test_series_loc_getitem_label_list_missing_values():
s.loc[key]
+def test_series_getitem_label_list_missing_integer_values():
+ # GH: 25927
+ s = Series(
+ index=np.array([9730701000001104, 10049011000001109]),
+ data=np.array([999000011000001104, 999000011000001104]),
+ )
+ with pytest.raises(KeyError, match="with any missing labels"):
+ s.loc[np.array([9730701000001104, 10047311000001102])]
+
+
@pytest.mark.parametrize(
"columns, column_key, expected_columns, check_column_type",
[
| - [x] closes #25927
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
cc @jreback Added a test. The test above was the only one explicitly added for the ``KeyError``
| https://api.github.com/repos/pandas-dev/pandas/pulls/37687 | 2020-11-07T22:34:01Z | 2020-11-07T23:47:12Z | 2020-11-07T23:47:12Z | 2020-11-07T23:50:43Z |
CLN: _replace_single | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 1f34e91d71077..8e01aaa396265 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -726,7 +726,6 @@ def replace(
value,
inplace: bool = False,
regex: bool = False,
- convert: bool = True,
) -> List["Block"]:
"""
replace the to_replace value with value, possible to create new
@@ -755,9 +754,7 @@ def replace(
if len(to_replace) == 1:
# _can_hold_element checks have reduced this back to the
# scalar case and we can avoid a costly object cast
- return self.replace(
- to_replace[0], value, inplace=inplace, regex=regex, convert=convert
- )
+ return self.replace(to_replace[0], value, inplace=inplace, regex=regex)
# GH 22083, TypeError or ValueError occurred within error handling
# causes infinite loop. Cast and retry only if not objectblock.
@@ -771,7 +768,6 @@ def replace(
value=value,
inplace=inplace,
regex=regex,
- convert=convert,
)
values = self.values
@@ -810,16 +806,21 @@ def replace(
value=value,
inplace=inplace,
regex=regex,
- convert=convert,
- )
- if convert:
- blocks = extend_blocks(
- [b.convert(numeric=False, copy=not inplace) for b in blocks]
)
+
+ blocks = extend_blocks(
+ [b.convert(numeric=False, copy=not inplace) for b in blocks]
+ )
return blocks
def _replace_single(
- self, to_replace, value, inplace=False, regex=False, convert=True, mask=None
+ self,
+ to_replace,
+ value,
+ inplace: bool = False,
+ regex: bool = False,
+ convert: bool = True,
+ mask=None,
) -> List["Block"]:
""" no-op on a non-ObjectBlock """
return [self] if inplace else [self.copy()]
@@ -860,9 +861,9 @@ def comp(s: Scalar, mask: np.ndarray, regex: bool = False) -> np.ndarray:
m = masks[i]
convert = i == src_len # only convert once at the end
result = blk._replace_coerce(
- mask=m,
to_replace=src,
value=dest,
+ mask=m,
inplace=inplace,
regex=regex,
)
@@ -1567,9 +1568,9 @@ def _replace_coerce(
self,
to_replace,
value,
+ mask: np.ndarray,
inplace: bool = True,
regex: bool = False,
- mask=None,
) -> List["Block"]:
"""
Replace value corresponding to the given boolean array with another
@@ -1581,12 +1582,12 @@ def _replace_coerce(
Scalar to replace or regular expression to match.
value : object
Replacement object.
+ mask : np.ndarray[bool]
+ True indicate corresponding element is ignored.
inplace : bool, default True
Perform inplace modification.
regex : bool, default False
If true, perform regular expression substitution.
- mask : array-like of bool, optional
- True indicate corresponding element is ignored.
Returns
-------
@@ -2495,7 +2496,11 @@ def _can_hold_element(self, element: Any) -> bool:
return True
def replace(
- self, to_replace, value, inplace=False, regex=False, convert=True
+ self,
+ to_replace,
+ value,
+ inplace: bool = False,
+ regex: bool = False,
) -> List["Block"]:
to_rep_is_list = is_list_like(to_replace)
value_is_list = is_list_like(value)
@@ -2506,20 +2511,14 @@ def replace(
blocks: List["Block"] = [self]
if not either_list and is_re(to_replace):
- return self._replace_single(
- to_replace, value, inplace=inplace, regex=True, convert=convert
- )
+ return self._replace_single(to_replace, value, inplace=inplace, regex=True)
elif not (either_list or regex):
- return super().replace(
- to_replace, value, inplace=inplace, regex=regex, convert=convert
- )
+ return super().replace(to_replace, value, inplace=inplace, regex=regex)
elif both_lists:
for to_rep, v in zip(to_replace, value):
result_blocks = []
for b in blocks:
- result = b._replace_single(
- to_rep, v, inplace=inplace, regex=regex, convert=convert
- )
+ result = b._replace_single(to_rep, v, inplace=inplace, regex=regex)
result_blocks.extend(result)
blocks = result_blocks
return result_blocks
@@ -2529,18 +2528,22 @@ def replace(
result_blocks = []
for b in blocks:
result = b._replace_single(
- to_rep, value, inplace=inplace, regex=regex, convert=convert
+ to_rep, value, inplace=inplace, regex=regex
)
result_blocks.extend(result)
blocks = result_blocks
return result_blocks
- return self._replace_single(
- to_replace, value, inplace=inplace, convert=convert, regex=regex
- )
+ return self._replace_single(to_replace, value, inplace=inplace, regex=regex)
def _replace_single(
- self, to_replace, value, inplace=False, regex=False, convert=True, mask=None
+ self,
+ to_replace,
+ value,
+ inplace: bool = False,
+ regex: bool = False,
+ convert: bool = True,
+ mask=None,
) -> List["Block"]:
"""
Replace elements by the given value.
@@ -2567,23 +2570,7 @@ def _replace_single(
inplace = validate_bool_kwarg(inplace, "inplace")
# to_replace is regex compilable
- to_rep_re = regex and is_re_compilable(to_replace)
-
- # regex is regex compilable
- regex_re = is_re_compilable(regex)
-
- # only one will survive
- if to_rep_re and regex_re:
- raise AssertionError(
- "only one of to_replace and regex can be regex compilable"
- )
-
- # if regex was passed as something that can be a regex (rather than a
- # boolean)
- if regex_re:
- to_replace = regex
-
- regex = regex_re or to_rep_re
+ regex = regex and is_re_compilable(to_replace)
# try to get the pattern attribute (compiled re) or it's a string
if is_re(to_replace):
@@ -2646,7 +2633,6 @@ def replace(
value,
inplace: bool = False,
regex: bool = False,
- convert: bool = True,
) -> List["Block"]:
inplace = validate_bool_kwarg(inplace, "inplace")
result = self if inplace else self.copy()
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index a06d57e268fe2..fda4da8694ea3 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -638,9 +638,11 @@ def convert(
coerce=coerce,
)
- def replace(self, value, **kwargs) -> "BlockManager":
+ def replace(self, to_replace, value, inplace: bool, regex: bool) -> "BlockManager":
assert np.ndim(value) == 0, value
- return self.apply("replace", value=value, **kwargs)
+ return self.apply(
+ "replace", to_replace=to_replace, value=value, inplace=inplace, regex=regex
+ )
def replace_list(
self: T,
| Confirmed that the `regex` kwarg is always bool, which lets us remove some gymnastics (L2570-L2586). Then removed always-True `convert` kwarg. | https://api.github.com/repos/pandas-dev/pandas/pulls/37683 | 2020-11-07T04:52:12Z | 2020-11-08T01:47:27Z | 2020-11-08T01:47:27Z | 2020-11-08T02:08:02Z |
REF/TST: share some api tests | diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 25d3fab76ca36..157c8687808b3 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -263,12 +263,16 @@ def _check_f(base, f):
@async_mark()
@td.check_file_leaks
- async def test_tab_complete_warning(self, ip):
+ async def test_tab_complete_warning(self, ip, frame_or_series):
# GH 16409
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
- code = "from pandas import DataFrame; df = DataFrame()"
+ if frame_or_series is DataFrame:
+ code = "from pandas import DataFrame; obj = DataFrame()"
+ else:
+ code = "from pandas import Series; obj = Series(dtype=object)"
+
await ip.run_code(code)
# TODO: remove it when Ipython updates
@@ -283,7 +287,7 @@ async def test_tab_complete_warning(self, ip):
)
with warning:
with provisionalcompleter("ignore"):
- list(ip.Completer.completions("df.", 1))
+ list(ip.Completer.completions("obj.", 1))
def test_attrs(self):
df = DataFrame({"A": [2, 3]})
@@ -294,9 +298,15 @@ def test_attrs(self):
assert result.attrs == {"version": 1}
@pytest.mark.parametrize("allows_duplicate_labels", [True, False, None])
- def test_set_flags(self, allows_duplicate_labels):
- df = DataFrame({"A": [1, 2]})
- result = df.set_flags(allows_duplicate_labels=allows_duplicate_labels)
+ def test_set_flags(self, allows_duplicate_labels, frame_or_series):
+ obj = DataFrame({"A": [1, 2]})
+ key = (0, 0)
+ if frame_or_series is Series:
+ obj = obj["A"]
+ key = 0
+
+ result = obj.set_flags(allows_duplicate_labels=allows_duplicate_labels)
+
if allows_duplicate_labels is None:
# We don't update when it's not provided
assert result.flags.allows_duplicate_labels is True
@@ -304,21 +314,21 @@ def test_set_flags(self, allows_duplicate_labels):
assert result.flags.allows_duplicate_labels is allows_duplicate_labels
# We made a copy
- assert df is not result
+ assert obj is not result
- # We didn't mutate df
- assert df.flags.allows_duplicate_labels is True
+ # We didn't mutate obj
+ assert obj.flags.allows_duplicate_labels is True
# But we didn't copy data
- result.iloc[0, 0] = 0
- assert df.iloc[0, 0] == 0
+ result.iloc[key] = 0
+ assert obj.iloc[key] == 0
# Now we do copy.
- result = df.set_flags(
+ result = obj.set_flags(
copy=True, allows_duplicate_labels=allows_duplicate_labels
)
- result.iloc[0, 0] = 10
- assert df.iloc[0, 0] == 0
+ result.iloc[key] = 10
+ assert obj.iloc[key] == 0
@skip_if_no("jinja2")
def test_constructor_expanddim_lookup(self):
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 717d8b5c90d85..beace074894a8 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -3,9 +3,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-from pandas.util._test_decorators import async_mark
-
import pandas as pd
from pandas import DataFrame, Index, Series, Timedelta, Timestamp, date_range
import pandas._testing as tm
@@ -216,30 +213,6 @@ def test_empty_method(self):
for full_series in [Series([1]), s2]:
assert not full_series.empty
- @async_mark()
- @td.check_file_leaks
- async def test_tab_complete_warning(self, ip):
- # https://github.com/pandas-dev/pandas/issues/16409
- pytest.importorskip("IPython", minversion="6.0.0")
- from IPython.core.completer import provisionalcompleter
-
- code = "import pandas as pd; s = Series(dtype=object)"
- await ip.run_code(code)
-
- # TODO: remove it when Ipython updates
- # GH 33567, jedi version raises Deprecation warning in Ipython
- import jedi
-
- if jedi.__version__ < "0.17.0":
- warning = tm.assert_produces_warning(None)
- else:
- warning = tm.assert_produces_warning(
- DeprecationWarning, check_stacklevel=False
- )
- with warning:
- with provisionalcompleter("ignore"):
- list(ip.Completer.completions("s.", 1))
-
def test_integer_series_size(self):
# GH 25580
s = Series(range(9))
@@ -253,29 +226,3 @@ def test_attrs(self):
s.attrs["version"] = 1
result = s + 1
assert result.attrs == {"version": 1}
-
- @pytest.mark.parametrize("allows_duplicate_labels", [True, False, None])
- def test_set_flags(self, allows_duplicate_labels):
- df = Series([1, 2])
- result = df.set_flags(allows_duplicate_labels=allows_duplicate_labels)
- if allows_duplicate_labels is None:
- # We don't update when it's not provided
- assert result.flags.allows_duplicate_labels is True
- else:
- assert result.flags.allows_duplicate_labels is allows_duplicate_labels
-
- # We made a copy
- assert df is not result
- # We didn't mutate df
- assert df.flags.allows_duplicate_labels is True
-
- # But we didn't copy data
- result.iloc[0] = 0
- assert df.iloc[0] == 0
-
- # Now we do copy.
- result = df.set_flags(
- copy=True, allows_duplicate_labels=allows_duplicate_labels
- )
- result.iloc[0] = 10
- assert df.iloc[0] == 0
| https://api.github.com/repos/pandas-dev/pandas/pulls/37679 | 2020-11-07T00:27:48Z | 2020-11-07T01:30:19Z | 2020-11-07T01:30:19Z | 2020-11-07T01:31:41Z | |
TST/REF: misplaced Categorical tests | diff --git a/pandas/tests/arrays/categorical/test_algos.py b/pandas/tests/arrays/categorical/test_algos.py
index 45e0d503f30e7..5b0004a395334 100644
--- a/pandas/tests/arrays/categorical/test_algos.py
+++ b/pandas/tests/arrays/categorical/test_algos.py
@@ -59,30 +59,6 @@ def test_isin_cats():
tm.assert_numpy_array_equal(expected, result)
-@pytest.mark.parametrize(
- "to_replace, value, result, expected_error_msg",
- [
- ("b", "c", ["a", "c"], "Categorical.categories are different"),
- ("c", "d", ["a", "b"], None),
- # https://github.com/pandas-dev/pandas/issues/33288
- ("a", "a", ["a", "b"], None),
- ("b", None, ["a", None], "Categorical.categories length are different"),
- ],
-)
-def test_replace(to_replace, value, result, expected_error_msg):
- # GH 26988
- cat = pd.Categorical(["a", "b"])
- expected = pd.Categorical(result)
- result = cat.replace(to_replace, value)
- tm.assert_categorical_equal(result, expected)
- if to_replace == "b": # the "c" test is supposed to be unchanged
- with pytest.raises(AssertionError, match=expected_error_msg):
- # ensure non-inplace call does not affect original
- tm.assert_categorical_equal(cat, expected)
- cat.replace(to_replace, value, inplace=True)
- tm.assert_categorical_equal(cat, expected)
-
-
@pytest.mark.parametrize("empty", [[], pd.Series(dtype=object), np.array([])])
def test_isin_empty(empty):
s = pd.Categorical(["a", "b"])
@@ -105,94 +81,3 @@ def test_diff():
result = df.diff()
tm.assert_frame_equal(result, expected)
-
-
-class TestTake:
- # https://github.com/pandas-dev/pandas/issues/20664
-
- def test_take_default_allow_fill(self):
- cat = pd.Categorical(["a", "b"])
- with tm.assert_produces_warning(None):
- result = cat.take([0, -1])
-
- assert result.equals(cat)
-
- def test_take_positive_no_warning(self):
- cat = pd.Categorical(["a", "b"])
- with tm.assert_produces_warning(None):
- cat.take([0, 0])
-
- def test_take_bounds(self, allow_fill):
- # https://github.com/pandas-dev/pandas/issues/20664
- cat = pd.Categorical(["a", "b", "a"])
- if allow_fill:
- msg = "indices are out-of-bounds"
- else:
- msg = "index 4 is out of bounds for( axis 0 with)? size 3"
- with pytest.raises(IndexError, match=msg):
- cat.take([4, 5], allow_fill=allow_fill)
-
- def test_take_empty(self, allow_fill):
- # https://github.com/pandas-dev/pandas/issues/20664
- cat = pd.Categorical([], categories=["a", "b"])
- if allow_fill:
- msg = "indices are out-of-bounds"
- else:
- msg = "cannot do a non-empty take from an empty axes"
- with pytest.raises(IndexError, match=msg):
- cat.take([0], allow_fill=allow_fill)
-
- def test_positional_take(self, ordered):
- cat = pd.Categorical(
- ["a", "a", "b", "b"], categories=["b", "a"], ordered=ordered
- )
- result = cat.take([0, 1, 2], allow_fill=False)
- expected = pd.Categorical(
- ["a", "a", "b"], categories=cat.categories, ordered=ordered
- )
- tm.assert_categorical_equal(result, expected)
-
- def test_positional_take_unobserved(self, ordered):
- cat = pd.Categorical(["a", "b"], categories=["a", "b", "c"], ordered=ordered)
- result = cat.take([1, 0], allow_fill=False)
- expected = pd.Categorical(
- ["b", "a"], categories=cat.categories, ordered=ordered
- )
- tm.assert_categorical_equal(result, expected)
-
- def test_take_allow_fill(self):
- # https://github.com/pandas-dev/pandas/issues/23296
- cat = pd.Categorical(["a", "a", "b"])
- result = cat.take([0, -1, -1], allow_fill=True)
- expected = pd.Categorical(["a", np.nan, np.nan], categories=["a", "b"])
- tm.assert_categorical_equal(result, expected)
-
- def test_take_fill_with_negative_one(self):
- # -1 was a category
- cat = pd.Categorical([-1, 0, 1])
- result = cat.take([0, -1, 1], allow_fill=True, fill_value=-1)
- expected = pd.Categorical([-1, -1, 0], categories=[-1, 0, 1])
- tm.assert_categorical_equal(result, expected)
-
- def test_take_fill_value(self):
- # https://github.com/pandas-dev/pandas/issues/23296
- cat = pd.Categorical(["a", "b", "c"])
- result = cat.take([0, 1, -1], fill_value="a", allow_fill=True)
- expected = pd.Categorical(["a", "b", "a"], categories=["a", "b", "c"])
- tm.assert_categorical_equal(result, expected)
-
- def test_take_fill_value_new_raises(self):
- # https://github.com/pandas-dev/pandas/issues/23296
- cat = pd.Categorical(["a", "b", "c"])
- xpr = r"'fill_value=d' is not present in this Categorical's categories"
- with pytest.raises(ValueError, match=xpr):
- cat.take([0, 1, -1], fill_value="d", allow_fill=True)
-
- def test_take_nd_deprecated(self):
- cat = pd.Categorical(["a", "b", "c"])
- with tm.assert_produces_warning(FutureWarning):
- cat.take_nd([0, 1])
-
- ci = pd.Index(cat)
- with tm.assert_produces_warning(FutureWarning):
- ci.take_nd([0, 1])
diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py
index 4bf9b4b40d0b6..98dcdd1692117 100644
--- a/pandas/tests/arrays/categorical/test_analytics.py
+++ b/pandas/tests/arrays/categorical/test_analytics.py
@@ -359,10 +359,3 @@ def test_validate_inplace_raises(self, value):
with pytest.raises(ValueError, match=msg):
cat.sort_values(inplace=value)
-
- def test_isna(self):
- exp = np.array([False, False, True])
- c = Categorical(["a", "b", np.nan])
- res = c.isna()
-
- tm.assert_numpy_array_equal(res, exp)
diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py
index bf0b5289b5df1..6068166cb8618 100644
--- a/pandas/tests/arrays/categorical/test_indexing.py
+++ b/pandas/tests/arrays/categorical/test_indexing.py
@@ -1,7 +1,17 @@
import numpy as np
import pytest
-from pandas import Categorical, CategoricalIndex, Index, PeriodIndex, Series
+from pandas import (
+ Categorical,
+ CategoricalIndex,
+ Index,
+ Interval,
+ IntervalIndex,
+ PeriodIndex,
+ Series,
+ Timedelta,
+ Timestamp,
+)
import pandas._testing as tm
import pandas.core.common as com
from pandas.tests.arrays.categorical.common import TestCategorical
@@ -256,6 +266,55 @@ def test_where_ordered_differs_rasies(self):
ser.where([True, False, True], other)
+class TestContains:
+ def test_contains(self):
+ # GH#21508
+ c = Categorical(list("aabbca"), categories=list("cab"))
+
+ assert "b" in c
+ assert "z" not in c
+ assert np.nan not in c
+ with pytest.raises(TypeError, match="unhashable type: 'list'"):
+ assert [1] in c
+
+ # assert codes NOT in index
+ assert 0 not in c
+ assert 1 not in c
+
+ c = Categorical(list("aabbca") + [np.nan], categories=list("cab"))
+ assert np.nan in c
+
+ @pytest.mark.parametrize(
+ "item, expected",
+ [
+ (Interval(0, 1), True),
+ (1.5, True),
+ (Interval(0.5, 1.5), False),
+ ("a", False),
+ (Timestamp(1), False),
+ (Timedelta(1), False),
+ ],
+ ids=str,
+ )
+ def test_contains_interval(self, item, expected):
+ # GH#23705
+ cat = Categorical(IntervalIndex.from_breaks(range(3)))
+ result = item in cat
+ assert result is expected
+
+ def test_contains_list(self):
+ # GH#21729
+ cat = Categorical([1, 2, 3])
+
+ assert "a" not in cat
+
+ with pytest.raises(TypeError, match="unhashable type"):
+ ["a"] in cat
+
+ with pytest.raises(TypeError, match="unhashable type"):
+ ["a", "b"] in cat
+
+
@pytest.mark.parametrize("index", [True, False])
def test_mask_with_boolean(index):
s = Series(range(3))
diff --git a/pandas/tests/arrays/categorical/test_missing.py b/pandas/tests/arrays/categorical/test_missing.py
index 364c290edc46c..cb0ba128c1fb7 100644
--- a/pandas/tests/arrays/categorical/test_missing.py
+++ b/pandas/tests/arrays/categorical/test_missing.py
@@ -11,6 +11,13 @@
class TestCategoricalMissing:
+ def test_isna(self):
+ exp = np.array([False, False, True])
+ cat = Categorical(["a", "b", np.nan])
+ res = cat.isna()
+
+ tm.assert_numpy_array_equal(res, exp)
+
def test_na_flags_int_categories(self):
# #1457
diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py
index 51dc66c18a3e6..328b5771e617c 100644
--- a/pandas/tests/arrays/categorical/test_operators.py
+++ b/pandas/tests/arrays/categorical/test_operators.py
@@ -395,50 +395,3 @@ def test_numeric_like_ops(self):
msg = "Object with dtype category cannot perform the numpy op log"
with pytest.raises(TypeError, match=msg):
np.log(s)
-
- def test_contains(self):
- # GH21508
- c = Categorical(list("aabbca"), categories=list("cab"))
-
- assert "b" in c
- assert "z" not in c
- assert np.nan not in c
- with pytest.raises(TypeError, match="unhashable type: 'list'"):
- assert [1] in c
-
- # assert codes NOT in index
- assert 0 not in c
- assert 1 not in c
-
- c = Categorical(list("aabbca") + [np.nan], categories=list("cab"))
- assert np.nan in c
-
- @pytest.mark.parametrize(
- "item, expected",
- [
- (pd.Interval(0, 1), True),
- (1.5, True),
- (pd.Interval(0.5, 1.5), False),
- ("a", False),
- (pd.Timestamp(1), False),
- (pd.Timedelta(1), False),
- ],
- ids=str,
- )
- def test_contains_interval(self, item, expected):
- # GH 23705
- cat = Categorical(pd.IntervalIndex.from_breaks(range(3)))
- result = item in cat
- assert result is expected
-
- def test_contains_list(self):
- # GH#21729
- cat = Categorical([1, 2, 3])
-
- assert "a" not in cat
-
- with pytest.raises(TypeError, match="unhashable type"):
- ["a"] in cat
-
- with pytest.raises(TypeError, match="unhashable type"):
- ["a", "b"] in cat
diff --git a/pandas/tests/arrays/categorical/test_replace.py b/pandas/tests/arrays/categorical/test_replace.py
index 8b784fde1d3c5..5889195ad68db 100644
--- a/pandas/tests/arrays/categorical/test_replace.py
+++ b/pandas/tests/arrays/categorical/test_replace.py
@@ -2,6 +2,7 @@
import pytest
import pandas as pd
+from pandas import Categorical
import pandas._testing as tm
@@ -45,3 +46,28 @@ def test_replace(to_replace, value, expected, flip_categories):
tm.assert_series_equal(expected, result, check_category_order=False)
tm.assert_series_equal(expected, s, check_category_order=False)
+
+
+@pytest.mark.parametrize(
+ "to_replace, value, result, expected_error_msg",
+ [
+ ("b", "c", ["a", "c"], "Categorical.categories are different"),
+ ("c", "d", ["a", "b"], None),
+ # https://github.com/pandas-dev/pandas/issues/33288
+ ("a", "a", ["a", "b"], None),
+ ("b", None, ["a", None], "Categorical.categories length are different"),
+ ],
+)
+def test_replace2(to_replace, value, result, expected_error_msg):
+ # TODO: better name
+ # GH#26988
+ cat = Categorical(["a", "b"])
+ expected = Categorical(result)
+ result = cat.replace(to_replace, value)
+ tm.assert_categorical_equal(result, expected)
+ if to_replace == "b": # the "c" test is supposed to be unchanged
+ with pytest.raises(AssertionError, match=expected_error_msg):
+ # ensure non-inplace call does not affect original
+ tm.assert_categorical_equal(cat, expected)
+ cat.replace(to_replace, value, inplace=True)
+ tm.assert_categorical_equal(cat, expected)
diff --git a/pandas/tests/arrays/categorical/test_take.py b/pandas/tests/arrays/categorical/test_take.py
new file mode 100644
index 0000000000000..7a27f5c3e73ad
--- /dev/null
+++ b/pandas/tests/arrays/categorical/test_take.py
@@ -0,0 +1,92 @@
+import numpy as np
+import pytest
+
+from pandas import Categorical, Index
+import pandas._testing as tm
+
+
+class TestTake:
+ # https://github.com/pandas-dev/pandas/issues/20664
+
+ def test_take_default_allow_fill(self):
+ cat = Categorical(["a", "b"])
+ with tm.assert_produces_warning(None):
+ result = cat.take([0, -1])
+
+ assert result.equals(cat)
+
+ def test_take_positive_no_warning(self):
+ cat = Categorical(["a", "b"])
+ with tm.assert_produces_warning(None):
+ cat.take([0, 0])
+
+ def test_take_bounds(self, allow_fill):
+ # https://github.com/pandas-dev/pandas/issues/20664
+ cat = Categorical(["a", "b", "a"])
+ if allow_fill:
+ msg = "indices are out-of-bounds"
+ else:
+ msg = "index 4 is out of bounds for( axis 0 with)? size 3"
+ with pytest.raises(IndexError, match=msg):
+ cat.take([4, 5], allow_fill=allow_fill)
+
+ def test_take_empty(self, allow_fill):
+ # https://github.com/pandas-dev/pandas/issues/20664
+ cat = Categorical([], categories=["a", "b"])
+ if allow_fill:
+ msg = "indices are out-of-bounds"
+ else:
+ msg = "cannot do a non-empty take from an empty axes"
+ with pytest.raises(IndexError, match=msg):
+ cat.take([0], allow_fill=allow_fill)
+
+ def test_positional_take(self, ordered):
+ cat = Categorical(["a", "a", "b", "b"], categories=["b", "a"], ordered=ordered)
+ result = cat.take([0, 1, 2], allow_fill=False)
+ expected = Categorical(
+ ["a", "a", "b"], categories=cat.categories, ordered=ordered
+ )
+ tm.assert_categorical_equal(result, expected)
+
+ def test_positional_take_unobserved(self, ordered):
+ cat = Categorical(["a", "b"], categories=["a", "b", "c"], ordered=ordered)
+ result = cat.take([1, 0], allow_fill=False)
+ expected = Categorical(["b", "a"], categories=cat.categories, ordered=ordered)
+ tm.assert_categorical_equal(result, expected)
+
+ def test_take_allow_fill(self):
+ # https://github.com/pandas-dev/pandas/issues/23296
+ cat = Categorical(["a", "a", "b"])
+ result = cat.take([0, -1, -1], allow_fill=True)
+ expected = Categorical(["a", np.nan, np.nan], categories=["a", "b"])
+ tm.assert_categorical_equal(result, expected)
+
+ def test_take_fill_with_negative_one(self):
+ # -1 was a category
+ cat = Categorical([-1, 0, 1])
+ result = cat.take([0, -1, 1], allow_fill=True, fill_value=-1)
+ expected = Categorical([-1, -1, 0], categories=[-1, 0, 1])
+ tm.assert_categorical_equal(result, expected)
+
+ def test_take_fill_value(self):
+ # https://github.com/pandas-dev/pandas/issues/23296
+ cat = Categorical(["a", "b", "c"])
+ result = cat.take([0, 1, -1], fill_value="a", allow_fill=True)
+ expected = Categorical(["a", "b", "a"], categories=["a", "b", "c"])
+ tm.assert_categorical_equal(result, expected)
+
+ def test_take_fill_value_new_raises(self):
+ # https://github.com/pandas-dev/pandas/issues/23296
+ cat = Categorical(["a", "b", "c"])
+ xpr = r"'fill_value=d' is not present in this Categorical's categories"
+ with pytest.raises(ValueError, match=xpr):
+ cat.take([0, 1, -1], fill_value="d", allow_fill=True)
+
+ def test_take_nd_deprecated(self):
+ cat = Categorical(["a", "b", "c"])
+ with tm.assert_produces_warning(FutureWarning):
+ cat.take_nd([0, 1])
+
+ ci = Index(cat)
+ with tm.assert_produces_warning(FutureWarning):
+ ci.take_nd([0, 1])
| https://api.github.com/repos/pandas-dev/pandas/pulls/37678 | 2020-11-07T00:00:46Z | 2020-11-08T00:02:16Z | 2020-11-08T00:02:16Z | 2020-11-08T00:14:17Z | |
REF/TST: collect indexing tests by method | diff --git a/pandas/tests/frame/indexing/test_categorical.py b/pandas/tests/frame/indexing/test_categorical.py
index d3c907f4ce30f..6137cadc93125 100644
--- a/pandas/tests/frame/indexing/test_categorical.py
+++ b/pandas/tests/frame/indexing/test_categorical.py
@@ -352,7 +352,7 @@ def test_assigning_ops(self):
df.loc[2:3, "b"] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
- def test_setitem_single_row_categorical(self):
+ def test_loc_setitem_single_row_categorical(self):
# GH 25495
df = DataFrame({"Alpha": ["a"], "Numeric": [0]})
categories = Categorical(df["Alpha"], categories=["a", "b", "c"])
diff --git a/pandas/tests/frame/indexing/test_getitem.py b/pandas/tests/frame/indexing/test_getitem.py
index d9cdfa5ea45ec..079cc12389835 100644
--- a/pandas/tests/frame/indexing/test_getitem.py
+++ b/pandas/tests/frame/indexing/test_getitem.py
@@ -1,7 +1,16 @@
import numpy as np
import pytest
-from pandas import DataFrame, MultiIndex, period_range
+from pandas import (
+ Categorical,
+ CategoricalDtype,
+ CategoricalIndex,
+ DataFrame,
+ MultiIndex,
+ Timestamp,
+ get_dummies,
+ period_range,
+)
import pandas._testing as tm
@@ -29,3 +38,99 @@ def test_getitem_periodindex(self):
ts = df["1/1/2000"]
tm.assert_series_equal(ts, df.iloc[:, 0])
+
+ def test_getitem_list_of_labels_categoricalindex_cols(self):
+ # GH#16115
+ cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
+
+ expected = DataFrame(
+ [[1, 0], [0, 1]], dtype="uint8", index=[0, 1], columns=cats
+ )
+ dummies = get_dummies(cats)
+ result = dummies[list(dummies.columns)]
+ tm.assert_frame_equal(result, expected)
+
+
+class TestGetitemCallable:
+ def test_getitem_callable(self, float_frame):
+ # GH#12533
+ result = float_frame[lambda x: "A"]
+ expected = float_frame.loc[:, "A"]
+ tm.assert_series_equal(result, expected)
+
+ result = float_frame[lambda x: ["A", "B"]]
+ expected = float_frame.loc[:, ["A", "B"]]
+ tm.assert_frame_equal(result, float_frame.loc[:, ["A", "B"]])
+
+ df = float_frame[:3]
+ result = df[lambda x: [True, False, True]]
+ expected = float_frame.iloc[[0, 2], :]
+ tm.assert_frame_equal(result, expected)
+
+
+class TestGetitemBooleanMask:
+ def test_getitem_bool_mask_categorical_index(self):
+
+ df3 = DataFrame(
+ {
+ "A": np.arange(6, dtype="int64"),
+ },
+ index=CategoricalIndex(
+ [1, 1, 2, 1, 3, 2],
+ dtype=CategoricalDtype([3, 2, 1], ordered=True),
+ name="B",
+ ),
+ )
+ df4 = DataFrame(
+ {
+ "A": np.arange(6, dtype="int64"),
+ },
+ index=CategoricalIndex(
+ [1, 1, 2, 1, 3, 2],
+ dtype=CategoricalDtype([3, 2, 1], ordered=False),
+ name="B",
+ ),
+ )
+
+ result = df3[df3.index == "a"]
+ expected = df3.iloc[[]]
+ tm.assert_frame_equal(result, expected)
+
+ result = df4[df4.index == "a"]
+ expected = df4.iloc[[]]
+ tm.assert_frame_equal(result, expected)
+
+ result = df3[df3.index == 1]
+ expected = df3.iloc[[0, 1, 3]]
+ tm.assert_frame_equal(result, expected)
+
+ result = df4[df4.index == 1]
+ expected = df4.iloc[[0, 1, 3]]
+ tm.assert_frame_equal(result, expected)
+
+ # since we have an ordered categorical
+
+ # CategoricalIndex([1, 1, 2, 1, 3, 2],
+ # categories=[3, 2, 1],
+ # ordered=True,
+ # name='B')
+ result = df3[df3.index < 2]
+ expected = df3.iloc[[4]]
+ tm.assert_frame_equal(result, expected)
+
+ result = df3[df3.index > 1]
+ expected = df3.iloc[[]]
+ tm.assert_frame_equal(result, expected)
+
+ # unordered
+ # cannot be compared
+
+ # CategoricalIndex([1, 1, 2, 1, 3, 2],
+ # categories=[3, 2, 1],
+ # ordered=False,
+ # name='B')
+ msg = "Unordered Categoricals can only compare equality or not"
+ with pytest.raises(TypeError, match=msg):
+ df4[df4.index < 2]
+ with pytest.raises(TypeError, match=msg):
+ df4[df4.index > 1]
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 4214ac14cba49..ff9646d45c0ac 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -110,21 +110,6 @@ def test_getitem_listlike(self, idx_type, levels, float_frame):
with pytest.raises(KeyError, match="not in index"):
frame[idx]
- def test_getitem_callable(self, float_frame):
- # GH 12533
- result = float_frame[lambda x: "A"]
- expected = float_frame.loc[:, "A"]
- tm.assert_series_equal(result, expected)
-
- result = float_frame[lambda x: ["A", "B"]]
- expected = float_frame.loc[:, ["A", "B"]]
- tm.assert_frame_equal(result, float_frame.loc[:, ["A", "B"]])
-
- df = float_frame[:3]
- result = df[lambda x: [True, False, True]]
- expected = float_frame.iloc[[0, 2], :]
- tm.assert_frame_equal(result, expected)
-
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
diff --git a/pandas/tests/indexing/test_at.py b/pandas/tests/indexing/test_at.py
index 2e06d8c73d7d1..46299fadf7789 100644
--- a/pandas/tests/indexing/test_at.py
+++ b/pandas/tests/indexing/test_at.py
@@ -3,7 +3,7 @@
import numpy as np
import pytest
-from pandas import DataFrame
+from pandas import DataFrame, Series
import pandas._testing as tm
@@ -39,3 +39,72 @@ def test_at_with_duplicate_axes_requires_scalar_lookup(self):
df.at[1, ["A"]] = 1
with pytest.raises(ValueError, match=msg):
df.at[:, "A"] = 1
+
+
+class TestAtErrors:
+ # TODO: De-duplicate/parametrize
+ # test_at_series_raises_key_error, test_at_frame_raises_key_error,
+ # test_at_series_raises_key_error2, test_at_frame_raises_key_error2
+
+ def test_at_series_raises_key_error(self):
+ # GH#31724 .at should match .loc
+
+ ser = Series([1, 2, 3], index=[3, 2, 1])
+ result = ser.at[1]
+ assert result == 3
+ result = ser.loc[1]
+ assert result == 3
+
+ with pytest.raises(KeyError, match="a"):
+ ser.at["a"]
+ with pytest.raises(KeyError, match="a"):
+ # .at should match .loc
+ ser.loc["a"]
+
+ def test_at_frame_raises_key_error(self):
+ # GH#31724 .at should match .loc
+
+ df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
+
+ result = df.at[1, 0]
+ assert result == 3
+ result = df.loc[1, 0]
+ assert result == 3
+
+ with pytest.raises(KeyError, match="a"):
+ df.at["a", 0]
+ with pytest.raises(KeyError, match="a"):
+ df.loc["a", 0]
+
+ with pytest.raises(KeyError, match="a"):
+ df.at[1, "a"]
+ with pytest.raises(KeyError, match="a"):
+ df.loc[1, "a"]
+
+ def test_at_series_raises_key_error2(self):
+ # at should not fallback
+ # GH#7814
+ # GH#31724 .at should match .loc
+ ser = Series([1, 2, 3], index=list("abc"))
+ result = ser.at["a"]
+ assert result == 1
+ result = ser.loc["a"]
+ assert result == 1
+
+ with pytest.raises(KeyError, match="^0$"):
+ ser.at[0]
+ with pytest.raises(KeyError, match="^0$"):
+ ser.loc[0]
+
+ def test_at_frame_raises_key_error2(self):
+ # GH#31724 .at should match .loc
+ df = DataFrame({"A": [1, 2, 3]}, index=list("abc"))
+ result = df.at["a", "A"]
+ assert result == 1
+ result = df.loc["a", "A"]
+ assert result == 1
+
+ with pytest.raises(KeyError, match="^0$"):
+ df.at["a", 0]
+ with pytest.raises(KeyError, match="^0$"):
+ df.loc["a", 0]
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 6cdd73d37aec8..9885765bf53e4 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -2,7 +2,6 @@
import pytest
from pandas.core.dtypes.common import is_categorical_dtype
-from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
@@ -276,27 +275,6 @@ def test_slicing_doc_examples(self):
)
tm.assert_frame_equal(result, expected)
- def test_getitem_category_type(self):
- # GH 14580
- # test iloc() on Series with Categorical data
-
- s = Series([1, 2, 3]).astype("category")
-
- # get slice
- result = s.iloc[0:2]
- expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
- tm.assert_series_equal(result, expected)
-
- # get list of indexes
- result = s.iloc[[0, 1]]
- expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
- tm.assert_series_equal(result, expected)
-
- # get boolean array
- result = s.iloc[[True, False, False]]
- expected = Series([1]).astype(CategoricalDtype([1, 2, 3]))
- tm.assert_series_equal(result, expected)
-
def test_loc_listlike(self):
# list of labels
@@ -413,17 +391,6 @@ def test_loc_listlike_dtypes(self):
with pytest.raises(KeyError, match=msg):
df.loc[["a", "x"]]
- def test_getitem_with_listlike(self):
- # GH 16115
- cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
-
- expected = DataFrame(
- [[1, 0], [0, 1]], dtype="uint8", index=[0, 1], columns=cats
- )
- dummies = pd.get_dummies(cats)
- result = dummies[list(dummies.columns)]
- tm.assert_frame_equal(result, expected)
-
def test_ix_categorical_index(self):
# GH 12531
df = DataFrame(np.random.randn(3, 3), index=list("ABC"), columns=list("XYZ"))
@@ -512,68 +479,6 @@ def test_loc_and_at_with_categorical_index(self):
assert df.loc["B", 1] == 4
assert df.at["B", 1] == 4
- def test_getitem_bool_mask_categorical_index(self):
-
- df3 = DataFrame(
- {
- "A": np.arange(6, dtype="int64"),
- },
- index=CategoricalIndex(
- [1, 1, 2, 1, 3, 2], dtype=CDT([3, 2, 1], ordered=True), name="B"
- ),
- )
- df4 = DataFrame(
- {
- "A": np.arange(6, dtype="int64"),
- },
- index=CategoricalIndex(
- [1, 1, 2, 1, 3, 2], dtype=CDT([3, 2, 1], ordered=False), name="B"
- ),
- )
-
- result = df3[df3.index == "a"]
- expected = df3.iloc[[]]
- tm.assert_frame_equal(result, expected)
-
- result = df4[df4.index == "a"]
- expected = df4.iloc[[]]
- tm.assert_frame_equal(result, expected)
-
- result = df3[df3.index == 1]
- expected = df3.iloc[[0, 1, 3]]
- tm.assert_frame_equal(result, expected)
-
- result = df4[df4.index == 1]
- expected = df4.iloc[[0, 1, 3]]
- tm.assert_frame_equal(result, expected)
-
- # since we have an ordered categorical
-
- # CategoricalIndex([1, 1, 2, 1, 3, 2],
- # categories=[3, 2, 1],
- # ordered=True,
- # name='B')
- result = df3[df3.index < 2]
- expected = df3.iloc[[4]]
- tm.assert_frame_equal(result, expected)
-
- result = df3[df3.index > 1]
- expected = df3.iloc[[]]
- tm.assert_frame_equal(result, expected)
-
- # unordered
- # cannot be compared
-
- # CategoricalIndex([1, 1, 2, 1, 3, 2],
- # categories=[3, 2, 1],
- # ordered=False,
- # name='B')
- msg = "Unordered Categoricals can only compare equality or not"
- with pytest.raises(TypeError, match=msg):
- df4[df4.index < 2]
- with pytest.raises(TypeError, match=msg):
- df4[df4.index > 1]
-
def test_indexing_with_category(self):
# https://github.com/pandas-dev/pandas/issues/12564
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index fad3478499929..e7bf186ae6456 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -7,33 +7,6 @@
class TestDatetimeIndex:
- def test_setitem_with_datetime_tz(self):
- # 16889
- # support .loc with alignment and tz-aware DatetimeIndex
- mask = np.array([True, False, True, False])
-
- idx = date_range("20010101", periods=4, tz="UTC")
- df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
-
- result = df.copy()
- result.loc[mask, :] = df.loc[mask, :]
- tm.assert_frame_equal(result, df)
-
- result = df.copy()
- result.loc[mask] = df.loc[mask]
- tm.assert_frame_equal(result, df)
-
- idx = date_range("20010101", periods=4)
- df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
-
- result = df.copy()
- result.loc[mask, :] = df.loc[mask, :]
- tm.assert_frame_equal(result, df)
-
- result = df.copy()
- result.loc[mask] = df.loc[mask]
- tm.assert_frame_equal(result, df)
-
def test_indexing_with_datetime_tz(self):
# GH#8260
@@ -187,22 +160,6 @@ def test_indexing_with_datetimeindex_tz(self):
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
- def test_partial_setting_with_datetimelike_dtype(self):
-
- # GH9478
- # a datetimeindex alignment issue with partial setting
- df = DataFrame(
- np.arange(6.0).reshape(3, 2),
- columns=list("AB"),
- index=date_range("1/1/2000", periods=3, freq="1H"),
- )
- expected = df.copy()
- expected["C"] = [expected.index[0]] + [pd.NaT, pd.NaT]
-
- mask = df.A < 1
- df.loc[mask, "C"] = df.loc[mask].index
- tm.assert_frame_equal(df, expected)
-
def test_series_partial_set_datetime(self):
# GH 11497
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 6c80354610a78..f8dfda3dab486 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -7,7 +7,7 @@
import pytest
import pandas as pd
-from pandas import DataFrame, Series, concat, date_range, isna
+from pandas import CategoricalDtype, DataFrame, Series, concat, date_range, isna
import pandas._testing as tm
from pandas.api.types import is_scalar
from pandas.core.indexing import IndexingError
@@ -748,6 +748,27 @@ def test_iloc_getitem_singlerow_slice_categoricaldtype_gives_series(self):
tm.assert_series_equal(result, expected)
+ def test_iloc_getitem_categorical_values(self):
+ # GH#14580
+ # test iloc() on Series with Categorical data
+
+ ser = Series([1, 2, 3]).astype("category")
+
+ # get slice
+ result = ser.iloc[0:2]
+ expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
+ tm.assert_series_equal(result, expected)
+
+ # get list of indexes
+ result = ser.iloc[[0, 1]]
+ expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
+ tm.assert_series_equal(result, expected)
+
+ # get boolean array
+ result = ser.iloc[[True, False, False]]
+ expected = Series([1]).astype(CategoricalDtype([1, 2, 3]))
+ tm.assert_series_equal(result, expected)
+
class TestILocSetItemDuplicateColumns:
def test_iloc_setitem_scalar_duplicate_columns(self):
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 3b0fae537a6e5..bfec071369a0c 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1501,6 +1501,62 @@ def test_loc_getitem_slice_floats_inexact(self):
s1 = df.loc[52195.1:52198.9]
assert len(s1) == 3
+ def test_loc_getitem_float_slice_float64index(self):
+ ser = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
+
+ assert len(ser.loc[12.0:]) == 8
+ assert len(ser.loc[12.5:]) == 7
+
+ idx = np.arange(10, 20, dtype=float)
+ idx[2] = 12.2
+ ser.index = idx
+ assert len(ser.loc[12.0:]) == 8
+ assert len(ser.loc[12.5:]) == 7
+
+
+class TestLocBooleanMask:
+ def test_loc_setitem_mask_with_datetimeindex_tz(self):
+ # GH#16889
+ # support .loc with alignment and tz-aware DatetimeIndex
+ mask = np.array([True, False, True, False])
+
+ idx = date_range("20010101", periods=4, tz="UTC")
+ df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
+
+ result = df.copy()
+ result.loc[mask, :] = df.loc[mask, :]
+ tm.assert_frame_equal(result, df)
+
+ result = df.copy()
+ result.loc[mask] = df.loc[mask]
+ tm.assert_frame_equal(result, df)
+
+ idx = date_range("20010101", periods=4)
+ df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
+
+ result = df.copy()
+ result.loc[mask, :] = df.loc[mask, :]
+ tm.assert_frame_equal(result, df)
+
+ result = df.copy()
+ result.loc[mask] = df.loc[mask]
+ tm.assert_frame_equal(result, df)
+
+ def test_loc_setitem_mask_and_label_with_datetimeindex(self):
+ # GH#9478
+ # a datetimeindex alignment issue with partial setting
+ df = DataFrame(
+ np.arange(6.0).reshape(3, 2),
+ columns=list("AB"),
+ index=date_range("1/1/2000", periods=3, freq="1H"),
+ )
+ expected = df.copy()
+ expected["C"] = [expected.index[0]] + [pd.NaT, pd.NaT]
+
+ mask = df.A < 1
+ df.loc[mask, "C"] = df.loc[mask].index
+ tm.assert_frame_equal(df, expected)
+
def test_series_loc_getitem_label_list_missing_values():
# gh-11428
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index 72296bb222a5a..127d00c217a15 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -146,69 +146,6 @@ def test_frame_at_with_duplicate_axes(self):
expected = Series([2.0, 2.0], index=["A", "A"], name=1)
tm.assert_series_equal(df.iloc[1], expected)
- def test_series_at_raises_type_error(self):
- # at should not fallback
- # GH 7814
- # GH#31724 .at should match .loc
- ser = Series([1, 2, 3], index=list("abc"))
- result = ser.at["a"]
- assert result == 1
- result = ser.loc["a"]
- assert result == 1
-
- with pytest.raises(KeyError, match="^0$"):
- ser.at[0]
- with pytest.raises(KeyError, match="^0$"):
- ser.loc[0]
-
- def test_frame_raises_key_error(self):
- # GH#31724 .at should match .loc
- df = DataFrame({"A": [1, 2, 3]}, index=list("abc"))
- result = df.at["a", "A"]
- assert result == 1
- result = df.loc["a", "A"]
- assert result == 1
-
- with pytest.raises(KeyError, match="^0$"):
- df.at["a", 0]
- with pytest.raises(KeyError, match="^0$"):
- df.loc["a", 0]
-
- def test_series_at_raises_key_error(self):
- # GH#31724 .at should match .loc
-
- ser = Series([1, 2, 3], index=[3, 2, 1])
- result = ser.at[1]
- assert result == 3
- result = ser.loc[1]
- assert result == 3
-
- with pytest.raises(KeyError, match="a"):
- ser.at["a"]
- with pytest.raises(KeyError, match="a"):
- # .at should match .loc
- ser.loc["a"]
-
- def test_frame_at_raises_key_error(self):
- # GH#31724 .at should match .loc
-
- df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
-
- result = df.at[1, 0]
- assert result == 3
- result = df.loc[1, 0]
- assert result == 3
-
- with pytest.raises(KeyError, match="a"):
- df.at["a", 0]
- with pytest.raises(KeyError, match="a"):
- df.loc["a", 0]
-
- with pytest.raises(KeyError, match="a"):
- df.at[1, "a"]
- with pytest.raises(KeyError, match="a"):
- df.loc[1, "a"]
-
# TODO: belongs somewhere else?
def test_getitem_list_missing_key(self):
# GH 13822, incorrect error string with non-unique columns when missing
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index b2fc2e2d0619d..44fb8dc519322 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -57,23 +57,6 @@ def test_fancy_setitem():
assert (s[48:54] == -3).all()
-def test_dti_reset_index_round_trip():
- dti = date_range(start="1/1/2001", end="6/1/2001", freq="D")._with_freq(None)
- d1 = DataFrame({"v": np.random.rand(len(dti))}, index=dti)
- d2 = d1.reset_index()
- assert d2.dtypes[0] == np.dtype("M8[ns]")
- d3 = d2.set_index("index")
- tm.assert_frame_equal(d1, d3, check_names=False)
-
- # #2329
- stamp = datetime(2012, 11, 22)
- df = DataFrame([[stamp, 12.1]], columns=["Date", "Value"])
- df = df.set_index("Date")
-
- assert df.index[0] == stamp
- assert df.reset_index()["Date"][0] == stamp
-
-
def test_slicing_datetimes():
# GH 7523
diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py
index 86af29eac1bae..2ad21d8221e25 100644
--- a/pandas/tests/series/indexing/test_numeric.py
+++ b/pandas/tests/series/indexing/test_numeric.py
@@ -86,16 +86,3 @@ def test_slice_float_get_set(datetime_series):
datetime_series[4.5:10.0]
with pytest.raises(TypeError, match=msg.format(key=r"4\.5")):
datetime_series[4.5:10.0] = 0
-
-
-def test_slice_floats2():
- s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
-
- assert len(s.loc[12.0:]) == 8
- assert len(s.loc[12.5:]) == 7
-
- i = np.arange(10, 20, dtype=float)
- i[2] = 12.2
- s.index = i
- assert len(s.loc[12.0:]) == 8
- assert len(s.loc[12.5:]) == 7
diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py
index 8044b590b3463..3cd9d52f8e754 100644
--- a/pandas/tests/series/methods/test_astype.py
+++ b/pandas/tests/series/methods/test_astype.py
@@ -18,6 +18,7 @@
Series,
Timedelta,
Timestamp,
+ cut,
date_range,
)
import pandas._testing as tm
@@ -76,6 +77,35 @@ def test_astype_dict_like(self, dtype_class):
class TestAstype:
+ @pytest.mark.parametrize("dtype", np.typecodes["All"])
+ def test_astype_empty_constructor_equality(self, dtype):
+ # see GH#15524
+
+ if dtype not in (
+ "S",
+ "V", # poor support (if any) currently
+ "M",
+ "m", # Generic timestamps raise a ValueError. Already tested.
+ ):
+ init_empty = Series([], dtype=dtype)
+ with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
+ as_type_empty = Series([]).astype(dtype)
+ tm.assert_series_equal(init_empty, as_type_empty)
+
+ @pytest.mark.parametrize("dtype", [str, np.str_])
+ @pytest.mark.parametrize(
+ "series",
+ [
+ Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
+ Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
+ ],
+ )
+ def test_astype_str_map(self, dtype, series):
+ # see GH#4405
+ result = series.astype(dtype)
+ expected = series.map(str)
+ tm.assert_series_equal(result, expected)
+
def test_astype_float_to_period(self):
result = Series([np.nan]).astype("period[D]")
expected = Series([NaT], dtype="period[D]")
@@ -309,6 +339,21 @@ def test_astype_unicode(self):
class TestAstypeCategorical:
+ def test_astype_categorical_invalid_conversions(self):
+ # invalid conversion (these are NOT a dtype)
+ cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
+ ser = Series(np.random.randint(0, 10000, 100)).sort_values()
+ ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)
+
+ msg = (
+ "dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "
+ "not understood"
+ )
+ with pytest.raises(TypeError, match=msg):
+ ser.astype(Categorical)
+ with pytest.raises(TypeError, match=msg):
+ ser.astype("object").astype(Categorical)
+
def test_astype_categoricaldtype(self):
s = Series(["a", "b", "a"])
result = s.astype(CategoricalDtype(["a", "b"], ordered=True))
diff --git a/pandas/tests/series/methods/test_item.py b/pandas/tests/series/methods/test_item.py
new file mode 100644
index 0000000000000..a7ddc0c22dcf4
--- /dev/null
+++ b/pandas/tests/series/methods/test_item.py
@@ -0,0 +1,49 @@
+import pytest
+
+from pandas import Series, Timedelta, Timestamp, date_range
+
+
+class TestItem:
+ def test_item(self):
+ ser = Series([1])
+ result = ser.item()
+ assert result == 1
+ assert result == ser.iloc[0]
+ assert isinstance(result, int) # i.e. not np.int64
+
+ ser = Series([0.5], index=[3])
+ result = ser.item()
+ assert isinstance(result, float)
+ assert result == 0.5
+
+ ser = Series([1, 2])
+ msg = "can only convert an array of size 1"
+ with pytest.raises(ValueError, match=msg):
+ ser.item()
+
+ dti = date_range("2016-01-01", periods=2)
+ with pytest.raises(ValueError, match=msg):
+ dti.item()
+ with pytest.raises(ValueError, match=msg):
+ Series(dti).item()
+
+ val = dti[:1].item()
+ assert isinstance(val, Timestamp)
+ val = Series(dti)[:1].item()
+ assert isinstance(val, Timestamp)
+
+ tdi = dti - dti
+ with pytest.raises(ValueError, match=msg):
+ tdi.item()
+ with pytest.raises(ValueError, match=msg):
+ Series(tdi).item()
+
+ val = tdi[:1].item()
+ assert isinstance(val, Timedelta)
+ val = Series(tdi)[:1].item()
+ assert isinstance(val, Timedelta)
+
+ # Case where ser[0] would not work
+ ser = Series(dti, index=[5, 6])
+ val = ser[:1].item()
+ assert val == dti[0]
diff --git a/pandas/tests/series/methods/test_reset_index.py b/pandas/tests/series/methods/test_reset_index.py
index 13d6a3b1447a1..40e567a8c33ca 100644
--- a/pandas/tests/series/methods/test_reset_index.py
+++ b/pandas/tests/series/methods/test_reset_index.py
@@ -1,12 +1,30 @@
+from datetime import datetime
+
import numpy as np
import pytest
import pandas as pd
-from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series
+from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series, date_range
import pandas._testing as tm
class TestResetIndex:
+ def test_reset_index_dti_round_trip(self):
+ dti = date_range(start="1/1/2001", end="6/1/2001", freq="D")._with_freq(None)
+ d1 = DataFrame({"v": np.random.rand(len(dti))}, index=dti)
+ d2 = d1.reset_index()
+ assert d2.dtypes[0] == np.dtype("M8[ns]")
+ d3 = d2.set_index("index")
+ tm.assert_frame_equal(d1, d3, check_names=False)
+
+ # GH#2329
+ stamp = datetime(2012, 11, 22)
+ df = DataFrame([[stamp, 12.1]], columns=["Date", "Value"])
+ df = df.set_index("Date")
+
+ assert df.index[0] == stamp
+ assert df.reset_index()["Date"][0] == stamp
+
def test_reset_index(self):
df = tm.makeDataFrame()[:5]
ser = df.stack()
diff --git a/pandas/tests/series/methods/test_values.py b/pandas/tests/series/methods/test_values.py
index e28a714ea656d..2982dcd52991d 100644
--- a/pandas/tests/series/methods/test_values.py
+++ b/pandas/tests/series/methods/test_values.py
@@ -18,3 +18,8 @@ def test_values_object_extension_dtypes(self, data):
result = Series(data).values
expected = np.array(data.astype(object))
tm.assert_numpy_array_equal(result, expected)
+
+ def test_values(self, datetime_series):
+ tm.assert_almost_equal(
+ datetime_series.values, datetime_series, check_dtype=False
+ )
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 717d8b5c90d85..9347ddd100639 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -7,7 +7,7 @@
from pandas.util._test_decorators import async_mark
import pandas as pd
-from pandas import DataFrame, Index, Series, Timedelta, Timestamp, date_range
+from pandas import DataFrame, Index, Series, date_range
import pandas._testing as tm
@@ -115,11 +115,6 @@ def test_not_hashable(self):
def test_contains(self, datetime_series):
tm.assert_contains_all(datetime_series.index, datetime_series)
- def test_values(self, datetime_series):
- tm.assert_almost_equal(
- datetime_series.values, datetime_series, check_dtype=False
- )
-
def test_raise_on_info(self):
s = Series(np.random.randn(10))
msg = "'Series' object has no attribute 'info'"
@@ -138,50 +133,6 @@ def test_class_axis(self):
# no exception and no empty docstring
assert pydoc.getdoc(Series.index)
- def test_item(self):
- s = Series([1])
- result = s.item()
- assert result == 1
- assert result == s.iloc[0]
- assert isinstance(result, int) # i.e. not np.int64
-
- ser = Series([0.5], index=[3])
- result = ser.item()
- assert isinstance(result, float)
- assert result == 0.5
-
- ser = Series([1, 2])
- msg = "can only convert an array of size 1"
- with pytest.raises(ValueError, match=msg):
- ser.item()
-
- dti = pd.date_range("2016-01-01", periods=2)
- with pytest.raises(ValueError, match=msg):
- dti.item()
- with pytest.raises(ValueError, match=msg):
- Series(dti).item()
-
- val = dti[:1].item()
- assert isinstance(val, Timestamp)
- val = Series(dti)[:1].item()
- assert isinstance(val, Timestamp)
-
- tdi = dti - dti
- with pytest.raises(ValueError, match=msg):
- tdi.item()
- with pytest.raises(ValueError, match=msg):
- Series(tdi).item()
-
- val = tdi[:1].item()
- assert isinstance(val, Timedelta)
- val = Series(tdi)[:1].item()
- assert isinstance(val, Timedelta)
-
- # Case where ser[0] would not work
- ser = Series(dti, index=[5, 6])
- val = ser[:1].item()
- assert val == dti[0]
-
def test_ndarray_compat(self):
# test numpy compat with Series as sub-class of NDFrame
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index 2fbed92567f71..f5c3623fb9986 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -1,5 +1,3 @@
-import string
-
import numpy as np
import pytest
@@ -16,20 +14,6 @@ def test_dtype(self, datetime_series):
assert datetime_series.dtype == np.dtype("float64")
assert datetime_series.dtypes == np.dtype("float64")
- @pytest.mark.parametrize("dtype", [str, np.str_])
- @pytest.mark.parametrize(
- "series",
- [
- Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
- Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
- ],
- )
- def test_astype_str_map(self, dtype, series):
- # see gh-4405
- result = series.astype(dtype)
- expected = series.map(str)
- tm.assert_series_equal(result, expected)
-
def test_astype_from_categorical(self):
items = ["a", "b", "c", "a"]
s = Series(items)
@@ -120,36 +104,6 @@ def cmp(a, b):
s.astype("object").astype(CategoricalDtype()), roundtrip_expected
)
- def test_invalid_conversions(self):
- # invalid conversion (these are NOT a dtype)
- cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
- ser = Series(np.random.randint(0, 10000, 100)).sort_values()
- ser = pd.cut(ser, range(0, 10500, 500), right=False, labels=cat)
-
- msg = (
- "dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "
- "not understood"
- )
- with pytest.raises(TypeError, match=msg):
- ser.astype(Categorical)
- with pytest.raises(TypeError, match=msg):
- ser.astype("object").astype(Categorical)
-
- @pytest.mark.parametrize("dtype", np.typecodes["All"])
- def test_astype_empty_constructor_equality(self, dtype):
- # see gh-15524
-
- if dtype not in (
- "S",
- "V", # poor support (if any) currently
- "M",
- "m", # Generic timestamps raise a ValueError. Already tested.
- ):
- init_empty = Series([], dtype=dtype)
- with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
- as_type_empty = Series([]).astype(dtype)
- tm.assert_series_equal(init_empty, as_type_empty)
-
def test_series_to_categorical(self):
# see gh-16524: test conversion of Series to Categorical
series = Series(["a", "b", "c"])
| https://api.github.com/repos/pandas-dev/pandas/pulls/37677 | 2020-11-06T23:51:28Z | 2020-11-08T00:07:13Z | 2020-11-08T00:07:13Z | 2020-11-08T00:58:43Z | |
BUG: setitem with boolean mask and series as value is broken for Series with EA type | diff --git a/pandas/conftest.py b/pandas/conftest.py
index b2daa2c5bc3f7..77e9af67590a6 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1143,6 +1143,26 @@ def any_nullable_int_dtype(request):
return request.param
+@pytest.fixture(params=tm.ALL_EA_INT_DTYPES + tm.FLOAT_EA_DTYPES)
+def any_numeric_dtype(request):
+ """
+ Parameterized fixture for any nullable integer dtype and
+ any float ea dtypes.
+
+ * 'UInt8'
+ * 'Int8'
+ * 'UInt16'
+ * 'Int16'
+ * 'UInt32'
+ * 'Int32'
+ * 'UInt64'
+ * 'Int64'
+ * 'Float32'
+ * 'Float64'
+ """
+ return request.param
+
+
@pytest.fixture(params=tm.SIGNED_EA_INT_DTYPES)
def any_signed_nullable_int_dtype(request):
"""
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index 4ed7510a1d9e1..119019da529e4 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -144,6 +144,22 @@ def test_setitem_boolean_td64_values_cast_na(self, value):
expected = Series([NaT, 1, 2], dtype="timedelta64[ns]")
tm.assert_series_equal(series, expected)
+ def test_setitem_boolean_nullable_int_types(self, any_numeric_dtype):
+ # GH: 26468
+ ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)
+ ser[ser > 6] = Series(range(4), dtype=any_numeric_dtype)
+ expected = Series([5, 6, 2, 3], dtype=any_numeric_dtype)
+ tm.assert_series_equal(ser, expected)
+
+ ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)
+ ser.loc[ser > 6] = Series(range(4), dtype=any_numeric_dtype)
+ tm.assert_series_equal(ser, expected)
+
+ ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)
+ loc_ser = Series(range(4), dtype=any_numeric_dtype)
+ ser.loc[ser > 6] = loc_ser.loc[loc_ser > 1]
+ tm.assert_series_equal(ser, expected)
+
class TestSetitemViewCopySemantics:
def test_setitem_invalidates_datetime_index_freq(self):
| - [x] closes #26468
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Was fixed already, added a test | https://api.github.com/repos/pandas-dev/pandas/pulls/37676 | 2020-11-06T22:51:37Z | 2020-11-17T01:21:38Z | 2020-11-17T01:21:37Z | 2020-11-17T08:35:51Z |
Bug in loc raised for numeric label even when label is in Index | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 9ac3585aa9002..a8f9c4d8fb2d9 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -464,6 +464,7 @@ Indexing
- Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`MultiIndex` with a level named "0" (:issue:`37194`)
- Bug in :meth:`Series.__getitem__` when using an unsigned integer array as an indexer giving incorrect results or segfaulting instead of raising ``KeyError`` (:issue:`37218`)
- Bug in :meth:`Index.where` incorrectly casting numeric values to strings (:issue:`37591`)
+- Bug in :meth:`Series.loc` and :meth:`DataFrame.loc` raises when numeric label was given for object :class:`Index` although label was in :class:`Index` (:issue:`26491`)
Missing
^^^^^^^
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 98ec3b55e65d9..6d06e52c39941 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5200,13 +5200,8 @@ def _maybe_cast_slice_bound(self, label, side: str_t, kind):
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. Float64Index and
# datetimelike Indexes
- # reject them
- if is_float(label):
- self._invalid_indexer("slice", label)
-
- # we are trying to find integer bounds on a non-integer based index
- # this is rejected (generally .loc gets you here)
- elif is_integer(label):
+ # reject them, if index does not contain label
+ if (is_float(label) or is_integer(label)) and label not in self.values:
self._invalid_indexer("slice", label)
return label
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index c1a5db992d3df..3d45ebd4635d2 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1579,3 +1579,12 @@ def test_loc_setitem_dt64tz_values(self):
s2["a"] = expected
result = s2["a"]
assert result == expected
+
+
+@pytest.mark.parametrize("value", [1, 1.5])
+def test_loc_int_in_object_index(frame_or_series, value):
+ # GH: 26491
+ obj = frame_or_series(range(4), index=[value, "first", 2, "third"])
+ result = obj.loc[value:"third"]
+ expected = frame_or_series(range(4), index=[value, "first", 2, "third"])
+ tm.assert_equal(result, expected)
| - [x] closes #26491
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Additionally checking for the label inside values, this fixes the problem. | https://api.github.com/repos/pandas-dev/pandas/pulls/37675 | 2020-11-06T22:15:45Z | 2020-11-09T02:52:07Z | 2020-11-09T02:52:06Z | 2023-04-27T19:52:13Z |
CLN: only call _wrap_results one place in nanmedian | diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 8e917bb770247..5da8bd300433e 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -681,26 +681,26 @@ def get_median(x):
# there's a non-empty array to apply over otherwise numpy raises
if notempty:
if not skipna:
- return _wrap_results(
- np.apply_along_axis(get_median, axis, values), dtype
- )
+ res = np.apply_along_axis(get_median, axis, values)
+
+ else:
+ # fastpath for the skipna case
+ with warnings.catch_warnings():
+ # Suppress RuntimeWarning about All-NaN slice
+ warnings.filterwarnings("ignore", "All-NaN slice encountered")
+ res = np.nanmedian(values, axis)
- # fastpath for the skipna case
- with warnings.catch_warnings():
- # Suppress RuntimeWarning about All-NaN slice
- warnings.filterwarnings("ignore", "All-NaN slice encountered")
- res = np.nanmedian(values, axis)
- return _wrap_results(res, dtype)
-
- # must return the correct shape, but median is not defined for the
- # empty set so return nans of shape "everything but the passed axis"
- # since "axis" is where the reduction would occur if we had a nonempty
- # array
- ret = get_empty_reduction_result(values.shape, axis, np.float_, np.nan)
- return _wrap_results(ret, dtype)
-
- # otherwise return a scalar value
- return _wrap_results(get_median(values) if notempty else np.nan, dtype)
+ else:
+ # must return the correct shape, but median is not defined for the
+ # empty set so return nans of shape "everything but the passed axis"
+ # since "axis" is where the reduction would occur if we had a nonempty
+ # array
+ res = get_empty_reduction_result(values.shape, axis, np.float_, np.nan)
+
+ else:
+ # otherwise return a scalar value
+ res = get_median(values) if notempty else np.nan
+ return _wrap_results(res, dtype)
def get_empty_reduction_result(
| https://api.github.com/repos/pandas-dev/pandas/pulls/37673 | 2020-11-06T21:17:40Z | 2020-11-08T00:20:34Z | 2020-11-08T00:20:34Z | 2020-11-08T01:03:19Z | |
BUG: Fix bug in loc setitem changing the dtype when condition is False | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index c3cf64c84510e..eb3fcfd74819d 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -697,6 +697,7 @@ Indexing
- Bug in :meth:`DataFrame.loc.__getitem__` incorrectly raising ``KeyError`` when selecting a single column with a boolean key (:issue:`44322`).
- Bug in setting :meth:`DataFrame.iloc` with a single ``ExtensionDtype`` column and setting 2D values e.g. ``df.iloc[:] = df.values`` incorrectly raising (:issue:`44514`)
- Bug in indexing on columns with ``loc`` or ``iloc`` using a slice with a negative step with ``ExtensionDtype`` columns incorrectly raising (:issue:`44551`)
+- Bug in :meth:`DataFrame.loc.__setitem__` changing dtype when indexer was completely ``False`` (:issue:`37550`)
- Bug in :meth:`IntervalIndex.get_indexer_non_unique` returning boolean mask instead of array of integers for a non unique and non monotonic index (:issue:`44084`)
- Bug in :meth:`IntervalIndex.get_indexer_non_unique` not handling targets of ``dtype`` 'object' with NaNs correctly (:issue:`44482`)
-
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index fc2204724aceb..f043a8cee308c 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -2058,6 +2058,8 @@ def ravel(i):
# we have a frame, with multiple indexers on both axes; and a
# series, so need to broadcast (see GH5206)
if sum_aligners == self.ndim and all(is_sequence(_) for _ in indexer):
+ if is_empty_indexer(indexer[0], ser._values):
+ return ser._values.copy()
ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index d30b726966e3d..9868cec633dff 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -922,6 +922,33 @@ def test_setitem_boolean_mask(self, mask_type, float_frame):
expected.values[np.array(mask)] = np.nan
tm.assert_frame_equal(result, expected)
+ @pytest.mark.xfail(reason="Currently empty indexers are treated as all False")
+ @pytest.mark.parametrize("box", [list, np.array, Series])
+ def test_setitem_loc_empty_indexer_raises_with_non_empty_value(self, box):
+ # GH#37672
+ df = DataFrame({"a": ["a"], "b": [1], "c": [1]})
+ if box == Series:
+ indexer = box([], dtype="object")
+ else:
+ indexer = box([])
+ msg = "Must have equal len keys and value when setting with an iterable"
+ with pytest.raises(ValueError, match=msg):
+ df.loc[indexer, ["b"]] = [1]
+
+ @pytest.mark.parametrize("box", [list, np.array, Series])
+ def test_setitem_loc_only_false_indexer_dtype_changed(self, box):
+ # GH#37550
+ # Dtype is only changed when value to set is a Series and indexer is
+ # empty/bool all False
+ df = DataFrame({"a": ["a"], "b": [1], "c": [1]})
+ indexer = box([False])
+ df.loc[indexer, ["b"]] = 10 - df["c"]
+ expected = DataFrame({"a": ["a"], "b": [1], "c": [1]})
+ tm.assert_frame_equal(df, expected)
+
+ df.loc[indexer, ["b"]] = 9
+ tm.assert_frame_equal(df, expected)
+
@pytest.mark.parametrize("indexer", [tm.setitem, tm.loc])
def test_setitem_boolean_mask_aligning(self, indexer):
# GH#39931
| - [x] closes #37550
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This fixes the bug, but I am not sure, if the solution is good enough.... | https://api.github.com/repos/pandas-dev/pandas/pulls/37672 | 2020-11-06T20:49:53Z | 2021-12-13T16:53:42Z | 2021-12-13T16:53:41Z | 2023-04-27T19:52:27Z |
TYP: Index._concat | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f350e18198057..545d1d834fe2d 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4297,13 +4297,13 @@ def append(self, other):
return self._concat(to_concat, name)
- def _concat(self, to_concat, name):
+ def _concat(self, to_concat: List["Index"], name: Label) -> "Index":
"""
Concatenate multiple Index objects.
"""
- to_concat = [x._values if isinstance(x, Index) else x for x in to_concat]
+ to_concat_vals = [x._values for x in to_concat]
- result = concat_compat(to_concat)
+ result = concat_compat(to_concat_vals)
return Index(result, name=name)
def putmask(self, mask, value):
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 525c41bae8b51..8c9ee1f1d8efa 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -653,7 +653,7 @@ def map(self, mapper):
mapped = self._values.map(mapper)
return Index(mapped, name=self.name)
- def _concat(self, to_concat, name):
+ def _concat(self, to_concat: List["Index"], name: Label) -> "CategoricalIndex":
# if calling index is category, don't check dtype of others
codes = np.concatenate([self._is_dtype_compat(c).codes for c in to_concat])
cat = self._data._from_backing_data(codes)
| https://api.github.com/repos/pandas-dev/pandas/pulls/37671 | 2020-11-06T18:15:31Z | 2020-11-08T00:31:20Z | 2020-11-08T00:31:20Z | 2020-11-08T00:54:08Z | |
TST: fix warning for pie chart | diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index e6a61d35365a3..48a923e85eed7 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -2650,11 +2650,20 @@ def test_pie_df(self):
self._check_colors(ax.patches, facecolors=color_args)
def test_pie_df_nan(self):
+ import matplotlib as mpl
+
df = DataFrame(np.random.rand(4, 4))
for i in range(4):
df.iloc[i, i] = np.nan
fig, axes = self.plt.subplots(ncols=4)
- df.plot.pie(subplots=True, ax=axes, legend=True)
+
+ # GH 37668
+ kwargs = {}
+ if mpl.__version__ >= "3.3":
+ kwargs = {"normalize": True}
+
+ with tm.assert_produces_warning(None):
+ df.plot.pie(subplots=True, ax=axes, legend=True, **kwargs)
base_expected = ["0", "1", "2", "3"]
for i, ax in enumerate(axes):
| - [ ] closes #37668
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Fix MatplotlibDeprecationWarning for pie chart by specifically passing ``normalize=True``. | https://api.github.com/repos/pandas-dev/pandas/pulls/37669 | 2020-11-06T16:13:48Z | 2020-11-08T03:00:21Z | 2020-11-08T03:00:20Z | 2020-11-09T11:33:52Z |
BUG: CategoricalIndex.equals casting non-categories to np.nan | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 690e6b8f725ad..73493bbeb0eac 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -380,7 +380,7 @@ Categorical
^^^^^^^^^^^
- :meth:`Categorical.fillna` will always return a copy, will validate a passed fill value regardless of whether there are any NAs to fill, and will disallow a ``NaT`` as a fill value for numeric categories (:issue:`36530`)
- Bug in :meth:`Categorical.__setitem__` that incorrectly raised when trying to set a tuple value (:issue:`20439`)
--
+- Bug in :meth:`CategoricalIndex.equals` incorrectly casting non-category entries to ``np.nan`` (:issue:`37667`)
Datetimelike
^^^^^^^^^^^^
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 525c41bae8b51..085d2f5e832a2 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -20,7 +20,7 @@
pandas_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
-from pandas.core.dtypes.missing import is_valid_nat_for_dtype, notna
+from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna
from pandas.core import accessor
from pandas.core.arrays.categorical import Categorical, contains
@@ -263,6 +263,7 @@ def _is_dtype_compat(self, other) -> Categorical:
values = other
if not is_list_like(values):
values = [values]
+
cat = Categorical(other, dtype=self.dtype)
other = CategoricalIndex(cat)
if not other.isin(values).all():
@@ -271,6 +272,12 @@ def _is_dtype_compat(self, other) -> Categorical:
)
other = other._values
+ if not ((other == values) | (isna(other) & isna(values))).all():
+ # GH#37667 see test_equals_non_category
+ raise TypeError(
+ "categories must match existing categories when appending"
+ )
+
return other
def equals(self, other: object) -> bool:
@@ -291,13 +298,10 @@ def equals(self, other: object) -> bool:
try:
other = self._is_dtype_compat(other)
- if isinstance(other, type(self)):
- other = other._data
- return self._data.equals(other)
except (TypeError, ValueError):
- pass
+ return False
- return False
+ return self._data.equals(other)
# --------------------------------------------------------------------
# Rendering Methods
diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index cf2430d041d88..324a2535bc465 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -444,6 +444,14 @@ def test_equals_categorical_unordered(self):
assert not a.equals(c)
assert not b.equals(c)
+ def test_equals_non_category(self):
+ # GH#37667 Case where other contains a value not among ci's
+ # categories ("D") and also contains np.nan
+ ci = CategoricalIndex(["A", "B", np.nan, np.nan])
+ other = Index(["A", "B", "D", np.nan])
+
+ assert not ci.equals(other)
+
def test_frame_repr(self):
df = pd.DataFrame({"A": [1, 2, 3]}, index=CategoricalIndex(["a", "b", "c"]))
result = repr(df)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37667 | 2020-11-06T16:04:19Z | 2020-11-08T00:34:52Z | 2020-11-08T00:34:52Z | 2020-11-08T01:05:39Z |
TST: match matplotlib warning message | diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index e6a61d35365a3..c9d1d84e0b7cf 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -2739,11 +2739,11 @@ def test_errorbar_plot_different_kinds(self, kind):
ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
- with tm.assert_produces_warning(UserWarning):
- # _check_plot_works creates subplots inside,
- # which leads to warnings like this:
- # UserWarning: To output multiple subplots,
- # the figure containing the passed axes is being cleared
+ msg = (
+ "To output multiple subplots, "
+ "the figure containing the passed axes is being cleared"
+ )
+ with tm.assert_produces_warning(UserWarning, match=msg):
# Similar warnings were observed in GH #13188
axes = _check_plot_works(
df.plot, yerr=df_err, xerr=df_err, subplots=True, kind=kind
@@ -2820,11 +2820,11 @@ def test_errorbar_timeseries(self, kind):
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
- with tm.assert_produces_warning(UserWarning):
- # _check_plot_works creates subplots inside,
- # which leads to warnings like this:
- # UserWarning: To output multiple subplots,
- # the figure containing the passed axes is being cleared
+ msg = (
+ "To output multiple subplots, "
+ "the figure containing the passed axes is being cleared"
+ )
+ with tm.assert_produces_warning(UserWarning, match=msg):
# Similar warnings were observed in GH #13188
axes = _check_plot_works(tdf.plot, kind=kind, yerr=tdf_err, subplots=True)
self._check_has_errorbars(axes, xerr=0, yerr=1)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Follow up on #36982.
Since, currently ``assert_produces_warning`` now allows matching warning message,
instead of the comment we will match the message. | https://api.github.com/repos/pandas-dev/pandas/pulls/37666 | 2020-11-06T15:47:11Z | 2020-11-07T14:43:52Z | 2020-11-07T14:43:52Z | 2020-11-09T11:34:18Z |
REF: simplify cycling through colors | diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py
index b2c7b2610845c..cc2dde0f2179a 100644
--- a/pandas/plotting/_matplotlib/style.py
+++ b/pandas/plotting/_matplotlib/style.py
@@ -1,3 +1,4 @@
+import itertools
from typing import (
TYPE_CHECKING,
Collection,
@@ -74,7 +75,7 @@ def get_standard_colors(
num_colors=num_colors,
)
- return _cycle_colors(colors, num_colors=num_colors)
+ return list(_cycle_colors(colors, num_colors=num_colors))
def _derive_colors(
@@ -128,19 +129,14 @@ def _derive_colors(
return _get_colors_from_color_type(color_type, num_colors=num_colors)
-def _cycle_colors(colors: List[Color], num_colors: int) -> List[Color]:
- """Append more colors by cycling if there is not enough color.
+def _cycle_colors(colors: List[Color], num_colors: int) -> Iterator[Color]:
+ """Cycle colors until achieving max of `num_colors` or length of `colors`.
Extra colors will be ignored by matplotlib if there are more colors
than needed and nothing needs to be done here.
"""
- if len(colors) < num_colors:
- multiple = num_colors // len(colors) - 1
- mod = num_colors % len(colors)
- colors += multiple * colors
- colors += colors[:mod]
-
- return colors
+ max_colors = max(num_colors, len(colors))
+ yield from itertools.islice(itertools.cycle(colors), max_colors)
def _get_colors_from_colormap(
| - [ ] closes #37604
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I tried to address #37604, but it turns out that on some occasions it is absolutely mandatory to return more colors than ``num_colors`` says.
In particular, this is the test that cannot be updated and the underlying code cannot be easily modified as well.
```
pandas/tests/plotting/test_frame.py
...
class TestDataFramePlots(TestPlotBase):
...
def test_bar_user_colors(self):
df = DataFrame(
{"A": range(4), "B": range(1, 5), "color": ["red", "blue", "blue", "red"]}
)
# This should *only* work when `y` is specified, else
# we use one color per column
ax = df.plot.bar(y="A", color=df["color"])
result = [p.get_facecolor() for p in ax.patches]
expected = [
(1.0, 0.0, 0.0, 1.0),
(0.0, 0.0, 1.0, 1.0),
(0.0, 0.0, 1.0, 1.0),
(1.0, 0.0, 0.0, 1.0),
]
assert result == expected
```
When "y" is specified, then the number of colors must be the length of the series df["A"],
but inside the BarPlot class there is no opportunity to check for that condition.
So, let ``get_standard_colors`` return maximum of ``num_colors`` and ``len(colors)``.
This way all tests work without modifications,
just minor improvement in the cycling function. | https://api.github.com/repos/pandas-dev/pandas/pulls/37664 | 2020-11-06T13:23:06Z | 2020-11-08T02:11:13Z | 2020-11-08T02:11:12Z | 2020-11-09T11:33:41Z |
Move inconsistent namespace check to pre-commit, fixup more files | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 0c1e4e330c903..f9b396715664a 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -119,6 +119,12 @@ repos:
entry: python scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module"
types: [python]
exclude: ^(asv_bench|pandas/tests|doc)/
+ - id: inconsistent-namespace-usage
+ name: 'Check for inconsistent use of pandas namespace in tests'
+ entry: python scripts/check_for_inconsistent_pandas_namespace.py
+ language: python
+ types: [python]
+ files: ^pandas/tests/
- id: FrameOrSeriesUnion
name: Check for use of Union[Series, DataFrame] instead of FrameOrSeriesUnion alias
entry: Union\[.*(Series.*DataFrame|DataFrame.*Series).*\]
diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index b5d63e259456b..c920500aac9cd 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -37,12 +37,6 @@ function invgrep {
return $((! $EXIT_STATUS))
}
-function check_namespace {
- local -r CLASS=${1}
- grep -R -l --include "*.py" " ${CLASS}(" pandas/tests | xargs grep -n "pd\.${CLASS}[(\.]"
- test $? -gt 0
-}
-
if [[ "$GITHUB_ACTIONS" == "true" ]]; then
FLAKE8_FORMAT="##[error]%(path)s:%(row)s:%(col)s:%(code)s:%(text)s"
INVGREP_PREPEND="##[error]"
@@ -120,13 +114,6 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
MSG='Check for use of {foo!r} instead of {repr(foo)}' ; echo $MSG
invgrep -R --include=*.{py,pyx} '!r}' pandas
RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- # -------------------------------------------------------------------------
- MSG='Check for inconsistent use of pandas namespace in tests' ; echo $MSG
- for class in "Series" "DataFrame" "Index" "MultiIndex" "Timestamp" "Timedelta" "TimedeltaIndex" "DatetimeIndex" "Categorical"; do
- check_namespace ${class}
- RET=$(($RET + $?))
- done
echo $MSG "DONE"
fi
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 1c82d6f9a26ff..438a22c99a4eb 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -862,7 +862,7 @@ def test_infer_dtype_datetime_with_na(self, na_value, time_stamp):
@pytest.mark.parametrize(
"arr",
[
- np.array([pd.Timedelta("1 days"), pd.Timedelta("2 days")]),
+ np.array([Timedelta("1 days"), Timedelta("2 days")]),
np.array([np.timedelta64(1, "D"), np.timedelta64(2, "D")], dtype=object),
np.array([timedelta(1), timedelta(2)]),
],
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index e8d82b525c9f4..95f338cbc3240 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -192,7 +192,7 @@ def test_cast_category_to_extension_dtype(self, expected):
(
"datetime64[ns, MET]",
pd.DatetimeIndex(
- [pd.Timestamp("2015-01-01 00:00:00+0100", tz="MET")]
+ [Timestamp("2015-01-01 00:00:00+0100", tz="MET")]
).array,
),
],
@@ -254,7 +254,7 @@ def _compare_other(self, s, data, op_name, other):
@pytest.mark.parametrize(
"categories",
- [["a", "b"], [0, 1], [pd.Timestamp("2019"), pd.Timestamp("2020")]],
+ [["a", "b"], [0, 1], [Timestamp("2019"), Timestamp("2020")]],
)
def test_not_equal_with_na(self, categories):
# https://github.com/pandas-dev/pandas/issues/32276
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py
index 15803d4b0ef94..5d0f6d6262899 100644
--- a/pandas/tests/groupby/aggregate/test_other.py
+++ b/pandas/tests/groupby/aggregate/test_other.py
@@ -425,7 +425,7 @@ def test_agg_over_numpy_arrays():
result = df.groupby("category").agg(sum)
expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]
- expected_index = pd.Index([1, 2], name="category")
+ expected_index = Index([1, 2], name="category")
expected_column = ["arraydata"]
expected = DataFrame(expected_data, index=expected_index, columns=expected_column)
@@ -497,7 +497,7 @@ def test_sum_uint64_overflow():
df = DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)
df = df + 9223372036854775807
- index = pd.Index(
+ index = Index(
[9223372036854775808, 9223372036854775810, 9223372036854775812], dtype=np.uint64
)
expected = DataFrame(
@@ -596,7 +596,7 @@ def test_agg_lambda_with_timezone():
result = df.groupby("tag").agg({"date": lambda e: e.head(1)})
expected = DataFrame(
[pd.Timestamp("2018-01-01", tz="UTC")],
- index=pd.Index([1], name="tag"),
+ index=Index([1], name="tag"),
columns=["date"],
)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py
index 04b73b16ae2c7..1317f0f68216a 100644
--- a/pandas/tests/groupby/test_counting.py
+++ b/pandas/tests/groupby/test_counting.py
@@ -4,7 +4,6 @@
import numpy as np
import pytest
-import pandas as pd
from pandas import (
DataFrame,
Index,
@@ -260,7 +259,7 @@ def test_groupby_timedelta_cython_count():
df = DataFrame(
{"g": list("ab" * 2), "delt": np.arange(4).astype("timedelta64[ns]")}
)
- expected = Series([2, 2], index=pd.Index(["a", "b"], name="g"), name="delt")
+ expected = Series([2, 2], index=Index(["a", "b"], name="g"), name="delt")
result = df.groupby("g").delt.count()
tm.assert_series_equal(expected, result)
@@ -317,12 +316,12 @@ def test_count_non_nulls():
def test_count_object():
df = DataFrame({"a": ["a"] * 3 + ["b"] * 3, "c": [2] * 3 + [3] * 3})
result = df.groupby("c").a.count()
- expected = Series([3, 3], index=pd.Index([2, 3], name="c"), name="a")
+ expected = Series([3, 3], index=Index([2, 3], name="c"), name="a")
tm.assert_series_equal(result, expected)
df = DataFrame({"a": ["a", np.nan, np.nan] + ["b"] * 3, "c": [2] * 3 + [3] * 3})
result = df.groupby("c").a.count()
- expected = Series([1, 3], index=pd.Index([2, 3], name="c"), name="a")
+ expected = Series([1, 3], index=Index([2, 3], name="c"), name="a")
tm.assert_series_equal(result, expected)
@@ -354,7 +353,7 @@ def test_lower_int_prec_count():
)
result = df.groupby("grp").count()
expected = DataFrame(
- {"a": [2, 2], "b": [2, 2], "c": [2, 2]}, index=pd.Index(list("ab"), name="grp")
+ {"a": [2, 2], "b": [2, 2], "c": [2, 2]}, index=Index(list("ab"), name="grp")
)
tm.assert_frame_equal(result, expected)
@@ -374,5 +373,5 @@ def __eq__(self, other):
df = DataFrame({"a": [RaisingObject() for _ in range(4)], "grp": list("ab" * 2)})
result = df.groupby("grp").count()
- expected = DataFrame({"a": [2, 2]}, index=pd.Index(list("ab"), name="grp"))
+ expected = DataFrame({"a": [2, 2]}, index=Index(list("ab"), name="grp"))
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 4d6a1afe06e1c..4aefb73bf912c 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -614,12 +614,12 @@ def test_list_grouper_with_nat(self):
# Grouper in a list grouping
result = df.groupby([grouper])
- expected = {pd.Timestamp("2011-01-01"): Index(list(range(364)))}
+ expected = {Timestamp("2011-01-01"): Index(list(range(364)))}
tm.assert_dict_equal(result.groups, expected)
# Test case without a list
result = df.groupby(grouper)
- expected = {pd.Timestamp("2011-01-01"): 365}
+ expected = {Timestamp("2011-01-01"): 365}
tm.assert_dict_equal(result.groups, expected)
@pytest.mark.parametrize(
@@ -938,12 +938,12 @@ def test_groupby_with_small_elem(self):
grouped = df.groupby([pd.Grouper(freq="M"), "event"])
assert len(grouped.groups) == 2
assert grouped.ngroups == 2
- assert (pd.Timestamp("2014-09-30"), "start") in grouped.groups
- assert (pd.Timestamp("2013-10-31"), "start") in grouped.groups
+ assert (Timestamp("2014-09-30"), "start") in grouped.groups
+ assert (Timestamp("2013-10-31"), "start") in grouped.groups
- res = grouped.get_group((pd.Timestamp("2014-09-30"), "start"))
+ res = grouped.get_group((Timestamp("2014-09-30"), "start"))
tm.assert_frame_equal(res, df.iloc[[0], :])
- res = grouped.get_group((pd.Timestamp("2013-10-31"), "start"))
+ res = grouped.get_group((Timestamp("2013-10-31"), "start"))
tm.assert_frame_equal(res, df.iloc[[1], :])
df = DataFrame(
@@ -953,12 +953,12 @@ def test_groupby_with_small_elem(self):
grouped = df.groupby([pd.Grouper(freq="M"), "event"])
assert len(grouped.groups) == 2
assert grouped.ngroups == 2
- assert (pd.Timestamp("2014-09-30"), "start") in grouped.groups
- assert (pd.Timestamp("2013-10-31"), "start") in grouped.groups
+ assert (Timestamp("2014-09-30"), "start") in grouped.groups
+ assert (Timestamp("2013-10-31"), "start") in grouped.groups
- res = grouped.get_group((pd.Timestamp("2014-09-30"), "start"))
+ res = grouped.get_group((Timestamp("2014-09-30"), "start"))
tm.assert_frame_equal(res, df.iloc[[0, 2], :])
- res = grouped.get_group((pd.Timestamp("2013-10-31"), "start"))
+ res = grouped.get_group((Timestamp("2013-10-31"), "start"))
tm.assert_frame_equal(res, df.iloc[[1], :])
# length=3
@@ -969,15 +969,15 @@ def test_groupby_with_small_elem(self):
grouped = df.groupby([pd.Grouper(freq="M"), "event"])
assert len(grouped.groups) == 3
assert grouped.ngroups == 3
- assert (pd.Timestamp("2014-09-30"), "start") in grouped.groups
- assert (pd.Timestamp("2013-10-31"), "start") in grouped.groups
- assert (pd.Timestamp("2014-08-31"), "start") in grouped.groups
+ assert (Timestamp("2014-09-30"), "start") in grouped.groups
+ assert (Timestamp("2013-10-31"), "start") in grouped.groups
+ assert (Timestamp("2014-08-31"), "start") in grouped.groups
- res = grouped.get_group((pd.Timestamp("2014-09-30"), "start"))
+ res = grouped.get_group((Timestamp("2014-09-30"), "start"))
tm.assert_frame_equal(res, df.iloc[[0], :])
- res = grouped.get_group((pd.Timestamp("2013-10-31"), "start"))
+ res = grouped.get_group((Timestamp("2013-10-31"), "start"))
tm.assert_frame_equal(res, df.iloc[[1], :])
- res = grouped.get_group((pd.Timestamp("2014-08-31"), "start"))
+ res = grouped.get_group((Timestamp("2014-08-31"), "start"))
tm.assert_frame_equal(res, df.iloc[[2], :])
def test_grouping_string_repr(self):
diff --git a/pandas/tests/groupby/test_missing.py b/pandas/tests/groupby/test_missing.py
index 2c2147795bc07..580148cb2a3a3 100644
--- a/pandas/tests/groupby/test_missing.py
+++ b/pandas/tests/groupby/test_missing.py
@@ -11,11 +11,11 @@ def test_groupby_column_index_name_lost_fill_funcs(func):
# GH: 29764 groupby loses index sometimes
df = DataFrame(
[[1, 1.0, -1.0], [1, np.nan, np.nan], [1, 2.0, -2.0]],
- columns=pd.Index(["type", "a", "b"], name="idx"),
+ columns=Index(["type", "a", "b"], name="idx"),
)
df_grouped = df.groupby(["type"])[["a", "b"]]
result = getattr(df_grouped, func)().columns
- expected = pd.Index(["a", "b"], name="idx")
+ expected = Index(["a", "b"], name="idx")
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/groupby/test_quantile.py b/pandas/tests/groupby/test_quantile.py
index e48f10ebacb79..bd6d33c59a48a 100644
--- a/pandas/tests/groupby/test_quantile.py
+++ b/pandas/tests/groupby/test_quantile.py
@@ -194,7 +194,7 @@ def test_quantile_missing_group_values_correct_results(
df = DataFrame({"key": key, "val": val})
expected = DataFrame(
- expected_val, index=pd.Index(expected_key, name="key"), columns=["val"]
+ expected_val, index=Index(expected_key, name="key"), columns=["val"]
)
grp = df.groupby("key")
@@ -223,7 +223,7 @@ def test_groupby_quantile_nullable_array(values, q):
idx = pd.MultiIndex.from_product((["x", "y"], q), names=["a", None])
true_quantiles = [0.0, 0.5, 1.0]
else:
- idx = pd.Index(["x", "y"], name="a")
+ idx = Index(["x", "y"], name="a")
true_quantiles = [0.5]
expected = pd.Series(true_quantiles * 2, index=idx, name="b")
@@ -251,6 +251,6 @@ def test_groupby_timedelta_quantile():
pd.Timedelta("0 days 00:00:02.990000"),
]
},
- index=pd.Index([1, 2], name="group"),
+ index=Index([1, 2], name="group"),
)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index 612079447576f..c3282758a23f2 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -452,7 +452,7 @@ def test_groupby_groups_datetimeindex(self):
result = df.groupby(level="date").groups
dates = ["2015-01-05", "2015-01-04", "2015-01-03", "2015-01-02", "2015-01-01"]
expected = {
- Timestamp(date): pd.DatetimeIndex([date], name="date") for date in dates
+ Timestamp(date): DatetimeIndex([date], name="date") for date in dates
}
tm.assert_dict_equal(result, expected)
@@ -460,7 +460,7 @@ def test_groupby_groups_datetimeindex(self):
for date in dates:
result = grouped.get_group(date)
data = [[df.loc[date, "A"], df.loc[date, "B"]]]
- expected_index = pd.DatetimeIndex([date], name="date", freq="D")
+ expected_index = DatetimeIndex([date], name="date", freq="D")
expected = DataFrame(data, columns=list("AB"), index=expected_index)
tm.assert_frame_equal(result, expected)
@@ -484,7 +484,7 @@ def test_groupby_groups_datetimeindex_tz(self):
)
df["datetime"] = df["datetime"].apply(lambda d: Timestamp(d, tz="US/Pacific"))
- exp_idx1 = pd.DatetimeIndex(
+ exp_idx1 = DatetimeIndex(
[
"2011-07-19 07:00:00",
"2011-07-19 07:00:00",
@@ -508,13 +508,13 @@ def test_groupby_groups_datetimeindex_tz(self):
tm.assert_frame_equal(result, expected)
# by level
- didx = pd.DatetimeIndex(dates, tz="Asia/Tokyo")
+ didx = DatetimeIndex(dates, tz="Asia/Tokyo")
df = DataFrame(
{"value1": np.arange(6, dtype="int64"), "value2": [1, 2, 3, 1, 2, 3]},
index=didx,
)
- exp_idx = pd.DatetimeIndex(
+ exp_idx = DatetimeIndex(
["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"],
tz="Asia/Tokyo",
)
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index 1aeff7426c33a..d7426a5e3b42e 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -1134,7 +1134,7 @@ def test_categorical_and_not_categorical_key(observed):
# GH 32494
df_with_categorical = DataFrame(
{
- "A": pd.Categorical(["a", "b", "a"], categories=["a", "b", "c"]),
+ "A": Categorical(["a", "b", "a"], categories=["a", "b", "c"]),
"B": [1, 2, 3],
"C": ["a", "b", "a"],
}
diff --git a/pandas/tests/indexes/datetimes/test_shift.py b/pandas/tests/indexes/datetimes/test_shift.py
index a2a673ed5d9e0..3c202005f7933 100644
--- a/pandas/tests/indexes/datetimes/test_shift.py
+++ b/pandas/tests/indexes/datetimes/test_shift.py
@@ -20,25 +20,25 @@ class TestDatetimeIndexShift:
def test_dti_shift_tzaware(self, tz_naive_fixture):
# GH#9903
tz = tz_naive_fixture
- idx = pd.DatetimeIndex([], name="xxx", tz=tz)
+ idx = DatetimeIndex([], name="xxx", tz=tz)
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
tm.assert_index_equal(idx.shift(3, freq="H"), idx)
- idx = pd.DatetimeIndex(
+ idx = DatetimeIndex(
["2011-01-01 10:00", "2011-01-01 11:00", "2011-01-01 12:00"],
name="xxx",
tz=tz,
freq="H",
)
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
- exp = pd.DatetimeIndex(
+ exp = DatetimeIndex(
["2011-01-01 13:00", "2011-01-01 14:00", "2011-01-01 15:00"],
name="xxx",
tz=tz,
freq="H",
)
tm.assert_index_equal(idx.shift(3, freq="H"), exp)
- exp = pd.DatetimeIndex(
+ exp = DatetimeIndex(
["2011-01-01 07:00", "2011-01-01 08:00", "2011-01-01 09:00"],
name="xxx",
tz=tz,
@@ -51,21 +51,21 @@ def test_dti_shift_freqs(self):
# GH#8083
drange = pd.date_range("20130101", periods=5)
result = drange.shift(1)
- expected = pd.DatetimeIndex(
+ expected = DatetimeIndex(
["2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05", "2013-01-06"],
freq="D",
)
tm.assert_index_equal(result, expected)
result = drange.shift(-1)
- expected = pd.DatetimeIndex(
+ expected = DatetimeIndex(
["2012-12-31", "2013-01-01", "2013-01-02", "2013-01-03", "2013-01-04"],
freq="D",
)
tm.assert_index_equal(result, expected)
result = drange.shift(3, freq="2D")
- expected = pd.DatetimeIndex(
+ expected = DatetimeIndex(
["2013-01-07", "2013-01-08", "2013-01-09", "2013-01-10", "2013-01-11"],
freq="D",
)
@@ -84,7 +84,7 @@ def test_dti_shift_int(self):
def test_dti_shift_no_freq(self):
# GH#19147
- dti = pd.DatetimeIndex(["2011-01-01 10:00", "2011-01-01"], freq=None)
+ dti = DatetimeIndex(["2011-01-01 10:00", "2011-01-01"], freq=None)
with pytest.raises(NullFrequencyError, match="Cannot shift with no freq"):
dti.shift(2)
diff --git a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py
index 150a797169c14..b60ae8819023f 100644
--- a/pandas/tests/indexes/period/test_formats.py
+++ b/pandas/tests/indexes/period/test_formats.py
@@ -2,7 +2,7 @@
import pytest
import pandas as pd
-from pandas import PeriodIndex
+from pandas import PeriodIndex, Series
import pandas._testing as tm
@@ -154,7 +154,7 @@ def test_representation_to_series(self):
[idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9],
):
- result = repr(pd.Series(idx))
+ result = repr(Series(idx))
assert result == expected
def test_summary(self):
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 045816b3c9513..0c990b0456b5c 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -273,7 +273,7 @@ def test_equals_numeric_other_index_type(self, other):
def test_lookups_datetimelike_values(self, vals):
# If we have datetime64 or timedelta64 values, make sure they are
# wrappped correctly GH#31163
- ser = pd.Series(vals, index=range(3, 6))
+ ser = Series(vals, index=range(3, 6))
ser.index = ser.index.astype("float64")
expected = vals[1]
@@ -642,7 +642,7 @@ def test_range_float_union_dtype():
def test_uint_index_does_not_convert_to_float64(box):
# https://github.com/pandas-dev/pandas/issues/28279
# https://github.com/pandas-dev/pandas/issues/28023
- series = pd.Series(
+ series = Series(
[0, 1, 2, 3, 4, 5],
index=[
7606741985629028552,
diff --git a/pandas/tests/indexes/timedeltas/test_formats.py b/pandas/tests/indexes/timedeltas/test_formats.py
index 1dfc5b5305008..8a8e2abd17165 100644
--- a/pandas/tests/indexes/timedeltas/test_formats.py
+++ b/pandas/tests/indexes/timedeltas/test_formats.py
@@ -1,7 +1,7 @@
import pytest
import pandas as pd
-from pandas import TimedeltaIndex
+from pandas import Series, TimedeltaIndex
class TestTimedeltaIndexRendering:
@@ -62,7 +62,7 @@ def test_representation_to_series(self):
for idx, expected in zip(
[idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]
):
- result = repr(pd.Series(idx))
+ result = repr(Series(idx))
assert result == expected
def test_summary(self):
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 47b7bd0983305..3e5f9d481ce48 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -436,7 +436,7 @@ def test_frame_mixedtype_orient(self): # GH10289
def test_v12_compat(self, datapath):
dti = pd.date_range("2000-01-03", "2000-01-07")
# freq doesnt roundtrip
- dti = pd.DatetimeIndex(np.asarray(dti), freq=None)
+ dti = DatetimeIndex(np.asarray(dti), freq=None)
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
@@ -466,7 +466,7 @@ def test_v12_compat(self, datapath):
def test_blocks_compat_GH9037(self):
index = pd.date_range("20000101", periods=10, freq="H")
# freq doesnt round-trip
- index = pd.DatetimeIndex(list(index), freq=None)
+ index = DatetimeIndex(list(index), freq=None)
df_mixed = DataFrame(
dict(
@@ -1189,7 +1189,7 @@ def test_tz_range_is_utc(self, tz_range):
)
assert dumps(tz_range, iso_dates=True) == exp
- dti = pd.DatetimeIndex(tz_range)
+ dti = DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({"DT": dti})
result = dumps(df, iso_dates=True)
diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py
index 8d7d5d85cbb48..43a31ff1e4b58 100644
--- a/pandas/tests/io/test_compression.py
+++ b/pandas/tests/io/test_compression.py
@@ -205,8 +205,8 @@ def test_with_missing_lzma_runtime():
import sys
import pytest
sys.modules['lzma'] = None
- import pandas
- df = pandas.DataFrame()
+ import pandas as pd
+ df = pd.DataFrame()
with pytest.raises(RuntimeError, match='lzma module'):
df.to_csv('foo.csv', compression='xz')
"""
diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py
index 0832724110203..50e7cf9bd8eda 100644
--- a/pandas/tests/resample/test_time_grouper.py
+++ b/pandas/tests/resample/test_time_grouper.py
@@ -5,7 +5,7 @@
import pytest
import pandas as pd
-from pandas import DataFrame, Series
+from pandas import DataFrame, Series, Timestamp
import pandas._testing as tm
from pandas.core.groupby.grouper import Grouper
from pandas.core.indexes.datetimes import date_range
@@ -306,21 +306,21 @@ def test_groupby_resample_interpolate():
expected_ind = pd.MultiIndex.from_tuples(
[
(50, "2018-01-07"),
- (50, pd.Timestamp("2018-01-08")),
- (50, pd.Timestamp("2018-01-09")),
- (50, pd.Timestamp("2018-01-10")),
- (50, pd.Timestamp("2018-01-11")),
- (50, pd.Timestamp("2018-01-12")),
- (50, pd.Timestamp("2018-01-13")),
- (50, pd.Timestamp("2018-01-14")),
- (50, pd.Timestamp("2018-01-15")),
- (50, pd.Timestamp("2018-01-16")),
- (50, pd.Timestamp("2018-01-17")),
- (50, pd.Timestamp("2018-01-18")),
- (50, pd.Timestamp("2018-01-19")),
- (50, pd.Timestamp("2018-01-20")),
- (50, pd.Timestamp("2018-01-21")),
- (60, pd.Timestamp("2018-01-14")),
+ (50, Timestamp("2018-01-08")),
+ (50, Timestamp("2018-01-09")),
+ (50, Timestamp("2018-01-10")),
+ (50, Timestamp("2018-01-11")),
+ (50, Timestamp("2018-01-12")),
+ (50, Timestamp("2018-01-13")),
+ (50, Timestamp("2018-01-14")),
+ (50, Timestamp("2018-01-15")),
+ (50, Timestamp("2018-01-16")),
+ (50, Timestamp("2018-01-17")),
+ (50, Timestamp("2018-01-18")),
+ (50, Timestamp("2018-01-19")),
+ (50, Timestamp("2018-01-20")),
+ (50, Timestamp("2018-01-21")),
+ (60, Timestamp("2018-01-14")),
],
names=["volume", "week_starting"],
)
diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py
index 895de2b748c34..613e7d423d87f 100644
--- a/pandas/tests/reshape/merge/test_merge_asof.py
+++ b/pandas/tests/reshape/merge/test_merge_asof.py
@@ -98,7 +98,7 @@ def test_examples2(self):
pd.merge_asof(trades, quotes, on="time", by="ticker")
pd.merge_asof(
- trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
+ trades, quotes, on="time", by="ticker", tolerance=Timedelta("2ms")
)
expected = pd.DataFrame(
@@ -126,7 +126,7 @@ def test_examples2(self):
quotes,
on="time",
by="ticker",
- tolerance=pd.Timedelta("10ms"),
+ tolerance=Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
@@ -591,7 +591,7 @@ def test_non_sorted(self):
@pytest.mark.parametrize(
"tolerance",
[Timedelta("1day"), datetime.timedelta(days=1)],
- ids=["pd.Timedelta", "datetime.timedelta"],
+ ids=["Timedelta", "datetime.timedelta"],
)
def test_tolerance(self, tolerance):
@@ -652,7 +652,7 @@ def test_tolerance_tz(self):
"value2": list("ABCDE"),
}
)
- result = pd.merge_asof(left, right, on="date", tolerance=pd.Timedelta("1 day"))
+ result = pd.merge_asof(left, right, on="date", tolerance=Timedelta("1 day"))
expected = pd.DataFrame(
{
@@ -698,7 +698,7 @@ def test_index_tolerance(self):
left_index=True,
right_index=True,
by="ticker",
- tolerance=pd.Timedelta("1day"),
+ tolerance=Timedelta("1day"),
)
tm.assert_frame_equal(result, expected)
@@ -792,7 +792,7 @@ def test_allow_exact_matches_and_tolerance2(self):
df2,
on="time",
allow_exact_matches=False,
- tolerance=pd.Timedelta("10ms"),
+ tolerance=Timedelta("10ms"),
)
expected = pd.DataFrame(
{
@@ -827,7 +827,7 @@ def test_allow_exact_matches_and_tolerance3(self):
df2,
on="time",
allow_exact_matches=False,
- tolerance=pd.Timedelta("10ms"),
+ tolerance=Timedelta("10ms"),
)
expected = pd.DataFrame(
{
@@ -1342,9 +1342,9 @@ def test_merge_index_column_tz(self):
def test_left_index_right_index_tolerance(self):
# https://github.com/pandas-dev/pandas/issues/35558
- dr1 = pd.date_range(
- start="1/1/2020", end="1/20/2020", freq="2D"
- ) + pd.Timedelta(seconds=0.4)
+ dr1 = pd.date_range(start="1/1/2020", end="1/20/2020", freq="2D") + Timedelta(
+ seconds=0.4
+ )
dr2 = pd.date_range(start="1/1/2020", end="2/1/2020")
df1 = pd.DataFrame({"val1": "foo"}, index=pd.DatetimeIndex(dr1))
@@ -1358,6 +1358,6 @@ def test_left_index_right_index_tolerance(self):
df2,
left_index=True,
right_index=True,
- tolerance=pd.Timedelta(seconds=0.5),
+ tolerance=Timedelta(seconds=0.5),
)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 71bcce12796f5..7b794668803c3 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -188,7 +188,7 @@ def test_getitem_slice_date(self, slc, positions):
class TestSeriesGetitemListLike:
- @pytest.mark.parametrize("box", [list, np.array, pd.Index, pd.Series])
+ @pytest.mark.parametrize("box", [list, np.array, Index, pd.Series])
def test_getitem_no_matches(self, box):
# GH#33462 we expect the same behavior for list/ndarray/Index/Series
ser = Series(["A", "B"])
@@ -212,7 +212,7 @@ def test_getitem_intlist_intindex_periodvalues(self):
tm.assert_series_equal(result, exp)
assert result.dtype == "Period[D]"
- @pytest.mark.parametrize("box", [list, np.array, pd.Index])
+ @pytest.mark.parametrize("box", [list, np.array, Index])
def test_getitem_intlist_intervalindex_non_int(self, box):
# GH#33404 fall back to positional since ints are unambiguous
dti = date_range("2000-01-03", periods=3)._with_freq(None)
@@ -224,11 +224,11 @@ def test_getitem_intlist_intervalindex_non_int(self, box):
result = ser[key]
tm.assert_series_equal(result, expected)
- @pytest.mark.parametrize("box", [list, np.array, pd.Index])
+ @pytest.mark.parametrize("box", [list, np.array, Index])
@pytest.mark.parametrize("dtype", [np.int64, np.float64, np.uint64])
def test_getitem_intlist_multiindex_numeric_level(self, dtype, box):
# GH#33404 do _not_ fall back to positional since ints are ambiguous
- idx = pd.Index(range(4)).astype(dtype)
+ idx = Index(range(4)).astype(dtype)
dti = date_range("2000-01-03", periods=3)
mi = pd.MultiIndex.from_product([idx, dti])
ser = Series(range(len(mi))[::-1], index=mi)
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 88087110fc221..1f2adaafbbccd 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -487,7 +487,7 @@ def test_categorical_assigning_ops():
def test_getitem_categorical_str():
# GH#31765
- ser = Series(range(5), index=pd.Categorical(["a", "b", "c", "a", "b"]))
+ ser = Series(range(5), index=Categorical(["a", "b", "c", "a", "b"]))
result = ser["a"]
expected = ser.iloc[[0, 3]]
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 7325505ce233b..75e7f8a17eda3 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -251,7 +251,7 @@ class County:
def __repr__(self) -> str:
return self.name + ", " + self.state
- cat = pd.Categorical([County() for _ in range(61)])
+ cat = Categorical([County() for _ in range(61)])
idx = Index(cat)
ser = idx.to_series()
diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py
index 183d2814920e4..21c7477918d02 100644
--- a/pandas/tests/window/test_expanding.py
+++ b/pandas/tests/window/test_expanding.py
@@ -90,14 +90,14 @@ def test_empty_df_expanding(expander):
def test_missing_minp_zero():
# https://github.com/pandas-dev/pandas/pull/18921
# minp=0
- x = pd.Series([np.nan])
+ x = Series([np.nan])
result = x.expanding(min_periods=0).sum()
- expected = pd.Series([0.0])
+ expected = Series([0.0])
tm.assert_series_equal(result, expected)
# minp=1
result = x.expanding(min_periods=1).sum()
- expected = pd.Series([np.nan])
+ expected = Series([np.nan])
tm.assert_series_equal(result, expected)
@@ -252,6 +252,6 @@ def test_expanding_sem(constructor):
obj = getattr(pd, constructor)([0, 1, 2])
result = obj.expanding().sem()
if isinstance(result, DataFrame):
- result = pd.Series(result[0].values)
- expected = pd.Series([np.nan] + [0.707107] * 2)
+ result = Series(result[0].values)
+ expected = Series([np.nan] + [0.707107] * 2)
tm.assert_series_equal(result, expected)
diff --git a/scripts/check_for_inconsistent_pandas_namespace.py b/scripts/check_for_inconsistent_pandas_namespace.py
new file mode 100644
index 0000000000000..4b4515cdf7e11
--- /dev/null
+++ b/scripts/check_for_inconsistent_pandas_namespace.py
@@ -0,0 +1,64 @@
+"""
+Check that test suite file doesn't use the pandas namespace inconsistently.
+
+We check for cases of ``Series`` and ``pd.Series`` appearing in the same file
+(likewise for some other common classes).
+
+This is meant to be run as a pre-commit hook - to run it manually, you can do:
+
+ pre-commit run inconsistent-namespace-usage --all-files
+"""
+
+import argparse
+from pathlib import Path
+import re
+from typing import Optional, Sequence
+
+PATTERN = r"""
+ (
+ (?<!pd\.)(?<!\w) # check class_name start with pd. or character
+ {class_name}\( # match DataFrame but not pd.DataFrame or tm.makeDataFrame
+ .* # match anything
+ pd\.{class_name}\( # only match e.g. pd.DataFrame
+ )|
+ (
+ pd\.{class_name}\( # only match e.g. pd.DataFrame
+ .* # match anything
+ (?<!pd\.)(?<!\w) # check class_name start with pd. or character
+ {class_name}\( # match DataFrame but not pd.DataFrame or tm.makeDataFrame
+ )
+ """
+CLASS_NAMES = (
+ "Series",
+ "DataFrame",
+ "Index",
+ "MultiIndex",
+ "Timestamp",
+ "Timedelta",
+ "TimedeltaIndex",
+ "DatetimeIndex",
+ "Categorical",
+)
+ERROR_MESSAGE = "Found both `pd.{class_name}` and `{class_name}` in {path}"
+
+
+def main(argv: Optional[Sequence[str]] = None) -> None:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("paths", nargs="*", type=Path)
+ args = parser.parse_args(argv)
+
+ for class_name in CLASS_NAMES:
+ pattern = re.compile(
+ PATTERN.format(class_name=class_name).encode(),
+ flags=re.MULTILINE | re.DOTALL | re.VERBOSE,
+ )
+ for path in args.paths:
+ contents = path.read_bytes()
+ match = pattern.search(contents)
+ assert match is None, ERROR_MESSAGE.format(
+ class_name=class_name, path=str(path)
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/tests/test_inconsistent_namespace_check.py b/scripts/tests/test_inconsistent_namespace_check.py
new file mode 100644
index 0000000000000..37e6d288d9341
--- /dev/null
+++ b/scripts/tests/test_inconsistent_namespace_check.py
@@ -0,0 +1,28 @@
+from pathlib import Path
+
+import pytest
+
+from scripts.check_for_inconsistent_pandas_namespace import main
+
+BAD_FILE_0 = "cat_0 = Categorical()\ncat_1 = pd.Categorical()"
+BAD_FILE_1 = "cat_0 = pd.Categorical()\ncat_1 = Categorical()"
+GOOD_FILE_0 = "cat_0 = Categorical()\ncat_1 = Categorical()"
+GOOD_FILE_1 = "cat_0 = pd.Categorical()\ncat_1 = pd.Categorical()"
+
+
+@pytest.mark.parametrize("content", [BAD_FILE_0, BAD_FILE_1])
+def test_inconsistent_usage(tmpdir, content):
+ tmpfile = Path(tmpdir / "tmpfile.py")
+ tmpfile.touch()
+ tmpfile.write_text(content)
+ msg = fr"Found both `pd\.Categorical` and `Categorical` in {str(tmpfile)}"
+ with pytest.raises(AssertionError, match=msg):
+ main((str(tmpfile),))
+
+
+@pytest.mark.parametrize("content", [GOOD_FILE_0, GOOD_FILE_1])
+def test_consistent_usage(tmpdir, content):
+ tmpfile = Path(tmpdir / "tmpfile.py")
+ tmpfile.touch()
+ tmpfile.write_text(content)
+ main((str(tmpfile),)) # Should not raise.
| xref https://github.com/pandas-dev/pandas/pull/37401#discussion_r515312381 | https://api.github.com/repos/pandas-dev/pandas/pulls/37662 | 2020-11-06T08:10:27Z | 2020-11-08T21:24:34Z | 2020-11-08T21:24:34Z | 2020-11-08T21:43:01Z |
BUG: RollingGroupby when groupby key is in the index | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index e0fa68e3b9f80..a29ae1912e338 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -25,6 +25,7 @@ Bug fixes
~~~~~~~~~
- Bug in metadata propagation for ``groupby`` iterator (:issue:`37343`)
- Bug in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`)
+- Bug in :class:`RollingGroupby` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index a976350a419fe..5d561c84ab462 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -762,19 +762,39 @@ def _apply(
use_numba_cache,
**kwargs,
)
- # Compose MultiIndex result from grouping levels then rolling level
- # Aggregate the MultiIndex data as tuples then the level names
- grouped_object_index = self.obj.index
- grouped_index_name = [*grouped_object_index.names]
- groupby_keys = [grouping.name for grouping in self._groupby.grouper._groupings]
- result_index_names = groupby_keys + grouped_index_name
+ # Reconstruct the resulting MultiIndex from tuples
+ # 1st set of levels = group by labels
+ # 2nd set of levels = original index
+ # Ignore 2nd set of levels if a group by label include an index level
+ result_index_names = [
+ grouping.name for grouping in self._groupby.grouper._groupings
+ ]
+ grouped_object_index = None
+
+ column_keys = [
+ key
+ for key in result_index_names
+ if key not in self.obj.index.names or key is None
+ ]
+
+ if len(column_keys) == len(result_index_names):
+ grouped_object_index = self.obj.index
+ grouped_index_name = [*grouped_object_index.names]
+ result_index_names += grouped_index_name
+ else:
+ # Our result will have still kept the column in the result
+ result = result.drop(columns=column_keys, errors="ignore")
result_index_data = []
for key, values in self._groupby.grouper.indices.items():
for value in values:
data = [
*com.maybe_make_list(key),
- *com.maybe_make_list(grouped_object_index[value]),
+ *com.maybe_make_list(
+ grouped_object_index[value]
+ if grouped_object_index is not None
+ else []
+ ),
]
result_index_data.append(tuple(data))
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index 101d65c885c9b..65906df819054 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -2,7 +2,7 @@
import pytest
import pandas as pd
-from pandas import DataFrame, Series
+from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
from pandas.core.groupby.groupby import get_groupby
@@ -601,3 +601,33 @@ def test_groupby_rolling_nans_in_index(self, rollings, key):
df = df.set_index("a")
with pytest.raises(ValueError, match=f"{key} must be monotonic"):
df.groupby("c").rolling("60min", **rollings)
+
+ def test_groupby_rolling_group_keys(self):
+ # GH 37641
+ arrays = [["val1", "val1", "val2"], ["val1", "val1", "val2"]]
+ index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2"))
+
+ s = Series([1, 2, 3], index=index)
+ result = s.groupby(["idx1", "idx2"], group_keys=False).rolling(1).mean()
+ expected = Series(
+ [1.0, 2.0, 3.0],
+ index=MultiIndex.from_tuples(
+ [("val1", "val1"), ("val1", "val1"), ("val2", "val2")],
+ names=["idx1", "idx2"],
+ ),
+ )
+ tm.assert_series_equal(result, expected)
+
+ def test_groupby_rolling_index_level_and_column_label(self):
+ arrays = [["val1", "val1", "val2"], ["val1", "val1", "val2"]]
+ index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2"))
+
+ df = DataFrame({"A": [1, 1, 2], "B": range(3)}, index=index)
+ result = df.groupby(["idx1", "A"]).rolling(1).mean()
+ expected = DataFrame(
+ {"B": [0.0, 1.0, 2.0]},
+ index=MultiIndex.from_tuples(
+ [("val1", 1), ("val1", 1), ("val2", 2)], names=["idx1", "A"]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] closes #37641
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37661 | 2020-11-06T07:07:49Z | 2020-11-09T18:56:37Z | 2020-11-09T18:56:37Z | 2020-11-10T13:40:23Z |
REF: implement _wrap_reduction_result | diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 63c414d96c8de..c1b5897164d76 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -1,4 +1,4 @@
-from typing import Any, Sequence, TypeVar
+from typing import Any, Optional, Sequence, TypeVar
import numpy as np
@@ -254,6 +254,11 @@ def _reduce(self, name: str, skipna: bool = True, **kwargs):
msg = f"'{type(self).__name__}' does not implement reduction '{name}'"
raise TypeError(msg)
+ def _wrap_reduction_result(self, axis: Optional[int], result):
+ if axis is None or self.ndim == 1:
+ return self._box_func(result)
+ return self._from_backing_data(result)
+
# ------------------------------------------------------------------------
def __repr__(self) -> str:
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index edbf24ca87f5c..57d934a633911 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1957,7 +1957,7 @@ def min(self, *, skipna=True, **kwargs):
return np.nan
else:
pointer = self._codes.min()
- return self.categories[pointer]
+ return self._wrap_reduction_result(None, pointer)
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def max(self, *, skipna=True, **kwargs):
@@ -1993,7 +1993,7 @@ def max(self, *, skipna=True, **kwargs):
return np.nan
else:
pointer = self._codes.max()
- return self.categories[pointer]
+ return self._wrap_reduction_result(None, pointer)
def mode(self, dropna=True):
"""
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 7a0d88f29b9b0..8d90035491d28 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1283,9 +1283,7 @@ def min(self, *, axis=None, skipna=True, **kwargs):
return self._from_backing_data(result)
result = nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)
- if lib.is_scalar(result):
- return self._box_func(result)
- return self._from_backing_data(result)
+ return self._wrap_reduction_result(axis, result)
def max(self, *, axis=None, skipna=True, **kwargs):
"""
@@ -1316,9 +1314,7 @@ def max(self, *, axis=None, skipna=True, **kwargs):
return self._from_backing_data(result)
result = nanops.nanmax(self._ndarray, axis=axis, skipna=skipna)
- if lib.is_scalar(result):
- return self._box_func(result)
- return self._from_backing_data(result)
+ return self._wrap_reduction_result(axis, result)
def mean(self, *, skipna=True, axis: Optional[int] = 0):
"""
@@ -1357,9 +1353,7 @@ def mean(self, *, skipna=True, axis: Optional[int] = 0):
result = nanops.nanmean(
self._ndarray, axis=axis, skipna=skipna, mask=self.isna()
)
- if axis is None or self.ndim == 1:
- return self._box_func(result)
- return self._from_backing_data(result)
+ return self._wrap_reduction_result(axis, result)
def median(self, *, axis: Optional[int] = None, skipna: bool = True, **kwargs):
nv.validate_median((), kwargs)
@@ -1378,9 +1372,7 @@ def median(self, *, axis: Optional[int] = None, skipna: bool = True, **kwargs):
return self._from_backing_data(result)
result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
- if axis is None or self.ndim == 1:
- return self._box_func(result)
- return self._from_backing_data(result)
+ return self._wrap_reduction_result(axis, result)
class DatelikeOps(DatetimeLikeArrayMixin):
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index e1a424b719a4a..20fae20c395e6 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -12,7 +12,6 @@
from pandas.core.dtypes.missing import isna
from pandas.core import nanops, ops
-from pandas.core.array_algos import masked_reductions
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.strings.object_array import ObjectStringArrayMixin
@@ -273,39 +272,46 @@ def _values_for_factorize(self) -> Tuple[np.ndarray, int]:
def any(self, *, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_any((), dict(out=out, keepdims=keepdims))
- return nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
+ result = nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
+ return self._wrap_reduction_result(axis, result)
def all(self, *, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_all((), dict(out=out, keepdims=keepdims))
- return nanops.nanall(self._ndarray, axis=axis, skipna=skipna)
+ result = nanops.nanall(self._ndarray, axis=axis, skipna=skipna)
+ return self._wrap_reduction_result(axis, result)
- def min(self, *, skipna: bool = True, **kwargs) -> Scalar:
+ def min(self, *, axis=None, skipna: bool = True, **kwargs) -> Scalar:
nv.validate_min((), kwargs)
- return masked_reductions.min(
- values=self.to_numpy(), mask=self.isna(), skipna=skipna
+ result = nanops.nanmin(
+ values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
)
+ return self._wrap_reduction_result(axis, result)
- def max(self, *, skipna: bool = True, **kwargs) -> Scalar:
+ def max(self, *, axis=None, skipna: bool = True, **kwargs) -> Scalar:
nv.validate_max((), kwargs)
- return masked_reductions.max(
- values=self.to_numpy(), mask=self.isna(), skipna=skipna
+ result = nanops.nanmax(
+ values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
)
+ return self._wrap_reduction_result(axis, result)
def sum(self, *, axis=None, skipna=True, min_count=0, **kwargs) -> Scalar:
nv.validate_sum((), kwargs)
- return nanops.nansum(
+ result = nanops.nansum(
self._ndarray, axis=axis, skipna=skipna, min_count=min_count
)
+ return self._wrap_reduction_result(axis, result)
def prod(self, *, axis=None, skipna=True, min_count=0, **kwargs) -> Scalar:
nv.validate_prod((), kwargs)
- return nanops.nanprod(
+ result = nanops.nanprod(
self._ndarray, axis=axis, skipna=skipna, min_count=min_count
)
+ return self._wrap_reduction_result(axis, result)
def mean(self, *, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
nv.validate_mean((), dict(dtype=dtype, out=out, keepdims=keepdims))
- return nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
+ result = nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
+ return self._wrap_reduction_result(axis, result)
def median(
self, *, axis=None, out=None, overwrite_input=False, keepdims=False, skipna=True
@@ -313,7 +319,8 @@ def median(
nv.validate_median(
(), dict(out=out, overwrite_input=overwrite_input, keepdims=keepdims)
)
- return nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
+ result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
+ return self._wrap_reduction_result(axis, result)
def std(
self, *, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True
@@ -321,7 +328,8 @@ def std(
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="std"
)
- return nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
+ result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
+ return self._wrap_reduction_result(axis, result)
def var(
self, *, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True
@@ -329,7 +337,8 @@ def var(
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="var"
)
- return nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
+ result = nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
+ return self._wrap_reduction_result(axis, result)
def sem(
self, *, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True
@@ -337,19 +346,22 @@ def sem(
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="sem"
)
- return nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
+ result = nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
+ return self._wrap_reduction_result(axis, result)
def kurt(self, *, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="kurt"
)
- return nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
+ result = nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
+ return self._wrap_reduction_result(axis, result)
def skew(self, *, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
nv.validate_stat_ddof_func(
(), dict(dtype=dtype, out=out, keepdims=keepdims), fname="skew"
)
- return nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)
+ result = nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)
+ return self._wrap_reduction_result(axis, result)
# ------------------------------------------------------------------------
# Additional Methods
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 8231a5fa0509b..a51dd1098a359 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -3,6 +3,8 @@
import numpy as np
from pandas._libs import lib, missing as libmissing
+from pandas._typing import Scalar
+from pandas.compat.numpy import function as nv
from pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype
from pandas.core.dtypes.common import (
@@ -15,6 +17,7 @@
)
from pandas.core import ops
+from pandas.core.array_algos import masked_reductions
from pandas.core.arrays import IntegerArray, PandasArray
from pandas.core.arrays.integer import _IntegerDtype
from pandas.core.construction import extract_array
@@ -301,6 +304,20 @@ def _reduce(self, name: str, skipna: bool = True, **kwargs):
raise TypeError(f"Cannot perform reduction '{name}' with string dtype")
+ def min(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
+ nv.validate_min((), kwargs)
+ result = masked_reductions.min(
+ values=self.to_numpy(), mask=self.isna(), skipna=skipna
+ )
+ return self._wrap_reduction_result(axis, result)
+
+ def max(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
+ nv.validate_max((), kwargs)
+ result = masked_reductions.max(
+ values=self.to_numpy(), mask=self.isna(), skipna=skipna
+ )
+ return self._wrap_reduction_result(axis, result)
+
def value_counts(self, dropna=False):
from pandas import value_counts
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 8a87df18b6adb..c227c071546ce 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -381,9 +381,7 @@ def sum(
result = nanops.nansum(
self._ndarray, axis=axis, skipna=skipna, min_count=min_count
)
- if axis is None or self.ndim == 1:
- return self._box_func(result)
- return self._from_backing_data(result)
+ return self._wrap_reduction_result(axis, result)
def std(
self,
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 8e917bb770247..b045e789b52a8 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -344,6 +344,7 @@ def _wrap_results(result, dtype: DtypeObj, fill_value=None):
assert not isna(fill_value), "Expected non-null fill_value"
if result == fill_value:
result = np.nan
+
if tz is not None:
# we get here e.g. via nanmean when we call it on a DTA[tz]
result = Timestamp(result, tz=tz)
| https://api.github.com/repos/pandas-dev/pandas/pulls/37660 | 2020-11-06T03:45:04Z | 2020-11-08T02:14:32Z | 2020-11-08T02:14:32Z | 2020-11-08T02:38:59Z | |
CI: catch windows py38 OSError | diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd
index 1990afd77a8fb..3666d00707ac8 100644
--- a/pandas/_libs/tslibs/tzconversion.pxd
+++ b/pandas/_libs/tslibs/tzconversion.pxd
@@ -2,7 +2,9 @@ from cpython.datetime cimport tzinfo
from numpy cimport int64_t
-cdef int64_t tz_convert_utc_to_tzlocal(int64_t utc_val, tzinfo tz, bint* fold=*)
+cdef int64_t tz_convert_utc_to_tzlocal(
+ int64_t utc_val, tzinfo tz, bint* fold=*
+) except? -1
cpdef int64_t tz_convert_from_utc_single(int64_t val, tzinfo tz)
cdef int64_t tz_localize_to_utc_single(
int64_t val, tzinfo tz, object ambiguous=*, object nonexistent=*
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index 4a3fac1954ab7..f08a86b1262e6 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -355,7 +355,9 @@ cdef inline str _render_tstamp(int64_t val):
# ----------------------------------------------------------------------
# Timezone Conversion
-cdef int64_t tz_convert_utc_to_tzlocal(int64_t utc_val, tzinfo tz, bint* fold=NULL):
+cdef int64_t tz_convert_utc_to_tzlocal(
+ int64_t utc_val, tzinfo tz, bint* fold=NULL
+) except? -1:
"""
Parameters
----------
@@ -549,8 +551,10 @@ cdef inline int64_t _tzlocal_get_offset_components(int64_t val, tzinfo tz,
return int(td.total_seconds() * 1_000_000_000)
+# OSError may be thrown by tzlocal on windows at or close to 1970-01-01
+# see https://github.com/pandas-dev/pandas/pull/37591#issuecomment-720628241
cdef int64_t _tz_convert_tzlocal_utc(int64_t val, tzinfo tz, bint to_utc=True,
- bint* fold=NULL):
+ bint* fold=NULL) except? -1:
"""
Convert the i8 representation of a datetime from a tzlocal timezone to
UTC, or vice-versa.
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index fbb51b70d34fd..374d185f45844 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -1,9 +1,11 @@
from datetime import timedelta
from decimal import Decimal
+from dateutil.tz import tzlocal
import numpy as np
import pytest
+from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
@@ -1172,6 +1174,12 @@ def test_min_max_dt64_with_NaT(self):
def test_min_max_dt64_with_NaT_skipna_false(self, tz_naive_fixture):
# GH#36907
tz = tz_naive_fixture
+ if isinstance(tz, tzlocal) and is_platform_windows():
+ pytest.xfail(
+ reason="GH#37659 OSError raised within tzlocal bc Windows "
+ "chokes in times before 1970-01-01"
+ )
+
df = DataFrame(
{
"a": [
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 1d64fde103e9e..0359ee17f87c5 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -1,8 +1,11 @@
from datetime import datetime
+from dateutil.tz import tzlocal
import numpy as np
import pytest
+from pandas.compat import IS64
+
import pandas as pd
from pandas import (
DateOffset,
@@ -106,24 +109,27 @@ def test_repeat(self, tz_naive_fixture):
with pytest.raises(ValueError, match=msg):
np.repeat(rng, reps, axis=1)
- def test_resolution(self, tz_naive_fixture):
+ @pytest.mark.parametrize(
+ "freq,expected",
+ [
+ ("A", "day"),
+ ("Q", "day"),
+ ("M", "day"),
+ ("D", "day"),
+ ("H", "hour"),
+ ("T", "minute"),
+ ("S", "second"),
+ ("L", "millisecond"),
+ ("U", "microsecond"),
+ ],
+ )
+ def test_resolution(self, tz_naive_fixture, freq, expected):
tz = tz_naive_fixture
- for freq, expected in zip(
- ["A", "Q", "M", "D", "H", "T", "S", "L", "U"],
- [
- "day",
- "day",
- "day",
- "day",
- "hour",
- "minute",
- "second",
- "millisecond",
- "microsecond",
- ],
- ):
- idx = pd.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
- assert idx.resolution == expected
+ if freq == "A" and not IS64 and isinstance(tz, tzlocal):
+ pytest.xfail(reason="OverflowError inside tzlocal past 2038")
+
+ idx = pd.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
+ assert idx.resolution == expected
def test_value_counts_unique(self, tz_naive_fixture):
tz = tz_naive_fixture
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index fba123e47feb2..fca1316493e85 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -1,6 +1,7 @@
from datetime import date, datetime, time as dt_time, timedelta
from typing import Dict, List, Optional, Tuple, Type
+from dateutil.tz import tzlocal
import numpy as np
import pytest
@@ -14,6 +15,7 @@
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import ApplyTypeError, _get_offset, _offset_map
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG
+from pandas.compat import IS64
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import PerformanceWarning
@@ -129,6 +131,8 @@ def test_apply_out_of_range(self, tz_naive_fixture):
tz = tz_naive_fixture
if self._offset is None:
return
+ if isinstance(tz, tzlocal) and not IS64:
+ pytest.xfail(reason="OverflowError inside tzlocal past 2038")
# try to create an out-of-bounds result timestamp; if we can't create
# the offset skip
| Still seeing these following #37652, so catching again one level higher. | https://api.github.com/repos/pandas-dev/pandas/pulls/37659 | 2020-11-06T02:50:11Z | 2020-11-07T01:24:28Z | 2020-11-07T01:24:28Z | 2020-11-07T01:30:23Z |
REF: dont support dt64tz in nanmean | diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 6dc05c23c026f..cfb02e5b1e987 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -8,7 +8,7 @@
from pandas._config import get_option
-from pandas._libs import NaT, Timedelta, Timestamp, iNaT, lib
+from pandas._libs import NaT, Timedelta, iNaT, lib
from pandas._typing import ArrayLike, Dtype, DtypeObj, F, Scalar
from pandas.compat._optional import import_optional_dependency
@@ -330,7 +330,7 @@ def _na_ok_dtype(dtype: DtypeObj) -> bool:
return not issubclass(dtype.type, np.integer)
-def _wrap_results(result, dtype: DtypeObj, fill_value=None):
+def _wrap_results(result, dtype: np.dtype, fill_value=None):
""" wrap our results if needed """
if result is NaT:
pass
@@ -340,15 +340,11 @@ def _wrap_results(result, dtype: DtypeObj, fill_value=None):
# GH#24293
fill_value = iNaT
if not isinstance(result, np.ndarray):
- tz = getattr(dtype, "tz", None)
assert not isna(fill_value), "Expected non-null fill_value"
if result == fill_value:
result = np.nan
- if tz is not None:
- # we get here e.g. via nanmean when we call it on a DTA[tz]
- result = Timestamp(result, tz=tz)
- elif isna(result):
+ if isna(result):
result = np.datetime64("NaT", "ns")
else:
result = np.int64(result).view("datetime64[ns]")
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 458fba2e13b0f..359a7eecf6f7b 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -988,11 +988,10 @@ def prng(self):
class TestDatetime64NaNOps:
- @pytest.mark.parametrize("tz", [None, "UTC"])
# Enabling mean changes the behavior of DataFrame.mean
# See https://github.com/pandas-dev/pandas/issues/24752
- def test_nanmean(self, tz):
- dti = pd.date_range("2016-01-01", periods=3, tz=tz)
+ def test_nanmean(self):
+ dti = pd.date_range("2016-01-01", periods=3)
expected = dti[1]
for obj in [dti, DatetimeArray(dti), Series(dti)]:
| nanmean is annotated as taking a ndarray, no reason for it to support dt64tz | https://api.github.com/repos/pandas-dev/pandas/pulls/37658 | 2020-11-06T01:36:47Z | 2020-11-09T00:38:23Z | 2020-11-09T00:38:23Z | 2020-11-09T00:44:40Z |
BUG: unpickling modifies Block.ndim | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index a122154904996..e0fa68e3b9f80 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -24,6 +24,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
- Bug in metadata propagation for ``groupby`` iterator (:issue:`37343`)
+- Bug in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index fda4da8694ea3..767c653f8a404 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -278,14 +278,17 @@ def __getstate__(self):
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
- def unpickle_block(values, mgr_locs):
- return make_block(values, placement=mgr_locs)
+ def unpickle_block(values, mgr_locs, ndim: int):
+ # TODO(EA2D): ndim would be unnecessary with 2D EAs
+ return make_block(values, placement=mgr_locs, ndim=ndim)
if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
state = state[3]["0.14.1"]
self.axes = [ensure_index(ax) for ax in state["axes"]]
+ ndim = len(self.axes)
self.blocks = tuple(
- unpickle_block(b["values"], b["mgr_locs"]) for b in state["blocks"]
+ unpickle_block(b["values"], b["mgr_locs"], ndim=ndim)
+ for b in state["blocks"]
)
else:
raise NotImplementedError("pre-0.14.1 pickles are no longer supported")
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 925f6b5f125c7..34b36e2549b62 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -576,3 +576,15 @@ def test_pickle_datetimes(datetime_series):
def test_pickle_strings(string_series):
unp_series = tm.round_trip_pickle(string_series)
tm.assert_series_equal(unp_series, string_series)
+
+
+def test_pickle_preserves_block_ndim():
+ # GH#37631
+ ser = Series(list("abc")).astype("category").iloc[[0]]
+ res = tm.round_trip_pickle(ser)
+
+ assert res._mgr.blocks[0].ndim == 1
+ assert res._mgr.blocks[0].shape == (1,)
+
+ # GH#37631 OP issue was about indexing, underlying problem was pickle
+ tm.assert_series_equal(res[[True]], ser)
| - [x] closes #37631
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37657 | 2020-11-06T00:38:38Z | 2020-11-09T00:38:09Z | 2020-11-09T00:38:08Z | 2020-11-09T11:20:22Z |
TST: split up tests/plotting/test_frame.py into subdir & modules #34769 | diff --git a/pandas/tests/plotting/frame/__init__.py b/pandas/tests/plotting/frame/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
similarity index 61%
rename from pandas/tests/plotting/test_frame.py
rename to pandas/tests/plotting/frame/test_frame.py
index bb7507243412f..3c43e0b693a1b 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -39,14 +39,6 @@ def setup_method(self, method):
}
)
- def _assert_ytickslabels_visibility(self, axes, expected):
- for ax, exp in zip(axes, expected):
- self._check_visible(ax.get_yticklabels(), visible=exp)
-
- def _assert_xtickslabels_visibility(self, axes, expected):
- for ax, exp in zip(axes, expected):
- self._check_visible(ax.get_xticklabels(), visible=exp)
-
@pytest.mark.slow
def test_plot(self):
from pandas.plotting._matplotlib.compat import mpl_ge_3_1_0
@@ -174,74 +166,6 @@ def test_integer_array_plot(self):
_check_plot_works(df.plot.scatter, x="x", y="y")
_check_plot_works(df.plot.hexbin, x="x", y="y")
- def test_mpl2_color_cycle_str(self):
- # GH 15516
- df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
- colors = ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"]
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter("always", "MatplotlibDeprecationWarning")
-
- for color in colors:
- _check_plot_works(df.plot, color=color)
-
- # if warning is raised, check that it is the exact problematic one
- # GH 36972
- if w:
- match = "Support for uppercase single-letter colors is deprecated"
- warning_message = str(w[0].message)
- msg = "MatplotlibDeprecationWarning related to CN colors was raised"
- assert match not in warning_message, msg
-
- def test_color_single_series_list(self):
- # GH 3486
- df = DataFrame({"A": [1, 2, 3]})
- _check_plot_works(df.plot, color=["red"])
-
- @pytest.mark.parametrize("color", [(1, 0, 0), (1, 0, 0, 0.5)])
- def test_rgb_tuple_color(self, color):
- # GH 16695
- df = DataFrame({"x": [1, 2], "y": [3, 4]})
- _check_plot_works(df.plot, x="x", y="y", color=color)
-
- def test_color_empty_string(self):
- df = DataFrame(np.random.randn(10, 2))
- with pytest.raises(ValueError):
- df.plot(color="")
-
- def test_color_and_style_arguments(self):
- df = DataFrame({"x": [1, 2], "y": [3, 4]})
- # passing both 'color' and 'style' arguments should be allowed
- # if there is no color symbol in the style strings:
- ax = df.plot(color=["red", "black"], style=["-", "--"])
- # check that the linestyles are correctly set:
- linestyle = [line.get_linestyle() for line in ax.lines]
- assert linestyle == ["-", "--"]
- # check that the colors are correctly set:
- color = [line.get_color() for line in ax.lines]
- assert color == ["red", "black"]
- # passing both 'color' and 'style' arguments should not be allowed
- # if there is a color symbol in the style strings:
- with pytest.raises(ValueError):
- df.plot(color=["red", "black"], style=["k-", "r--"])
-
- @pytest.mark.parametrize(
- "color, expected",
- [
- ("green", ["green"] * 4),
- (["yellow", "red", "green", "blue"], ["yellow", "red", "green", "blue"]),
- ],
- )
- def test_color_and_marker(self, color, expected):
- # GH 21003
- df = DataFrame(np.random.random((7, 4)))
- ax = df.plot(color=color, style="d--")
- # check colors
- result = [i.get_color() for i in ax.lines]
- assert result == expected
- # check markers and linestyles
- assert all(i.get_linestyle() == "--" for i in ax.lines)
- assert all(i.get_marker() == "d" for i in ax.lines)
-
def test_nonnumeric_exclude(self):
df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]})
ax = df.plot()
@@ -411,405 +335,6 @@ def test_unsorted_index_lims(self):
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
- @pytest.mark.slow
- def test_subplots(self):
- df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
-
- for kind in ["bar", "barh", "line", "area"]:
- axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
- self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
- assert axes.shape == (3,)
-
- for ax, column in zip(axes, df.columns):
- self._check_legend_labels(ax, labels=[pprint_thing(column)])
-
- for ax in axes[:-2]:
- self._check_visible(ax.xaxis) # xaxis must be visible for grid
- self._check_visible(ax.get_xticklabels(), visible=False)
- if not (kind == "bar" and self.mpl_ge_3_1_0):
- # change https://github.com/pandas-dev/pandas/issues/26714
- self._check_visible(ax.get_xticklabels(minor=True), visible=False)
- self._check_visible(ax.xaxis.get_label(), visible=False)
- self._check_visible(ax.get_yticklabels())
-
- self._check_visible(axes[-1].xaxis)
- self._check_visible(axes[-1].get_xticklabels())
- self._check_visible(axes[-1].get_xticklabels(minor=True))
- self._check_visible(axes[-1].xaxis.get_label())
- self._check_visible(axes[-1].get_yticklabels())
-
- axes = df.plot(kind=kind, subplots=True, sharex=False)
- for ax in axes:
- self._check_visible(ax.xaxis)
- self._check_visible(ax.get_xticklabels())
- self._check_visible(ax.get_xticklabels(minor=True))
- self._check_visible(ax.xaxis.get_label())
- self._check_visible(ax.get_yticklabels())
-
- axes = df.plot(kind=kind, subplots=True, legend=False)
- for ax in axes:
- assert ax.get_legend() is None
-
- @pytest.mark.parametrize(
- "kwargs, expected",
- [
- # behavior without keyword
- ({}, [True, False, True, False]),
- # set sharey=True should be identical
- ({"sharey": True}, [True, False, True, False]),
- # sharey=False, all yticklabels should be visible
- ({"sharey": False}, [True, True, True, True]),
- ],
- )
- def test_groupby_boxplot_sharey(self, kwargs, expected):
- # https://github.com/pandas-dev/pandas/issues/20968
- # sharey can now be switched check whether the right
- # pair of axes is turned on or off
- df = DataFrame(
- {
- "a": [-1.43, -0.15, -3.70, -1.43, -0.14],
- "b": [0.56, 0.84, 0.29, 0.56, 0.85],
- "c": [0, 1, 2, 3, 1],
- },
- index=[0, 1, 2, 3, 4],
- )
- axes = df.groupby("c").boxplot(**kwargs)
- self._assert_ytickslabels_visibility(axes, expected)
-
- @pytest.mark.parametrize(
- "kwargs, expected",
- [
- # behavior without keyword
- ({}, [True, True, True, True]),
- # set sharex=False should be identical
- ({"sharex": False}, [True, True, True, True]),
- # sharex=True, xticklabels should be visible
- # only for bottom plots
- ({"sharex": True}, [False, False, True, True]),
- ],
- )
- def test_groupby_boxplot_sharex(self, kwargs, expected):
- # https://github.com/pandas-dev/pandas/issues/20968
- # sharex can now be switched check whether the right
- # pair of axes is turned on or off
- df = DataFrame(
- {
- "a": [-1.43, -0.15, -3.70, -1.43, -0.14],
- "b": [0.56, 0.84, 0.29, 0.56, 0.85],
- "c": [0, 1, 2, 3, 1],
- },
- index=[0, 1, 2, 3, 4],
- )
- axes = df.groupby("c").boxplot(**kwargs)
- self._assert_xtickslabels_visibility(axes, expected)
-
- @pytest.mark.slow
- def test_subplots_timeseries(self):
- idx = date_range(start="2014-07-01", freq="M", periods=10)
- df = DataFrame(np.random.rand(10, 3), index=idx)
-
- for kind in ["line", "area"]:
- axes = df.plot(kind=kind, subplots=True, sharex=True)
- self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
-
- for ax in axes[:-2]:
- # GH 7801
- self._check_visible(ax.xaxis) # xaxis must be visible for grid
- self._check_visible(ax.get_xticklabels(), visible=False)
- self._check_visible(ax.get_xticklabels(minor=True), visible=False)
- self._check_visible(ax.xaxis.get_label(), visible=False)
- self._check_visible(ax.get_yticklabels())
-
- self._check_visible(axes[-1].xaxis)
- self._check_visible(axes[-1].get_xticklabels())
- self._check_visible(axes[-1].get_xticklabels(minor=True))
- self._check_visible(axes[-1].xaxis.get_label())
- self._check_visible(axes[-1].get_yticklabels())
- self._check_ticks_props(axes, xrot=0)
-
- axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
- for ax in axes:
- self._check_visible(ax.xaxis)
- self._check_visible(ax.get_xticklabels())
- self._check_visible(ax.get_xticklabels(minor=True))
- self._check_visible(ax.xaxis.get_label())
- self._check_visible(ax.get_yticklabels())
- self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
-
- def test_subplots_timeseries_y_axis(self):
- # GH16953
- data = {
- "numeric": np.array([1, 2, 5]),
- "timedelta": [
- pd.Timedelta(-10, unit="s"),
- pd.Timedelta(10, unit="m"),
- pd.Timedelta(10, unit="h"),
- ],
- "datetime_no_tz": [
- pd.to_datetime("2017-08-01 00:00:00"),
- pd.to_datetime("2017-08-01 02:00:00"),
- pd.to_datetime("2017-08-02 00:00:00"),
- ],
- "datetime_all_tz": [
- pd.to_datetime("2017-08-01 00:00:00", utc=True),
- pd.to_datetime("2017-08-01 02:00:00", utc=True),
- pd.to_datetime("2017-08-02 00:00:00", utc=True),
- ],
- "text": ["This", "should", "fail"],
- }
- testdata = DataFrame(data)
-
- y_cols = ["numeric", "timedelta", "datetime_no_tz", "datetime_all_tz"]
- for col in y_cols:
- ax = testdata.plot(y=col)
- result = ax.get_lines()[0].get_data()[1]
- expected = testdata[col].values
- assert (result == expected).all()
-
- msg = "no numeric data to plot"
- with pytest.raises(TypeError, match=msg):
- testdata.plot(y="text")
-
- @pytest.mark.xfail(reason="not support for period, categorical, datetime_mixed_tz")
- def test_subplots_timeseries_y_axis_not_supported(self):
- """
- This test will fail for:
- period:
- since period isn't yet implemented in ``select_dtypes``
- and because it will need a custom value converter +
- tick formatter (as was done for x-axis plots)
-
- categorical:
- because it will need a custom value converter +
- tick formatter (also doesn't work for x-axis, as of now)
-
- datetime_mixed_tz:
- because of the way how pandas handles ``Series`` of
- ``datetime`` objects with different timezone,
- generally converting ``datetime`` objects in a tz-aware
- form could help with this problem
- """
- data = {
- "numeric": np.array([1, 2, 5]),
- "period": [
- pd.Period("2017-08-01 00:00:00", freq="H"),
- pd.Period("2017-08-01 02:00", freq="H"),
- pd.Period("2017-08-02 00:00:00", freq="H"),
- ],
- "categorical": pd.Categorical(
- ["c", "b", "a"], categories=["a", "b", "c"], ordered=False
- ),
- "datetime_mixed_tz": [
- pd.to_datetime("2017-08-01 00:00:00", utc=True),
- pd.to_datetime("2017-08-01 02:00:00"),
- pd.to_datetime("2017-08-02 00:00:00"),
- ],
- }
- testdata = DataFrame(data)
- ax_period = testdata.plot(x="numeric", y="period")
- assert (
- ax_period.get_lines()[0].get_data()[1] == testdata["period"].values
- ).all()
- ax_categorical = testdata.plot(x="numeric", y="categorical")
- assert (
- ax_categorical.get_lines()[0].get_data()[1]
- == testdata["categorical"].values
- ).all()
- ax_datetime_mixed_tz = testdata.plot(x="numeric", y="datetime_mixed_tz")
- assert (
- ax_datetime_mixed_tz.get_lines()[0].get_data()[1]
- == testdata["datetime_mixed_tz"].values
- ).all()
-
- @pytest.mark.slow
- def test_subplots_layout_multi_column(self):
- # GH 6667
- df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
-
- axes = df.plot(subplots=True, layout=(2, 2))
- self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
- assert axes.shape == (2, 2)
-
- axes = df.plot(subplots=True, layout=(-1, 2))
- self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
- assert axes.shape == (2, 2)
-
- axes = df.plot(subplots=True, layout=(2, -1))
- self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
- assert axes.shape == (2, 2)
-
- axes = df.plot(subplots=True, layout=(1, 4))
- self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
- assert axes.shape == (1, 4)
-
- axes = df.plot(subplots=True, layout=(-1, 4))
- self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
- assert axes.shape == (1, 4)
-
- axes = df.plot(subplots=True, layout=(4, -1))
- self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
- assert axes.shape == (4, 1)
-
- with pytest.raises(ValueError):
- df.plot(subplots=True, layout=(1, 1))
- with pytest.raises(ValueError):
- df.plot(subplots=True, layout=(-1, -1))
-
- @pytest.mark.slow
- @pytest.mark.parametrize(
- "kwargs, expected_axes_num, expected_layout, expected_shape",
- [
- ({}, 1, (1, 1), (1,)),
- ({"layout": (3, 3)}, 1, (3, 3), (3, 3)),
- ],
- )
- def test_subplots_layout_single_column(
- self, kwargs, expected_axes_num, expected_layout, expected_shape
- ):
- # GH 6667
- df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
- axes = df.plot(subplots=True, **kwargs)
- self._check_axes_shape(
- axes,
- axes_num=expected_axes_num,
- layout=expected_layout,
- )
- assert axes.shape == expected_shape
-
- @pytest.mark.slow
- def test_subplots_warnings(self):
- # GH 9464
- with tm.assert_produces_warning(None):
- df = DataFrame(np.random.randn(100, 4))
- df.plot(subplots=True, layout=(3, 2))
-
- df = DataFrame(
- np.random.randn(100, 4), index=date_range("1/1/2000", periods=100)
- )
- df.plot(subplots=True, layout=(3, 2))
-
- @pytest.mark.slow
- def test_subplots_multiple_axes(self):
- # GH 5353, 6970, GH 7069
- fig, axes = self.plt.subplots(2, 3)
- df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
-
- returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
- self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
- assert returned.shape == (3,)
- assert returned[0].figure is fig
- # draw on second row
- returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
- self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
- assert returned.shape == (3,)
- assert returned[0].figure is fig
- self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
- tm.close()
-
- with pytest.raises(ValueError):
- fig, axes = self.plt.subplots(2, 3)
- # pass different number of axes from required
- df.plot(subplots=True, ax=axes)
-
- # pass 2-dim axes and invalid layout
- # invalid lauout should not affect to input and return value
- # (show warning is tested in
- # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes
- fig, axes = self.plt.subplots(2, 2)
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", UserWarning)
- df = DataFrame(np.random.rand(10, 4), index=list(string.ascii_letters[:10]))
-
- returned = df.plot(
- subplots=True, ax=axes, layout=(2, 1), sharex=False, sharey=False
- )
- self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
- assert returned.shape == (4,)
-
- returned = df.plot(
- subplots=True, ax=axes, layout=(2, -1), sharex=False, sharey=False
- )
- self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
- assert returned.shape == (4,)
-
- returned = df.plot(
- subplots=True, ax=axes, layout=(-1, 2), sharex=False, sharey=False
- )
- self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
- assert returned.shape == (4,)
-
- # single column
- fig, axes = self.plt.subplots(1, 1)
- df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
-
- axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
- self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
- assert axes.shape == (1,)
-
- def test_subplots_ts_share_axes(self):
- # GH 3964
- fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
- self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
- df = DataFrame(
- np.random.randn(10, 9),
- index=date_range(start="2014-07-01", freq="M", periods=10),
- )
- for i, ax in enumerate(axes.ravel()):
- df[i].plot(ax=ax, fontsize=5)
-
- # Rows other than bottom should not be visible
- for ax in axes[0:-1].ravel():
- self._check_visible(ax.get_xticklabels(), visible=False)
-
- # Bottom row should be visible
- for ax in axes[-1].ravel():
- self._check_visible(ax.get_xticklabels(), visible=True)
-
- # First column should be visible
- for ax in axes[[0, 1, 2], [0]].ravel():
- self._check_visible(ax.get_yticklabels(), visible=True)
-
- # Other columns should not be visible
- for ax in axes[[0, 1, 2], [1]].ravel():
- self._check_visible(ax.get_yticklabels(), visible=False)
- for ax in axes[[0, 1, 2], [2]].ravel():
- self._check_visible(ax.get_yticklabels(), visible=False)
-
- def test_subplots_sharex_axes_existing_axes(self):
- # GH 9158
- d = {"A": [1.0, 2.0, 3.0, 4.0], "B": [4.0, 3.0, 2.0, 1.0], "C": [5, 1, 3, 4]}
- df = DataFrame(d, index=date_range("2014 10 11", "2014 10 14"))
-
- axes = df[["A", "B"]].plot(subplots=True)
- df["C"].plot(ax=axes[0], secondary_y=True)
-
- self._check_visible(axes[0].get_xticklabels(), visible=False)
- self._check_visible(axes[1].get_xticklabels(), visible=True)
- for ax in axes.ravel():
- self._check_visible(ax.get_yticklabels(), visible=True)
-
- @pytest.mark.slow
- def test_subplots_dup_columns(self):
- # GH 10962
- df = DataFrame(np.random.rand(5, 5), columns=list("aaaaa"))
- axes = df.plot(subplots=True)
- for ax in axes:
- self._check_legend_labels(ax, labels=["a"])
- assert len(ax.lines) == 1
- tm.close()
-
- axes = df.plot(subplots=True, secondary_y="a")
- for ax in axes:
- # (right) is only attached when subplots=False
- self._check_legend_labels(ax, labels=["a"])
- assert len(ax.lines) == 1
- tm.close()
-
- ax = df.plot(secondary_y="a")
- self._check_legend_labels(ax, labels=["a (right)"] * 5)
- assert len(ax.lines) == 0
- assert len(ax.right_ax.lines) == 5
-
def test_negative_log(self):
df = -DataFrame(
np.random.rand(6, 4),
@@ -952,60 +477,6 @@ def test_area_lim(self):
ymin, ymax = ax.get_ylim()
assert ymax == 0
- @pytest.mark.slow
- def test_bar_colors(self):
- import matplotlib.pyplot as plt
-
- default_colors = self._unpack_cycler(plt.rcParams)
-
- df = DataFrame(np.random.randn(5, 5))
- ax = df.plot.bar()
- self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
- tm.close()
-
- custom_colors = "rgcby"
- ax = df.plot.bar(color=custom_colors)
- self._check_colors(ax.patches[::5], facecolors=custom_colors)
- tm.close()
-
- from matplotlib import cm
-
- # Test str -> colormap functionality
- ax = df.plot.bar(colormap="jet")
- rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
- self._check_colors(ax.patches[::5], facecolors=rgba_colors)
- tm.close()
-
- # Test colormap functionality
- ax = df.plot.bar(colormap=cm.jet)
- rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
- self._check_colors(ax.patches[::5], facecolors=rgba_colors)
- tm.close()
-
- ax = df.loc[:, [0]].plot.bar(color="DodgerBlue")
- self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
- tm.close()
-
- ax = df.plot(kind="bar", color="green")
- self._check_colors(ax.patches[::5], facecolors=["green"] * 5)
- tm.close()
-
- def test_bar_user_colors(self):
- df = DataFrame(
- {"A": range(4), "B": range(1, 5), "color": ["red", "blue", "blue", "red"]}
- )
- # This should *only* work when `y` is specified, else
- # we use one color per column
- ax = df.plot.bar(y="A", color=df["color"])
- result = [p.get_facecolor() for p in ax.patches]
- expected = [
- (1.0, 0.0, 0.0, 1.0),
- (0.0, 0.0, 1.0, 1.0),
- (0.0, 0.0, 1.0, 1.0),
- (1.0, 0.0, 0.0, 1.0),
- ]
- assert result == expected
-
@pytest.mark.slow
def test_bar_linewidth(self):
df = DataFrame(np.random.randn(5, 5))
@@ -1065,42 +536,6 @@ def test_bar_barwidth(self):
for r in ax.patches:
assert r.get_height() == width
- @pytest.mark.slow
- @pytest.mark.parametrize(
- "kwargs",
- [
- {"kind": "bar", "stacked": False},
- {"kind": "bar", "stacked": True},
- {"kind": "barh", "stacked": False},
- {"kind": "barh", "stacked": True},
- {"kind": "bar", "subplots": True},
- {"kind": "barh", "subplots": True},
- ],
- )
- def test_bar_barwidth_position(self, kwargs):
- df = DataFrame(np.random.randn(5, 5))
- self._check_bar_alignment(df, width=0.9, position=0.2, **kwargs)
-
- @pytest.mark.slow
- def test_bar_barwidth_position_int(self):
- # GH 12979
- df = DataFrame(np.random.randn(5, 5))
-
- for w in [1, 1.0]:
- ax = df.plot.bar(stacked=True, width=w)
- ticks = ax.xaxis.get_ticklocs()
- tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))
- assert ax.get_xlim() == (-0.75, 4.75)
- # check left-edge of bars
- assert ax.patches[0].get_x() == -0.5
- assert ax.patches[-1].get_x() == 3.5
-
- self._check_bar_alignment(df, kind="bar", stacked=True, width=1)
- self._check_bar_alignment(df, kind="barh", stacked=False, width=1)
- self._check_bar_alignment(df, kind="barh", stacked=True, width=1)
- self._check_bar_alignment(df, kind="bar", subplots=True, width=1)
- self._check_bar_alignment(df, kind="barh", subplots=True, width=1)
-
@pytest.mark.slow
def test_bar_bottom_left(self):
df = DataFrame(np.random.rand(5, 5))
@@ -1226,60 +661,6 @@ def test_scatterplot_object_data(self):
_check_plot_works(df.plot.scatter, x="a", y="b")
_check_plot_works(df.plot.scatter, x=0, y=1)
- @pytest.mark.slow
- def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
- # addressing issue #10611, to ensure colobar does not
- # interfere with x-axis label and ticklabels with
- # ipython inline backend.
- random_array = np.random.random((1000, 3))
- df = DataFrame(random_array, columns=["A label", "B label", "C label"])
-
- ax1 = df.plot.scatter(x="A label", y="B label")
- ax2 = df.plot.scatter(x="A label", y="B label", c="C label")
-
- vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()]
- vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()]
- assert vis1 == vis2
-
- vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()]
- vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()]
- assert vis1 == vis2
-
- assert (
- ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible()
- )
-
- @pytest.mark.slow
- def test_if_hexbin_xaxis_label_is_visible(self):
- # addressing issue #10678, to ensure colobar does not
- # interfere with x-axis label and ticklabels with
- # ipython inline backend.
- random_array = np.random.random((1000, 3))
- df = DataFrame(random_array, columns=["A label", "B label", "C label"])
-
- ax = df.plot.hexbin("A label", "B label", gridsize=12)
- assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels())
- assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels())
- assert ax.xaxis.get_label().get_visible()
-
- @pytest.mark.slow
- def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
- import matplotlib.pyplot as plt
-
- random_array = np.random.random((1000, 3))
- df = DataFrame(random_array, columns=["A label", "B label", "C label"])
-
- fig, axes = plt.subplots(1, 2)
- df.plot.scatter("A label", "B label", c="C label", ax=axes[0])
- df.plot.scatter("A label", "B label", c="C label", ax=axes[1])
- plt.tight_layout()
-
- points = np.array([ax.get_position().get_points() for ax in fig.axes])
- axes_x_coords = points[:, :, 0]
- parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :]
- colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :]
- assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all()
-
@pytest.mark.parametrize("x, y", [("x", "y"), ("y", "x"), ("y", "y")])
@pytest.mark.slow
def test_plot_scatter_with_categorical_data(self, x, y):
@@ -1340,17 +721,6 @@ def test_plot_scatter_with_c(self):
float_array = np.array([0.0, 1.0])
df.plot.scatter(x="A", y="B", c=float_array, cmap="spring")
- @pytest.mark.parametrize("cmap", [None, "Greys"])
- def test_scatter_with_c_column_name_with_colors(self, cmap):
- # https://github.com/pandas-dev/pandas/issues/34316
- df = DataFrame(
- [[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]],
- columns=["length", "width"],
- )
- df["species"] = ["r", "r", "g", "g", "b"]
- ax = df.plot.scatter(x=0, y=1, c="species", cmap=cmap)
- assert ax.collections[0].colorbar is None
-
def test_plot_scatter_with_s(self):
# this refers to GH 32904
df = DataFrame(np.random.random((10, 3)) * 100, columns=["a", "b", "c"])
@@ -1358,39 +728,6 @@ def test_plot_scatter_with_s(self):
ax = df.plot.scatter(x="a", y="b", s="c")
tm.assert_numpy_array_equal(df["c"].values, right=ax.collections[0].get_sizes())
- def test_scatter_colors(self):
- df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]})
- with pytest.raises(TypeError):
- df.plot.scatter(x="a", y="b", c="c", color="green")
-
- default_colors = self._unpack_cycler(self.plt.rcParams)
-
- ax = df.plot.scatter(x="a", y="b", c="c")
- tm.assert_numpy_array_equal(
- ax.collections[0].get_facecolor()[0],
- np.array(self.colorconverter.to_rgba(default_colors[0])),
- )
-
- ax = df.plot.scatter(x="a", y="b", color="white")
- tm.assert_numpy_array_equal(
- ax.collections[0].get_facecolor()[0],
- np.array([1, 1, 1, 1], dtype=np.float64),
- )
-
- def test_scatter_colorbar_different_cmap(self):
- # GH 33389
- import matplotlib.pyplot as plt
-
- df = DataFrame({"x": [1, 2, 3], "y": [1, 3, 2], "c": [1, 2, 3]})
- df["x2"] = df["x"] + 1
-
- fig, ax = plt.subplots()
- df.plot("x", "y", c="c", kind="scatter", cmap="cividis", ax=ax)
- df.plot("x2", "y", c="c", kind="scatter", cmap="magma", ax=ax)
-
- assert ax.collections[0].cmap.name == "cividis"
- assert ax.collections[1].cmap.name == "magma"
-
@pytest.mark.slow
def test_plot_bar(self):
df = DataFrame(
@@ -1424,155 +761,6 @@ def test_plot_bar(self):
ax = df.plot.barh(rot=55, fontsize=11)
self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
- def _check_bar_alignment(
- self,
- df,
- kind="bar",
- stacked=False,
- subplots=False,
- align="center",
- width=0.5,
- position=0.5,
- ):
-
- axes = df.plot(
- kind=kind,
- stacked=stacked,
- subplots=subplots,
- align=align,
- width=width,
- position=position,
- grid=True,
- )
-
- axes = self._flatten_visible(axes)
-
- for ax in axes:
- if kind == "bar":
- axis = ax.xaxis
- ax_min, ax_max = ax.get_xlim()
- min_edge = min(p.get_x() for p in ax.patches)
- max_edge = max(p.get_x() + p.get_width() for p in ax.patches)
- elif kind == "barh":
- axis = ax.yaxis
- ax_min, ax_max = ax.get_ylim()
- min_edge = min(p.get_y() for p in ax.patches)
- max_edge = max(p.get_y() + p.get_height() for p in ax.patches)
- else:
- raise ValueError
-
- # GH 7498
- # compare margins between lim and bar edges
- tm.assert_almost_equal(ax_min, min_edge - 0.25)
- tm.assert_almost_equal(ax_max, max_edge + 0.25)
-
- p = ax.patches[0]
- if kind == "bar" and (stacked is True or subplots is True):
- edge = p.get_x()
- center = edge + p.get_width() * position
- elif kind == "bar" and stacked is False:
- center = p.get_x() + p.get_width() * len(df.columns) * position
- edge = p.get_x()
- elif kind == "barh" and (stacked is True or subplots is True):
- center = p.get_y() + p.get_height() * position
- edge = p.get_y()
- elif kind == "barh" and stacked is False:
- center = p.get_y() + p.get_height() * len(df.columns) * position
- edge = p.get_y()
- else:
- raise ValueError
-
- # Check the ticks locates on integer
- assert (axis.get_ticklocs() == np.arange(len(df))).all()
-
- if align == "center":
- # Check whether the bar locates on center
- tm.assert_almost_equal(axis.get_ticklocs()[0], center)
- elif align == "edge":
- # Check whether the bar's edge starts from the tick
- tm.assert_almost_equal(axis.get_ticklocs()[0], edge)
- else:
- raise ValueError
-
- return axes
-
- @pytest.mark.slow
- @pytest.mark.parametrize(
- "kwargs",
- [
- # stacked center
- dict(kind="bar", stacked=True),
- dict(kind="bar", stacked=True, width=0.9),
- dict(kind="barh", stacked=True),
- dict(kind="barh", stacked=True, width=0.9),
- # center
- dict(kind="bar", stacked=False),
- dict(kind="bar", stacked=False, width=0.9),
- dict(kind="barh", stacked=False),
- dict(kind="barh", stacked=False, width=0.9),
- # subplots center
- dict(kind="bar", subplots=True),
- dict(kind="bar", subplots=True, width=0.9),
- dict(kind="barh", subplots=True),
- dict(kind="barh", subplots=True, width=0.9),
- # align edge
- dict(kind="bar", stacked=True, align="edge"),
- dict(kind="bar", stacked=True, width=0.9, align="edge"),
- dict(kind="barh", stacked=True, align="edge"),
- dict(kind="barh", stacked=True, width=0.9, align="edge"),
- dict(kind="bar", stacked=False, align="edge"),
- dict(kind="bar", stacked=False, width=0.9, align="edge"),
- dict(kind="barh", stacked=False, align="edge"),
- dict(kind="barh", stacked=False, width=0.9, align="edge"),
- dict(kind="bar", subplots=True, align="edge"),
- dict(kind="bar", subplots=True, width=0.9, align="edge"),
- dict(kind="barh", subplots=True, align="edge"),
- dict(kind="barh", subplots=True, width=0.9, align="edge"),
- ],
- )
- def test_bar_align_multiple_columns(self, kwargs):
- # GH2157
- df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
- self._check_bar_alignment(df, **kwargs)
-
- @pytest.mark.slow
- @pytest.mark.parametrize(
- "kwargs",
- [
- dict(kind="bar", stacked=False),
- dict(kind="bar", stacked=True),
- dict(kind="barh", stacked=False),
- dict(kind="barh", stacked=True),
- dict(kind="bar", subplots=True),
- dict(kind="barh", subplots=True),
- ],
- )
- def test_bar_align_single_column(self, kwargs):
- df = DataFrame(np.random.randn(5))
- self._check_bar_alignment(df, **kwargs)
-
- @pytest.mark.slow
- def test_bar_log_no_subplots(self):
- # GH3254, GH3298 matplotlib/matplotlib#1882, #1892
- # regressions in 1.2.1
- expected = np.array([0.1, 1.0, 10.0, 100])
-
- # no subplots
- df = DataFrame({"A": [3] * 5, "B": list(range(1, 6))}, index=range(5))
- ax = df.plot.bar(grid=True, log=True)
- tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
-
- @pytest.mark.slow
- def test_bar_log_subplots(self):
- expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4])
-
- ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
- log=True, subplots=True
- )
-
- tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
- tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
-
@pytest.mark.slow
def test_boxplot(self):
df = self.hist_df
@@ -1655,26 +843,6 @@ def test_boxplot_return_type(self):
result = df.plot.box(return_type="both")
self._check_box_return_type(result, "both")
- @pytest.mark.slow
- def test_boxplot_subplots_return_type(self):
- df = self.hist_df
-
- # normal style: return_type=None
- result = df.plot.box(subplots=True)
- assert isinstance(result, Series)
- self._check_box_return_type(
- result, None, expected_keys=["height", "weight", "category"]
- )
-
- for t in ["dict", "axes", "both"]:
- returned = df.plot.box(return_type=t, subplots=True)
- self._check_box_return_type(
- returned,
- t,
- expected_keys=["height", "weight", "category"],
- check_ax_title=False,
- )
-
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_df(self):
@@ -2075,352 +1243,6 @@ def test_line_label_none(self):
ax = s.plot(legend=True)
assert ax.get_legend().get_texts()[0].get_text() == "None"
- @pytest.mark.slow
- def test_line_colors(self):
- from matplotlib import cm
-
- custom_colors = "rgcby"
- df = DataFrame(np.random.randn(5, 5))
-
- ax = df.plot(color=custom_colors)
- self._check_colors(ax.get_lines(), linecolors=custom_colors)
-
- tm.close()
-
- ax2 = df.plot(color=custom_colors)
- lines2 = ax2.get_lines()
-
- for l1, l2 in zip(ax.get_lines(), lines2):
- assert l1.get_color() == l2.get_color()
-
- tm.close()
-
- ax = df.plot(colormap="jet")
- rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
- self._check_colors(ax.get_lines(), linecolors=rgba_colors)
- tm.close()
-
- ax = df.plot(colormap=cm.jet)
- rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
- self._check_colors(ax.get_lines(), linecolors=rgba_colors)
- tm.close()
-
- # make color a list if plotting one column frame
- # handles cases like df.plot(color='DodgerBlue')
- ax = df.loc[:, [0]].plot(color="DodgerBlue")
- self._check_colors(ax.lines, linecolors=["DodgerBlue"])
-
- ax = df.plot(color="red")
- self._check_colors(ax.get_lines(), linecolors=["red"] * 5)
- tm.close()
-
- # GH 10299
- custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
- ax = df.plot(color=custom_colors)
- self._check_colors(ax.get_lines(), linecolors=custom_colors)
- tm.close()
-
- @pytest.mark.slow
- def test_dont_modify_colors(self):
- colors = ["r", "g", "b"]
- DataFrame(np.random.rand(10, 2)).plot(color=colors)
- assert len(colors) == 3
-
- @pytest.mark.slow
- def test_line_colors_and_styles_subplots(self):
- # GH 9894
- from matplotlib import cm
-
- default_colors = self._unpack_cycler(self.plt.rcParams)
-
- df = DataFrame(np.random.randn(5, 5))
-
- axes = df.plot(subplots=True)
- for ax, c in zip(axes, list(default_colors)):
- self._check_colors(ax.get_lines(), linecolors=[c])
- tm.close()
-
- # single color char
- axes = df.plot(subplots=True, color="k")
- for ax in axes:
- self._check_colors(ax.get_lines(), linecolors=["k"])
- tm.close()
-
- # single color str
- axes = df.plot(subplots=True, color="green")
- for ax in axes:
- self._check_colors(ax.get_lines(), linecolors=["green"])
- tm.close()
-
- custom_colors = "rgcby"
- axes = df.plot(color=custom_colors, subplots=True)
- for ax, c in zip(axes, list(custom_colors)):
- self._check_colors(ax.get_lines(), linecolors=[c])
- tm.close()
-
- axes = df.plot(color=list(custom_colors), subplots=True)
- for ax, c in zip(axes, list(custom_colors)):
- self._check_colors(ax.get_lines(), linecolors=[c])
- tm.close()
-
- # GH 10299
- custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
- axes = df.plot(color=custom_colors, subplots=True)
- for ax, c in zip(axes, list(custom_colors)):
- self._check_colors(ax.get_lines(), linecolors=[c])
- tm.close()
-
- rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
- for cmap in ["jet", cm.jet]:
- axes = df.plot(colormap=cmap, subplots=True)
- for ax, c in zip(axes, rgba_colors):
- self._check_colors(ax.get_lines(), linecolors=[c])
- tm.close()
-
- # make color a list if plotting one column frame
- # handles cases like df.plot(color='DodgerBlue')
- axes = df.loc[:, [0]].plot(color="DodgerBlue", subplots=True)
- self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])
-
- # single character style
- axes = df.plot(style="r", subplots=True)
- for ax in axes:
- self._check_colors(ax.get_lines(), linecolors=["r"])
- tm.close()
-
- # list of styles
- styles = list("rgcby")
- axes = df.plot(style=styles, subplots=True)
- for ax, c in zip(axes, styles):
- self._check_colors(ax.get_lines(), linecolors=[c])
- tm.close()
-
- @pytest.mark.slow
- def test_area_colors(self):
- from matplotlib import cm
- from matplotlib.collections import PolyCollection
-
- custom_colors = "rgcby"
- df = DataFrame(np.random.rand(5, 5))
-
- ax = df.plot.area(color=custom_colors)
- self._check_colors(ax.get_lines(), linecolors=custom_colors)
- poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
- self._check_colors(poly, facecolors=custom_colors)
-
- handles, labels = ax.get_legend_handles_labels()
- self._check_colors(handles, facecolors=custom_colors)
-
- for h in handles:
- assert h.get_alpha() is None
- tm.close()
-
- ax = df.plot.area(colormap="jet")
- jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
- self._check_colors(ax.get_lines(), linecolors=jet_colors)
- poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
- self._check_colors(poly, facecolors=jet_colors)
-
- handles, labels = ax.get_legend_handles_labels()
- self._check_colors(handles, facecolors=jet_colors)
- for h in handles:
- assert h.get_alpha() is None
- tm.close()
-
- # When stacked=False, alpha is set to 0.5
- ax = df.plot.area(colormap=cm.jet, stacked=False)
- self._check_colors(ax.get_lines(), linecolors=jet_colors)
- poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
- jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
- self._check_colors(poly, facecolors=jet_with_alpha)
-
- handles, labels = ax.get_legend_handles_labels()
- linecolors = jet_with_alpha
- self._check_colors(handles[: len(jet_colors)], linecolors=linecolors)
- for h in handles:
- assert h.get_alpha() == 0.5
-
- @pytest.mark.slow
- def test_hist_colors(self):
- default_colors = self._unpack_cycler(self.plt.rcParams)
-
- df = DataFrame(np.random.randn(5, 5))
- ax = df.plot.hist()
- self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
- tm.close()
-
- custom_colors = "rgcby"
- ax = df.plot.hist(color=custom_colors)
- self._check_colors(ax.patches[::10], facecolors=custom_colors)
- tm.close()
-
- from matplotlib import cm
-
- # Test str -> colormap functionality
- ax = df.plot.hist(colormap="jet")
- rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
- self._check_colors(ax.patches[::10], facecolors=rgba_colors)
- tm.close()
-
- # Test colormap functionality
- ax = df.plot.hist(colormap=cm.jet)
- rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
- self._check_colors(ax.patches[::10], facecolors=rgba_colors)
- tm.close()
-
- ax = df.loc[:, [0]].plot.hist(color="DodgerBlue")
- self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
-
- ax = df.plot(kind="hist", color="green")
- self._check_colors(ax.patches[::10], facecolors=["green"] * 5)
- tm.close()
-
- @pytest.mark.slow
- @td.skip_if_no_scipy
- def test_kde_colors(self):
- from matplotlib import cm
-
- custom_colors = "rgcby"
- df = DataFrame(np.random.rand(5, 5))
-
- ax = df.plot.kde(color=custom_colors)
- self._check_colors(ax.get_lines(), linecolors=custom_colors)
- tm.close()
-
- ax = df.plot.kde(colormap="jet")
- rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
- self._check_colors(ax.get_lines(), linecolors=rgba_colors)
- tm.close()
-
- ax = df.plot.kde(colormap=cm.jet)
- rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
- self._check_colors(ax.get_lines(), linecolors=rgba_colors)
-
- @pytest.mark.slow
- @td.skip_if_no_scipy
- def test_kde_colors_and_styles_subplots(self):
- from matplotlib import cm
-
- default_colors = self._unpack_cycler(self.plt.rcParams)
-
- df = DataFrame(np.random.randn(5, 5))
-
- axes = df.plot(kind="kde", subplots=True)
- for ax, c in zip(axes, list(default_colors)):
- self._check_colors(ax.get_lines(), linecolors=[c])
- tm.close()
-
- # single color char
- axes = df.plot(kind="kde", color="k", subplots=True)
- for ax in axes:
- self._check_colors(ax.get_lines(), linecolors=["k"])
- tm.close()
-
- # single color str
- axes = df.plot(kind="kde", color="red", subplots=True)
- for ax in axes:
- self._check_colors(ax.get_lines(), linecolors=["red"])
- tm.close()
-
- custom_colors = "rgcby"
- axes = df.plot(kind="kde", color=custom_colors, subplots=True)
- for ax, c in zip(axes, list(custom_colors)):
- self._check_colors(ax.get_lines(), linecolors=[c])
- tm.close()
-
- rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
- for cmap in ["jet", cm.jet]:
- axes = df.plot(kind="kde", colormap=cmap, subplots=True)
- for ax, c in zip(axes, rgba_colors):
- self._check_colors(ax.get_lines(), linecolors=[c])
- tm.close()
-
- # make color a list if plotting one column frame
- # handles cases like df.plot(color='DodgerBlue')
- axes = df.loc[:, [0]].plot(kind="kde", color="DodgerBlue", subplots=True)
- self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])
-
- # single character style
- axes = df.plot(kind="kde", style="r", subplots=True)
- for ax in axes:
- self._check_colors(ax.get_lines(), linecolors=["r"])
- tm.close()
-
- # list of styles
- styles = list("rgcby")
- axes = df.plot(kind="kde", style=styles, subplots=True)
- for ax, c in zip(axes, styles):
- self._check_colors(ax.get_lines(), linecolors=[c])
- tm.close()
-
- @pytest.mark.slow
- def test_boxplot_colors(self):
- def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
- # TODO: outside this func?
- if fliers_c is None:
- fliers_c = "k"
- self._check_colors(bp["boxes"], linecolors=[box_c] * len(bp["boxes"]))
- self._check_colors(
- bp["whiskers"], linecolors=[whiskers_c] * len(bp["whiskers"])
- )
- self._check_colors(
- bp["medians"], linecolors=[medians_c] * len(bp["medians"])
- )
- self._check_colors(bp["fliers"], linecolors=[fliers_c] * len(bp["fliers"]))
- self._check_colors(bp["caps"], linecolors=[caps_c] * len(bp["caps"]))
-
- default_colors = self._unpack_cycler(self.plt.rcParams)
-
- df = DataFrame(np.random.randn(5, 5))
- bp = df.plot.box(return_type="dict")
- _check_colors(bp, default_colors[0], default_colors[0], default_colors[2])
- tm.close()
-
- dict_colors = dict(
- boxes="#572923", whiskers="#982042", medians="#804823", caps="#123456"
- )
- bp = df.plot.box(color=dict_colors, sym="r+", return_type="dict")
- _check_colors(
- bp,
- dict_colors["boxes"],
- dict_colors["whiskers"],
- dict_colors["medians"],
- dict_colors["caps"],
- "r",
- )
- tm.close()
-
- # partial colors
- dict_colors = dict(whiskers="c", medians="m")
- bp = df.plot.box(color=dict_colors, return_type="dict")
- _check_colors(bp, default_colors[0], "c", "m")
- tm.close()
-
- from matplotlib import cm
-
- # Test str -> colormap functionality
- bp = df.plot.box(colormap="jet", return_type="dict")
- jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)]
- _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
- tm.close()
-
- # Test colormap functionality
- bp = df.plot.box(colormap=cm.jet, return_type="dict")
- _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
- tm.close()
-
- # string color is applied to all artists except fliers
- bp = df.plot.box(color="DodgerBlue", return_type="dict")
- _check_colors(bp, "DodgerBlue", "DodgerBlue", "DodgerBlue", "DodgerBlue")
-
- # tuple is also applied to all artists except fliers
- bp = df.plot.box(color=(0, 1, 0), sym="#123456", return_type="dict")
- _check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), "#123456")
-
- with pytest.raises(ValueError):
- # Color contains invalid key results in ValueError
- df.plot.box(color=dict(boxes="red", xxxx="blue"))
-
@pytest.mark.parametrize(
"props, expected",
[
@@ -2438,19 +1260,6 @@ def test_specified_props_kwd_plot_box(self, props, expected):
assert result[expected][0].get_color() == "C1"
- def test_default_color_cycle(self):
- import cycler
- import matplotlib.pyplot as plt
-
- colors = list("rgbk")
- plt.rcParams["axes.prop_cycle"] = cycler.cycler("color", colors)
-
- df = DataFrame(np.random.randn(5, 3))
- ax = df.plot()
-
- expected = self._unpack_cycler(plt.rcParams)[:3]
- self._check_colors(ax.get_lines(), linecolors=expected)
-
def test_unordered_ts(self):
df = DataFrame(
np.array([3.0, 2.0, 1.0]),
@@ -2592,19 +1401,6 @@ def test_hexbin_cmap(self, kwargs, expected):
ax = df.plot.hexbin(x="A", y="B", **kwargs)
assert ax.collections[0].cmap.name == expected
- @pytest.mark.slow
- def test_no_color_bar(self):
- df = self.hexbin_df
- ax = df.plot.hexbin(x="A", y="B", colorbar=None)
- assert ax.collections[0].colorbar is None
-
- @pytest.mark.slow
- def test_mixing_cmap_and_colormap_raises(self):
- df = self.hexbin_df
- msg = "Only specify one of `cmap` and `colormap`"
- with pytest.raises(TypeError, match=msg):
- df.plot.hexbin(x="A", y="B", cmap="YlGn", colormap="BuGn")
-
@pytest.mark.slow
def test_pie_df(self):
df = DataFrame(
@@ -3047,53 +1843,6 @@ def test_memory_leak(self):
# need to actually access something to get an error
results[key].lines
- @pytest.mark.slow
- def test_df_subplots_patterns_minorticks(self):
- # GH 10657
- import matplotlib.pyplot as plt
-
- df = DataFrame(
- np.random.randn(10, 2),
- index=date_range("1/1/2000", periods=10),
- columns=list("AB"),
- )
-
- # shared subplots
- fig, axes = plt.subplots(2, 1, sharex=True)
- axes = df.plot(subplots=True, ax=axes)
- for ax in axes:
- assert len(ax.lines) == 1
- self._check_visible(ax.get_yticklabels(), visible=True)
- # xaxis of 1st ax must be hidden
- self._check_visible(axes[0].get_xticklabels(), visible=False)
- self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
- self._check_visible(axes[1].get_xticklabels(), visible=True)
- self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
- tm.close()
-
- fig, axes = plt.subplots(2, 1)
- with tm.assert_produces_warning(UserWarning):
- axes = df.plot(subplots=True, ax=axes, sharex=True)
- for ax in axes:
- assert len(ax.lines) == 1
- self._check_visible(ax.get_yticklabels(), visible=True)
- # xaxis of 1st ax must be hidden
- self._check_visible(axes[0].get_xticklabels(), visible=False)
- self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
- self._check_visible(axes[1].get_xticklabels(), visible=True)
- self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
- tm.close()
-
- # not shared
- fig, axes = plt.subplots(2, 1)
- axes = df.plot(subplots=True, ax=axes)
- for ax in axes:
- assert len(ax.lines) == 1
- self._check_visible(ax.get_yticklabels(), visible=True)
- self._check_visible(ax.get_xticklabels(), visible=True)
- self._check_visible(ax.get_xticklabels(minor=True), visible=True)
- tm.close()
-
@pytest.mark.slow
def test_df_gridspec_patterns(self):
# GH 10819
@@ -3219,12 +1968,6 @@ def test_df_grid_settings(self):
kws={"x": "a", "y": "b"},
)
- def test_invalid_colormap(self):
- df = DataFrame(np.random.randn(3, 2), columns=["A", "B"])
-
- with pytest.raises(ValueError):
- df.plot(colormap="invalid_colormap")
-
def test_plain_axes(self):
# supplied ax itself is a SubplotAxes, but figure contains also
@@ -3256,22 +1999,6 @@ def test_plain_axes(self):
Series(np.random.rand(10)).plot(ax=ax)
Series(np.random.rand(10)).plot(ax=iax)
- def test_passed_bar_colors(self):
- import matplotlib as mpl
-
- color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
- colormap = mpl.colors.ListedColormap(color_tuples)
- barplot = DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap)
- assert color_tuples == [c.get_facecolor() for c in barplot.patches]
-
- def test_rcParams_bar_colors(self):
- import matplotlib as mpl
-
- color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
- with mpl.rc_context(rc={"axes.prop_cycle": mpl.cycler("color", color_tuples)}):
- barplot = DataFrame([[1, 2, 3]]).plot(kind="bar")
- assert color_tuples == [c.get_facecolor() for c in barplot.patches]
-
@pytest.mark.parametrize("method", ["line", "barh", "bar"])
def test_secondary_axis_font_size(self, method):
# GH: 12565
@@ -3360,22 +2087,6 @@ def test_xlim_plot_line_correctly_in_mixed_plot_type(self):
xticklabels = [t.get_text() for t in ax.get_xticklabels()]
assert xticklabels == indexes
- def test_subplots_sharex_false(self):
- # test when sharex is set to False, two plots should have different
- # labels, GH 25160
- df = DataFrame(np.random.rand(10, 2))
- df.iloc[5:, 1] = np.nan
- df.iloc[:5, 0] = np.nan
-
- figs, axs = self.plt.subplots(2, 1)
- df.plot.line(ax=axs, subplots=True, sharex=False)
-
- expected_ax1 = np.arange(4.5, 10, 0.5)
- expected_ax2 = np.arange(-0.5, 5, 0.5)
-
- tm.assert_numpy_array_equal(axs[0].get_xticks(), expected_ax1)
- tm.assert_numpy_array_equal(axs[1].get_xticks(), expected_ax2)
-
def test_plot_no_rows(self):
# GH 27758
df = DataFrame(columns=["foo"], dtype=int)
@@ -3419,16 +2130,6 @@ def test_missing_markers_legend_using_style(self):
self._check_legend_labels(ax, labels=["A", "B", "C"])
self._check_legend_marker(ax, expected_markers=[".", ".", "."])
- def test_colors_of_columns_with_same_name(self):
- # ISSUE 11136 -> https://github.com/pandas-dev/pandas/issues/11136
- # Creating a DataFrame with duplicate column labels and testing colors of them.
- df = DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]})
- df1 = DataFrame({"a": [2, 4, 6]})
- df_concat = pd.concat([df, df1], axis=1)
- result = df_concat.plot()
- for legend, line in zip(result.get_legend().legendHandles, result.lines):
- assert legend.get_color() == line.get_color()
-
@pytest.mark.parametrize(
"index_name, old_label, new_label",
[
@@ -3478,34 +2179,6 @@ def test_xlabel_ylabel_dataframe_plane_plot(self, kind, xlabel, ylabel):
assert ax.get_xlabel() == (xcol if xlabel is None else xlabel)
assert ax.get_ylabel() == (ycol if ylabel is None else ylabel)
- @pytest.mark.parametrize(
- "index_name, old_label, new_label",
- [
- (None, "", "new"),
- ("old", "old", "new"),
- (None, "", ""),
- (None, "", 1),
- (None, "", [1, 2]),
- ],
- )
- @pytest.mark.parametrize("kind", ["line", "area", "bar"])
- def test_xlabel_ylabel_dataframe_subplots(
- self, kind, index_name, old_label, new_label
- ):
- # GH 9093
- df = DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"])
- df.index.name = index_name
-
- # default is the ylabel is not shown and xlabel is index name
- axes = df.plot(kind=kind, subplots=True)
- assert all(ax.get_ylabel() == "" for ax in axes)
- assert all(ax.get_xlabel() == old_label for ax in axes)
-
- # old xlabel will be overriden and assigned ylabel will be used as ylabel
- axes = df.plot(kind=kind, ylabel=new_label, xlabel=new_label, subplots=True)
- assert all(ax.get_ylabel() == str(new_label) for ax in axes)
- assert all(ax.get_xlabel() == str(new_label) for ax in axes)
-
def _generate_4_axes_via_gridspec():
import matplotlib as mpl
diff --git a/pandas/tests/plotting/frame/test_frame_color.py b/pandas/tests/plotting/frame/test_frame_color.py
new file mode 100644
index 0000000000000..d9fe7363a15ad
--- /dev/null
+++ b/pandas/tests/plotting/frame/test_frame_color.py
@@ -0,0 +1,655 @@
+""" Test cases for DataFrame.plot """
+
+import warnings
+
+import numpy as np
+import pytest
+
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import DataFrame
+import pandas._testing as tm
+from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
+
+
+@td.skip_if_no_mpl
+class TestDataFrameColor(TestPlotBase):
+ def setup_method(self, method):
+ TestPlotBase.setup_method(self, method)
+ import matplotlib as mpl
+
+ mpl.rcdefaults()
+
+ self.tdf = tm.makeTimeDataFrame()
+ self.hexbin_df = DataFrame(
+ {
+ "A": np.random.uniform(size=20),
+ "B": np.random.uniform(size=20),
+ "C": np.arange(20) + np.random.uniform(size=20),
+ }
+ )
+
+ def test_mpl2_color_cycle_str(self):
+ # GH 15516
+ df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
+ colors = ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"]
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always", "MatplotlibDeprecationWarning")
+
+ for color in colors:
+ _check_plot_works(df.plot, color=color)
+
+ # if warning is raised, check that it is the exact problematic one
+ # GH 36972
+ if w:
+ match = "Support for uppercase single-letter colors is deprecated"
+ warning_message = str(w[0].message)
+ msg = "MatplotlibDeprecationWarning related to CN colors was raised"
+ assert match not in warning_message, msg
+
+ def test_color_single_series_list(self):
+ # GH 3486
+ df = DataFrame({"A": [1, 2, 3]})
+ _check_plot_works(df.plot, color=["red"])
+
+ @pytest.mark.parametrize("color", [(1, 0, 0), (1, 0, 0, 0.5)])
+ def test_rgb_tuple_color(self, color):
+ # GH 16695
+ df = DataFrame({"x": [1, 2], "y": [3, 4]})
+ _check_plot_works(df.plot, x="x", y="y", color=color)
+
+ def test_color_empty_string(self):
+ df = DataFrame(np.random.randn(10, 2))
+ with pytest.raises(ValueError):
+ df.plot(color="")
+
+ def test_color_and_style_arguments(self):
+ df = DataFrame({"x": [1, 2], "y": [3, 4]})
+ # passing both 'color' and 'style' arguments should be allowed
+ # if there is no color symbol in the style strings:
+ ax = df.plot(color=["red", "black"], style=["-", "--"])
+ # check that the linestyles are correctly set:
+ linestyle = [line.get_linestyle() for line in ax.lines]
+ assert linestyle == ["-", "--"]
+ # check that the colors are correctly set:
+ color = [line.get_color() for line in ax.lines]
+ assert color == ["red", "black"]
+ # passing both 'color' and 'style' arguments should not be allowed
+ # if there is a color symbol in the style strings:
+ with pytest.raises(ValueError):
+ df.plot(color=["red", "black"], style=["k-", "r--"])
+
+ @pytest.mark.parametrize(
+ "color, expected",
+ [
+ ("green", ["green"] * 4),
+ (["yellow", "red", "green", "blue"], ["yellow", "red", "green", "blue"]),
+ ],
+ )
+ def test_color_and_marker(self, color, expected):
+ # GH 21003
+ df = DataFrame(np.random.random((7, 4)))
+ ax = df.plot(color=color, style="d--")
+ # check colors
+ result = [i.get_color() for i in ax.lines]
+ assert result == expected
+ # check markers and linestyles
+ assert all(i.get_linestyle() == "--" for i in ax.lines)
+ assert all(i.get_marker() == "d" for i in ax.lines)
+
+ @pytest.mark.slow
+ def test_bar_colors(self):
+ import matplotlib.pyplot as plt
+
+ default_colors = self._unpack_cycler(plt.rcParams)
+
+ df = DataFrame(np.random.randn(5, 5))
+ ax = df.plot.bar()
+ self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
+ tm.close()
+
+ custom_colors = "rgcby"
+ ax = df.plot.bar(color=custom_colors)
+ self._check_colors(ax.patches[::5], facecolors=custom_colors)
+ tm.close()
+
+ from matplotlib import cm
+
+ # Test str -> colormap functionality
+ ax = df.plot.bar(colormap="jet")
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
+ self._check_colors(ax.patches[::5], facecolors=rgba_colors)
+ tm.close()
+
+ # Test colormap functionality
+ ax = df.plot.bar(colormap=cm.jet)
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
+ self._check_colors(ax.patches[::5], facecolors=rgba_colors)
+ tm.close()
+
+ ax = df.loc[:, [0]].plot.bar(color="DodgerBlue")
+ self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
+ tm.close()
+
+ ax = df.plot(kind="bar", color="green")
+ self._check_colors(ax.patches[::5], facecolors=["green"] * 5)
+ tm.close()
+
+ def test_bar_user_colors(self):
+ df = DataFrame(
+ {"A": range(4), "B": range(1, 5), "color": ["red", "blue", "blue", "red"]}
+ )
+ # This should *only* work when `y` is specified, else
+ # we use one color per column
+ ax = df.plot.bar(y="A", color=df["color"])
+ result = [p.get_facecolor() for p in ax.patches]
+ expected = [
+ (1.0, 0.0, 0.0, 1.0),
+ (0.0, 0.0, 1.0, 1.0),
+ (0.0, 0.0, 1.0, 1.0),
+ (1.0, 0.0, 0.0, 1.0),
+ ]
+ assert result == expected
+
+ @pytest.mark.slow
+ def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
+ # addressing issue #10611, to ensure colobar does not
+ # interfere with x-axis label and ticklabels with
+ # ipython inline backend.
+ random_array = np.random.random((1000, 3))
+ df = DataFrame(random_array, columns=["A label", "B label", "C label"])
+
+ ax1 = df.plot.scatter(x="A label", y="B label")
+ ax2 = df.plot.scatter(x="A label", y="B label", c="C label")
+
+ vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()]
+ vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()]
+ assert vis1 == vis2
+
+ vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()]
+ vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()]
+ assert vis1 == vis2
+
+ assert (
+ ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible()
+ )
+
+ @pytest.mark.slow
+ def test_if_hexbin_xaxis_label_is_visible(self):
+ # addressing issue #10678, to ensure colobar does not
+ # interfere with x-axis label and ticklabels with
+ # ipython inline backend.
+ random_array = np.random.random((1000, 3))
+ df = DataFrame(random_array, columns=["A label", "B label", "C label"])
+
+ ax = df.plot.hexbin("A label", "B label", gridsize=12)
+ assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels())
+ assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels())
+ assert ax.xaxis.get_label().get_visible()
+
+ @pytest.mark.slow
+ def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
+ import matplotlib.pyplot as plt
+
+ random_array = np.random.random((1000, 3))
+ df = DataFrame(random_array, columns=["A label", "B label", "C label"])
+
+ fig, axes = plt.subplots(1, 2)
+ df.plot.scatter("A label", "B label", c="C label", ax=axes[0])
+ df.plot.scatter("A label", "B label", c="C label", ax=axes[1])
+ plt.tight_layout()
+
+ points = np.array([ax.get_position().get_points() for ax in fig.axes])
+ axes_x_coords = points[:, :, 0]
+ parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :]
+ colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :]
+ assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all()
+
+ @pytest.mark.parametrize("cmap", [None, "Greys"])
+ def test_scatter_with_c_column_name_with_colors(self, cmap):
+ # https://github.com/pandas-dev/pandas/issues/34316
+ df = DataFrame(
+ [[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]],
+ columns=["length", "width"],
+ )
+ df["species"] = ["r", "r", "g", "g", "b"]
+ ax = df.plot.scatter(x=0, y=1, c="species", cmap=cmap)
+ assert ax.collections[0].colorbar is None
+
+ def test_scatter_colors(self):
+ df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]})
+ with pytest.raises(TypeError):
+ df.plot.scatter(x="a", y="b", c="c", color="green")
+
+ default_colors = self._unpack_cycler(self.plt.rcParams)
+
+ ax = df.plot.scatter(x="a", y="b", c="c")
+ tm.assert_numpy_array_equal(
+ ax.collections[0].get_facecolor()[0],
+ np.array(self.colorconverter.to_rgba(default_colors[0])),
+ )
+
+ ax = df.plot.scatter(x="a", y="b", color="white")
+ tm.assert_numpy_array_equal(
+ ax.collections[0].get_facecolor()[0],
+ np.array([1, 1, 1, 1], dtype=np.float64),
+ )
+
+ def test_scatter_colorbar_different_cmap(self):
+ # GH 33389
+ import matplotlib.pyplot as plt
+
+ df = DataFrame({"x": [1, 2, 3], "y": [1, 3, 2], "c": [1, 2, 3]})
+ df["x2"] = df["x"] + 1
+
+ fig, ax = plt.subplots()
+ df.plot("x", "y", c="c", kind="scatter", cmap="cividis", ax=ax)
+ df.plot("x2", "y", c="c", kind="scatter", cmap="magma", ax=ax)
+
+ assert ax.collections[0].cmap.name == "cividis"
+ assert ax.collections[1].cmap.name == "magma"
+
+ @pytest.mark.slow
+ def test_line_colors(self):
+ from matplotlib import cm
+
+ custom_colors = "rgcby"
+ df = DataFrame(np.random.randn(5, 5))
+
+ ax = df.plot(color=custom_colors)
+ self._check_colors(ax.get_lines(), linecolors=custom_colors)
+
+ tm.close()
+
+ ax2 = df.plot(color=custom_colors)
+ lines2 = ax2.get_lines()
+
+ for l1, l2 in zip(ax.get_lines(), lines2):
+ assert l1.get_color() == l2.get_color()
+
+ tm.close()
+
+ ax = df.plot(colormap="jet")
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
+ self._check_colors(ax.get_lines(), linecolors=rgba_colors)
+ tm.close()
+
+ ax = df.plot(colormap=cm.jet)
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
+ self._check_colors(ax.get_lines(), linecolors=rgba_colors)
+ tm.close()
+
+ # make color a list if plotting one column frame
+ # handles cases like df.plot(color='DodgerBlue')
+ ax = df.loc[:, [0]].plot(color="DodgerBlue")
+ self._check_colors(ax.lines, linecolors=["DodgerBlue"])
+
+ ax = df.plot(color="red")
+ self._check_colors(ax.get_lines(), linecolors=["red"] * 5)
+ tm.close()
+
+ # GH 10299
+ custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
+ ax = df.plot(color=custom_colors)
+ self._check_colors(ax.get_lines(), linecolors=custom_colors)
+ tm.close()
+
+ @pytest.mark.slow
+ def test_dont_modify_colors(self):
+ colors = ["r", "g", "b"]
+ DataFrame(np.random.rand(10, 2)).plot(color=colors)
+ assert len(colors) == 3
+
+ @pytest.mark.slow
+ def test_line_colors_and_styles_subplots(self):
+ # GH 9894
+ from matplotlib import cm
+
+ default_colors = self._unpack_cycler(self.plt.rcParams)
+
+ df = DataFrame(np.random.randn(5, 5))
+
+ axes = df.plot(subplots=True)
+ for ax, c in zip(axes, list(default_colors)):
+ self._check_colors(ax.get_lines(), linecolors=[c])
+ tm.close()
+
+ # single color char
+ axes = df.plot(subplots=True, color="k")
+ for ax in axes:
+ self._check_colors(ax.get_lines(), linecolors=["k"])
+ tm.close()
+
+ # single color str
+ axes = df.plot(subplots=True, color="green")
+ for ax in axes:
+ self._check_colors(ax.get_lines(), linecolors=["green"])
+ tm.close()
+
+ custom_colors = "rgcby"
+ axes = df.plot(color=custom_colors, subplots=True)
+ for ax, c in zip(axes, list(custom_colors)):
+ self._check_colors(ax.get_lines(), linecolors=[c])
+ tm.close()
+
+ axes = df.plot(color=list(custom_colors), subplots=True)
+ for ax, c in zip(axes, list(custom_colors)):
+ self._check_colors(ax.get_lines(), linecolors=[c])
+ tm.close()
+
+ # GH 10299
+ custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
+ axes = df.plot(color=custom_colors, subplots=True)
+ for ax, c in zip(axes, list(custom_colors)):
+ self._check_colors(ax.get_lines(), linecolors=[c])
+ tm.close()
+
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
+ for cmap in ["jet", cm.jet]:
+ axes = df.plot(colormap=cmap, subplots=True)
+ for ax, c in zip(axes, rgba_colors):
+ self._check_colors(ax.get_lines(), linecolors=[c])
+ tm.close()
+
+ # make color a list if plotting one column frame
+ # handles cases like df.plot(color='DodgerBlue')
+ axes = df.loc[:, [0]].plot(color="DodgerBlue", subplots=True)
+ self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])
+
+ # single character style
+ axes = df.plot(style="r", subplots=True)
+ for ax in axes:
+ self._check_colors(ax.get_lines(), linecolors=["r"])
+ tm.close()
+
+ # list of styles
+ styles = list("rgcby")
+ axes = df.plot(style=styles, subplots=True)
+ for ax, c in zip(axes, styles):
+ self._check_colors(ax.get_lines(), linecolors=[c])
+ tm.close()
+
+ @pytest.mark.slow
+ def test_area_colors(self):
+ from matplotlib import cm
+ from matplotlib.collections import PolyCollection
+
+ custom_colors = "rgcby"
+ df = DataFrame(np.random.rand(5, 5))
+
+ ax = df.plot.area(color=custom_colors)
+ self._check_colors(ax.get_lines(), linecolors=custom_colors)
+ poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
+ self._check_colors(poly, facecolors=custom_colors)
+
+ handles, labels = ax.get_legend_handles_labels()
+ self._check_colors(handles, facecolors=custom_colors)
+
+ for h in handles:
+ assert h.get_alpha() is None
+ tm.close()
+
+ ax = df.plot.area(colormap="jet")
+ jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
+ self._check_colors(ax.get_lines(), linecolors=jet_colors)
+ poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
+ self._check_colors(poly, facecolors=jet_colors)
+
+ handles, labels = ax.get_legend_handles_labels()
+ self._check_colors(handles, facecolors=jet_colors)
+ for h in handles:
+ assert h.get_alpha() is None
+ tm.close()
+
+ # When stacked=False, alpha is set to 0.5
+ ax = df.plot.area(colormap=cm.jet, stacked=False)
+ self._check_colors(ax.get_lines(), linecolors=jet_colors)
+ poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
+ jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
+ self._check_colors(poly, facecolors=jet_with_alpha)
+
+ handles, labels = ax.get_legend_handles_labels()
+ linecolors = jet_with_alpha
+ self._check_colors(handles[: len(jet_colors)], linecolors=linecolors)
+ for h in handles:
+ assert h.get_alpha() == 0.5
+
+ @pytest.mark.slow
+ def test_hist_colors(self):
+ default_colors = self._unpack_cycler(self.plt.rcParams)
+
+ df = DataFrame(np.random.randn(5, 5))
+ ax = df.plot.hist()
+ self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
+ tm.close()
+
+ custom_colors = "rgcby"
+ ax = df.plot.hist(color=custom_colors)
+ self._check_colors(ax.patches[::10], facecolors=custom_colors)
+ tm.close()
+
+ from matplotlib import cm
+
+ # Test str -> colormap functionality
+ ax = df.plot.hist(colormap="jet")
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
+ self._check_colors(ax.patches[::10], facecolors=rgba_colors)
+ tm.close()
+
+ # Test colormap functionality
+ ax = df.plot.hist(colormap=cm.jet)
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
+ self._check_colors(ax.patches[::10], facecolors=rgba_colors)
+ tm.close()
+
+ ax = df.loc[:, [0]].plot.hist(color="DodgerBlue")
+ self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
+
+ ax = df.plot(kind="hist", color="green")
+ self._check_colors(ax.patches[::10], facecolors=["green"] * 5)
+ tm.close()
+
+ @pytest.mark.slow
+ @td.skip_if_no_scipy
+ def test_kde_colors(self):
+ from matplotlib import cm
+
+ custom_colors = "rgcby"
+ df = DataFrame(np.random.rand(5, 5))
+
+ ax = df.plot.kde(color=custom_colors)
+ self._check_colors(ax.get_lines(), linecolors=custom_colors)
+ tm.close()
+
+ ax = df.plot.kde(colormap="jet")
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
+ self._check_colors(ax.get_lines(), linecolors=rgba_colors)
+ tm.close()
+
+ ax = df.plot.kde(colormap=cm.jet)
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
+ self._check_colors(ax.get_lines(), linecolors=rgba_colors)
+
+ @pytest.mark.slow
+ @td.skip_if_no_scipy
+ def test_kde_colors_and_styles_subplots(self):
+ from matplotlib import cm
+
+ default_colors = self._unpack_cycler(self.plt.rcParams)
+
+ df = DataFrame(np.random.randn(5, 5))
+
+ axes = df.plot(kind="kde", subplots=True)
+ for ax, c in zip(axes, list(default_colors)):
+ self._check_colors(ax.get_lines(), linecolors=[c])
+ tm.close()
+
+ # single color char
+ axes = df.plot(kind="kde", color="k", subplots=True)
+ for ax in axes:
+ self._check_colors(ax.get_lines(), linecolors=["k"])
+ tm.close()
+
+ # single color str
+ axes = df.plot(kind="kde", color="red", subplots=True)
+ for ax in axes:
+ self._check_colors(ax.get_lines(), linecolors=["red"])
+ tm.close()
+
+ custom_colors = "rgcby"
+ axes = df.plot(kind="kde", color=custom_colors, subplots=True)
+ for ax, c in zip(axes, list(custom_colors)):
+ self._check_colors(ax.get_lines(), linecolors=[c])
+ tm.close()
+
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
+ for cmap in ["jet", cm.jet]:
+ axes = df.plot(kind="kde", colormap=cmap, subplots=True)
+ for ax, c in zip(axes, rgba_colors):
+ self._check_colors(ax.get_lines(), linecolors=[c])
+ tm.close()
+
+ # make color a list if plotting one column frame
+ # handles cases like df.plot(color='DodgerBlue')
+ axes = df.loc[:, [0]].plot(kind="kde", color="DodgerBlue", subplots=True)
+ self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])
+
+ # single character style
+ axes = df.plot(kind="kde", style="r", subplots=True)
+ for ax in axes:
+ self._check_colors(ax.get_lines(), linecolors=["r"])
+ tm.close()
+
+ # list of styles
+ styles = list("rgcby")
+ axes = df.plot(kind="kde", style=styles, subplots=True)
+ for ax, c in zip(axes, styles):
+ self._check_colors(ax.get_lines(), linecolors=[c])
+ tm.close()
+
+ @pytest.mark.slow
+ def test_boxplot_colors(self):
+ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
+ # TODO: outside this func?
+ if fliers_c is None:
+ fliers_c = "k"
+ self._check_colors(bp["boxes"], linecolors=[box_c] * len(bp["boxes"]))
+ self._check_colors(
+ bp["whiskers"], linecolors=[whiskers_c] * len(bp["whiskers"])
+ )
+ self._check_colors(
+ bp["medians"], linecolors=[medians_c] * len(bp["medians"])
+ )
+ self._check_colors(bp["fliers"], linecolors=[fliers_c] * len(bp["fliers"]))
+ self._check_colors(bp["caps"], linecolors=[caps_c] * len(bp["caps"]))
+
+ default_colors = self._unpack_cycler(self.plt.rcParams)
+
+ df = DataFrame(np.random.randn(5, 5))
+ bp = df.plot.box(return_type="dict")
+ _check_colors(bp, default_colors[0], default_colors[0], default_colors[2])
+ tm.close()
+
+ dict_colors = dict(
+ boxes="#572923", whiskers="#982042", medians="#804823", caps="#123456"
+ )
+ bp = df.plot.box(color=dict_colors, sym="r+", return_type="dict")
+ _check_colors(
+ bp,
+ dict_colors["boxes"],
+ dict_colors["whiskers"],
+ dict_colors["medians"],
+ dict_colors["caps"],
+ "r",
+ )
+ tm.close()
+
+ # partial colors
+ dict_colors = dict(whiskers="c", medians="m")
+ bp = df.plot.box(color=dict_colors, return_type="dict")
+ _check_colors(bp, default_colors[0], "c", "m")
+ tm.close()
+
+ from matplotlib import cm
+
+ # Test str -> colormap functionality
+ bp = df.plot.box(colormap="jet", return_type="dict")
+ jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)]
+ _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
+ tm.close()
+
+ # Test colormap functionality
+ bp = df.plot.box(colormap=cm.jet, return_type="dict")
+ _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
+ tm.close()
+
+ # string color is applied to all artists except fliers
+ bp = df.plot.box(color="DodgerBlue", return_type="dict")
+ _check_colors(bp, "DodgerBlue", "DodgerBlue", "DodgerBlue", "DodgerBlue")
+
+ # tuple is also applied to all artists except fliers
+ bp = df.plot.box(color=(0, 1, 0), sym="#123456", return_type="dict")
+ _check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), "#123456")
+
+ with pytest.raises(ValueError):
+ # Color contains invalid key results in ValueError
+ df.plot.box(color=dict(boxes="red", xxxx="blue"))
+
+ def test_default_color_cycle(self):
+ import cycler
+ import matplotlib.pyplot as plt
+
+ colors = list("rgbk")
+ plt.rcParams["axes.prop_cycle"] = cycler.cycler("color", colors)
+
+ df = DataFrame(np.random.randn(5, 3))
+ ax = df.plot()
+
+ expected = self._unpack_cycler(plt.rcParams)[:3]
+ self._check_colors(ax.get_lines(), linecolors=expected)
+
+ @pytest.mark.slow
+ def test_no_color_bar(self):
+ df = self.hexbin_df
+ ax = df.plot.hexbin(x="A", y="B", colorbar=None)
+ assert ax.collections[0].colorbar is None
+
+ @pytest.mark.slow
+ def test_mixing_cmap_and_colormap_raises(self):
+ df = self.hexbin_df
+ msg = "Only specify one of `cmap` and `colormap`"
+ with pytest.raises(TypeError, match=msg):
+ df.plot.hexbin(x="A", y="B", cmap="YlGn", colormap="BuGn")
+
+ def test_passed_bar_colors(self):
+ import matplotlib as mpl
+
+ color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
+ colormap = mpl.colors.ListedColormap(color_tuples)
+ barplot = DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap)
+ assert color_tuples == [c.get_facecolor() for c in barplot.patches]
+
+ def test_rcParams_bar_colors(self):
+ import matplotlib as mpl
+
+ color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
+ with mpl.rc_context(rc={"axes.prop_cycle": mpl.cycler("color", color_tuples)}):
+ barplot = DataFrame([[1, 2, 3]]).plot(kind="bar")
+ assert color_tuples == [c.get_facecolor() for c in barplot.patches]
+
+ def test_colors_of_columns_with_same_name(self):
+ # ISSUE 11136 -> https://github.com/pandas-dev/pandas/issues/11136
+ # Creating a DataFrame with duplicate column labels and testing colors of them.
+ df = DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]})
+ df1 = DataFrame({"a": [2, 4, 6]})
+ df_concat = pd.concat([df, df1], axis=1)
+ result = df_concat.plot()
+ for legend, line in zip(result.get_legend().legendHandles, result.lines):
+ assert legend.get_color() == line.get_color()
+
+ def test_invalid_colormap(self):
+ df = DataFrame(np.random.randn(3, 2), columns=["A", "B"])
+
+ with pytest.raises(ValueError):
+ df.plot(colormap="invalid_colormap")
diff --git a/pandas/tests/plotting/frame/test_frame_groupby.py b/pandas/tests/plotting/frame/test_frame_groupby.py
new file mode 100644
index 0000000000000..9c1676d6d97fb
--- /dev/null
+++ b/pandas/tests/plotting/frame/test_frame_groupby.py
@@ -0,0 +1,90 @@
+""" Test cases for DataFrame.plot """
+
+import numpy as np
+import pytest
+
+import pandas.util._test_decorators as td
+
+from pandas import DataFrame
+import pandas._testing as tm
+from pandas.tests.plotting.common import TestPlotBase
+
+
+@td.skip_if_no_mpl
+class TestDataFramePlotsGroupby(TestPlotBase):
+ def setup_method(self, method):
+ TestPlotBase.setup_method(self, method)
+ import matplotlib as mpl
+
+ mpl.rcdefaults()
+
+ self.tdf = tm.makeTimeDataFrame()
+ self.hexbin_df = DataFrame(
+ {
+ "A": np.random.uniform(size=20),
+ "B": np.random.uniform(size=20),
+ "C": np.arange(20) + np.random.uniform(size=20),
+ }
+ )
+
+ def _assert_ytickslabels_visibility(self, axes, expected):
+ for ax, exp in zip(axes, expected):
+ self._check_visible(ax.get_yticklabels(), visible=exp)
+
+ def _assert_xtickslabels_visibility(self, axes, expected):
+ for ax, exp in zip(axes, expected):
+ self._check_visible(ax.get_xticklabels(), visible=exp)
+
+ @pytest.mark.parametrize(
+ "kwargs, expected",
+ [
+ # behavior without keyword
+ ({}, [True, False, True, False]),
+ # set sharey=True should be identical
+ ({"sharey": True}, [True, False, True, False]),
+ # sharey=False, all yticklabels should be visible
+ ({"sharey": False}, [True, True, True, True]),
+ ],
+ )
+ def test_groupby_boxplot_sharey(self, kwargs, expected):
+ # https://github.com/pandas-dev/pandas/issues/20968
+ # sharey can now be switched check whether the right
+ # pair of axes is turned on or off
+ df = DataFrame(
+ {
+ "a": [-1.43, -0.15, -3.70, -1.43, -0.14],
+ "b": [0.56, 0.84, 0.29, 0.56, 0.85],
+ "c": [0, 1, 2, 3, 1],
+ },
+ index=[0, 1, 2, 3, 4],
+ )
+ axes = df.groupby("c").boxplot(**kwargs)
+ self._assert_ytickslabels_visibility(axes, expected)
+
+ @pytest.mark.parametrize(
+ "kwargs, expected",
+ [
+ # behavior without keyword
+ ({}, [True, True, True, True]),
+ # set sharex=False should be identical
+ ({"sharex": False}, [True, True, True, True]),
+ # sharex=True, xticklabels should be visible
+ # only for bottom plots
+ ({"sharex": True}, [False, False, True, True]),
+ ],
+ )
+ def test_groupby_boxplot_sharex(self, kwargs, expected):
+ # https://github.com/pandas-dev/pandas/issues/20968
+ # sharex can now be switched check whether the right
+ # pair of axes is turned on or off
+
+ df = DataFrame(
+ {
+ "a": [-1.43, -0.15, -3.70, -1.43, -0.14],
+ "b": [0.56, 0.84, 0.29, 0.56, 0.85],
+ "c": [0, 1, 2, 3, 1],
+ },
+ index=[0, 1, 2, 3, 4],
+ )
+ axes = df.groupby("c").boxplot(**kwargs)
+ self._assert_xtickslabels_visibility(axes, expected)
diff --git a/pandas/tests/plotting/frame/test_frame_subplots.py b/pandas/tests/plotting/frame/test_frame_subplots.py
new file mode 100644
index 0000000000000..413c5b8a87dc7
--- /dev/null
+++ b/pandas/tests/plotting/frame/test_frame_subplots.py
@@ -0,0 +1,677 @@
+""" Test cases for DataFrame.plot """
+
+import string
+import warnings
+
+import numpy as np
+import pytest
+
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import DataFrame, Series, date_range
+import pandas._testing as tm
+from pandas.tests.plotting.common import TestPlotBase
+
+from pandas.io.formats.printing import pprint_thing
+
+
+@td.skip_if_no_mpl
+class TestDataFramePlotsSubplots(TestPlotBase):
+ def setup_method(self, method):
+ TestPlotBase.setup_method(self, method)
+ import matplotlib as mpl
+
+ mpl.rcdefaults()
+
+ self.tdf = tm.makeTimeDataFrame()
+ self.hexbin_df = DataFrame(
+ {
+ "A": np.random.uniform(size=20),
+ "B": np.random.uniform(size=20),
+ "C": np.arange(20) + np.random.uniform(size=20),
+ }
+ )
+
+ @pytest.mark.slow
+ def test_subplots(self):
+ df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
+
+ for kind in ["bar", "barh", "line", "area"]:
+ axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+ assert axes.shape == (3,)
+
+ for ax, column in zip(axes, df.columns):
+ self._check_legend_labels(ax, labels=[pprint_thing(column)])
+
+ for ax in axes[:-2]:
+ self._check_visible(ax.xaxis) # xaxis must be visible for grid
+ self._check_visible(ax.get_xticklabels(), visible=False)
+ if not (kind == "bar" and self.mpl_ge_3_1_0):
+ # change https://github.com/pandas-dev/pandas/issues/26714
+ self._check_visible(ax.get_xticklabels(minor=True), visible=False)
+ self._check_visible(ax.xaxis.get_label(), visible=False)
+ self._check_visible(ax.get_yticklabels())
+
+ self._check_visible(axes[-1].xaxis)
+ self._check_visible(axes[-1].get_xticklabels())
+ self._check_visible(axes[-1].get_xticklabels(minor=True))
+ self._check_visible(axes[-1].xaxis.get_label())
+ self._check_visible(axes[-1].get_yticklabels())
+
+ axes = df.plot(kind=kind, subplots=True, sharex=False)
+ for ax in axes:
+ self._check_visible(ax.xaxis)
+ self._check_visible(ax.get_xticklabels())
+ self._check_visible(ax.get_xticklabels(minor=True))
+ self._check_visible(ax.xaxis.get_label())
+ self._check_visible(ax.get_yticklabels())
+
+ axes = df.plot(kind=kind, subplots=True, legend=False)
+ for ax in axes:
+ assert ax.get_legend() is None
+
+ @pytest.mark.slow
+ def test_subplots_timeseries(self):
+ idx = date_range(start="2014-07-01", freq="M", periods=10)
+ df = DataFrame(np.random.rand(10, 3), index=idx)
+
+ for kind in ["line", "area"]:
+ axes = df.plot(kind=kind, subplots=True, sharex=True)
+ self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
+
+ for ax in axes[:-2]:
+ # GH 7801
+ self._check_visible(ax.xaxis) # xaxis must be visible for grid
+ self._check_visible(ax.get_xticklabels(), visible=False)
+ self._check_visible(ax.get_xticklabels(minor=True), visible=False)
+ self._check_visible(ax.xaxis.get_label(), visible=False)
+ self._check_visible(ax.get_yticklabels())
+
+ self._check_visible(axes[-1].xaxis)
+ self._check_visible(axes[-1].get_xticklabels())
+ self._check_visible(axes[-1].get_xticklabels(minor=True))
+ self._check_visible(axes[-1].xaxis.get_label())
+ self._check_visible(axes[-1].get_yticklabels())
+ self._check_ticks_props(axes, xrot=0)
+
+ axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
+ for ax in axes:
+ self._check_visible(ax.xaxis)
+ self._check_visible(ax.get_xticklabels())
+ self._check_visible(ax.get_xticklabels(minor=True))
+ self._check_visible(ax.xaxis.get_label())
+ self._check_visible(ax.get_yticklabels())
+ self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
+
+ def test_subplots_timeseries_y_axis(self):
+ # GH16953
+ data = {
+ "numeric": np.array([1, 2, 5]),
+ "timedelta": [
+ pd.Timedelta(-10, unit="s"),
+ pd.Timedelta(10, unit="m"),
+ pd.Timedelta(10, unit="h"),
+ ],
+ "datetime_no_tz": [
+ pd.to_datetime("2017-08-01 00:00:00"),
+ pd.to_datetime("2017-08-01 02:00:00"),
+ pd.to_datetime("2017-08-02 00:00:00"),
+ ],
+ "datetime_all_tz": [
+ pd.to_datetime("2017-08-01 00:00:00", utc=True),
+ pd.to_datetime("2017-08-01 02:00:00", utc=True),
+ pd.to_datetime("2017-08-02 00:00:00", utc=True),
+ ],
+ "text": ["This", "should", "fail"],
+ }
+ testdata = DataFrame(data)
+
+ y_cols = ["numeric", "timedelta", "datetime_no_tz", "datetime_all_tz"]
+ for col in y_cols:
+ ax = testdata.plot(y=col)
+ result = ax.get_lines()[0].get_data()[1]
+ expected = testdata[col].values
+ assert (result == expected).all()
+
+ msg = "no numeric data to plot"
+ with pytest.raises(TypeError, match=msg):
+ testdata.plot(y="text")
+
+ @pytest.mark.xfail(reason="not support for period, categorical, datetime_mixed_tz")
+ def test_subplots_timeseries_y_axis_not_supported(self):
+ """
+ This test will fail for:
+ period:
+ since period isn't yet implemented in ``select_dtypes``
+ and because it will need a custom value converter +
+ tick formatter (as was done for x-axis plots)
+
+ categorical:
+ because it will need a custom value converter +
+ tick formatter (also doesn't work for x-axis, as of now)
+
+ datetime_mixed_tz:
+ because of the way how pandas handles ``Series`` of
+ ``datetime`` objects with different timezone,
+ generally converting ``datetime`` objects in a tz-aware
+ form could help with this problem
+ """
+ data = {
+ "numeric": np.array([1, 2, 5]),
+ "period": [
+ pd.Period("2017-08-01 00:00:00", freq="H"),
+ pd.Period("2017-08-01 02:00", freq="H"),
+ pd.Period("2017-08-02 00:00:00", freq="H"),
+ ],
+ "categorical": pd.Categorical(
+ ["c", "b", "a"], categories=["a", "b", "c"], ordered=False
+ ),
+ "datetime_mixed_tz": [
+ pd.to_datetime("2017-08-01 00:00:00", utc=True),
+ pd.to_datetime("2017-08-01 02:00:00"),
+ pd.to_datetime("2017-08-02 00:00:00"),
+ ],
+ }
+ testdata = DataFrame(data)
+ ax_period = testdata.plot(x="numeric", y="period")
+ assert (
+ ax_period.get_lines()[0].get_data()[1] == testdata["period"].values
+ ).all()
+ ax_categorical = testdata.plot(x="numeric", y="categorical")
+ assert (
+ ax_categorical.get_lines()[0].get_data()[1]
+ == testdata["categorical"].values
+ ).all()
+ ax_datetime_mixed_tz = testdata.plot(x="numeric", y="datetime_mixed_tz")
+ assert (
+ ax_datetime_mixed_tz.get_lines()[0].get_data()[1]
+ == testdata["datetime_mixed_tz"].values
+ ).all()
+
+ @pytest.mark.slow
+ def test_subplots_layout_multi_column(self):
+ # GH 6667
+ df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
+
+ axes = df.plot(subplots=True, layout=(2, 2))
+ self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+ assert axes.shape == (2, 2)
+
+ axes = df.plot(subplots=True, layout=(-1, 2))
+ self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+ assert axes.shape == (2, 2)
+
+ axes = df.plot(subplots=True, layout=(2, -1))
+ self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
+ assert axes.shape == (2, 2)
+
+ axes = df.plot(subplots=True, layout=(1, 4))
+ self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
+ assert axes.shape == (1, 4)
+
+ axes = df.plot(subplots=True, layout=(-1, 4))
+ self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
+ assert axes.shape == (1, 4)
+
+ axes = df.plot(subplots=True, layout=(4, -1))
+ self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
+ assert axes.shape == (4, 1)
+
+ with pytest.raises(ValueError):
+ df.plot(subplots=True, layout=(1, 1))
+ with pytest.raises(ValueError):
+ df.plot(subplots=True, layout=(-1, -1))
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize(
+ "kwargs, expected_axes_num, expected_layout, expected_shape",
+ [
+ ({}, 1, (1, 1), (1,)),
+ ({"layout": (3, 3)}, 1, (3, 3), (3, 3)),
+ ],
+ )
+ def test_subplots_layout_single_column(
+ self, kwargs, expected_axes_num, expected_layout, expected_shape
+ ):
+
+ # GH 6667
+ df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
+ axes = df.plot(subplots=True, **kwargs)
+ self._check_axes_shape(
+ axes,
+ axes_num=expected_axes_num,
+ layout=expected_layout,
+ )
+ assert axes.shape == expected_shape
+
+ @pytest.mark.slow
+ def test_subplots_warnings(self):
+ # GH 9464
+ with tm.assert_produces_warning(None):
+ df = DataFrame(np.random.randn(100, 4))
+ df.plot(subplots=True, layout=(3, 2))
+
+ df = DataFrame(
+ np.random.randn(100, 4), index=date_range("1/1/2000", periods=100)
+ )
+ df.plot(subplots=True, layout=(3, 2))
+
+ @pytest.mark.slow
+ def test_subplots_multiple_axes(self):
+ # GH 5353, 6970, GH 7069
+ fig, axes = self.plt.subplots(2, 3)
+ df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
+
+ returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
+ self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
+ assert returned.shape == (3,)
+ assert returned[0].figure is fig
+ # draw on second row
+ returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
+ self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
+ assert returned.shape == (3,)
+ assert returned[0].figure is fig
+ self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
+ tm.close()
+
+ with pytest.raises(ValueError):
+ fig, axes = self.plt.subplots(2, 3)
+ # pass different number of axes from required
+ df.plot(subplots=True, ax=axes)
+
+ # pass 2-dim axes and invalid layout
+ # invalid lauout should not affect to input and return value
+ # (show warning is tested in
+ # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes
+ fig, axes = self.plt.subplots(2, 2)
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", UserWarning)
+ df = DataFrame(np.random.rand(10, 4), index=list(string.ascii_letters[:10]))
+
+ returned = df.plot(
+ subplots=True, ax=axes, layout=(2, 1), sharex=False, sharey=False
+ )
+ self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
+ assert returned.shape == (4,)
+
+ returned = df.plot(
+ subplots=True, ax=axes, layout=(2, -1), sharex=False, sharey=False
+ )
+ self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
+ assert returned.shape == (4,)
+
+ returned = df.plot(
+ subplots=True, ax=axes, layout=(-1, 2), sharex=False, sharey=False
+ )
+ self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
+ assert returned.shape == (4,)
+
+ # single column
+ fig, axes = self.plt.subplots(1, 1)
+ df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
+
+ axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
+ self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
+ assert axes.shape == (1,)
+
+ def test_subplots_ts_share_axes(self):
+ # GH 3964
+ fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
+ self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
+ df = DataFrame(
+ np.random.randn(10, 9),
+ index=date_range(start="2014-07-01", freq="M", periods=10),
+ )
+ for i, ax in enumerate(axes.ravel()):
+ df[i].plot(ax=ax, fontsize=5)
+
+ # Rows other than bottom should not be visible
+ for ax in axes[0:-1].ravel():
+ self._check_visible(ax.get_xticklabels(), visible=False)
+
+ # Bottom row should be visible
+ for ax in axes[-1].ravel():
+ self._check_visible(ax.get_xticklabels(), visible=True)
+
+ # First column should be visible
+ for ax in axes[[0, 1, 2], [0]].ravel():
+ self._check_visible(ax.get_yticklabels(), visible=True)
+
+ # Other columns should not be visible
+ for ax in axes[[0, 1, 2], [1]].ravel():
+ self._check_visible(ax.get_yticklabels(), visible=False)
+ for ax in axes[[0, 1, 2], [2]].ravel():
+ self._check_visible(ax.get_yticklabels(), visible=False)
+
+ def test_subplots_sharex_axes_existing_axes(self):
+ # GH 9158
+ d = {"A": [1.0, 2.0, 3.0, 4.0], "B": [4.0, 3.0, 2.0, 1.0], "C": [5, 1, 3, 4]}
+ df = DataFrame(d, index=date_range("2014 10 11", "2014 10 14"))
+
+ axes = df[["A", "B"]].plot(subplots=True)
+ df["C"].plot(ax=axes[0], secondary_y=True)
+
+ self._check_visible(axes[0].get_xticklabels(), visible=False)
+ self._check_visible(axes[1].get_xticklabels(), visible=True)
+ for ax in axes.ravel():
+ self._check_visible(ax.get_yticklabels(), visible=True)
+
+ @pytest.mark.slow
+ def test_subplots_dup_columns(self):
+ # GH 10962
+ df = DataFrame(np.random.rand(5, 5), columns=list("aaaaa"))
+ axes = df.plot(subplots=True)
+ for ax in axes:
+ self._check_legend_labels(ax, labels=["a"])
+ assert len(ax.lines) == 1
+ tm.close()
+
+ axes = df.plot(subplots=True, secondary_y="a")
+ for ax in axes:
+ # (right) is only attached when subplots=False
+ self._check_legend_labels(ax, labels=["a"])
+ assert len(ax.lines) == 1
+ tm.close()
+
+ ax = df.plot(secondary_y="a")
+ self._check_legend_labels(ax, labels=["a (right)"] * 5)
+ assert len(ax.lines) == 0
+ assert len(ax.right_ax.lines) == 5
+
+ @pytest.mark.slow
+ def test_bar_log_no_subplots(self):
+ # GH3254, GH3298 matplotlib/matplotlib#1882, #1892
+ # regressions in 1.2.1
+ expected = np.array([0.1, 1.0, 10.0, 100])
+
+ # no subplots
+ df = DataFrame({"A": [3] * 5, "B": list(range(1, 6))}, index=range(5))
+ ax = df.plot.bar(grid=True, log=True)
+ tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
+
+ @pytest.mark.slow
+ def test_bar_log_subplots(self):
+ expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4])
+
+ ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
+ log=True, subplots=True
+ )
+
+ tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
+ tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
+
+ @pytest.mark.slow
+ def test_boxplot_subplots_return_type(self):
+ df = self.hist_df
+
+ # normal style: return_type=None
+ result = df.plot.box(subplots=True)
+ assert isinstance(result, Series)
+ self._check_box_return_type(
+ result, None, expected_keys=["height", "weight", "category"]
+ )
+
+ for t in ["dict", "axes", "both"]:
+ returned = df.plot.box(return_type=t, subplots=True)
+ self._check_box_return_type(
+ returned,
+ t,
+ expected_keys=["height", "weight", "category"],
+ check_ax_title=False,
+ )
+
+ @pytest.mark.slow
+ def test_df_subplots_patterns_minorticks(self):
+ # GH 10657
+ import matplotlib.pyplot as plt
+
+ df = DataFrame(
+ np.random.randn(10, 2),
+ index=date_range("1/1/2000", periods=10),
+ columns=list("AB"),
+ )
+
+ # shared subplots
+ fig, axes = plt.subplots(2, 1, sharex=True)
+ axes = df.plot(subplots=True, ax=axes)
+ for ax in axes:
+ assert len(ax.lines) == 1
+ self._check_visible(ax.get_yticklabels(), visible=True)
+ # xaxis of 1st ax must be hidden
+ self._check_visible(axes[0].get_xticklabels(), visible=False)
+ self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
+ self._check_visible(axes[1].get_xticklabels(), visible=True)
+ self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
+ tm.close()
+
+ fig, axes = plt.subplots(2, 1)
+ with tm.assert_produces_warning(UserWarning):
+ axes = df.plot(subplots=True, ax=axes, sharex=True)
+ for ax in axes:
+ assert len(ax.lines) == 1
+ self._check_visible(ax.get_yticklabels(), visible=True)
+ # xaxis of 1st ax must be hidden
+ self._check_visible(axes[0].get_xticklabels(), visible=False)
+ self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
+ self._check_visible(axes[1].get_xticklabels(), visible=True)
+ self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
+ tm.close()
+
+ # not shared
+ fig, axes = plt.subplots(2, 1)
+ axes = df.plot(subplots=True, ax=axes)
+ for ax in axes:
+ assert len(ax.lines) == 1
+ self._check_visible(ax.get_yticklabels(), visible=True)
+ self._check_visible(ax.get_xticklabels(), visible=True)
+ self._check_visible(ax.get_xticklabels(minor=True), visible=True)
+ tm.close()
+
+ def test_subplots_sharex_false(self):
+ # test when sharex is set to False, two plots should have different
+ # labels, GH 25160
+ df = DataFrame(np.random.rand(10, 2))
+ df.iloc[5:, 1] = np.nan
+ df.iloc[:5, 0] = np.nan
+
+ figs, axs = self.plt.subplots(2, 1)
+ df.plot.line(ax=axs, subplots=True, sharex=False)
+
+ expected_ax1 = np.arange(4.5, 10, 0.5)
+ expected_ax2 = np.arange(-0.5, 5, 0.5)
+
+ tm.assert_numpy_array_equal(axs[0].get_xticks(), expected_ax1)
+ tm.assert_numpy_array_equal(axs[1].get_xticks(), expected_ax2)
+
+ @pytest.mark.parametrize(
+ "index_name, old_label, new_label",
+ [
+ (None, "", "new"),
+ ("old", "old", "new"),
+ (None, "", ""),
+ (None, "", 1),
+ (None, "", [1, 2]),
+ ],
+ )
+ @pytest.mark.parametrize("kind", ["line", "area", "bar"])
+ def test_xlabel_ylabel_dataframe_subplots(
+ self, kind, index_name, old_label, new_label
+ ):
+ # GH 9093
+ df = DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"])
+ df.index.name = index_name
+
+ # default is the ylabel is not shown and xlabel is index name
+ axes = df.plot(kind=kind, subplots=True)
+ assert all(ax.get_ylabel() == "" for ax in axes)
+ assert all(ax.get_xlabel() == old_label for ax in axes)
+
+ # old xlabel will be overriden and assigned ylabel will be used as ylabel
+ axes = df.plot(kind=kind, ylabel=new_label, xlabel=new_label, subplots=True)
+ assert all(ax.get_ylabel() == str(new_label) for ax in axes)
+ assert all(ax.get_xlabel() == str(new_label) for ax in axes)
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize(
+ "kwargs",
+ [
+ # stacked center
+ dict(kind="bar", stacked=True),
+ dict(kind="bar", stacked=True, width=0.9),
+ dict(kind="barh", stacked=True),
+ dict(kind="barh", stacked=True, width=0.9),
+ # center
+ dict(kind="bar", stacked=False),
+ dict(kind="bar", stacked=False, width=0.9),
+ dict(kind="barh", stacked=False),
+ dict(kind="barh", stacked=False, width=0.9),
+ # subplots center
+ dict(kind="bar", subplots=True),
+ dict(kind="bar", subplots=True, width=0.9),
+ dict(kind="barh", subplots=True),
+ dict(kind="barh", subplots=True, width=0.9),
+ # align edge
+ dict(kind="bar", stacked=True, align="edge"),
+ dict(kind="bar", stacked=True, width=0.9, align="edge"),
+ dict(kind="barh", stacked=True, align="edge"),
+ dict(kind="barh", stacked=True, width=0.9, align="edge"),
+ dict(kind="bar", stacked=False, align="edge"),
+ dict(kind="bar", stacked=False, width=0.9, align="edge"),
+ dict(kind="barh", stacked=False, align="edge"),
+ dict(kind="barh", stacked=False, width=0.9, align="edge"),
+ dict(kind="bar", subplots=True, align="edge"),
+ dict(kind="bar", subplots=True, width=0.9, align="edge"),
+ dict(kind="barh", subplots=True, align="edge"),
+ dict(kind="barh", subplots=True, width=0.9, align="edge"),
+ ],
+ )
+ def test_bar_align_multiple_columns(self, kwargs):
+ # GH2157
+ df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
+ self._check_bar_alignment(df, **kwargs)
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize(
+ "kwargs",
+ [
+ dict(kind="bar", stacked=False),
+ dict(kind="bar", stacked=True),
+ dict(kind="barh", stacked=False),
+ dict(kind="barh", stacked=True),
+ dict(kind="bar", subplots=True),
+ dict(kind="barh", subplots=True),
+ ],
+ )
+ def test_bar_align_single_column(self, kwargs):
+ df = DataFrame(np.random.randn(5))
+ self._check_bar_alignment(df, **kwargs)
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize(
+ "kwargs",
+ [
+ {"kind": "bar", "stacked": False},
+ {"kind": "bar", "stacked": True},
+ {"kind": "barh", "stacked": False},
+ {"kind": "barh", "stacked": True},
+ {"kind": "bar", "subplots": True},
+ {"kind": "barh", "subplots": True},
+ ],
+ )
+ def test_bar_barwidth_position(self, kwargs):
+ df = DataFrame(np.random.randn(5, 5))
+ self._check_bar_alignment(df, width=0.9, position=0.2, **kwargs)
+
+ @pytest.mark.slow
+ def test_bar_barwidth_position_int(self):
+ # GH 12979
+ df = DataFrame(np.random.randn(5, 5))
+
+ for w in [1, 1.0]:
+ ax = df.plot.bar(stacked=True, width=w)
+ ticks = ax.xaxis.get_ticklocs()
+ tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))
+ assert ax.get_xlim() == (-0.75, 4.75)
+ # check left-edge of bars
+ assert ax.patches[0].get_x() == -0.5
+ assert ax.patches[-1].get_x() == 3.5
+
+ self._check_bar_alignment(df, kind="bar", stacked=True, width=1)
+ self._check_bar_alignment(df, kind="barh", stacked=False, width=1)
+ self._check_bar_alignment(df, kind="barh", stacked=True, width=1)
+ self._check_bar_alignment(df, kind="bar", subplots=True, width=1)
+ self._check_bar_alignment(df, kind="barh", subplots=True, width=1)
+
+ def _check_bar_alignment(
+ self,
+ df,
+ kind="bar",
+ stacked=False,
+ subplots=False,
+ align="center",
+ width=0.5,
+ position=0.5,
+ ):
+
+ axes = df.plot(
+ kind=kind,
+ stacked=stacked,
+ subplots=subplots,
+ align=align,
+ width=width,
+ position=position,
+ grid=True,
+ )
+
+ axes = self._flatten_visible(axes)
+
+ for ax in axes:
+ if kind == "bar":
+ axis = ax.xaxis
+ ax_min, ax_max = ax.get_xlim()
+ min_edge = min(p.get_x() for p in ax.patches)
+ max_edge = max(p.get_x() + p.get_width() for p in ax.patches)
+ elif kind == "barh":
+ axis = ax.yaxis
+ ax_min, ax_max = ax.get_ylim()
+ min_edge = min(p.get_y() for p in ax.patches)
+ max_edge = max(p.get_y() + p.get_height() for p in ax.patches)
+ else:
+ raise ValueError
+
+ # GH 7498
+ # compare margins between lim and bar edges
+ tm.assert_almost_equal(ax_min, min_edge - 0.25)
+ tm.assert_almost_equal(ax_max, max_edge + 0.25)
+
+ p = ax.patches[0]
+ if kind == "bar" and (stacked is True or subplots is True):
+ edge = p.get_x()
+ center = edge + p.get_width() * position
+ elif kind == "bar" and stacked is False:
+ center = p.get_x() + p.get_width() * len(df.columns) * position
+ edge = p.get_x()
+ elif kind == "barh" and (stacked is True or subplots is True):
+ center = p.get_y() + p.get_height() * position
+ edge = p.get_y()
+ elif kind == "barh" and stacked is False:
+ center = p.get_y() + p.get_height() * len(df.columns) * position
+ edge = p.get_y()
+ else:
+ raise ValueError
+
+ # Check the ticks locates on integer
+ assert (axis.get_ticklocs() == np.arange(len(df))).all()
+
+ if align == "center":
+ # Check whether the bar locates on center
+ tm.assert_almost_equal(axis.get_ticklocs()[0], center)
+ elif align == "edge":
+ # Check whether the bar's edge starts from the tick
+ tm.assert_almost_equal(axis.get_ticklocs()[0], edge)
+ else:
+ raise ValueError
+
+ return axes
| Created instead of #37538
- [x] closes #34769
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I moved the file to a new directory.
What is the best structure for this folder?
[before_test_ploting_frame](https://gist.github.com/Mikhaylov-yv/999de1451e8ecc836215b30db34ef841)
[after_test_ploting_frame](https://gist.github.com/Mikhaylov-yv/98eca32c21f59cc9e7653cd96d5fabd2) | https://api.github.com/repos/pandas-dev/pandas/pulls/37655 | 2020-11-05T21:29:47Z | 2020-11-13T02:51:08Z | 2020-11-13T02:51:08Z | 2020-11-13T05:52:50Z |
BUG: don't suppress OSError in tzlocal | diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index 4c62b16d430bd..4a3fac1954ab7 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -500,9 +500,11 @@ cdef int64_t[:] _tz_convert_from_utc(const int64_t[:] vals, tzinfo tz):
return converted
+# OSError may be thrown by tzlocal on windows at or close to 1970-01-01
+# see https://github.com/pandas-dev/pandas/pull/37591#issuecomment-720628241
cdef inline int64_t _tzlocal_get_offset_components(int64_t val, tzinfo tz,
bint to_utc,
- bint *fold=NULL):
+ bint *fold=NULL) except? -1:
"""
Calculate offset in nanoseconds needed to convert the i8 representation of
a datetime from a tzlocal timezone to UTC, or vice-versa.
| Seeing a bunch of suppressed exceptions in the py38 windows tests. Hopefully this will cause them to surface correctly. | https://api.github.com/repos/pandas-dev/pandas/pulls/37652 | 2020-11-05T16:06:59Z | 2020-11-06T00:09:03Z | 2020-11-06T00:09:03Z | 2020-11-06T00:11:38Z |
REF: implement Categorical.encode_with_my_categories | diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 3e2da3e95f396..87a049c77dc32 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1693,9 +1693,8 @@ def _validate_listlike(self, target: ArrayLike) -> np.ndarray:
# Indexing on codes is more efficient if categories are the same,
# so we can apply some optimizations based on the degree of
# dtype-matching.
- codes = recode_for_categories(
- target.codes, target.categories, self.categories, copy=False
- )
+ cat = self._encode_with_my_categories(target)
+ codes = cat._codes
else:
codes = self.categories.get_indexer(target)
@@ -1867,8 +1866,8 @@ def _validate_setitem_value(self, value):
"without identical categories"
)
# is_dtype_equal implies categories_match_up_to_permutation
- new_codes = self._validate_listlike(value)
- value = Categorical.from_codes(new_codes, dtype=self.dtype)
+ value = self._encode_with_my_categories(value)
+ return value._codes
# wrap scalars and hashable-listlikes in list
rvalue = value if not is_hashable(value) else [value]
@@ -2100,8 +2099,8 @@ def equals(self, other: object) -> bool:
if not isinstance(other, Categorical):
return False
elif self._categories_match_up_to_permutation(other):
- other_codes = self._validate_listlike(other)
- return np.array_equal(self._codes, other_codes)
+ other = self._encode_with_my_categories(other)
+ return np.array_equal(self._codes, other._codes)
return False
@classmethod
@@ -2112,6 +2111,23 @@ def _concat_same_type(self, to_concat):
# ------------------------------------------------------------------
+ def _encode_with_my_categories(self, other: "Categorical") -> "Categorical":
+ """
+ Re-encode another categorical using this Categorical's categories.
+
+ Notes
+ -----
+ This assumes we have already checked
+ self._categories_match_up_to_permutation(other).
+ """
+ # Indexing on codes is more efficient if categories are the same,
+ # so we can apply some optimizations based on the degree of
+ # dtype-matching.
+ codes = recode_for_categories(
+ other.codes, other.categories, self.categories, copy=False
+ )
+ return self._from_backing_data(codes)
+
def _categories_match_up_to_permutation(self, other: "Categorical") -> bool:
"""
Returns True if categoricals are the same dtype
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 99dc01ef421d1..a38d9cbad0d64 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -301,7 +301,7 @@ def _maybe_unwrap(x):
categories = first.categories
ordered = first.ordered
- all_codes = [first._validate_listlike(x) for x in to_union]
+ all_codes = [first._encode_with_my_categories(x)._codes for x in to_union]
new_codes = np.concatenate(all_codes)
if sort_categories and not ignore_order and ordered:
| Categorical._validate_listlike is misleading, since it treats not-in-categories entries as NAs, whereas we usually want to raise when we see those. This implements the more obviously-named encode_with_my_categories | https://api.github.com/repos/pandas-dev/pandas/pulls/37650 | 2020-11-05T15:18:13Z | 2020-11-09T00:37:38Z | 2020-11-09T00:37:38Z | 2020-11-09T00:54:30Z |
BUG: Series.to_dict does not return native Python types | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index bf67ff6525005..982cfaeecf6f0 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -324,6 +324,7 @@ Numeric
Conversion
^^^^^^^^^^
+- Bug in :meth:`Series.to_dict` with ``orient='records'`` now returns python native types (:issue:`25969`)
-
-
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 520c0cf100784..669bfe08d42b0 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -71,6 +71,7 @@
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
+ is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
@@ -170,6 +171,29 @@ def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scal
return value
+def maybe_box_native(value: Scalar) -> Scalar:
+ """
+ If passed a scalar cast the scalar to a python native type.
+
+ Parameters
+ ----------
+ value : scalar or Series
+
+ Returns
+ -------
+ scalar or Series
+ """
+ if is_datetime_or_timedelta_dtype(value):
+ value = maybe_box_datetimelike(value)
+ elif is_float(value):
+ value = float(value)
+ elif is_integer(value):
+ value = int(value)
+ elif is_bool(value):
+ value = bool(value)
+ return value
+
+
def maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar:
"""
Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f1328b0eeee5e..3fe330f659513 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -95,7 +95,7 @@
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
- maybe_box_datetimelike,
+ maybe_box_native,
maybe_convert_platform,
maybe_downcast_to_dtype,
maybe_infer_to_datetimelike,
@@ -1655,7 +1655,7 @@ def to_dict(self, orient: str = "dict", into=dict):
(
"data",
[
- list(map(maybe_box_datetimelike, t))
+ list(map(maybe_box_native, t))
for t in self.itertuples(index=False, name=None)
],
),
@@ -1663,7 +1663,7 @@ def to_dict(self, orient: str = "dict", into=dict):
)
elif orient == "series":
- return into_c((k, maybe_box_datetimelike(v)) for k, v in self.items())
+ return into_c((k, v) for k, v in self.items())
elif orient == "records":
columns = self.columns.tolist()
@@ -1672,8 +1672,7 @@ def to_dict(self, orient: str = "dict", into=dict):
for row in self.itertuples(index=False, name=None)
)
return [
- into_c((k, maybe_box_datetimelike(v)) for k, v in row.items())
- for row in rows
+ into_c((k, maybe_box_native(v)) for k, v in row.items()) for row in rows
]
elif orient == "index":
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 0e1d2f5dafb3f..cbb66918a661b 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -58,6 +58,7 @@
from pandas.core.dtypes.cast import (
convert_dtypes,
+ maybe_box_native,
maybe_cast_to_extension_array,
validate_numeric_casting,
)
@@ -1591,7 +1592,7 @@ def to_dict(self, into=dict):
"""
# GH16122
into_c = com.standardize_mapping(into)
- return into_c(self.items())
+ return into_c((k, maybe_box_native(v)) for k, v in self.items())
def to_frame(self, name=None) -> DataFrame:
"""
diff --git a/pandas/tests/dtypes/cast/test_maybe_box_native.py b/pandas/tests/dtypes/cast/test_maybe_box_native.py
new file mode 100644
index 0000000000000..3f62f31dac219
--- /dev/null
+++ b/pandas/tests/dtypes/cast/test_maybe_box_native.py
@@ -0,0 +1,40 @@
+from datetime import datetime
+
+import numpy as np
+import pytest
+
+from pandas.core.dtypes.cast import maybe_box_native
+
+from pandas import (
+ Interval,
+ Period,
+ Timedelta,
+ Timestamp,
+)
+
+
+@pytest.mark.parametrize(
+ "obj,expected_dtype",
+ [
+ (b"\x00\x10", bytes),
+ (int(4), int),
+ (np.uint(4), int),
+ (np.int32(-4), int),
+ (np.uint8(4), int),
+ (float(454.98), float),
+ (np.float16(0.4), float),
+ (np.float64(1.4), float),
+ (np.bool_(False), bool),
+ (datetime(2005, 2, 25), datetime),
+ (np.datetime64("2005-02-25"), Timestamp),
+ (Timestamp("2005-02-25"), Timestamp),
+ (np.timedelta64(1, "D"), Timedelta),
+ (Timedelta(1, "D"), Timedelta),
+ (Interval(0, 1), Interval),
+ (Period("4Q2005"), Period),
+ ],
+)
+def test_maybe_box_native(obj, expected_dtype):
+ boxed_obj = maybe_box_native(obj)
+ result_dtype = type(boxed_obj)
+ assert result_dtype is expected_dtype
diff --git a/pandas/tests/frame/methods/test_to_dict.py b/pandas/tests/frame/methods/test_to_dict.py
index c17ad197ac09a..6d0d4e045e491 100644
--- a/pandas/tests/frame/methods/test_to_dict.py
+++ b/pandas/tests/frame/methods/test_to_dict.py
@@ -263,31 +263,44 @@ def test_to_dict_wide(self):
expected = {f"A_{i:d}": i for i in range(256)}
assert result == expected
- def test_to_dict_orient_dtype(self):
- # GH22620 & GH21256
-
- df = DataFrame(
- {
- "bool": [True, True, False],
- "datetime": [
+ @pytest.mark.parametrize(
+ "data,dtype",
+ (
+ ([True, True, False], bool),
+ [
+ [
datetime(2018, 1, 1),
datetime(2019, 2, 2),
datetime(2020, 3, 3),
],
- "float": [1.0, 2.0, 3.0],
- "int": [1, 2, 3],
- "str": ["X", "Y", "Z"],
- }
- )
+ Timestamp,
+ ],
+ [[1.0, 2.0, 3.0], float],
+ [[1, 2, 3], int],
+ [["X", "Y", "Z"], str],
+ ),
+ )
+ def test_to_dict_orient_dtype(self, data, dtype):
+ # GH22620 & GH21256
- expected = {
- "int": int,
- "float": float,
- "str": str,
- "datetime": Timestamp,
- "bool": bool,
- }
+ df = DataFrame({"a": data})
+ d = df.to_dict(orient="records")
+ assert all(type(record["a"]) is dtype for record in d)
+
+ @pytest.mark.parametrize(
+ "data,expected_dtype",
+ (
+ [np.uint64(2), int],
+ [np.int64(-9), int],
+ [np.float64(1.1), float],
+ [np.bool_(True), bool],
+ [np.datetime64("2005-02-25"), Timestamp],
+ ),
+ )
+ def test_to_dict_scalar_constructor_orient_dtype(self, data, expected_dtype):
+ # GH22620 & GH21256
- for df_dict in df.to_dict("records"):
- result = {col: type(df_dict[col]) for col in list(df.columns)}
- assert result == expected
+ df = DataFrame({"a": data}, index=[0])
+ d = df.to_dict(orient="records")
+ result = type(d[0]["a"])
+ assert result is expected_dtype
diff --git a/pandas/tests/series/methods/test_to_dict.py b/pandas/tests/series/methods/test_to_dict.py
index 47badb0a1bb52..4c3d9592eebe3 100644
--- a/pandas/tests/series/methods/test_to_dict.py
+++ b/pandas/tests/series/methods/test_to_dict.py
@@ -1,5 +1,6 @@
import collections
+import numpy as np
import pytest
from pandas import Series
@@ -20,3 +21,18 @@ def test_to_dict(self, mapping, datetime_series):
from_method = Series(datetime_series.to_dict(collections.Counter))
from_constructor = Series(collections.Counter(datetime_series.items()))
tm.assert_series_equal(from_method, from_constructor)
+
+ @pytest.mark.parametrize(
+ "input",
+ (
+ {"a": np.int64(64), "b": 10},
+ {"a": np.int64(64), "b": 10, "c": "ABC"},
+ {"a": np.uint64(64), "b": 10, "c": "ABC"},
+ ),
+ )
+ def test_to_dict_return_types(self, input):
+ # GH25969
+
+ d = Series(input).to_dict()
+ assert isinstance(d["a"], int)
+ assert isinstance(d["b"], int)
| - [x] xref #25969
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This resolves the issue of return types from `to_dict`. #25969 also discusses return types from `.items()`, which relates to an outstanding NumPy issue numpy/numpy#14139, and I don't address that part here atm | https://api.github.com/repos/pandas-dev/pandas/pulls/37648 | 2020-11-05T15:01:59Z | 2021-02-19T01:12:56Z | 2021-02-19T01:12:56Z | 2023-01-27T15:38:09Z |
BUG: preserve fold in Timestamp.replace | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 690e6b8f725ad..57064bd348aa5 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -412,7 +412,7 @@ Timezones
^^^^^^^^^
- Bug in :func:`date_range` was raising AmbiguousTimeError for valid input with ``ambiguous=False`` (:issue:`35297`)
--
+- Bug in :meth:`Timestamp.replace` was losing fold information (:issue:`37610`)
Numeric
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 88ad008b42c21..e10ac6a05ead8 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -774,7 +774,7 @@ default 'raise'
microsecond : int, optional
nanosecond : int, optional
tzinfo : tz-convertible, optional
- fold : int, optional, default is 0
+ fold : int, optional
Returns
-------
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 9076325d01bab..b3ae69d7a3237 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -1374,7 +1374,7 @@ default 'raise'
microsecond=None,
nanosecond=None,
tzinfo=object,
- fold=0,
+ fold=None,
):
"""
implements datetime.replace, handles nanoseconds.
@@ -1390,7 +1390,7 @@ default 'raise'
microsecond : int, optional
nanosecond : int, optional
tzinfo : tz-convertible, optional
- fold : int, optional, default is 0
+ fold : int, optional
Returns
-------
@@ -1407,6 +1407,11 @@ default 'raise'
# set to naive if needed
tzobj = self.tzinfo
value = self.value
+
+ # GH 37610. Preserve fold when replacing.
+ if fold is None:
+ fold = self.fold
+
if tzobj is not None:
value = tz_convert_from_utc_single(value, tzobj)
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index e8196cd8328e7..88f99a6784ba1 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -424,3 +424,14 @@ def test_timestamp(self):
# should agree with datetime.timestamp method
dt = ts.to_pydatetime()
assert dt.timestamp() == ts.timestamp()
+
+
+@pytest.mark.parametrize("fold", [0, 1])
+def test_replace_preserves_fold(fold):
+ # GH 37610. Check that replace preserves Timestamp fold property
+ tz = gettz("Europe/Moscow")
+
+ ts = Timestamp(year=2009, month=10, day=25, hour=2, minute=30, fold=fold, tzinfo=tz)
+ ts_replaced = ts.replace(second=1)
+
+ assert ts_replaced.fold == fold
| - [X] closes #37610
- [X] 1 tests added / 1 passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
## Problem
We currently lose `fold` information (whether the Timestamp corresponds to the first or second instance of wall clock time in a DST transition) when calling `Timestamp.replace`.
## Solution
A simple addition to the code of `Timestamp.replace` should fix this.
## Test
I added the test for the use case I came up with in the issue discussion. The OP example is losing `fold` when deleting timezone information with `Timetsamp.replace`, and that's not really a bug, but replacing a valid dateutil timezone with itself and losing `fold` definitely is.
The proposed solution fixes the original example as well. I just don't think we should be tracking it in tests, as it's not clear to me why `fold` must be preserved in a `Timestamp` with no timezone information (but it is the convention recommended in [PEP 495, fold section](https://www.python.org/dev/peps/pep-0495/#the-fold-attribute) to let users keep invalid fold and to just ignore it).
## Some details
IIRC, we ignore `fold`, when it doesn't do anything, so we should be safe preserving `fold` while replacing tzinfo with None, as in the OP example. I remember this coming up when we introduced fold support, and we made sure that the fold-aware functions don't care what fold is outside of a DST transition with a dateutil timezone (this was done to satisfy the requirements of [PEP 495, fold section](https://www.python.org/dev/peps/pep-0495/#the-fold-attribute)). | https://api.github.com/repos/pandas-dev/pandas/pulls/37644 | 2020-11-05T09:54:34Z | 2020-11-08T02:22:48Z | 2020-11-08T02:22:48Z | 2020-11-09T13:10:19Z |
REF: de-duplicate _validate_insert_value with _validate_scalar | diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 67ac2a3688214..63c414d96c8de 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -45,7 +45,7 @@ def _box_func(self, x):
"""
return x
- def _validate_insert_value(self, value):
+ def _validate_scalar(self, value):
# used by NDArrayBackedExtensionIndex.insert
raise AbstractMethodError(self)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 626fb495dec03..edbf24ca87f5c 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1177,9 +1177,6 @@ def map(self, mapper):
# -------------------------------------------------------------
# Validators; ideally these can be de-duplicated
- def _validate_insert_value(self, value) -> int:
- return self._validate_fill_value(value)
-
def _validate_searchsorted_value(self, value):
# searchsorted is very performance sensitive. By converting codes
# to same dtype as self.codes, we get much faster performance.
@@ -1219,6 +1216,8 @@ def _validate_fill_value(self, fill_value):
)
return fill_value
+ _validate_scalar = _validate_fill_value
+
# -------------------------------------------------------------
def __array__(self, dtype=None) -> np.ndarray:
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 404511895ddf0..7a0d88f29b9b0 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -479,10 +479,12 @@ def _validate_fill_value(self, fill_value):
f"Got '{str(fill_value)}'."
)
try:
- fill_value = self._validate_scalar(fill_value)
+ return self._validate_scalar(fill_value)
except TypeError as err:
+ if "Cannot compare tz-naive and tz-aware" in str(err):
+ # tzawareness-compat
+ raise
raise ValueError(msg) from err
- return self._unbox(fill_value, setitem=True)
def _validate_shift_value(self, fill_value):
# TODO(2.0): once this deprecation is enforced, use _validate_fill_value
@@ -511,7 +513,14 @@ def _validate_shift_value(self, fill_value):
return self._unbox(fill_value, setitem=True)
- def _validate_scalar(self, value, allow_listlike: bool = False):
+ def _validate_scalar(
+ self,
+ value,
+ *,
+ allow_listlike: bool = False,
+ setitem: bool = True,
+ unbox: bool = True,
+ ):
"""
Validate that the input value can be cast to our scalar_type.
@@ -521,6 +530,11 @@ def _validate_scalar(self, value, allow_listlike: bool = False):
allow_listlike: bool, default False
When raising an exception, whether the message should say
listlike inputs are allowed.
+ setitem : bool, default True
+ Whether to check compatibility with setitem strictness.
+ unbox : bool, default True
+ Whether to unbox the result before returning. Note: unbox=False
+ skips the setitem compatibility check.
Returns
-------
@@ -546,7 +560,12 @@ def _validate_scalar(self, value, allow_listlike: bool = False):
msg = self._validation_error_message(value, allow_listlike)
raise TypeError(msg)
- return value
+ if not unbox:
+ # NB: In general NDArrayBackedExtensionArray will unbox here;
+ # this option exists to prevent a performance hit in
+ # TimedeltaIndex.get_loc
+ return value
+ return self._unbox_scalar(value, setitem=setitem)
def _validation_error_message(self, value, allow_listlike: bool = False) -> str:
"""
@@ -611,7 +630,7 @@ def _validate_listlike(self, value, allow_object: bool = False):
def _validate_searchsorted_value(self, value):
if not is_list_like(value):
- value = self._validate_scalar(value, True)
+ return self._validate_scalar(value, allow_listlike=True, setitem=False)
else:
value = self._validate_listlike(value)
@@ -621,12 +640,7 @@ def _validate_setitem_value(self, value):
if is_list_like(value):
value = self._validate_listlike(value)
else:
- value = self._validate_scalar(value, True)
-
- return self._unbox(value, setitem=True)
-
- def _validate_insert_value(self, value):
- value = self._validate_scalar(value)
+ return self._validate_scalar(value, allow_listlike=True)
return self._unbox(value, setitem=True)
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index f8ece2a9fe7d4..7b10334804ef9 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -889,9 +889,6 @@ def _validate_fillna_value(self, value):
)
raise TypeError(msg) from err
- def _validate_insert_value(self, value):
- return self._validate_scalar(value)
-
def _validate_setitem_value(self, value):
needs_float_conversion = False
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 98ec3b55e65d9..f350e18198057 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2292,7 +2292,7 @@ def fillna(self, value=None, downcast=None):
DataFrame.fillna : Fill NaN values of a DataFrame.
Series.fillna : Fill NaN Values of a Series.
"""
- value = self._validate_scalar(value)
+ value = self._require_scalar(value)
if self.hasnans:
result = self.putmask(self._isnan, value)
if downcast is None:
@@ -4140,7 +4140,7 @@ def _validate_fill_value(self, value):
return value
@final
- def _validate_scalar(self, value):
+ def _require_scalar(self, value):
"""
Check that this is a scalar value that we can use for setitem-like
operations without changing dtype.
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 8cbd0d83c78d7..525c41bae8b51 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -382,7 +382,7 @@ def astype(self, dtype, copy=True):
@doc(Index.fillna)
def fillna(self, value, downcast=None):
- value = self._validate_scalar(value)
+ value = self._require_scalar(value)
cat = self._data.fillna(value)
return type(self)._simple_new(cat, name=self.name)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 9e2ac6013cb43..2cb66557b3bab 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -581,7 +581,7 @@ def _get_insert_freq(self, loc, item):
"""
Find the `freq` for self.insert(loc, item).
"""
- value = self._data._validate_insert_value(item)
+ value = self._data._validate_scalar(item)
item = self._data._box_func(value)
freq = None
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index cd1871e4687f3..921c7aac2c85b 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -335,7 +335,7 @@ def insert(self, loc: int, item):
ValueError if the item is not valid for this dtype.
"""
arr = self._data
- code = arr._validate_insert_value(item)
+ code = arr._validate_scalar(item)
new_vals = np.concatenate((arr._ndarray[:loc], [code], arr._ndarray[loc:]))
new_arr = arr._from_backing_data(new_vals)
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index c700acc24f411..2aec86c9cdfae 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -903,7 +903,7 @@ def insert(self, loc, item):
-------
IntervalIndex
"""
- left_insert, right_insert = self._data._validate_insert_value(item)
+ left_insert, right_insert = self._data._validate_scalar(item)
new_left = self.left.insert(loc, left_insert)
new_right = self.right.insert(loc, right_insert)
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 66fd6943de721..cf5fa4bbb3d75 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -215,7 +215,7 @@ def get_loc(self, key, method=None, tolerance=None):
raise InvalidIndexError(key)
try:
- key = self._data._validate_scalar(key)
+ key = self._data._validate_scalar(key, unbox=False)
except TypeError as err:
raise KeyError(key) from err
| There is only one place where `_validate_insert_value` isnt identical to `_validate_scalar`, and that is in `DatetimeLikeArrayMixin._validate_scalar`, which ATM does not unbox (which all the other `_validate_scalar` methods do). This updates `_validate_scalar` to unbox.
If we stopped there, `TimedeltaIndex.get_loc` would take a pretty significant performance hit, so I added a `unbox=True` kwarg to `_validate_scalar` that TDI.get_loc uses to keep perf flat.
Made `_validate_scalar` keyword-only past the first (non-self) argument.
Last, renamed `Index._validate_scalar` to `Index._require_scalar` to disambiguate the names. (one of the two uses of `Index._validate_scalar` is wrong and will be removed, but thats a whole different can of worms) | https://api.github.com/repos/pandas-dev/pandas/pulls/37640 | 2020-11-05T03:19:43Z | 2020-11-06T00:31:07Z | 2020-11-06T00:31:07Z | 2020-11-06T00:39:03Z |
REF: move get_filepath_buffer into get_handle | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 1c271e74aafba..1bd35131622ab 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -1024,9 +1024,10 @@ Writing CSVs to binary file objects
.. versionadded:: 1.2.0
-``df.to_csv(..., mode="w+b")`` allows writing a CSV to a file object
-opened binary mode. For this to work, it is necessary that ``mode``
-contains a "b":
+``df.to_csv(..., mode="wb")`` allows writing a CSV to a file object
+opened binary mode. In most cases, it is not necessary to specify
+``mode`` as Pandas will auto-detect whether the file object is
+opened in text or binary mode.
.. ipython:: python
@@ -1034,7 +1035,7 @@ contains a "b":
data = pd.DataFrame([0, 1, 2])
buffer = io.BytesIO()
- data.to_csv(buffer, mode="w+b", encoding="utf-8", compression="gzip")
+ data.to_csv(buffer, encoding="utf-8", compression="gzip")
.. _io.float_precision:
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 22a0fb7a45318..00349c2597f54 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -84,7 +84,8 @@ Support for binary file handles in ``to_csv``
:meth:`to_csv` supports file handles in binary mode (:issue:`19827` and :issue:`35058`)
with ``encoding`` (:issue:`13068` and :issue:`23854`) and ``compression`` (:issue:`22555`).
-``mode`` has to contain a ``b`` for binary handles to be supported.
+If Pandas does not automatically detect whether the file handle is opened in binary or text mode,
+it is necessary to provide ``mode="wb"``.
For example:
@@ -94,7 +95,7 @@ For example:
data = pd.DataFrame([0, 1, 2])
buffer = io.BytesIO()
- data.to_csv(buffer, mode="w+b", encoding="utf-8", compression="gzip")
+ data.to_csv(buffer, encoding="utf-8", compression="gzip")
Support for short caption and table position in ``to_latex``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -514,6 +515,7 @@ I/O
- :func:`read_csv` was closing user-provided binary file handles when ``engine="c"`` and an ``encoding`` was requested (:issue:`36980`)
- Bug in :meth:`DataFrame.to_hdf` was not dropping missing rows with ``dropna=True`` (:issue:`35719`)
- Bug in :func:`read_html` was raising a ``TypeError`` when supplying a ``pathlib.Path`` argument to the ``io`` parameter (:issue:`37705`)
+- :meth:`to_excel` and :meth:`to_markdown` support writing to fsspec URLs such as S3 and Google Cloud Storage (:issue:`33987`)
Plotting
^^^^^^^^
diff --git a/pandas/_typing.py b/pandas/_typing.py
index 55a1c17b0aa53..7f01bcaa1c50e 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -146,10 +146,5 @@
CompressionOptions = Optional[Union[str, CompressionDict]]
-# let's bind types
-ModeVar = TypeVar("ModeVar", str, None, Optional[str])
-EncodingVar = TypeVar("EncodingVar", str, None, Optional[str])
-
-
# type of float formatter in DataFrameFormatter
FloatFormatType = Union[str, Callable, "EngFormatter"]
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9ce5ef2fc3cfe..bae06339a1e60 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -157,7 +157,7 @@
from pandas.core.series import Series
from pandas.core.sorting import get_group_index, lexsort_indexer, nargsort
-from pandas.io.common import get_filepath_or_buffer
+from pandas.io.common import get_handle
from pandas.io.formats import console, format as fmt
from pandas.io.formats.info import DataFrameInfo
import pandas.plotting
@@ -2301,10 +2301,10 @@ def to_markdown(
result = tabulate.tabulate(self, **kwargs)
if buf is None:
return result
- ioargs = get_filepath_or_buffer(buf, mode=mode, storage_options=storage_options)
- assert not isinstance(ioargs.filepath_or_buffer, (str, mmap.mmap))
- ioargs.filepath_or_buffer.writelines(result)
- ioargs.close()
+
+ with get_handle(buf, mode, storage_options=storage_options) as handles:
+ assert not isinstance(handles.handle, (str, mmap.mmap))
+ handles.handle.writelines(result)
return None
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 24c1ae971686e..170950e069828 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3221,7 +3221,7 @@ def to_csv(
File path or object, if None is provided the result is returned as
a string. If a non-binary file object is passed, it should be opened
with `newline=''`, disabling universal newlines. If a binary
- file object is passed, `mode` needs to contain a `'b'`.
+ file object is passed, `mode` might need to contain a `'b'`.
.. versionchanged:: 0.24.0
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 910eb23d9a2d0..695c1671abd61 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -8,21 +8,7 @@
import mmap
import os
import pathlib
-from typing import (
- IO,
- TYPE_CHECKING,
- Any,
- AnyStr,
- Dict,
- Generic,
- List,
- Mapping,
- Optional,
- Tuple,
- Type,
- Union,
- cast,
-)
+from typing import IO, Any, AnyStr, Dict, List, Mapping, Optional, Tuple, cast
from urllib.parse import (
urljoin,
urlparse as parse_url,
@@ -37,10 +23,8 @@
Buffer,
CompressionDict,
CompressionOptions,
- EncodingVar,
FileOrBuffer,
FilePathOrBuffer,
- ModeVar,
StorageOptions,
)
from pandas.compat import get_lzma_file, import_lzma
@@ -55,16 +39,10 @@
_VALID_URLS.discard("")
-if TYPE_CHECKING:
- from io import IOBase
-
-
@dataclasses.dataclass
-class IOArgs(Generic[ModeVar, EncodingVar]):
+class IOArgs:
"""
- Return value of io/common.py:get_filepath_or_buffer.
-
- This is used to easily close created fsspec objects.
+ Return value of io/common.py:_get_filepath_or_buffer.
Note (copy&past from io/parsers):
filepath_or_buffer can be Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
@@ -73,29 +51,19 @@ class IOArgs(Generic[ModeVar, EncodingVar]):
"""
filepath_or_buffer: FileOrBuffer
- encoding: EncodingVar
- mode: Union[ModeVar, str]
+ encoding: str
+ mode: str
compression: CompressionDict
should_close: bool = False
- def close(self) -> None:
- """
- Close the buffer if it was created by get_filepath_or_buffer.
- """
- if self.should_close:
- assert not isinstance(self.filepath_or_buffer, str)
- try:
- self.filepath_or_buffer.close()
- except (OSError, ValueError):
- pass
- self.should_close = False
-
@dataclasses.dataclass
class IOHandles:
"""
Return value of io/common.py:get_handle
+ Can be used as a context manager.
+
This is used to easily close created buffers and to handle corner cases when
TextIOWrapper is inserted.
@@ -105,6 +73,7 @@ class IOHandles:
"""
handle: Buffer
+ compression: CompressionDict
created_handles: List[Buffer] = dataclasses.field(default_factory=list)
is_wrapped: bool = False
is_mmap: bool = False
@@ -129,6 +98,12 @@ def close(self) -> None:
self.created_handles = []
self.is_wrapped = False
+ def __enter__(self) -> "IOHandles":
+ return self
+
+ def __exit__(self, *args: Any) -> None:
+ self.close()
+
def is_url(url) -> bool:
"""
@@ -239,18 +214,13 @@ def is_fsspec_url(url: FilePathOrBuffer) -> bool:
)
-# https://github.com/python/mypy/issues/8708
-# error: Incompatible default for argument "encoding" (default has type "None",
-# argument has type "str")
-# error: Incompatible default for argument "mode" (default has type "None",
-# argument has type "str")
-def get_filepath_or_buffer(
+def _get_filepath_or_buffer(
filepath_or_buffer: FilePathOrBuffer,
- encoding: EncodingVar = None, # type: ignore[assignment]
+ encoding: str = "utf-8",
compression: CompressionOptions = None,
- mode: ModeVar = None, # type: ignore[assignment]
+ mode: str = "r",
storage_options: StorageOptions = None,
-) -> IOArgs[ModeVar, EncodingVar]:
+) -> IOArgs:
"""
If the filepath_or_buffer is a url, translate and return the buffer.
Otherwise passthrough.
@@ -284,12 +254,7 @@ def get_filepath_or_buffer(
compression_method = infer_compression(filepath_or_buffer, compression_method)
# GH21227 internal compression is not used for non-binary handles.
- if (
- compression_method
- and hasattr(filepath_or_buffer, "write")
- and mode
- and "b" not in mode
- ):
+ if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode:
warnings.warn(
"compression has no effect when passing a non-binary object as input.",
RuntimeWarning,
@@ -306,8 +271,7 @@ def get_filepath_or_buffer(
# bz2 and xz do not write the byte order mark for utf-16 and utf-32
# print a warning when writing such files
if (
- mode
- and "w" in mode
+ "w" in mode
and compression_method in ["bz2", "xz"]
and encoding in ["utf-16", "utf-32"]
):
@@ -319,7 +283,7 @@ def get_filepath_or_buffer(
# Use binary mode when converting path-like objects to file-like objects (fsspec)
# except when text mode is explicitly requested. The original mode is returned if
# fsspec is not used.
- fsspec_mode = mode or "rb"
+ fsspec_mode = mode
if "t" not in fsspec_mode and "b" not in fsspec_mode:
fsspec_mode += "b"
@@ -504,12 +468,8 @@ def infer_compression(
------
ValueError on invalid compression specified.
"""
- # No compression has been explicitly specified
- if compression is None:
- return None
-
# Infer compression
- if compression == "infer":
+ if compression in ("infer", None):
# Convert all path types (e.g. pathlib.Path) to strings
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
@@ -540,6 +500,7 @@ def get_handle(
memory_map: bool = False,
is_text: bool = True,
errors: Optional[str] = None,
+ storage_options: StorageOptions = None,
) -> IOHandles:
"""
Get file handle for given path/buffer and mode.
@@ -583,66 +544,73 @@ def get_handle(
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
+ storage_options : StorageOptions, default None
+ Passed to _get_filepath_or_buffer
.. versionchanged:: 1.2.0
Returns the dataclass IOHandles
"""
- need_text_wrapping: Tuple[Type["IOBase"], ...]
- try:
- from s3fs import S3File
-
- need_text_wrapping = (BufferedIOBase, RawIOBase, S3File)
- except ImportError:
- need_text_wrapping = (BufferedIOBase, RawIOBase)
- # fsspec is an optional dependency. If it is available, add its file-object
- # class to the list of classes that need text wrapping. If fsspec is too old and is
- # needed, get_filepath_or_buffer would already have thrown an exception.
- try:
- from fsspec.spec import AbstractFileSystem
-
- need_text_wrapping = (*need_text_wrapping, AbstractFileSystem)
- except ImportError:
- pass
-
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
if encoding is None:
encoding = "utf-8"
- # Convert pathlib.Path/py.path.local or string
- handle = stringify_path(path_or_buf)
+ # read_csv does not know whether the buffer is opened in binary/text mode
+ if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
+ mode += "b"
+
+ # open URLs
+ ioargs = _get_filepath_or_buffer(
+ path_or_buf,
+ encoding=encoding,
+ compression=compression,
+ mode=mode,
+ storage_options=storage_options,
+ )
- compression, compression_args = get_compression_method(compression)
- compression = infer_compression(handle, compression)
+ handle = ioargs.filepath_or_buffer
+ handles: List[Buffer]
# memory mapping needs to be the first step
handle, memory_map, handles = _maybe_memory_map(
- handle, memory_map, encoding, mode, errors
+ handle, memory_map, ioargs.encoding, ioargs.mode, errors
)
is_path = isinstance(handle, str)
+ compression_args = dict(ioargs.compression)
+ compression = compression_args.pop("method")
+
if compression:
+ # compression libraries do not like an explicit text-mode
+ ioargs.mode = ioargs.mode.replace("t", "")
+
# GZ Compression
if compression == "gzip":
if is_path:
assert isinstance(handle, str)
- handle = gzip.GzipFile(filename=handle, mode=mode, **compression_args)
+ handle = gzip.GzipFile(
+ filename=handle,
+ mode=ioargs.mode,
+ **compression_args,
+ )
else:
handle = gzip.GzipFile(
fileobj=handle, # type: ignore[arg-type]
- mode=mode,
+ mode=ioargs.mode,
**compression_args,
)
# BZ Compression
elif compression == "bz2":
handle = bz2.BZ2File(
- handle, mode=mode, **compression_args # type: ignore[arg-type]
+ handle, # type: ignore[arg-type]
+ mode=ioargs.mode,
+ **compression_args,
)
# ZIP Compression
elif compression == "zip":
- handle = _BytesZipFile(handle, mode, **compression_args)
+ handle = _BytesZipFile(handle, ioargs.mode, **compression_args)
if handle.mode == "r":
handles.append(handle)
zip_names = handle.namelist()
@@ -658,7 +626,7 @@ def get_handle(
# XZ Compression
elif compression == "xz":
- handle = get_lzma_file(lzma)(handle, mode)
+ handle = get_lzma_file(lzma)(handle, ioargs.mode)
# Unrecognized Compression
else:
@@ -668,42 +636,50 @@ def get_handle(
assert not isinstance(handle, str)
handles.append(handle)
- elif is_path:
+ elif isinstance(handle, str):
# Check whether the filename is to be opened in binary mode.
# Binary mode does not support 'encoding' and 'newline'.
- assert isinstance(handle, str)
- if encoding and "b" not in mode:
+ if ioargs.encoding and "b" not in ioargs.mode:
# Encoding
- handle = open(handle, mode, encoding=encoding, errors=errors, newline="")
+ handle = open(
+ handle,
+ ioargs.mode,
+ encoding=ioargs.encoding,
+ errors=errors,
+ newline="",
+ )
else:
# Binary mode
- handle = open(handle, mode)
+ handle = open(handle, ioargs.mode)
handles.append(handle)
# Convert BytesIO or file objects passed with an encoding
is_wrapped = False
- if is_text and (
- compression
- or isinstance(handle, need_text_wrapping)
- or "b" in getattr(handle, "mode", "")
- ):
+ if is_text and (compression or _is_binary_mode(handle, ioargs.mode)):
handle = TextIOWrapper(
handle, # type: ignore[arg-type]
- encoding=encoding,
+ encoding=ioargs.encoding,
errors=errors,
newline="",
)
handles.append(handle)
- # do not mark as wrapped when the user provided a string
- is_wrapped = not is_path
+ # only marked as wrapped when the caller provided a handle
+ is_wrapped = not (
+ isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close
+ )
handles.reverse() # close the most recently added buffer first
+ if ioargs.should_close:
+ assert not isinstance(ioargs.filepath_or_buffer, str)
+ handles.append(ioargs.filepath_or_buffer)
+
assert not isinstance(handle, str)
return IOHandles(
handle=handle,
created_handles=handles,
is_wrapped=is_wrapped,
is_mmap=memory_map,
+ compression=ioargs.compression,
)
@@ -804,7 +780,7 @@ def _maybe_memory_map(
mode: str,
errors: Optional[str],
) -> Tuple[FileOrBuffer, bool, List[Buffer]]:
- """Try to use memory map file/buffer."""
+ """Try to memory map file/buffer."""
handles: List[Buffer] = []
memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)
if not memory_map:
@@ -834,3 +810,27 @@ def _maybe_memory_map(
memory_map = False
return handle, memory_map, handles
+
+
+def file_exists(filepath_or_buffer: FilePathOrBuffer) -> bool:
+ """Test whether file exists."""
+ exists = False
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
+ if not isinstance(filepath_or_buffer, str):
+ return exists
+ try:
+ exists = os.path.exists(filepath_or_buffer)
+ # gh-5874: if the filepath is too long will raise here
+ except (TypeError, ValueError):
+ pass
+ return exists
+
+
+def _is_binary_mode(handle: FilePathOrBuffer, mode: str) -> bool:
+ """Whether the handle is opened in binary mode"""
+ # classes that expect bytes
+ binary_classes = [BufferedIOBase, RawIOBase]
+
+ return isinstance(handle, tuple(binary_classes)) or "b" in getattr(
+ handle, "mode", mode
+ )
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index dd30bf37793d0..c2e9828e3ea42 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -16,14 +16,7 @@
from pandas.core.frame import DataFrame
-from pandas.io.common import (
- IOArgs,
- get_filepath_or_buffer,
- is_url,
- stringify_path,
- urlopen,
- validate_header_arg,
-)
+from pandas.io.common import IOHandles, get_handle, stringify_path, validate_header_arg
from pandas.io.excel._util import (
fill_mi_header,
get_default_writer,
@@ -313,7 +306,9 @@ def read_excel(
storage_options: StorageOptions = None,
):
+ should_close = False
if not isinstance(io, ExcelFile):
+ should_close = True
io = ExcelFile(io, storage_options=storage_options, engine=engine)
elif engine and engine != io.engine:
raise ValueError(
@@ -321,7 +316,7 @@ def read_excel(
"an ExcelFile - ExcelFile already has the engine set"
)
- return io.parse(
+ data = io.parse(
sheet_name=sheet_name,
header=header,
names=names,
@@ -346,41 +341,29 @@ def read_excel(
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
)
+ if should_close:
+ io.close()
+ return data
class BaseExcelReader(metaclass=abc.ABCMeta):
def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None):
- self.ioargs = IOArgs(
- filepath_or_buffer=filepath_or_buffer,
- encoding=None,
- mode=None,
- compression={"method": None},
+ self.handles = IOHandles(
+ handle=filepath_or_buffer, compression={"method": None}
)
- # If filepath_or_buffer is a url, load the data into a BytesIO
- if is_url(filepath_or_buffer):
- self.ioargs = IOArgs(
- filepath_or_buffer=BytesIO(urlopen(filepath_or_buffer).read()),
- should_close=True,
- encoding=None,
- mode=None,
- compression={"method": None},
- )
- elif not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
- self.ioargs = get_filepath_or_buffer(
- filepath_or_buffer, storage_options=storage_options
+ if not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
+ self.handles = get_handle(
+ filepath_or_buffer, "rb", storage_options=storage_options, is_text=False
)
- if isinstance(self.ioargs.filepath_or_buffer, self._workbook_class):
- self.book = self.ioargs.filepath_or_buffer
- elif hasattr(self.ioargs.filepath_or_buffer, "read"):
+ if isinstance(self.handles.handle, self._workbook_class):
+ self.book = self.handles.handle
+ elif hasattr(self.handles.handle, "read"):
# N.B. xlrd.Book has a read attribute too
- assert not isinstance(self.ioargs.filepath_or_buffer, str)
- self.ioargs.filepath_or_buffer.seek(0)
- self.book = self.load_workbook(self.ioargs.filepath_or_buffer)
- elif isinstance(self.ioargs.filepath_or_buffer, str):
- self.book = self.load_workbook(self.ioargs.filepath_or_buffer)
- elif isinstance(self.ioargs.filepath_or_buffer, bytes):
- self.book = self.load_workbook(BytesIO(self.ioargs.filepath_or_buffer))
+ self.handles.handle.seek(0)
+ self.book = self.load_workbook(self.handles.handle)
+ elif isinstance(self.handles.handle, bytes):
+ self.book = self.load_workbook(BytesIO(self.handles.handle))
else:
raise ValueError(
"Must explicitly set engine if not passing in buffer or path for io."
@@ -396,7 +379,7 @@ def load_workbook(self, filepath_or_buffer):
pass
def close(self):
- self.ioargs.close()
+ self.handles.close()
@property
@abc.abstractmethod
@@ -581,7 +564,7 @@ class ExcelWriter(metaclass=abc.ABCMeta):
Format string for datetime objects written into Excel files.
(e.g. 'YYYY-MM-DD HH:MM:SS').
mode : {'w', 'a'}, default 'w'
- File mode to use (write or append).
+ File mode to use (write or append). Append does not work with fsspec URLs.
.. versionadded:: 0.24.0
@@ -739,7 +722,16 @@ def __init__(
ext = os.path.splitext(path)[-1]
self.check_extension(ext)
- self.path = path
+ # use mode to open the file
+ if "b" not in mode:
+ mode += "b"
+ # use "a" for the user to append data to excel but internally use "r+" to let
+ # the excel backend first read the existing file and then write any data to it
+ mode = mode.replace("a", "r+")
+
+ self.handles = IOHandles(path, compression={"method": None})
+ if not isinstance(path, ExcelWriter):
+ self.handles = get_handle(path, mode, is_text=False)
self.sheets = {}
self.cur_sheet = None
@@ -755,10 +747,7 @@ def __init__(
self.mode = mode
def __fspath__(self):
- # pandas\io\excel\_base.py:744: error: Argument 1 to "stringify_path"
- # has incompatible type "Optional[Any]"; expected "Union[str, Path,
- # IO[Any], IOBase]" [arg-type]
- return stringify_path(self.path) # type: ignore[arg-type]
+ return getattr(self.handles.handle, "name", "")
def _get_sheet_name(self, sheet_name):
if sheet_name is None:
@@ -828,7 +817,9 @@ def __exit__(self, exc_type, exc_value, traceback):
def close(self):
"""synonym for save, to make it more file-like"""
- return self.save()
+ content = self.save()
+ self.handles.close()
+ return content
def _is_ods_stream(stream: Union[BufferedIOBase, RawIOBase]) -> bool:
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 4f9f8a29c0010..c5c3927216850 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -19,7 +19,7 @@ class ODFReader(BaseExcelReader):
filepath_or_buffer : string, path to be parsed or
an open readable stream.
storage_options : dict, optional
- passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``)
+ passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``)
"""
def __init__(
@@ -69,6 +69,7 @@ def get_sheet_by_name(self, name: str):
if table.getAttribute("name") == name:
return table
+ self.close()
raise ValueError(f"sheet {name} not found")
def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:
@@ -190,6 +191,7 @@ def _get_cell_value(self, cell, convert_float: bool) -> Scalar:
result = cast(pd.Timestamp, result)
return result.time()
else:
+ self.close()
raise ValueError(f"Unrecognized type {cell_type}")
def _get_cell_string_value(self, cell) -> str:
diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py
index cbac60dfabaa7..c19d51540d2dd 100644
--- a/pandas/io/excel/_odswriter.py
+++ b/pandas/io/excel/_odswriter.py
@@ -34,7 +34,7 @@ def save(self) -> None:
"""
for sheet in self.sheets.values():
self.book.spreadsheet.addElement(sheet)
- self.book.save(self.path)
+ self.book.save(self.handles.handle)
def write_cells(
self,
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index a5cadf4d93389..f643037dc216a 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -22,10 +22,12 @@ def __init__(self, path, engine=None, mode="w", **engine_kwargs):
super().__init__(path, mode=mode, **engine_kwargs)
- if self.mode == "a": # Load from existing workbook
+ # ExcelWriter replaced "a" by "r+" to allow us to first read the excel file from
+ # the file and later write to it
+ if "r+" in self.mode: # Load from existing workbook
from openpyxl import load_workbook
- self.book = load_workbook(self.path)
+ self.book = load_workbook(self.handles.handle)
else:
# Create workbook object with default optimized_write=True.
self.book = Workbook()
@@ -37,7 +39,7 @@ def save(self):
"""
Save workbook to disk.
"""
- self.book.save(self.path)
+ self.book.save(self.handles.handle)
@classmethod
def _convert_to_style_kwargs(cls, style_dict: dict) -> Dict[str, "Serialisable"]:
@@ -452,7 +454,7 @@ def __init__(
filepath_or_buffer : string, path object or Workbook
Object to be parsed.
storage_options : dict, optional
- passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``)
+ passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``)
"""
import_optional_dependency("openpyxl")
super().__init__(filepath_or_buffer, storage_options=storage_options)
@@ -474,6 +476,7 @@ def close(self):
# https://stackoverflow.com/questions/31416842/
# openpyxl-does-not-close-excel-workbook-in-read-only-mode
self.book.close()
+ super().close()
@property
def sheet_names(self) -> List[str]:
diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py
index ac94f4dd3df74..de4f7bba1a179 100644
--- a/pandas/io/excel/_pyxlsb.py
+++ b/pandas/io/excel/_pyxlsb.py
@@ -20,7 +20,7 @@ def __init__(
filepath_or_buffer : str, path object, or Workbook
Object to be parsed.
storage_options : dict, optional
- passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``)
+ passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``)
"""
import_optional_dependency("pyxlsb")
# This will call load_workbook on the filepath or buffer
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index dfd5dde0329ae..c655db4bc772b 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -18,7 +18,7 @@ def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None):
filepath_or_buffer : string, path object or Workbook
Object to be parsed.
storage_options : dict, optional
- passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``)
+ passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``)
"""
err_msg = "Install xlrd >= 1.0.0 for Excel support"
import_optional_dependency("xlrd", extra=err_msg)
diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py
index 16c4d377d7610..77b631a41371e 100644
--- a/pandas/io/excel/_xlsxwriter.py
+++ b/pandas/io/excel/_xlsxwriter.py
@@ -186,7 +186,7 @@ def __init__(
**engine_kwargs,
)
- self.book = Workbook(path, **engine_kwargs)
+ self.book = Workbook(self.handles.handle, **engine_kwargs)
def save(self):
"""
diff --git a/pandas/io/excel/_xlwt.py b/pandas/io/excel/_xlwt.py
index 3592c2684f5a5..7f0ce3844c099 100644
--- a/pandas/io/excel/_xlwt.py
+++ b/pandas/io/excel/_xlwt.py
@@ -34,7 +34,7 @@ def save(self):
"""
Save workbook to disk.
"""
- self.book.save(self.path)
+ self.book.save(self.handles.handle)
def write_cells(
self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index 198acd5862d45..9e63976bf8cf9 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -7,7 +7,7 @@
from pandas import DataFrame, Int64Index, RangeIndex
-from pandas.io.common import get_filepath_or_buffer
+from pandas.io.common import get_handle
def to_feather(
@@ -41,8 +41,6 @@ def to_feather(
import_optional_dependency("pyarrow")
from pyarrow import feather
- ioargs = get_filepath_or_buffer(path, mode="wb", storage_options=storage_options)
-
if not isinstance(df, DataFrame):
raise ValueError("feather only support IO with DataFrames")
@@ -79,9 +77,10 @@ def to_feather(
if df.columns.inferred_type not in valid_types:
raise ValueError("feather must have string column names")
- feather.write_feather(df, ioargs.filepath_or_buffer, **kwargs)
-
- ioargs.close()
+ with get_handle(
+ path, "wb", storage_options=storage_options, is_text=False
+ ) as handles:
+ feather.write_feather(df, handles.handle, **kwargs)
def read_feather(
@@ -129,12 +128,10 @@ def read_feather(
import_optional_dependency("pyarrow")
from pyarrow import feather
- ioargs = get_filepath_or_buffer(path, storage_options=storage_options)
-
- df = feather.read_feather(
- ioargs.filepath_or_buffer, columns=columns, use_threads=bool(use_threads)
- )
+ with get_handle(
+ path, "rb", storage_options=storage_options, is_text=False
+ ) as handles:
- ioargs.close()
-
- return df
+ return feather.read_feather(
+ handles.handle, columns=columns, use_threads=bool(use_threads)
+ )
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 20226dbb3c9d4..cbe2ed1ed838d 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -28,7 +28,7 @@
from pandas.core.indexes.api import Index
-from pandas.io.common import get_filepath_or_buffer, get_handle
+from pandas.io.common import get_handle
if TYPE_CHECKING:
from pandas.io.formats.format import DataFrameFormatter
@@ -59,13 +59,11 @@ def __init__(
self.obj = self.fmt.frame
- self.ioargs = get_filepath_or_buffer(
- path_or_buf,
- encoding=encoding,
- compression=compression,
- mode=mode,
- storage_options=storage_options,
- )
+ self.filepath_or_buffer = path_or_buf
+ self.encoding = encoding
+ self.compression = compression
+ self.mode = mode
+ self.storage_options = storage_options
self.sep = sep
self.index_label = self._initialize_index_label(index_label)
@@ -227,15 +225,15 @@ def save(self) -> None:
Create the writer & save.
"""
# apply compression and byte/text conversion
- handles = get_handle(
- self.ioargs.filepath_or_buffer,
- self.ioargs.mode,
- encoding=self.ioargs.encoding,
+ with get_handle(
+ self.filepath_or_buffer,
+ self.mode,
+ encoding=self.encoding,
errors=self.errors,
- compression=self.ioargs.compression,
- )
+ compression=self.compression,
+ storage_options=self.storage_options,
+ ) as handles:
- try:
# Note: self.encoding is irrelevant here
self.writer = csvlib.writer(
handles.handle, # type: ignore[arg-type]
@@ -249,12 +247,6 @@ def save(self) -> None:
self._save()
- finally:
- # close compression and byte/text wrapper
- handles.close()
- # close any fsspec-like objects
- self.ioargs.close()
-
def _save(self) -> None:
if self._need_to_save_header:
self._save_header()
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 040279b9f3e67..f30007f6ed907 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -3,7 +3,6 @@
import functools
from io import StringIO
from itertools import islice
-import os
from typing import Any, Callable, Mapping, Optional, Tuple, Type, Union
import numpy as np
@@ -28,9 +27,11 @@
from pandas.io.common import (
IOHandles,
- get_compression_method,
- get_filepath_or_buffer,
+ file_exists,
get_handle,
+ is_fsspec_url,
+ is_url,
+ stringify_path,
)
from pandas.io.json._normalize import convert_to_line_delimits
from pandas.io.json._table_schema import build_table_schema, parse_table_schema
@@ -96,24 +97,11 @@ def to_json(
s = convert_to_line_delimits(s)
if path_or_buf is not None:
- # open fsspec URLs
- ioargs = get_filepath_or_buffer(
- path_or_buf,
- compression=compression,
- mode="wt",
- storage_options=storage_options,
- )
# apply compression and byte/text conversion
- handles = get_handle(
- ioargs.filepath_or_buffer, "w", compression=ioargs.compression
- )
- try:
+ with get_handle(
+ path_or_buf, "wt", compression=compression, storage_options=storage_options
+ ) as handles:
handles.handle.write(s)
- finally:
- # close compression and byte/text wrapper
- handles.close()
- # close any fsspec-like objects
- ioargs.close()
else:
return s
@@ -549,15 +537,8 @@ def read_json(
if convert_axes is None and orient != "table":
convert_axes = True
- ioargs = get_filepath_or_buffer(
- path_or_buf,
- encoding=encoding or "utf-8",
- compression=compression,
- storage_options=storage_options,
- )
-
json_reader = JsonReader(
- ioargs.filepath_or_buffer,
+ path_or_buf,
orient=orient,
typ=typ,
dtype=dtype,
@@ -567,20 +548,18 @@ def read_json(
numpy=numpy,
precise_float=precise_float,
date_unit=date_unit,
- encoding=ioargs.encoding,
+ encoding=encoding,
lines=lines,
chunksize=chunksize,
- compression=ioargs.compression,
+ compression=compression,
nrows=nrows,
+ storage_options=storage_options,
)
if chunksize:
return json_reader
- result = json_reader.read()
- ioargs.close()
-
- return result
+ return json_reader.read()
class JsonReader(abc.Iterator):
@@ -609,11 +588,9 @@ def __init__(
chunksize: Optional[int],
compression: CompressionOptions,
nrows: Optional[int],
+ storage_options: StorageOptions = None,
):
- compression_method, compression = get_compression_method(compression)
- compression = dict(compression, method=compression_method)
-
self.orient = orient
self.typ = typ
self.dtype = dtype
@@ -625,6 +602,7 @@ def __init__(
self.date_unit = date_unit
self.encoding = encoding
self.compression = compression
+ self.storage_options = storage_options
self.lines = lines
self.chunksize = chunksize
self.nrows_seen = 0
@@ -669,20 +647,19 @@ def _get_data_from_filepath(self, filepath_or_buffer):
It returns input types (2) and (3) unchanged.
"""
# if it is a string but the file does not exist, it might be a JSON string
- exists = False
- if isinstance(filepath_or_buffer, str):
- try:
- exists = os.path.exists(filepath_or_buffer)
- # gh-5874: if the filepath is too long will raise here
- except (TypeError, ValueError):
- pass
-
- if exists or not isinstance(filepath_or_buffer, str):
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
+ if (
+ not isinstance(filepath_or_buffer, str)
+ or is_url(filepath_or_buffer)
+ or is_fsspec_url(filepath_or_buffer)
+ or file_exists(filepath_or_buffer)
+ ):
self.handles = get_handle(
filepath_or_buffer,
"r",
encoding=self.encoding,
compression=self.compression,
+ storage_options=self.storage_options,
)
filepath_or_buffer = self.handles.handle
diff --git a/pandas/io/orc.py b/pandas/io/orc.py
index 5a734f0878a0c..d9e9f3e1770be 100644
--- a/pandas/io/orc.py
+++ b/pandas/io/orc.py
@@ -5,7 +5,7 @@
from pandas._typing import FilePathOrBuffer
-from pandas.io.common import get_filepath_or_buffer
+from pandas.io.common import get_handle
if TYPE_CHECKING:
from pandas import DataFrame
@@ -48,10 +48,6 @@ def read_orc(
if distutils.version.LooseVersion(pyarrow.__version__) < "0.13.0":
raise ImportError("pyarrow must be >= 0.13.0 for read_orc")
- import pyarrow.orc
-
- ioargs = get_filepath_or_buffer(path)
- orc_file = pyarrow.orc.ORCFile(ioargs.filepath_or_buffer)
- result = orc_file.read(columns=columns, **kwargs).to_pandas()
- ioargs.close()
- return result
+ with get_handle(path, "rb", is_text=False) as handles:
+ orc_file = pyarrow.orc.ORCFile(handles.handle)
+ return orc_file.read(columns=columns, **kwargs).to_pandas()
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 88f57e18593f2..c76e18ae353a0 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -1,6 +1,7 @@
""" parquet compat """
import io
+import os
from typing import Any, AnyStr, Dict, List, Optional
from warnings import catch_warnings
@@ -10,7 +11,7 @@
from pandas import DataFrame, get_option
-from pandas.io.common import get_filepath_or_buffer, is_fsspec_url, stringify_path
+from pandas.io.common import get_handle, is_fsspec_url, stringify_path
def get_engine(engine: str) -> "BaseImpl":
@@ -102,19 +103,21 @@ def write(
table = self.api.Table.from_pandas(df, **from_pandas_kwargs)
+ path = stringify_path(path)
+ # get_handle could be used here (for write_table, not for write_to_dataset)
+ # but it would complicate the code.
if is_fsspec_url(path) and "filesystem" not in kwargs:
# make fsspec instance, which pyarrow will use to open paths
- import_optional_dependency("fsspec")
- import fsspec.core
+ fsspec = import_optional_dependency("fsspec")
fs, path = fsspec.core.url_to_fs(path, **(storage_options or {}))
kwargs["filesystem"] = fs
- else:
- if storage_options:
- raise ValueError(
- "storage_options passed with file object or non-fsspec file path"
- )
- path = stringify_path(path)
+
+ elif storage_options:
+ raise ValueError(
+ "storage_options passed with file object or non-fsspec file path"
+ )
+
if partition_cols is not None:
# writes to multiple files under the given path
self.api.parquet.write_to_dataset(
@@ -131,32 +134,31 @@ def write(
def read(
self, path, columns=None, storage_options: StorageOptions = None, **kwargs
):
- if is_fsspec_url(path) and "filesystem" not in kwargs:
- import_optional_dependency("fsspec")
- import fsspec.core
+ path = stringify_path(path)
+ handles = None
+ fs = kwargs.pop("filesystem", None)
+ if is_fsspec_url(path) and fs is None:
+ fsspec = import_optional_dependency("fsspec")
fs, path = fsspec.core.url_to_fs(path, **(storage_options or {}))
- should_close = False
- else:
- if storage_options:
- raise ValueError(
- "storage_options passed with buffer or non-fsspec filepath"
- )
- fs = kwargs.pop("filesystem", None)
- should_close = False
- path = stringify_path(path)
-
- if not fs:
- ioargs = get_filepath_or_buffer(path)
- path = ioargs.filepath_or_buffer
- should_close = ioargs.should_close
+ elif storage_options:
+ raise ValueError(
+ "storage_options passed with buffer or non-fsspec filepath"
+ )
+ if not fs and isinstance(path, str) and not os.path.isdir(path):
+ # use get_handle only when we are very certain that it is not a directory
+ # fsspec resources can also point to directories
+ # this branch is used for example when reading from non-fsspec URLs
+ handles = get_handle(path, "rb", is_text=False)
+ path = handles.handle
kwargs["use_pandas_metadata"] = True
result = self.api.parquet.read_table(
path, columns=columns, filesystem=fs, **kwargs
).to_pandas()
- if should_close:
- path.close()
+
+ if handles is not None:
+ handles.close()
return result
@@ -196,6 +198,8 @@ def write(
if partition_cols is not None:
kwargs["file_scheme"] = "hive"
+ # cannot use get_handle as write() does not accept file buffers
+ path = stringify_path(path)
if is_fsspec_url(path):
fsspec = import_optional_dependency("fsspec")
@@ -203,12 +207,10 @@ def write(
kwargs["open_with"] = lambda path, _: fsspec.open(
path, "wb", **(storage_options or {})
).open()
- else:
- if storage_options:
- raise ValueError(
- "storage_options passed with file object or non-fsspec file path"
- )
- path = get_filepath_or_buffer(path).filepath_or_buffer
+ elif storage_options:
+ raise ValueError(
+ "storage_options passed with file object or non-fsspec file path"
+ )
with catch_warnings(record=True):
self.api.write(
@@ -223,18 +225,28 @@ def write(
def read(
self, path, columns=None, storage_options: StorageOptions = None, **kwargs
):
+ path = stringify_path(path)
+ parquet_kwargs = {}
+ handles = None
if is_fsspec_url(path):
fsspec = import_optional_dependency("fsspec")
- open_with = lambda path, _: fsspec.open(
+ parquet_kwargs["open_with"] = lambda path, _: fsspec.open(
path, "rb", **(storage_options or {})
).open()
- parquet_file = self.api.ParquetFile(path, open_with=open_with)
- else:
- path = get_filepath_or_buffer(path).filepath_or_buffer
- parquet_file = self.api.ParquetFile(path)
-
- return parquet_file.to_pandas(columns=columns, **kwargs)
+ elif isinstance(path, str) and not os.path.isdir(path):
+ # use get_handle only when we are very certain that it is not a directory
+ # fsspec resources can also point to directories
+ # this branch is used for example when reading from non-fsspec URLs
+ handles = get_handle(path, "rb", is_text=False)
+ path = handles.handle
+ parquet_file = self.api.ParquetFile(path, **parquet_kwargs)
+
+ result = parquet_file.to_pandas(columns=columns, **kwargs)
+
+ if handles is not None:
+ handles.close()
+ return result
def to_parquet(
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 5725e2304e1d2..d7930f35a1421 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -10,7 +10,18 @@
import re
import sys
from textwrap import fill
-from typing import Any, Dict, Iterable, List, Optional, Sequence, Set
+from typing import (
+ Any,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Set,
+ Type,
+ cast,
+)
import warnings
import numpy as np
@@ -63,7 +74,7 @@
from pandas.core.series import Series
from pandas.core.tools import datetimes as tools
-from pandas.io.common import get_filepath_or_buffer, get_handle, validate_header_arg
+from pandas.io.common import IOHandles, get_handle, stringify_path, validate_header_arg
from pandas.io.date_converters import generic_parser
# BOM character (byte order mark)
@@ -428,17 +439,6 @@ def _validate_names(names):
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
- storage_options = kwds.get("storage_options", None)
-
- ioargs = get_filepath_or_buffer(
- filepath_or_buffer,
- kwds.get("encoding", None),
- kwds.get("compression", "infer"),
- storage_options=storage_options,
- )
- kwds["compression"] = ioargs.compression
- kwds["encoding"] = ioargs.encoding
-
if kwds.get("date_parser", None) is not None:
if isinstance(kwds["parse_dates"], bool):
kwds["parse_dates"] = True
@@ -452,7 +452,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
_validate_names(kwds.get("names", None))
# Create the parser.
- parser = TextFileReader(ioargs.filepath_or_buffer, **kwds)
+ parser = TextFileReader(filepath_or_buffer, **kwds)
if chunksize or iterator:
return parser
@@ -460,10 +460,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
try:
data = parser.read(nrows)
finally:
- # close compression and byte/text wrapper
parser.close()
- # close any fsspec-like objects
- ioargs.close()
return data
@@ -777,7 +774,7 @@ class TextFileReader(abc.Iterator):
def __init__(self, f, engine=None, **kwds):
- self.f = f
+ self.f = stringify_path(f)
if engine is not None:
engine_specified = True
@@ -802,6 +799,7 @@ def __init__(self, f, engine=None, **kwds):
self._currow = 0
options = self._get_options_with_defaults(engine)
+ options["storage_options"] = kwds.get("storage_options", None)
self.chunksize = options.pop("chunksize", None)
self.nrows = options.pop("nrows", None)
@@ -862,14 +860,11 @@ def _get_options_with_defaults(self, engine):
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f):
- next_attr = "__next__"
-
- # The C engine doesn't need the file-like to have the "next" or
- # "__next__" attribute. However, the Python engine explicitly calls
- # "next(...)" when iterating through such an object, meaning it
- # needs to have that attribute ("next" for Python 2.x, "__next__"
- # for Python 3.x)
- if engine != "c" and not hasattr(f, next_attr):
+ # The C engine doesn't need the file-like to have the "__next__"
+ # attribute. However, the Python engine explicitly calls
+ # "__next__(...)" when iterating through such an object, meaning it
+ # needs to have that attribute
+ if engine != "c" and not hasattr(f, "__next__"):
msg = "The 'python' engine cannot iterate through this file buffer."
raise ValueError(msg)
@@ -1037,28 +1032,17 @@ def __next__(self):
raise
def _make_engine(self, engine="c"):
- mapping = {
- # pandas\io\parsers.py:1099: error: Dict entry 0 has incompatible
- # type "str": "Type[CParserWrapper]"; expected "str":
- # "Type[ParserBase]" [dict-item]
- "c": CParserWrapper, # type: ignore[dict-item]
- # pandas\io\parsers.py:1100: error: Dict entry 1 has incompatible
- # type "str": "Type[PythonParser]"; expected "str":
- # "Type[ParserBase]" [dict-item]
- "python": PythonParser, # type: ignore[dict-item]
- # pandas\io\parsers.py:1101: error: Dict entry 2 has incompatible
- # type "str": "Type[FixedWidthFieldParser]"; expected "str":
- # "Type[ParserBase]" [dict-item]
- "python-fwf": FixedWidthFieldParser, # type: ignore[dict-item]
+ mapping: Dict[str, Type[ParserBase]] = {
+ "c": CParserWrapper,
+ "python": PythonParser,
+ "python-fwf": FixedWidthFieldParser,
}
- try:
- klass = mapping[engine]
- except KeyError:
+ if engine not in mapping:
raise ValueError(
f"Unknown engine: {engine} (valid options are {mapping.keys()})"
)
- else:
- return klass(self.f, **self.options)
+ # error: Too many arguments for "ParserBase"
+ return mapping[engine](self.f, **self.options) # type: ignore[call-arg]
def _failover_to_python(self):
raise AbstractMethodError(self)
@@ -1275,13 +1259,14 @@ def _validate_parse_dates_arg(parse_dates):
class ParserBase:
def __init__(self, kwds):
+
self.names = kwds.get("names")
- self.orig_names = None
+ self.orig_names: Optional[List] = None
self.prefix = kwds.pop("prefix", None)
self.index_col = kwds.get("index_col", None)
- self.unnamed_cols = set()
- self.index_names = None
+ self.unnamed_cols: Set = set()
+ self.index_names: Optional[List] = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
@@ -1357,6 +1342,21 @@ def __init__(self, kwds):
self._first_chunk = True
+ self.handles: Optional[IOHandles] = None
+
+ def _open_handles(self, src: FilePathOrBuffer, kwds: Dict[str, Any]) -> None:
+ """
+        Let the readers open IOHandles after they are done with their potential raises.
+ """
+ self.handles = get_handle(
+ src,
+ "r",
+ encoding=kwds.get("encoding", None),
+ compression=kwds.get("compression", None),
+ memory_map=kwds.get("memory_map", False),
+ storage_options=kwds.get("storage_options", None),
+ )
+
def _validate_parse_dates_presence(self, columns: List[str]) -> None:
"""
Check if parse_dates are in columns.
@@ -1406,9 +1406,8 @@ def _validate_parse_dates_presence(self, columns: List[str]) -> None:
)
def close(self):
- # pandas\io\parsers.py:1409: error: "ParserBase" has no attribute
- # "handles" [attr-defined]
- self.handles.close() # type: ignore[attr-defined]
+ if self.handles is not None:
+ self.handles.close()
@property
def _has_complex_date_col(self):
@@ -1842,23 +1841,24 @@ def _do_date_conversions(self, names, data):
class CParserWrapper(ParserBase):
- def __init__(self, src, **kwds):
+ def __init__(self, src: FilePathOrBuffer, **kwds):
self.kwds = kwds
kwds = kwds.copy()
ParserBase.__init__(self, kwds)
- self.handles = get_handle(
- src,
- mode="r",
- encoding=kwds.get("encoding", None),
- compression=kwds.get("compression", None),
- memory_map=kwds.get("memory_map", False),
- is_text=True,
- )
- kwds.pop("encoding", None)
- kwds.pop("memory_map", None)
- kwds.pop("compression", None)
+ # #2442
+ kwds["allow_leading_cols"] = self.index_col is not False
+
+ # GH20529, validate usecol arg before TextReader
+ self.usecols, self.usecols_dtype = _validate_usecols_arg(kwds["usecols"])
+ kwds["usecols"] = self.usecols
+
+ # open handles
+ self._open_handles(src, kwds)
+ assert self.handles is not None
+ for key in ("storage_options", "encoding", "memory_map", "compression"):
+ kwds.pop(key, None)
if self.handles.is_mmap and hasattr(self.handles.handle, "mmap"):
# pandas\io\parsers.py:1861: error: Item "IO[Any]" of
# "Union[IO[Any], RawIOBase, BufferedIOBase, TextIOBase,
@@ -1885,13 +1885,6 @@ def __init__(self, src, **kwds):
# no attribute "mmap" [union-attr]
self.handles.handle = self.handles.handle.mmap # type: ignore[union-attr]
- # #2442
- kwds["allow_leading_cols"] = self.index_col is not False
-
- # GH20529, validate usecol arg before TextReader
- self.usecols, self.usecols_dtype = _validate_usecols_arg(kwds["usecols"])
- kwds["usecols"] = self.usecols
-
self._reader = parsers.TextReader(self.handles.handle, **kwds)
self.unnamed_cols = self._reader.unnamed_cols
@@ -1935,6 +1928,8 @@ def __init__(self, src, **kwds):
usecols = _evaluate_usecols(self.usecols, self.orig_names)
# GH 14671
+ # assert for mypy, orig_names is List or None, None would error in issubset
+ assert self.orig_names is not None
if self.usecols_dtype == "string" and not set(usecols).issubset(
self.orig_names
):
@@ -2015,9 +2010,10 @@ def _set(x):
x = usecols[x]
if not is_integer(x):
- # pandas\io\parsers.py:2037: error: Item "None" of
- # "Optional[Any]" has no attribute "index" [union-attr]
- x = names.index(x) # type: ignore[union-attr]
+ # assert for mypy, names is List or None, None would error when calling
+ # .index()
+ assert names is not None
+ x = names.index(x)
self._reader.set_noconvert(x)
@@ -2112,10 +2108,9 @@ def read(self, nrows=None):
# ugh, mutation
- # pandas\io\parsers.py:2131: error: Argument 1 to "list" has
- # incompatible type "Optional[Any]"; expected "Iterable[Any]"
- # [arg-type]
- names = list(self.orig_names) # type: ignore[arg-type]
+ # assert for mypy, orig_names is List or None, None would error in list(...)
+ assert self.orig_names is not None
+ names = list(self.orig_names)
names = self._maybe_dedup_names(names)
if self.usecols is not None:
@@ -2225,20 +2220,17 @@ def count_empty_vals(vals) -> int:
class PythonParser(ParserBase):
- def __init__(self, f, **kwds):
+ def __init__(self, f: Union[FilePathOrBuffer, List], **kwds):
"""
Workhorse function for processing nested list into DataFrame
"""
ParserBase.__init__(self, kwds)
- self.data = None
- self.buf = []
+ self.data: Optional[Iterator[str]] = None
+ self.buf: List = []
self.pos = 0
self.line_pos = 0
- self.encoding = kwds["encoding"]
- self.compression = kwds["compression"]
- self.memory_map = kwds["memory_map"]
self.skiprows = kwds["skiprows"]
if callable(self.skiprows):
@@ -2278,21 +2270,16 @@ def __init__(self, f, **kwds):
self.decimal = kwds["decimal"]
self.comment = kwds["comment"]
- self._comment_lines = []
-
- self.handles = get_handle(
- f,
- "r",
- encoding=self.encoding,
- compression=self.compression,
- memory_map=self.memory_map,
- )
# Set self.data to something that can read lines.
- if hasattr(self.handles.handle, "readline"):
- self._make_reader(self.handles.handle)
+ if isinstance(f, list):
+ # read_excel: f is a list
+ self.data = cast(Iterator[str], f)
else:
- self.data = self.handles.handle
+ self._open_handles(f, kwds)
+ assert self.handles is not None
+ assert hasattr(self.handles.handle, "readline")
+ self._make_reader(self.handles.handle)
# Get columns in two steps: infer from data, then
# infer column indices from self.usecols if it is specified.
@@ -2429,11 +2416,11 @@ class MyDialect(csv.Dialect):
sniffed = csv.Sniffer().sniff(line)
dia.delimiter = sniffed.delimiter
- # Note: self.encoding is irrelevant here
+ # Note: encoding is irrelevant here
line_rdr = csv.reader(StringIO(line), dialect=dia)
self.buf.extend(list(line_rdr))
- # Note: self.encoding is irrelevant here
+ # Note: encoding is irrelevant here
reader = csv.reader(f, dialect=dia, strict=True)
else:
@@ -2894,10 +2881,9 @@ def _next_line(self):
else:
while self.skipfunc(self.pos):
self.pos += 1
- # pandas\io\parsers.py:2865: error: Argument 1 to "next" has
- # incompatible type "Optional[Any]"; expected "Iterator[Any]"
- # [arg-type]
- next(self.data) # type: ignore[arg-type]
+ # assert for mypy, data is Iterator[str] or None, would error in next
+ assert self.data is not None
+ next(self.data)
while True:
orig_line = self._next_iter_line(row_num=self.pos + 1)
@@ -2958,10 +2944,9 @@ def _next_iter_line(self, row_num):
row_num : The row number of the line being parsed.
"""
try:
- # pandas\io\parsers.py:2926: error: Argument 1 to "next" has
- # incompatible type "Optional[Any]"; expected "Iterator[Any]"
- # [arg-type]
- return next(self.data) # type: ignore[arg-type]
+ # assert for mypy, data is Iterator[str] or None, would error in next
+ assert self.data is not None
+ return next(self.data)
except csv.Error as e:
if self.warn_bad_lines or self.error_bad_lines:
msg = str(e)
@@ -3251,10 +3236,10 @@ def _get_lines(self, rows=None):
try:
if rows is not None:
for _ in range(rows):
- # pandas\io\parsers.py:3209: error: Argument 1 to
- # "next" has incompatible type "Optional[Any]";
- # expected "Iterator[Any]" [arg-type]
- new_rows.append(next(self.data)) # type: ignore[arg-type]
+ # assert for mypy, data is Iterator[str] or None, would
+ # error in next
+ assert self.data is not None
+ new_rows.append(next(self.data))
lines.extend(new_rows)
else:
rows = 0
@@ -3756,11 +3741,7 @@ def __init__(self, f, **kwds):
PythonParser.__init__(self, f, **kwds)
def _make_reader(self, f):
- # pandas\io\parsers.py:3730: error: Incompatible types in assignment
- # (expression has type "FixedWidthReader", variable has type
- # "Union[IO[Any], RawIOBase, BufferedIOBase, TextIOBase, TextIOWrapper,
- # mmap, None]") [assignment]
- self.data = FixedWidthReader( # type: ignore[assignment]
+ self.data = FixedWidthReader(
f,
self.colspecs,
self.delimiter,
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 6fa044b4651a5..7d09029aded1b 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -6,7 +6,7 @@
from pandas._typing import CompressionOptions, FilePathOrBuffer, StorageOptions
from pandas.compat import pickle_compat as pc
-from pandas.io.common import get_filepath_or_buffer, get_handle
+from pandas.io.common import get_handle
def to_pickle(
@@ -86,24 +86,17 @@ def to_pickle(
>>> import os
>>> os.remove("./dummy.pkl")
"""
- ioargs = get_filepath_or_buffer(
+ if protocol < 0:
+ protocol = pickle.HIGHEST_PROTOCOL
+
+ with get_handle(
filepath_or_buffer,
+ "wb",
compression=compression,
- mode="wb",
+ is_text=False,
storage_options=storage_options,
- )
- handles = get_handle(
- ioargs.filepath_or_buffer, "wb", compression=ioargs.compression, is_text=False
- )
- if protocol < 0:
- protocol = pickle.HIGHEST_PROTOCOL
- try:
+ ) as handles:
pickle.dump(obj, handles.handle, protocol=protocol) # type: ignore[arg-type]
- finally:
- # close compression and byte/text wrapper
- handles.close()
- # close any fsspec-like objects
- ioargs.close()
def read_pickle(
@@ -183,35 +176,31 @@ def read_pickle(
>>> import os
>>> os.remove("./dummy.pkl")
"""
- ioargs = get_filepath_or_buffer(
- filepath_or_buffer, compression=compression, storage_options=storage_options
- )
- handles = get_handle(
- ioargs.filepath_or_buffer, "rb", compression=ioargs.compression, is_text=False
- )
-
- # 1) try standard library Pickle
- # 2) try pickle_compat (older pandas version) to handle subclass changes
- # 3) try pickle_compat with latin-1 encoding upon a UnicodeDecodeError
-
- try:
- excs_to_catch = (AttributeError, ImportError, ModuleNotFoundError, TypeError)
- # TypeError for Cython complaints about object.__new__ vs Tick.__new__
+ excs_to_catch = (AttributeError, ImportError, ModuleNotFoundError, TypeError)
+ with get_handle(
+ filepath_or_buffer,
+ "rb",
+ compression=compression,
+ is_text=False,
+ storage_options=storage_options,
+ ) as handles:
+
+ # 1) try standard library Pickle
+ # 2) try pickle_compat (older pandas version) to handle subclass changes
+ # 3) try pickle_compat with latin-1 encoding upon a UnicodeDecodeError
+
try:
- with warnings.catch_warnings(record=True):
- # We want to silence any warnings about, e.g. moved modules.
- warnings.simplefilter("ignore", Warning)
- return pickle.load(handles.handle) # type: ignore[arg-type]
- except excs_to_catch:
- # e.g.
- # "No module named 'pandas.core.sparse.series'"
- # "Can't get attribute '__nat_unpickle' on <module 'pandas._libs.tslib"
- return pc.load(handles.handle, encoding=None)
- except UnicodeDecodeError:
- # e.g. can occur for files written in py27; see GH#28645 and GH#31988
- return pc.load(handles.handle, encoding="latin-1")
- finally:
- # close compression and byte/text wrapper
- handles.close()
- # close any fsspec-like objects
- ioargs.close()
+ # TypeError for Cython complaints about object.__new__ vs Tick.__new__
+ try:
+ with warnings.catch_warnings(record=True):
+ # We want to silence any warnings about, e.g. moved modules.
+ warnings.simplefilter("ignore", Warning)
+ return pickle.load(handles.handle) # type: ignore[arg-type]
+ except excs_to_catch:
+ # e.g.
+ # "No module named 'pandas.core.sparse.series'"
+ # "Can't get attribute '__nat_unpickle' on <module 'pandas._libs.tslib"
+ return pc.load(handles.handle, encoding=None)
+ except UnicodeDecodeError:
+ # e.g. can occur for files written in py27; see GH#28645 and GH#31988
+ return pc.load(handles.handle, encoding="latin-1")
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index e9b74199cbc42..e9c1bf26f6675 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -24,7 +24,7 @@
import pandas as pd
-from pandas.io.common import get_filepath_or_buffer
+from pandas.io.common import get_handle
from pandas.io.sas._sas import Parser
import pandas.io.sas.sas_constants as const
from pandas.io.sas.sasreader import ReaderBase
@@ -168,12 +168,9 @@ def __init__(
self._current_row_on_page_index = 0
self._current_row_in_file_index = 0
- self.ioargs = get_filepath_or_buffer(path_or_buf)
- if isinstance(self.ioargs.filepath_or_buffer, str):
- self.ioargs.filepath_or_buffer = open(path_or_buf, "rb")
- self.ioargs.should_close = True
+ self.handles = get_handle(path_or_buf, "rb", is_text=False)
- self._path_or_buf = cast(IO[Any], self.ioargs.filepath_or_buffer)
+ self._path_or_buf = cast(IO[Any], self.handles.handle)
try:
self._get_properties()
@@ -198,7 +195,7 @@ def column_types(self):
return np.asarray(self._column_types, dtype=np.dtype("S1"))
def close(self):
- self.ioargs.close()
+ self.handles.close()
def _get_properties(self):
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 4303cef2df60d..2f5de16a7ad6c 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -19,7 +19,7 @@
import pandas as pd
-from pandas.io.common import get_filepath_or_buffer
+from pandas.io.common import get_handle
from pandas.io.sas.sasreader import ReaderBase
_correct_line1 = (
@@ -253,13 +253,10 @@ def __init__(
self._index = index
self._chunksize = chunksize
- self.ioargs = get_filepath_or_buffer(filepath_or_buffer, encoding=encoding)
-
- if isinstance(self.ioargs.filepath_or_buffer, str):
- self.ioargs.filepath_or_buffer = open(self.ioargs.filepath_or_buffer, "rb")
- self.ioargs.should_close = True
-
- self.filepath_or_buffer = cast(IO[bytes], self.ioargs.filepath_or_buffer)
+ self.handles = get_handle(
+ filepath_or_buffer, "rb", encoding=encoding, is_text=False
+ )
+ self.filepath_or_buffer = cast(IO[bytes], self.handles.handle)
try:
self._read_header()
@@ -268,7 +265,7 @@ def __init__(
raise
def close(self):
- self.ioargs.close()
+ self.handles.close()
def _get_row(self):
return self.filepath_or_buffer.read(80).decode()
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index 446e2daaa1f9c..3f0370209e9a8 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -6,7 +6,7 @@
from pandas._typing import FilePathOrBuffer, Label
-from pandas.io.common import get_filepath_or_buffer, stringify_path
+from pandas.io.common import stringify_path
if TYPE_CHECKING:
from pandas import DataFrame
@@ -109,25 +109,23 @@ def read_sas(
else:
raise ValueError("unable to infer format of SAS file")
- ioargs = get_filepath_or_buffer(filepath_or_buffer, encoding)
-
reader: ReaderBase
if format.lower() == "xport":
from pandas.io.sas.sas_xport import XportReader
reader = XportReader(
- ioargs.filepath_or_buffer,
+ filepath_or_buffer,
index=index,
- encoding=ioargs.encoding,
+ encoding=encoding,
chunksize=chunksize,
)
elif format.lower() == "sas7bdat":
from pandas.io.sas.sas7bdat import SAS7BDATReader
reader = SAS7BDATReader(
- ioargs.filepath_or_buffer,
+ filepath_or_buffer,
index=index,
- encoding=ioargs.encoding,
+ encoding=encoding,
chunksize=chunksize,
)
else:
@@ -136,7 +134,4 @@ def read_sas(
if iterator or chunksize:
return reader
- try:
- return reader.read()
- finally:
- ioargs.close()
+ return reader.read()
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 7c7997f128086..1f8d9b6213a71 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -53,12 +53,7 @@
from pandas.core.indexes.base import Index
from pandas.core.series import Series
-from pandas.io.common import (
- IOHandles,
- get_filepath_or_buffer,
- get_handle,
- stringify_path,
-)
+from pandas.io.common import get_handle
_version_error = (
"Version of given Stata file is {version}. pandas supports importing "
@@ -1062,20 +1057,15 @@ def __init__(
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
- self.ioargs = get_filepath_or_buffer(
- path_or_buf, storage_options=storage_options
- )
-
- if isinstance(self.ioargs.filepath_or_buffer, (str, bytes)):
- self.ioargs.filepath_or_buffer = open(self.ioargs.filepath_or_buffer, "rb")
- self.ioargs.should_close = True
- elif hasattr(path_or_buf, "read"):
+ with get_handle(
+ path_or_buf,
+ "rb",
+ storage_options=storage_options,
+ is_text=False,
+ ) as handles:
# Copy to BytesIO, and ensure no encoding
- contents = self.ioargs.filepath_or_buffer.read()
- self.ioargs.close()
- self.ioargs.filepath_or_buffer = BytesIO(contents) # type: ignore[arg-type]
- self.ioargs.should_close = True
- self.path_or_buf = cast(BytesIO, self.ioargs.filepath_or_buffer)
+ contents = handles.handle.read()
+ self.path_or_buf = BytesIO(contents) # type: ignore[arg-type]
self._read_header()
self._setup_dtype()
@@ -1090,7 +1080,7 @@ def __exit__(self, exc_type, exc_value, traceback) -> None:
def close(self) -> None:
""" close the handle if its open """
- self.ioargs.close()
+ self.path_or_buf.close()
def _set_encoding(self) -> None:
"""
@@ -1932,48 +1922,6 @@ def read_stata(
return data
-def _open_file_binary_write(
- fname: FilePathOrBuffer,
- compression: CompressionOptions,
- storage_options: StorageOptions = None,
-) -> Tuple[IOHandles, CompressionOptions]:
- """
- Open a binary file or no-op if file-like.
-
- Parameters
- ----------
- fname : string path, path object or buffer
- The file name or buffer.
- compression : {str, dict, None}
- The compression method to use.
-
- storage_options : dict, optional
- Extra options that make sense for a particular storage connection, e.g.
- host, port, username, password, etc., if using a URL that will
- be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
- will be raised if providing this argument with a local path or
- a file-like buffer. See the fsspec and backend storage implementation
- docs for the set of allowed keys and values
-
- .. versionadded:: 1.2.0
- """
- ioargs = get_filepath_or_buffer(
- fname, mode="wb", compression=compression, storage_options=storage_options
- )
- handles = get_handle(
- ioargs.filepath_or_buffer,
- "wb",
- compression=ioargs.compression,
- is_text=False,
- )
- if ioargs.filepath_or_buffer != fname and not isinstance(
- ioargs.filepath_or_buffer, str
- ):
- # add handle created by get_filepath_or_buffer
- handles.created_handles.append(ioargs.filepath_or_buffer)
- return handles, ioargs.compression
-
-
def _set_endianness(endianness: str) -> str:
if endianness.lower() in ["<", "little"]:
return "<"
@@ -2231,7 +2179,7 @@ def __init__(
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
- self._fname = stringify_path(fname)
+ self._fname = fname
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
self._converted_names: Dict[Label, str] = {}
@@ -2511,45 +2459,53 @@ def _encode_strings(self) -> None:
self.data[col] = encoded
def write_file(self) -> None:
- self.handles, compression = _open_file_binary_write(
- self._fname, self._compression, storage_options=self.storage_options
- )
- if compression is not None:
- # ZipFile creates a file (with the same name) for each write call.
- # Write it first into a buffer and then write the buffer to the ZipFile.
- self._output_file = self.handles.handle
- self.handles.handle = BytesIO()
- try:
- self._write_header(data_label=self._data_label, time_stamp=self._time_stamp)
- self._write_map()
- self._write_variable_types()
- self._write_varnames()
- self._write_sortlist()
- self._write_formats()
- self._write_value_label_names()
- self._write_variable_labels()
- self._write_expansion_fields()
- self._write_characteristics()
- records = self._prepare_data()
- self._write_data(records)
- self._write_strls()
- self._write_value_labels()
- self._write_file_close_tag()
- self._write_map()
- except Exception as exc:
- self._close()
- if isinstance(self._fname, (str, Path)):
- try:
- os.unlink(self._fname)
- except OSError:
- warnings.warn(
- f"This save was not successful but {self._fname} could not "
- "be deleted. This file is not valid.",
- ResourceWarning,
- )
- raise exc
- else:
- self._close()
+ with get_handle(
+ self._fname,
+ "wb",
+ compression=self._compression,
+ is_text=False,
+ storage_options=self.storage_options,
+ ) as self.handles:
+
+ if self.handles.compression["method"] is not None:
+ # ZipFile creates a file (with the same name) for each write call.
+ # Write it first into a buffer and then write the buffer to the ZipFile.
+ self._output_file = self.handles.handle
+ self.handles.handle = BytesIO()
+
+ try:
+ self._write_header(
+ data_label=self._data_label, time_stamp=self._time_stamp
+ )
+ self._write_map()
+ self._write_variable_types()
+ self._write_varnames()
+ self._write_sortlist()
+ self._write_formats()
+ self._write_value_label_names()
+ self._write_variable_labels()
+ self._write_expansion_fields()
+ self._write_characteristics()
+ records = self._prepare_data()
+ self._write_data(records)
+ self._write_strls()
+ self._write_value_labels()
+ self._write_file_close_tag()
+ self._write_map()
+ except Exception as exc:
+ self._close()
+ if isinstance(self._fname, (str, Path)):
+ try:
+ os.unlink(self._fname)
+ except OSError:
+ warnings.warn(
+ f"This save was not successful but {self._fname} could not "
+ "be deleted. This file is not valid.",
+ ResourceWarning,
+ )
+ raise exc
+ else:
+ self._close()
def _close(self) -> None:
"""
@@ -2566,8 +2522,6 @@ def _close(self) -> None:
self.handles.handle = self._output_file
self.handles.handle.write(bio.read()) # type: ignore[arg-type]
bio.close()
- # close any created handles
- self.handles.close()
def _write_map(self) -> None:
"""No-op, future compatibility"""
diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py
index 3103f6e1ba0b1..7babc6853aef3 100644
--- a/pandas/tests/frame/methods/test_to_csv.py
+++ b/pandas/tests/frame/methods/test_to_csv.py
@@ -1034,12 +1034,12 @@ def test_to_csv_compression(self, df, encoding, compression):
tm.assert_frame_equal(df, result)
# test the round trip using file handle - to_csv -> read_csv
- handles = get_handle(
+ with get_handle(
filename, "w", compression=compression, encoding=encoding
- )
- df.to_csv(handles.handle, encoding=encoding)
- assert not handles.handle.closed
- handles.close()
+ ) as handles:
+ df.to_csv(handles.handle, encoding=encoding)
+ assert not handles.handle.closed
+
result = pd.read_csv(
filename,
compression=compression,
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 0a297286aa208..00e41a19a7980 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -525,7 +525,7 @@ def test_sheets(self, frame, tsframe, path):
writer = ExcelWriter(path)
frame.to_excel(writer, "test1")
tsframe.to_excel(writer, "test2")
- writer.save()
+ writer.close()
reader = ExcelFile(path)
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
tm.assert_frame_equal(frame, recons)
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index 3584ec047d4d2..a9673ded7c377 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -602,19 +602,22 @@ def test_to_csv_errors(self, errors):
# No use in reading back the data as it is not the same anymore
# due to the error handling
- def test_to_csv_binary_handle(self):
+ @pytest.mark.parametrize("mode", ["wb", "w"])
+ def test_to_csv_binary_handle(self, mode):
"""
- Binary file objects should work if 'mode' contains a 'b'.
+ Binary file objects should work (if 'mode' contains a 'b') or even without
+ it in most cases.
GH 35058 and GH 19827
"""
df = tm.makeDataFrame()
with tm.ensure_clean() as path:
with open(path, mode="w+b") as handle:
- df.to_csv(handle, mode="w+b")
+ df.to_csv(handle, mode=mode)
tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))
- def test_to_csv_encoding_binary_handle(self):
+ @pytest.mark.parametrize("mode", ["wb", "w"])
+ def test_to_csv_encoding_binary_handle(self, mode):
"""
Binary file objects should honor a specified encoding.
@@ -626,14 +629,14 @@ def test_to_csv_encoding_binary_handle(self):
df = pd.read_csv(buffer, encoding="utf-8-sig")
buffer = io.BytesIO()
- df.to_csv(buffer, mode="w+b", encoding="utf-8-sig", index=False)
+ df.to_csv(buffer, mode=mode, encoding="utf-8-sig", index=False)
buffer.seek(0) # tests whether file handle wasn't closed
assert buffer.getvalue().startswith(content)
# example from GH 13068
with tm.ensure_clean() as path:
with open(path, "w+b") as handle:
- DataFrame().to_csv(handle, mode="w+b", encoding="utf-8-sig")
+ DataFrame().to_csv(handle, mode=mode, encoding="utf-8-sig")
handle.seek(0)
assert handle.read().startswith(b'\xef\xbb\xbf""')
diff --git a/pandas/tests/io/parser/test_compression.py b/pandas/tests/io/parser/test_compression.py
index b773664adda72..5680669f75aa3 100644
--- a/pandas/tests/io/parser/test_compression.py
+++ b/pandas/tests/io/parser/test_compression.py
@@ -75,7 +75,7 @@ def test_zip_error_invalid_zip(parser_and_data):
parser, _, _ = parser_and_data
with tm.ensure_clean() as path:
- with open(path, "wb") as f:
+ with open(path, "rb") as f:
with pytest.raises(zipfile.BadZipfile, match="File is not a zip file"):
parser.read_csv(f, compression="zip")
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index 2a6f3d1ad9380..c7a7101b5fe17 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -106,20 +106,19 @@ def test_infer_compression_from_path(self, extension, expected, path_type):
assert compression == expected
@pytest.mark.parametrize("path_type", [str, CustomFSPath, Path])
- def test_get_filepath_or_buffer_with_path(self, path_type):
+ def test_get_handle_with_path(self, path_type):
# ignore LocalPath: it creates strange paths: /absolute/~/sometest
filename = path_type("~/sometest")
- ioargs = icom.get_filepath_or_buffer(filename)
- assert ioargs.filepath_or_buffer != filename
- assert os.path.isabs(ioargs.filepath_or_buffer)
- assert os.path.expanduser(filename) == ioargs.filepath_or_buffer
- assert not ioargs.should_close
+ with icom.get_handle(filename, "w") as handles:
+ assert os.path.isabs(handles.handle.name)
+ assert os.path.expanduser(filename) == handles.handle.name
- def test_get_filepath_or_buffer_with_buffer(self):
+ def test_get_handle_with_buffer(self):
input_buffer = StringIO()
- ioargs = icom.get_filepath_or_buffer(input_buffer)
- assert ioargs.filepath_or_buffer == input_buffer
- assert not ioargs.should_close
+ with icom.get_handle(input_buffer, "r") as handles:
+ assert handles.handle == input_buffer
+ assert not input_buffer.closed
+ input_buffer.close()
def test_iterator(self):
reader = pd.read_csv(StringIO(self.data1), chunksize=1)
diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py
index 43a31ff1e4b58..158504082e657 100644
--- a/pandas/tests/io/test_compression.py
+++ b/pandas/tests/io/test_compression.py
@@ -47,18 +47,14 @@ def test_compression_size(obj, method, compression_only):
@pytest.mark.parametrize("method", ["to_csv", "to_json"])
def test_compression_size_fh(obj, method, compression_only):
with tm.ensure_clean() as path:
- handles = icom.get_handle(path, "w", compression=compression_only)
- getattr(obj, method)(handles.handle)
- assert not handles.handle.closed
- handles.close()
- assert handles.handle.closed
+ with icom.get_handle(path, "w", compression=compression_only) as handles:
+ getattr(obj, method)(handles.handle)
+ assert not handles.handle.closed
compressed_size = os.path.getsize(path)
with tm.ensure_clean() as path:
- handles = icom.get_handle(path, "w", compression=None)
- getattr(obj, method)(handles.handle)
- assert not handles.handle.closed
- handles.close()
- assert handles.handle.closed
+ with icom.get_handle(path, "w", compression=None) as handles:
+ getattr(obj, method)(handles.handle)
+ assert not handles.handle.closed
uncompressed_size = os.path.getsize(path)
assert uncompressed_size > compressed_size
@@ -111,10 +107,9 @@ def test_compression_warning(compression_only):
columns=["X", "Y", "Z"],
)
with tm.ensure_clean() as path:
- handles = icom.get_handle(path, "w", compression=compression_only)
- with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False):
- df.to_csv(handles.handle, compression=compression_only)
- handles.close()
+ with icom.get_handle(path, "w", compression=compression_only) as handles:
+ with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False):
+ df.to_csv(handles.handle, compression=compression_only)
def test_compression_binary(compression_only):
diff --git a/pandas/tests/io/test_fsspec.py b/pandas/tests/io/test_fsspec.py
index f8081a6a69e83..312ea5abdfe39 100644
--- a/pandas/tests/io/test_fsspec.py
+++ b/pandas/tests/io/test_fsspec.py
@@ -7,6 +7,7 @@
DataFrame,
date_range,
read_csv,
+ read_excel,
read_feather,
read_json,
read_parquet,
@@ -66,11 +67,53 @@ def test_reasonable_error(monkeypatch, cleared_fs):
def test_to_csv(cleared_fs):
df1.to_csv("memory://test/test.csv", index=True)
+
df2 = read_csv("memory://test/test.csv", parse_dates=["dt"], index_col=0)
tm.assert_frame_equal(df1, df2)
+@pytest.mark.parametrize("ext", ["xls", "xlsx"])
+def test_to_excel(cleared_fs, ext):
+ if ext == "xls":
+ pytest.importorskip("xlwt")
+ else:
+ pytest.importorskip("openpyxl")
+
+ path = f"memory://test/test.{ext}"
+ df1.to_excel(path, index=True)
+
+ df2 = read_excel(path, parse_dates=["dt"], index_col=0)
+
+ tm.assert_frame_equal(df1, df2)
+
+
+@pytest.mark.parametrize("binary_mode", [False, True])
+def test_to_csv_fsspec_object(cleared_fs, binary_mode):
+ fsspec = pytest.importorskip("fsspec")
+
+ path = "memory://test/test.csv"
+ mode = "wb" if binary_mode else "w"
+ fsspec_object = fsspec.open(path, mode=mode).open()
+
+ df1.to_csv(fsspec_object, index=True)
+ assert not fsspec_object.closed
+ fsspec_object.close()
+
+ mode = mode.replace("w", "r")
+ fsspec_object = fsspec.open(path, mode=mode).open()
+
+ df2 = read_csv(
+ fsspec_object,
+ parse_dates=["dt"],
+ index_col=0,
+ )
+ assert not fsspec_object.closed
+ fsspec_object.close()
+
+ tm.assert_frame_equal(df1, df2)
+
+
def test_csv_options(fsspectest):
df = DataFrame({"a": [0]})
df.to_csv(
diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py
index 65e174cd32e22..10b3f7ce2cd0b 100644
--- a/pandas/tests/io/test_gcs.py
+++ b/pandas/tests/io/test_gcs.py
@@ -4,7 +4,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, date_range, read_csv
+from pandas import DataFrame, date_range, read_csv, read_excel, read_json, read_parquet
import pandas._testing as tm
from pandas.util import _test_decorators as td
@@ -24,35 +24,23 @@ def open(*args, **kwargs):
gcs_buffer.seek(0)
return gcs_buffer
+ def ls(self, path, **kwargs):
+ # needed for pyarrow
+ return [{"name": path, "type": "file"}]
+
monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
return gcs_buffer
@td.skip_if_no("gcsfs")
-def test_read_csv_gcs(gcs_buffer):
- from fsspec import registry
-
- registry.target.clear() # remove state
-
- df1 = DataFrame(
- {
- "int": [1, 3],
- "float": [2.0, np.nan],
- "str": ["t", "s"],
- "dt": date_range("2018-06-18", periods=2),
- }
- )
-
- gcs_buffer.write(df1.to_csv(index=False).encode())
-
- df2 = read_csv("gs://test/test.csv", parse_dates=["dt"])
-
- tm.assert_frame_equal(df1, df2)
-
+@pytest.mark.parametrize("format", ["csv", "json", "parquet", "excel", "markdown"])
+def test_to_read_gcs(gcs_buffer, format):
+ """
+ Test that many to/read functions support GCS.
-@td.skip_if_no("gcsfs")
-def test_to_csv_gcs(gcs_buffer):
+ GH 33987
+ """
from fsspec import registry
registry.target.clear() # remove state
@@ -66,9 +54,26 @@ def test_to_csv_gcs(gcs_buffer):
}
)
- df1.to_csv("gs://test/test.csv", index=True)
-
- df2 = read_csv("gs://test/test.csv", parse_dates=["dt"], index_col=0)
+ path = f"gs://test/test.{format}"
+
+ if format == "csv":
+ df1.to_csv(path, index=True)
+ df2 = read_csv(path, parse_dates=["dt"], index_col=0)
+ elif format == "excel":
+ path = "gs://test/test.xls"
+ df1.to_excel(path)
+ df2 = read_excel(path, parse_dates=["dt"], index_col=0)
+ elif format == "json":
+ df1.to_json(path)
+ df2 = read_json(path, convert_dates=["dt"])
+ elif format == "parquet":
+ pytest.importorskip("pyarrow")
+ df1.to_parquet(path)
+ df2 = read_parquet(path)
+ elif format == "markdown":
+ pytest.importorskip("tabulate")
+ df1.to_markdown(path)
+ df2 = df1
tm.assert_frame_equal(df1, df2)
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 285601b37b80f..123e115cd2f2a 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -339,6 +339,17 @@ def check_error_on_write(self, df, engine, exc):
with pytest.raises(exc):
to_parquet(df, path, engine, compression=None)
+ @tm.network
+ def test_parquet_read_from_url(self, df_compat, engine):
+ if engine != "auto":
+ pytest.importorskip(engine)
+ url = (
+ "https://raw.githubusercontent.com/pandas-dev/pandas/"
+ "master/pandas/tests/io/data/parquet/simple.parquet"
+ )
+ df = pd.read_parquet(url)
+ tm.assert_frame_equal(df, df_compat)
+
class TestBasic(Base):
def test_error(self, engine):
@@ -653,16 +664,6 @@ def test_s3_roundtrip_for_dir(
repeat=1,
)
- @tm.network
- @td.skip_if_no("pyarrow")
- def test_parquet_read_from_url(self, df_compat):
- url = (
- "https://raw.githubusercontent.com/pandas-dev/pandas/"
- "master/pandas/tests/io/data/parquet/simple.parquet"
- )
- df = pd.read_parquet(url)
- tm.assert_frame_equal(df, df_compat)
-
@td.skip_if_no("pyarrow")
def test_read_file_like_obj_support(self, df_compat):
buffer = BytesIO()
@@ -704,9 +705,7 @@ def test_partition_cols_string(self, pa, df_full):
assert len(dataset.partitions.partition_names) == 1
assert dataset.partitions.partition_names == set(partition_cols_list)
- @pytest.mark.parametrize(
- "path_type", [lambda path: path, lambda path: pathlib.Path(path)]
- )
+ @pytest.mark.parametrize("path_type", [str, pathlib.Path])
def test_partition_cols_pathlib(self, pa, df_compat, path_type):
# GH 35902
diff --git a/pandas/tests/series/methods/test_to_csv.py b/pandas/tests/series/methods/test_to_csv.py
index 714173158f4d6..72db87362584d 100644
--- a/pandas/tests/series/methods/test_to_csv.py
+++ b/pandas/tests/series/methods/test_to_csv.py
@@ -143,11 +143,11 @@ def test_to_csv_compression(self, s, encoding, compression):
tm.assert_series_equal(s, result)
# test the round trip using file handle - to_csv -> read_csv
- handles = get_handle(
+ with get_handle(
filename, "w", compression=compression, encoding=encoding
- )
- s.to_csv(handles.handle, encoding=encoding, header=True)
- handles.close()
+ ) as handles:
+ s.to_csv(handles.handle, encoding=encoding, header=True)
+
result = pd.read_csv(
filename,
compression=compression,
| - [x] closes #33987
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
This PR makes `get_filepath_buffer` a private function and is called inside `get_handle` - one function to rule all of IO ;)
This PR will make it easier for future PRs to make the IO-related interface of `read/to_*` more consistent as most of them should support compression/memory mapping (for reading)/(binary) file handles/storage options.
Notes to keep track of future follow-up PRs:
- context manager for `get_handle`
- `storage_options` for `to_excel` | https://api.github.com/repos/pandas-dev/pandas/pulls/37639 | 2020-11-05T03:10:46Z | 2020-11-13T13:39:29Z | 2020-11-13T13:39:29Z | 2020-12-11T04:34:31Z |
TST/REF: collect indexing tests by method | diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 9eaa0d0ae6876..4214ac14cba49 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -66,21 +66,6 @@ def test_getitem_dupe_cols(self):
with pytest.raises(KeyError, match=re.escape(msg)):
df[["baf"]]
- @pytest.mark.parametrize("key_type", [iter, np.array, Series, Index])
- def test_loc_iterable(self, float_frame, key_type):
- idx = key_type(["A", "B", "C"])
- result = float_frame.loc[:, idx]
- expected = float_frame.loc[:, ["A", "B", "C"]]
- tm.assert_frame_equal(result, expected)
-
- def test_loc_timedelta_0seconds(self):
- # GH#10583
- df = DataFrame(np.random.normal(size=(10, 4)))
- df.index = pd.timedelta_range(start="0s", periods=10, freq="s")
- expected = df.loc[pd.Timedelta("0s") :, :]
- result = df.loc["0s":, :]
- tm.assert_frame_equal(expected, result)
-
@pytest.mark.parametrize(
"idx_type",
[
@@ -125,28 +110,20 @@ def test_getitem_listlike(self, idx_type, levels, float_frame):
with pytest.raises(KeyError, match="not in index"):
frame[idx]
- @pytest.mark.parametrize(
- "val,expected", [(2 ** 63 - 1, Series([1])), (2 ** 63, Series([2]))]
- )
- def test_loc_uint64(self, val, expected):
- # see gh-19399
- df = DataFrame([1, 2], index=[2 ** 63 - 1, 2 ** 63])
- result = df.loc[val]
-
- expected.name = val
- tm.assert_series_equal(result, expected)
-
def test_getitem_callable(self, float_frame):
# GH 12533
result = float_frame[lambda x: "A"]
- tm.assert_series_equal(result, float_frame.loc[:, "A"])
+ expected = float_frame.loc[:, "A"]
+ tm.assert_series_equal(result, expected)
result = float_frame[lambda x: ["A", "B"]]
+ expected = float_frame.loc[:, ["A", "B"]]
tm.assert_frame_equal(result, float_frame.loc[:, ["A", "B"]])
df = float_frame[:3]
result = df[lambda x: [True, False, True]]
- tm.assert_frame_equal(result, float_frame.iloc[[0, 2], :])
+ expected = float_frame.iloc[[0, 2], :]
+ tm.assert_frame_equal(result, expected)
def test_setitem_list(self, float_frame):
@@ -181,11 +158,6 @@ def test_setitem_list(self, float_frame):
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
- def test_setitem_list_not_dataframe(self, float_frame):
- data = np.random.randn(len(float_frame), 2)
- float_frame[["A", "B"]] = data
- tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
-
def test_setitem_list_of_tuples(self, float_frame):
tuples = list(zip(float_frame["A"], float_frame["B"]))
float_frame["tuples"] = tuples
@@ -273,14 +245,6 @@ def test_setitem_multi_index(self):
df[("joe", "last")] = df[("jolie", "first")].loc[i, j]
tm.assert_frame_equal(df[("joe", "last")], df[("jolie", "first")])
- def test_setitem_callable(self):
- # GH 12533
- df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]})
- df[lambda x: "A"] = [11, 12, 13, 14]
-
- exp = DataFrame({"A": [11, 12, 13, 14], "B": [5, 6, 7, 8]})
- tm.assert_frame_equal(df, exp)
-
def test_setitem_other_callable(self):
# GH 13299
def inc(x):
@@ -518,18 +482,13 @@ def test_setitem(self, float_frame):
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
- @pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
- def test_setitem_dtype(self, dtype, float_frame):
- arr = np.random.randn(len(float_frame))
-
- float_frame[dtype] = np.array(arr, dtype=dtype)
- assert float_frame[dtype].dtype.name == dtype
-
def test_setitem_tuple(self, float_frame):
float_frame["A", "B"] = float_frame["A"]
- tm.assert_series_equal(
- float_frame["A", "B"], float_frame["A"], check_names=False
- )
+ assert ("A", "B") in float_frame.columns
+
+ result = float_frame["A", "B"]
+ expected = float_frame["A"]
+ tm.assert_series_equal(result, expected, check_names=False)
def test_setitem_always_copy(self, float_frame):
s = float_frame["A"].copy()
@@ -588,25 +547,6 @@ def test_setitem_boolean(self, float_frame):
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
- @pytest.mark.parametrize(
- "mask_type",
- [lambda df: df > np.abs(df) / 2, lambda df: (df > np.abs(df) / 2).values],
- ids=["dataframe", "array"],
- )
- def test_setitem_boolean_mask(self, mask_type, float_frame):
-
- # Test for issue #18582
- df = float_frame.copy()
- mask = mask_type(df)
-
- # index with boolean mask
- result = df.copy()
- result[mask] = np.nan
-
- expected = df.copy()
- expected.values[np.array(mask)] = np.nan
- tm.assert_frame_equal(result, expected)
-
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
@@ -821,19 +761,6 @@ def test_getitem_empty_frame_with_boolean(self):
df2 = df[df > 0]
tm.assert_frame_equal(df, df2)
- def test_slice_floats(self):
- index = [52195.504153, 52196.303147, 52198.369883]
- df = DataFrame(np.random.rand(3, 2), index=index)
-
- s1 = df.loc[52195.1:52196.5]
- assert len(s1) == 2
-
- s1 = df.loc[52195.1:52196.6]
- assert len(s1) == 2
-
- s1 = df.loc[52195.1:52198.9]
- assert len(s1) == 3
-
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
@@ -883,15 +810,6 @@ def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
assert (float_frame["C"] == 4).all()
- def test_setitem_slice_position(self):
- # GH#31469
- df = DataFrame(np.zeros((100, 1)))
- df[-4:] = 1
- arr = np.zeros((100, 1))
- arr[-4:] = 1
- expected = DataFrame(arr)
- tm.assert_frame_equal(df, expected)
-
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
@@ -1000,14 +918,13 @@ def test_getitem_fancy_ints(self, float_frame):
expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
tm.assert_frame_equal(result, expected)
- def test_getitem_setitem_fancy_exceptions(self, float_frame):
- ix = float_frame.iloc
+ def test_iloc_getitem_setitem_fancy_exceptions(self, float_frame):
with pytest.raises(IndexingError, match="Too many indexers"):
- ix[:, :, :]
+ float_frame.iloc[:, :, :]
with pytest.raises(IndexError, match="too many indices for array"):
# GH#32257 we let numpy do validation, get their exception
- ix[:, :, :] = 1
+ float_frame.iloc[:, :, :] = 1
def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index e1ce10970f07b..cb04a61b9e1cb 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -22,6 +22,18 @@
class TestDataFrameSetItem:
+ @pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
+ def test_setitem_dtype(self, dtype, float_frame):
+ arr = np.random.randn(len(float_frame))
+
+ float_frame[dtype] = np.array(arr, dtype=dtype)
+ assert float_frame[dtype].dtype.name == dtype
+
+ def test_setitem_list_not_dataframe(self, float_frame):
+ data = np.random.randn(len(float_frame), 2)
+ float_frame[["A", "B"]] = data
+ tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
+
def test_setitem_error_msmgs(self):
# GH 7432
@@ -285,3 +297,45 @@ def test_iloc_setitem_bool_indexer(self, klass):
df.iloc[indexer, 1] = df.iloc[indexer, 1] * 2
expected = DataFrame({"flag": ["x", "y", "z"], "value": [2, 3, 4]})
tm.assert_frame_equal(df, expected)
+
+
+class TestDataFrameSetItemSlicing:
+ def test_setitem_slice_position(self):
+ # GH#31469
+ df = DataFrame(np.zeros((100, 1)))
+ df[-4:] = 1
+ arr = np.zeros((100, 1))
+ arr[-4:] = 1
+ expected = DataFrame(arr)
+ tm.assert_frame_equal(df, expected)
+
+
+class TestDataFrameSetItemCallable:
+ def test_setitem_callable(self):
+ # GH#12533
+ df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]})
+ df[lambda x: "A"] = [11, 12, 13, 14]
+
+ exp = DataFrame({"A": [11, 12, 13, 14], "B": [5, 6, 7, 8]})
+ tm.assert_frame_equal(df, exp)
+
+
+class TestDataFrameSetItemBooleanMask:
+ @pytest.mark.parametrize(
+ "mask_type",
+ [lambda df: df > np.abs(df) / 2, lambda df: (df > np.abs(df) / 2).values],
+ ids=["dataframe", "array"],
+ )
+ def test_setitem_boolean_mask(self, mask_type, float_frame):
+
+ # Test for issue #18582
+ df = float_frame.copy()
+ mask = mask_type(df)
+
+ # index with boolean mask
+ result = df.copy()
+ result[mask] = np.nan
+
+ expected = df.copy()
+ expected.values[np.array(mask)] = np.nan
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/indexing/test_sparse.py b/pandas/tests/frame/indexing/test_sparse.py
index c0cd7faafb4db..47e4ae1f9f9e1 100644
--- a/pandas/tests/frame/indexing/test_sparse.py
+++ b/pandas/tests/frame/indexing/test_sparse.py
@@ -1,12 +1,6 @@
-import numpy as np
-import pytest
-
-import pandas.util._test_decorators as td
-
import pandas as pd
import pandas._testing as tm
from pandas.arrays import SparseArray
-from pandas.core.arrays.sparse import SparseDtype
class TestSparseDataFrameIndexing:
@@ -23,34 +17,3 @@ def test_getitem_sparse_column(self):
result = df.loc[:, "A"]
tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize("spmatrix_t", ["coo_matrix", "csc_matrix", "csr_matrix"])
- @pytest.mark.parametrize("dtype", [np.int64, np.float64, complex])
- @td.skip_if_no_scipy
- def test_loc_getitem_from_spmatrix(self, spmatrix_t, dtype):
- import scipy.sparse
-
- spmatrix_t = getattr(scipy.sparse, spmatrix_t)
-
- # The bug is triggered by a sparse matrix with purely sparse columns. So the
- # recipe below generates a rectangular matrix of dimension (5, 7) where all the
- # diagonal cells are ones, meaning the last two columns are purely sparse.
- rows, cols = 5, 7
- spmatrix = spmatrix_t(np.eye(rows, cols, dtype=dtype), dtype=dtype)
- df = pd.DataFrame.sparse.from_spmatrix(spmatrix)
-
- # regression test for #34526
- itr_idx = range(2, rows)
- result = df.loc[itr_idx].values
- expected = spmatrix.toarray()[itr_idx]
- tm.assert_numpy_array_equal(result, expected)
-
- # regression test for #34540
- result = df.loc[itr_idx].dtypes.values
- expected = np.full(cols, SparseDtype(dtype, fill_value=0))
- tm.assert_numpy_array_equal(result, expected)
-
- def test_all_sparse(self):
- df = pd.DataFrame({"A": pd.array([0, 0], dtype=pd.SparseDtype("int64"))})
- result = df.loc[[0, 1]]
- tm.assert_frame_equal(result, df)
diff --git a/pandas/tests/indexing/test_at.py b/pandas/tests/indexing/test_at.py
index 9c2d88f1589c2..2e06d8c73d7d1 100644
--- a/pandas/tests/indexing/test_at.py
+++ b/pandas/tests/indexing/test_at.py
@@ -1,14 +1,41 @@
from datetime import datetime, timezone
-import pandas as pd
+import numpy as np
+import pytest
+
+from pandas import DataFrame
import pandas._testing as tm
def test_at_timezone():
# https://github.com/pandas-dev/pandas/issues/33544
- result = pd.DataFrame({"foo": [datetime(2000, 1, 1)]})
+ result = DataFrame({"foo": [datetime(2000, 1, 1)]})
result.at[0, "foo"] = datetime(2000, 1, 2, tzinfo=timezone.utc)
- expected = pd.DataFrame(
+ expected = DataFrame(
{"foo": [datetime(2000, 1, 2, tzinfo=timezone.utc)]}, dtype=object
)
tm.assert_frame_equal(result, expected)
+
+
+class TestAtWithDuplicates:
+ def test_at_with_duplicate_axes_requires_scalar_lookup(self):
+ # GH#33041 check that falling back to loc doesn't allow non-scalar
+ # args to slip in
+
+ arr = np.random.randn(6).reshape(3, 2)
+ df = DataFrame(arr, columns=["A", "A"])
+
+ msg = "Invalid call for scalar access"
+ with pytest.raises(ValueError, match=msg):
+ df.at[[1, 2]]
+ with pytest.raises(ValueError, match=msg):
+ df.at[1, ["A"]]
+ with pytest.raises(ValueError, match=msg):
+ df.at[:, "A"]
+
+ with pytest.raises(ValueError, match=msg):
+ df.at[[1, 2]] = 1
+ with pytest.raises(ValueError, match=msg):
+ df.at[1, ["A"]] = 1
+ with pytest.raises(ValueError, match=msg):
+ df.at[:, "A"] = 1
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 854ca176fd2f4..6cdd73d37aec8 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -73,16 +73,6 @@ def test_loc_scalar(self):
with pytest.raises(KeyError, match="^1$"):
df.loc[1]
- def test_getitem_scalar(self):
-
- cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
-
- s = Series([1, 2], index=cats)
-
- expected = s.iloc[0]
- result = s[cats[0]]
- assert result == expected
-
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index 4879f805b5a2d..fad3478499929 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -1,6 +1,3 @@
-from datetime import date, datetime, timedelta
-
-from dateutil import tz
import numpy as np
import pytest
@@ -206,26 +203,6 @@ def test_partial_setting_with_datetimelike_dtype(self):
df.loc[mask, "C"] = df.loc[mask].index
tm.assert_frame_equal(df, expected)
- def test_loc_setitem_datetime(self):
-
- # GH 9516
- dt1 = Timestamp("20130101 09:00:00")
- dt2 = Timestamp("20130101 10:00:00")
-
- for conv in [
- lambda x: x,
- lambda x: x.to_datetime64(),
- lambda x: x.to_pydatetime(),
- lambda x: np.datetime64(x),
- ]:
-
- df = DataFrame()
- df.loc[conv(dt1), "one"] = 100
- df.loc[conv(dt2), "one"] = 200
-
- expected = DataFrame({"one": [100.0, 200.0]}, index=[dt1, dt2])
- tm.assert_frame_equal(df, expected)
-
def test_series_partial_set_datetime(self):
# GH 11497
@@ -245,7 +222,8 @@ def test_series_partial_set_datetime(self):
exp = Series(
[0.2, 0.2, 0.1], index=pd.DatetimeIndex(keys, name="idx"), name="s"
)
- tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
+ result = ser.loc[keys]
+ tm.assert_series_equal(result, exp, check_index_type=True)
keys = [
Timestamp("2011-01-03"),
@@ -273,7 +251,8 @@ def test_series_partial_set_period(self):
pd.Period("2011-01-01", freq="D"),
]
exp = Series([0.2, 0.2, 0.1], index=pd.PeriodIndex(keys, name="idx"), name="s")
- tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
+ result = ser.loc[keys]
+ tm.assert_series_equal(result, exp, check_index_type=True)
keys = [
pd.Period("2011-01-03", freq="D"),
@@ -297,33 +276,6 @@ def test_nanosecond_getitem_setitem_with_tz(self):
expected = DataFrame(-1, index=index, columns=["a"])
tm.assert_frame_equal(result, expected)
- def test_loc_getitem_across_dst(self):
- # GH 21846
- idx = pd.date_range(
- "2017-10-29 01:30:00", tz="Europe/Berlin", periods=5, freq="30 min"
- )
- series2 = Series([0, 1, 2, 3, 4], index=idx)
-
- t_1 = Timestamp("2017-10-29 02:30:00+02:00", tz="Europe/Berlin", freq="30min")
- t_2 = Timestamp("2017-10-29 02:00:00+01:00", tz="Europe/Berlin", freq="30min")
- result = series2.loc[t_1:t_2]
- expected = Series([2, 3], index=idx[2:4])
- tm.assert_series_equal(result, expected)
-
- result = series2[t_1]
- expected = 2
- assert result == expected
-
- def test_loc_incremental_setitem_with_dst(self):
- # GH 20724
- base = datetime(2015, 11, 1, tzinfo=tz.gettz("US/Pacific"))
- idxs = [base + timedelta(seconds=i * 900) for i in range(16)]
- result = Series([0], index=[idxs[0]])
- for ts in idxs:
- result.loc[ts] = 1
- expected = Series(1, index=idxs)
- tm.assert_series_equal(result, expected)
-
def test_loc_setitem_with_existing_dst(self):
# GH 18308
start = Timestamp("2017-10-29 00:00:00+0200", tz="Europe/Madrid")
@@ -339,39 +291,3 @@ def test_loc_setitem_with_existing_dst(self):
dtype=object,
)
tm.assert_frame_equal(result, expected)
-
- def test_loc_str_slicing(self):
- ix = pd.period_range(start="2017-01-01", end="2018-01-01", freq="M")
- ser = ix.to_series()
- result = ser.loc[:"2017-12"]
- expected = ser.iloc[:-1]
-
- tm.assert_series_equal(result, expected)
-
- def test_loc_label_slicing(self):
- ix = pd.period_range(start="2017-01-01", end="2018-01-01", freq="M")
- ser = ix.to_series()
- result = ser.loc[: ix[-2]]
- expected = ser.iloc[:-1]
-
- tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize(
- "slice_, positions",
- [
- [slice(date(2018, 1, 1), None), [0, 1, 2]],
- [slice(date(2019, 1, 2), None), [2]],
- [slice(date(2020, 1, 1), None), []],
- [slice(None, date(2020, 1, 1)), [0, 1, 2]],
- [slice(None, date(2019, 1, 1)), [0]],
- ],
- )
- def test_getitem_slice_date(self, slice_, positions):
- # https://github.com/pandas-dev/pandas/issues/31501
- s = Series(
- [0, 1, 2],
- pd.DatetimeIndex(["2019-01-01", "2019-01-01T06:00:00", "2019-01-02"]),
- )
- result = s[slice_]
- expected = s.take(positions)
- tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index c1a5db992d3df..fff4c0f78f38a 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1,15 +1,28 @@
""" test label based indexing with loc """
-from datetime import time
+from datetime import datetime, time, timedelta
from io import StringIO
import re
+from dateutil.tz import gettz
import numpy as np
import pytest
from pandas.compat.numpy import is_numpy_dev
+import pandas.util._test_decorators as td
import pandas as pd
-from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range
+from pandas import (
+ DataFrame,
+ Index,
+ MultiIndex,
+ Series,
+ SparseDtype,
+ Timedelta,
+ Timestamp,
+ date_range,
+ timedelta_range,
+ to_datetime,
+)
import pandas._testing as tm
from pandas.api.types import is_scalar
from pandas.tests.indexing.common import Base
@@ -1014,6 +1027,73 @@ def test_loc_getitem_time_object(self, frame_or_series):
expected.index = expected.index._with_freq(None)
tm.assert_equal(result, expected)
+ @pytest.mark.parametrize("spmatrix_t", ["coo_matrix", "csc_matrix", "csr_matrix"])
+ @pytest.mark.parametrize("dtype", [np.int64, np.float64, complex])
+ @td.skip_if_no_scipy
+ def test_loc_getitem_range_from_spmatrix(self, spmatrix_t, dtype):
+ import scipy.sparse
+
+ spmatrix_t = getattr(scipy.sparse, spmatrix_t)
+
+ # The bug is triggered by a sparse matrix with purely sparse columns. So the
+ # recipe below generates a rectangular matrix of dimension (5, 7) where all the
+ # diagonal cells are ones, meaning the last two columns are purely sparse.
+ rows, cols = 5, 7
+ spmatrix = spmatrix_t(np.eye(rows, cols, dtype=dtype), dtype=dtype)
+ df = DataFrame.sparse.from_spmatrix(spmatrix)
+
+ # regression test for GH#34526
+ itr_idx = range(2, rows)
+ result = df.loc[itr_idx].values
+ expected = spmatrix.toarray()[itr_idx]
+ tm.assert_numpy_array_equal(result, expected)
+
+ # regression test for GH#34540
+ result = df.loc[itr_idx].dtypes.values
+ expected = np.full(cols, SparseDtype(dtype, fill_value=0))
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_loc_getitem_listlike_all_retains_sparse(self):
+ df = DataFrame({"A": pd.array([0, 0], dtype=SparseDtype("int64"))})
+ result = df.loc[[0, 1]]
+ tm.assert_frame_equal(result, df)
+
+ @pytest.mark.parametrize("key_type", [iter, np.array, Series, Index])
+ def test_loc_getitem_iterable(self, float_frame, key_type):
+ idx = key_type(["A", "B", "C"])
+ result = float_frame.loc[:, idx]
+ expected = float_frame.loc[:, ["A", "B", "C"]]
+ tm.assert_frame_equal(result, expected)
+
+ def test_loc_getitem_timedelta_0seconds(self):
+ # GH#10583
+ df = DataFrame(np.random.normal(size=(10, 4)))
+ df.index = timedelta_range(start="0s", periods=10, freq="s")
+ expected = df.loc[Timedelta("0s") :, :]
+ result = df.loc["0s":, :]
+ tm.assert_frame_equal(expected, result)
+
+ @pytest.mark.parametrize(
+ "val,expected", [(2 ** 63 - 1, Series([1])), (2 ** 63, Series([2]))]
+ )
+ def test_loc_getitem_uint64_scalar(self, val, expected):
+ # see GH#19399
+ df = DataFrame([1, 2], index=[2 ** 63 - 1, 2 ** 63])
+ result = df.loc[val]
+
+ expected.name = val
+ tm.assert_series_equal(result, expected)
+
+ def test_loc_setitem_int_label_with_float64index(self):
+ # note labels are floats
+ ser = Series(["a", "b", "c"], index=[0, 0.5, 1])
+ tmp = ser.copy()
+
+ ser.loc[1] = "zoo"
+ tmp.iloc[2] = "zoo"
+
+ tm.assert_series_equal(ser, tmp)
+
class TestLocWithMultiIndex:
@pytest.mark.parametrize(
@@ -1103,6 +1183,11 @@ def test_loc_setitem_multiindex_slice(self):
tm.assert_series_equal(result, expected)
+ def test_loc_getitem_slice_datetime_objs_with_datetimeindex(self):
+ times = date_range("2000-01-01", freq="10min", periods=100000)
+ ser = Series(range(100000), times)
+ ser.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)]
+
class TestLocSetitemWithExpansion:
@pytest.mark.slow
@@ -1113,6 +1198,59 @@ def test_loc_setitem_with_expansion_large_dataframe(self):
expected = DataFrame({"x": range(10 ** 6 + 1)}, dtype="int64")
tm.assert_frame_equal(result, expected)
+ def test_loc_setitem_empty_series(self):
+ # GH#5226
+
+ # partially set with an empty object series
+ ser = Series(dtype=object)
+ ser.loc[1] = 1
+ tm.assert_series_equal(ser, Series([1], index=[1]))
+ ser.loc[3] = 3
+ tm.assert_series_equal(ser, Series([1, 3], index=[1, 3]))
+
+ ser = Series(dtype=object)
+ ser.loc[1] = 1.0
+ tm.assert_series_equal(ser, Series([1.0], index=[1]))
+ ser.loc[3] = 3.0
+ tm.assert_series_equal(ser, Series([1.0, 3.0], index=[1, 3]))
+
+ ser = Series(dtype=object)
+ ser.loc["foo"] = 1
+ tm.assert_series_equal(ser, Series([1], index=["foo"]))
+ ser.loc["bar"] = 3
+ tm.assert_series_equal(ser, Series([1, 3], index=["foo", "bar"]))
+ ser.loc[3] = 4
+ tm.assert_series_equal(ser, Series([1, 3, 4], index=["foo", "bar", 3]))
+
+ def test_loc_setitem_incremental_with_dst(self):
+ # GH#20724
+ base = datetime(2015, 11, 1, tzinfo=gettz("US/Pacific"))
+ idxs = [base + timedelta(seconds=i * 900) for i in range(16)]
+ result = Series([0], index=[idxs[0]])
+ for ts in idxs:
+ result.loc[ts] = 1
+ expected = Series(1, index=idxs)
+ tm.assert_series_equal(result, expected)
+
+ def test_loc_setitem_datetime_keys_cast(self):
+ # GH#9516
+ dt1 = Timestamp("20130101 09:00:00")
+ dt2 = Timestamp("20130101 10:00:00")
+
+ for conv in [
+ lambda x: x,
+ lambda x: x.to_datetime64(),
+ lambda x: x.to_pydatetime(),
+ lambda x: np.datetime64(x),
+ ]:
+
+ df = DataFrame()
+ df.loc[conv(dt1), "one"] = 100
+ df.loc[conv(dt2), "one"] = 200
+
+ expected = DataFrame({"one": [100.0, 200.0]}, index=[dt1, dt2])
+ tm.assert_frame_equal(df, expected)
+
class TestLocCallable:
def test_frame_loc_getitem_callable(self):
@@ -1280,6 +1418,85 @@ def test_frame_loc_setitem_callable(self):
tm.assert_frame_equal(res, exp)
+class TestPartialStringSlicing:
+ def test_loc_getitem_partial_string_slicing_datetimeindex(self):
+ # GH#35509
+ df = DataFrame(
+ {"col1": ["a", "b", "c"], "col2": [1, 2, 3]},
+ index=to_datetime(["2020-08-01", "2020-07-02", "2020-08-05"]),
+ )
+ expected = DataFrame(
+ {"col1": ["a", "c"], "col2": [1, 3]},
+ index=to_datetime(["2020-08-01", "2020-08-05"]),
+ )
+ result = df.loc["2020-08"]
+ tm.assert_frame_equal(result, expected)
+
+ def test_loc_getitem_partial_string_slicing_with_periodindex(self):
+ pi = pd.period_range(start="2017-01-01", end="2018-01-01", freq="M")
+ ser = pi.to_series()
+ result = ser.loc[:"2017-12"]
+ expected = ser.iloc[:-1]
+
+ tm.assert_series_equal(result, expected)
+
+ def test_loc_getitem_partial_string_slicing_with_timedeltaindex(self):
+ ix = timedelta_range(start="1 day", end="2 days", freq="1H")
+ ser = ix.to_series()
+ result = ser.loc[:"1 days"]
+ expected = ser.iloc[:-1]
+
+ tm.assert_series_equal(result, expected)
+
+
+class TestLabelSlicing:
+ def test_loc_getitem_label_slice_across_dst(self):
+ # GH#21846
+ idx = date_range(
+ "2017-10-29 01:30:00", tz="Europe/Berlin", periods=5, freq="30 min"
+ )
+ series2 = Series([0, 1, 2, 3, 4], index=idx)
+
+ t_1 = Timestamp("2017-10-29 02:30:00+02:00", tz="Europe/Berlin", freq="30min")
+ t_2 = Timestamp("2017-10-29 02:00:00+01:00", tz="Europe/Berlin", freq="30min")
+ result = series2.loc[t_1:t_2]
+ expected = Series([2, 3], index=idx[2:4])
+ tm.assert_series_equal(result, expected)
+
+ result = series2[t_1]
+ expected = 2
+ assert result == expected
+
+ def test_loc_getitem_label_slice_period(self):
+ ix = pd.period_range(start="2017-01-01", end="2018-01-01", freq="M")
+ ser = ix.to_series()
+ result = ser.loc[: ix[-2]]
+ expected = ser.iloc[:-1]
+
+ tm.assert_series_equal(result, expected)
+
+ def test_loc_getitem_label_slice_timedelta64(self):
+ ix = timedelta_range(start="1 day", end="2 days", freq="1H")
+ ser = ix.to_series()
+ result = ser.loc[: ix[-2]]
+ expected = ser.iloc[:-1]
+
+ tm.assert_series_equal(result, expected)
+
+ def test_loc_getitem_slice_floats_inexact(self):
+ index = [52195.504153, 52196.303147, 52198.369883]
+ df = DataFrame(np.random.rand(3, 2), index=index)
+
+ s1 = df.loc[52195.1:52196.5]
+ assert len(s1) == 2
+
+ s1 = df.loc[52195.1:52196.6]
+ assert len(s1) == 2
+
+ s1 = df.loc[52195.1:52198.9]
+ assert len(s1) == 3
+
+
def test_series_loc_getitem_label_list_missing_values():
# gh-11428
key = np.array(
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 80b7947eb5239..01db937153b3a 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -351,31 +351,6 @@ def test_partial_set_invalid(self):
tm.assert_index_equal(df.index, Index(orig.index.tolist() + ["a"]))
assert df.index.dtype == "object"
- def test_partial_set_empty_series(self):
-
- # GH5226
-
- # partially set with an empty object series
- s = Series(dtype=object)
- s.loc[1] = 1
- tm.assert_series_equal(s, Series([1], index=[1]))
- s.loc[3] = 3
- tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
-
- s = Series(dtype=object)
- s.loc[1] = 1.0
- tm.assert_series_equal(s, Series([1.0], index=[1]))
- s.loc[3] = 3.0
- tm.assert_series_equal(s, Series([1.0, 3.0], index=[1, 3]))
-
- s = Series(dtype=object)
- s.loc["foo"] = 1
- tm.assert_series_equal(s, Series([1], index=["foo"]))
- s.loc["bar"] = 3
- tm.assert_series_equal(s, Series([1, 3], index=["foo", "bar"]))
- s.loc[3] = 4
- tm.assert_series_equal(s, Series([1, 3, 4], index=["foo", "bar", 3]))
-
def test_partial_set_empty_frame(self):
# partially set with an empty object
@@ -504,10 +479,12 @@ def test_partial_set_empty_frame_set_series(self):
# GH 5756
# setting with empty Series
df = DataFrame(Series(dtype=object))
- tm.assert_frame_equal(df, DataFrame({0: Series(dtype=object)}))
+ expected = DataFrame({0: Series(dtype=object)})
+ tm.assert_frame_equal(df, expected)
df = DataFrame(Series(name="foo", dtype=object))
- tm.assert_frame_equal(df, DataFrame({"foo": Series(dtype=object)}))
+ expected = DataFrame({"foo": Series(dtype=object)})
+ tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_empty_copy_assignment(self):
# GH 5932
@@ -565,19 +542,17 @@ def test_partial_set_empty_frame_empty_consistencies(self):
],
)
def test_loc_with_list_of_strings_representing_datetimes(
- self, idx, labels, expected_idx
+ self, idx, labels, expected_idx, frame_or_series
):
# GH 11278
- s = Series(range(20), index=idx)
- df = DataFrame(range(20), index=idx)
+ obj = frame_or_series(range(20), index=idx)
expected_value = [3, 7, 11]
- expected_s = Series(expected_value, expected_idx)
- expected_df = DataFrame(expected_value, expected_idx)
+ expected = frame_or_series(expected_value, expected_idx)
- tm.assert_series_equal(expected_s, s.loc[labels])
- tm.assert_series_equal(expected_s, s[labels])
- tm.assert_frame_equal(expected_df, df.loc[labels])
+ tm.assert_equal(expected, obj.loc[labels])
+ if frame_or_series is Series:
+ tm.assert_series_equal(expected, obj[labels])
@pytest.mark.parametrize(
"idx,labels",
@@ -651,16 +626,6 @@ def test_loc_with_list_of_strings_representing_datetimes_not_matched_type(
with pytest.raises(KeyError, match=msg):
df.loc[labels]
- def test_indexing_timeseries_regression(self):
- # Issue 34860
- arr = date_range("1/1/2008", "1/1/2009")
- result = arr.to_series()["2008"]
-
- rng = date_range(start="2008-01-01", end="2008-12-31")
- expected = Series(rng, index=rng)
-
- tm.assert_series_equal(result, expected)
-
def test_index_name_empty(self):
# GH 31368
df = DataFrame({}, index=pd.RangeIndex(0, name="df_index"))
@@ -689,16 +654,3 @@ def test_slice_irregular_datetime_index_with_nan(self):
expected = DataFrame(range(len(index[:3])), index=index[:3])
result = df["2012-01-01":"2012-01-04"]
tm.assert_frame_equal(result, expected)
-
- def test_slice_datetime_index(self):
- # GH35509
- df = DataFrame(
- {"col1": ["a", "b", "c"], "col2": [1, 2, 3]},
- index=pd.to_datetime(["2020-08-01", "2020-07-02", "2020-08-05"]),
- )
- expected = DataFrame(
- {"col1": ["a", "c"], "col2": [1, 3]},
- index=pd.to_datetime(["2020-08-01", "2020-08-05"]),
- )
- result = df.loc["2020-08"]
- tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index 4337f01ea33e0..72296bb222a5a 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -146,28 +146,6 @@ def test_frame_at_with_duplicate_axes(self):
expected = Series([2.0, 2.0], index=["A", "A"], name=1)
tm.assert_series_equal(df.iloc[1], expected)
- def test_frame_at_with_duplicate_axes_requires_scalar_lookup(self):
- # GH#33041 check that falling back to loc doesn't allow non-scalar
- # args to slip in
-
- arr = np.random.randn(6).reshape(3, 2)
- df = DataFrame(arr, columns=["A", "A"])
-
- msg = "Invalid call for scalar access"
- with pytest.raises(ValueError, match=msg):
- df.at[[1, 2]]
- with pytest.raises(ValueError, match=msg):
- df.at[1, ["A"]]
- with pytest.raises(ValueError, match=msg):
- df.at[:, "A"]
-
- with pytest.raises(ValueError, match=msg):
- df.at[[1, 2]] = 1
- with pytest.raises(ValueError, match=msg):
- df.at[1, ["A"]] = 1
- with pytest.raises(ValueError, match=msg):
- df.at[:, "A"] = 1
-
def test_series_at_raises_type_error(self):
# at should not fallback
# GH 7814
diff --git a/pandas/tests/indexing/test_timedelta.py b/pandas/tests/indexing/test_timedelta.py
index 7da368e4bb321..9461bb74b2a87 100644
--- a/pandas/tests/indexing/test_timedelta.py
+++ b/pandas/tests/indexing/test_timedelta.py
@@ -104,19 +104,3 @@ def test_roundtrip_thru_setitem(self):
assert expected == result
tm.assert_frame_equal(df, df_copy)
-
- def test_loc_str_slicing(self):
- ix = pd.timedelta_range(start="1 day", end="2 days", freq="1H")
- ser = ix.to_series()
- result = ser.loc[:"1 days"]
- expected = ser.iloc[:-1]
-
- tm.assert_series_equal(result, expected)
-
- def test_loc_slicing(self):
- ix = pd.timedelta_range(start="1 day", end="2 days", freq="1H")
- ser = ix.to_series()
- result = ser.loc[: ix[-2]]
- expected = ser.iloc[:-1]
-
- tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index c25b8936c1b29..b2fc2e2d0619d 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -4,14 +4,23 @@
from datetime import datetime, timedelta
import re
+from dateutil.tz import gettz, tzutc
import numpy as np
import pytest
+import pytz
-from pandas._libs import iNaT
-import pandas._libs.index as _index
+from pandas._libs import iNaT, index as libindex
import pandas as pd
-from pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range
+from pandas import (
+ DataFrame,
+ DatetimeIndex,
+ NaT,
+ Series,
+ Timestamp,
+ date_range,
+ period_range,
+)
import pandas._testing as tm
@@ -65,13 +74,6 @@ def test_dti_reset_index_round_trip():
assert df.reset_index()["Date"][0] == stamp
-@pytest.mark.slow
-def test_slice_locs_indexerror():
- times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10) for i in range(100000)]
- s = Series(range(100000), times)
- s.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)]
-
-
def test_slicing_datetimes():
# GH 7523
@@ -114,8 +116,6 @@ def test_slicing_datetimes():
def test_getitem_setitem_datetime_tz_pytz():
- from pytz import timezone as tz
-
N = 50
# testing with timezone, GH #2785
rng = date_range("1/1/1990", periods=N, freq="H", tz="US/Eastern")
@@ -134,23 +134,20 @@ def test_getitem_setitem_datetime_tz_pytz():
# repeat with datetimes
result = ts.copy()
- result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = 0
- result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = ts[4]
+ result[datetime(1990, 1, 1, 9, tzinfo=pytz.timezone("UTC"))] = 0
+ result[datetime(1990, 1, 1, 9, tzinfo=pytz.timezone("UTC"))] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
# comparison dates with datetime MUST be localized!
- date = tz("US/Central").localize(datetime(1990, 1, 1, 3))
+ date = pytz.timezone("US/Central").localize(datetime(1990, 1, 1, 3))
result[date] = 0
result[date] = ts[4]
tm.assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil():
- from dateutil.tz import tzutc
-
- from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
tz = (
lambda x: tzutc() if x == "UTC" else gettz(x)
@@ -295,7 +292,6 @@ def test_getitem_setitem_datetimeindex():
def test_getitem_setitem_periodindex():
- from pandas import period_range
N = 50
rng = period_range("1/1/1990", periods=N, freq="H")
@@ -466,72 +462,50 @@ def test_duplicate_dates_indexing(dups):
assert ts[datetime(2000, 1, 6)] == 0
-def test_range_slice():
- idx = DatetimeIndex(["1/1/2000", "1/2/2000", "1/2/2000", "1/3/2000", "1/4/2000"])
-
- ts = Series(np.random.randn(len(idx)), index=idx)
-
- result = ts["1/2/2000":]
- expected = ts[1:]
- tm.assert_series_equal(result, expected)
-
- result = ts["1/2/2000":"1/3/2000"]
- expected = ts[1:4]
- tm.assert_series_equal(result, expected)
-
-
def test_groupby_average_dup_values(dups):
result = dups.groupby(level=0).mean()
expected = dups.groupby(dups.index).mean()
tm.assert_series_equal(result, expected)
-def test_indexing_over_size_cutoff():
- import datetime
-
+def test_indexing_over_size_cutoff(monkeypatch):
# #1821
- old_cutoff = _index._SIZE_CUTOFF
- try:
- _index._SIZE_CUTOFF = 1000
-
- # create large list of non periodic datetime
- dates = []
- sec = datetime.timedelta(seconds=1)
- half_sec = datetime.timedelta(microseconds=500000)
- d = datetime.datetime(2011, 12, 5, 20, 30)
- n = 1100
- for i in range(n):
- dates.append(d)
- dates.append(d + sec)
- dates.append(d + sec + half_sec)
- dates.append(d + sec + sec + half_sec)
- d += 3 * sec
-
- # duplicate some values in the list
- duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
- for p in duplicate_positions:
- dates[p + 1] = dates[p]
-
- df = DataFrame(
- np.random.randn(len(dates), 4), index=dates, columns=list("ABCD")
- )
-
- pos = n * 3
- timestamp = df.index[pos]
- assert timestamp in df.index
-
- # it works!
- df.loc[timestamp]
- assert len(df.loc[[timestamp]]) > 0
- finally:
- _index._SIZE_CUTOFF = old_cutoff
+ monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 1000)
+
+ # create large list of non periodic datetime
+ dates = []
+ sec = timedelta(seconds=1)
+ half_sec = timedelta(microseconds=500000)
+ d = datetime(2011, 12, 5, 20, 30)
+ n = 1100
+ for i in range(n):
+ dates.append(d)
+ dates.append(d + sec)
+ dates.append(d + sec + half_sec)
+ dates.append(d + sec + sec + half_sec)
+ d += 3 * sec
+
+ # duplicate some values in the list
+ duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
+ for p in duplicate_positions:
+ dates[p + 1] = dates[p]
+
+ df = DataFrame(np.random.randn(len(dates), 4), index=dates, columns=list("ABCD"))
+
+ pos = n * 3
+ timestamp = df.index[pos]
+ assert timestamp in df.index
+
+ # it works!
+ df.loc[timestamp]
+ assert len(df.loc[[timestamp]]) > 0
def test_indexing_over_size_cutoff_period_index(monkeypatch):
# GH 27136
- monkeypatch.setattr(_index, "_SIZE_CUTOFF", 1000)
+ monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 1000)
n = 1100
idx = pd.period_range("1/1/2000", freq="T", periods=n)
@@ -654,19 +628,3 @@ def test_indexing():
msg = r"Timestamp\('2012-01-02 18:01:02-0600', tz='US/Central', freq='S'\)"
with pytest.raises(KeyError, match=msg):
df[df.index[2]]
-
-
-"""
-test NaT support
-"""
-
-
-def test_setitem_tuple_with_datetimetz():
- # GH 20441
- arr = date_range("2017", periods=4, tz="US/Eastern")
- index = [(0, 1), (0, 2), (0, 3), (0, 4)]
- result = Series(arr, index=index)
- expected = result.copy()
- result[(0, 1)] = np.nan
- expected.iloc[0] = np.nan
- tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 2933983a5b18b..71bcce12796f5 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -1,7 +1,7 @@
"""
Series.__getitem__ test classes are organized by the type of key passed.
"""
-from datetime import datetime, time
+from datetime import date, datetime, time
import numpy as np
import pytest
@@ -9,7 +9,16 @@
from pandas._libs.tslibs import conversion, timezones
import pandas as pd
-from pandas import DataFrame, Index, Series, Timestamp, date_range, period_range
+from pandas import (
+ Categorical,
+ DataFrame,
+ DatetimeIndex,
+ Index,
+ Series,
+ Timestamp,
+ date_range,
+ period_range,
+)
import pandas._testing as tm
from pandas.core.indexing import IndexingError
@@ -93,8 +102,46 @@ def test_getitem_time_object(self):
result.index = result.index._with_freq(None)
tm.assert_series_equal(result, expected)
+ # ------------------------------------------------------------------
+ # Series with CategoricalIndex
+
+ def test_getitem_scalar_categorical_index(self):
+ cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
+
+ ser = Series([1, 2], index=cats)
+
+ expected = ser.iloc[0]
+ result = ser[cats[0]]
+ assert result == expected
+
class TestSeriesGetitemSlices:
+ def test_getitem_partial_str_slice_with_datetimeindex(self):
+ # GH#34860
+ arr = date_range("1/1/2008", "1/1/2009")
+ ser = arr.to_series()
+ result = ser["2008"]
+
+ rng = date_range(start="2008-01-01", end="2008-12-31")
+ expected = Series(rng, index=rng)
+
+ tm.assert_series_equal(result, expected)
+
+ def test_getitem_slice_strings_with_datetimeindex(self):
+ idx = DatetimeIndex(
+ ["1/1/2000", "1/2/2000", "1/2/2000", "1/3/2000", "1/4/2000"]
+ )
+
+ ts = Series(np.random.randn(len(idx)), index=idx)
+
+ result = ts["1/2/2000":]
+ expected = ts[1:]
+ tm.assert_series_equal(result, expected)
+
+ result = ts["1/2/2000":"1/3/2000"]
+ expected = ts[1:4]
+ tm.assert_series_equal(result, expected)
+
def test_getitem_slice_2d(self, datetime_series):
# GH#30588 multi-dimensional indexing deprecated
@@ -119,6 +166,26 @@ def test_getitem_median_slice_bug(self):
expected = s[indexer[0]]
tm.assert_series_equal(result, expected)
+ @pytest.mark.parametrize(
+ "slc, positions",
+ [
+ [slice(date(2018, 1, 1), None), [0, 1, 2]],
+ [slice(date(2019, 1, 2), None), [2]],
+ [slice(date(2020, 1, 1), None), []],
+ [slice(None, date(2020, 1, 1)), [0, 1, 2]],
+ [slice(None, date(2019, 1, 1)), [0]],
+ ],
+ )
+ def test_getitem_slice_date(self, slc, positions):
+ # https://github.com/pandas-dev/pandas/issues/31501
+ ser = Series(
+ [0, 1, 2],
+ DatetimeIndex(["2019-01-01", "2019-01-01T06:00:00", "2019-01-02"]),
+ )
+ result = ser[slc]
+ expected = ser.take(positions)
+ tm.assert_series_equal(result, expected)
+
class TestSeriesGetitemListLike:
@pytest.mark.parametrize("box", [list, np.array, pd.Index, pd.Series])
diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py
index f35f1375732cb..86af29eac1bae 100644
--- a/pandas/tests/series/indexing/test_numeric.py
+++ b/pandas/tests/series/indexing/test_numeric.py
@@ -71,17 +71,6 @@ def test_getitem_setitem_slice_integers():
assert not (s[4:] == 0).any()
-def test_setitem_float_labels():
- # note labels are floats
- s = Series(["a", "b", "c"], index=[0, 0.5, 1])
- tmp = s.copy()
-
- s.loc[1] = "zoo"
- tmp.iloc[2] = "zoo"
-
- tm.assert_series_equal(s, tmp)
-
-
def test_slice_float_get_set(datetime_series):
msg = (
"cannot do slice indexing on DatetimeIndex with these indexers "
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index b4c5ac0195d26..7e25e5200d610 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -61,6 +61,16 @@ def test_setitem_with_different_tz_casts_to_object(self):
)
tm.assert_series_equal(ser, expected)
+ def test_setitem_tuple_with_datetimetz_values(self):
+ # GH#20441
+ arr = date_range("2017", periods=4, tz="US/Eastern")
+ index = [(0, 1), (0, 2), (0, 3), (0, 4)]
+ result = Series(arr, index=index)
+ expected = result.copy()
+ result[(0, 1)] = np.nan
+ expected.iloc[0] = np.nan
+ tm.assert_series_equal(result, expected)
+
class TestSetitemPeriodDtype:
@pytest.mark.parametrize("na_val", [None, np.nan])
| https://api.github.com/repos/pandas-dev/pandas/pulls/37638 | 2020-11-05T02:54:40Z | 2020-11-06T00:26:14Z | 2020-11-06T00:26:14Z | 2020-11-06T00:27:00Z | |
DOC: test organization | diff --git a/doc/source/development/index.rst b/doc/source/development/index.rst
index f8a6bb6deb52d..e842c827b417f 100644
--- a/doc/source/development/index.rst
+++ b/doc/source/development/index.rst
@@ -16,6 +16,7 @@ Development
code_style
maintaining
internals
+ test_writing
extending
developer
policies
diff --git a/doc/source/development/test_writing.rst b/doc/source/development/test_writing.rst
new file mode 100644
index 0000000000000..27d0f44f75633
--- /dev/null
+++ b/doc/source/development/test_writing.rst
@@ -0,0 +1,147 @@
+.. _test_organization:
+
+Test organization
+=================
+Ideally, there should be one, and only one, obvious place for a test to reside.
+Until we reach that ideal, these are some rules of thumb for where a test should
+be located.
+
+1. Does your test depend only on code in ``pd._libs.tslibs``?
+This test likely belongs in one of:
+
+ - tests.tslibs
+
+ .. note::
+
+ No file in ``tests.tslibs`` should import from any pandas modules outside of ``pd._libs.tslibs``
+
+ - tests.scalar
+ - tests.tseries.offsets
+
+2. Does your test depend only on code in pd._libs?
+This test likely belongs in one of:
+
+ - tests.libs
+ - tests.groupby.test_libgroupby
+
+3. Is your test for an arithmetic or comparison method?
+This test likely belongs in one of:
+
+ - tests.arithmetic
+
+ .. note::
+
+ These are intended for tests that can be shared to test the behavior of DataFrame/Series/Index/ExtensionArray using the ``box_with_array`` fixture.
+
+ - tests.frame.test_arithmetic
+ - tests.series.test_arithmetic
+
+4. Is your test for a reduction method (min, max, sum, prod, ...)?
+This test likely belongs in one of:
+
+ - tests.reductions
+
+ .. note::
+
+ These are intended for tests that can be shared to test the behavior of DataFrame/Series/Index/ExtensionArray.
+
+ - tests.frame.test_reductions
+ - tests.series.test_reductions
+ - tests.test_nanops
+
+5. Is your test for an indexing method?
+ This is the most difficult case for deciding where a test belongs, because
+ there are many of these tests, and many of them test more than one method
+ (e.g. both ``Series.__getitem__`` and ``Series.loc.__getitem__``)
+
+ A) Is the test specifically testing an Index method (e.g. ``Index.get_loc``, ``Index.get_indexer``)?
+ This test likely belongs in one of:
+ - tests.indexes.test_indexing
+ - tests.indexes.fooindex.test_indexing
+
+ Within that files there should be a method-specific test class e.g. ``TestGetLoc``.
+
+ In most cases, neither ``Series`` nor ``DataFrame`` objects should be needed in these tests.
+
+ B) Is the test for a Series or DataFrame indexing method *other* than ``__getitem__`` or ``__setitem__``, e.g. ``xs``, ``where``, ``take``, ``mask``, ``lookup``, or ``insert``?
+ This test likely belongs in one of:
+ - tests.frame.indexing.test_methodname
+ - tests.series.indexing.test_methodname
+
+ C) Is the test for any of ``loc``, ``iloc``, ``at``, or ``iat``?
+ This test likely belongs in one of:
+ - tests.indexing.test_loc
+ - tests.indexing.test_iloc
+ - tests.indexing.test_at
+ - tests.indexing.test_iat
+
+ Within the appropriate file, test classes correspond to either types of indexers (e.g. ``TestLocBooleanMask``) or major use cases (e.g. ``TestLocSetitemWithExpansion``).
+
+ See the note in section D) about tests that test multiple indexing methods.
+
+ D) Is the test for ``Series.__getitem__``, ``Series.__setitem__``, ``DataFrame.__getitem__``, or ``DataFrame.__setitem__``?
+ This test likely belongs in one of:
+ - tests.series.test_getitem
+ - tests.series.test_setitem
+ - tests.frame.test_getitem
+ - tests.frame.test_setitem
+
+ If many cases such a test may test multiple similar methods, e.g.
+
+ .. code-block:: python
+ import pandas as pd
+ import pandas._testing as tm
+
+ def test_getitem_listlike_of_ints():
+ ser = pd.Series(range(5))
+
+ result = ser[[3, 4]]
+ expected = pd.Series([2, 3])
+ tm.assert_series_equal(result, expected)
+
+ result = ser.loc[[3, 4]]
+ tm.assert_series_equal(result, expected)
+
+ In cases like this, the test location should be based on the *underlying* method being tested. Or in the case of a test for a bugfix, the location of the actual bug. So in this example, we know that ``Series.__getitem__`` calls ``Series.loc.__getitem__``, so this is *really* a test for ``loc.__getitem__``. So this test belongs in ``tests.indexing.test_loc``
+
+6. Is your test for a DataFrame or Series method?
+ A) Is the method a plotting method?
+ This test likely belongs in one of:
+
+ - tests.plotting
+
+ B) Is the method an IO method?
+ This test likely belongs in one of:
+
+ - tests.io
+
+ C) Otherwise
+ This test likely belongs in one of:
+
+ - tests.series.methods.test_mymethod
+ - tests.frame.methods.test_mymethod
+
+ .. note::
+
+ If a test can be shared between DataFrame/Series using the ``frame_or_series`` fixture, by convention it goes in tests.frame file.
+
+ - tests.generic.methods.test_mymethod
+
+ .. note::
+
+ The generic/methods/ directory is only for methods with tests that are fully parametrized over Series/DataFrame
+
+7. Is your test for an Index method, not depending on Series/DataFrame?
+This test likely belongs in one of:
+
+ - tests.indexes
+
+8) Is your test for one of the pandas-provided ExtensionArrays (Categorical, DatetimeArray, TimedeltaArray, PeriodArray, IntervalArray, PandasArray, FloatArray, BoolArray, IntervalArray, StringArray)?
+This test likely belongs in one of:
+
+ - tests.arrays
+
+9) Is your test for *all* ExtensionArray subclasses (the "EA Interface")?
+This test likely belongs in one of:
+
+ - tests.extension
diff --git a/pandas/tests/series/methods/test_item.py b/pandas/tests/series/methods/test_item.py
index a7ddc0c22dcf4..90e8f6d39c5cc 100644
--- a/pandas/tests/series/methods/test_item.py
+++ b/pandas/tests/series/methods/test_item.py
@@ -1,3 +1,7 @@
+"""
+Series.item method, mainly testing that we get python scalars as opposed to
+numpy scalars.
+"""
import pytest
from pandas import Series, Timedelta, Timestamp, date_range
@@ -5,6 +9,7 @@
class TestItem:
def test_item(self):
+ # We are testing that we get python scalars as opposed to numpy scalars
ser = Series([1])
result = ser.item()
assert result == 1
| Documenting the MO behind the test organization we've been doing.
1) I expect this is not valid rst ATM
2) The style does not match the rest of the docs, so probably needs a re-write.
| https://api.github.com/repos/pandas-dev/pandas/pulls/37637 | 2020-11-04T23:49:27Z | 2020-11-11T00:19:19Z | 2020-11-11T00:19:19Z | 2020-11-11T15:35:04Z |
DOC: Fix typo | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3ec575a849abe..7cdb37a76d563 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6448,7 +6448,7 @@ def update(
See Also
--------
dict.update : Similar method for dictionaries.
- DataFrame.merge : For column(s)-on-columns(s) operations.
+ DataFrame.merge : For column(s)-on-column(s) operations.
Examples
--------
@@ -7984,7 +7984,7 @@ def join(
See Also
--------
- DataFrame.merge : For column(s)-on-columns(s) operations.
+ DataFrame.merge : For column(s)-on-column(s) operations.
Notes
-----
| "columns(s)" sounded odd, I believe it was supposed to be just "column(s)".
| https://api.github.com/repos/pandas-dev/pandas/pulls/37636 | 2020-11-04T20:41:02Z | 2020-11-05T12:35:47Z | 2020-11-05T12:35:47Z | 2020-11-05T14:00:30Z |
TST/REF: collect tests for get_numeric_data | diff --git a/pandas/tests/frame/methods/test_get_numeric_data.py b/pandas/tests/frame/methods/test_get_numeric_data.py
new file mode 100644
index 0000000000000..d73dbdf045be3
--- /dev/null
+++ b/pandas/tests/frame/methods/test_get_numeric_data.py
@@ -0,0 +1,96 @@
+import numpy as np
+
+from pandas import Categorical, DataFrame, Index, Series, Timestamp
+import pandas._testing as tm
+from pandas.core.arrays import IntervalArray, integer_array
+
+
+class TestGetNumericData:
+ def test_get_numeric_data_preserve_dtype(self):
+ # get the numeric data
+ obj = DataFrame({"A": [1, "2", 3.0]})
+ result = obj._get_numeric_data()
+ expected = DataFrame(index=[0, 1, 2], dtype=object)
+ tm.assert_frame_equal(result, expected)
+
+ def test_get_numeric_data(self):
+
+ datetime64name = np.dtype("M8[ns]").name
+ objectname = np.dtype(np.object_).name
+
+ df = DataFrame(
+ {"a": 1.0, "b": 2, "c": "foo", "f": Timestamp("20010102")},
+ index=np.arange(10),
+ )
+ result = df.dtypes
+ expected = Series(
+ [
+ np.dtype("float64"),
+ np.dtype("int64"),
+ np.dtype(objectname),
+ np.dtype(datetime64name),
+ ],
+ index=["a", "b", "c", "f"],
+ )
+ tm.assert_series_equal(result, expected)
+
+ df = DataFrame(
+ {
+ "a": 1.0,
+ "b": 2,
+ "c": "foo",
+ "d": np.array([1.0] * 10, dtype="float32"),
+ "e": np.array([1] * 10, dtype="int32"),
+ "f": np.array([1] * 10, dtype="int16"),
+ "g": Timestamp("20010102"),
+ },
+ index=np.arange(10),
+ )
+
+ result = df._get_numeric_data()
+ expected = df.loc[:, ["a", "b", "d", "e", "f"]]
+ tm.assert_frame_equal(result, expected)
+
+ only_obj = df.loc[:, ["c", "g"]]
+ result = only_obj._get_numeric_data()
+ expected = df.loc[:, []]
+ tm.assert_frame_equal(result, expected)
+
+ df = DataFrame.from_dict({"a": [1, 2], "b": ["foo", "bar"], "c": [np.pi, np.e]})
+ result = df._get_numeric_data()
+ expected = DataFrame.from_dict({"a": [1, 2], "c": [np.pi, np.e]})
+ tm.assert_frame_equal(result, expected)
+
+ df = result.copy()
+ result = df._get_numeric_data()
+ expected = df
+ tm.assert_frame_equal(result, expected)
+
+ def test_get_numeric_data_mixed_dtype(self):
+ # numeric and object columns
+
+ df = DataFrame(
+ {
+ "a": [1, 2, 3],
+ "b": [True, False, True],
+ "c": ["foo", "bar", "baz"],
+ "d": [None, None, None],
+ "e": [3.14, 0.577, 2.773],
+ }
+ )
+ result = df._get_numeric_data()
+ tm.assert_index_equal(result.columns, Index(["a", "b", "e"]))
+
+ def test_get_numeric_data_extension_dtype(self):
+ # GH#22290
+ df = DataFrame(
+ {
+ "A": integer_array([-10, np.nan, 0, 10, 20, 30], dtype="Int64"),
+ "B": Categorical(list("abcabc")),
+ "C": integer_array([0, 1, 2, 3, np.nan, 5], dtype="UInt8"),
+ "D": IntervalArray.from_breaks(range(7)),
+ }
+ )
+ result = df._get_numeric_data()
+ expected = df.loc[:, ["A", "C"]]
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index 34aa11eb76306..5513262af8100 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -16,7 +16,6 @@
option_context,
)
import pandas._testing as tm
-from pandas.core.arrays import IntervalArray, integer_array
from pandas.core.internals import ObjectBlock
from pandas.core.internals.blocks import IntBlock
@@ -306,73 +305,6 @@ def test_is_mixed_type(self, float_frame, float_string_frame):
assert not float_frame._is_mixed_type
assert float_string_frame._is_mixed_type
- def test_get_numeric_data(self):
-
- datetime64name = np.dtype("M8[ns]").name
- objectname = np.dtype(np.object_).name
-
- df = DataFrame(
- {"a": 1.0, "b": 2, "c": "foo", "f": Timestamp("20010102")},
- index=np.arange(10),
- )
- result = df.dtypes
- expected = Series(
- [
- np.dtype("float64"),
- np.dtype("int64"),
- np.dtype(objectname),
- np.dtype(datetime64name),
- ],
- index=["a", "b", "c", "f"],
- )
- tm.assert_series_equal(result, expected)
-
- df = DataFrame(
- {
- "a": 1.0,
- "b": 2,
- "c": "foo",
- "d": np.array([1.0] * 10, dtype="float32"),
- "e": np.array([1] * 10, dtype="int32"),
- "f": np.array([1] * 10, dtype="int16"),
- "g": Timestamp("20010102"),
- },
- index=np.arange(10),
- )
-
- result = df._get_numeric_data()
- expected = df.loc[:, ["a", "b", "d", "e", "f"]]
- tm.assert_frame_equal(result, expected)
-
- only_obj = df.loc[:, ["c", "g"]]
- result = only_obj._get_numeric_data()
- expected = df.loc[:, []]
- tm.assert_frame_equal(result, expected)
-
- df = DataFrame.from_dict({"a": [1, 2], "b": ["foo", "bar"], "c": [np.pi, np.e]})
- result = df._get_numeric_data()
- expected = DataFrame.from_dict({"a": [1, 2], "c": [np.pi, np.e]})
- tm.assert_frame_equal(result, expected)
-
- df = result.copy()
- result = df._get_numeric_data()
- expected = df
- tm.assert_frame_equal(result, expected)
-
- def test_get_numeric_data_extension_dtype(self):
- # GH 22290
- df = DataFrame(
- {
- "A": integer_array([-10, np.nan, 0, 10, 20, 30], dtype="Int64"),
- "B": Categorical(list("abcabc")),
- "C": integer_array([0, 1, 2, 3, np.nan, 5], dtype="UInt8"),
- "D": IntervalArray.from_breaks(range(7)),
- }
- )
- result = df._get_numeric_data()
- expected = df.loc[:, ["A", "C"]]
- tm.assert_frame_equal(result, expected)
-
def test_stale_cached_series_bug_473(self):
# this is chained, but ok
@@ -390,21 +322,6 @@ def test_stale_cached_series_bug_473(self):
exp = Y["g"].sum() # noqa
assert pd.isna(Y["g"]["c"])
- def test_get_X_columns(self):
- # numeric and object columns
-
- df = DataFrame(
- {
- "a": [1, 2, 3],
- "b": [True, False, True],
- "c": ["foo", "bar", "baz"],
- "d": [None, None, None],
- "e": [3.14, 0.577, 2.773],
- }
- )
-
- tm.assert_index_equal(df._get_numeric_data().columns, pd.Index(["a", "b", "e"]))
-
def test_strange_column_corruption_issue(self):
# FIXME: dont leave commented-out
# (wesm) Unclear how exactly this is related to internal matters
@@ -458,7 +375,7 @@ def test_update_inplace_sets_valid_block_values():
df["a"].fillna(1, inplace=True)
# check we havent put a Series into any block.values
- assert isinstance(df._mgr.blocks[0].values, pd.Categorical)
+ assert isinstance(df._mgr.blocks[0].values, Categorical)
# smoketest for OP bug from GH#35731
assert df.isnull().sum().sum() == 0
diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py
index da02a82890adc..757f71730819d 100644
--- a/pandas/tests/generic/test_frame.py
+++ b/pandas/tests/generic/test_frame.py
@@ -61,14 +61,6 @@ def test_nonzero_single_element(self):
with pytest.raises(ValueError, match=msg):
bool(df)
- def test_get_numeric_data_preserve_dtype(self):
-
- # get the numeric data
- o = DataFrame({"A": [1, "2", 3.0]})
- result = o._get_numeric_data()
- expected = DataFrame(index=[0, 1, 2], dtype=object)
- self._compare(result, expected)
-
def test_metadata_propagation_indiv_groupby(self):
# groupby
df = DataFrame(
diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py
index 0a05a42f0fc39..474661e0f2e0a 100644
--- a/pandas/tests/generic/test_series.py
+++ b/pandas/tests/generic/test_series.py
@@ -35,31 +35,11 @@ def test_set_axis_name_raises(self):
with pytest.raises(ValueError, match=msg):
s._set_axis_name(name="a", axis=1)
- def test_get_numeric_data_preserve_dtype(self):
-
- # get the numeric data
- o = Series([1, 2, 3])
- result = o._get_numeric_data()
- self._compare(result, o)
-
- o = Series([1, "2", 3.0])
- result = o._get_numeric_data()
- expected = Series([], dtype=object, index=pd.Index([], dtype=object))
- self._compare(result, expected)
-
- o = Series([True, False, True])
- result = o._get_numeric_data()
- self._compare(result, o)
-
+ def test_get_bool_data_preserve_dtype(self):
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
- o = Series(date_range("20130101", periods=3))
- result = o._get_numeric_data()
- expected = Series([], dtype="M8[ns]", index=pd.Index([], dtype=object))
- self._compare(result, expected)
-
def test_nonzero_single_element(self):
# allow single item via bool method
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index c1a5db992d3df..2f43f0ae4b031 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -119,6 +119,11 @@ def test_setitem_from_duplicate_axis(self):
class TestLoc2:
# TODO: better name, just separating out things that rely on base class
+ def test_loc_getitem_missing_unicode_key(self):
+ df = DataFrame({"a": [1]})
+ with pytest.raises(KeyError, match="\u05d0"):
+ df.loc[:, "\u05d0"] # should not raise UnicodeEncodeError
+
def test_loc_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning a ndarray
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index bddc50a3cbcc1..88b91ecc79060 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -1146,12 +1146,6 @@ def test_make_block_no_pandas_array():
assert result.is_extension is False
-def test_missing_unicode_key():
- df = DataFrame({"a": [1]})
- with pytest.raises(KeyError, match="\u05d0"):
- df.loc[:, "\u05d0"] # should not raise UnicodeEncodeError
-
-
def test_single_block_manager_fastpath_deprecated():
# GH#33092
ser = Series(range(3))
diff --git a/pandas/tests/series/methods/test_get_numeric_data.py b/pandas/tests/series/methods/test_get_numeric_data.py
new file mode 100644
index 0000000000000..dc0becf46a24c
--- /dev/null
+++ b/pandas/tests/series/methods/test_get_numeric_data.py
@@ -0,0 +1,25 @@
+from pandas import Index, Series, date_range
+import pandas._testing as tm
+
+
+class TestGetNumericData:
+ def test_get_numeric_data_preserve_dtype(self):
+
+ # get the numeric data
+ obj = Series([1, 2, 3])
+ result = obj._get_numeric_data()
+ tm.assert_series_equal(result, obj)
+
+ obj = Series([1, "2", 3.0])
+ result = obj._get_numeric_data()
+ expected = Series([], dtype=object, index=Index([], dtype=object))
+ tm.assert_series_equal(result, expected)
+
+ obj = Series([True, False, True])
+ result = obj._get_numeric_data()
+ tm.assert_series_equal(result, obj)
+
+ obj = Series(date_range("20130101", periods=3))
+ result = obj._get_numeric_data()
+ expected = Series([], dtype="M8[ns]", index=Index([], dtype=object))
+ tm.assert_series_equal(result, expected)
| https://api.github.com/repos/pandas-dev/pandas/pulls/37634 | 2020-11-04T18:32:46Z | 2020-11-06T00:26:51Z | 2020-11-06T00:26:51Z | 2020-11-06T00:27:33Z | |
Added dataclass into dataframe API reference | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index a3130ec27713d..cd22cf35eff02 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -360,7 +360,7 @@ class DataFrame(NDFrame, OpsMixin):
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
- Dict can contain Series, arrays, constants, or list-like objects. If
+ Dict can contain Series, arrays, constants, dataclass or list-like objects. If
data is a dict, column order follows insertion-order.
.. versionchanged:: 0.25.0
@@ -420,6 +420,16 @@ class DataFrame(NDFrame, OpsMixin):
0 1 2 3
1 4 5 6
2 7 8 9
+
+ Constructing DataFrame from dataclass:
+
+ >>> from dataclasses import make_dataclass
+ >>> Point = make_dataclass("Point", [("x", int), ("y", int)])
+ >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])
+ x y
+ 0 0 0
+ 1 0 3
+ 2 2 3
"""
_internal_names_set = {"columns", "index"} | NDFrame._internal_names_set
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Added dataclass constructor for dataframe into the Pandas in pandas/init.py line 363 and 426. | https://api.github.com/repos/pandas-dev/pandas/pulls/37632 | 2020-11-04T15:53:54Z | 2020-11-04T17:26:23Z | 2020-11-04T17:26:23Z | 2020-11-04T17:34:04Z |
DOC: Fix typo | diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index 38e36c8ff8d01..c9940c78b8d7d 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -91,7 +91,7 @@
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
- values are used as-is determine the groups. A label or list of
+ values are used as-is to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
| I'm not entirely sure about this one, but "If an ndarray is passed, the values are used as-is determine the groups." does sound odd to me, so I do believe it was missing the "to".
| https://api.github.com/repos/pandas-dev/pandas/pulls/37630 | 2020-11-04T14:39:25Z | 2020-11-04T16:53:39Z | 2020-11-04T16:53:39Z | 2020-11-04T16:58:27Z |
Backport PR #37461 on branch 1.1.x: BUG: Metadata propagation for groupby iterator | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index cf728d94b2a55..a122154904996 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -23,7 +23,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
--
+- Bug in metadata propagation for ``groupby`` iterator (:issue:`37343`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 3aaeef3b63760..5ea4f0cbb6a0c 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -131,9 +131,16 @@ def get_iterator(self, data: FrameOrSeries, axis: int = 0):
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
- yield key, group
+ yield key, group.__finalize__(data, method="groupby")
def _get_splitter(self, data: FrameOrSeries, axis: int = 0) -> "DataSplitter":
+ """
+ Returns
+ -------
+ Generator yielding subsetted objects
+
+ __finalize__ has not been called for the the subsetted objects returned.
+ """
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
@@ -955,7 +962,8 @@ class SeriesSplitter(DataSplitter):
def _chop(self, sdata: Series, slice_obj: slice) -> Series:
# fastpath equivalent to `sdata.iloc[slice_obj]`
mgr = sdata._mgr.get_slice(slice_obj)
- return type(sdata)(mgr, name=sdata.name, fastpath=True)
+ # __finalize__ not called here, must be applied by caller if applicable
+ return sdata._constructor(mgr, name=sdata.name, fastpath=True)
class FrameSplitter(DataSplitter):
@@ -971,7 +979,8 @@ def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:
# else:
# return sdata.iloc[:, slice_obj]
mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis)
- return type(sdata)(mgr)
+ # __finalize__ not called here, must be applied by caller if applicable
+ return sdata._constructor(mgr)
def get_splitter(
diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py
index 7271911c5f80f..a97f4d95a677d 100644
--- a/pandas/tests/groupby/test_groupby_subclass.py
+++ b/pandas/tests/groupby/test_groupby_subclass.py
@@ -51,6 +51,15 @@ def test_groupby_preserves_subclass(obj, groupby_func):
tm.assert_series_equal(result1, result2)
+def test_groupby_preserves_metadata():
+ # GH-37343
+ custom_df = tm.SubclassedDataFrame({"a": [1, 2, 3], "b": [1, 1, 2], "c": [7, 8, 9]})
+ assert "testattr" in custom_df._metadata
+ custom_df.testattr = "hello"
+ for _, group_df in custom_df.groupby("c"):
+ assert group_df.testattr == "hello"
+
+
@pytest.mark.parametrize(
"obj", [DataFrame, tm.SubclassedDataFrame],
)
| Backport PR #37461 | https://api.github.com/repos/pandas-dev/pandas/pulls/37628 | 2020-11-04T14:18:37Z | 2020-11-04T15:54:21Z | 2020-11-04T15:54:21Z | 2020-11-04T15:54:30Z |
TST: 32bit dtype compat test_groupby_dropna | diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py
index 02ce4dcf2ae2b..e38fa5e8de87e 100644
--- a/pandas/tests/groupby/test_groupby_dropna.py
+++ b/pandas/tests/groupby/test_groupby_dropna.py
@@ -343,7 +343,7 @@ def test_groupby_nan_included():
df = pd.DataFrame(data)
grouped = df.groupby("group", dropna=False)
result = grouped.indices
- dtype = "int64"
+ dtype = np.intp
expected = {
"g1": np.array([0, 2], dtype=dtype),
"g2": np.array([3], dtype=dtype),
| Part of #36579
Related to ``test_groupby_nan_included``. | https://api.github.com/repos/pandas-dev/pandas/pulls/37623 | 2020-11-04T07:07:07Z | 2020-11-04T11:04:33Z | 2020-11-04T11:04:33Z | 2020-11-06T15:38:24Z |
ENH: memory_map for compressed files | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 9ac3585aa9002..a38d7bc5e02e5 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -230,6 +230,7 @@ Other enhancements
- :class:`DatetimeIndex` and :class:`Series` with ``datetime64`` or ``datetime64tz`` dtypes now support ``std`` (:issue:`37436`)
- :class:`Window` now supports all Scipy window types in ``win_type`` with flexible keyword argument support (:issue:`34556`)
- :meth:`testing.assert_index_equal` now has a ``check_order`` parameter that allows indexes to be checked in an order-insensitive manner (:issue:`37478`)
+- :func:`read_csv` supports memory-mapping for compressed files (:issue:`37621`)
.. _whatsnew_120.api_breaking.python:
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 90a79e54015c4..910eb23d9a2d0 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -107,6 +107,7 @@ class IOHandles:
handle: Buffer
created_handles: List[Buffer] = dataclasses.field(default_factory=list)
is_wrapped: bool = False
+ is_mmap: bool = False
def close(self) -> None:
"""
@@ -604,49 +605,49 @@ def get_handle(
except ImportError:
pass
- handles: List[Buffer] = list()
-
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
if encoding is None:
encoding = "utf-8"
# Convert pathlib.Path/py.path.local or string
- path_or_buf = stringify_path(path_or_buf)
- is_path = isinstance(path_or_buf, str)
- f = path_or_buf
+ handle = stringify_path(path_or_buf)
compression, compression_args = get_compression_method(compression)
- if is_path:
- compression = infer_compression(path_or_buf, compression)
+ compression = infer_compression(handle, compression)
- if compression:
+ # memory mapping needs to be the first step
+ handle, memory_map, handles = _maybe_memory_map(
+ handle, memory_map, encoding, mode, errors
+ )
+ is_path = isinstance(handle, str)
+ if compression:
# GZ Compression
if compression == "gzip":
if is_path:
- assert isinstance(path_or_buf, str)
- f = gzip.GzipFile(filename=path_or_buf, mode=mode, **compression_args)
+ assert isinstance(handle, str)
+ handle = gzip.GzipFile(filename=handle, mode=mode, **compression_args)
else:
- f = gzip.GzipFile(
- fileobj=path_or_buf, # type: ignore[arg-type]
+ handle = gzip.GzipFile(
+ fileobj=handle, # type: ignore[arg-type]
mode=mode,
**compression_args,
)
# BZ Compression
elif compression == "bz2":
- f = bz2.BZ2File(
- path_or_buf, mode=mode, **compression_args # type: ignore[arg-type]
+ handle = bz2.BZ2File(
+ handle, mode=mode, **compression_args # type: ignore[arg-type]
)
# ZIP Compression
elif compression == "zip":
- f = _BytesZipFile(path_or_buf, mode, **compression_args)
- if f.mode == "r":
- handles.append(f)
- zip_names = f.namelist()
+ handle = _BytesZipFile(handle, mode, **compression_args)
+ if handle.mode == "r":
+ handles.append(handle)
+ zip_names = handle.namelist()
if len(zip_names) == 1:
- f = f.open(zip_names.pop())
+ handle = handle.open(zip_names.pop())
elif len(zip_names) == 0:
raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
else:
@@ -657,64 +658,52 @@ def get_handle(
# XZ Compression
elif compression == "xz":
- f = get_lzma_file(lzma)(path_or_buf, mode)
+ handle = get_lzma_file(lzma)(handle, mode)
# Unrecognized Compression
else:
msg = f"Unrecognized compression type: {compression}"
raise ValueError(msg)
- assert not isinstance(f, str)
- handles.append(f)
+ assert not isinstance(handle, str)
+ handles.append(handle)
elif is_path:
# Check whether the filename is to be opened in binary mode.
# Binary mode does not support 'encoding' and 'newline'.
- is_binary_mode = "b" in mode
- assert isinstance(path_or_buf, str)
- if encoding and not is_binary_mode:
+ assert isinstance(handle, str)
+ if encoding and "b" not in mode:
# Encoding
- f = open(path_or_buf, mode, encoding=encoding, errors=errors, newline="")
+ handle = open(handle, mode, encoding=encoding, errors=errors, newline="")
else:
# Binary mode
- f = open(path_or_buf, mode)
- handles.append(f)
+ handle = open(handle, mode)
+ handles.append(handle)
# Convert BytesIO or file objects passed with an encoding
is_wrapped = False
if is_text and (
compression
- or isinstance(f, need_text_wrapping)
- or "b" in getattr(f, "mode", "")
+ or isinstance(handle, need_text_wrapping)
+ or "b" in getattr(handle, "mode", "")
):
- f = TextIOWrapper(
- f, encoding=encoding, errors=errors, newline="" # type: ignore[arg-type]
+ handle = TextIOWrapper(
+ handle, # type: ignore[arg-type]
+ encoding=encoding,
+ errors=errors,
+ newline="",
)
- handles.append(f)
+ handles.append(handle)
# do not mark as wrapped when the user provided a string
is_wrapped = not is_path
- if memory_map and hasattr(f, "fileno"):
- assert not isinstance(f, str)
- try:
- wrapped = cast(mmap.mmap, _MMapWrapper(f)) # type: ignore[arg-type]
- f.close()
- handles.remove(f)
- handles.append(wrapped)
- f = wrapped
- except Exception:
- # we catch any errors that may have occurred
- # because that is consistent with the lower-level
- # functionality of the C engine (pd.read_csv), so
- # leave the file handler as is then
- pass
-
handles.reverse() # close the most recently added buffer first
- assert not isinstance(f, str)
+ assert not isinstance(handle, str)
return IOHandles(
- handle=f,
+ handle=handle,
created_handles=handles,
is_wrapped=is_wrapped,
+ is_mmap=memory_map,
)
@@ -778,9 +767,16 @@ class _MMapWrapper(abc.Iterator):
"""
def __init__(self, f: IO):
+ self.attributes = {}
+ for attribute in ("seekable", "readable", "writeable"):
+ if not hasattr(f, attribute):
+ continue
+ self.attributes[attribute] = getattr(f, attribute)()
self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def __getattr__(self, name: str):
+ if name in self.attributes:
+ return lambda: self.attributes[name]
return getattr(self.mmap, name)
def __iter__(self) -> "_MMapWrapper":
@@ -799,3 +795,42 @@ def __next__(self) -> str:
if newline == "":
raise StopIteration
return newline
+
+
+def _maybe_memory_map(
+ handle: FileOrBuffer,
+ memory_map: bool,
+ encoding: str,
+ mode: str,
+ errors: Optional[str],
+) -> Tuple[FileOrBuffer, bool, List[Buffer]]:
+ """Try to use memory map file/buffer."""
+ handles: List[Buffer] = []
+ memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)
+ if not memory_map:
+ return handle, memory_map, handles
+
+ # need to open the file first
+ if isinstance(handle, str):
+ if encoding and "b" not in mode:
+ # Encoding
+ handle = open(handle, mode, encoding=encoding, errors=errors, newline="")
+ else:
+ # Binary mode
+ handle = open(handle, mode)
+ handles.append(handle)
+
+ try:
+ wrapped = cast(mmap.mmap, _MMapWrapper(handle)) # type: ignore[arg-type]
+ handle.close()
+ handles.remove(handle)
+ handles.append(wrapped)
+ handle = wrapped
+ except Exception:
+ # we catch any errors that may have occurred
+ # because that is consistent with the lower-level
+ # functionality of the C engine (pd.read_csv), so
+ # leave the file handler as is then
+ memory_map = False
+
+ return handle, memory_map, handles
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 3b72869188344..e4895d280c241 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -63,13 +63,7 @@
from pandas.core.series import Series
from pandas.core.tools import datetimes as tools
-from pandas.io.common import (
- get_compression_method,
- get_filepath_or_buffer,
- get_handle,
- stringify_path,
- validate_header_arg,
-)
+from pandas.io.common import get_filepath_or_buffer, get_handle, validate_header_arg
from pandas.io.date_converters import generic_parser
# BOM character (byte order mark)
@@ -1834,16 +1828,6 @@ def __init__(self, src, **kwds):
ParserBase.__init__(self, kwds)
- if kwds.get("memory_map", False):
- # memory-mapped files are directly handled by the TextReader.
- src = stringify_path(src)
-
- if get_compression_method(kwds.get("compression", None))[0] is not None:
- raise ValueError(
- "read_csv does not support compression with memory_map=True. "
- + "Please use memory_map=False instead."
- )
-
self.handles = get_handle(
src,
mode="r",
@@ -1855,7 +1839,7 @@ def __init__(self, src, **kwds):
kwds.pop("encoding", None)
kwds.pop("memory_map", None)
kwds.pop("compression", None)
- if kwds.get("memory_map", False) and hasattr(self.handles.handle, "mmap"):
+ if self.handles.is_mmap and hasattr(self.handles.handle, "mmap"):
self.handles.handle = self.handles.handle.mmap
# #2442
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index e61a5fce99c69..8f63d06859f62 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -2275,40 +2275,38 @@ def test_read_csv_file_handle(all_parsers, io_class, encoding):
assert not handle.closed
-def test_memory_map_compression_error(c_parser_only):
+def test_memory_map_file_handle_silent_fallback(all_parsers, compression):
"""
- c-parsers do not support memory_map=True with compression.
+ Do not fail for buffers with memory_map=True (cannot memory map BytesIO).
- GH 36997
+ GH 37621
"""
- parser = c_parser_only
- df = DataFrame({"a": [1], "b": [2]})
- msg = (
- "read_csv does not support compression with memory_map=True. "
- + "Please use memory_map=False instead."
- )
+ parser = all_parsers
+ expected = DataFrame({"a": [1], "b": [2]})
- with tm.ensure_clean() as path:
- df.to_csv(path, compression="gzip", index=False)
+ handle = BytesIO()
+ expected.to_csv(handle, index=False, compression=compression, mode="wb")
+ handle.seek(0)
- with pytest.raises(ValueError, match=msg):
- parser.read_csv(path, memory_map=True, compression="gzip")
+ tm.assert_frame_equal(
+ parser.read_csv(handle, memory_map=True, compression=compression),
+ expected,
+ )
-def test_memory_map_file_handle(all_parsers):
+def test_memory_map_compression(all_parsers, compression):
"""
- Support some buffers with memory_map=True.
+ Support memory map for compressed files.
- GH 36997
+ GH 37621
"""
parser = all_parsers
expected = DataFrame({"a": [1], "b": [2]})
- handle = StringIO()
- expected.to_csv(handle, index=False)
- handle.seek(0)
+ with tm.ensure_clean() as path:
+ expected.to_csv(path, index=False, compression=compression)
- tm.assert_frame_equal(
- parser.read_csv(handle, memory_map=True),
- expected,
- )
+ tm.assert_frame_equal(
+ parser.read_csv(path, memory_map=True, compression=compression),
+ expected,
+ )
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Apply memory mapping first and then compression. The existing order didn't work.
This adds support for memory mapping in `read_csv` when compression is specified (python and c engine). | https://api.github.com/repos/pandas-dev/pandas/pulls/37621 | 2020-11-04T04:43:10Z | 2020-11-05T00:40:04Z | 2020-11-05T00:40:04Z | 2020-11-05T00:47:18Z |
TST/REF: parametrize set_axis tests | diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index 3cd35e900ee06..4bd1d5fa56468 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -1,7 +1,6 @@
from datetime import datetime
import numpy as np
-import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
@@ -24,15 +23,6 @@
class TestDataFrameAlterAxes:
- def test_set_index_directly(self, float_string_frame):
- df = float_string_frame
- idx = Index(np.arange(len(df))[::-1])
-
- df.index = idx
- tm.assert_index_equal(df.index, idx)
- with pytest.raises(ValueError, match="Length mismatch"):
- df.index = idx[::2]
-
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
@@ -101,12 +91,6 @@ def test_convert_dti_to_series(self):
df.pop("ts")
tm.assert_frame_equal(df, expected)
- def test_set_columns(self, float_string_frame):
- cols = Index(np.arange(len(float_string_frame.columns)))
- float_string_frame.columns = cols
- with pytest.raises(ValueError, match="Length mismatch"):
- float_string_frame.columns = cols[::2]
-
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
diff --git a/pandas/tests/generic/methods/test_set_axis.py b/pandas/tests/generic/methods/test_set_axis.py
index 278d43ef93d2f..a46a91811f40e 100644
--- a/pandas/tests/generic/methods/test_set_axis.py
+++ b/pandas/tests/generic/methods/test_set_axis.py
@@ -57,6 +57,28 @@ def test_set_axis_invalid_axis_name(self, axis, obj):
with pytest.raises(ValueError, match="No axis named"):
obj.set_axis(list("abc"), axis=axis)
+ def test_set_axis_setattr_index_not_collection(self, obj):
+ # wrong type
+ msg = (
+ r"Index\(\.\.\.\) must be called with a collection of some "
+ r"kind, None was passed"
+ )
+ with pytest.raises(TypeError, match=msg):
+ obj.index = None
+
+ def test_set_axis_setattr_index_wrong_length(self, obj):
+ # wrong length
+ msg = (
+ f"Length mismatch: Expected axis has {len(obj)} elements, "
+ f"new values have {len(obj)-1} elements"
+ )
+ with pytest.raises(ValueError, match=msg):
+ obj.index = np.arange(len(obj) - 1)
+
+ if obj.ndim == 2:
+ with pytest.raises(ValueError, match="Length mismatch"):
+ obj.columns = obj.columns[::2]
+
class TestDataFrameSetAxis(SharedSetAxisTests):
@pytest.fixture
diff --git a/pandas/tests/series/methods/test_set_name.py b/pandas/tests/series/methods/test_set_name.py
new file mode 100644
index 0000000000000..cbc8ebde7a8ab
--- /dev/null
+++ b/pandas/tests/series/methods/test_set_name.py
@@ -0,0 +1,21 @@
+from datetime import datetime
+
+from pandas import Series
+
+
+class TestSetName:
+ def test_set_name(self):
+ ser = Series([1, 2, 3])
+ ser2 = ser._set_name("foo")
+ assert ser2.name == "foo"
+ assert ser.name is None
+ assert ser is not ser2
+
+ def test_set_name_attribute(self):
+ ser = Series([1, 2, 3])
+ ser2 = Series([1, 2, 3], name="bar")
+ for name in [7, 7.0, "name", datetime(2001, 1, 1), (1,), "\u05D0"]:
+ ser.name = name
+ assert ser.name == name
+ ser2.name = name
+ assert ser2.name == name
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
deleted file mode 100644
index 181d7de43d945..0000000000000
--- a/pandas/tests/series/test_alter_axes.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from datetime import datetime
-
-import numpy as np
-import pytest
-
-from pandas import Index, Series
-import pandas._testing as tm
-
-
-class TestSeriesAlterAxes:
- def test_setindex(self, string_series):
- # wrong type
- msg = (
- r"Index\(\.\.\.\) must be called with a collection of some "
- r"kind, None was passed"
- )
- with pytest.raises(TypeError, match=msg):
- string_series.index = None
-
- # wrong length
- msg = (
- "Length mismatch: Expected axis has 30 elements, "
- "new values have 29 elements"
- )
- with pytest.raises(ValueError, match=msg):
- string_series.index = np.arange(len(string_series) - 1)
-
- # works
- string_series.index = np.arange(len(string_series))
- assert isinstance(string_series.index, Index)
-
- # Renaming
-
- def test_set_name_attribute(self):
- s = Series([1, 2, 3])
- s2 = Series([1, 2, 3], name="bar")
- for name in [7, 7.0, "name", datetime(2001, 1, 1), (1,), "\u05D0"]:
- s.name = name
- assert s.name == name
- s2.name = name
- assert s2.name == name
-
- def test_set_name(self):
- s = Series([1, 2, 3])
- s2 = s._set_name("foo")
- assert s2.name == "foo"
- assert s.name is None
- assert s is not s2
-
- def test_set_index_makes_timeseries(self):
- idx = tm.makeDateIndex(10)
-
- s = Series(range(10))
- s.index = idx
- assert s.index._is_all_dates
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37619 | 2020-11-04T00:30:58Z | 2020-11-04T02:05:28Z | 2020-11-04T02:05:28Z | 2020-11-04T04:02:09Z |
TST/REF: tests.generic | diff --git a/pandas/tests/frame/methods/test_equals.py b/pandas/tests/frame/methods/test_equals.py
index c024390297fec..de2509ed91be2 100644
--- a/pandas/tests/frame/methods/test_equals.py
+++ b/pandas/tests/frame/methods/test_equals.py
@@ -1,4 +1,6 @@
-from pandas import DataFrame
+import numpy as np
+
+from pandas import DataFrame, date_range
import pandas._testing as tm
@@ -21,3 +23,56 @@ def test_equals_different_blocks(self):
tm.assert_frame_equal(df0, df1)
assert df0.equals(df1)
assert df1.equals(df0)
+
+ def test_equals(self):
+ # Add object dtype column with nans
+ index = np.random.random(10)
+ df1 = DataFrame(np.random.random(10), index=index, columns=["floats"])
+ df1["text"] = "the sky is so blue. we could use more chocolate.".split()
+ df1["start"] = date_range("2000-1-1", periods=10, freq="T")
+ df1["end"] = date_range("2000-1-1", periods=10, freq="D")
+ df1["diff"] = df1["end"] - df1["start"]
+ df1["bool"] = np.arange(10) % 3 == 0
+ df1.loc[::2] = np.nan
+ df2 = df1.copy()
+ assert df1["text"].equals(df2["text"])
+ assert df1["start"].equals(df2["start"])
+ assert df1["end"].equals(df2["end"])
+ assert df1["diff"].equals(df2["diff"])
+ assert df1["bool"].equals(df2["bool"])
+ assert df1.equals(df2)
+ assert not df1.equals(object)
+
+ # different dtype
+ different = df1.copy()
+ different["floats"] = different["floats"].astype("float32")
+ assert not df1.equals(different)
+
+ # different index
+ different_index = -index
+ different = df2.set_index(different_index)
+ assert not df1.equals(different)
+
+ # different columns
+ different = df2.copy()
+ different.columns = df2.columns[::-1]
+ assert not df1.equals(different)
+
+ # DatetimeIndex
+ index = date_range("2000-1-1", periods=10, freq="T")
+ df1 = df1.set_index(index)
+ df2 = df1.copy()
+ assert df1.equals(df2)
+
+ # MultiIndex
+ df3 = df1.set_index(["text"], append=True)
+ df2 = df1.set_index(["text"], append=True)
+ assert df3.equals(df2)
+
+ df2 = df1.set_index(["floats"], append=True)
+ assert not df3.equals(df2)
+
+ # NaN in index
+ df3 = df1.set_index(["floats"], append=True)
+ df2 = df1.set_index(["floats"], append=True)
+ assert df3.equals(df2)
diff --git a/pandas/tests/frame/methods/test_head_tail.py b/pandas/tests/frame/methods/test_head_tail.py
index 93763bc12ce0d..fa28f7d3e16a2 100644
--- a/pandas/tests/frame/methods/test_head_tail.py
+++ b/pandas/tests/frame/methods/test_head_tail.py
@@ -4,6 +4,30 @@
import pandas._testing as tm
+def test_head_tail_generic(index, frame_or_series):
+ # GH#5370
+
+ ndim = 2 if frame_or_series is DataFrame else 1
+ shape = (len(index),) * ndim
+ vals = np.random.randn(*shape)
+ obj = frame_or_series(vals, index=index)
+
+ tm.assert_equal(obj.head(), obj.iloc[:5])
+ tm.assert_equal(obj.tail(), obj.iloc[-5:])
+
+ # 0-len
+ tm.assert_equal(obj.head(0), obj.iloc[0:0])
+ tm.assert_equal(obj.tail(0), obj.iloc[0:0])
+
+ # bounded
+ tm.assert_equal(obj.head(len(obj) + 1), obj)
+ tm.assert_equal(obj.tail(len(obj) + 1), obj)
+
+ # neg index
+ tm.assert_equal(obj.head(-3), obj.head(len(index) - 3))
+ tm.assert_equal(obj.tail(-3), obj.tail(len(index) - 3))
+
+
def test_head_tail(float_frame):
tm.assert_frame_equal(float_frame.head(), float_frame[:5])
tm.assert_frame_equal(float_frame.tail(), float_frame[-5:])
diff --git a/pandas/tests/generic/methods/test_first_valid_index.py b/pandas/tests/generic/methods/test_first_valid_index.py
index bca3452c3c458..8d021f0e3954e 100644
--- a/pandas/tests/generic/methods/test_first_valid_index.py
+++ b/pandas/tests/generic/methods/test_first_valid_index.py
@@ -9,10 +9,9 @@
class TestFirstValidIndex:
- @pytest.mark.parametrize("klass", [Series, DataFrame])
- def test_first_valid_index_single_nan(self, klass):
+ def test_first_valid_index_single_nan(self, frame_or_series):
# GH#9752 Series/DataFrame should both return None, not raise
- obj = klass([np.nan])
+ obj = frame_or_series([np.nan])
assert obj.first_valid_index() is None
assert obj.iloc[:0].first_valid_index() is None
diff --git a/pandas/tests/generic/methods/test_pipe.py b/pandas/tests/generic/methods/test_pipe.py
index 59e5edc4b8bb5..b378600634bf0 100644
--- a/pandas/tests/generic/methods/test_pipe.py
+++ b/pandas/tests/generic/methods/test_pipe.py
@@ -5,11 +5,10 @@
class TestPipe:
- @pytest.mark.parametrize("klass", [Series, DataFrame])
- def test_pipe(self, klass):
+ def test_pipe(self, frame_or_series):
obj = DataFrame({"A": [1, 2, 3]})
expected = DataFrame({"A": [1, 4, 9]})
- if klass is Series:
+ if frame_or_series is Series:
obj = obj["A"]
expected = expected["A"]
@@ -17,20 +16,18 @@ def test_pipe(self, klass):
result = obj.pipe(f, 2)
tm.assert_equal(result, expected)
- @pytest.mark.parametrize("klass", [Series, DataFrame])
- def test_pipe_tuple(self, klass):
+ def test_pipe_tuple(self, frame_or_series):
obj = DataFrame({"A": [1, 2, 3]})
- if klass is Series:
+ if frame_or_series is Series:
obj = obj["A"]
f = lambda x, y: y
result = obj.pipe((f, "y"), 0)
tm.assert_equal(result, obj)
- @pytest.mark.parametrize("klass", [Series, DataFrame])
- def test_pipe_tuple_error(self, klass):
+ def test_pipe_tuple_error(self, frame_or_series):
obj = DataFrame({"A": [1, 2, 3]})
- if klass is Series:
+ if frame_or_series is Series:
obj = obj["A"]
f = lambda x, y: y
diff --git a/pandas/tests/generic/methods/test_reorder_levels.py b/pandas/tests/generic/methods/test_reorder_levels.py
index 8bb6417e56659..6bfbf089a6108 100644
--- a/pandas/tests/generic/methods/test_reorder_levels.py
+++ b/pandas/tests/generic/methods/test_reorder_levels.py
@@ -1,20 +1,19 @@
import numpy as np
import pytest
-from pandas import DataFrame, MultiIndex, Series
+from pandas import DataFrame, MultiIndex
import pandas._testing as tm
class TestReorderLevels:
- @pytest.mark.parametrize("klass", [Series, DataFrame])
- def test_reorder_levels(self, klass):
+ def test_reorder_levels(self, frame_or_series):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
- obj = df if klass is DataFrame else df["A"]
+ obj = df if frame_or_series is DataFrame else df["A"]
# no change, position
result = obj.reorder_levels([0, 1, 2])
@@ -32,7 +31,7 @@ def test_reorder_levels(self, klass):
names=["L1", "L2", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
- expected = expected if klass is DataFrame else expected["A"]
+ expected = expected if frame_or_series is DataFrame else expected["A"]
tm.assert_equal(result, expected)
result = obj.reorder_levels([0, 0, 0])
@@ -42,7 +41,7 @@ def test_reorder_levels(self, klass):
names=["L0", "L0", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
- expected = expected if klass is DataFrame else expected["A"]
+ expected = expected if frame_or_series is DataFrame else expected["A"]
tm.assert_equal(result, expected)
result = obj.reorder_levels(["L0", "L0", "L0"])
diff --git a/pandas/tests/generic/methods/test_sample.py b/pandas/tests/generic/methods/test_sample.py
index 7303dad9170ed..b26a3785f918d 100644
--- a/pandas/tests/generic/methods/test_sample.py
+++ b/pandas/tests/generic/methods/test_sample.py
@@ -155,22 +155,20 @@ def test_sample_none_weights(self, obj):
),
],
)
- @pytest.mark.parametrize("klass", [Series, DataFrame])
- def test_sample_random_state(self, func_str, arg, klass):
+ def test_sample_random_state(self, func_str, arg, frame_or_series):
# GH#32503
obj = DataFrame({"col1": range(10, 20), "col2": range(20, 30)})
- if klass is Series:
+ if frame_or_series is Series:
obj = obj["col1"]
result = obj.sample(n=3, random_state=eval(func_str)(arg))
expected = obj.sample(n=3, random_state=com.random_state(eval(func_str)(arg)))
tm.assert_equal(result, expected)
- @pytest.mark.parametrize("klass", [Series, DataFrame])
- def test_sample_upsampling_without_replacement(self, klass):
+ def test_sample_upsampling_without_replacement(self, frame_or_series):
# GH#27451
obj = DataFrame({"A": list("abc")})
- if klass is Series:
+ if frame_or_series is Series:
obj = obj["A"]
msg = (
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 45601abc95fe6..930c48cbdc214 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -5,8 +5,7 @@
from pandas.core.dtypes.common import is_scalar
-import pandas as pd
-from pandas import DataFrame, Series, date_range
+from pandas import DataFrame, Series
import pandas._testing as tm
# ----------------------------------------------------------------------
@@ -248,31 +247,6 @@ def test_metadata_propagation(self):
self.check_metadata(v1 & v2)
self.check_metadata(v1 | v2)
- def test_head_tail(self, index):
- # GH5370
-
- o = self._construct(shape=len(index))
-
- axis = o._get_axis_name(0)
- setattr(o, axis, index)
-
- o.head()
-
- self._compare(o.head(), o.iloc[:5])
- self._compare(o.tail(), o.iloc[-5:])
-
- # 0-len
- self._compare(o.head(0), o.iloc[0:0])
- self._compare(o.tail(0), o.iloc[0:0])
-
- # bounded
- self._compare(o.head(len(o) + 1), o)
- self._compare(o.tail(len(o) + 1), o)
-
- # neg index
- self._compare(o.head(-3), o.head(len(index) - 3))
- self._compare(o.tail(-3), o.tail(len(index) - 3))
-
def test_size_compat(self):
# GH8846
# size property should be defined
@@ -460,77 +434,23 @@ def test_take_invalid_kwargs(self):
obj.take(indices, mode="clip")
@pytest.mark.parametrize("is_copy", [True, False])
- def test_depr_take_kwarg_is_copy(self, is_copy):
+ def test_depr_take_kwarg_is_copy(self, is_copy, frame_or_series):
# GH 27357
- df = DataFrame({"A": [1, 2, 3]})
+ obj = DataFrame({"A": [1, 2, 3]})
+ if frame_or_series is Series:
+ obj = obj["A"]
+
msg = (
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this."
)
with tm.assert_produces_warning(FutureWarning) as w:
- df.take([0, 1], is_copy=is_copy)
+ obj.take([0, 1], is_copy=is_copy)
assert w[0].message.args[0] == msg
- s = Series([1, 2, 3])
- with tm.assert_produces_warning(FutureWarning):
- s.take([0, 1], is_copy=is_copy)
-
- def test_equals(self):
- # Add object dtype column with nans
- index = np.random.random(10)
- df1 = DataFrame(np.random.random(10), index=index, columns=["floats"])
- df1["text"] = "the sky is so blue. we could use more chocolate.".split()
- df1["start"] = date_range("2000-1-1", periods=10, freq="T")
- df1["end"] = date_range("2000-1-1", periods=10, freq="D")
- df1["diff"] = df1["end"] - df1["start"]
- df1["bool"] = np.arange(10) % 3 == 0
- df1.loc[::2] = np.nan
- df2 = df1.copy()
- assert df1["text"].equals(df2["text"])
- assert df1["start"].equals(df2["start"])
- assert df1["end"].equals(df2["end"])
- assert df1["diff"].equals(df2["diff"])
- assert df1["bool"].equals(df2["bool"])
- assert df1.equals(df2)
- assert not df1.equals(object)
-
- # different dtype
- different = df1.copy()
- different["floats"] = different["floats"].astype("float32")
- assert not df1.equals(different)
-
- # different index
- different_index = -index
- different = df2.set_index(different_index)
- assert not df1.equals(different)
-
- # different columns
- different = df2.copy()
- different.columns = df2.columns[::-1]
- assert not df1.equals(different)
-
- # DatetimeIndex
- index = pd.date_range("2000-1-1", periods=10, freq="T")
- df1 = df1.set_index(index)
- df2 = df1.copy()
- assert df1.equals(df2)
-
- # MultiIndex
- df3 = df1.set_index(["text"], append=True)
- df2 = df1.set_index(["text"], append=True)
- assert df3.equals(df2)
-
- df2 = df1.set_index(["floats"], append=True)
- assert not df3.equals(df2)
-
- # NaN in index
- df3 = df1.set_index(["floats"], append=True)
- df2 = df1.set_index(["floats"], append=True)
- assert df3.equals(df2)
-
- @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
- def test_axis_classmethods(self, box):
+ def test_axis_classmethods(self, frame_or_series):
+ box = frame_or_series
obj = box(dtype=object)
values = box._AXIS_TO_AXIS_NUMBER.keys()
for v in values:
@@ -538,24 +458,23 @@ def test_axis_classmethods(self, box):
assert obj._get_axis_name(v) == box._get_axis_name(v)
assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v)
- @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
- def test_axis_names_deprecated(self, box):
+ def test_axis_names_deprecated(self, frame_or_series):
# GH33637
+ box = frame_or_series
obj = box(dtype=object)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
obj._AXIS_NAMES
- @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
- def test_axis_numbers_deprecated(self, box):
+ def test_axis_numbers_deprecated(self, frame_or_series):
# GH33637
+ box = frame_or_series
obj = box(dtype=object)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
obj._AXIS_NUMBERS
- @pytest.mark.parametrize("as_frame", [True, False])
- def test_flags_identity(self, as_frame):
+ def test_flags_identity(self, frame_or_series):
s = Series([1, 2])
- if as_frame:
+ if frame_or_series is DataFrame:
s = s.to_frame()
assert s.flags is s.flags
| https://api.github.com/repos/pandas-dev/pandas/pulls/37618 | 2020-11-04T00:09:51Z | 2020-11-04T01:50:30Z | 2020-11-04T01:50:30Z | 2020-11-04T04:01:56Z | |
TST: collect tests by method | diff --git a/pandas/tests/frame/methods/test_asfreq.py b/pandas/tests/frame/methods/test_asfreq.py
index cdcd922949bcf..368ce88abe165 100644
--- a/pandas/tests/frame/methods/test_asfreq.py
+++ b/pandas/tests/frame/methods/test_asfreq.py
@@ -74,3 +74,14 @@ def test_asfreq_fillvalue(self):
expected_series = ts.asfreq(freq="1S").fillna(9.0)
actual_series = ts.asfreq(freq="1S", fill_value=9.0)
tm.assert_series_equal(expected_series, actual_series)
+
+ def test_asfreq_with_date_object_index(self, frame_or_series):
+ rng = date_range("1/1/2000", periods=20)
+ ts = frame_or_series(np.random.randn(20), index=rng)
+
+ ts2 = ts.copy()
+ ts2.index = [x.date() for x in ts2.index]
+
+ result = ts2.asfreq("4H", method="ffill")
+ expected = ts.asfreq("4H", method="ffill")
+ tm.assert_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_values.py b/pandas/tests/frame/methods/test_values.py
index 564a659724768..fb0c5d31f692b 100644
--- a/pandas/tests/frame/methods/test_values.py
+++ b/pandas/tests/frame/methods/test_values.py
@@ -1,6 +1,7 @@
import numpy as np
+import pytest
-from pandas import DataFrame, NaT, Timestamp, date_range
+from pandas import DataFrame, NaT, Series, Timestamp, date_range, period_range
import pandas._testing as tm
@@ -44,6 +45,22 @@ def test_values_duplicates(self):
tm.assert_numpy_array_equal(result, expected)
+ @pytest.mark.parametrize("constructor", [date_range, period_range])
+ def test_values_casts_datetimelike_to_object(self, constructor):
+ series = Series(constructor("2000-01-01", periods=10, freq="D"))
+
+ expected = series.astype("object")
+
+ df = DataFrame({"a": series, "b": np.random.randn(len(series))})
+
+ result = df.values.squeeze()
+ assert (result[:, 0] == expected.values).all()
+
+ df = DataFrame({"a": series, "b": ["foo"] * len(series)})
+
+ result = df.values.squeeze()
+ assert (result[:, 0] == expected.values).all()
+
def test_frame_values_with_tz(self):
tz = "US/Central"
df = DataFrame({"A": date_range("2000", periods=4, tz=tz)})
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index d4ebb557fd6cd..59269b9b54ddc 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -544,6 +544,13 @@ def test_contains_nonunique(self, vals):
class TestGetIndexer:
+ def test_get_indexer_date_objs(self):
+ rng = date_range("1/1/2000", periods=20)
+
+ result = rng.get_indexer(rng.map(lambda x: x.date()))
+ expected = rng.get_indexer(rng)
+ tm.assert_numpy_array_equal(result, expected)
+
def test_get_indexer(self):
idx = pd.date_range("2000-01-01", periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
diff --git a/pandas/tests/series/apply/test_series_apply.py b/pandas/tests/series/apply/test_series_apply.py
index 9096d2a1033e5..93431a5c75091 100644
--- a/pandas/tests/series/apply/test_series_apply.py
+++ b/pandas/tests/series/apply/test_series_apply.py
@@ -5,12 +5,23 @@
import pytest
import pandas as pd
-from pandas import DataFrame, Index, MultiIndex, Series, isna
+from pandas import DataFrame, Index, MultiIndex, Series, isna, timedelta_range
import pandas._testing as tm
from pandas.core.base import SpecificationError
class TestSeriesApply:
+ def test_series_map_box_timedelta(self):
+ # GH#11349
+ ser = Series(timedelta_range("1 day 1 s", periods=5, freq="h"))
+
+ def f(x):
+ return x.total_seconds()
+
+ ser.map(f)
+ ser.apply(f)
+ DataFrame(ser).applymap(f)
+
def test_apply(self, datetime_series):
with np.errstate(all="ignore"):
tm.assert_series_equal(
diff --git a/pandas/tests/series/methods/test_values.py b/pandas/tests/series/methods/test_values.py
new file mode 100644
index 0000000000000..e28a714ea656d
--- /dev/null
+++ b/pandas/tests/series/methods/test_values.py
@@ -0,0 +1,20 @@
+import numpy as np
+import pytest
+
+from pandas import IntervalIndex, Series, period_range
+import pandas._testing as tm
+
+
+class TestValues:
+ @pytest.mark.parametrize(
+ "data",
+ [
+ period_range("2000", periods=4),
+ IntervalIndex.from_breaks([1, 2, 3, 4]),
+ ],
+ )
+ def test_values_object_extension_dtypes(self, data):
+ # https://github.com/pandas-dev/pandas/issues/23995
+ result = Series(data).values
+ expected = np.array(data.astype(object))
+ tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 9154c566a3dae..fa8f85178ba9f 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -730,6 +730,21 @@ def test_datetime_understood(self):
expected = Series(pd.to_datetime(["2011-12-26", "2011-12-27", "2011-12-28"]))
tm.assert_series_equal(result, expected)
+ def test_align_date_objects_with_datetimeindex(self):
+ rng = date_range("1/1/2000", periods=20)
+ ts = Series(np.random.randn(20), index=rng)
+
+ ts_slice = ts[5:]
+ ts2 = ts_slice.copy()
+ ts2.index = [x.date() for x in ts2.index]
+
+ result = ts + ts2
+ result2 = ts2 + ts
+ expected = ts + ts[5:]
+ expected.index = expected.index._with_freq(None)
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(result2, expected)
+
@pytest.mark.parametrize(
"names",
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index b85a53960b0f6..2fbed92567f71 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -6,7 +6,7 @@
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
-from pandas import Categorical, DataFrame, Series, date_range
+from pandas import Categorical, DataFrame, Series
import pandas._testing as tm
@@ -120,18 +120,20 @@ def cmp(a, b):
s.astype("object").astype(CategoricalDtype()), roundtrip_expected
)
+ def test_invalid_conversions(self):
# invalid conversion (these are NOT a dtype)
+ cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
+ ser = Series(np.random.randint(0, 10000, 100)).sort_values()
+ ser = pd.cut(ser, range(0, 10500, 500), right=False, labels=cat)
+
msg = (
"dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "
"not understood"
)
-
- for invalid in [
- lambda x: x.astype(Categorical),
- lambda x: x.astype("object").astype(Categorical),
- ]:
- with pytest.raises(TypeError, match=msg):
- invalid(s)
+ with pytest.raises(TypeError, match=msg):
+ ser.astype(Categorical)
+ with pytest.raises(TypeError, match=msg):
+ ser.astype("object").astype(Categorical)
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
@@ -148,27 +150,6 @@ def test_astype_empty_constructor_equality(self, dtype):
as_type_empty = Series([]).astype(dtype)
tm.assert_series_equal(init_empty, as_type_empty)
- def test_intercept_astype_object(self):
- series = Series(date_range("1/1/2000", periods=10))
-
- # This test no longer makes sense, as
- # Series is by default already M8[ns].
- expected = series.astype("object")
-
- df = DataFrame({"a": series, "b": np.random.randn(len(series))})
- exp_dtypes = Series(
- [np.dtype("datetime64[ns]"), np.dtype("float64")], index=["a", "b"]
- )
- tm.assert_series_equal(df.dtypes, exp_dtypes)
-
- result = df.values.squeeze()
- assert (result[:, 0] == expected.values).all()
-
- df = DataFrame({"a": series, "b": ["foo"] * len(series)})
-
- result = df.values.squeeze()
- assert (result[:, 0] == expected.values).all()
-
def test_series_to_categorical(self):
# see gh-16524: test conversion of Series to Categorical
series = Series(["a", "b", "c"])
@@ -178,19 +159,6 @@ def test_series_to_categorical(self):
tm.assert_series_equal(result, expected)
- @pytest.mark.parametrize(
- "data",
- [
- pd.period_range("2000", periods=4),
- pd.IntervalIndex.from_breaks([1, 2, 3, 4]),
- ],
- )
- def test_values_compatibility(self, data):
- # https://github.com/pandas-dev/pandas/issues/23995
- result = Series(data).values
- expected = np.array(data.astype(object))
- tm.assert_numpy_array_equal(result, expected)
-
def test_reindex_astype_order_consistency(self):
# GH 17444
s = Series([1, 2, 3], index=[2, 0, 1])
diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py
deleted file mode 100644
index 17dbfa9cf379a..0000000000000
--- a/pandas/tests/series/test_period.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import numpy as np
-
-from pandas import DataFrame, Series, period_range
-
-
-class TestSeriesPeriod:
-
- # ---------------------------------------------------------------------
- # NaT support
-
- def test_intercept_astype_object(self):
- series = Series(period_range("2000-01-01", periods=10, freq="D"))
-
- expected = series.astype("object")
-
- df = DataFrame({"a": series, "b": np.random.randn(len(series))})
-
- result = df.values.squeeze()
- assert (result[:, 0] == expected.values).all()
-
- df = DataFrame({"a": series, "b": ["foo"] * len(series)})
-
- result = df.values.squeeze()
- assert (result[:, 0] == expected.values).all()
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
deleted file mode 100644
index 0769606d18d57..0000000000000
--- a/pandas/tests/series/test_timeseries.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import numpy as np
-
-from pandas import DataFrame, Series, date_range, timedelta_range
-import pandas._testing as tm
-
-
-class TestTimeSeries:
- def test_promote_datetime_date(self):
- rng = date_range("1/1/2000", periods=20)
- ts = Series(np.random.randn(20), index=rng)
-
- ts_slice = ts[5:]
- ts2 = ts_slice.copy()
- ts2.index = [x.date() for x in ts2.index]
-
- result = ts + ts2
- result2 = ts2 + ts
- expected = ts + ts[5:]
- expected.index = expected.index._with_freq(None)
- tm.assert_series_equal(result, expected)
- tm.assert_series_equal(result2, expected)
-
- # test asfreq
- result = ts2.asfreq("4H", method="ffill")
- expected = ts[5:].asfreq("4H", method="ffill")
- tm.assert_series_equal(result, expected)
-
- result = rng.get_indexer(ts2.index)
- expected = rng.get_indexer(ts_slice.index)
- tm.assert_numpy_array_equal(result, expected)
-
- def test_series_map_box_timedelta(self):
- # GH 11349
- s = Series(timedelta_range("1 day 1 s", periods=5, freq="h"))
-
- def f(x):
- return x.total_seconds()
-
- s.map(f)
- s.apply(f)
- DataFrame(s).applymap(f)
| https://api.github.com/repos/pandas-dev/pandas/pulls/37617 | 2020-11-03T23:16:38Z | 2020-11-04T01:51:34Z | 2020-11-04T01:51:34Z | 2020-11-04T03:53:42Z | |
TST/REF: share tests across Series/DataFrame | diff --git a/pandas/tests/frame/methods/test_asof.py b/pandas/tests/frame/methods/test_asof.py
index 70b42976c95a7..6931dd0ea2d4c 100644
--- a/pandas/tests/frame/methods/test_asof.py
+++ b/pandas/tests/frame/methods/test_asof.py
@@ -96,12 +96,16 @@ def test_missing(self, date_range_frame):
result = df.asof("1989-12-31")
assert isinstance(result.name, Period)
+ def test_asof_all_nans(self, frame_or_series):
+ # GH 15713
+ # DataFrame/Series is all nans
+ result = frame_or_series([np.nan]).asof([0])
+ expected = frame_or_series([np.nan])
+ tm.assert_equal(result, expected)
+
def test_all_nans(self, date_range_frame):
# GH 15713
# DataFrame is all nans
- result = DataFrame([np.nan]).asof([0])
- expected = DataFrame([np.nan])
- tm.assert_frame_equal(result, expected)
# testing non-default indexes, multiple inputs
N = 150
diff --git a/pandas/tests/frame/methods/test_droplevel.py b/pandas/tests/frame/methods/test_droplevel.py
index 517905cf23259..ce98704b03106 100644
--- a/pandas/tests/frame/methods/test_droplevel.py
+++ b/pandas/tests/frame/methods/test_droplevel.py
@@ -1,23 +1,32 @@
+import pytest
+
from pandas import DataFrame, Index, MultiIndex
import pandas._testing as tm
class TestDropLevel:
- def test_droplevel(self):
+ def test_droplevel(self, frame_or_series):
# GH#20342
- df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
- df = df.set_index([0, 1]).rename_axis(["a", "b"])
- df.columns = MultiIndex.from_tuples(
+ cols = MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
+ mi = MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"])
+ df = DataFrame([[3, 4], [7, 8], [11, 12]], index=mi, columns=cols)
+ if frame_or_series is not DataFrame:
+ df = df.iloc[:, 0]
# test that dropping of a level in index works
expected = df.reset_index("a", drop=True)
result = df.droplevel("a", axis="index")
- tm.assert_frame_equal(result, expected)
+ tm.assert_equal(result, expected)
- # test that dropping of a level in columns works
- expected = df.copy()
- expected.columns = Index(["c", "d"], name="level_1")
- result = df.droplevel("level_2", axis="columns")
- tm.assert_frame_equal(result, expected)
+ if frame_or_series is DataFrame:
+ # test that dropping of a level in columns works
+ expected = df.copy()
+ expected.columns = Index(["c", "d"], name="level_1")
+ result = df.droplevel("level_2", axis="columns")
+ tm.assert_equal(result, expected)
+ else:
+ # test that droplevel raises ValueError on axis != 0
+ with pytest.raises(ValueError, match="No axis named columns"):
+ df.droplevel(1, axis="columns")
diff --git a/pandas/tests/frame/methods/test_first_and_last.py b/pandas/tests/frame/methods/test_first_and_last.py
index 2b3756969acca..d21e1eee54e16 100644
--- a/pandas/tests/frame/methods/test_first_and_last.py
+++ b/pandas/tests/frame/methods/test_first_and_last.py
@@ -8,56 +8,64 @@
class TestFirst:
- def test_first_subset(self):
+ def test_first_subset(self, frame_or_series):
ts = tm.makeTimeDataFrame(freq="12h")
+ if frame_or_series is not DataFrame:
+ ts = ts["A"]
result = ts.first("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(freq="D")
+ if frame_or_series is not DataFrame:
+ ts = ts["A"]
result = ts.first("10d")
assert len(result) == 10
result = ts.first("3M")
expected = ts[:"3/31/2000"]
- tm.assert_frame_equal(result, expected)
+ tm.assert_equal(result, expected)
result = ts.first("21D")
expected = ts[:21]
- tm.assert_frame_equal(result, expected)
+ tm.assert_equal(result, expected)
result = ts[:0].first("3M")
- tm.assert_frame_equal(result, ts[:0])
+ tm.assert_equal(result, ts[:0])
- def test_first_raises(self):
+ def test_first_last_raises(self, frame_or_series):
# GH#20725
- df = DataFrame([[1, 2, 3], [4, 5, 6]])
+ obj = DataFrame([[1, 2, 3], [4, 5, 6]])
+ if frame_or_series is not DataFrame:
+ obj = obj[0]
+
msg = "'first' only supports a DatetimeIndex index"
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
- df.first("1D")
+ obj.first("1D")
+
+ msg = "'last' only supports a DatetimeIndex index"
+ with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
+ obj.last("1D")
- def test_last_subset(self):
+ def test_last_subset(self, frame_or_series):
ts = tm.makeTimeDataFrame(freq="12h")
+ if frame_or_series is not DataFrame:
+ ts = ts["A"]
result = ts.last("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(nper=30, freq="D")
+ if frame_or_series is not DataFrame:
+ ts = ts["A"]
result = ts.last("10d")
assert len(result) == 10
result = ts.last("21D")
expected = ts["2000-01-10":]
- tm.assert_frame_equal(result, expected)
+ tm.assert_equal(result, expected)
result = ts.last("21D")
expected = ts[-21:]
- tm.assert_frame_equal(result, expected)
+ tm.assert_equal(result, expected)
result = ts[:0].last("3M")
- tm.assert_frame_equal(result, ts[:0])
-
- def test_last_raises(self):
- # GH20725
- df = DataFrame([[1, 2, 3], [4, 5, 6]])
- msg = "'last' only supports a DatetimeIndex index"
- with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
- df.last("1D")
+ tm.assert_equal(result, ts[:0])
diff --git a/pandas/tests/frame/methods/test_head_tail.py b/pandas/tests/frame/methods/test_head_tail.py
index 93763bc12ce0d..5cef7969f06ec 100644
--- a/pandas/tests/frame/methods/test_head_tail.py
+++ b/pandas/tests/frame/methods/test_head_tail.py
@@ -24,6 +24,9 @@ def test_head_tail(float_frame):
tm.assert_frame_equal(df.tail(0), df[0:0])
tm.assert_frame_equal(df.head(-1), df.iloc[:-1])
tm.assert_frame_equal(df.tail(-1), df.iloc[1:])
+
+
+def test_head_tail_empty():
# test empty dataframe
empty_df = DataFrame()
tm.assert_frame_equal(empty_df.tail(), empty_df)
diff --git a/pandas/tests/frame/methods/test_truncate.py b/pandas/tests/frame/methods/test_truncate.py
index 674f482c478a0..c6d6637edc88c 100644
--- a/pandas/tests/frame/methods/test_truncate.py
+++ b/pandas/tests/frame/methods/test_truncate.py
@@ -2,12 +2,15 @@
import pytest
import pandas as pd
+from pandas import DataFrame, Series, date_range
import pandas._testing as tm
class TestDataFrameTruncate:
- def test_truncate(self, datetime_frame):
+ def test_truncate(self, datetime_frame, frame_or_series):
ts = datetime_frame[::3]
+ if frame_or_series is Series:
+ ts = ts.iloc[:, 0]
start, end = datetime_frame.index[3], datetime_frame.index[6]
@@ -16,34 +19,41 @@ def test_truncate(self, datetime_frame):
# neither specified
truncated = ts.truncate()
- tm.assert_frame_equal(truncated, ts)
+ tm.assert_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
- tm.assert_frame_equal(truncated, expected)
+ tm.assert_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
- tm.assert_frame_equal(truncated, expected)
+ tm.assert_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
- tm.assert_frame_equal(truncated, expected)
+ tm.assert_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
- tm.assert_frame_equal(truncated, expected)
+ tm.assert_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
- tm.assert_frame_equal(truncated, expected)
+ tm.assert_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
- tm.assert_frame_equal(truncated, expected)
+ tm.assert_equal(truncated, expected)
+
+ # corner case, empty series/frame returned
+ truncated = ts.truncate(after=ts.index[0] - ts.index.freq)
+ assert len(truncated) == 0
+
+ truncated = ts.truncate(before=ts.index[-1] + ts.index.freq)
+ assert len(truncated) == 0
msg = "Truncate: 2000-01-06 00:00:00 must be after 2000-02-04 00:00:00"
with pytest.raises(ValueError, match=msg):
@@ -57,25 +67,35 @@ def test_truncate_copy(self, datetime_frame):
truncated.values[:] = 5.0
assert not (datetime_frame.values[5:11] == 5).any()
- def test_truncate_nonsortedindex(self):
+ def test_truncate_nonsortedindex(self, frame_or_series):
# GH#17935
- df = pd.DataFrame({"A": ["a", "b", "c", "d", "e"]}, index=[5, 3, 2, 9, 0])
+ obj = DataFrame({"A": ["a", "b", "c", "d", "e"]}, index=[5, 3, 2, 9, 0])
+ if frame_or_series is Series:
+ obj = obj["A"]
+
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
- df.truncate(before=3, after=9)
+ obj.truncate(before=3, after=9)
+
+ def test_sort_values_nonsortedindex(self):
+ # TODO: belongs elsewhere?
- rng = pd.date_range("2011-01-01", "2012-01-01", freq="W")
- ts = pd.DataFrame(
+ rng = date_range("2011-01-01", "2012-01-01", freq="W")
+ ts = DataFrame(
{"A": np.random.randn(len(rng)), "B": np.random.randn(len(rng))}, index=rng
)
+
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
ts.sort_values("A", ascending=False).truncate(
before="2011-11", after="2011-12"
)
- df = pd.DataFrame(
+ def test_truncate_nonsortedindex_axis1(self):
+ # GH#17935
+
+ df = DataFrame(
{
3: np.random.randn(5),
20: np.random.randn(5),
@@ -93,27 +113,34 @@ def test_truncate_nonsortedindex(self):
[(1, 2, [2, 1]), (None, 2, [2, 1, 0]), (1, None, [3, 2, 1])],
)
@pytest.mark.parametrize("klass", [pd.Int64Index, pd.DatetimeIndex])
- def test_truncate_decreasing_index(self, before, after, indices, klass):
+ def test_truncate_decreasing_index(
+ self, before, after, indices, klass, frame_or_series
+ ):
# https://github.com/pandas-dev/pandas/issues/33756
idx = klass([3, 2, 1, 0])
if klass is pd.DatetimeIndex:
before = pd.Timestamp(before) if before is not None else None
after = pd.Timestamp(after) if after is not None else None
indices = [pd.Timestamp(i) for i in indices]
- values = pd.DataFrame(range(len(idx)), index=idx)
+ values = frame_or_series(range(len(idx)), index=idx)
result = values.truncate(before=before, after=after)
expected = values.loc[indices]
- tm.assert_frame_equal(result, expected)
+ tm.assert_equal(result, expected)
- def test_truncate_multiindex(self):
+ def test_truncate_multiindex(self, frame_or_series):
# GH 34564
mi = pd.MultiIndex.from_product([[1, 2, 3, 4], ["A", "B"]], names=["L1", "L2"])
- s1 = pd.DataFrame(range(mi.shape[0]), index=mi, columns=["col"])
+ s1 = DataFrame(range(mi.shape[0]), index=mi, columns=["col"])
+ if frame_or_series is Series:
+ s1 = s1["col"]
+
result = s1.truncate(before=2, after=3)
- df = pd.DataFrame.from_dict(
+ df = DataFrame.from_dict(
{"L1": [2, 2, 3, 3], "L2": ["A", "B", "A", "B"], "col": [2, 3, 4, 5]}
)
expected = df.set_index(["L1", "L2"])
+ if frame_or_series is Series:
+ expected = expected["col"]
- tm.assert_frame_equal(result, expected)
+ tm.assert_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_tz_convert.py b/pandas/tests/frame/methods/test_tz_convert.py
index c70e479723644..ecb30cf11319b 100644
--- a/pandas/tests/frame/methods/test_tz_convert.py
+++ b/pandas/tests/frame/methods/test_tz_convert.py
@@ -1,7 +1,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, Index, MultiIndex, Series, date_range
+from pandas import DataFrame, Index, MultiIndex, date_range
import pandas._testing as tm
@@ -89,17 +89,16 @@ def test_tz_convert_and_localize(self, fn):
df = DataFrame(index=l0)
df = getattr(df, fn)("US/Pacific", level=1)
- @pytest.mark.parametrize("klass", [Series, DataFrame])
@pytest.mark.parametrize("copy", [True, False])
- def test_tz_convert_copy_inplace_mutate(self, copy, klass):
+ def test_tz_convert_copy_inplace_mutate(self, copy, frame_or_series):
# GH#6326
- obj = klass(
+ obj = frame_or_series(
np.arange(0, 5),
index=date_range("20131027", periods=5, freq="1H", tz="Europe/Berlin"),
)
orig = obj.copy()
result = obj.tz_convert("UTC", copy=copy)
- expected = klass(np.arange(0, 5), index=obj.index.tz_convert("UTC"))
+ expected = frame_or_series(np.arange(0, 5), index=obj.index.tz_convert("UTC"))
tm.assert_equal(result, expected)
tm.assert_equal(obj, orig)
assert result.index is not obj.index
diff --git a/pandas/tests/frame/methods/test_tz_localize.py b/pandas/tests/frame/methods/test_tz_localize.py
index 183b81ca5298e..aa5ab51fe3d8b 100644
--- a/pandas/tests/frame/methods/test_tz_localize.py
+++ b/pandas/tests/frame/methods/test_tz_localize.py
@@ -1,7 +1,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, Series, date_range
+from pandas import DataFrame, date_range
import pandas._testing as tm
@@ -23,16 +23,15 @@ def test_frame_tz_localize(self):
assert result.columns.tz.zone == "UTC"
tm.assert_frame_equal(result, expected.T)
- @pytest.mark.parametrize("klass", [Series, DataFrame])
@pytest.mark.parametrize("copy", [True, False])
- def test_tz_localize_copy_inplace_mutate(self, copy, klass):
+ def test_tz_localize_copy_inplace_mutate(self, copy, frame_or_series):
# GH#6326
- obj = klass(
+ obj = frame_or_series(
np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=None)
)
orig = obj.copy()
result = obj.tz_localize("UTC", copy=copy)
- expected = klass(
+ expected = frame_or_series(
np.arange(0, 5),
index=date_range("20131027", periods=5, freq="1H", tz="UTC"),
)
diff --git a/pandas/tests/series/methods/test_asof.py b/pandas/tests/series/methods/test_asof.py
index 4b4ef5ea046be..43d40d53dcd21 100644
--- a/pandas/tests/series/methods/test_asof.py
+++ b/pandas/tests/series/methods/test_asof.py
@@ -161,9 +161,6 @@ def test_errors(self):
def test_all_nans(self):
# GH 15713
# series is all nans
- result = Series([np.nan]).asof([0])
- expected = Series([np.nan])
- tm.assert_series_equal(result, expected)
# testing non-default indexes
N = 50
diff --git a/pandas/tests/series/methods/test_droplevel.py b/pandas/tests/series/methods/test_droplevel.py
deleted file mode 100644
index 449ddd1cd0e49..0000000000000
--- a/pandas/tests/series/methods/test_droplevel.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import pytest
-
-from pandas import MultiIndex, Series
-import pandas._testing as tm
-
-
-class TestDropLevel:
- def test_droplevel(self):
- # GH#20342
- ser = Series([1, 2, 3, 4])
- ser.index = MultiIndex.from_arrays(
- [(1, 2, 3, 4), (5, 6, 7, 8)], names=["a", "b"]
- )
- expected = ser.reset_index("b", drop=True)
- result = ser.droplevel("b", axis="index")
- tm.assert_series_equal(result, expected)
- # test that droplevel raises ValueError on axis != 0
- with pytest.raises(ValueError, match="No axis named columns"):
- ser.droplevel(1, axis="columns")
diff --git a/pandas/tests/series/methods/test_first_and_last.py b/pandas/tests/series/methods/test_first_and_last.py
deleted file mode 100644
index 7629dc8cda30b..0000000000000
--- a/pandas/tests/series/methods/test_first_and_last.py
+++ /dev/null
@@ -1,69 +0,0 @@
-"""
-Note: includes tests for `last`
-"""
-
-import numpy as np
-import pytest
-
-from pandas import Series, date_range
-import pandas._testing as tm
-
-
-class TestFirst:
- def test_first_subset(self):
- rng = date_range("1/1/2000", "1/1/2010", freq="12h")
- ts = Series(np.random.randn(len(rng)), index=rng)
- result = ts.first("10d")
- assert len(result) == 20
-
- rng = date_range("1/1/2000", "1/1/2010", freq="D")
- ts = Series(np.random.randn(len(rng)), index=rng)
- result = ts.first("10d")
- assert len(result) == 10
-
- result = ts.first("3M")
- expected = ts[:"3/31/2000"]
- tm.assert_series_equal(result, expected)
-
- result = ts.first("21D")
- expected = ts[:21]
- tm.assert_series_equal(result, expected)
-
- result = ts[:0].first("3M")
- tm.assert_series_equal(result, ts[:0])
-
- def test_first_raises(self):
- # GH#20725
- ser = Series("a b c".split())
- msg = "'first' only supports a DatetimeIndex index"
- with pytest.raises(TypeError, match=msg):
- ser.first("1D")
-
- def test_last_subset(self):
- rng = date_range("1/1/2000", "1/1/2010", freq="12h")
- ts = Series(np.random.randn(len(rng)), index=rng)
- result = ts.last("10d")
- assert len(result) == 20
-
- rng = date_range("1/1/2000", "1/1/2010", freq="D")
- ts = Series(np.random.randn(len(rng)), index=rng)
- result = ts.last("10d")
- assert len(result) == 10
-
- result = ts.last("21D")
- expected = ts["12/12/2009":]
- tm.assert_series_equal(result, expected)
-
- result = ts.last("21D")
- expected = ts[-21:]
- tm.assert_series_equal(result, expected)
-
- result = ts[:0].last("3M")
- tm.assert_series_equal(result, ts[:0])
-
- def test_last_raises(self):
- # GH#20725
- ser = Series("a b c".split())
- msg = "'last' only supports a DatetimeIndex index"
- with pytest.raises(TypeError, match=msg):
- ser.last("1D")
diff --git a/pandas/tests/series/indexing/test_pop.py b/pandas/tests/series/methods/test_pop.py
similarity index 100%
rename from pandas/tests/series/indexing/test_pop.py
rename to pandas/tests/series/methods/test_pop.py
diff --git a/pandas/tests/series/methods/test_truncate.py b/pandas/tests/series/methods/test_truncate.py
index b03f516eeffc5..21de593c0e2af 100644
--- a/pandas/tests/series/methods/test_truncate.py
+++ b/pandas/tests/series/methods/test_truncate.py
@@ -1,102 +1,11 @@
from datetime import datetime
-import numpy as np
-import pytest
-
import pandas as pd
from pandas import Series, date_range
import pandas._testing as tm
-from pandas.tseries.offsets import BDay
-
class TestTruncate:
- def test_truncate(self, datetime_series):
- offset = BDay()
-
- ts = datetime_series[::3]
-
- start, end = datetime_series.index[3], datetime_series.index[6]
- start_missing, end_missing = datetime_series.index[2], datetime_series.index[7]
-
- # neither specified
- truncated = ts.truncate()
- tm.assert_series_equal(truncated, ts)
-
- # both specified
- expected = ts[1:3]
-
- truncated = ts.truncate(start, end)
- tm.assert_series_equal(truncated, expected)
-
- truncated = ts.truncate(start_missing, end_missing)
- tm.assert_series_equal(truncated, expected)
-
- # start specified
- expected = ts[1:]
-
- truncated = ts.truncate(before=start)
- tm.assert_series_equal(truncated, expected)
-
- truncated = ts.truncate(before=start_missing)
- tm.assert_series_equal(truncated, expected)
-
- # end specified
- expected = ts[:3]
-
- truncated = ts.truncate(after=end)
- tm.assert_series_equal(truncated, expected)
-
- truncated = ts.truncate(after=end_missing)
- tm.assert_series_equal(truncated, expected)
-
- # corner case, empty series returned
- truncated = ts.truncate(after=datetime_series.index[0] - offset)
- assert len(truncated) == 0
-
- truncated = ts.truncate(before=datetime_series.index[-1] + offset)
- assert len(truncated) == 0
-
- msg = "Truncate: 1999-12-31 00:00:00 must be after 2000-02-14 00:00:00"
- with pytest.raises(ValueError, match=msg):
- ts.truncate(
- before=datetime_series.index[-1] + offset,
- after=datetime_series.index[0] - offset,
- )
-
- def test_truncate_nonsortedindex(self):
- # GH#17935
-
- s = Series(["a", "b", "c", "d", "e"], index=[5, 3, 2, 9, 0])
- msg = "truncate requires a sorted index"
-
- with pytest.raises(ValueError, match=msg):
- s.truncate(before=3, after=9)
-
- rng = pd.date_range("2011-01-01", "2012-01-01", freq="W")
- ts = Series(np.random.randn(len(rng)), index=rng)
- msg = "truncate requires a sorted index"
-
- with pytest.raises(ValueError, match=msg):
- ts.sort_values(ascending=False).truncate(before="2011-11", after="2011-12")
-
- @pytest.mark.parametrize(
- "before, after, indices",
- [(1, 2, [2, 1]), (None, 2, [2, 1, 0]), (1, None, [3, 2, 1])],
- )
- @pytest.mark.parametrize("klass", [pd.Int64Index, pd.DatetimeIndex])
- def test_truncate_decreasing_index(self, before, after, indices, klass):
- # https://github.com/pandas-dev/pandas/issues/33756
- idx = klass([3, 2, 1, 0])
- if klass is pd.DatetimeIndex:
- before = pd.Timestamp(before) if before is not None else None
- after = pd.Timestamp(after) if after is not None else None
- indices = [pd.Timestamp(i) for i in indices]
- values = Series(range(len(idx)), index=idx)
- result = values.truncate(before=before, after=after)
- expected = values.loc[indices]
- tm.assert_series_equal(result, expected)
-
def test_truncate_datetimeindex_tz(self):
# GH 9243
idx = date_range("4/1/2005", "4/30/2005", freq="D", tz="US/Pacific")
@@ -133,21 +42,6 @@ def test_truncate_periodindex(self):
expected_idx2 = pd.PeriodIndex([pd.Period("2017-09-02")])
tm.assert_series_equal(result2, Series([2], index=expected_idx2))
- def test_truncate_multiindex(self):
- # GH 34564
- mi = pd.MultiIndex.from_product([[1, 2, 3, 4], ["A", "B"]], names=["L1", "L2"])
- s1 = Series(range(mi.shape[0]), index=mi, name="col")
- result = s1.truncate(before=2, after=3)
-
- df = pd.DataFrame.from_dict(
- {"L1": [2, 2, 3, 3], "L2": ["A", "B", "A", "B"], "col": [2, 3, 4, 5]}
- )
- return_value = df.set_index(["L1", "L2"], inplace=True)
- assert return_value is None
- expected = df.col
-
- tm.assert_series_equal(result, expected)
-
def test_truncate_one_element_series(self):
# GH 35544
series = Series([0.1], index=pd.DatetimeIndex(["2020-08-04"]))
| https://api.github.com/repos/pandas-dev/pandas/pulls/37616 | 2020-11-03T22:19:50Z | 2020-11-04T01:53:17Z | 2020-11-04T01:53:17Z | 2020-11-04T03:41:58Z | |
BUG: read-only values in cython funcs | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 0937ec3866e12..95450437ba6eb 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -399,11 +399,13 @@ Datetimelike
- Bug in :meth:`TimedeltaIndex.sum` and :meth:`Series.sum` with ``timedelta64`` dtype on an empty index or series returning ``NaT`` instead of ``Timedelta(0)`` (:issue:`31751`)
- Bug in :meth:`DatetimeArray.shift` incorrectly allowing ``fill_value`` with a mismatched timezone (:issue:`37299`)
- Bug in adding a :class:`BusinessDay` with nonzero ``offset`` to a non-scalar other (:issue:`37457`)
+- Bug in :func:`to_datetime` with a read-only array incorrectly raising (:issue:`34857`)
Timedelta
^^^^^^^^^
- Bug in :class:`TimedeltaIndex`, :class:`Series`, and :class:`DataFrame` floor-division with ``timedelta64`` dtypes and ``NaT`` in the denominator (:issue:`35529`)
- Bug in parsing of ISO 8601 durations in :class:`Timedelta`, :meth:`pd.to_datetime` (:issue:`37159`, fixes :issue:`29773` and :issue:`36204`)
+- Bug in :func:`to_timedelta` with a read-only array incorrectly raising (:issue:`34857`)
Timezones
^^^^^^^^^
diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx
index 13c7187923473..1b79d68c13570 100644
--- a/pandas/_libs/join.pyx
+++ b/pandas/_libs/join.pyx
@@ -268,7 +268,7 @@ ctypedef fused join_t:
@cython.wraparound(False)
@cython.boundscheck(False)
-def left_join_indexer_unique(join_t[:] left, join_t[:] right):
+def left_join_indexer_unique(ndarray[join_t] left, ndarray[join_t] right):
cdef:
Py_ssize_t i, j, nleft, nright
ndarray[int64_t] indexer
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index d2690be905a68..bc4632ad028ab 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -12,7 +12,7 @@ from _thread import allocate_lock as _thread_allocate_lock
import numpy as np
import pytz
-from numpy cimport int64_t
+from numpy cimport int64_t, ndarray
from pandas._libs.tslibs.nattype cimport (
NPY_NAT,
@@ -51,7 +51,7 @@ cdef dict _parse_code_table = {'y': 0,
'u': 22}
-def array_strptime(object[:] values, object fmt, bint exact=True, errors='raise'):
+def array_strptime(ndarray[object] values, object fmt, bint exact=True, errors='raise'):
"""
Calculates the datetime structs represented by the passed array of strings
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 45f32d92c7a74..29e8c58055f9e 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -227,7 +227,7 @@ cdef convert_to_timedelta64(object ts, str unit):
@cython.boundscheck(False)
@cython.wraparound(False)
-def array_to_timedelta64(object[:] values, str unit=None, str errors="raise"):
+def array_to_timedelta64(ndarray[object] values, str unit=None, str errors="raise"):
"""
Convert an ndarray to an array of timedeltas. If errors == 'coerce',
coerce non-convertible objects to NaT. Otherwise, raise.
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 1955a96160a4a..e845dbf39dbc9 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1025,9 +1025,8 @@ def _addsub_object_array(self, other: np.ndarray, op):
result : same class as self
"""
assert op in [operator.add, operator.sub]
- if len(other) == 1:
+ if len(other) == 1 and self.ndim == 1:
# If both 1D then broadcasting is unambiguous
- # TODO(EA2D): require self.ndim == other.ndim here
return op(self, other[0])
warnings.warn(
diff --git a/pandas/tests/libs/test_join.py b/pandas/tests/libs/test_join.py
index 95d6dcbaf3baf..f3f09d7a42204 100644
--- a/pandas/tests/libs/test_join.py
+++ b/pandas/tests/libs/test_join.py
@@ -135,9 +135,14 @@ def test_cython_inner_join(self):
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
-def test_left_join_indexer_unique():
+@pytest.mark.parametrize("readonly", [True, False])
+def test_left_join_indexer_unique(readonly):
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
+ if readonly:
+ # GH#37312, GH#37264
+ a.setflags(write=False)
+ b.setflags(write=False)
result = libjoin.left_join_indexer_unique(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype=np.int64)
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index ebe118252c8cf..10bda16655586 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -36,6 +36,16 @@
class TestTimeConversionFormats:
+ @pytest.mark.parametrize("readonly", [True, False])
+ def test_to_datetime_readonly(self, readonly):
+ # GH#34857
+ arr = np.array([], dtype=object)
+ if readonly:
+ arr.setflags(write=False)
+ result = to_datetime(arr)
+ expected = to_datetime([])
+ tm.assert_index_equal(result, expected)
+
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format(self, cache):
values = ["1/1/2000", "1/2/2000", "1/3/2000"]
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index 8e48295c533cc..5be7e81df53f2 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -9,6 +9,16 @@
class TestTimedeltas:
+ @pytest.mark.parametrize("readonly", [True, False])
+ def test_to_timedelta_readonly(self, readonly):
+ # GH#34857
+ arr = np.array([], dtype=object)
+ if readonly:
+ arr.setflags(write=False)
+ result = to_timedelta(arr)
+ expected = to_timedelta([])
+ tm.assert_index_equal(result, expected)
+
def test_to_timedelta(self):
result = to_timedelta(["", ""])
| - [x] closes #34857
- [x] closes #37264
- [x] closes #37312
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37613 | 2020-11-03T20:59:30Z | 2020-11-04T13:43:47Z | 2020-11-04T13:43:46Z | 2021-09-12T05:30:38Z |
DOC: Fix punctuation | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c90ab9cceea8c..8050ce8b1b636 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2196,7 +2196,7 @@ def to_json(
* Series:
- default is 'index'
- - allowed values are: {'split','records','index','table'}.
+ - allowed values are: {'split', 'records', 'index', 'table'}.
* DataFrame:
| Fixed punctuation to match others.
| https://api.github.com/repos/pandas-dev/pandas/pulls/37612 | 2020-11-03T20:49:38Z | 2020-11-04T01:58:13Z | 2020-11-04T01:58:13Z | 2020-11-04T01:58:17Z |
Add tests for categorical with null ea as input | diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index 657511116c306..23921356a2c5d 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -682,3 +682,10 @@ def test_interval(self):
expected_codes = np.array([0, 1], dtype="int8")
tm.assert_numpy_array_equal(cat.codes, expected_codes)
tm.assert_index_equal(cat.categories, idx)
+
+ def test_categorical_extension_array_nullable(self, nulls_fixture):
+ # GH:
+ arr = pd.arrays.StringArray._from_sequence([nulls_fixture] * 2)
+ result = Categorical(arr)
+ expected = Categorical(Series([pd.NA, pd.NA], dtype="object"))
+ tm.assert_categorical_equal(result, expected)
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
cc @jreback Is this what you had in mind for ExtensionArrays and Categoricals? | https://api.github.com/repos/pandas-dev/pandas/pulls/37611 | 2020-11-03T19:49:26Z | 2020-11-13T13:08:25Z | 2020-11-13T13:08:25Z | 2020-11-13T13:14:46Z |
CLN: remove rebox_native | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 579719d8bac3b..1955a96160a4a 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -151,7 +151,9 @@ def _rebox_native(cls, value: int) -> Union[int, np.datetime64, np.timedelta64]:
"""
raise AbstractMethodError(cls)
- def _unbox_scalar(self, value: DTScalarOrNaT, setitem: bool = False) -> int:
+ def _unbox_scalar(
+ self, value: DTScalarOrNaT, setitem: bool = False
+ ) -> Union[np.int64, np.datetime64, np.timedelta64]:
"""
Unbox the integer value of a scalar `value`.
@@ -636,7 +638,6 @@ def _unbox(
"""
if lib.is_scalar(other):
other = self._unbox_scalar(other, setitem=setitem)
- other = self._rebox_native(other)
else:
# same type as self
self._check_compatible_with(other, setitem=setitem)
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index b05271552f117..f655d10881011 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -454,16 +454,13 @@ def _generate_range(
# -----------------------------------------------------------------
# DatetimeLike Interface
- @classmethod
- def _rebox_native(cls, value: int) -> np.datetime64:
- return np.int64(value).view("M8[ns]")
-
- def _unbox_scalar(self, value, setitem: bool = False):
+ def _unbox_scalar(self, value, setitem: bool = False) -> np.datetime64:
if not isinstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timestamp.")
if not isna(value):
self._check_compatible_with(value, setitem=setitem)
- return value.value
+ return value.asm8
+ return np.datetime64(value.value, "ns")
def _scalar_from_string(self, value):
return Timestamp(value, tz=self.tz)
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index b95a7acc19b1f..d808ade53ad33 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -260,18 +260,14 @@ def _generate_range(cls, start, end, periods, freq, fields):
# -----------------------------------------------------------------
# DatetimeLike Interface
- @classmethod
- def _rebox_native(cls, value: int) -> np.int64:
- return np.int64(value)
-
def _unbox_scalar(
self, value: Union[Period, NaTType], setitem: bool = False
) -> int:
if value is NaT:
- return value.value
+ return np.int64(value.value)
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value, setitem=setitem)
- return value.ordinal
+ return np.int64(value.ordinal)
else:
raise ValueError(f"'value' should be a Period. Got '{value}' instead.")
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index e5b56ae80b578..e4a844fd4c6ef 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -301,15 +301,11 @@ def _generate_range(cls, start, end, periods, freq, closed=None):
# ----------------------------------------------------------------
# DatetimeLike Interface
- @classmethod
- def _rebox_native(cls, value: int) -> np.timedelta64:
- return np.int64(value).view("m8[ns]")
-
- def _unbox_scalar(self, value, setitem: bool = False):
+ def _unbox_scalar(self, value, setitem: bool = False) -> np.timedelta64:
if not isinstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timedelta.")
self._check_compatible_with(value, setitem=setitem)
- return value.value
+ return np.timedelta64(value.value, "ns")
def _scalar_from_string(self, value):
return Timedelta(value)
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index b9298e9dec5b5..ec20c829f1544 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -191,10 +191,11 @@ def test_unbox_scalar(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
result = arr._unbox_scalar(arr[0])
- assert isinstance(result, int)
+ expected = arr._data.dtype.type
+ assert isinstance(result, expected)
result = arr._unbox_scalar(pd.NaT)
- assert isinstance(result, int)
+ assert isinstance(result, expected)
msg = f"'value' should be a {self.dtype.__name__}."
with pytest.raises(ValueError, match=msg):
| https://api.github.com/repos/pandas-dev/pandas/pulls/37608 | 2020-11-03T17:38:29Z | 2020-11-03T23:09:06Z | 2020-11-03T23:09:06Z | 2020-11-03T23:14:41Z | |
BUG: nunique not ignoring both None and np.nan | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 7111d54d65815..1da61d4b83e16 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -562,6 +562,7 @@ Other
- Bug in :meth:`Index.union` behaving differently depending on whether operand is a :class:`Index` or other list-like (:issue:`36384`)
- Passing an array with 2 or more dimensions to the :class:`Series` constructor now raises the more specific ``ValueError``, from a bare ``Exception`` previously (:issue:`35744`)
- Bug in ``accessor.DirNamesMixin``, where ``dir(obj)`` wouldn't show attributes defined on the instance (:issue:`37173`).
+- Bug in :meth:`Series.nunique` with ``dropna=True`` was returning incorrect results when both ``NA`` and ``None`` missing values were present (:issue:`37566`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/base.py b/pandas/core/base.py
index c91e4db004f2a..8db1d8073fb7d 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -33,7 +33,7 @@
is_scalar,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
-from pandas.core.dtypes.missing import isna
+from pandas.core.dtypes.missing import isna, remove_na_arraylike
from pandas.core import algorithms
from pandas.core.accessor import DirNamesMixin
@@ -1032,11 +1032,8 @@ def nunique(self, dropna: bool = True) -> int:
>>> s.nunique()
4
"""
- uniqs = self.unique()
- n = len(uniqs)
- if dropna and isna(uniqs).any():
- n -= 1
- return n
+ obj = remove_na_arraylike(self) if dropna else self
+ return len(obj.unique())
@property
def is_unique(self) -> bool:
diff --git a/pandas/tests/base/test_unique.py b/pandas/tests/base/test_unique.py
index e5592cef59592..1a554c85e018b 100644
--- a/pandas/tests/base/test_unique.py
+++ b/pandas/tests/base/test_unique.py
@@ -121,3 +121,11 @@ def test_unique_bad_unicode(idx_or_series_w_bad_unicode):
else:
expected = np.array(["\ud83d"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("dropna", [True, False])
+def test_nunique_dropna(dropna):
+ # GH37566
+ s = pd.Series(["yes", "yes", pd.NA, np.nan, None, pd.NaT])
+ res = s.nunique(dropna)
+ assert res == 1 if dropna else 5
| - [x] closes #37566
- [x] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37607 | 2020-11-03T16:52:21Z | 2020-11-10T23:41:48Z | 2020-11-10T23:41:47Z | 2020-11-11T17:03:30Z |
BUG: set index of DataFrame.apply(f) when f returns dict (#37544) | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 2e976371c0ac8..bd8b05b8de3e4 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -547,6 +547,7 @@ Reshaping
- Bug in :func:`join` returned a non deterministic level-order for the resulting :class:`MultiIndex` (:issue:`36910`)
- Bug in :meth:`DataFrame.combine_first()` caused wrong alignment with dtype ``string`` and one level of ``MultiIndex`` containing only ``NA`` (:issue:`37591`)
- Fixed regression in :func:`merge` on merging DatetimeIndex with empty DataFrame (:issue:`36895`)
+- Bug in :meth:`DataFrame.apply` not setting index of return value when ``func`` return type is ``dict`` (:issue:`37544`)
Sparse
^^^^^^
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 002e260742dc5..a14debce6eea7 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -362,8 +362,10 @@ def wrap_results_for_axis(
isinstance(x, dict) for x in results.values()
):
# Our operation was a to_dict op e.g.
- # test_apply_dict GH#8735, test_apply_reduce_rows_to_dict GH#25196
- return self.obj._constructor_sliced(results)
+ # test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544
+ res = self.obj._constructor_sliced(results)
+ res.index = res_index
+ return res
try:
result = self.obj._constructor(data=results)
diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py
index 03498b278f890..162035b53d68d 100644
--- a/pandas/tests/frame/apply/test_frame_apply.py
+++ b/pandas/tests/frame/apply/test_frame_apply.py
@@ -356,12 +356,17 @@ def test_apply_reduce_Series(self, float_frame):
result = float_frame.apply(np.mean, axis=1)
tm.assert_series_equal(result, expected)
- def test_apply_reduce_rows_to_dict(self):
- # GH 25196
- data = DataFrame([[1, 2], [3, 4]])
- expected = Series([{0: 1, 1: 3}, {0: 2, 1: 4}])
- result = data.apply(dict)
- tm.assert_series_equal(result, expected)
+ def test_apply_reduce_to_dict(self):
+ # GH 25196 37544
+ data = DataFrame([[1, 2], [3, 4]], columns=["c0", "c1"], index=["i0", "i1"])
+
+ result0 = data.apply(dict, axis=0)
+ expected0 = Series([{"i0": 1, "i1": 3}, {"i0": 2, "i1": 4}], index=data.columns)
+ tm.assert_series_equal(result0, expected0)
+
+ result1 = data.apply(dict, axis=1)
+ expected1 = Series([{"c0": 1, "c1": 2}, {"c0": 3, "c1": 4}], index=data.index)
+ tm.assert_series_equal(result1, expected1)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
| - [x] closes #37544
- [x] tests (added) updated / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37606 | 2020-11-03T16:51:28Z | 2020-11-04T23:48:00Z | 2020-11-04T23:47:59Z | 2020-11-12T16:12:14Z |
ERR: fix error message in Period for invalid frequency | diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index b1f9ff71f5faa..b817d80c64ccd 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -2438,7 +2438,7 @@ cpdef int freq_to_dtype_code(BaseOffset freq) except? -1:
try:
return freq._period_dtype_code
except AttributeError as err:
- raise ValueError(INVALID_FREQ_ERR_MSG) from err
+ raise ValueError(INVALID_FREQ_ERR_MSG.format(freq)) from err
cdef int64_t _ordinal_from_fields(int year, int month, quarter, int day,
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index f150e5e5b18b2..46bc6421c2070 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -1554,3 +1554,9 @@ def test_negone_ordinals():
repr(period)
period = Period(ordinal=-1, freq="W")
repr(period)
+
+
+def test_invalid_frequency_error_message():
+ msg = "Invalid frequency: <WeekOfMonth: week=0, weekday=0>"
+ with pytest.raises(ValueError, match=msg):
+ Period("2012-01-02", freq="WOM-1MON")
| Small fix of the error message, it was missing the actual freq to fill into the message.
On master, you get:
```
In [1]: pd.Period("2012-01-02", freq="WOM-1MON")
...
ValueError: Invalid frequency: {0}
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/37602 | 2020-11-03T14:59:02Z | 2020-11-03T19:18:26Z | 2020-11-03T19:18:26Z | 2020-11-03T19:18:30Z |
DEPR: DataFrame/Series.slice_shift | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 45a95f6aeb2f6..ea2e6784087f2 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -341,6 +341,8 @@ Deprecations
- Deprecate use of strings denoting units with 'M', 'Y' or 'y' in :func:`~pandas.to_timedelta` (:issue:`36666`)
- :class:`Index` methods ``&``, ``|``, and ``^`` behaving as the set operations :meth:`Index.intersection`, :meth:`Index.union`, and :meth:`Index.symmetric_difference`, respectively, are deprecated and in the future will behave as pointwise boolean operations matching :class:`Series` behavior. Use the named set methods instead (:issue:`36758`)
- :meth:`Categorical.is_dtype_equal` and :meth:`CategoricalIndex.is_dtype_equal` are deprecated, will be removed in a future version (:issue:`37545`)
+- :meth:`Series.slice_shift` and :meth:`DataFrame.slice_shift` are deprecated, use :meth:`Series.shift` or :meth:`DataFrame.shift` instead (:issue:`37601`)
+
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c90ab9cceea8c..3ccf3265c0d1a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9347,10 +9347,13 @@ def shift(
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
"""
Equivalent to `shift` without copying data.
-
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original.
+ .. deprecated:: 1.2.0
+ slice_shift is deprecated,
+ use DataFrame/Series.shift instead.
+
Parameters
----------
periods : int
@@ -9365,6 +9368,14 @@ def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
"""
+
+ msg = (
+ "The 'slice_shift' method is deprecated "
+ "and will be removed in a future version. "
+ "You can use DataFrame/Series.shift instead"
+ )
+ warnings.warn(msg, FutureWarning, stacklevel=2)
+
if periods == 0:
return self
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py
index d7aadda990f53..d16e854c25ed8 100644
--- a/pandas/tests/generic/test_finalize.py
+++ b/pandas/tests/generic/test_finalize.py
@@ -424,8 +424,6 @@
(pd.DataFrame, frame_data, operator.methodcaller("where", np.array([[True]]))),
(pd.Series, ([1, 2],), operator.methodcaller("mask", np.array([True, False]))),
(pd.DataFrame, frame_data, operator.methodcaller("mask", np.array([[True]]))),
- (pd.Series, ([1, 2],), operator.methodcaller("slice_shift")),
- (pd.DataFrame, frame_data, operator.methodcaller("slice_shift")),
pytest.param(
(
pd.Series,
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 45601abc95fe6..6eec8ef3622a8 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -561,3 +561,14 @@ def test_flags_identity(self, as_frame):
assert s.flags is s.flags
s2 = s.copy()
assert s2.flags is not s.flags
+
+ def test_slice_shift_deprecated(self):
+ # GH 37601
+ df = DataFrame({"A": [1, 2, 3, 4]})
+ s = Series([1, 2, 3, 4])
+
+ with tm.assert_produces_warning(FutureWarning):
+ df["A"].slice_shift()
+
+ with tm.assert_produces_warning(FutureWarning):
+ s.slice_shift()
| - [x] ref #18262
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37601 | 2020-11-03T12:28:07Z | 2020-11-04T02:21:01Z | 2020-11-04T02:21:01Z | 2020-11-04T09:23:51Z |
REF: re-use _validate_setitem_value in Categorical.fillna | diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index b1f913e9ea641..9f0414cf7a806 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1655,21 +1655,15 @@ def fillna(self, value=None, method=None, limit=None):
codes = self._ndarray.copy()
mask = self.isna()
+ new_codes = self._validate_setitem_value(value)
+
if isinstance(value, (np.ndarray, Categorical)):
# We get ndarray or Categorical if called via Series.fillna,
# where it will unwrap another aligned Series before getting here
-
- not_categories = ~algorithms.isin(value, self.categories)
- if not isna(value[not_categories]).all():
- # All entries in `value` must either be a category or NA
- raise ValueError("fill value must be in categories")
-
- values_codes = _get_codes_for_values(value, self.categories)
- codes[mask] = values_codes[mask]
+ codes[mask] = new_codes[mask]
else:
- new_code = self._validate_fill_value(value)
- codes[mask] = new_code
+ codes[mask] = new_codes
return self._from_backing_data(codes)
diff --git a/pandas/tests/arrays/categorical/test_missing.py b/pandas/tests/arrays/categorical/test_missing.py
index 21bea9356dcf0..364c290edc46c 100644
--- a/pandas/tests/arrays/categorical/test_missing.py
+++ b/pandas/tests/arrays/categorical/test_missing.py
@@ -60,7 +60,10 @@ def test_set_item_nan(self):
),
(dict(), "Must specify a fill 'value' or 'method'."),
(dict(method="bad"), "Invalid fill method. Expecting .* bad"),
- (dict(value=Series([1, 2, 3, 4, "a"])), "fill value must be in categories"),
+ (
+ dict(value=Series([1, 2, 3, 4, "a"])),
+ "Cannot setitem on a Categorical with a new category",
+ ),
],
)
def test_fillna_raises(self, fillna_kwargs, msg):
diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py
index 9fa1aa65379c5..bbb57da39705b 100644
--- a/pandas/tests/frame/methods/test_fillna.py
+++ b/pandas/tests/frame/methods/test_fillna.py
@@ -170,7 +170,7 @@ def test_na_actions_categorical(self):
res = df.fillna(value={"cats": 3, "vals": "b"})
tm.assert_frame_equal(res, df_exp_fill)
- msg = "'fill_value=4' is not present in this Categorical's categories"
+ msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(ValueError, match=msg):
df.fillna(value={"cats": 4, "vals": "c"})
diff --git a/pandas/tests/indexes/categorical/test_fillna.py b/pandas/tests/indexes/categorical/test_fillna.py
index f6a6747166011..c8fc55c29054e 100644
--- a/pandas/tests/indexes/categorical/test_fillna.py
+++ b/pandas/tests/indexes/categorical/test_fillna.py
@@ -14,7 +14,7 @@ def test_fillna_categorical(self):
tm.assert_index_equal(idx.fillna(1.0), exp)
# fill by value not in categories raises ValueError
- msg = "'fill_value=2.0' is not present in this Categorical's categories"
+ msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(ValueError, match=msg):
idx.fillna(2.0)
@@ -36,7 +36,7 @@ def test_fillna_validates_with_no_nas(self):
ci = CategoricalIndex([2, 3, 3])
cat = ci._data
- msg = "'fill_value=False' is not present in this Categorical's categories"
+ msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(ValueError, match=msg):
ci.fillna(False)
diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py
index d45486b9bdb29..aaa58cdb390f7 100644
--- a/pandas/tests/series/methods/test_fillna.py
+++ b/pandas/tests/series/methods/test_fillna.py
@@ -653,14 +653,14 @@ def test_fillna_categorical_raises(self):
data = ["a", np.nan, "b", np.nan, np.nan]
ser = Series(Categorical(data, categories=["a", "b"]))
- msg = "'fill_value=d' is not present in this Categorical's categories"
+ msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(ValueError, match=msg):
ser.fillna("d")
- with pytest.raises(ValueError, match="fill value must be in categories"):
+ with pytest.raises(ValueError, match=msg):
ser.fillna(Series("d"))
- with pytest.raises(ValueError, match="fill value must be in categories"):
+ with pytest.raises(ValueError, match=msg):
ser.fillna({1: "d", 3: "a"})
msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
| https://api.github.com/repos/pandas-dev/pandas/pulls/37597 | 2020-11-03T03:47:54Z | 2020-11-04T02:25:35Z | 2020-11-04T02:25:35Z | 2020-11-04T03:42:36Z | |
TST/REF: share method tests between DataFrame and Series | diff --git a/pandas/conftest.py b/pandas/conftest.py
index 515d20e8c5781..dfca95c1f9efe 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -290,6 +290,16 @@ def unique_nulls_fixture(request):
# ----------------------------------------------------------------
# Classes
# ----------------------------------------------------------------
+
+
+@pytest.fixture(params=[pd.DataFrame, pd.Series])
+def frame_or_series(request):
+ """
+ Fixture to parametrize over DataFrame and Series.
+ """
+ return request.param
+
+
@pytest.fixture(
params=[pd.Index, pd.Series], ids=["index", "series"] # type: ignore[list-item]
)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5134529d9c21f..24b89085ac121 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -131,7 +131,13 @@
from pandas.core.construction import extract_array
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.indexes import base as ibase
-from pandas.core.indexes.api import Index, ensure_index, ensure_index_from_sequences
+from pandas.core.indexes.api import (
+ DatetimeIndex,
+ Index,
+ PeriodIndex,
+ ensure_index,
+ ensure_index_from_sequences,
+)
from pandas.core.indexes.multi import MultiIndex, maybe_droplevels
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.core.internals import BlockManager
@@ -9253,6 +9259,9 @@ def to_timestamp(
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
+ if not isinstance(old_ax, PeriodIndex):
+ raise TypeError(f"unsupported Type {type(old_ax).__name__}")
+
new_ax = old_ax.to_timestamp(freq=freq, how=how)
setattr(new_obj, axis_name, new_ax)
@@ -9282,6 +9291,9 @@ def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> DataFrame:
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
+ if not isinstance(old_ax, DatetimeIndex):
+ raise TypeError(f"unsupported Type {type(old_ax).__name__}")
+
new_ax = old_ax.to_period(freq=freq)
setattr(new_obj, axis_name, new_ax)
diff --git a/pandas/tests/frame/methods/test_at_time.py b/pandas/tests/frame/methods/test_at_time.py
index ac98d632c5dcd..7ac3868e8ddf4 100644
--- a/pandas/tests/frame/methods/test_at_time.py
+++ b/pandas/tests/frame/methods/test_at_time.py
@@ -4,14 +4,32 @@
import pytest
import pytz
+from pandas._libs.tslibs import timezones
+
from pandas import DataFrame, date_range
import pandas._testing as tm
class TestAtTime:
- def test_at_time(self):
+ @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
+ def test_localized_at_time(self, tzstr, frame_or_series):
+ tz = timezones.maybe_get_tz(tzstr)
+
+ rng = date_range("4/16/2012", "5/1/2012", freq="H")
+ ts = frame_or_series(np.random.randn(len(rng)), index=rng)
+
+ ts_local = ts.tz_localize(tzstr)
+
+ result = ts_local.at_time(time(10, 0))
+ expected = ts.at_time(time(10, 0)).tz_localize(tzstr)
+ tm.assert_equal(result, expected)
+ assert timezones.tz_compare(result.index.tz, tz)
+
+ def test_at_time(self, frame_or_series):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
+ if frame_or_series is not DataFrame:
+ ts = ts[0]
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
@@ -19,23 +37,24 @@ def test_at_time(self):
result = ts.at_time("9:30")
expected = ts.at_time(time(9, 30))
- tm.assert_frame_equal(result, expected)
-
- result = ts.loc[time(9, 30)]
- expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
-
- tm.assert_frame_equal(result, expected)
+ tm.assert_equal(result, expected)
+ def test_at_time_midnight(self, frame_or_series):
# midnight, everything
rng = date_range("1/1/2000", "1/31/2000")
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
+ if frame_or_series is not DataFrame:
+ ts = ts[0]
result = ts.at_time(time(0, 0))
- tm.assert_frame_equal(result, ts)
+ tm.assert_equal(result, ts)
+ def test_at_time_nonexistent(self, frame_or_series):
# time doesn't exist
rng = date_range("1/1/2012", freq="23Min", periods=384)
- ts = DataFrame(np.random.randn(len(rng), 2), rng)
+ ts = DataFrame(np.random.randn(len(rng)), rng)
+ if frame_or_series is not DataFrame:
+ ts = ts[0]
rs = ts.at_time("16:00")
assert len(rs) == 0
@@ -62,12 +81,14 @@ def test_at_time_tz(self):
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
- def test_at_time_raises(self):
+ def test_at_time_raises(self, frame_or_series):
# GH#20725
- df = DataFrame([[1, 2, 3], [4, 5, 6]])
+ obj = DataFrame([[1, 2, 3], [4, 5, 6]])
+ if frame_or_series is not DataFrame:
+ obj = obj[0]
msg = "Index must be DatetimeIndex"
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
- df.at_time("00:00")
+ obj.at_time("00:00")
@pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
def test_at_time_axis(self, axis):
diff --git a/pandas/tests/frame/methods/test_between_time.py b/pandas/tests/frame/methods/test_between_time.py
index 19e802d0fa663..73722f36a0b86 100644
--- a/pandas/tests/frame/methods/test_between_time.py
+++ b/pandas/tests/frame/methods/test_between_time.py
@@ -1,16 +1,73 @@
-from datetime import time
+from datetime import datetime, time
import numpy as np
import pytest
-from pandas import DataFrame, date_range
+from pandas._libs.tslibs import timezones
+import pandas.util._test_decorators as td
+
+from pandas import DataFrame, Series, date_range
import pandas._testing as tm
class TestBetweenTime:
- def test_between_time(self, close_open_fixture):
+ @td.skip_if_has_locale
+ def test_between_time_formats(self, frame_or_series):
+ # GH#11818
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
+ if frame_or_series is Series:
+ ts = ts[0]
+
+ strings = [
+ ("2:00", "2:30"),
+ ("0200", "0230"),
+ ("2:00am", "2:30am"),
+ ("0200am", "0230am"),
+ ("2:00:00", "2:30:00"),
+ ("020000", "023000"),
+ ("2:00:00am", "2:30:00am"),
+ ("020000am", "023000am"),
+ ]
+ expected_length = 28
+
+ for time_string in strings:
+ assert len(ts.between_time(*time_string)) == expected_length
+
+ @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
+ def test_localized_between_time(self, tzstr, frame_or_series):
+ tz = timezones.maybe_get_tz(tzstr)
+
+ rng = date_range("4/16/2012", "5/1/2012", freq="H")
+ ts = Series(np.random.randn(len(rng)), index=rng)
+ if frame_or_series is DataFrame:
+ ts = ts.to_frame()
+
+ ts_local = ts.tz_localize(tzstr)
+
+ t1, t2 = time(10, 0), time(11, 0)
+ result = ts_local.between_time(t1, t2)
+ expected = ts.between_time(t1, t2).tz_localize(tzstr)
+ tm.assert_equal(result, expected)
+ assert timezones.tz_compare(result.index.tz, tz)
+
+ def test_between_time_types(self, frame_or_series):
+ # GH11818
+ rng = date_range("1/1/2000", "1/5/2000", freq="5min")
+ obj = DataFrame({"A": 0}, index=rng)
+ if frame_or_series is Series:
+ obj = obj["A"]
+
+ msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time"
+ with pytest.raises(ValueError, match=msg):
+ obj.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
+
+ def test_between_time(self, close_open_fixture, frame_or_series):
+ rng = date_range("1/1/2000", "1/5/2000", freq="5min")
+ ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
+ if frame_or_series is not DataFrame:
+ ts = ts[0]
+
stime = time(0, 0)
etime = time(1, 0)
inc_start, inc_end = close_open_fixture
@@ -37,11 +94,13 @@ def test_between_time(self, close_open_fixture):
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
- tm.assert_frame_equal(result, expected)
+ tm.assert_equal(result, expected)
# across midnight
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
+ if frame_or_series is not DataFrame:
+ ts = ts[0]
stime = time(22, 0)
etime = time(9, 0)
@@ -65,14 +124,33 @@ def test_between_time(self, close_open_fixture):
else:
assert (t < etime) or (t >= stime)
- def test_between_time_raises(self):
+ def test_between_time_raises(self, frame_or_series):
# GH#20725
- df = DataFrame([[1, 2, 3], [4, 5, 6]])
+ obj = DataFrame([[1, 2, 3], [4, 5, 6]])
+ if frame_or_series is not DataFrame:
+ obj = obj[0]
+
msg = "Index must be DatetimeIndex"
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
- df.between_time(start_time="00:00", end_time="12:00")
+ obj.between_time(start_time="00:00", end_time="12:00")
+
+ def test_between_time_axis(self, frame_or_series):
+ # GH#8839
+ rng = date_range("1/1/2000", periods=100, freq="10min")
+ ts = Series(np.random.randn(len(rng)), index=rng)
+ if frame_or_series is DataFrame:
+ ts = ts.to_frame()
+
+ stime, etime = ("08:00:00", "09:00:00")
+ expected_length = 7
+
+ assert len(ts.between_time(stime, etime)) == expected_length
+ assert len(ts.between_time(stime, etime, axis=0)) == expected_length
+ msg = f"No axis named {ts.ndim} for object type {type(ts).__name__}"
+ with pytest.raises(ValueError, match=msg):
+ ts.between_time(stime, etime, axis=ts.ndim)
- def test_between_time_axis(self, axis):
+ def test_between_time_axis_aliases(self, axis):
# GH#8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
diff --git a/pandas/tests/frame/methods/test_to_period.py b/pandas/tests/frame/methods/test_to_period.py
index 051461b6c554d..e3f3fe9f697a9 100644
--- a/pandas/tests/frame/methods/test_to_period.py
+++ b/pandas/tests/frame/methods/test_to_period.py
@@ -1,36 +1,87 @@
import numpy as np
import pytest
-from pandas import DataFrame, date_range, period_range
+from pandas import (
+ DataFrame,
+ DatetimeIndex,
+ PeriodIndex,
+ Series,
+ date_range,
+ period_range,
+)
import pandas._testing as tm
class TestToPeriod:
- def test_frame_to_period(self):
+ def test_to_period(self, frame_or_series):
K = 5
- dr = date_range("1/1/2000", "1/1/2001")
- pr = period_range("1/1/2000", "1/1/2001")
- df = DataFrame(np.random.randn(len(dr), K), index=dr)
- df["mix"] = "a"
+ dr = date_range("1/1/2000", "1/1/2001", freq="D")
+ obj = DataFrame(
+ np.random.randn(len(dr), K), index=dr, columns=["A", "B", "C", "D", "E"]
+ )
+ obj["mix"] = "a"
+ if frame_or_series is Series:
+ obj = obj["A"]
- pts = df.to_period()
- exp = df.copy()
- exp.index = pr
- tm.assert_frame_equal(pts, exp)
+ pts = obj.to_period()
+ exp = obj.copy()
+ exp.index = period_range("1/1/2000", "1/1/2001")
+ tm.assert_equal(pts, exp)
- pts = df.to_period("M")
- tm.assert_index_equal(pts.index, exp.index.asfreq("M"))
+ pts = obj.to_period("M")
+ exp.index = exp.index.asfreq("M")
+ tm.assert_equal(pts, exp)
+
+ def test_to_period_without_freq(self, frame_or_series):
+ # GH#7606 without freq
+ idx = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"])
+ exp_idx = PeriodIndex(
+ ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], freq="D"
+ )
+
+ obj = DataFrame(np.random.randn(4, 4), index=idx, columns=idx)
+ if frame_or_series is Series:
+ obj = obj[idx[0]]
+ expected = obj.copy()
+ expected.index = exp_idx
+ tm.assert_equal(obj.to_period(), expected)
+
+ if frame_or_series is DataFrame:
+ expected = obj.copy()
+ expected.columns = exp_idx
+ tm.assert_frame_equal(obj.to_period(axis=1), expected)
+
+ def test_to_period_columns(self):
+ dr = date_range("1/1/2000", "1/1/2001")
+ df = DataFrame(np.random.randn(len(dr), 5), index=dr)
+ df["mix"] = "a"
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
- exp.columns = pr
+ exp.columns = period_range("1/1/2000", "1/1/2001")
tm.assert_frame_equal(pts, exp)
pts = df.to_period("M", axis=1)
tm.assert_index_equal(pts.columns, exp.columns.asfreq("M"))
+ def test_to_period_invalid_axis(self):
+ dr = date_range("1/1/2000", "1/1/2001")
+ df = DataFrame(np.random.randn(len(dr), 5), index=dr)
+ df["mix"] = "a"
+
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.to_period(axis=2)
+
+ def test_to_period_raises(self, index, frame_or_series):
+ # https://github.com/pandas-dev/pandas/issues/33327
+ obj = Series(index=index, dtype=object)
+ if frame_or_series is DataFrame:
+ obj = obj.to_frame()
+
+ if not isinstance(index, DatetimeIndex):
+ msg = f"unsupported Type {type(index).__name__}"
+ with pytest.raises(TypeError, match=msg):
+ obj.to_period()
diff --git a/pandas/tests/frame/methods/test_to_timestamp.py b/pandas/tests/frame/methods/test_to_timestamp.py
index ae7d2827e05a6..e23d12b691b4a 100644
--- a/pandas/tests/frame/methods/test_to_timestamp.py
+++ b/pandas/tests/frame/methods/test_to_timestamp.py
@@ -6,6 +6,8 @@
from pandas import (
DataFrame,
DatetimeIndex,
+ PeriodIndex,
+ Series,
Timedelta,
date_range,
period_range,
@@ -14,48 +16,70 @@
import pandas._testing as tm
+def _get_with_delta(delta, freq="A-DEC"):
+ return date_range(
+ to_datetime("1/1/2001") + delta,
+ to_datetime("12/31/2009") + delta,
+ freq=freq,
+ )
+
+
class TestToTimestamp:
- def test_frame_to_time_stamp(self):
+ def test_to_timestamp(self, frame_or_series):
K = 5
index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
- df = DataFrame(np.random.randn(len(index), K), index=index)
- df["mix"] = "a"
+ obj = DataFrame(
+ np.random.randn(len(index), K),
+ index=index,
+ columns=["A", "B", "C", "D", "E"],
+ )
+ obj["mix"] = "a"
+ if frame_or_series is Series:
+ obj = obj["A"]
exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
- result = df.to_timestamp("D", "end")
+ result = obj.to_timestamp("D", "end")
tm.assert_index_equal(result.index, exp_index)
- tm.assert_numpy_array_equal(result.values, df.values)
+ tm.assert_numpy_array_equal(result.values, obj.values)
+ if frame_or_series is Series:
+ assert result.name == "A"
exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
- result = df.to_timestamp("D", "start")
+ result = obj.to_timestamp("D", "start")
tm.assert_index_equal(result.index, exp_index)
- def _get_with_delta(delta, freq="A-DEC"):
- return date_range(
- to_datetime("1/1/2001") + delta,
- to_datetime("12/31/2009") + delta,
- freq=freq,
- )
+ result = obj.to_timestamp(how="start")
+ tm.assert_index_equal(result.index, exp_index)
delta = timedelta(hours=23)
- result = df.to_timestamp("H", "end")
+ result = obj.to_timestamp("H", "end")
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
delta = timedelta(hours=23, minutes=59)
- result = df.to_timestamp("T", "end")
+ result = obj.to_timestamp("T", "end")
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
- result = df.to_timestamp("S", "end")
+ result = obj.to_timestamp("S", "end")
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
+ def test_to_timestamp_columns(self):
+ K = 5
+ index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
+ df = DataFrame(
+ np.random.randn(len(index), K),
+ index=index,
+ columns=["A", "B", "C", "D", "E"],
+ )
+ df["mix"] = "a"
+
# columns
df = df.T
@@ -87,10 +111,6 @@ def _get_with_delta(delta, freq="A-DEC"):
exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
tm.assert_index_equal(result.columns, exp_index)
- # invalid axis
- with pytest.raises(ValueError, match="axis"):
- df.to_timestamp(axis=2)
-
result1 = df.to_timestamp("5t", axis=1)
result2 = df.to_timestamp("t", axis=1)
expected = date_range("2001-01-01", "2009-01-01", freq="AS")
@@ -101,3 +121,34 @@ def _get_with_delta(delta, freq="A-DEC"):
# PeriodIndex.to_timestamp always use 'infer'
assert result1.columns.freqstr == "AS-JAN"
assert result2.columns.freqstr == "AS-JAN"
+
+ def to_timestamp_invalid_axis(self):
+ index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
+ obj = DataFrame(np.random.randn(len(index), 5), index=index)
+
+ # invalid axis
+ with pytest.raises(ValueError, match="axis"):
+ obj.to_timestamp(axis=2)
+
+ def test_to_timestamp_hourly(self, frame_or_series):
+
+ index = period_range(freq="H", start="1/1/2001", end="1/2/2001")
+ obj = Series(1, index=index, name="foo")
+ if frame_or_series is not Series:
+ obj = obj.to_frame()
+
+ exp_index = date_range("1/1/2001 00:59:59", end="1/2/2001 00:59:59", freq="H")
+ result = obj.to_timestamp(how="end")
+ exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
+ tm.assert_index_equal(result.index, exp_index)
+ if frame_or_series is Series:
+ assert result.name == "foo"
+
+ def test_to_timestamp_raises(self, index, frame_or_series):
+ # GH#33327
+ obj = frame_or_series(index=index, dtype=object)
+
+ if not isinstance(index, PeriodIndex):
+ msg = f"unsupported Type {type(index).__name__}"
+ with pytest.raises(TypeError, match=msg):
+ obj.to_timestamp()
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 4e46eb126894b..330092b08c1b2 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -719,3 +719,12 @@ def test_slice_datetime_locs(self, box, kind, tz_aware_fixture):
result = index.slice_locs(key, box(2010, 1, 2))
expected = (0, 1)
assert result == expected
+
+
+class TestIndexerBetweenTime:
+ def test_indexer_between_time(self):
+ # GH#11818
+ rng = date_range("1/1/2000", "1/5/2000", freq="5min")
+ msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time"
+ with pytest.raises(ValueError, match=msg):
+ rng.indexer_between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index d1dcae5997b9d..c1a5db992d3df 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1,4 +1,5 @@
""" test label based indexing with loc """
+from datetime import time
from io import StringIO
import re
@@ -992,6 +993,27 @@ def test_loc_setitem_str_to_small_float_conversion_type(self):
expected = DataFrame(col_data, columns=["A"], dtype=float)
tm.assert_frame_equal(result, expected)
+ def test_loc_getitem_time_object(self, frame_or_series):
+ rng = date_range("1/1/2000", "1/5/2000", freq="5min")
+ mask = (rng.hour == 9) & (rng.minute == 30)
+
+ obj = DataFrame(np.random.randn(len(rng), 3), index=rng)
+ if frame_or_series is Series:
+ obj = obj[0]
+
+ result = obj.loc[time(9, 30)]
+ exp = obj.loc[mask]
+ tm.assert_equal(result, exp)
+
+ chunk = obj.loc["1/4/2000":]
+ result = chunk.loc[time(9, 30)]
+ expected = result[-1:]
+
+ # Without resetting the freqs, these are 5 min and 1440 min, respectively
+ result.index = result.index._with_freq(None)
+ expected.index = expected.index._with_freq(None)
+ tm.assert_equal(result, expected)
+
class TestLocWithMultiIndex:
@pytest.mark.parametrize(
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 5e87f8f6c1059..2933983a5b18b 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -1,7 +1,7 @@
"""
Series.__getitem__ test classes are organized by the type of key passed.
"""
-from datetime import datetime
+from datetime import datetime, time
import numpy as np
import pytest
@@ -83,6 +83,16 @@ def test_string_index_alias_tz_aware(self, tz):
result = ser["1/3/2000"]
tm.assert_almost_equal(result, ser[2])
+ def test_getitem_time_object(self):
+ rng = date_range("1/1/2000", "1/5/2000", freq="5min")
+ ts = Series(np.random.randn(len(rng)), index=rng)
+
+ mask = (rng.hour == 9) & (rng.minute == 30)
+ result = ts[time(9, 30)]
+ expected = ts[mask]
+ result.index = result.index._with_freq(None)
+ tm.assert_series_equal(result, expected)
+
class TestSeriesGetitemSlices:
def test_getitem_slice_2d(self, datetime_series):
diff --git a/pandas/tests/series/methods/test_at_time.py b/pandas/tests/series/methods/test_at_time.py
deleted file mode 100644
index 810e4c1446708..0000000000000
--- a/pandas/tests/series/methods/test_at_time.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from datetime import time
-
-import numpy as np
-import pytest
-
-from pandas._libs.tslibs import timezones
-
-from pandas import DataFrame, Series, date_range
-import pandas._testing as tm
-
-
-class TestAtTime:
- @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
- def test_localized_at_time(self, tzstr):
- tz = timezones.maybe_get_tz(tzstr)
-
- rng = date_range("4/16/2012", "5/1/2012", freq="H")
- ts = Series(np.random.randn(len(rng)), index=rng)
-
- ts_local = ts.tz_localize(tzstr)
-
- result = ts_local.at_time(time(10, 0))
- expected = ts.at_time(time(10, 0)).tz_localize(tzstr)
- tm.assert_series_equal(result, expected)
- assert timezones.tz_compare(result.index.tz, tz)
-
- def test_at_time(self):
- rng = date_range("1/1/2000", "1/5/2000", freq="5min")
- ts = Series(np.random.randn(len(rng)), index=rng)
- rs = ts.at_time(rng[1])
- assert (rs.index.hour == rng[1].hour).all()
- assert (rs.index.minute == rng[1].minute).all()
- assert (rs.index.second == rng[1].second).all()
-
- result = ts.at_time("9:30")
- expected = ts.at_time(time(9, 30))
- tm.assert_series_equal(result, expected)
-
- df = DataFrame(np.random.randn(len(rng), 3), index=rng)
-
- result = ts[time(9, 30)]
- result_df = df.loc[time(9, 30)]
- expected = ts[(rng.hour == 9) & (rng.minute == 30)]
- exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
-
- result.index = result.index._with_freq(None)
- tm.assert_series_equal(result, expected)
- tm.assert_frame_equal(result_df, exp_df)
-
- chunk = df.loc["1/4/2000":]
- result = chunk.loc[time(9, 30)]
- expected = result_df[-1:]
-
- # Without resetting the freqs, these are 5 min and 1440 min, respectively
- result.index = result.index._with_freq(None)
- expected.index = expected.index._with_freq(None)
- tm.assert_frame_equal(result, expected)
-
- # midnight, everything
- rng = date_range("1/1/2000", "1/31/2000")
- ts = Series(np.random.randn(len(rng)), index=rng)
-
- result = ts.at_time(time(0, 0))
- tm.assert_series_equal(result, ts)
-
- # time doesn't exist
- rng = date_range("1/1/2012", freq="23Min", periods=384)
- ts = Series(np.random.randn(len(rng)), rng)
- rs = ts.at_time("16:00")
- assert len(rs) == 0
-
- def test_at_time_raises(self):
- # GH20725
- ser = Series("a b c".split())
- msg = "Index must be DatetimeIndex"
- with pytest.raises(TypeError, match=msg):
- ser.at_time("00:00")
diff --git a/pandas/tests/series/methods/test_between_time.py b/pandas/tests/series/methods/test_between_time.py
deleted file mode 100644
index e9d2f8e6f1637..0000000000000
--- a/pandas/tests/series/methods/test_between_time.py
+++ /dev/null
@@ -1,144 +0,0 @@
-from datetime import datetime, time
-from itertools import product
-
-import numpy as np
-import pytest
-
-from pandas._libs.tslibs import timezones
-import pandas.util._test_decorators as td
-
-from pandas import DataFrame, Series, date_range
-import pandas._testing as tm
-
-
-class TestBetweenTime:
- @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
- def test_localized_between_time(self, tzstr):
- tz = timezones.maybe_get_tz(tzstr)
-
- rng = date_range("4/16/2012", "5/1/2012", freq="H")
- ts = Series(np.random.randn(len(rng)), index=rng)
-
- ts_local = ts.tz_localize(tzstr)
-
- t1, t2 = time(10, 0), time(11, 0)
- result = ts_local.between_time(t1, t2)
- expected = ts.between_time(t1, t2).tz_localize(tzstr)
- tm.assert_series_equal(result, expected)
- assert timezones.tz_compare(result.index.tz, tz)
-
- def test_between_time(self):
- rng = date_range("1/1/2000", "1/5/2000", freq="5min")
- ts = Series(np.random.randn(len(rng)), index=rng)
- stime = time(0, 0)
- etime = time(1, 0)
-
- close_open = product([True, False], [True, False])
- for inc_start, inc_end in close_open:
- filtered = ts.between_time(stime, etime, inc_start, inc_end)
- exp_len = 13 * 4 + 1
- if not inc_start:
- exp_len -= 5
- if not inc_end:
- exp_len -= 4
-
- assert len(filtered) == exp_len
- for rs in filtered.index:
- t = rs.time()
- if inc_start:
- assert t >= stime
- else:
- assert t > stime
-
- if inc_end:
- assert t <= etime
- else:
- assert t < etime
-
- result = ts.between_time("00:00", "01:00")
- expected = ts.between_time(stime, etime)
- tm.assert_series_equal(result, expected)
-
- # across midnight
- rng = date_range("1/1/2000", "1/5/2000", freq="5min")
- ts = Series(np.random.randn(len(rng)), index=rng)
- stime = time(22, 0)
- etime = time(9, 0)
-
- close_open = product([True, False], [True, False])
- for inc_start, inc_end in close_open:
- filtered = ts.between_time(stime, etime, inc_start, inc_end)
- exp_len = (12 * 11 + 1) * 4 + 1
- if not inc_start:
- exp_len -= 4
- if not inc_end:
- exp_len -= 4
-
- assert len(filtered) == exp_len
- for rs in filtered.index:
- t = rs.time()
- if inc_start:
- assert (t >= stime) or (t <= etime)
- else:
- assert (t > stime) or (t <= etime)
-
- if inc_end:
- assert (t <= etime) or (t >= stime)
- else:
- assert (t < etime) or (t >= stime)
-
- def test_between_time_raises(self):
- # GH20725
- ser = Series("a b c".split())
- msg = "Index must be DatetimeIndex"
- with pytest.raises(TypeError, match=msg):
- ser.between_time(start_time="00:00", end_time="12:00")
-
- def test_between_time_types(self):
- # GH11818
- rng = date_range("1/1/2000", "1/5/2000", freq="5min")
- msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time"
- with pytest.raises(ValueError, match=msg):
- rng.indexer_between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
-
- frame = DataFrame({"A": 0}, index=rng)
- with pytest.raises(ValueError, match=msg):
- frame.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
-
- series = Series(0, index=rng)
- with pytest.raises(ValueError, match=msg):
- series.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
-
- @td.skip_if_has_locale
- def test_between_time_formats(self):
- # GH11818
- rng = date_range("1/1/2000", "1/5/2000", freq="5min")
- ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
-
- strings = [
- ("2:00", "2:30"),
- ("0200", "0230"),
- ("2:00am", "2:30am"),
- ("0200am", "0230am"),
- ("2:00:00", "2:30:00"),
- ("020000", "023000"),
- ("2:00:00am", "2:30:00am"),
- ("020000am", "023000am"),
- ]
- expected_length = 28
-
- for time_string in strings:
- assert len(ts.between_time(*time_string)) == expected_length
-
- def test_between_time_axis(self):
- # issue 8839
- rng = date_range("1/1/2000", periods=100, freq="10min")
- ts = Series(np.random.randn(len(rng)), index=rng)
- stime, etime = ("08:00:00", "09:00:00")
- expected_length = 7
-
- assert len(ts.between_time(stime, etime)) == expected_length
- assert len(ts.between_time(stime, etime, axis=0)) == expected_length
- msg = "No axis named 1 for object type Series"
- with pytest.raises(ValueError, match=msg):
- ts.between_time(stime, etime, axis=1)
diff --git a/pandas/tests/series/methods/test_to_period.py b/pandas/tests/series/methods/test_to_period.py
deleted file mode 100644
index b40fc81931e20..0000000000000
--- a/pandas/tests/series/methods/test_to_period.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas import (
- DataFrame,
- DatetimeIndex,
- PeriodIndex,
- Series,
- date_range,
- period_range,
-)
-import pandas._testing as tm
-
-
-class TestToPeriod:
- def test_to_period(self):
- rng = date_range("1/1/2000", "1/1/2001", freq="D")
- ts = Series(np.random.randn(len(rng)), index=rng)
-
- pts = ts.to_period()
- exp = ts.copy()
- exp.index = period_range("1/1/2000", "1/1/2001")
- tm.assert_series_equal(pts, exp)
-
- pts = ts.to_period("M")
- exp.index = exp.index.asfreq("M")
- tm.assert_index_equal(pts.index, exp.index.asfreq("M"))
- tm.assert_series_equal(pts, exp)
-
- # GH#7606 without freq
- idx = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"])
- exp_idx = PeriodIndex(
- ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], freq="D"
- )
-
- s = Series(np.random.randn(4), index=idx)
- expected = s.copy()
- expected.index = exp_idx
- tm.assert_series_equal(s.to_period(), expected)
-
- df = DataFrame(np.random.randn(4, 4), index=idx, columns=idx)
- expected = df.copy()
- expected.index = exp_idx
- tm.assert_frame_equal(df.to_period(), expected)
-
- expected = df.copy()
- expected.columns = exp_idx
- tm.assert_frame_equal(df.to_period(axis=1), expected)
-
- def test_to_period_raises(self, index):
- # https://github.com/pandas-dev/pandas/issues/33327
- ser = Series(index=index, dtype=object)
- if not isinstance(index, DatetimeIndex):
- msg = f"unsupported Type {type(index).__name__}"
- with pytest.raises(TypeError, match=msg):
- ser.to_period()
diff --git a/pandas/tests/series/methods/test_to_timestamp.py b/pandas/tests/series/methods/test_to_timestamp.py
deleted file mode 100644
index 13a2042a2f639..0000000000000
--- a/pandas/tests/series/methods/test_to_timestamp.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from datetime import timedelta
-
-import pytest
-
-from pandas import PeriodIndex, Series, Timedelta, date_range, period_range, to_datetime
-import pandas._testing as tm
-
-
-class TestToTimestamp:
- def test_to_timestamp(self):
- index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
- series = Series(1, index=index, name="foo")
-
- exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
- result = series.to_timestamp(how="end")
- exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
- assert result.name == "foo"
-
- exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
- result = series.to_timestamp(how="start")
- tm.assert_index_equal(result.index, exp_index)
-
- def _get_with_delta(delta, freq="A-DEC"):
- return date_range(
- to_datetime("1/1/2001") + delta,
- to_datetime("12/31/2009") + delta,
- freq=freq,
- )
-
- delta = timedelta(hours=23)
- result = series.to_timestamp("H", "end")
- exp_index = _get_with_delta(delta)
- exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
-
- delta = timedelta(hours=23, minutes=59)
- result = series.to_timestamp("T", "end")
- exp_index = _get_with_delta(delta)
- exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
-
- result = series.to_timestamp("S", "end")
- delta = timedelta(hours=23, minutes=59, seconds=59)
- exp_index = _get_with_delta(delta)
- exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
-
- index = period_range(freq="H", start="1/1/2001", end="1/2/2001")
- series = Series(1, index=index, name="foo")
-
- exp_index = date_range("1/1/2001 00:59:59", end="1/2/2001 00:59:59", freq="H")
- result = series.to_timestamp(how="end")
- exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
- assert result.name == "foo"
-
- def test_to_timestamp_raises(self, index):
- # https://github.com/pandas-dev/pandas/issues/33327
- ser = Series(index=index, dtype=object)
- if not isinstance(index, PeriodIndex):
- msg = f"unsupported Type {type(index).__name__}"
- with pytest.raises(TypeError, match=msg):
- ser.to_timestamp()
| By default, a test using the frame_or_series fixture goes in tests.frame | https://api.github.com/repos/pandas-dev/pandas/pulls/37596 | 2020-11-02T22:35:47Z | 2020-11-03T02:43:39Z | 2020-11-03T02:43:39Z | 2020-11-03T02:51:11Z |
CLN: de-duplicate _validate_where_value with _validate_setitem_value | diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 03f66ff82ad75..263512e427c69 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1177,11 +1177,6 @@ def map(self, mapper):
# -------------------------------------------------------------
# Validators; ideally these can be de-duplicated
- def _validate_where_value(self, value):
- if is_scalar(value):
- return self._validate_fill_value(value)
- return self._validate_listlike(value)
-
def _validate_insert_value(self, value) -> int:
return self._validate_fill_value(value)
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index f8a609fb0cabe..579719d8bac3b 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -623,8 +623,6 @@ def _validate_setitem_value(self, value):
return self._unbox(value, setitem=True)
- _validate_where_value = _validate_setitem_value
-
def _validate_insert_value(self, value):
value = self._validate_scalar(value)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 9215fc8994d87..a92190a2bddf8 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -485,7 +485,7 @@ def where(self, cond, other=None):
values = self._data._ndarray
try:
- other = self._data._validate_where_value(other)
+ other = self._data._validate_setitem_value(other)
except (TypeError, ValueError) as err:
# Includes tzawareness mismatch and IncompatibleFrequencyError
oth = getattr(other, "dtype", other)
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index d37ec12fd3eda..cd1871e4687f3 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -343,7 +343,7 @@ def insert(self, loc: int, item):
def putmask(self, mask, value):
try:
- value = self._data._validate_where_value(value)
+ value = self._data._validate_setitem_value(value)
except (TypeError, ValueError):
return self.astype(object).putmask(mask, value)
| https://api.github.com/repos/pandas-dev/pandas/pulls/37595 | 2020-11-02T20:20:39Z | 2020-11-02T22:22:20Z | 2020-11-02T22:22:20Z | 2020-11-02T22:23:07Z | |
BUG: Index.where casting ints to str | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 8a092cb6e36db..45a95f6aeb2f6 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -456,6 +456,7 @@ Indexing
- Bug in :meth:`Series.loc.__getitem__` with a non-unique :class:`MultiIndex` and an empty-list indexer (:issue:`13691`)
- Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`MultiIndex` with a level named "0" (:issue:`37194`)
- Bug in :meth:`Series.__getitem__` when using an unsigned integer array as an indexer giving incorrect results or segfaulting instead of raising ``KeyError`` (:issue:`37218`)
+- Bug in :meth:`Index.where` incorrectly casting numeric values to strings (:issue:`37591`)
Missing
^^^^^^^
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 5ee5e867567b3..b220756a24f9f 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -40,7 +40,6 @@
ensure_int64,
ensure_object,
ensure_platform_int,
- is_bool,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
@@ -4079,23 +4078,16 @@ def where(self, cond, other=None):
if other is None:
other = self._na_value
- dtype = self.dtype
values = self.values
- if is_bool(other) or is_bool_dtype(other):
-
- # bools force casting
- values = values.astype(object)
- dtype = None
+ try:
+ self._validate_fill_value(other)
+ except (ValueError, TypeError):
+ return self.astype(object).where(cond, other)
values = np.where(cond, values, other)
- if self._is_numeric_dtype and np.any(isna(values)):
- # We can't coerce to the numeric dtype of "self" (unless
- # it's float) if there are NaN values in our output.
- dtype = None
-
- return Index(values, dtype=dtype, name=self.name)
+ return Index(values, name=self.name)
# construction helpers
@final
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index a92190a2bddf8..9e2ac6013cb43 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -482,16 +482,9 @@ def isin(self, values, level=None):
@Appender(Index.where.__doc__)
def where(self, cond, other=None):
- values = self._data._ndarray
+ other = self._data._validate_setitem_value(other)
- try:
- other = self._data._validate_setitem_value(other)
- except (TypeError, ValueError) as err:
- # Includes tzawareness mismatch and IncompatibleFrequencyError
- oth = getattr(other, "dtype", other)
- raise TypeError(f"Where requires matching dtype, not {oth}") from err
-
- result = np.where(cond, values, other)
+ result = np.where(cond, self._data._ndarray, other)
arr = self._data._from_backing_data(result)
return type(self)._simple_new(arr, name=self.name)
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index d6f571360b457..9eb8a8b719d41 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -121,6 +121,8 @@ def _validate_fill_value(self, value):
# force conversion to object
# so we don't lose the bools
raise TypeError
+ if isinstance(value, str):
+ raise TypeError
return value
diff --git a/pandas/tests/indexes/base_class/test_where.py b/pandas/tests/indexes/base_class/test_where.py
new file mode 100644
index 0000000000000..0c8969735e14e
--- /dev/null
+++ b/pandas/tests/indexes/base_class/test_where.py
@@ -0,0 +1,13 @@
+import numpy as np
+
+from pandas import Index
+import pandas._testing as tm
+
+
+class TestWhere:
+ def test_where_intlike_str_doesnt_cast_ints(self):
+ idx = Index(range(3))
+ mask = np.array([True, False, True])
+ res = idx.where(mask, "2")
+ expected = Index([0, "2", 2])
+ tm.assert_index_equal(res, expected)
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index be8ca61f1a730..6f078237e3a97 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -143,10 +143,9 @@ def test_where_cast_str(self):
result = index.where(mask, [str(index[0])])
tm.assert_index_equal(result, expected)
- msg = "Where requires matching dtype, not foo"
+ msg = "value should be a '.*', 'NaT', or array of those"
with pytest.raises(TypeError, match=msg):
index.where(mask, "foo")
- msg = r"Where requires matching dtype, not \['foo'\]"
with pytest.raises(TypeError, match=msg):
index.where(mask, ["foo"])
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 4e46eb126894b..fe92aefc0d708 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -177,24 +177,26 @@ def test_where_invalid_dtypes(self):
i2 = Index([pd.NaT, pd.NaT] + dti[2:].tolist())
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ msg = "value should be a 'Timestamp', 'NaT', or array of those. Got"
+ msg2 = "Cannot compare tz-naive and tz-aware datetime-like objects"
+ with pytest.raises(TypeError, match=msg2):
# passing tz-naive ndarray to tzaware DTI
dti.where(notna(i2), i2.values)
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ with pytest.raises(TypeError, match=msg2):
# passing tz-aware DTI to tznaive DTI
dti.tz_localize(None).where(notna(i2), i2)
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ with pytest.raises(TypeError, match=msg):
dti.where(notna(i2), i2.tz_localize(None).to_period("D"))
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ with pytest.raises(TypeError, match=msg):
dti.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ with pytest.raises(TypeError, match=msg):
dti.where(notna(i2), i2.asi8)
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ with pytest.raises(TypeError, match=msg):
# non-matching scalar
dti.where(notna(i2), pd.Timedelta(days=4))
@@ -203,7 +205,7 @@ def test_where_mismatched_nat(self, tz_aware_fixture):
dti = pd.date_range("2013-01-01", periods=3, tz=tz)
cond = np.array([True, False, True])
- msg = "Where requires matching dtype"
+ msg = "value should be a 'Timestamp', 'NaT', or array of those. Got"
with pytest.raises(TypeError, match=msg):
# wrong-dtyped NaT
dti.where(cond, np.timedelta64("NaT", "ns"))
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index b6d3c36f1682c..19dfa9137cc5c 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -545,16 +545,17 @@ def test_where_invalid_dtypes(self):
i2 = PeriodIndex([NaT, NaT] + pi[2:].tolist(), freq="D")
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ msg = "value should be a 'Period', 'NaT', or array of those"
+ with pytest.raises(TypeError, match=msg):
pi.where(notna(i2), i2.asi8)
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ with pytest.raises(TypeError, match=msg):
pi.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ with pytest.raises(TypeError, match=msg):
pi.where(notna(i2), i2.to_timestamp("S"))
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ with pytest.raises(TypeError, match=msg):
# non-matching scalar
pi.where(notna(i2), Timedelta(days=4))
@@ -562,7 +563,7 @@ def test_where_mismatched_nat(self):
pi = period_range("20130101", periods=5, freq="D")
cond = np.array([True, False, True, True, False])
- msg = "Where requires matching dtype"
+ msg = "value should be a 'Period', 'NaT', or array of those"
with pytest.raises(TypeError, match=msg):
# wrong-dtyped NaT
pi.where(cond, np.timedelta64("NaT", "ns"))
diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py
index 396a676b97a1b..37aa9653550fb 100644
--- a/pandas/tests/indexes/timedeltas/test_indexing.py
+++ b/pandas/tests/indexes/timedeltas/test_indexing.py
@@ -150,16 +150,17 @@ def test_where_invalid_dtypes(self):
i2 = Index([pd.NaT, pd.NaT] + tdi[2:].tolist())
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ msg = "value should be a 'Timedelta', 'NaT', or array of those"
+ with pytest.raises(TypeError, match=msg):
tdi.where(notna(i2), i2.asi8)
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ with pytest.raises(TypeError, match=msg):
tdi.where(notna(i2), i2 + pd.Timestamp.now())
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ with pytest.raises(TypeError, match=msg):
tdi.where(notna(i2), (i2 + pd.Timestamp.now()).to_period("D"))
- with pytest.raises(TypeError, match="Where requires matching dtype"):
+ with pytest.raises(TypeError, match=msg):
# non-matching scalar
tdi.where(notna(i2), pd.Timestamp.now())
@@ -167,7 +168,7 @@ def test_where_mismatched_nat(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
cond = np.array([True, False, False])
- msg = "Where requires matching dtype"
+ msg = "value should be a 'Timedelta', 'NaT', or array of those"
with pytest.raises(TypeError, match=msg):
# wrong-dtyped NaT
tdi.where(cond, np.datetime64("NaT", "ns"))
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 436b2aa838b08..fd6f6fbc6a4ba 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -780,7 +780,7 @@ def test_where_index_timedelta64(self, value):
result = tdi.where(cond, value)
tm.assert_index_equal(result, expected)
- msg = "Where requires matching dtype"
+ msg = "value should be a 'Timedelta', 'NaT', or array of thos"
with pytest.raises(TypeError, match=msg):
# wrong-dtyped NaT
tdi.where(cond, np.datetime64("NaT", "ns"))
@@ -804,11 +804,12 @@ def test_where_index_period(self):
tm.assert_index_equal(result, expected)
# Passing a mismatched scalar
- msg = "Where requires matching dtype"
+ msg = "value should be a 'Period', 'NaT', or array of those"
with pytest.raises(TypeError, match=msg):
pi.where(cond, pd.Timedelta(days=4))
- with pytest.raises(TypeError, match=msg):
+ msg = r"Input has different freq=D from PeriodArray\(freq=Q-DEC\)"
+ with pytest.raises(ValueError, match=msg):
pi.where(cond, pd.Period("2020-04-21", "D"))
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Two things going on here (can separate if asked, but they got mixed together in the process).
1) don't re-raise in `DatetimeLikeIndexMixin.where`, so we end up giving the more standard exception message.
2) Use validate_fill_value in Index.where, and have that disallow strs for numeric. This fixes the following in master:
```
>>> idx = pd.Index(range(3))
>>> mask = np.array([True, False, True])
>>>
>>> idx.where(mask, "2")
Index(['0', '2', '2'], dtype='object')
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/37591 | 2020-11-02T15:48:14Z | 2020-11-03T02:47:21Z | 2020-11-03T02:47:20Z | 2020-11-03T02:51:44Z |
TST/REF: collect indexing tests by method | diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 58f0e5bc1ad39..9eaa0d0ae6876 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -1327,32 +1327,6 @@ def test_getitem_list_duplicates(self):
expected = df.iloc[:, 2:]
tm.assert_frame_equal(result, expected)
- def test_set_value_with_index_dtype_change(self):
- df_orig = DataFrame(np.random.randn(3, 3), index=range(3), columns=list("ABC"))
-
- # this is actually ambiguous as the 2 is interpreted as a positional
- # so column is not created
- df = df_orig.copy()
- df._set_value("C", 2, 1.0)
- assert list(df.index) == list(df_orig.index) + ["C"]
- # assert list(df.columns) == list(df_orig.columns) + [2]
-
- df = df_orig.copy()
- df.loc["C", 2] = 1.0
- assert list(df.index) == list(df_orig.index) + ["C"]
- # assert list(df.columns) == list(df_orig.columns) + [2]
-
- # create both new
- df = df_orig.copy()
- df._set_value("C", "D", 1.0)
- assert list(df.index) == list(df_orig.index) + ["C"]
- assert list(df.columns) == list(df_orig.columns) + ["D"]
-
- df = df_orig.copy()
- df.loc["C", "D"] = 1.0
- assert list(df.index) == list(df_orig.index) + ["C"]
- assert list(df.columns) == list(df_orig.columns) + ["D"]
-
# TODO: rename? remove?
def test_single_element_ix_dont_upcast(self, float_frame):
float_frame["E"] = 1
diff --git a/pandas/tests/frame/indexing/test_set_value.py b/pandas/tests/frame/indexing/test_set_value.py
index 484e2d544197e..84def57f6b6e0 100644
--- a/pandas/tests/frame/indexing/test_set_value.py
+++ b/pandas/tests/frame/indexing/test_set_value.py
@@ -3,7 +3,7 @@
from pandas.core.dtypes.common import is_float_dtype
-from pandas import isna
+from pandas import DataFrame, isna
class TestSetValue:
@@ -38,3 +38,29 @@ def test_set_value_resize(self, float_frame):
msg = "could not convert string to float: 'sam'"
with pytest.raises(ValueError, match=msg):
res._set_value("foobar", "baz", "sam")
+
+ def test_set_value_with_index_dtype_change(self):
+ df_orig = DataFrame(np.random.randn(3, 3), index=range(3), columns=list("ABC"))
+
+ # this is actually ambiguous as the 2 is interpreted as a positional
+ # so column is not created
+ df = df_orig.copy()
+ df._set_value("C", 2, 1.0)
+ assert list(df.index) == list(df_orig.index) + ["C"]
+ # assert list(df.columns) == list(df_orig.columns) + [2]
+
+ df = df_orig.copy()
+ df.loc["C", 2] = 1.0
+ assert list(df.index) == list(df_orig.index) + ["C"]
+ # assert list(df.columns) == list(df_orig.columns) + [2]
+
+ # create both new
+ df = df_orig.copy()
+ df._set_value("C", "D", 1.0)
+ assert list(df.index) == list(df_orig.index) + ["C"]
+ assert list(df.columns) == list(df_orig.columns) + ["D"]
+
+ df = df_orig.copy()
+ df.loc["C", "D"] = 1.0
+ assert list(df.index) == list(df_orig.index) + ["C"]
+ assert list(df.columns) == list(df_orig.columns) + ["D"]
diff --git a/pandas/tests/indexing/test_callable.py b/pandas/tests/indexing/test_callable.py
deleted file mode 100644
index b98c9a3df0438..0000000000000
--- a/pandas/tests/indexing/test_callable.py
+++ /dev/null
@@ -1,254 +0,0 @@
-import numpy as np
-
-import pandas as pd
-import pandas._testing as tm
-
-
-class TestIndexingCallable:
- def test_frame_loc_callable(self):
- # GH 11485
- df = pd.DataFrame({"A": [1, 2, 3, 4], "B": list("aabb"), "C": [1, 2, 3, 4]})
- # iloc cannot use boolean Series (see GH3635)
-
- # return bool indexer
- res = df.loc[lambda x: x.A > 2]
- tm.assert_frame_equal(res, df.loc[df.A > 2])
-
- res = df.loc[lambda x: x.A > 2]
- tm.assert_frame_equal(res, df.loc[df.A > 2])
-
- res = df.loc[lambda x: x.A > 2]
- tm.assert_frame_equal(res, df.loc[df.A > 2])
-
- res = df.loc[lambda x: x.A > 2]
- tm.assert_frame_equal(res, df.loc[df.A > 2])
-
- res = df.loc[lambda x: x.B == "b", :]
- tm.assert_frame_equal(res, df.loc[df.B == "b", :])
-
- res = df.loc[lambda x: x.B == "b", :]
- tm.assert_frame_equal(res, df.loc[df.B == "b", :])
-
- res = df.loc[lambda x: x.A > 2, lambda x: x.columns == "B"]
- tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]])
-
- res = df.loc[lambda x: x.A > 2, lambda x: x.columns == "B"]
- tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]])
-
- res = df.loc[lambda x: x.A > 2, lambda x: "B"]
- tm.assert_series_equal(res, df.loc[df.A > 2, "B"])
-
- res = df.loc[lambda x: x.A > 2, lambda x: "B"]
- tm.assert_series_equal(res, df.loc[df.A > 2, "B"])
-
- res = df.loc[lambda x: x.A > 2, lambda x: ["A", "B"]]
- tm.assert_frame_equal(res, df.loc[df.A > 2, ["A", "B"]])
-
- res = df.loc[lambda x: x.A > 2, lambda x: ["A", "B"]]
- tm.assert_frame_equal(res, df.loc[df.A > 2, ["A", "B"]])
-
- res = df.loc[lambda x: x.A == 2, lambda x: ["A", "B"]]
- tm.assert_frame_equal(res, df.loc[df.A == 2, ["A", "B"]])
-
- res = df.loc[lambda x: x.A == 2, lambda x: ["A", "B"]]
- tm.assert_frame_equal(res, df.loc[df.A == 2, ["A", "B"]])
-
- # scalar
- res = df.loc[lambda x: 1, lambda x: "A"]
- assert res == df.loc[1, "A"]
-
- res = df.loc[lambda x: 1, lambda x: "A"]
- assert res == df.loc[1, "A"]
-
- def test_frame_loc_callable_mixture(self):
- # GH 11485
- df = pd.DataFrame({"A": [1, 2, 3, 4], "B": list("aabb"), "C": [1, 2, 3, 4]})
-
- res = df.loc[lambda x: x.A > 2, ["A", "B"]]
- tm.assert_frame_equal(res, df.loc[df.A > 2, ["A", "B"]])
-
- res = df.loc[lambda x: x.A > 2, ["A", "B"]]
- tm.assert_frame_equal(res, df.loc[df.A > 2, ["A", "B"]])
-
- res = df.loc[[2, 3], lambda x: ["A", "B"]]
- tm.assert_frame_equal(res, df.loc[[2, 3], ["A", "B"]])
-
- res = df.loc[[2, 3], lambda x: ["A", "B"]]
- tm.assert_frame_equal(res, df.loc[[2, 3], ["A", "B"]])
-
- res = df.loc[3, lambda x: ["A", "B"]]
- tm.assert_series_equal(res, df.loc[3, ["A", "B"]])
-
- res = df.loc[3, lambda x: ["A", "B"]]
- tm.assert_series_equal(res, df.loc[3, ["A", "B"]])
-
- def test_frame_loc_callable_labels(self):
- # GH 11485
- df = pd.DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD"))
-
- # return label
- res = df.loc[lambda x: ["A", "C"]]
- tm.assert_frame_equal(res, df.loc[["A", "C"]])
-
- res = df.loc[lambda x: ["A", "C"]]
- tm.assert_frame_equal(res, df.loc[["A", "C"]])
-
- res = df.loc[lambda x: ["A", "C"], :]
- tm.assert_frame_equal(res, df.loc[["A", "C"], :])
-
- res = df.loc[lambda x: ["A", "C"], lambda x: "X"]
- tm.assert_series_equal(res, df.loc[["A", "C"], "X"])
-
- res = df.loc[lambda x: ["A", "C"], lambda x: ["X"]]
- tm.assert_frame_equal(res, df.loc[["A", "C"], ["X"]])
-
- # mixture
- res = df.loc[["A", "C"], lambda x: "X"]
- tm.assert_series_equal(res, df.loc[["A", "C"], "X"])
-
- res = df.loc[["A", "C"], lambda x: ["X"]]
- tm.assert_frame_equal(res, df.loc[["A", "C"], ["X"]])
-
- res = df.loc[lambda x: ["A", "C"], "X"]
- tm.assert_series_equal(res, df.loc[["A", "C"], "X"])
-
- res = df.loc[lambda x: ["A", "C"], ["X"]]
- tm.assert_frame_equal(res, df.loc[["A", "C"], ["X"]])
-
- def test_frame_loc_callable_setitem(self):
- # GH 11485
- df = pd.DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD"))
-
- # return label
- res = df.copy()
- res.loc[lambda x: ["A", "C"]] = -20
- exp = df.copy()
- exp.loc[["A", "C"]] = -20
- tm.assert_frame_equal(res, exp)
-
- res = df.copy()
- res.loc[lambda x: ["A", "C"], :] = 20
- exp = df.copy()
- exp.loc[["A", "C"], :] = 20
- tm.assert_frame_equal(res, exp)
-
- res = df.copy()
- res.loc[lambda x: ["A", "C"], lambda x: "X"] = -1
- exp = df.copy()
- exp.loc[["A", "C"], "X"] = -1
- tm.assert_frame_equal(res, exp)
-
- res = df.copy()
- res.loc[lambda x: ["A", "C"], lambda x: ["X"]] = [5, 10]
- exp = df.copy()
- exp.loc[["A", "C"], ["X"]] = [5, 10]
- tm.assert_frame_equal(res, exp)
-
- # mixture
- res = df.copy()
- res.loc[["A", "C"], lambda x: "X"] = np.array([-1, -2])
- exp = df.copy()
- exp.loc[["A", "C"], "X"] = np.array([-1, -2])
- tm.assert_frame_equal(res, exp)
-
- res = df.copy()
- res.loc[["A", "C"], lambda x: ["X"]] = 10
- exp = df.copy()
- exp.loc[["A", "C"], ["X"]] = 10
- tm.assert_frame_equal(res, exp)
-
- res = df.copy()
- res.loc[lambda x: ["A", "C"], "X"] = -2
- exp = df.copy()
- exp.loc[["A", "C"], "X"] = -2
- tm.assert_frame_equal(res, exp)
-
- res = df.copy()
- res.loc[lambda x: ["A", "C"], ["X"]] = -4
- exp = df.copy()
- exp.loc[["A", "C"], ["X"]] = -4
- tm.assert_frame_equal(res, exp)
-
- def test_frame_iloc_callable(self):
- # GH 11485
- df = pd.DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD"))
-
- # return location
- res = df.iloc[lambda x: [1, 3]]
- tm.assert_frame_equal(res, df.iloc[[1, 3]])
-
- res = df.iloc[lambda x: [1, 3], :]
- tm.assert_frame_equal(res, df.iloc[[1, 3], :])
-
- res = df.iloc[lambda x: [1, 3], lambda x: 0]
- tm.assert_series_equal(res, df.iloc[[1, 3], 0])
-
- res = df.iloc[lambda x: [1, 3], lambda x: [0]]
- tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
-
- # mixture
- res = df.iloc[[1, 3], lambda x: 0]
- tm.assert_series_equal(res, df.iloc[[1, 3], 0])
-
- res = df.iloc[[1, 3], lambda x: [0]]
- tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
-
- res = df.iloc[lambda x: [1, 3], 0]
- tm.assert_series_equal(res, df.iloc[[1, 3], 0])
-
- res = df.iloc[lambda x: [1, 3], [0]]
- tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
-
- def test_frame_iloc_callable_setitem(self):
- # GH 11485
- df = pd.DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD"))
-
- # return location
- res = df.copy()
- res.iloc[lambda x: [1, 3]] = 0
- exp = df.copy()
- exp.iloc[[1, 3]] = 0
- tm.assert_frame_equal(res, exp)
-
- res = df.copy()
- res.iloc[lambda x: [1, 3], :] = -1
- exp = df.copy()
- exp.iloc[[1, 3], :] = -1
- tm.assert_frame_equal(res, exp)
-
- res = df.copy()
- res.iloc[lambda x: [1, 3], lambda x: 0] = 5
- exp = df.copy()
- exp.iloc[[1, 3], 0] = 5
- tm.assert_frame_equal(res, exp)
-
- res = df.copy()
- res.iloc[lambda x: [1, 3], lambda x: [0]] = 25
- exp = df.copy()
- exp.iloc[[1, 3], [0]] = 25
- tm.assert_frame_equal(res, exp)
-
- # mixture
- res = df.copy()
- res.iloc[[1, 3], lambda x: 0] = -3
- exp = df.copy()
- exp.iloc[[1, 3], 0] = -3
- tm.assert_frame_equal(res, exp)
-
- res = df.copy()
- res.iloc[[1, 3], lambda x: [0]] = -5
- exp = df.copy()
- exp.iloc[[1, 3], [0]] = -5
- tm.assert_frame_equal(res, exp)
-
- res = df.copy()
- res.iloc[lambda x: [1, 3], 0] = 10
- exp = df.copy()
- exp.iloc[[1, 3], 0] = 10
- tm.assert_frame_equal(res, exp)
-
- res = df.copy()
- res.iloc[lambda x: [1, 3], [0]] = [-5, -5]
- exp = df.copy()
- exp.iloc[[1, 3], [0]] = [-5, -5]
- tm.assert_frame_equal(res, exp)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 867941a97b598..6c80354610a78 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -778,6 +778,92 @@ def test_iloc_setitem_series_duplicate_columns(self):
assert df.dtypes.iloc[2] == np.int64
+class TestILocCallable:
+ def test_frame_iloc_getitem_callable(self):
+ # GH#11485
+ df = DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD"))
+
+ # return location
+ res = df.iloc[lambda x: [1, 3]]
+ tm.assert_frame_equal(res, df.iloc[[1, 3]])
+
+ res = df.iloc[lambda x: [1, 3], :]
+ tm.assert_frame_equal(res, df.iloc[[1, 3], :])
+
+ res = df.iloc[lambda x: [1, 3], lambda x: 0]
+ tm.assert_series_equal(res, df.iloc[[1, 3], 0])
+
+ res = df.iloc[lambda x: [1, 3], lambda x: [0]]
+ tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
+
+ # mixture
+ res = df.iloc[[1, 3], lambda x: 0]
+ tm.assert_series_equal(res, df.iloc[[1, 3], 0])
+
+ res = df.iloc[[1, 3], lambda x: [0]]
+ tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
+
+ res = df.iloc[lambda x: [1, 3], 0]
+ tm.assert_series_equal(res, df.iloc[[1, 3], 0])
+
+ res = df.iloc[lambda x: [1, 3], [0]]
+ tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
+
+ def test_frame_iloc_setitem_callable(self):
+ # GH#11485
+ df = DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD"))
+
+ # return location
+ res = df.copy()
+ res.iloc[lambda x: [1, 3]] = 0
+ exp = df.copy()
+ exp.iloc[[1, 3]] = 0
+ tm.assert_frame_equal(res, exp)
+
+ res = df.copy()
+ res.iloc[lambda x: [1, 3], :] = -1
+ exp = df.copy()
+ exp.iloc[[1, 3], :] = -1
+ tm.assert_frame_equal(res, exp)
+
+ res = df.copy()
+ res.iloc[lambda x: [1, 3], lambda x: 0] = 5
+ exp = df.copy()
+ exp.iloc[[1, 3], 0] = 5
+ tm.assert_frame_equal(res, exp)
+
+ res = df.copy()
+ res.iloc[lambda x: [1, 3], lambda x: [0]] = 25
+ exp = df.copy()
+ exp.iloc[[1, 3], [0]] = 25
+ tm.assert_frame_equal(res, exp)
+
+ # mixture
+ res = df.copy()
+ res.iloc[[1, 3], lambda x: 0] = -3
+ exp = df.copy()
+ exp.iloc[[1, 3], 0] = -3
+ tm.assert_frame_equal(res, exp)
+
+ res = df.copy()
+ res.iloc[[1, 3], lambda x: [0]] = -5
+ exp = df.copy()
+ exp.iloc[[1, 3], [0]] = -5
+ tm.assert_frame_equal(res, exp)
+
+ res = df.copy()
+ res.iloc[lambda x: [1, 3], 0] = 10
+ exp = df.copy()
+ exp.iloc[[1, 3], 0] = 10
+ tm.assert_frame_equal(res, exp)
+
+ res = df.copy()
+ res.iloc[lambda x: [1, 3], [0]] = [-5, -5]
+ exp = df.copy()
+ exp.iloc[[1, 3], [0]] = [-5, -5]
+ tm.assert_frame_equal(res, exp)
+
+
class TestILocSeries:
def test_iloc(self):
ser = Series(np.random.randn(10), index=list(range(0, 20, 2)))
diff --git a/pandas/tests/indexing/test_indexing_slow.py b/pandas/tests/indexing/test_indexing_slow.py
deleted file mode 100644
index 2ffa44bec14a6..0000000000000
--- a/pandas/tests/indexing/test_indexing_slow.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import pytest
-
-from pandas import DataFrame
-import pandas._testing as tm
-
-
-class TestIndexingSlow:
- @pytest.mark.slow
- def test_large_dataframe_indexing(self):
- # GH10692
- result = DataFrame({"x": range(10 ** 6)}, dtype="int64")
- result.loc[len(result)] = len(result) + 1
- expected = DataFrame({"x": range(10 ** 6 + 1)}, dtype="int64")
- tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index b1c66c3c8850a..d1dcae5997b9d 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1082,6 +1082,182 @@ def test_loc_setitem_multiindex_slice(self):
tm.assert_series_equal(result, expected)
+class TestLocSetitemWithExpansion:
+ @pytest.mark.slow
+ def test_loc_setitem_with_expansion_large_dataframe(self):
+ # GH#10692
+ result = DataFrame({"x": range(10 ** 6)}, dtype="int64")
+ result.loc[len(result)] = len(result) + 1
+ expected = DataFrame({"x": range(10 ** 6 + 1)}, dtype="int64")
+ tm.assert_frame_equal(result, expected)
+
+
+class TestLocCallable:
+ def test_frame_loc_getitem_callable(self):
+ # GH#11485
+ df = DataFrame({"A": [1, 2, 3, 4], "B": list("aabb"), "C": [1, 2, 3, 4]})
+ # iloc cannot use boolean Series (see GH3635)
+
+ # return bool indexer
+ res = df.loc[lambda x: x.A > 2]
+ tm.assert_frame_equal(res, df.loc[df.A > 2])
+
+ res = df.loc[lambda x: x.A > 2]
+ tm.assert_frame_equal(res, df.loc[df.A > 2])
+
+ res = df.loc[lambda x: x.A > 2]
+ tm.assert_frame_equal(res, df.loc[df.A > 2])
+
+ res = df.loc[lambda x: x.A > 2]
+ tm.assert_frame_equal(res, df.loc[df.A > 2])
+
+ res = df.loc[lambda x: x.B == "b", :]
+ tm.assert_frame_equal(res, df.loc[df.B == "b", :])
+
+ res = df.loc[lambda x: x.B == "b", :]
+ tm.assert_frame_equal(res, df.loc[df.B == "b", :])
+
+ res = df.loc[lambda x: x.A > 2, lambda x: x.columns == "B"]
+ tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]])
+
+ res = df.loc[lambda x: x.A > 2, lambda x: x.columns == "B"]
+ tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]])
+
+ res = df.loc[lambda x: x.A > 2, lambda x: "B"]
+ tm.assert_series_equal(res, df.loc[df.A > 2, "B"])
+
+ res = df.loc[lambda x: x.A > 2, lambda x: "B"]
+ tm.assert_series_equal(res, df.loc[df.A > 2, "B"])
+
+ res = df.loc[lambda x: x.A > 2, lambda x: ["A", "B"]]
+ tm.assert_frame_equal(res, df.loc[df.A > 2, ["A", "B"]])
+
+ res = df.loc[lambda x: x.A > 2, lambda x: ["A", "B"]]
+ tm.assert_frame_equal(res, df.loc[df.A > 2, ["A", "B"]])
+
+ res = df.loc[lambda x: x.A == 2, lambda x: ["A", "B"]]
+ tm.assert_frame_equal(res, df.loc[df.A == 2, ["A", "B"]])
+
+ res = df.loc[lambda x: x.A == 2, lambda x: ["A", "B"]]
+ tm.assert_frame_equal(res, df.loc[df.A == 2, ["A", "B"]])
+
+ # scalar
+ res = df.loc[lambda x: 1, lambda x: "A"]
+ assert res == df.loc[1, "A"]
+
+ res = df.loc[lambda x: 1, lambda x: "A"]
+ assert res == df.loc[1, "A"]
+
+ def test_frame_loc_getitem_callable_mixture(self):
+ # GH#11485
+ df = DataFrame({"A": [1, 2, 3, 4], "B": list("aabb"), "C": [1, 2, 3, 4]})
+
+ res = df.loc[lambda x: x.A > 2, ["A", "B"]]
+ tm.assert_frame_equal(res, df.loc[df.A > 2, ["A", "B"]])
+
+ res = df.loc[lambda x: x.A > 2, ["A", "B"]]
+ tm.assert_frame_equal(res, df.loc[df.A > 2, ["A", "B"]])
+
+ res = df.loc[[2, 3], lambda x: ["A", "B"]]
+ tm.assert_frame_equal(res, df.loc[[2, 3], ["A", "B"]])
+
+ res = df.loc[[2, 3], lambda x: ["A", "B"]]
+ tm.assert_frame_equal(res, df.loc[[2, 3], ["A", "B"]])
+
+ res = df.loc[3, lambda x: ["A", "B"]]
+ tm.assert_series_equal(res, df.loc[3, ["A", "B"]])
+
+ res = df.loc[3, lambda x: ["A", "B"]]
+ tm.assert_series_equal(res, df.loc[3, ["A", "B"]])
+
+ def test_frame_loc_getitem_callable_labels(self):
+ # GH#11485
+ df = DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD"))
+
+ # return label
+ res = df.loc[lambda x: ["A", "C"]]
+ tm.assert_frame_equal(res, df.loc[["A", "C"]])
+
+ res = df.loc[lambda x: ["A", "C"]]
+ tm.assert_frame_equal(res, df.loc[["A", "C"]])
+
+ res = df.loc[lambda x: ["A", "C"], :]
+ tm.assert_frame_equal(res, df.loc[["A", "C"], :])
+
+ res = df.loc[lambda x: ["A", "C"], lambda x: "X"]
+ tm.assert_series_equal(res, df.loc[["A", "C"], "X"])
+
+ res = df.loc[lambda x: ["A", "C"], lambda x: ["X"]]
+ tm.assert_frame_equal(res, df.loc[["A", "C"], ["X"]])
+
+ # mixture
+ res = df.loc[["A", "C"], lambda x: "X"]
+ tm.assert_series_equal(res, df.loc[["A", "C"], "X"])
+
+ res = df.loc[["A", "C"], lambda x: ["X"]]
+ tm.assert_frame_equal(res, df.loc[["A", "C"], ["X"]])
+
+ res = df.loc[lambda x: ["A", "C"], "X"]
+ tm.assert_series_equal(res, df.loc[["A", "C"], "X"])
+
+ res = df.loc[lambda x: ["A", "C"], ["X"]]
+ tm.assert_frame_equal(res, df.loc[["A", "C"], ["X"]])
+
+ def test_frame_loc_setitem_callable(self):
+ # GH#11485
+ df = DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD"))
+
+ # return label
+ res = df.copy()
+ res.loc[lambda x: ["A", "C"]] = -20
+ exp = df.copy()
+ exp.loc[["A", "C"]] = -20
+ tm.assert_frame_equal(res, exp)
+
+ res = df.copy()
+ res.loc[lambda x: ["A", "C"], :] = 20
+ exp = df.copy()
+ exp.loc[["A", "C"], :] = 20
+ tm.assert_frame_equal(res, exp)
+
+ res = df.copy()
+ res.loc[lambda x: ["A", "C"], lambda x: "X"] = -1
+ exp = df.copy()
+ exp.loc[["A", "C"], "X"] = -1
+ tm.assert_frame_equal(res, exp)
+
+ res = df.copy()
+ res.loc[lambda x: ["A", "C"], lambda x: ["X"]] = [5, 10]
+ exp = df.copy()
+ exp.loc[["A", "C"], ["X"]] = [5, 10]
+ tm.assert_frame_equal(res, exp)
+
+ # mixture
+ res = df.copy()
+ res.loc[["A", "C"], lambda x: "X"] = np.array([-1, -2])
+ exp = df.copy()
+ exp.loc[["A", "C"], "X"] = np.array([-1, -2])
+ tm.assert_frame_equal(res, exp)
+
+ res = df.copy()
+ res.loc[["A", "C"], lambda x: ["X"]] = 10
+ exp = df.copy()
+ exp.loc[["A", "C"], ["X"]] = 10
+ tm.assert_frame_equal(res, exp)
+
+ res = df.copy()
+ res.loc[lambda x: ["A", "C"], "X"] = -2
+ exp = df.copy()
+ exp.loc[["A", "C"], "X"] = -2
+ tm.assert_frame_equal(res, exp)
+
+ res = df.copy()
+ res.loc[lambda x: ["A", "C"], ["X"]] = -4
+ exp = df.copy()
+ exp.loc[["A", "C"], ["X"]] = -4
+ tm.assert_frame_equal(res, exp)
+
+
def test_series_loc_getitem_label_list_missing_values():
# gh-11428
key = np.array(
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37590 | 2020-11-02T15:36:20Z | 2020-11-02T21:21:20Z | 2020-11-02T21:21:20Z | 2020-11-02T21:47:52Z |
DOC: fix inconsistencies in `read_csv` docstring type descriptions | diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index 153899e023137..777a74f46d75e 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -101,7 +101,8 @@
By file-like object, we refer to objects with a ``read()`` method, such as
a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
- Delimiter to use. If ``sep=None``, the C engine cannot automatically detect
+ Character or regex pattern to treat as the delimiter. If ``sep=None``, the
+ C engine cannot automatically detect
the separator, but the Python parsing engine can, meaning the latter will
be used and automatically detect the separator from only the first valid
row of the file by Python's builtin sniffer tool, ``csv.Sniffer``.
@@ -111,9 +112,9 @@
to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, optional
Alias for ``sep``.
-header : int, list of int, None, default 'infer'
- Row number(s) to use as the column names, and the start of the
- data. Default behavior is to infer the column names: if no ``names``
+header : int, Sequence of int, 'infer' or None, default 'infer'
+ Row number(s) containing column labels and marking the start of the
+ data (zero-indexed). Default behavior is to infer the column names: if no ``names``
are passed the behavior is identical to ``header=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly to ``names`` then the behavior is identical to
@@ -125,20 +126,21 @@
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header=0`` denotes the first line of
data rather than the first line of the file.
-names : array-like, optional
- List of column names to use. If the file contains a header row,
+names : Sequence of Hashable, optional
+ Sequence of column labels to apply. If the file contains a header row,
then you should explicitly pass ``header=0`` to override the column names.
Duplicates in this list are not allowed.
-index_col : int, str, sequence of int / str, or False, optional
- Column(s) to use as the row labels of the :class:`~pandas.DataFrame`, either given as
- string name or column index. If a sequence of ``int`` / ``str`` is given, a
- :class:`~pandas.MultiIndex` is used.
+index_col : Hashable, Sequence of Hashable or False, optional
+ Column(s) to use as row label(s), denoted either by column labels or column
+ indices. If a sequence of labels or indices is given, :class:`~pandas.MultiIndex`
+ will be formed for the row labels.
Note: ``index_col=False`` can be used to force ``pandas`` to *not* use the first
- column as the index, e.g. when you have a malformed file with delimiters at
+ column as the index, e.g., when you have a malformed file with delimiters at
the end of each line.
-usecols : list-like or callable, optional
- Return a subset of the columns. If list-like, all elements must either
+usecols : list of Hashable or Callable, optional
+ Subset of columns to select, denoted either by column labels or column indices.
+ If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in ``names`` or
inferred from the document header row(s). If ``names`` are given, the document
@@ -156,9 +158,9 @@
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
-dtype : Type name or dict of column -> type, optional
- Data type for data or columns. E.g., ``{{'a': np.float64, 'b': np.int32,
- 'c': 'Int64'}}``
+dtype : dtype or dict of {{Hashable : dtype}}, optional
+ Data type(s) to apply to either the whole dataset or individual columns.
+ E.g., ``{{'a': np.float64, 'b': np.int32, 'c': 'Int64'}}``
Use ``str`` or ``object`` together with suitable ``na_values`` settings
to preserve and not interpret ``dtype``.
If ``converters`` are specified, they will be applied INSTEAD
@@ -176,18 +178,18 @@
.. versionadded:: 1.4.0
- The "pyarrow" engine was added as an *experimental* engine, and some features
+ The 'pyarrow' engine was added as an *experimental* engine, and some features
are unsupported, or may not work correctly, with this engine.
-converters : dict, optional
- ``dict`` of functions for converting values in certain columns. Keys can either
- be integers or column labels.
+converters : dict of {{Hashable : Callable}}, optional
+ Functions for converting values in specified columns. Keys can either
+ be column labels or column indices.
true_values : list, optional
- Values to consider as ``True`` in addition to case-insensitive variants of "True".
+ Values to consider as ``True`` in addition to case-insensitive variants of 'True'.
false_values : list, optional
- Values to consider as ``False`` in addition to case-insensitive variants of "False".
+ Values to consider as ``False`` in addition to case-insensitive variants of 'False'.
skipinitialspace : bool, default False
Skip spaces after delimiter.
-skiprows : list-like, int or callable, optional
+skiprows : int, list of int or Callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (``int``)
at the start of the file.
@@ -198,7 +200,7 @@
Number of lines at bottom of file to skip (Unsupported with ``engine='c'``).
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
-na_values : scalar, str, list-like, or dict, optional
+na_values : Hashable, Iterable of Hashable or dict of {{Hashable : Iterable}}, optional
Additional strings to recognize as ``NA``/``NaN``. If ``dict`` passed, specific
per-column ``NA`` values. By default the following values are interpreted as
``NaN``: '"""
@@ -227,7 +229,7 @@
Indicate number of ``NA`` values placed in non-numeric columns.
skip_blank_lines : bool, default True
If ``True``, skip over blank lines rather than interpreting as ``NaN`` values.
-parse_dates : bool or list of int or names or list of lists or dict, \
+parse_dates : bool, list of Hashable, list of lists or dict of {{Hashable : list}}, \
default False
The behavior is as follows:
@@ -258,7 +260,7 @@
keep_date_col : bool, default False
If ``True`` and ``parse_dates`` specifies combining multiple columns then
keep the original columns.
-date_parser : function, optional
+date_parser : Callable, optional
Function to use for converting a sequence of string columns to an array of
``datetime`` instances. The default uses ``dateutil.parser.parser`` to do the
conversion. ``pandas`` will try to call ``date_parser`` in three different ways,
@@ -273,9 +275,9 @@
Use ``date_format`` instead, or read in as ``object`` and then apply
:func:`~pandas.to_datetime` as-needed.
date_format : str or dict of column -> format, optional
- If used in conjunction with ``parse_dates``, will parse dates according to this
- format. For anything more complex,
- please read in as ``object`` and then apply :func:`~pandas.to_datetime` as-needed.
+ Format to use for parsing dates when used in conjunction with ``parse_dates``.
+ For anything more complex, please read in as ``object`` and then apply
+ :func:`~pandas.to_datetime` as-needed.
.. versionadded:: 2.0.0
dayfirst : bool, default False
@@ -305,50 +307,53 @@
.. versionchanged:: 1.4.0 Zstandard support.
-thousands : str, optional
- Thousands separator.
-decimal : str, default '.'
- Character to recognize as decimal point (e.g. use ',' for European data).
+thousands : str (length 1), optional
+ Character acting as the thousands separator in numerical values.
+decimal : str (length 1), default '.'
+ Character to recognize as decimal point (e.g., use ',' for European data).
lineterminator : str (length 1), optional
- Character to break file into lines. Only valid with C parser.
+ Character used to denote a line break. Only valid with C parser.
quotechar : str (length 1), optional
- The character used to denote the start and end of a quoted item. Quoted
+ Character used to denote the start and end of a quoted item. Quoted
items can include the ``delimiter`` and it will be ignored.
-quoting : int or csv.QUOTE_* instance, default 0
- Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
- ``QUOTE_MINIMAL`` (0), ``QUOTE_ALL`` (1), ``QUOTE_NONNUMERIC`` (2) or
- ``QUOTE_NONE`` (3).
+quoting : {{0 or csv.QUOTE_MINIMAL, 1 or csv.QUOTE_ALL, 2 or csv.QUOTE_NONNUMERIC, \
+3 or csv.QUOTE_NONE}}, default csv.QUOTE_MINIMAL
+ Control field quoting behavior per ``csv.QUOTE_*`` constants. Default is
+ ``csv.QUOTE_MINIMAL`` (i.e., 0) which implies that only fields containing special
+ characters are quoted (e.g., characters defined in ``quotechar``, ``delimiter``,
+ or ``lineterminator``.
doublequote : bool, default True
When ``quotechar`` is specified and ``quoting`` is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive ``quotechar`` elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), optional
- One-character string used to escape other characters.
-comment : str, optional
- Indicates remainder of line should not be parsed. If found at the beginning
+ Character used to escape other characters.
+comment : str (length 1), optional
+ Character indicating that the remainder of line should not be parsed.
+ If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter ``header`` but not by
``skiprows``. For example, if ``comment='#'``, parsing
``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in ``'a,b,c'`` being
treated as the header.
-encoding : str, optional, default "utf-8"
+encoding : str, optional, default 'utf-8'
Encoding to use for UTF when reading/writing (ex. ``'utf-8'``). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
.. versionchanged:: 1.2
- When ``encoding`` is ``None``, ``errors="replace"`` is passed to
- ``open()``. Otherwise, ``errors="strict"`` is passed to ``open()``.
- This behavior was previously only the case for ``engine="python"``.
+ When ``encoding`` is ``None``, ``errors='replace'`` is passed to
+ ``open()``. Otherwise, ``errors='strict'`` is passed to ``open()``.
+ This behavior was previously only the case for ``engine='python'``.
.. versionchanged:: 1.3.0
``encoding_errors`` is a new argument. ``encoding`` has no longer an
influence on how encoding errors are handled.
-encoding_errors : str, optional, default "strict"
+encoding_errors : str, optional, default 'strict'
How encoding errors are treated. `List of possible values
<https://docs.python.org/3/library/codecs.html#error-handlers>`_ .
@@ -360,7 +365,7 @@
``skipinitialspace``, ``quotechar``, and ``quoting``. If it is necessary to
override values, a ``ParserWarning`` will be issued. See ``csv.Dialect``
documentation for more details.
-on_bad_lines : {{'error', 'warn', 'skip'}} or callable, default 'error'
+on_bad_lines : {{'error', 'warn', 'skip'}} or Callable, default 'error'
Specifies what to do upon encountering a bad line (a line with too many fields).
Allowed values are :
@@ -378,11 +383,11 @@
If the function returns ``None``, the bad line will be ignored.
If the function returns a new ``list`` of strings with more elements than
expected, a ``ParserWarning`` will be emitted while dropping extra elements.
- Only supported when ``engine="python"``
+ Only supported when ``engine='python'``
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\\t'``) will be
- used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
+ used as the ``sep`` delimiter. Equivalent to setting ``sep='\\s+'``. If this option
is set to ``True``, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
@@ -396,7 +401,7 @@
If a filepath is provided for ``filepath_or_buffer``, map the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer any I/O overhead.
-float_precision : str, optional
+float_precision : {{'high', 'legacy', 'round_trip'}}, optional
Specifies which converter the C engine should use for floating-point
values. The options are ``None`` or ``'high'`` for the ordinary converter,
``'legacy'`` for the original lower precision ``pandas`` converter, and
@@ -408,13 +413,14 @@
.. versionadded:: 1.2
-dtype_backend : {{"numpy_nullable", "pyarrow"}}, defaults to NumPy backed DataFrame
- Which ``dtype_backend`` to use, e.g. whether a :class:`~pandas.DataFrame` should
- have NumPy arrays, nullable ``dtypes`` are used for all ``dtypes`` that have a
- nullable implementation when ``"numpy_nullable"`` is set, pyarrow is used for all
- dtypes if ``"pyarrow"`` is set.
+dtype_backend : {{'numpy_nullable', 'pyarrow'}}, defaults to NumPy backed DataFrame
+ Back-end data type to use for the :class:`~pandas.DataFrame`. For
+ ``'numpy_nullable'``, have NumPy arrays, nullable ``dtypes`` are used for all
+ ``dtypes`` that have a
+ nullable implementation when ``'numpy_nullable'`` is set, pyarrow is used for all
+ dtypes if ``'pyarrow'`` is set.
- The ``dtype_backends`` are still experimential.
+ The ``dtype_backends`` are still experimental.
.. versionadded:: 2.0
| - [x] closes #53763
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). | https://api.github.com/repos/pandas-dev/pandas/pulls/53834 | 2023-06-25T03:18:44Z | 2023-06-26T17:45:53Z | 2023-06-26T17:45:53Z | 2023-06-26T22:20:00Z |
BUG: DataFrame.shift(axis=1) with EADtype | diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst
index 7b9efd7f593dd..ede720bbbec40 100644
--- a/doc/source/whatsnew/v2.1.0.rst
+++ b/doc/source/whatsnew/v2.1.0.rst
@@ -535,11 +535,12 @@ Other
- Bug in :func:`assert_almost_equal` now throwing assertion error for two unequal sets (:issue:`51727`)
- Bug in :func:`assert_frame_equal` checks category dtypes even when asked not to check index type (:issue:`52126`)
- Bug in :meth:`DataFrame.reindex` with a ``fill_value`` that should be inferred with a :class:`ExtensionDtype` incorrectly inferring ``object`` dtype (:issue:`52586`)
+- Bug in :meth:`DataFrame.shift` and :meth:`Series.shift` when passing both "freq" and "fill_value" silently ignoring "fill_value" instead of raising ``ValueError`` (:issue:`53832`)
+- Bug in :meth:`DataFrame.shift` with ``axis=1`` on a :class:`DataFrame` with a single :class:`ExtensionDtype` column giving incorrect results (:issue:`53832`)
- Bug in :meth:`Series.align`, :meth:`DataFrame.align`, :meth:`Series.reindex`, :meth:`DataFrame.reindex`, :meth:`Series.interpolate`, :meth:`DataFrame.interpolate`, incorrectly failing to raise with method="asfreq" (:issue:`53620`)
- Bug in :meth:`Series.map` when giving a callable to an empty series, the returned series had ``object`` dtype. It now keeps the original dtype (:issue:`52384`)
- Bug in :meth:`Series.memory_usage` when ``deep=True`` throw an error with Series of objects and the returned value is incorrect, as it does not take into account GC corrections (:issue:`51858`)
- Fixed incorrect ``__name__`` attribute of ``pandas._libs.json`` (:issue:`52898`)
--
.. ***DO NOT USE THIS SECTION***
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4088736dd4150..1630c541c5701 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5483,45 +5483,41 @@ def shift(
) -> DataFrame:
axis = self._get_axis_number(axis)
+ if freq is not None and fill_value is not lib.no_default:
+ # GH#53832
+ raise ValueError(
+ "Cannot pass both 'freq' and 'fill_value' to "
+ f"{type(self).__name__}.shift"
+ )
+
ncols = len(self.columns)
- if (
- axis == 1
- and periods != 0
- and freq is None
- and fill_value is lib.no_default
- and ncols > 0
- ):
- # We will infer fill_value to match the closest column
-
- # Use a column that we know is valid for our column's dtype GH#38434
- label = self.columns[0]
-
- if periods > 0:
- result = self.iloc[:, :-periods]
- for col in range(min(ncols, abs(periods))):
- # TODO(EA2D): doing this in a loop unnecessary with 2D EAs
- # Define filler inside loop so we get a copy
- filler = self.iloc[:, 0].shift(len(self))
- result.insert(0, label, filler, allow_duplicates=True)
- else:
- result = self.iloc[:, -periods:]
- for col in range(min(ncols, abs(periods))):
- # Define filler inside loop so we get a copy
- filler = self.iloc[:, -1].shift(len(self))
- result.insert(
- len(result.columns), label, filler, allow_duplicates=True
- )
+ arrays = self._mgr.arrays
+ if axis == 1 and periods != 0 and ncols > 0 and freq is None:
+ if fill_value is lib.no_default:
+ # We will infer fill_value to match the closest column
- result.columns = self.columns.copy()
- return result
- elif (
- axis == 1
- and periods != 0
- and fill_value is not lib.no_default
- and ncols > 0
- ):
- arrays = self._mgr.arrays
- if len(arrays) > 1 or (
+ # Use a column that we know is valid for our column's dtype GH#38434
+ label = self.columns[0]
+
+ if periods > 0:
+ result = self.iloc[:, :-periods]
+ for col in range(min(ncols, abs(periods))):
+ # TODO(EA2D): doing this in a loop unnecessary with 2D EAs
+ # Define filler inside loop so we get a copy
+ filler = self.iloc[:, 0].shift(len(self))
+ result.insert(0, label, filler, allow_duplicates=True)
+ else:
+ result = self.iloc[:, -periods:]
+ for col in range(min(ncols, abs(periods))):
+ # Define filler inside loop so we get a copy
+ filler = self.iloc[:, -1].shift(len(self))
+ result.insert(
+ len(result.columns), label, filler, allow_duplicates=True
+ )
+
+ result.columns = self.columns.copy()
+ return result
+ elif len(arrays) > 1 or (
# If we only have one block and we know that we can't
# keep the same dtype (i.e. the _can_hold_element check)
# then we can go through the reindex_indexer path
@@ -5549,6 +5545,8 @@ def shift(
)
res_df = self._constructor(mgr)
return res_df.__finalize__(self, method="shift")
+ else:
+ return self.T.shift(periods=periods, fill_value=fill_value).T
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 0be840f9a4ef1..4d50d4519859c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10434,7 +10434,7 @@ def shift(
periods: int = 1,
freq=None,
axis: Axis = 0,
- fill_value: Hashable = None,
+ fill_value: Hashable = lib.no_default,
) -> Self:
"""
Shift index by desired number of periods with an optional time `freq`.
@@ -10532,6 +10532,15 @@ def shift(
2020-01-07 30 33 37
2020-01-08 45 48 52
"""
+ axis = self._get_axis_number(axis)
+
+ if freq is not None and fill_value is not lib.no_default:
+ # GH#53832
+ raise ValueError(
+ "Cannot pass both 'freq' and 'fill_value' to "
+ f"{type(self).__name__}.shift"
+ )
+
if periods == 0:
return self.copy(deep=None)
@@ -10543,6 +10552,11 @@ def shift(
)
return self._constructor(new_data).__finalize__(self, method="shift")
+ return self._shift_with_freq(periods, axis, freq)
+
+ @final
+ def _shift_with_freq(self, periods: int, axis: int, freq) -> Self:
+ # see shift.__doc__
# when freq is given, index is shifted, data is not
index = self._get_axis(axis)
diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py
index 529be6850b3ba..ebbb7ca13646f 100644
--- a/pandas/tests/frame/methods/test_shift.py
+++ b/pandas/tests/frame/methods/test_shift.py
@@ -17,6 +17,35 @@
class TestDataFrameShift:
+ def test_shift_axis1_with_valid_fill_value_one_array(self):
+ # Case with axis=1 that does not go through the "len(arrays)>1" path
+ # in DataFrame.shift
+ data = np.random.randn(5, 3)
+ df = DataFrame(data)
+ res = df.shift(axis=1, periods=1, fill_value=12345)
+ expected = df.T.shift(periods=1, fill_value=12345).T
+ tm.assert_frame_equal(res, expected)
+
+ # same but with an 1D ExtensionArray backing it
+ df2 = df[[0]].astype("Float64")
+ res2 = df2.shift(axis=1, periods=1, fill_value=12345)
+ expected2 = DataFrame([12345] * 5, dtype="Float64")
+ tm.assert_frame_equal(res2, expected2)
+
+ def test_shift_disallow_freq_and_fill_value(self, frame_or_series):
+ # Can't pass both!
+ obj = frame_or_series(
+ np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
+ )
+
+ msg = "Cannot pass both 'freq' and 'fill_value' to (Series|DataFrame).shift"
+ with pytest.raises(ValueError, match=msg):
+ obj.shift(1, fill_value=1, freq="H")
+
+ if frame_or_series is DataFrame:
+ with pytest.raises(ValueError, match=msg):
+ obj.shift(1, axis=1, fill_value=1, freq="H")
+
@pytest.mark.parametrize(
"input_data, output_data",
[(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Following this we can simplify the Manager/Block/EA methods since they will only ever get `axis=0` | https://api.github.com/repos/pandas-dev/pandas/pulls/53832 | 2023-06-24T20:36:30Z | 2023-06-26T17:48:53Z | 2023-06-26T17:48:53Z | 2023-08-28T20:30:56Z |
DEPR: Remove bytes input for read_excel | diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst
index 7b9efd7f593dd..b3f17c62f3dde 100644
--- a/doc/source/whatsnew/v2.1.0.rst
+++ b/doc/source/whatsnew/v2.1.0.rst
@@ -295,6 +295,7 @@ Deprecations
- Deprecated allowing ``downcast`` keyword other than ``None``, ``False``, "infer", or a dict with these as values in :meth:`Series.fillna`, :meth:`DataFrame.fillna` (:issue:`40988`)
- Deprecated allowing arbitrary ``fill_value`` in :class:`SparseDtype`, in a future version the ``fill_value`` will need to be compatible with the ``dtype.subtype``, either a scalar that can be held by that subtype or ``NaN`` for integer or bool subtypes (:issue:`23124`)
- Deprecated behavior of :func:`assert_series_equal` and :func:`assert_frame_equal` considering NA-like values (e.g. ``NaN`` vs ``None`` as equivalent) (:issue:`52081`)
+- Deprecated bytes input to :func:`read_excel`. To read a file path, use a string or path-like object. (:issue:`53767`)
- Deprecated constructing :class:`SparseArray` from scalar data, pass a sequence instead (:issue:`53039`)
- Deprecated falling back to filling when ``value`` is not specified in :meth:`DataFrame.replace` and :meth:`Series.replace` with non-dict-like ``to_replace`` (:issue:`33302`)
- Deprecated literal json input to :func:`read_json`. Wrap literal json string input in ``io.StringIO`` instead. (:issue:`53409`)
@@ -305,6 +306,7 @@ Deprecations
- Deprecated the "method" and "limit" keywords on :meth:`Series.fillna`, :meth:`DataFrame.fillna`, :meth:`SeriesGroupBy.fillna`, :meth:`DataFrameGroupBy.fillna`, and :meth:`Resampler.fillna`, use ``obj.bfill()`` or ``obj.ffill()`` instead (:issue:`53394`)
- Deprecated the ``method`` and ``limit`` keywords in :meth:`DataFrame.replace` and :meth:`Series.replace` (:issue:`33302`)
- Deprecated values "pad", "ffill", "bfill", "backfill" for :meth:`Series.interpolate` and :meth:`DataFrame.interpolate`, use ``obj.ffill()`` or ``obj.bfill()`` instead (:issue:`53581`)
+-
.. ---------------------------------------------------------------------------
.. _whatsnew_210.performance:
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index f4782dcfcc08d..fbc3893e9b53a 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -21,6 +21,7 @@
cast,
overload,
)
+import warnings
import zipfile
from pandas._config import config
@@ -36,6 +37,7 @@
Appender,
doc,
)
+from pandas.util._exceptions import find_stack_level
from pandas.util._validators import check_dtype_backend
from pandas.core.dtypes.common import (
@@ -97,6 +99,10 @@
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
+
+ .. deprecated:: 2.1.0
+ Passing byte strings is deprecated. To read from a
+ byte string, wrap it in a ``BytesIO`` object.
sheet_name : str, int, list, or None, default 0
Strings are used for sheet names. Integers are used in zero-indexed
sheet positions (chart sheets do not count as a sheet position).
@@ -1504,6 +1510,13 @@ def __init__(
# First argument can also be bytes, so create a buffer
if isinstance(path_or_buffer, bytes):
path_or_buffer = BytesIO(path_or_buffer)
+ warnings.warn(
+ "Passing bytes to 'read_excel' is deprecated and "
+ "will be removed in a future version. To read from a "
+ "byte string, wrap it in a `BytesIO` object.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
# Could be a str, ExcelFile, Book, etc.
self.io = path_or_buffer
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index 88f55145b599a..1e17e866ec530 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -3,6 +3,7 @@
time,
)
from functools import partial
+from io import BytesIO
import os
from pathlib import Path
import platform
@@ -873,7 +874,7 @@ def test_corrupt_bytes_raises(self, engine):
error = BadZipFile
msg = "File is not a zip file"
with pytest.raises(error, match=msg):
- pd.read_excel(bad_stream)
+ pd.read_excel(BytesIO(bad_stream))
@pytest.mark.network
@tm.network(
@@ -1446,6 +1447,18 @@ def test_euro_decimal_format(self, read_ext):
class TestExcelFileRead:
+ def test_deprecate_bytes_input(self, engine, read_ext):
+ # GH 53830
+ msg = (
+ "Passing bytes to 'read_excel' is deprecated and "
+ "will be removed in a future version. To read from a "
+ "byte string, wrap it in a `BytesIO` object."
+ )
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ with open("test1" + read_ext, "rb") as f:
+ pd.read_excel(f.read(), engine=engine)
+
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
@@ -1629,7 +1642,7 @@ def test_excel_read_binary(self, engine, read_ext):
with open("test1" + read_ext, "rb") as f:
data = f.read()
- actual = pd.read_excel(data, engine=engine)
+ actual = pd.read_excel(BytesIO(data), engine=engine)
tm.assert_frame_equal(expected, actual)
def test_excel_read_binary_via_read_excel(self, read_ext, engine):
| - [X] closes #53767
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/53830 | 2023-06-24T15:14:47Z | 2023-06-27T17:29:07Z | 2023-06-27T17:29:07Z | 2024-02-17T17:20:18Z |
TST: Use pytest-localserver instead of making network connections | diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml
index 0923594f2c840..ffa7732c604a0 100644
--- a/ci/deps/actions-310.yaml
+++ b/ci/deps/actions-310.yaml
@@ -15,6 +15,7 @@ dependencies:
- pytest-cov
- pytest-xdist>=2.2.0
- pytest-asyncio>=0.17.0
+ - pytest-localserver>=0.7.1
- boto3
# required dependencies
diff --git a/ci/deps/actions-311-downstream_compat.yaml b/ci/deps/actions-311-downstream_compat.yaml
index 51c7a97ad6500..596f3476c9c4e 100644
--- a/ci/deps/actions-311-downstream_compat.yaml
+++ b/ci/deps/actions-311-downstream_compat.yaml
@@ -16,6 +16,7 @@ dependencies:
- pytest-cov
- pytest-xdist>=2.2.0
- pytest-asyncio>=0.17.0
+ - pytest-localserver>=0.7.1
- boto3
# required dependencies
diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml
index 66b8650116854..9d60d734db5b3 100644
--- a/ci/deps/actions-311.yaml
+++ b/ci/deps/actions-311.yaml
@@ -15,6 +15,7 @@ dependencies:
- pytest-cov
- pytest-xdist>=2.2.0
- pytest-asyncio>=0.17.0
+ - pytest-localserver>=0.7.1
- boto3
# required dependencies
diff --git a/ci/deps/actions-39-minimum_versions.yaml b/ci/deps/actions-39-minimum_versions.yaml
index e1b4fdfb1d897..91961e4af2d1c 100644
--- a/ci/deps/actions-39-minimum_versions.yaml
+++ b/ci/deps/actions-39-minimum_versions.yaml
@@ -17,6 +17,7 @@ dependencies:
- pytest-cov
- pytest-xdist>=2.2.0
- pytest-asyncio>=0.17.0
+ - pytest-localserver>=0.7.1
- boto3
# required dependencies
diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml
index 8ff47dbb9cc95..6ea0d41b947dc 100644
--- a/ci/deps/actions-39.yaml
+++ b/ci/deps/actions-39.yaml
@@ -15,6 +15,7 @@ dependencies:
- pytest-cov
- pytest-xdist>=2.2.0
- pytest-asyncio>=0.17.0
+ - pytest-localserver>=0.7.1
- boto3
# required dependencies
diff --git a/ci/deps/circle-310-arm64.yaml b/ci/deps/circle-310-arm64.yaml
index ca9860fc20742..df4e8e285bd02 100644
--- a/ci/deps/circle-310-arm64.yaml
+++ b/ci/deps/circle-310-arm64.yaml
@@ -15,6 +15,7 @@ dependencies:
- pytest-cov
- pytest-xdist>=2.2.0
- pytest-asyncio>=0.17.0
+ - pytest-localserver>=0.7.1
- boto3
# required dependencies
diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst
index 311120fc527d4..00f9fd74e01ca 100644
--- a/doc/source/development/contributing_codebase.rst
+++ b/doc/source/development/contributing_codebase.rst
@@ -612,23 +612,17 @@ deleted when the context block is exited.
Testing involving network connectivity
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-It is highly discouraged to add a test that connects to the internet due to flakiness of network connections and
-lack of ownership of the server that is being connected to. If network connectivity is absolutely required, use the
-``tm.network`` decorator.
+A unit test should not access a public data set over the internet due to flakiness of network connections and
+lack of ownership of the server that is being connected to. To mock this interaction, use the ``httpserver`` fixture from the
+`pytest-localserver plugin. <https://github.com/pytest-dev/pytest-localserver>`_ with synthetic data.
.. code-block:: python
- @tm.network # noqa
- def test_network():
- result = package.call_to_internet()
-
-If the test requires data from a specific website, specify ``check_before_test=True`` and the site in the decorator.
-
-.. code-block:: python
-
- @tm.network("https://www.somespecificsite.com", check_before_test=True)
- def test_network():
- result = pd.read_html("https://www.somespecificsite.com")
+ @pytest.mark.network
+ @pytest.mark.single_cpu
+ def test_network(httpserver):
+ httpserver.serve_content(content="content")
+ result = pd.read_html(httpserver.url)
Example
^^^^^^^
diff --git a/environment.yml b/environment.yml
index 6178fe896760f..8fd97e6fcc0e1 100644
--- a/environment.yml
+++ b/environment.yml
@@ -17,6 +17,7 @@ dependencies:
- pytest-cov
- pytest-xdist>=2.2.0
- pytest-asyncio>=0.17.0
+ - pytest-localserver>=0.7.1
- coverage
# required dependencies
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index de3dd58d3b716..fbbdfa4b8a5bf 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -51,7 +51,6 @@
)
from pandas._testing._io import (
close,
- network,
round_trip_localpath,
round_trip_pathlib,
round_trip_pickle,
@@ -1150,7 +1149,6 @@ def shares_memory(left, right) -> bool:
"makeUIntIndex",
"maybe_produces_warning",
"NARROW_NP_DTYPES",
- "network",
"NP_NAT_OBJECTS",
"NULL_OBJECTS",
"OBJECT_DTYPES",
diff --git a/pandas/_testing/_io.py b/pandas/_testing/_io.py
index d79968a580e40..fa0bc58a132d4 100644
--- a/pandas/_testing/_io.py
+++ b/pandas/_testing/_io.py
@@ -1,10 +1,8 @@
from __future__ import annotations
import bz2
-from functools import wraps
import gzip
import io
-import socket
import tarfile
from typing import (
TYPE_CHECKING,
@@ -20,8 +18,6 @@
from pandas._testing._random import rands
from pandas._testing.contexts import ensure_clean
-from pandas.io.common import urlopen
-
if TYPE_CHECKING:
from pandas._typing import (
FilePath,
@@ -33,255 +29,6 @@
Series,
)
-# skip tests on exceptions with these messages
-_network_error_messages = (
- # 'urlopen error timed out',
- # 'timeout: timed out',
- # 'socket.timeout: timed out',
- "timed out",
- "Server Hangup",
- "HTTP Error 503: Service Unavailable",
- "502: Proxy Error",
- "HTTP Error 502: internal error",
- "HTTP Error 502",
- "HTTP Error 503",
- "HTTP Error 403",
- "HTTP Error 400",
- "Temporary failure in name resolution",
- "Name or service not known",
- "Connection refused",
- "certificate verify",
-)
-
-# or this e.errno/e.reason.errno
-_network_errno_vals = (
- 101, # Network is unreachable
- 111, # Connection refused
- 110, # Connection timed out
- 104, # Connection reset Error
- 54, # Connection reset by peer
- 60, # urllib.error.URLError: [Errno 60] Connection timed out
-)
-
-# Both of the above shouldn't mask real issues such as 404's
-# or refused connections (changed DNS).
-# But some tests (test_data yahoo) contact incredibly flakey
-# servers.
-
-# and conditionally raise on exception types in _get_default_network_errors
-
-
-def _get_default_network_errors():
- # Lazy import for http.client & urllib.error
- # because it imports many things from the stdlib
- import http.client
- import urllib.error
-
- return (
- OSError,
- http.client.HTTPException,
- TimeoutError,
- urllib.error.URLError,
- socket.timeout,
- )
-
-
-def optional_args(decorator):
- """
- allows a decorator to take optional positional and keyword arguments.
- Assumes that taking a single, callable, positional argument means that
- it is decorating a function, i.e. something like this::
-
- @my_decorator
- def function(): pass
-
- Calls decorator with decorator(f, *args, **kwargs)
- """
-
- @wraps(decorator)
- def wrapper(*args, **kwargs):
- def dec(f):
- return decorator(f, *args, **kwargs)
-
- is_decorating = not kwargs and len(args) == 1 and callable(args[0])
- if is_decorating:
- f = args[0]
- args = ()
- return dec(f)
- else:
- return dec
-
- return wrapper
-
-
-# error: Untyped decorator makes function "network" untyped
-@optional_args # type: ignore[misc]
-def network(
- t,
- url: str = "https://www.google.com",
- raise_on_error: bool = False,
- check_before_test: bool = False,
- error_classes=None,
- skip_errnos=_network_errno_vals,
- _skip_on_messages=_network_error_messages,
-):
- """
- Label a test as requiring network connection and, if an error is
- encountered, only raise if it does not find a network connection.
-
- In comparison to ``network``, this assumes an added contract to your test:
- you must assert that, under normal conditions, your test will ONLY fail if
- it does not have network connectivity.
-
- You can call this in 3 ways: as a standard decorator, with keyword
- arguments, or with a positional argument that is the url to check.
-
- Parameters
- ----------
- t : callable
- The test requiring network connectivity.
- url : path
- The url to test via ``pandas.io.common.urlopen`` to check
- for connectivity. Defaults to 'https://www.google.com'.
- raise_on_error : bool
- If True, never catches errors.
- check_before_test : bool
- If True, checks connectivity before running the test case.
- error_classes : tuple or Exception
- error classes to ignore. If not in ``error_classes``, raises the error.
- defaults to OSError. Be careful about changing the error classes here.
- skip_errnos : iterable of int
- Any exception that has .errno or .reason.erno set to one
- of these values will be skipped with an appropriate
- message.
- _skip_on_messages: iterable of string
- any exception e for which one of the strings is
- a substring of str(e) will be skipped with an appropriate
- message. Intended to suppress errors where an errno isn't available.
-
- Notes
- -----
- * ``raise_on_error`` supersedes ``check_before_test``
-
- Returns
- -------
- t : callable
- The decorated test ``t``, with checks for connectivity errors.
-
- Example
- -------
-
- Tests decorated with @network will fail if it's possible to make a network
- connection to another URL (defaults to google.com)::
-
- >>> from pandas import _testing as tm
- >>> @tm.network
- ... def test_network():
- ... with pd.io.common.urlopen("rabbit://bonanza.com"):
- ... pass
- >>> test_network() # doctest: +SKIP
- Traceback
- ...
- URLError: <urlopen error unknown url type: rabbit>
-
- You can specify alternative URLs::
-
- >>> @tm.network("https://www.yahoo.com")
- ... def test_something_with_yahoo():
- ... raise OSError("Failure Message")
- >>> test_something_with_yahoo() # doctest: +SKIP
- Traceback (most recent call last):
- ...
- OSError: Failure Message
-
- If you set check_before_test, it will check the url first and not run the
- test on failure::
-
- >>> @tm.network("failing://url.blaher", check_before_test=True)
- ... def test_something():
- ... print("I ran!")
- ... raise ValueError("Failure")
- >>> test_something() # doctest: +SKIP
- Traceback (most recent call last):
- ...
-
- Errors not related to networking will always be raised.
- """
- import pytest
-
- if error_classes is None:
- error_classes = _get_default_network_errors()
-
- t.network = True
-
- @wraps(t)
- def wrapper(*args, **kwargs):
- if (
- check_before_test
- and not raise_on_error
- and not can_connect(url, error_classes)
- ):
- pytest.skip(
- f"May not have network connectivity because cannot connect to {url}"
- )
- try:
- return t(*args, **kwargs)
- except Exception as err:
- errno = getattr(err, "errno", None)
- if not errno and hasattr(errno, "reason"):
- # error: "Exception" has no attribute "reason"
- errno = getattr(err.reason, "errno", None) # type: ignore[attr-defined]
-
- if errno in skip_errnos:
- pytest.skip(f"Skipping test due to known errno and error {err}")
-
- e_str = str(err)
-
- if any(m.lower() in e_str.lower() for m in _skip_on_messages):
- pytest.skip(
- f"Skipping test because exception message is known and error {err}"
- )
-
- if not isinstance(err, error_classes) or raise_on_error:
- raise
- pytest.skip(f"Skipping test due to lack of connectivity and error {err}")
-
- return wrapper
-
-
-def can_connect(url, error_classes=None) -> bool:
- """
- Try to connect to the given url. True if succeeds, False if OSError
- raised
-
- Parameters
- ----------
- url : basestring
- The URL to try to connect to
-
- Returns
- -------
- connectable : bool
- Return True if no OSError (unable to connect) or URLError (bad url) was
- raised
- """
- if error_classes is None:
- error_classes = _get_default_network_errors()
-
- try:
- with urlopen(url, timeout=20) as response:
- # Timeout just in case rate-limiting is applied
- if (
- response.info().get("Content-type") == "text/html"
- and response.status != 200
- ):
- return False
- except error_classes:
- return False
- else:
- return True
-
-
# ------------------------------------------------------------------
# File-IO
diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py
index c9890032f408a..170e2f61e7d4a 100644
--- a/pandas/tests/io/conftest.py
+++ b/pandas/tests/io/conftest.py
@@ -45,6 +45,11 @@ def feather_file(datapath):
return datapath("io", "data", "feather", "feather-0_3_1.feather")
+@pytest.fixture
+def xml_file(datapath):
+ return datapath("io", "data", "xml", "books.xml")
+
+
@pytest.fixture
def s3so(worker_id):
if is_ci_environment():
@@ -141,7 +146,9 @@ def s3_public_bucket(s3_resource):
@pytest.fixture
-def s3_public_bucket_with_data(s3_public_bucket, tips_file, jsonl_file, feather_file):
+def s3_public_bucket_with_data(
+ s3_public_bucket, tips_file, jsonl_file, feather_file, xml_file
+):
"""
The following datasets
are loaded.
@@ -158,6 +165,7 @@ def s3_public_bucket_with_data(s3_public_bucket, tips_file, jsonl_file, feather_
("tips.csv.bz2", tips_file + ".bz2"),
("items.jsonl", jsonl_file),
("simple_dataset.feather", feather_file),
+ ("books.xml", xml_file),
]
for s3_key, file_name in test_s3_files:
with open(file_name, "rb") as f:
@@ -175,7 +183,9 @@ def s3_private_bucket(s3_resource):
@pytest.fixture
-def s3_private_bucket_with_data(s3_private_bucket, tips_file, jsonl_file, feather_file):
+def s3_private_bucket_with_data(
+ s3_private_bucket, tips_file, jsonl_file, feather_file, xml_file
+):
"""
The following datasets
are loaded.
@@ -192,6 +202,7 @@ def s3_private_bucket_with_data(s3_private_bucket, tips_file, jsonl_file, feathe
("tips.csv.bz2", tips_file + ".bz2"),
("items.jsonl", jsonl_file),
("simple_dataset.feather", feather_file),
+ ("books.xml", xml_file),
]
for s3_key, file_name in test_s3_files:
with open(file_name, "rb") as f:
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index 88f55145b599a..f507314928784 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -876,19 +876,11 @@ def test_corrupt_bytes_raises(self, engine):
pd.read_excel(bad_stream)
@pytest.mark.network
- @tm.network(
- url=(
- "https://raw.githubusercontent.com/pandas-dev/pandas/main/"
- "pandas/tests/io/data/excel/test1.xlsx"
- ),
- check_before_test=True,
- )
- def test_read_from_http_url(self, read_ext):
- url = (
- "https://raw.githubusercontent.com/pandas-dev/pandas/main/"
- "pandas/tests/io/data/excel/test1" + read_ext
- )
- url_table = pd.read_excel(url)
+ @pytest.mark.single_cpu
+ def test_read_from_http_url(self, httpserver, read_ext):
+ with open("test1" + read_ext, "rb") as f:
+ httpserver.serve_content(content=f.read())
+ url_table = pd.read_excel(httpserver.url)
local_table = pd.read_excel("test1" + read_ext)
tm.assert_frame_equal(url_table, local_table)
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 90c48012ccac9..b6b21f9962876 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -998,10 +998,7 @@ def test_round_trip_exception(self, datapath):
tm.assert_frame_equal(res, df)
@pytest.mark.network
- @tm.network(
- url="https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5",
- check_before_test=True,
- )
+ @pytest.mark.single_cpu
@pytest.mark.parametrize(
"field,dtype",
[
@@ -1010,9 +1007,10 @@ def test_round_trip_exception(self, datapath):
["updated_at", pd.DatetimeTZDtype(tz="UTC")],
],
)
- def test_url(self, field, dtype):
- url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5"
- result = read_json(url, convert_dates=True)
+ def test_url(self, field, dtype, httpserver):
+ data = '{"created_at": ["2023-06-23T18:21:36Z"], "closed_at": ["2023-06-23T18:21:36"], "updated_at": ["2023-06-23T18:21:36Z"]}\n' # noqa: E501
+ httpserver.serve_content(content=data)
+ result = read_json(httpserver.url, convert_dates=True)
assert result[field].dtype == dtype
def test_timedelta(self):
diff --git a/pandas/tests/io/parser/common/test_file_buffer_url.py b/pandas/tests/io/parser/common/test_file_buffer_url.py
index ba196a532adf6..5ee629947db48 100644
--- a/pandas/tests/io/parser/common/test_file_buffer_url.py
+++ b/pandas/tests/io/parser/common/test_file_buffer_url.py
@@ -28,24 +28,17 @@
@pytest.mark.network
-@tm.network(
- url=(
- "https://raw.githubusercontent.com/pandas-dev/pandas/main/"
- "pandas/tests/io/parser/data/salaries.csv"
- ),
- check_before_test=True,
-)
-def test_url(all_parsers, csv_dir_path):
+@pytest.mark.single_cpu
+def test_url(all_parsers, csv_dir_path, httpserver):
parser = all_parsers
kwargs = {"sep": "\t"}
- url = (
- "https://raw.githubusercontent.com/pandas-dev/pandas/main/"
- "pandas/tests/io/parser/data/salaries.csv"
- )
- url_result = parser.read_csv(url, **kwargs)
-
local_path = os.path.join(csv_dir_path, "salaries.csv")
+ with open(local_path, encoding="utf-8") as f:
+ httpserver.serve_content(content=f.read())
+
+ url_result = parser.read_csv(httpserver.url, **kwargs)
+
local_result = parser.read_csv(local_path, **kwargs)
tm.assert_frame_equal(url_result, local_result)
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index d8c58649984fa..ba0307cf5111e 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -22,27 +22,28 @@
@pytest.mark.network
-@tm.network(
- url=(
- "https://github.com/pandas-dev/pandas/raw/main/"
- "pandas/tests/io/parser/data/salaries.csv"
- ),
- check_before_test=True,
-)
+@pytest.mark.single_cpu
@pytest.mark.parametrize("mode", ["explicit", "infer"])
@pytest.mark.parametrize("engine", ["python", "c"])
def test_compressed_urls(
- salaries_table, mode, engine, compression_only, compression_to_extension
+ httpserver,
+ datapath,
+ salaries_table,
+ mode,
+ engine,
+ compression_only,
+ compression_to_extension,
):
# test reading compressed urls with various engines and
# extension inference
+ if compression_only == "tar":
+        pytest.skip("TODO: Add tar salaries.csv to pandas/io/parsers/data")
+
extension = compression_to_extension[compression_only]
- base_url = (
- "https://github.com/pandas-dev/pandas/raw/main/"
- "pandas/tests/io/parser/data/salaries.csv"
- )
+ with open(datapath("io", "parser", "data", "salaries.csv" + extension), "rb") as f:
+ httpserver.serve_content(content=f.read())
- url = base_url + extension
+ url = httpserver.url + "/salaries.csv" + extension
if mode != "explicit":
compression_only = mode
@@ -52,24 +53,16 @@ def test_compressed_urls(
@pytest.mark.network
-@tm.network(
- url=(
- "https://raw.githubusercontent.com/pandas-dev/pandas/main/"
- "pandas/tests/io/parser/data/unicode_series.csv"
- ),
- check_before_test=True,
-)
-def test_url_encoding_csv():
+@pytest.mark.single_cpu
+def test_url_encoding_csv(httpserver, datapath):
"""
read_csv should honor the requested encoding for URLs.
GH 10424
"""
- path = (
- "https://raw.githubusercontent.com/pandas-dev/pandas/main/"
- "pandas/tests/io/parser/data/unicode_series.csv"
- )
- df = read_csv(path, encoding="latin-1", header=None)
+ with open(datapath("io", "parser", "data", "unicode_series.csv"), "rb") as f:
+ httpserver.serve_content(content=f.read())
+ df = read_csv(httpserver.url, encoding="latin-1", header=None)
assert df.loc[15, 1] == "Á köldum klaka (Cold Fever) (1994)"
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index c682963c462cc..7f622295472e4 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -1013,47 +1013,19 @@ def test_invalid_dtype_backend():
@pytest.mark.network
-@tm.network(
- url="ftp://ftp.ncdc.noaa.gov/pub/data/igra/igra2-station-list.txt",
- check_before_test=True,
-)
-def test_url_urlopen():
- expected = pd.Index(
- [
- "CC",
- "Network",
- "Code",
- "StationId",
- "Latitude",
- "Longitude",
- "Elev",
- "dummy",
- "StationName",
- "From",
- "To",
- "Nrec",
- ],
- dtype="object",
- )
- url = "ftp://ftp.ncdc.noaa.gov/pub/data/igra/igra2-station-list.txt"
- with urlopen(url) as f:
- result = read_fwf(
- f,
- widths=(2, 1, 3, 5, 9, 10, 7, 4, 30, 5, 5, 7),
- names=(
- "CC",
- "Network",
- "Code",
- "StationId",
- "Latitude",
- "Longitude",
- "Elev",
- "dummy",
- "StationName",
- "From",
- "To",
- "Nrec",
- ),
- ).columns
+@pytest.mark.single_cpu
+def test_url_urlopen(httpserver):
+ data = """\
+A B C D
+201158 360.242940 149.910199 11950.7
+201159 444.953632 166.985655 11788.4
+201160 364.136849 183.628767 11806.2
+201161 413.836124 184.375703 11916.8
+201162 502.953953 173.237159 12468.3
+"""
+ httpserver.serve_content(content=data)
+ expected = pd.Index(list("ABCD"))
+ with urlopen(httpserver.url) as f:
+ result = read_fwf(f).columns
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index 7b139dc45624e..9de097fe8c0e6 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -142,21 +142,13 @@ def test_passthrough_keywords(self):
self.check_round_trip(df, write_kwargs={"version": 1})
@pytest.mark.network
- @tm.network(
- url=(
- "https://raw.githubusercontent.com/pandas-dev/pandas/main/"
- "pandas/tests/io/data/feather/feather-0_3_1.feather"
- ),
- check_before_test=True,
- )
- def test_http_path(self, feather_file):
+ @pytest.mark.single_cpu
+ def test_http_path(self, feather_file, httpserver):
# GH 29055
- url = (
- "https://raw.githubusercontent.com/pandas-dev/pandas/main/"
- "pandas/tests/io/data/feather/feather-0_3_1.feather"
- )
expected = read_feather(feather_file)
- res = read_feather(url)
+ with open(feather_file, "rb") as f:
+ httpserver.serve_content(content=f.read())
+ res = read_feather(httpserver.url)
tm.assert_frame_equal(expected, res)
def test_read_feather_dtype_backend(self, string_storage, dtype_backend):
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 5c6c33de5ac5f..d17e4b08b5a4d 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -35,7 +35,6 @@
)
from pandas.io.common import file_path_to_url
-import pandas.io.html
@pytest.fixture(
@@ -193,43 +192,30 @@ def test_dtype_backend(self, string_storage, dtype_backend):
tm.assert_frame_equal(result, expected)
@pytest.mark.network
- @tm.network(
- url=(
- "https://www.fdic.gov/resources/resolutions/"
- "bank-failures/failed-bank-list/index.html"
- ),
- check_before_test=True,
- )
- def test_banklist_url(self):
- url = "https://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html" # noqa: E501
- df1 = self.read_html(
+ @pytest.mark.single_cpu
+ def test_banklist_url(self, httpserver, banklist_data):
+ with open(banklist_data, encoding="utf-8") as f:
+ httpserver.serve_content(content=f.read())
+ df1 = self.read_html(
+ # lxml cannot find attrs leave out for now
+ httpserver.url,
+ match="First Federal Bank of Florida", # attrs={"class": "dataTable"}
+ )
# lxml cannot find attrs leave out for now
- url,
- match="First Federal Bank of Florida", # attrs={"class": "dataTable"}
- )
- # lxml cannot find attrs leave out for now
- df2 = self.read_html(
- url,
- match="Metcalf Bank",
- ) # attrs={"class": "dataTable"})
+ df2 = self.read_html(
+ httpserver.url,
+ match="Metcalf Bank",
+ ) # attrs={"class": "dataTable"})
assert_framelist_equal(df1, df2)
@pytest.mark.network
- @tm.network(
- url=(
- "https://raw.githubusercontent.com/pandas-dev/pandas/main/"
- "pandas/tests/io/data/html/spam.html"
- ),
- check_before_test=True,
- )
- def test_spam_url(self):
- url = (
- "https://raw.githubusercontent.com/pandas-dev/pandas/main/"
- "pandas/tests/io/data/html/spam.html"
- )
- df1 = self.read_html(url, match=".*Water.*")
- df2 = self.read_html(url, match="Unit")
+ @pytest.mark.single_cpu
+ def test_spam_url(self, httpserver, spam_data):
+ with open(spam_data, encoding="utf-8") as f:
+ httpserver.serve_content(content=f.read())
+ df1 = self.read_html(httpserver.url, match=".*Water.*")
+ df2 = self.read_html(httpserver.url, match="Unit")
assert_framelist_equal(df1, df2)
@@ -366,21 +352,19 @@ def test_file_like(self, spam_data):
assert_framelist_equal(df1, df2)
@pytest.mark.network
- @tm.network
- def test_bad_url_protocol(self):
+ @pytest.mark.single_cpu
+ def test_bad_url_protocol(self, httpserver):
+ httpserver.serve_content("urlopen error unknown url type: git", code=404)
with pytest.raises(URLError, match="urlopen error unknown url type: git"):
self.read_html("git://github.com", match=".*Water.*")
@pytest.mark.slow
@pytest.mark.network
- @tm.network
- def test_invalid_url(self):
- msg = (
- "Name or service not known|Temporary failure in name resolution|"
- "No tables found"
- )
- with pytest.raises((URLError, ValueError), match=msg):
- self.read_html("http://www.a23950sdfa908sd.com", match=".*Water.*")
+ @pytest.mark.single_cpu
+ def test_invalid_url(self, httpserver):
+ httpserver.serve_content("Name or service not known", code=404)
+ with pytest.raises((URLError, ValueError), match="HTTP Error 404: NOT FOUND"):
+ self.read_html(httpserver.url, match=".*Water.*")
@pytest.mark.slow
def test_file_url(self, banklist_data):
@@ -454,20 +438,69 @@ def test_negative_skiprows(self, spam_data):
with pytest.raises(ValueError, match=msg):
self.read_html(spam_data, match="Water", skiprows=-1)
+ @pytest.fixture
+ def python_docs(self):
+ return """
+ <table class="contentstable" align="center"><tr>
+ <td width="50%">
+ <p class="biglink"><a class="biglink" href="whatsnew/2.7.html">What's new in Python 2.7?</a><br/>
+ <span class="linkdescr">or <a href="whatsnew/index.html">all "What's new" documents</a> since 2.0</span></p>
+ <p class="biglink"><a class="biglink" href="tutorial/index.html">Tutorial</a><br/>
+ <span class="linkdescr">start here</span></p>
+ <p class="biglink"><a class="biglink" href="library/index.html">Library Reference</a><br/>
+ <span class="linkdescr">keep this under your pillow</span></p>
+ <p class="biglink"><a class="biglink" href="reference/index.html">Language Reference</a><br/>
+ <span class="linkdescr">describes syntax and language elements</span></p>
+ <p class="biglink"><a class="biglink" href="using/index.html">Python Setup and Usage</a><br/>
+ <span class="linkdescr">how to use Python on different platforms</span></p>
+ <p class="biglink"><a class="biglink" href="howto/index.html">Python HOWTOs</a><br/>
+ <span class="linkdescr">in-depth documents on specific topics</span></p>
+ </td><td width="50%">
+ <p class="biglink"><a class="biglink" href="installing/index.html">Installing Python Modules</a><br/>
+ <span class="linkdescr">installing from the Python Package Index & other sources</span></p>
+ <p class="biglink"><a class="biglink" href="distributing/index.html">Distributing Python Modules</a><br/>
+ <span class="linkdescr">publishing modules for installation by others</span></p>
+ <p class="biglink"><a class="biglink" href="extending/index.html">Extending and Embedding</a><br/>
+ <span class="linkdescr">tutorial for C/C++ programmers</span></p>
+ <p class="biglink"><a class="biglink" href="c-api/index.html">Python/C API</a><br/>
+ <span class="linkdescr">reference for C/C++ programmers</span></p>
+ <p class="biglink"><a class="biglink" href="faq/index.html">FAQs</a><br/>
+ <span class="linkdescr">frequently asked questions (with answers!)</span></p>
+ </td></tr>
+ </table>
+
+ <p><strong>Indices and tables:</strong></p>
+ <table class="contentstable" align="center"><tr>
+ <td width="50%">
+ <p class="biglink"><a class="biglink" href="py-modindex.html">Python Global Module Index</a><br/>
+ <span class="linkdescr">quick access to all modules</span></p>
+ <p class="biglink"><a class="biglink" href="genindex.html">General Index</a><br/>
+ <span class="linkdescr">all functions, classes, terms</span></p>
+ <p class="biglink"><a class="biglink" href="glossary.html">Glossary</a><br/>
+ <span class="linkdescr">the most important terms explained</span></p>
+ </td><td width="50%">
+ <p class="biglink"><a class="biglink" href="search.html">Search page</a><br/>
+ <span class="linkdescr">search this documentation</span></p>
+ <p class="biglink"><a class="biglink" href="contents.html">Complete Table of Contents</a><br/>
+ <span class="linkdescr">lists all sections and subsections</span></p>
+ </td></tr>
+ </table>
+ """ # noqa: E501
+
@pytest.mark.network
- @tm.network(url="https://docs.python.org/2/", check_before_test=True)
- def test_multiple_matches(self):
- url = "https://docs.python.org/2/"
- dfs = self.read_html(url, match="Python")
+ @pytest.mark.single_cpu
+ def test_multiple_matches(self, python_docs, httpserver):
+ httpserver.serve_content(content=python_docs)
+ dfs = self.read_html(httpserver.url, match="Python")
assert len(dfs) > 1
@pytest.mark.network
- @tm.network(url="https://docs.python.org/2/", check_before_test=True)
- def test_python_docs_table(self):
- url = "https://docs.python.org/2/"
- dfs = self.read_html(url, match="Python")
+ @pytest.mark.single_cpu
+ def test_python_docs_table(self, python_docs, httpserver):
+ httpserver.serve_content(content=python_docs)
+ dfs = self.read_html(httpserver.url, match="Python")
zz = [df.iloc[0, 0][0:4] for df in dfs]
- assert sorted(zz) == sorted(["Repo", "What"])
+ assert sorted(zz) == ["Pyth", "What"]
def test_empty_tables(self):
"""
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index f2ff526a58f99..35bf75d3928f8 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -376,21 +376,13 @@ def check_external_error_on_write(self, df, engine, exc):
to_parquet(df, path, engine, compression=None)
@pytest.mark.network
- @tm.network(
- url=(
- "https://raw.githubusercontent.com/pandas-dev/pandas/"
- "main/pandas/tests/io/data/parquet/simple.parquet"
- ),
- check_before_test=True,
- )
- def test_parquet_read_from_url(self, df_compat, engine):
+ @pytest.mark.single_cpu
+ def test_parquet_read_from_url(self, httpserver, datapath, df_compat, engine):
if engine != "auto":
pytest.importorskip(engine)
- url = (
- "https://raw.githubusercontent.com/pandas-dev/pandas/"
- "main/pandas/tests/io/data/parquet/simple.parquet"
- )
- df = read_parquet(url)
+ with open(datapath("io", "data", "parquet", "simple.parquet"), mode="rb") as f:
+ httpserver.serve_content(content=f.read())
+ df = read_parquet(httpserver.url)
tm.assert_frame_equal(df, df_compat)
diff --git a/pandas/tests/io/test_s3.py b/pandas/tests/io/test_s3.py
index 5171ec04b0bcf..35250f1dd3081 100644
--- a/pandas/tests/io/test_s3.py
+++ b/pandas/tests/io/test_s3.py
@@ -5,7 +5,6 @@
import pandas.util._test_decorators as td
from pandas import read_csv
-import pandas._testing as tm
def test_streaming_s3_objects():
@@ -21,28 +20,31 @@ def test_streaming_s3_objects():
@td.skip_if_no("s3fs")
-@pytest.mark.network
-@tm.network
-def test_read_without_creds_from_pub_bucket():
+@pytest.mark.single_cpu
+def test_read_without_creds_from_pub_bucket(s3_public_bucket_with_data, s3so):
# GH 34626
- # Use Amazon Open Data Registry - https://registry.opendata.aws/gdelt
- result = read_csv("s3://gdelt-open-data/events/1981.csv", nrows=3)
+ result = read_csv(
+ f"s3://{s3_public_bucket_with_data.name}/tips.csv",
+ nrows=3,
+ storage_options=s3so,
+ )
assert len(result) == 3
@td.skip_if_no("s3fs")
-@pytest.mark.network
-@tm.network
-def test_read_with_creds_from_pub_bucket(monkeypatch):
+@pytest.mark.single_cpu
+def test_read_with_creds_from_pub_bucket(s3_public_bucket_with_data, monkeypatch, s3so):
# Ensure we can read from a public bucket with credentials
# GH 34626
- # Use Amazon Open Data Registry - https://registry.opendata.aws/gdelt
# temporary workaround as moto fails for botocore >= 1.11 otherwise,
# see https://github.com/spulec/moto/issues/1924 & 1952
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "foobar_key")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "foobar_secret")
df = read_csv(
- "s3://gdelt-open-data/events/1981.csv", nrows=5, sep="\t", header=None
+ f"s3://{s3_public_bucket_with_data.name}/tips.csv",
+ nrows=5,
+ header=None,
+ storage_options=s3so,
)
assert len(df) == 5
diff --git a/pandas/tests/io/xml/test_xml.py b/pandas/tests/io/xml/test_xml.py
index b0e806caecc80..a3a1646bc4748 100644
--- a/pandas/tests/io/xml/test_xml.py
+++ b/pandas/tests/io/xml/test_xml.py
@@ -14,7 +14,6 @@
import numpy as np
import pytest
-from pandas.compat import is_ci_environment
from pandas.compat._optional import import_optional_dependency
from pandas.errors import (
EmptyDataError,
@@ -297,53 +296,16 @@ def test_parser_consistency_file(xml_books):
@pytest.mark.network
-@pytest.mark.slow
-@tm.network(
- url=(
- "https://data.cityofchicago.org/api/views/"
- "8pix-ypme/rows.xml?accessType=DOWNLOAD"
- ),
- check_before_test=True,
-)
-def test_parser_consistency_url(parser):
- url = (
- "https://data.cityofchicago.org/api/views/"
- "8pix-ypme/rows.xml?accessType=DOWNLOAD"
- )
-
- with tm.ensure_clean(filename="cta.xml") as path:
- (read_xml(url, xpath=".//row/row", parser=parser).to_xml(path, index=False))
+@pytest.mark.single_cpu
+def test_parser_consistency_url(parser, httpserver):
+ httpserver.serve_content(content=xml_default_nmsp)
- df_xpath = read_xml(path, parser=parser)
- df_iter = read_xml(
- path,
- parser=parser,
- iterparse={
- "row": [
- "_id",
- "_uuid",
- "_position",
- "_address",
- "stop_id",
- "direction_id",
- "stop_name",
- "station_name",
- "station_descriptive_name",
- "map_id",
- "ada",
- "red",
- "blue",
- "g",
- "brn",
- "p",
- "pexp",
- "y",
- "pnk",
- "o",
- "location",
- ]
- },
- )
+ df_xpath = read_xml(xml_default_nmsp, parser=parser)
+ df_iter = read_xml(
+ BytesIO(xml_default_nmsp.encode()),
+ parser=parser,
+ iterparse={"row": ["shape", "degrees", "sides"]},
+ )
tm.assert_frame_equal(df_xpath, df_iter)
@@ -520,14 +482,12 @@ def test_wrong_file_path_etree():
@pytest.mark.network
-@tm.network(
- url="https://www.w3schools.com/xml/books.xml",
- check_before_test=True,
-)
+@pytest.mark.single_cpu
@td.skip_if_no("lxml")
-def test_url():
- url = "https://www.w3schools.com/xml/books.xml"
- df_url = read_xml(url, xpath=".//book[count(*)=4]")
+def test_url(httpserver, xml_file):
+ with open(xml_file, encoding="utf-8") as f:
+ httpserver.serve_content(content=f.read())
+ df_url = read_xml(httpserver.url, xpath=".//book[count(*)=4]")
df_expected = DataFrame(
{
@@ -536,7 +496,6 @@ def test_url():
"author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"],
"year": [2005, 2005, 2003],
"price": [30.00, 29.99, 39.95],
- "cover": [None, None, "paperback"],
}
)
@@ -544,11 +503,11 @@ def test_url():
@pytest.mark.network
-@tm.network(url="https://www.w3schools.com/xml/python.xml", check_before_test=True)
-def test_wrong_url(parser):
- with pytest.raises(HTTPError, match=("HTTP Error 404: Not Found")):
- url = "https://www.w3schools.com/xml/python.xml"
- read_xml(url, xpath=".//book[count(*)=4]", parser=parser)
+@pytest.mark.single_cpu
+def test_wrong_url(parser, httpserver):
+ httpserver.serve_content("NOT FOUND", code=404)
+ with pytest.raises(HTTPError, match=("HTTP Error 404: NOT FOUND")):
+ read_xml(httpserver.url, xpath=".//book[count(*)=4]", parser=parser)
# XPATH
@@ -1429,17 +1388,18 @@ def test_file_io_iterparse(xml_books, parser, mode):
@pytest.mark.network
-@tm.network(url="https://www.w3schools.com/xml/books.xml", check_before_test=True)
-def test_url_path_error(parser):
- url = "https://www.w3schools.com/xml/books.xml"
- with pytest.raises(
- ParserError, match=("iterparse is designed for large XML files")
- ):
- read_xml(
- url,
- parser=parser,
- iterparse={"row": ["shape", "degrees", "sides", "date"]},
- )
+@pytest.mark.single_cpu
+def test_url_path_error(parser, httpserver, xml_file):
+ with open(xml_file, encoding="utf-8") as f:
+ httpserver.serve_content(content=f.read())
+ with pytest.raises(
+ ParserError, match=("iterparse is designed for large XML files")
+ ):
+ read_xml(
+ httpserver.url,
+ parser=parser,
+ iterparse={"row": ["shape", "degrees", "sides", "date"]},
+ )
def test_compression_error(parser, compression_only):
@@ -1641,14 +1601,245 @@ def test_empty_data(xml_books, parser):
)
-@pytest.mark.network
@td.skip_if_no("lxml")
-@tm.network(
- url="https://www.w3schools.com/xml/cdcatalog_with_xsl.xml", check_before_test=True
-)
def test_online_stylesheet():
- xml = "https://www.w3schools.com/xml/cdcatalog_with_xsl.xml"
- xsl = "https://www.w3schools.com/xml/cdcatalog.xsl"
+ xml = """\
+<?xml version="1.0" encoding="UTF-8"?>
+<catalog>
+ <cd>
+ <title>Empire Burlesque</title>
+ <artist>Bob Dylan</artist>
+ <country>USA</country>
+ <company>Columbia</company>
+ <price>10.90</price>
+ <year>1985</year>
+ </cd>
+ <cd>
+ <title>Hide your heart</title>
+ <artist>Bonnie Tyler</artist>
+ <country>UK</country>
+ <company>CBS Records</company>
+ <price>9.90</price>
+ <year>1988</year>
+ </cd>
+ <cd>
+ <title>Greatest Hits</title>
+ <artist>Dolly Parton</artist>
+ <country>USA</country>
+ <company>RCA</company>
+ <price>9.90</price>
+ <year>1982</year>
+ </cd>
+ <cd>
+ <title>Still got the blues</title>
+ <artist>Gary Moore</artist>
+ <country>UK</country>
+ <company>Virgin records</company>
+ <price>10.20</price>
+ <year>1990</year>
+ </cd>
+ <cd>
+ <title>Eros</title>
+ <artist>Eros Ramazzotti</artist>
+ <country>EU</country>
+ <company>BMG</company>
+ <price>9.90</price>
+ <year>1997</year>
+ </cd>
+ <cd>
+ <title>One night only</title>
+ <artist>Bee Gees</artist>
+ <country>UK</country>
+ <company>Polydor</company>
+ <price>10.90</price>
+ <year>1998</year>
+ </cd>
+ <cd>
+ <title>Sylvias Mother</title>
+ <artist>Dr.Hook</artist>
+ <country>UK</country>
+ <company>CBS</company>
+ <price>8.10</price>
+ <year>1973</year>
+ </cd>
+ <cd>
+ <title>Maggie May</title>
+ <artist>Rod Stewart</artist>
+ <country>UK</country>
+ <company>Pickwick</company>
+ <price>8.50</price>
+ <year>1990</year>
+ </cd>
+ <cd>
+ <title>Romanza</title>
+ <artist>Andrea Bocelli</artist>
+ <country>EU</country>
+ <company>Polydor</company>
+ <price>10.80</price>
+ <year>1996</year>
+ </cd>
+ <cd>
+ <title>When a man loves a woman</title>
+ <artist>Percy Sledge</artist>
+ <country>USA</country>
+ <company>Atlantic</company>
+ <price>8.70</price>
+ <year>1987</year>
+ </cd>
+ <cd>
+ <title>Black angel</title>
+ <artist>Savage Rose</artist>
+ <country>EU</country>
+ <company>Mega</company>
+ <price>10.90</price>
+ <year>1995</year>
+ </cd>
+ <cd>
+ <title>1999 Grammy Nominees</title>
+ <artist>Many</artist>
+ <country>USA</country>
+ <company>Grammy</company>
+ <price>10.20</price>
+ <year>1999</year>
+ </cd>
+ <cd>
+ <title>For the good times</title>
+ <artist>Kenny Rogers</artist>
+ <country>UK</country>
+ <company>Mucik Master</company>
+ <price>8.70</price>
+ <year>1995</year>
+ </cd>
+ <cd>
+ <title>Big Willie style</title>
+ <artist>Will Smith</artist>
+ <country>USA</country>
+ <company>Columbia</company>
+ <price>9.90</price>
+ <year>1997</year>
+ </cd>
+ <cd>
+ <title>Tupelo Honey</title>
+ <artist>Van Morrison</artist>
+ <country>UK</country>
+ <company>Polydor</company>
+ <price>8.20</price>
+ <year>1971</year>
+ </cd>
+ <cd>
+ <title>Soulsville</title>
+ <artist>Jorn Hoel</artist>
+ <country>Norway</country>
+ <company>WEA</company>
+ <price>7.90</price>
+ <year>1996</year>
+ </cd>
+ <cd>
+ <title>The very best of</title>
+ <artist>Cat Stevens</artist>
+ <country>UK</country>
+ <company>Island</company>
+ <price>8.90</price>
+ <year>1990</year>
+ </cd>
+ <cd>
+ <title>Stop</title>
+ <artist>Sam Brown</artist>
+ <country>UK</country>
+ <company>A and M</company>
+ <price>8.90</price>
+ <year>1988</year>
+ </cd>
+ <cd>
+ <title>Bridge of Spies</title>
+ <artist>T`Pau</artist>
+ <country>UK</country>
+ <company>Siren</company>
+ <price>7.90</price>
+ <year>1987</year>
+ </cd>
+ <cd>
+ <title>Private Dancer</title>
+ <artist>Tina Turner</artist>
+ <country>UK</country>
+ <company>Capitol</company>
+ <price>8.90</price>
+ <year>1983</year>
+ </cd>
+ <cd>
+ <title>Midt om natten</title>
+ <artist>Kim Larsen</artist>
+ <country>EU</country>
+ <company>Medley</company>
+ <price>7.80</price>
+ <year>1983</year>
+ </cd>
+ <cd>
+ <title>Pavarotti Gala Concert</title>
+ <artist>Luciano Pavarotti</artist>
+ <country>UK</country>
+ <company>DECCA</company>
+ <price>9.90</price>
+ <year>1991</year>
+ </cd>
+ <cd>
+ <title>The dock of the bay</title>
+ <artist>Otis Redding</artist>
+ <country>USA</country>
+ <COMPANY>Stax Records</COMPANY>
+ <PRICE>7.90</PRICE>
+ <YEAR>1968</YEAR>
+ </cd>
+ <cd>
+ <title>Picture book</title>
+ <artist>Simply Red</artist>
+ <country>EU</country>
+ <company>Elektra</company>
+ <price>7.20</price>
+ <year>1985</year>
+ </cd>
+ <cd>
+ <title>Red</title>
+ <artist>The Communards</artist>
+ <country>UK</country>
+ <company>London</company>
+ <price>7.80</price>
+ <year>1987</year>
+ </cd>
+ <cd>
+ <title>Unchain my heart</title>
+ <artist>Joe Cocker</artist>
+ <country>USA</country>
+ <company>EMI</company>
+ <price>8.20</price>
+ <year>1987</year>
+ </cd>
+</catalog>
+"""
+ xsl = """\
+<?xml version="1.0" encoding="UTF-8"?>
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+<xsl:template match="/">
+<html>
+<body>
+ <h2>My CD Collection</h2>
+ <table border="1">
+ <tr bgcolor="#9acd32">
+ <th style="text-align:left">Title</th>
+ <th style="text-align:left">Artist</th>
+ </tr>
+ <xsl:for-each select="catalog/cd">
+ <tr>
+ <td><xsl:value-of select="title"/></td>
+ <td><xsl:value-of select="artist"/></td>
+ </tr>
+ </xsl:for-each>
+ </table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>
+"""
df_xsl = read_xml(
xml,
@@ -1740,32 +1931,15 @@ def test_unsuported_compression(parser):
@pytest.mark.network
+@pytest.mark.single_cpu
@td.skip_if_no("s3fs")
@td.skip_if_no("lxml")
-@pytest.mark.skipif(
- is_ci_environment(),
- reason="2022.1.17: Hanging on the CI min versions build.",
-)
-@tm.network
-def test_s3_parser_consistency():
- # Python Software Foundation (2019 IRS-990 RETURN)
- s3 = "s3://irs-form-990/201923199349319487_public.xml"
+def test_s3_parser_consistency(s3_public_bucket_with_data, s3so):
+ s3 = f"s3://{s3_public_bucket_with_data.name}/books.xml"
- df_lxml = read_xml(
- s3,
- xpath=".//irs:Form990PartVIISectionAGrp",
- namespaces={"irs": "http://www.irs.gov/efile"},
- parser="lxml",
- storage_options={"anon": True},
- )
+ df_lxml = read_xml(s3, parser="lxml", storage_options=s3so)
- df_etree = read_xml(
- s3,
- xpath=".//irs:Form990PartVIISectionAGrp",
- namespaces={"irs": "http://www.irs.gov/efile"},
- parser="etree",
- storage_options={"anon": True},
- )
+ df_etree = read_xml(s3, parser="etree", storage_options=s3so)
tm.assert_frame_equal(df_lxml, df_etree)
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index 0f5fdbefd13d2..7354e313e24f4 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -139,14 +139,13 @@ def test_oo_optimized_datetime_index_unpickle():
)
-@pytest.mark.network
-@tm.network
def test_statsmodels():
statsmodels = import_module("statsmodels") # noqa: F841
- import statsmodels.api as sm
import statsmodels.formula.api as smf
- df = sm.datasets.get_rdataset("Guerry", "HistData").data
+ df = DataFrame(
+ {"Lottery": range(5), "Literacy": range(5), "Pop1831": range(100, 105)}
+ )
smf.ols("Lottery ~ Literacy + np.log(Pop1831)", data=df).fit()
@@ -163,11 +162,11 @@ def test_scikit_learn():
clf.predict(digits.data[-1:])
-@pytest.mark.network
-@tm.network
def test_seaborn():
seaborn = import_module("seaborn")
- tips = seaborn.load_dataset("tips")
+ tips = DataFrame(
+ {"day": pd.date_range("2023", freq="D", periods=5), "total_bill": range(5)}
+ )
seaborn.stripplot(x="day", y="total_bill", data=tips)
@@ -177,16 +176,8 @@ def test_pandas_gbq():
pandas_gbq = import_module("pandas_gbq") # noqa: F841
-@pytest.mark.network
-@tm.network
-@pytest.mark.xfail(
- raises=ValueError,
- reason="The Quandl API key must be provided either through the api_key "
- "variable or through the environmental variable QUANDL_API_KEY",
-)
def test_pandas_datareader():
- pandas_datareader = import_module("pandas_datareader")
- pandas_datareader.DataReader("F", "quandl", "2017-01-01", "2017-02-01")
+ pandas_datareader = import_module("pandas_datareader") # noqa: F841
def test_pyarrow(df):
diff --git a/pyproject.toml b/pyproject.toml
index 0d1bca886a638..ef257b3143598 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -483,6 +483,7 @@ filterwarnings = [
"ignore:a closed node found in the registry:UserWarning:tables",
"ignore:`np.object` is a deprecated:DeprecationWarning:tables",
"ignore:tostring:DeprecationWarning:tables",
+ "ignore:distutils Version classes are deprecated:DeprecationWarning:pandas_datareader",
"ignore:distutils Version classes are deprecated:DeprecationWarning:numexpr",
"ignore:distutils Version classes are deprecated:DeprecationWarning:fastparquet",
"ignore:distutils Version classes are deprecated:DeprecationWarning:fsspec",
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 38a2ce7f66aa3..b1d8ce1cf2143 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -10,6 +10,7 @@ pytest>=7.3.2
pytest-cov
pytest-xdist>=2.2.0
pytest-asyncio>=0.17.0
+pytest-localserver>=0.7.1
coverage
python-dateutil
numpy
| Since we have a lot of CI jobs with unit test that make network connections, it is better if the network connections could be made locally if possible. This PR refactors all tests that make network connections to use `pytest-localserver` instead | https://api.github.com/repos/pandas-dev/pandas/pulls/53828 | 2023-06-24T02:16:48Z | 2023-06-27T16:58:07Z | 2023-06-27T16:58:06Z | 2023-07-04T07:37:08Z |
REF: implement PandasArray.pad_or_backfill | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index ea085b3d1f6ab..48c9305cf2ccc 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -2235,7 +2235,7 @@ def interpolate(
*,
method,
axis: int,
- index: Index | None,
+ index: Index,
limit,
limit_direction,
limit_area,
@@ -2255,7 +2255,7 @@ def interpolate(
else:
out_data = self._ndarray.copy()
- missing.interpolate_array_2d(
+ missing.interpolate_2d_inplace(
out_data,
method=method,
axis=axis,
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 113f22ad968bc..6d60657c3100b 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -1,6 +1,9 @@
from __future__ import annotations
-from typing import TYPE_CHECKING
+from typing import (
+ TYPE_CHECKING,
+ Literal,
+)
import numpy as np
@@ -32,6 +35,7 @@
from pandas._typing import (
AxisInt,
Dtype,
+ FillnaOptions,
NpDtype,
Scalar,
Self,
@@ -224,12 +228,42 @@ def _values_for_factorize(self) -> tuple[np.ndarray, float | None]:
fv = np.nan
return self._ndarray, fv
+ def pad_or_backfill(
+ self,
+ *,
+ method: FillnaOptions,
+ axis: int,
+ limit: int | None,
+ limit_area: Literal["inside", "outside"] | None = None,
+ copy: bool = True,
+ ) -> Self:
+ """
+ ffill or bfill
+ """
+ if copy:
+ out_data = self._ndarray.copy()
+ else:
+ out_data = self._ndarray
+
+ meth = missing.clean_fill_method(method)
+ missing.pad_or_backfill_inplace(
+ out_data,
+ method=meth,
+ axis=axis,
+ limit=limit,
+ limit_area=limit_area,
+ )
+
+ if not copy:
+ return self
+ return type(self)._simple_new(out_data, dtype=self.dtype)
+
def interpolate(
self,
*,
method,
axis: int,
- index: Index | None,
+ index: Index,
limit,
limit_direction,
limit_area,
@@ -246,7 +280,8 @@ def interpolate(
else:
out_data = self._ndarray.copy()
- missing.interpolate_array_2d(
+ # TODO: assert we have floating dtype?
+ missing.interpolate_2d_inplace(
out_data,
method=method,
axis=axis,
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 1872176394d02..aba6811c5eeb7 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -79,7 +79,7 @@
check_array_indexer,
unpack_tuple_and_ellipses,
)
-from pandas.core.missing import interpolate_2d
+from pandas.core.missing import pad_or_backfill_inplace
from pandas.core.nanops import check_below_min_count
from pandas.io.formats import printing
@@ -764,11 +764,11 @@ def fillna(
stacklevel=find_stack_level(),
)
new_values = np.asarray(self)
- # interpolate_2d modifies new_values inplace
- # error: Argument "method" to "interpolate_2d" has incompatible type
- # "Literal['backfill', 'bfill', 'ffill', 'pad']"; expected
+ # pad_or_backfill_inplace modifies new_values inplace
+ # error: Argument "method" to "pad_or_backfill_inplace" has incompatible
+ # type "Literal['backfill', 'bfill', 'ffill', 'pad']"; expected
# "Literal['pad', 'backfill']"
- interpolate_2d(
+ pad_or_backfill_inplace(
new_values, method=method, limit=limit # type: ignore[arg-type]
)
return type(self)(new_values, fill_value=self.fill_value)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index e61d233a0ae84..ab5ad8148b063 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1352,7 +1352,7 @@ def interpolate(
inplace: bool = False,
limit: int | None = None,
limit_direction: Literal["forward", "backward", "both"] = "forward",
- limit_area: str | None = None,
+ limit_area: Literal["inside", "outside"] | None = None,
fill_value: Any | None = None,
downcast: Literal["infer"] | None = None,
using_cow: bool = False,
@@ -1410,17 +1410,32 @@ def interpolate(
# Dispatch to the PandasArray method.
# We know self.array_values is a PandasArray bc EABlock overrides
- new_values = cast(PandasArray, self.array_values).interpolate(
- method=method,
- axis=axis,
- index=index,
- limit=limit,
- limit_direction=limit_direction,
- limit_area=limit_area,
- fill_value=fill_value,
- inplace=arr_inplace,
- **kwargs,
- )
+ if m is not None:
+ if fill_value is not None:
+ # similar to validate_fillna_kwargs
+ raise ValueError("Cannot pass both fill_value and method")
+
+ # TODO: warn about ignored kwargs, limit_direction, index...?
+ new_values = cast(PandasArray, self.array_values).pad_or_backfill(
+ method=method,
+ axis=axis,
+ limit=limit,
+ limit_area=limit_area,
+ copy=not arr_inplace,
+ )
+ else:
+ assert index is not None # for mypy
+ new_values = cast(PandasArray, self.array_values).interpolate(
+ method=method,
+ axis=axis,
+ index=index,
+ limit=limit,
+ limit_direction=limit_direction,
+ limit_area=limit_area,
+ fill_value=fill_value,
+ inplace=arr_inplace,
+ **kwargs,
+ )
data = new_values._ndarray
nb = self.make_block_same_class(data, refs=refs)
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 2e05375ca85e7..58b0e2907b8ce 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -302,60 +302,7 @@ def get_interp_index(method, index: Index) -> Index:
return index
-def interpolate_array_2d(
- data: np.ndarray,
- method: str = "pad",
- axis: AxisInt = 0,
- index: Index | None = None,
- limit: int | None = None,
- limit_direction: str = "forward",
- limit_area: str | None = None,
- fill_value: Any | None = None,
- **kwargs,
-) -> None:
- """
- Wrapper to dispatch to either interpolate_2d or _interpolate_2d_with_fill.
-
- Notes
- -----
- Alters 'data' in-place.
- """
- try:
- m = clean_fill_method(method)
- except ValueError:
- m = None
-
- if m is not None:
- if fill_value is not None:
- # similar to validate_fillna_kwargs
- raise ValueError("Cannot pass both fill_value and method")
-
- interpolate_2d(
- data,
- method=m,
- axis=axis,
- limit=limit,
- # error: Argument "limit_area" to "interpolate_2d" has incompatible
- # type "Optional[str]"; expected "Optional[Literal['inside', 'outside']]"
- limit_area=limit_area, # type: ignore[arg-type]
- )
- else:
- assert index is not None # for mypy
-
- _interpolate_2d_with_fill(
- data=data,
- index=index,
- axis=axis,
- method=method,
- limit=limit,
- limit_direction=limit_direction,
- limit_area=limit_area,
- fill_value=fill_value,
- **kwargs,
- )
-
-
-def _interpolate_2d_with_fill(
+def interpolate_2d_inplace(
data: np.ndarray, # floating dtype
index: Index,
axis: AxisInt,
@@ -845,7 +792,7 @@ def _interpolate_with_limit_area(
if last is None:
last = len(values)
- interpolate_2d(
+ pad_or_backfill_inplace(
values,
method=method,
limit=limit,
@@ -861,7 +808,7 @@ def _interpolate_with_limit_area(
values[invalid] = np.nan
-def interpolate_2d(
+def pad_or_backfill_inplace(
values: np.ndarray,
method: Literal["pad", "backfill"] = "pad",
axis: AxisInt = 0,
| Goes with (but independent of) #53822 trying to separate out pad_or_backfill from interpolate/fillna.
xref #53621 | https://api.github.com/repos/pandas-dev/pandas/pulls/53827 | 2023-06-23T22:53:11Z | 2023-06-26T17:51:31Z | 2023-06-26T17:51:31Z | 2023-06-26T17:52:45Z |
BUG: DataFrame.stack sometimes sorting the resulting index | diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst
index ee1f1b7be1b86..13a4a4531e6d0 100644
--- a/doc/source/whatsnew/v2.1.0.rst
+++ b/doc/source/whatsnew/v2.1.0.rst
@@ -104,7 +104,6 @@ Other enhancements
- Let :meth:`DataFrame.to_feather` accept a non-default :class:`Index` and non-string column names (:issue:`51787`)
- Performance improvement in :func:`read_csv` (:issue:`52632`) with ``engine="c"``
- :meth:`Categorical.from_codes` has gotten a ``validate`` parameter (:issue:`50975`)
-- :meth:`DataFrame.stack` gained the ``sort`` keyword to dictate whether the resulting :class:`MultiIndex` levels are sorted (:issue:`15105`)
- :meth:`DataFrame.unstack` gained the ``sort`` keyword to dictate whether the resulting :class:`MultiIndex` levels are sorted (:issue:`15105`)
- :meth:`DataFrameGroupby.agg` and :meth:`DataFrameGroupby.transform` now support grouping by multiple keys when the index is not a :class:`MultiIndex` for ``engine="numba"`` (:issue:`53486`)
- :meth:`Series.explode` now supports pyarrow-backed list types (:issue:`53602`)
@@ -501,7 +500,8 @@ Reshaping
- Bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax`, where the axis dtype would be lost for empty frames (:issue:`53265`)
- Bug in :meth:`DataFrame.merge` not merging correctly when having ``MultiIndex`` with single level (:issue:`52331`)
- Bug in :meth:`DataFrame.stack` losing extension dtypes when columns is a :class:`MultiIndex` and frame contains mixed dtypes (:issue:`45740`)
-- Bug in :meth:`DataFrame.stack` sorting columns lexicographically (:issue:`53786`)
+- Bug in :meth:`DataFrame.stack` sorting columns lexicographically in rare cases (:issue:`53786`)
+- Bug in :meth:`DataFrame.stack` sorting index lexicographically in rare cases (:issue:`53824`)
- Bug in :meth:`DataFrame.transpose` inferring dtype for object column (:issue:`51546`)
- Bug in :meth:`Series.combine_first` converting ``int64`` dtype to ``float64`` and losing precision on very large integers (:issue:`51764`)
-
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index c7e8f74ff7849..d12c07b3caca4 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9010,7 +9010,7 @@ def pivot_table(
sort=sort,
)
- def stack(self, level: IndexLabel = -1, dropna: bool = True, sort: bool = True):
+ def stack(self, level: IndexLabel = -1, dropna: bool = True):
"""
Stack the prescribed level(s) from columns to index.
@@ -9036,8 +9036,6 @@ def stack(self, level: IndexLabel = -1, dropna: bool = True, sort: bool = True):
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
- sort : bool, default True
- Whether to sort the levels of the resulting MultiIndex.
Returns
-------
@@ -9137,15 +9135,15 @@ def stack(self, level: IndexLabel = -1, dropna: bool = True, sort: bool = True):
>>> df_multi_level_cols2.stack(0)
kg m
- cat height NaN 2.0
- weight 1.0 NaN
- dog height NaN 4.0
- weight 3.0 NaN
+ cat weight 1.0 NaN
+ height NaN 2.0
+ dog weight 3.0 NaN
+ height NaN 4.0
>>> df_multi_level_cols2.stack([0, 1])
- cat height m 2.0
- weight kg 1.0
- dog height m 4.0
- weight kg 3.0
+ cat weight kg 1.0
+ height m 2.0
+ dog weight kg 3.0
+ height m 4.0
dtype: float64
**Dropping missing values**
@@ -9181,9 +9179,9 @@ def stack(self, level: IndexLabel = -1, dropna: bool = True, sort: bool = True):
)
if isinstance(level, (tuple, list)):
- result = stack_multiple(self, level, dropna=dropna, sort=sort)
+ result = stack_multiple(self, level, dropna=dropna)
else:
- result = stack(self, level, dropna=dropna, sort=sort)
+ result = stack(self, level, dropna=dropna)
return result.__finalize__(self, method="stack")
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 5deaa41e2f63c..b0c74745511c4 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -1,6 +1,5 @@
from __future__ import annotations
-import itertools
from typing import (
TYPE_CHECKING,
cast,
@@ -499,7 +498,7 @@ def unstack(obj: Series | DataFrame, level, fill_value=None, sort: bool = True):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value, sort=sort)
else:
- return obj.T.stack(dropna=False, sort=sort)
+ return obj.T.stack(dropna=False)
elif not isinstance(obj.index, MultiIndex):
# GH 36113
# Give nicer error messages when unstack a Series whose
@@ -572,7 +571,7 @@ def _unstack_extension_series(
return result
-def stack(frame: DataFrame, level=-1, dropna: bool = True, sort: bool = True):
+def stack(frame: DataFrame, level=-1, dropna: bool = True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
@@ -594,9 +593,7 @@ def factorize(index):
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
- return _stack_multi_columns(
- frame, level_num=level_num, dropna=dropna, sort=sort
- )
+ return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_codes = [lab.repeat(K) for lab in frame.index.codes]
@@ -649,13 +646,13 @@ def factorize(index):
return frame._constructor_sliced(new_values, index=new_index)
-def stack_multiple(frame: DataFrame, level, dropna: bool = True, sort: bool = True):
+def stack_multiple(frame: DataFrame, level, dropna: bool = True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
- result = stack(result, lev, dropna=dropna, sort=sort)
+ result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
@@ -668,7 +665,7 @@ def stack_multiple(frame: DataFrame, level, dropna: bool = True, sort: bool = Tr
while level:
lev = level.pop(0)
- result = stack(result, lev, dropna=dropna, sort=sort)
+ result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
level = [v if v <= lev else v - 1 for v in level]
@@ -694,7 +691,14 @@ def _stack_multi_column_index(columns: MultiIndex) -> MultiIndex:
# Remove duplicate tuples in the MultiIndex.
tuples = zip(*levs)
- unique_tuples = (key for key, _ in itertools.groupby(tuples))
+ seen = set()
+ # mypy doesn't like our trickery to get `set.add` to work in a comprehension
+ # error: "add" of "set" does not return a value
+ unique_tuples = (
+ key
+ for key in tuples
+ if not (key in seen or seen.add(key)) # type: ignore[func-returns-value]
+ )
new_levs = zip(*unique_tuples)
# The dtype of each level must be explicitly set to avoid inferring the wrong type.
@@ -710,7 +714,7 @@ def _stack_multi_column_index(columns: MultiIndex) -> MultiIndex:
def _stack_multi_columns(
- frame: DataFrame, level_num: int = -1, dropna: bool = True, sort: bool = True
+ frame: DataFrame, level_num: int = -1, dropna: bool = True
) -> DataFrame:
def _convert_level_number(level_num: int, columns: Index):
"""
@@ -740,23 +744,12 @@ def _convert_level_number(level_num: int, columns: Index):
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = mi_cols = roll_columns
- if not mi_cols._is_lexsorted() and sort:
- # Workaround the edge case where 0 is one of the column names,
- # which interferes with trying to sort based on the first
- # level
- level_to_sort = _convert_level_number(0, mi_cols)
- this = this.sort_index(level=level_to_sort, axis=1)
- mi_cols = this.columns
-
- mi_cols = cast(MultiIndex, mi_cols)
new_columns = _stack_multi_column_index(mi_cols)
# time to ravel the values
new_data = {}
level_vals = mi_cols.levels[-1]
level_codes = unique(mi_cols.codes[-1])
- if sort:
- level_codes = np.sort(level_codes)
level_vals_nan = level_vals.insert(len(level_vals), None)
level_vals_used = np.take(level_vals_nan, level_codes)
@@ -764,7 +757,9 @@ def _convert_level_number(level_num: int, columns: Index):
drop_cols = []
for key in new_columns:
try:
- loc = this.columns.get_loc(key)
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", PerformanceWarning)
+ loc = this.columns.get_loc(key)
except KeyError:
drop_cols.append(key)
continue
@@ -774,9 +769,12 @@ def _convert_level_number(level_num: int, columns: Index):
# but if unsorted can get a boolean
# indexer
if not isinstance(loc, slice):
- slice_len = len(loc)
+ slice_len = loc.sum()
else:
slice_len = loc.stop - loc.start
+ if loc.step is not None:
+ # Integer division using ceiling instead of floor
+ slice_len = -(slice_len // -loc.step)
if slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py
index a48728a778877..ffdcb06ee2847 100644
--- a/pandas/tests/frame/test_stack_unstack.py
+++ b/pandas/tests/frame/test_stack_unstack.py
@@ -1099,7 +1099,7 @@ def test_stack_preserve_categorical_dtype(self, ordered, labels):
"labels,data",
[
(list("xyz"), [10, 11, 12, 13, 14, 15]),
- (list("zyx"), [14, 15, 12, 13, 10, 11]),
+ (list("zyx"), [10, 11, 12, 13, 14, 15]),
],
)
def test_stack_multi_preserve_categorical_dtype(self, ordered, labels, data):
@@ -1107,10 +1107,10 @@ def test_stack_multi_preserve_categorical_dtype(self, ordered, labels, data):
cidx = pd.CategoricalIndex(labels, categories=sorted(labels), ordered=ordered)
cidx2 = pd.CategoricalIndex(["u", "v"], ordered=ordered)
midx = MultiIndex.from_product([cidx, cidx2])
- df = DataFrame([sorted(data)], columns=midx)
+ df = DataFrame([data], columns=midx)
result = df.stack([0, 1])
- s_cidx = pd.CategoricalIndex(sorted(labels), ordered=ordered)
+ s_cidx = pd.CategoricalIndex(labels, ordered=ordered)
expected = Series(data, index=MultiIndex.from_product([[0], s_cidx, cidx2]))
tm.assert_series_equal(result, expected)
@@ -1400,8 +1400,8 @@ def test_unstack_non_slice_like_blocks(using_array_manager):
tm.assert_frame_equal(res, expected)
-def test_stack_sort_false():
- # GH 15105
+def test_stack_nosort():
+ # GH 15105, GH 53825
data = [[1, 2, 3.0, 4.0], [2, 3, 4.0, 5.0], [3, 4, np.nan, np.nan]]
df = DataFrame(
data,
@@ -1409,7 +1409,7 @@ def test_stack_sort_false():
levels=[["B", "A"], ["x", "y"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
- result = df.stack(level=0, sort=False)
+ result = df.stack(level=0)
expected = DataFrame(
{"x": [1.0, 3.0, 2.0, 4.0, 3.0], "y": [2.0, 4.0, 3.0, 5.0, 4.0]},
index=MultiIndex.from_arrays([[0, 0, 1, 1, 2], ["B", "A", "B", "A", "B"]]),
@@ -1421,15 +1421,15 @@ def test_stack_sort_false():
data,
columns=MultiIndex.from_arrays([["B", "B", "A", "A"], ["x", "y", "x", "y"]]),
)
- result = df.stack(level=0, sort=False)
+ result = df.stack(level=0)
tm.assert_frame_equal(result, expected)
-def test_stack_sort_false_multi_level():
- # GH 15105
+def test_stack_nosort_multi_level():
+ # GH 15105, GH 53825
idx = MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
df = DataFrame([[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=idx)
- result = df.stack([0, 1], sort=False)
+ result = df.stack([0, 1])
expected_index = MultiIndex.from_tuples(
[
("cat", "weight", "kg"),
@@ -1999,13 +1999,12 @@ def __init__(self, *args, **kwargs) -> None:
),
)
@pytest.mark.parametrize("stack_lev", range(2))
- @pytest.mark.parametrize("sort", [True, False])
- def test_stack_order_with_unsorted_levels(self, levels, stack_lev, sort):
+ def test_stack_order_with_unsorted_levels(self, levels, stack_lev):
# GH#16323
# deep check for 1-row case
columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(columns=columns, data=[range(4)])
- df_stacked = df.stack(stack_lev, sort=sort)
+ df_stacked = df.stack(stack_lev)
for row in df.index:
for col in df.columns:
expected = df.loc[row, col]
@@ -2037,7 +2036,7 @@ def test_stack_order_with_unsorted_levels_multi_row_2(self):
stack_lev = 1
columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(columns=columns, data=[range(4)], index=[1, 0, 2, 3])
- result = df.stack(stack_lev, sort=True)
+ result = df.stack(stack_lev)
expected_index = MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]],
codes=[[1, 1, 0, 0, 2, 2, 3, 3], [1, 0, 1, 0, 1, 0, 1, 0]],
| - [x] closes #53824 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/53825 | 2023-06-23T21:12:22Z | 2023-06-28T16:10:21Z | 2023-06-28T16:10:21Z | 2023-06-28T18:25:00Z |
Backport PR #53819 on branch 2.0.x (TST/CI: Skipif test_complibs) | diff --git a/pandas/tests/io/pytables/test_file_handling.py b/pandas/tests/io/pytables/test_file_handling.py
index 1a126ad75c01c..623d6e664090f 100644
--- a/pandas/tests/io/pytables/test_file_handling.py
+++ b/pandas/tests/io/pytables/test_file_handling.py
@@ -230,11 +230,11 @@ def test_complibs_default_settings_override(tmp_path, setup_path):
@pytest.mark.parametrize("lvl", range(10))
@pytest.mark.parametrize("lib", tables.filters.all_complibs)
@pytest.mark.filterwarnings("ignore:object name is not a valid")
-@pytest.mark.xfail(
+@pytest.mark.skipif(
not PY311 and is_ci_environment() and is_platform_linux(),
- reason="producing invalid start bytes",
- raises=UnicodeDecodeError,
- strict=False,
+ reason="Segfaulting in a CI environment"
+ # with xfail, would sometimes raise UnicodeDecodeError
+ # invalid state byte
)
def test_complibs(tmp_path, lvl, lib):
# GH14478
| Backport PR #53819: TST/CI: Skipif test_complibs | https://api.github.com/repos/pandas-dev/pandas/pulls/53823 | 2023-06-23T20:05:23Z | 2023-06-23T23:50:48Z | 2023-06-23T23:50:48Z | 2023-06-23T23:50:49Z |
REF: implement Manager.pad_or_backfill | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 0be840f9a4ef1..a8a3bf3fc532e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6876,7 +6876,7 @@ def _fillna_with_method(
return result
- new_mgr = self._mgr.interpolate(
+ new_mgr = self._mgr.pad_or_backfill(
method=method,
axis=axis,
limit=limit,
@@ -7957,9 +7957,6 @@ def interpolate(
stacklevel=find_stack_level(),
)
- if method not in fillna_methods:
- axis = self._info_axis_number
-
if isinstance(obj.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
@@ -7976,17 +7973,32 @@ def interpolate(
index = missing.get_interp_index(method, obj.index)
- new_data = obj._mgr.interpolate(
- method=method,
- axis=axis,
- index=index,
- limit=limit,
- limit_direction=limit_direction,
- limit_area=limit_area,
- inplace=inplace,
- downcast=downcast,
- **kwargs,
- )
+ if method.lower() in fillna_methods:
+ # TODO(3.0): remove this case
+ new_data = obj._mgr.pad_or_backfill(
+ method=method,
+ axis=axis,
+ index=index,
+ limit=limit,
+ limit_direction=limit_direction,
+ limit_area=limit_area,
+ inplace=inplace,
+ downcast=downcast,
+ **kwargs,
+ )
+ else:
+ axis = self._info_axis_number
+ new_data = obj._mgr.interpolate(
+ method=method,
+ axis=axis,
+ index=index,
+ limit=limit,
+ limit_direction=limit_direction,
+ limit_area=limit_area,
+ inplace=inplace,
+ downcast=downcast,
+ **kwargs,
+ )
result = self._constructor(new_data)
if should_transpose:
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py
index 098a78fc54b71..e544034ea5894 100644
--- a/pandas/core/internals/array_manager.py
+++ b/pandas/core/internals/array_manager.py
@@ -357,6 +357,9 @@ def diff(self, n: int, axis: AxisInt) -> Self:
assert self.ndim == 2 and axis == 0 # caller ensures
return self.apply(algos.diff, n=n, axis=axis)
+ def pad_or_backfill(self, **kwargs) -> Self:
+ return self.apply_with_block("pad_or_backfill", swap_axis=False, **kwargs)
+
def interpolate(self, **kwargs) -> Self:
return self.apply_with_block("interpolate", swap_axis=False, **kwargs)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index ae820a40005df..6f92dd77908c9 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1343,6 +1343,35 @@ def fillna(
]
)
+ def pad_or_backfill(
+ self,
+ *,
+ method: FillnaOptions = "pad",
+ axis: AxisInt = 0,
+ index: Index | None = None,
+ inplace: bool = False,
+ limit: int | None = None,
+ limit_direction: Literal["forward", "backward", "both"] = "forward",
+ limit_area: str | None = None,
+ fill_value: Any | None = None,
+ downcast: Literal["infer"] | None = None,
+ using_cow: bool = False,
+ **kwargs,
+ ) -> list[Block]:
+ return self.interpolate(
+ method=method,
+ axis=axis,
+ index=index,
+ inplace=inplace,
+ limit=limit,
+ limit_direction=limit_direction,
+ limit_area=limit_area,
+ fill_value=fill_value,
+ downcast=downcast,
+ using_cow=using_cow,
+ **kwargs,
+ )
+
def interpolate(
self,
*,
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index bb745f61ab221..292f5b2344227 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -416,6 +416,14 @@ def diff(self, n: int, axis: AxisInt) -> Self:
axis = self._normalize_axis(axis)
return self.apply("diff", n=n, axis=axis)
+ def pad_or_backfill(self, inplace: bool, **kwargs) -> Self:
+ return self.apply(
+ "pad_or_backfill",
+ inplace=inplace,
+ **kwargs,
+ using_cow=using_copy_on_write(),
+ )
+
def interpolate(self, inplace: bool, **kwargs) -> Self:
return self.apply(
"interpolate", inplace=inplace, **kwargs, using_cow=using_copy_on_write()
| Working towards separating fillna/interpolate | https://api.github.com/repos/pandas-dev/pandas/pulls/53822 | 2023-06-23T19:24:08Z | 2023-06-25T20:42:08Z | 2023-06-25T20:42:08Z | 2023-06-25T20:55:34Z |
better cython debugging | diff --git a/.gitignore b/.gitignore
index cd22c2bb8cb5b..051a3ec11b794 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,7 +39,7 @@
.mesonpy-native-file.ini
MANIFEST
compile_commands.json
-debug
+.debug
# Python files #
################
diff --git a/doc/source/development/debugging_extensions.rst b/doc/source/development/debugging_extensions.rst
index 27f812b65e261..63154369dfd88 100644
--- a/doc/source/development/debugging_extensions.rst
+++ b/doc/source/development/debugging_extensions.rst
@@ -14,8 +14,8 @@ For Python developers with limited or no C/C++ experience this can seem a daunti
2. `Fundamental Python Debugging Part 2 - Python Extensions <https://willayd.com/fundamental-python-debugging-part-2-python-extensions.html>`_
3. `Fundamental Python Debugging Part 3 - Cython Extensions <https://willayd.com/fundamental-python-debugging-part-3-cython-extensions.html>`_
-Generating debug builds
------------------------
+Debugging locally
+-----------------
By default building pandas from source will generate a release build. To generate a development build you can type::
@@ -27,6 +27,32 @@ By default building pandas from source will generate a release build. To generat
By specifying ``builddir="debug"`` all of the targets will be built and placed in the debug directory relative to the project root. This helps to keep your debug and release artifacts separate; you are of course able to choose a different directory name or omit altogether if you do not care to separate build types.
+Using Docker
+------------
+
+To simplify the debugging process, pandas has created a Docker image with a debug build of Python and the gdb/Cython debuggers pre-installed. You may either ``docker pull pandas/pandas-debug`` to get access to this image or build it from the ``tooling/debug`` folder locallly.
+
+You can then mount your pandas repository into this image via:
+
+.. code-block:: sh
+
+ docker run --rm -it -w /data -v ${PWD}:/data pandas/pandas-debug
+
+Inside the image, you can use meson to build/install pandas and place the build artifacts into a ``debug`` folder using a command as follows:
+
+.. code-block:: sh
+
+ python -m pip install -ve . --no-build-isolation --config-settings=builddir="debug" --config-settings=setup-args="-Dbuildtype=debug"
+
+If planning to use cygdb, the files required by that application are placed within the build folder. So you have to first ``cd`` to the build folder, then start that application.
+
+.. code-block:: sh
+
+ cd debug
+ cygdb
+
+Within the debugger you can use `cygdb commands <https://docs.cython.org/en/latest/src/userguide/debugging.html#using-the-debugger>`_ to navigate cython extensions.
+
Editor support
--------------
diff --git a/pandas/_libs/meson.build b/pandas/_libs/meson.build
index fd632790546f6..b4662d6bf8dd2 100644
--- a/pandas/_libs/meson.build
+++ b/pandas/_libs/meson.build
@@ -101,12 +101,20 @@ libs_sources = {
'writers': {'sources': ['writers.pyx']}
}
+cython_args = [
+ '--include-dir',
+ meson.current_build_dir(),
+ '-X always_allow_keywords=true'
+]
+if get_option('buildtype') == 'debug'
+ cython_args += ['--gdb']
+endif
foreach ext_name, ext_dict : libs_sources
py.extension_module(
ext_name,
ext_dict.get('sources'),
- cython_args: ['--include-dir', meson.current_build_dir(), '-X always_allow_keywords=true'],
+ cython_args: cython_args,
include_directories: [inc_np, inc_pd],
dependencies: ext_dict.get('deps', ''),
subdir: 'pandas/_libs',
diff --git a/pandas/_libs/tslibs/meson.build b/pandas/_libs/tslibs/meson.build
index a1b0c54d1f48c..85410f771233f 100644
--- a/pandas/_libs/tslibs/meson.build
+++ b/pandas/_libs/tslibs/meson.build
@@ -19,11 +19,20 @@ tslibs_sources = {
'vectorized': {'sources': ['vectorized.pyx']},
}
+cython_args = [
+ '--include-dir',
+ meson.current_build_dir(),
+ '-X always_allow_keywords=true'
+]
+if get_option('buildtype') == 'debug'
+ cython_args += ['--gdb']
+endif
+
foreach ext_name, ext_dict : tslibs_sources
py.extension_module(
ext_name,
ext_dict.get('sources'),
- cython_args: ['--include-dir', meson.current_build_dir(), '-X always_allow_keywords=true'],
+ cython_args: cython_args,
include_directories: [inc_np, inc_pd],
dependencies: ext_dict.get('deps', ''),
subdir: 'pandas/_libs/tslibs',
diff --git a/setup.py b/setup.py
index 663bbd3952eab..db3717efb738d 100755
--- a/setup.py
+++ b/setup.py
@@ -418,6 +418,9 @@ def maybe_cythonize(extensions, *args, **kwargs):
kwargs["nthreads"] = parsed.parallel
build_ext.render_templates(_pxifiles)
+ if debugging_symbols_requested:
+ kwargs["gdb_debug"] = True
+
return cythonize(extensions, *args, **kwargs)
diff --git a/tooling/debug/Dockerfile.pandas-debug b/tooling/debug/Dockerfile.pandas-debug
new file mode 100644
index 0000000000000..00e10a85d7ab9
--- /dev/null
+++ b/tooling/debug/Dockerfile.pandas-debug
@@ -0,0 +1,35 @@
+FROM ubuntu:latest
+
+RUN apt-get update && apt-get upgrade -y
+RUN apt-get install -y build-essential git valgrind
+
+# cpython dev install
+RUN git clone -b 3.10 --depth 1 https://github.com/python/cpython.git /clones/cpython
+RUN apt-get install -y libbz2-dev libffi-dev libssl-dev zlib1g-dev liblzma-dev libsqlite3-dev libreadline-dev
+RUN cd /clones/cpython && ./configure --with-pydebug && CFLAGS="-g3" make -s -j$(nproc) && make install
+
+# gdb installation
+RUN apt-get install -y wget libgmp-dev
+RUN cd /tmp && wget http://mirrors.kernel.org/sourceware/gdb/releases/gdb-12.1.tar.gz && tar -zxf gdb-12.1.tar.gz
+RUN cd /tmp/gdb-12.1 && ./configure --with-python=python3 && make -j$(nproc) && make install
+RUN rm -r /tmp/gdb-12.1
+
+# pandas dependencies
+RUN python3 -m pip install \
+ cython \
+ hypothesis \
+ ninja \
+ numpy \
+ meson \
+ meson-python \
+ pytest \
+ pytest-asyncio \
+ python-dateutil \
+ pytz \
+ versioneer[toml]
+
+# At the time this docker image was built, there was a bug/limitation
+# with meson where only having a python3 executable and not python
+# would cause the build to fail. This symlink could be removed if
+# users stick to always calling python3 within the container
+RUN ln -s /usr/local/bin/python3 /usr/local/bin/python
diff --git a/tooling/debug/README b/tooling/debug/README
new file mode 100644
index 0000000000000..111a958ff5ef5
--- /dev/null
+++ b/tooling/debug/README
@@ -0,0 +1,19 @@
+The Docker image here helps to set up an isolated environment containing a debug version of Python and a gdb installation which the Cython debugger can work with.
+
+If you have internet access, you can pull a pre-built image via
+
+```sh
+docker pull pandas/pandas-debug
+```
+
+To build the image locally, you can do
+
+```sh
+docker build . -t pandas/pandas-debug -f Dockerfile.pandas-debug
+```
+
+For pandas developers, you can push a new copy of the image to dockerhub via
+
+```sh
+docker push pandas/pandas-debug
+```
| This is still super buggy but hopefully this gets people a bit closer | https://api.github.com/repos/pandas-dev/pandas/pulls/53821 | 2023-06-23T18:57:49Z | 2023-10-07T20:38:30Z | 2023-10-07T20:38:30Z | 2023-10-07T20:38:42Z |
TST: Refactor more slow tests | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 23ed053521baf..9d0e2145567bf 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -450,6 +450,9 @@ def unique_with_mask(values, mask: npt.NDArray[np.bool_] | None = None):
unique1d = unique
+_MINIMUM_COMP_ARR_LEN = 1_000_000
+
+
def isin(comps: ListLike, values: ListLike) -> npt.NDArray[np.bool_]:
"""
Compute the isin boolean array.
@@ -518,7 +521,7 @@ def isin(comps: ListLike, values: ListLike) -> npt.NDArray[np.bool_]:
# Albeit hashmap has O(1) look-up (vs. O(logn) in sorted array),
# in1d is faster for small sizes
if (
- len(comps_array) > 1_000_000
+ len(comps_array) > _MINIMUM_COMP_ARR_LEN
and len(values) <= 26
and comps_array.dtype != object
):
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 672f7c1f71b15..3b759010d1abb 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -45,6 +45,9 @@
from pandas.io.formats.format import DataFrameFormatter
+_DEFAULT_CHUNKSIZE_CELLS = 100_000
+
+
class CSVFormatter:
cols: np.ndarray
@@ -163,7 +166,7 @@ def _initialize_columns(self, cols: Sequence[Hashable] | None) -> np.ndarray:
def _initialize_chunksize(self, chunksize: int | None) -> int:
if chunksize is None:
- return (100000 // (len(self.cols) or 1)) or 1
+ return (_DEFAULT_CHUNKSIZE_CELLS // (len(self.cols) or 1)) or 1
return int(chunksize)
@property
diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py
index 5671a569c8ac8..ee9c4f05991a0 100644
--- a/pandas/tests/frame/methods/test_to_csv.py
+++ b/pandas/tests/frame/methods/test_to_csv.py
@@ -752,13 +752,16 @@ def test_to_csv_chunking(self, chunksize):
tm.assert_frame_equal(rs, aa)
@pytest.mark.slow
- def test_to_csv_wide_frame_formatting(self):
+ def test_to_csv_wide_frame_formatting(self, monkeypatch):
# Issue #8621
- df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
+ chunksize = 100
+ df = DataFrame(np.random.randn(1, chunksize + 10), columns=None, index=None)
with tm.ensure_clean() as filename:
- df.to_csv(filename, header=False, index=False)
+ with monkeypatch.context() as m:
+ m.setattr("pandas.io.formats.csvs._DEFAULT_CHUNKSIZE_CELLS", chunksize)
+ df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
- tm.assert_frame_equal(rs, df)
+ tm.assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO("a,1.0\nb,2.0")
diff --git a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
index 932457eebcd8e..e0868745a480a 100644
--- a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
@@ -70,13 +70,11 @@ def test_indexer_caching():
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
- arrays = (range(n), range(n))
- index = MultiIndex.from_tuples(zip(*arrays))
+ index = MultiIndex.from_arrays([np.arange(n), np.arange(n)])
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
- s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
diff --git a/pandas/tests/io/parser/test_index_col.py b/pandas/tests/io/parser/test_index_col.py
index cd12ecc4fc7cc..4b0567d6265ad 100644
--- a/pandas/tests/io/parser/test_index_col.py
+++ b/pandas/tests/io/parser/test_index_col.py
@@ -228,16 +228,18 @@ def test_header_with_index_col(all_parsers):
@pytest.mark.slow
-def test_index_col_large_csv(all_parsers):
+def test_index_col_large_csv(all_parsers, monkeypatch):
# https://github.com/pandas-dev/pandas/issues/37094
parser = all_parsers
- N = 1_000_001
- df = DataFrame({"a": range(N), "b": np.random.randn(N)})
+ ARR_LEN = 100
+ df = DataFrame({"a": range(ARR_LEN + 1), "b": np.random.randn(ARR_LEN + 1)})
with tm.ensure_clean() as path:
df.to_csv(path, index=False)
- result = parser.read_csv(path, index_col=[0])
+ with monkeypatch.context() as m:
+ m.setattr("pandas.core.algorithms._MINIMUM_COMP_ARR_LEN", ARR_LEN)
+ result = parser.read_csv(path, index_col=[0])
tm.assert_frame_equal(result, df.set_index("a"))
diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
index 01762e39c36c1..52fbfc23ef66c 100644
--- a/pandas/tests/plotting/frame/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -423,44 +423,60 @@ def test_line_area_stacked(self, kind):
df2 = df.set_index(df.index + 1)
_check_plot_works(df2.plot, kind=kind, logx=True, stacked=True)
- def test_line_area_nan_df(self):
+ @pytest.mark.parametrize(
+ "idx", [range(4), date_range("2023-01-1", freq="D", periods=4)]
+ )
+ def test_line_area_nan_df(self, idx):
values1 = [1, 2, np.nan, 3]
values2 = [3, np.nan, 2, 1]
- df = DataFrame({"a": values1, "b": values2})
- tdf = DataFrame({"a": values1, "b": values2}, index=tm.makeDateIndex(k=4))
-
- for d in [df, tdf]:
- ax = _check_plot_works(d.plot)
- masked1 = ax.lines[0].get_ydata()
- masked2 = ax.lines[1].get_ydata()
- # remove nan for comparison purpose
-
- exp = np.array([1, 2, 3], dtype=np.float64)
- tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)
-
- exp = np.array([3, 2, 1], dtype=np.float64)
- tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)
- tm.assert_numpy_array_equal(
- masked1.mask, np.array([False, False, True, False])
- )
- tm.assert_numpy_array_equal(
- masked2.mask, np.array([False, True, False, False])
- )
+ df = DataFrame({"a": values1, "b": values2}, index=idx)
- expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
- expected2 = np.array([3, 0, 2, 1], dtype=np.float64)
+ ax = _check_plot_works(df.plot)
+ masked1 = ax.lines[0].get_ydata()
+ masked2 = ax.lines[1].get_ydata()
+ # remove nan for comparison purpose
- ax = _check_plot_works(d.plot, stacked=True)
- tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
- tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
+ exp = np.array([1, 2, 3], dtype=np.float64)
+ tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)
- ax = _check_plot_works(d.plot.area)
- tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
- tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
+ exp = np.array([3, 2, 1], dtype=np.float64)
+ tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)
+ tm.assert_numpy_array_equal(masked1.mask, np.array([False, False, True, False]))
+ tm.assert_numpy_array_equal(masked2.mask, np.array([False, True, False, False]))
+
+ @pytest.mark.parametrize(
+ "idx", [range(4), date_range("2023-01-1", freq="D", periods=4)]
+ )
+ def test_line_area_nan_df_stacked(self, idx):
+ values1 = [1, 2, np.nan, 3]
+ values2 = [3, np.nan, 2, 1]
+ df = DataFrame({"a": values1, "b": values2}, index=idx)
+
+ expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
+ expected2 = np.array([3, 0, 2, 1], dtype=np.float64)
+
+ ax = _check_plot_works(df.plot, stacked=True)
+ tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
+ tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
+
+ @pytest.mark.parametrize(
+ "idx", [range(4), date_range("2023-01-1", freq="D", periods=4)]
+ )
+ @pytest.mark.parametrize("kwargs", [{}, {"stacked": False}])
+ def test_line_area_nan_df_stacked_area(self, idx, kwargs):
+ values1 = [1, 2, np.nan, 3]
+ values2 = [3, np.nan, 2, 1]
+ df = DataFrame({"a": values1, "b": values2}, index=idx)
+
+ expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
+ expected2 = np.array([3, 0, 2, 1], dtype=np.float64)
- ax = _check_plot_works(d.plot.area, stacked=False)
- tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
+ ax = _check_plot_works(df.plot.area, **kwargs)
+ tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
+ if kwargs:
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
+ else:
+ tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
def test_line_lim(self):
df = DataFrame(np.random.rand(6, 3), columns=["x", "y", "z"])
@@ -1537,27 +1553,31 @@ def test_errorbar_with_integer_column_names(self):
_check_has_errorbars(ax, xerr=0, yerr=1)
@pytest.mark.slow
- def test_errorbar_with_partial_columns(self):
+ @pytest.mark.parametrize("kind", ["line", "bar"])
+ def test_errorbar_with_partial_columns_kind(self, kind):
df = DataFrame(np.abs(np.random.randn(10, 3)))
df_err = DataFrame(np.abs(np.random.randn(10, 2)), columns=[0, 2])
- kinds = ["line", "bar"]
- for kind in kinds:
- ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
- _check_has_errorbars(ax, xerr=0, yerr=2)
+ ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
+ _check_has_errorbars(ax, xerr=0, yerr=2)
+ @pytest.mark.slow
+ def test_errorbar_with_partial_columns_dti(self):
+ df = DataFrame(np.abs(np.random.randn(10, 3)))
+ df_err = DataFrame(np.abs(np.random.randn(10, 2)), columns=[0, 2])
ix = date_range("1/1/2000", periods=10, freq="M")
df.set_index(ix, inplace=True)
df_err.set_index(ix, inplace=True)
ax = _check_plot_works(df.plot, yerr=df_err, kind="line")
_check_has_errorbars(ax, xerr=0, yerr=2)
+ @pytest.mark.slow
+ @pytest.mark.parametrize("err_box", [lambda x: x, DataFrame])
+ def test_errorbar_with_partial_columns_box(self, err_box):
d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
df = DataFrame(d)
- d_err = {"x": np.ones(12) * 0.2, "z": np.ones(12) * 0.4}
- df_err = DataFrame(d_err)
- for err in [d_err, df_err]:
- ax = _check_plot_works(df.plot, yerr=err)
- _check_has_errorbars(ax, xerr=0, yerr=1)
+ err = err_box({"x": np.ones(12) * 0.2, "z": np.ones(12) * 0.4})
+ ax = _check_plot_works(df.plot, yerr=err)
+ _check_has_errorbars(ax, xerr=0, yerr=1)
@pytest.mark.parametrize("kind", ["line", "bar", "barh"])
def test_errorbar_timeseries(self, kind):
diff --git a/pandas/tests/plotting/frame/test_frame_subplots.py b/pandas/tests/plotting/frame/test_frame_subplots.py
index 336fed6293070..9546731b0d8fa 100644
--- a/pandas/tests/plotting/frame/test_frame_subplots.py
+++ b/pandas/tests/plotting/frame/test_frame_subplots.py
@@ -245,15 +245,11 @@ def test_subplots_layout_single_column(
assert axes.shape == expected_shape
@pytest.mark.slow
- def test_subplots_warnings(self):
+ @pytest.mark.parametrize("idx", [range(5), date_range("1/1/2000", periods=5)])
+ def test_subplots_warnings(self, idx):
# GH 9464
with tm.assert_produces_warning(None):
- df = DataFrame(np.random.randn(100, 4))
- df.plot(subplots=True, layout=(3, 2))
-
- df = DataFrame(
- np.random.randn(100, 4), index=date_range("1/1/2000", periods=100)
- )
+ df = DataFrame(np.random.randn(5, 4), index=idx)
df.plot(subplots=True, layout=(3, 2))
def test_subplots_multiple_axes(self):
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index f6b50aeb3139d..9c5de2918b9a4 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -218,28 +218,27 @@ def test_andrews_curves_handle(self):
_check_colors(handles, linecolors=colors)
@pytest.mark.slow
- def test_parallel_coordinates(self, iris):
- from matplotlib import cm
-
+ @pytest.mark.parametrize(
+ "color",
+ [("#556270", "#4ECDC4", "#C7F464"), ["dodgerblue", "aquamarine", "seagreen"]],
+ )
+ def test_parallel_coordinates_colors(self, iris, color):
from pandas.plotting import parallel_coordinates
df = iris
- ax = _check_plot_works(parallel_coordinates, frame=df, class_column="Name")
- nlines = len(ax.get_lines())
- nxticks = len(ax.xaxis.get_ticklabels())
-
- rgba = ("#556270", "#4ECDC4", "#C7F464")
ax = _check_plot_works(
- parallel_coordinates, frame=df, class_column="Name", color=rgba
+ parallel_coordinates, frame=df, class_column="Name", color=color
)
- _check_colors(ax.get_lines()[:10], linecolors=rgba, mapping=df["Name"][:10])
+ _check_colors(ax.get_lines()[:10], linecolors=color, mapping=df["Name"][:10])
- cnames = ["dodgerblue", "aquamarine", "seagreen"]
- ax = _check_plot_works(
- parallel_coordinates, frame=df, class_column="Name", color=cnames
- )
- _check_colors(ax.get_lines()[:10], linecolors=cnames, mapping=df["Name"][:10])
+ @pytest.mark.slow
+ def test_parallel_coordinates_cmap(self, iris):
+ from matplotlib import cm
+
+ from pandas.plotting import parallel_coordinates
+
+ df = iris
ax = _check_plot_works(
parallel_coordinates, frame=df, class_column="Name", colormap=cm.jet
@@ -247,15 +246,30 @@ def test_parallel_coordinates(self, iris):
cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())]
_check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10])
+ @pytest.mark.slow
+ def test_parallel_coordinates_line_diff(self, iris):
+ from pandas.plotting import parallel_coordinates
+
+ df = iris
+
+ ax = _check_plot_works(parallel_coordinates, frame=df, class_column="Name")
+ nlines = len(ax.get_lines())
+ nxticks = len(ax.xaxis.get_ticklabels())
+
ax = _check_plot_works(
parallel_coordinates, frame=df, class_column="Name", axvlines=False
)
assert len(ax.get_lines()) == (nlines - nxticks)
+ @pytest.mark.slow
+ def test_parallel_coordinates_handles(self, iris):
+ from pandas.plotting import parallel_coordinates
+
+ df = iris
colors = ["b", "g", "r"]
df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3], "C": [1, 2, 3], "Name": colors})
ax = parallel_coordinates(df, "Name", color=colors)
- handles, labels = ax.get_legend_handles_labels()
+ handles, _ = ax.get_legend_handles_labels()
_check_colors(handles, linecolors=colors)
# not sure if this is indicative of a problem
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/53820 | 2023-06-23T17:47:39Z | 2023-06-26T17:39:39Z | 2023-06-26T17:39:39Z | 2023-06-26T17:39:42Z |
TST/CI: Skipif test_complibs | diff --git a/pandas/tests/io/pytables/test_file_handling.py b/pandas/tests/io/pytables/test_file_handling.py
index 1a126ad75c01c..623d6e664090f 100644
--- a/pandas/tests/io/pytables/test_file_handling.py
+++ b/pandas/tests/io/pytables/test_file_handling.py
@@ -230,11 +230,11 @@ def test_complibs_default_settings_override(tmp_path, setup_path):
@pytest.mark.parametrize("lvl", range(10))
@pytest.mark.parametrize("lib", tables.filters.all_complibs)
@pytest.mark.filterwarnings("ignore:object name is not a valid")
-@pytest.mark.xfail(
+@pytest.mark.skipif(
not PY311 and is_ci_environment() and is_platform_linux(),
- reason="producing invalid start bytes",
- raises=UnicodeDecodeError,
- strict=False,
+ reason="Segfaulting in a CI environment"
+ # with xfail, would sometimes raise UnicodeDecodeError
+ # invalid state byte
)
def test_complibs(tmp_path, lvl, lib):
# GH14478
| Even though this was just xfailed, it appears to also be causing segfault in the CI so just skipping for now e.g. https://github.com/pandas-dev/pandas/actions/runs/5352556089/jobs/9707592733 | https://api.github.com/repos/pandas-dev/pandas/pulls/53819 | 2023-06-23T17:44:56Z | 2023-06-23T20:04:23Z | 2023-06-23T20:04:23Z | 2023-06-23T20:04:27Z |
DOC: Fixing EX01 - Added examples | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 472bd78e4d3bc..5870a9ad8d60b 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -105,17 +105,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.errors.UnsupportedFunctionCall \
pandas.test \
pandas.NaT \
- pandas.SparseDtype \
- pandas.DatetimeTZDtype.unit \
- pandas.DatetimeTZDtype.tz \
- pandas.PeriodDtype.freq \
- pandas.IntervalDtype.subtype \
- pandas_dtype \
- pandas.api.types.is_bool \
- pandas.api.types.is_complex \
- pandas.api.types.is_float \
- pandas.api.types.is_integer \
- pandas.api.types.pandas_dtype \
pandas.read_clipboard \
pandas.ExcelFile \
pandas.ExcelFile.parse \
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index a622de742a840..c3fbd3ee4853e 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -1056,6 +1056,14 @@ def is_float(obj: object) -> bool:
Returns
-------
bool
+
+ Examples
+ --------
+ >>> pd.api.types.is_float(1.0)
+ True
+
+ >>> pd.api.types.is_float(1)
+ False
"""
return util.is_float_object(obj)
@@ -1067,6 +1075,14 @@ def is_integer(obj: object) -> bool:
Returns
-------
bool
+
+ Examples
+ --------
+ >>> pd.api.types.is_integer(1)
+ True
+
+ >>> pd.api.types.is_integer(1.0)
+ False
"""
return util.is_integer_object(obj)
@@ -1089,6 +1105,14 @@ def is_bool(obj: object) -> bool:
Returns
-------
bool
+
+ Examples
+ --------
+ >>> pd.api.types.is_bool(True)
+ True
+
+ >>> pd.api.types.is_bool(1)
+ False
"""
return util.is_bool_object(obj)
@@ -1100,6 +1124,14 @@ def is_complex(obj: object) -> bool:
Returns
-------
bool
+
+ Examples
+ --------
+ >>> pd.api.types.is_complex(1 + 1j)
+ True
+
+ >>> pd.api.types.is_complex(1)
+ False
"""
return util.is_complex_object(obj)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 3931b12e06f9b..16b8e9770e7f3 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1603,6 +1603,11 @@ def pandas_dtype(dtype) -> DtypeObj:
Raises
------
TypeError if not a dtype
+
+ Examples
+ --------
+ >>> pd.api.types.pandas_dtype(int)
+ dtype('int64')
"""
# short-circuit
if isinstance(dtype, np.ndarray):
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 50fc5231a3a76..ea4d10c06efe3 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -697,16 +697,17 @@ class DatetimeTZDtype(PandasExtensionDtype):
Raises
------
- pytz.UnknownTimeZoneError
+ ZoneInfoNotFoundError
When the requested timezone cannot be found.
Examples
--------
- >>> pd.DatetimeTZDtype(tz='UTC')
+ >>> from zoneinfo import ZoneInfo
+ >>> pd.DatetimeTZDtype(tz=ZoneInfo('UTC'))
datetime64[ns, UTC]
- >>> pd.DatetimeTZDtype(tz='dateutil/US/Central')
- datetime64[ns, tzfile('/usr/share/zoneinfo/US/Central')]
+ >>> pd.DatetimeTZDtype(tz=ZoneInfo('Europe/Paris'))
+ datetime64[ns, Europe/Paris]
"""
type: type[Timestamp] = Timestamp
@@ -772,6 +773,13 @@ def _creso(self) -> int:
def unit(self) -> str_type:
"""
The precision of the datetime data.
+
+ Examples
+ --------
+ >>> from zoneinfo import ZoneInfo
+ >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
+ >>> dtype.unit
+ 'ns'
"""
return self._unit
@@ -779,6 +787,13 @@ def unit(self) -> str_type:
def tz(self) -> tzinfo:
"""
The timezone.
+
+ Examples
+ --------
+ >>> from zoneinfo import ZoneInfo
+ >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
+ >>> dtype.tz
+ zoneinfo.ZoneInfo(key='America/Los_Angeles')
"""
return self._tz
@@ -967,6 +982,12 @@ def __reduce__(self):
def freq(self):
"""
The frequency object of this PeriodDtype.
+
+ Examples
+ --------
+ >>> dtype = pd.PeriodDtype(freq='D')
+ >>> dtype.freq
+ <Day>
"""
return self._freq
@@ -1217,6 +1238,12 @@ def closed(self) -> IntervalClosedType:
def subtype(self):
"""
The dtype of the Interval bounds.
+
+ Examples
+ --------
+ >>> dtype = pd.IntervalDtype(subtype='int64', closed='both')
+ >>> dtype.subtype
+ dtype('int64')
"""
return self._subtype
@@ -1565,6 +1592,17 @@ class SparseDtype(ExtensionDtype):
Methods
-------
None
+
+ Examples
+ --------
+ >>> ser = pd.Series([1, 0, 0], dtype=pd.SparseDtype(dtype=int, fill_value=0))
+ >>> ser
+ 0 1
+ 1 0
+ 2 0
+ dtype: Sparse[int64, 0]
+ >>> ser.sparse.density
+ 0.3333333333333333
"""
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
| - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Towards https://github.com/pandas-dev/pandas/issues/37875
| https://api.github.com/repos/pandas-dev/pandas/pulls/53818 | 2023-06-23T17:07:23Z | 2023-06-26T16:45:39Z | 2023-06-26T16:45:39Z | 2023-06-26T17:07:45Z |
DEPR: Deprecate use of un-supported numpy dt64/td64 dtype for pandas.array | diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst
index b517f5fc38d5e..e154ca2cd3884 100644
--- a/doc/source/whatsnew/v2.1.0.rst
+++ b/doc/source/whatsnew/v2.1.0.rst
@@ -320,7 +320,9 @@ Deprecations
- Deprecated strings ``T``, ``t``, ``L`` and ``l`` denoting units in :func:`to_timedelta` (:issue:`52536`)
- Deprecated the "method" and "limit" keywords on :meth:`Series.fillna`, :meth:`DataFrame.fillna`, :meth:`SeriesGroupBy.fillna`, :meth:`DataFrameGroupBy.fillna`, and :meth:`Resampler.fillna`, use ``obj.bfill()`` or ``obj.ffill()`` instead (:issue:`53394`)
- Deprecated the ``method`` and ``limit`` keywords in :meth:`DataFrame.replace` and :meth:`Series.replace` (:issue:`33302`)
+- Deprecated the use of non-supported datetime64 and timedelta64 resolutions with :func:`pandas.array`. Supported resolutions are: "s", "ms", "us", "ns" resolutions (:issue:`53058`)
- Deprecated values "pad", "ffill", "bfill", "backfill" for :meth:`Series.interpolate` and :meth:`DataFrame.interpolate`, use ``obj.ffill()`` or ``obj.bfill()`` instead (:issue:`53581`)
+-
.. ---------------------------------------------------------------------------
.. _whatsnew_210.performance:
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 014c99c87ad00..9112c7e52a348 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -14,6 +14,7 @@
cast,
overload,
)
+import warnings
import numpy as np
from numpy import ma
@@ -31,6 +32,7 @@
DtypeObj,
T,
)
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import (
@@ -369,6 +371,16 @@ def array(
if lib.is_np_dtype(dtype, "m") and is_supported_unit(get_unit_from_dtype(dtype)):
return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)
+ elif lib.is_np_dtype(dtype, "mM"):
+ warnings.warn(
+ r"datetime64 and timedelta64 dtype resolutions other than "
+ r"'s', 'ms', 'us', and 'ns' are deprecated. "
+ r"In future releases passing unsupported resolutions will "
+ r"raise an exception.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
return PandasArray._from_sequence(data, dtype=dtype, copy=copy)
diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py
index 337cdaa26a3d4..d5c1d5bbd03b0 100644
--- a/pandas/tests/arrays/test_array.py
+++ b/pandas/tests/arrays/test_array.py
@@ -1,5 +1,6 @@
import datetime
import decimal
+import re
import numpy as np
import pytest
@@ -28,6 +29,20 @@
)
+@pytest.mark.parametrize("dtype_unit", ["M8[h]", "M8[m]", "m8[h]", "M8[m]"])
+def test_dt64_array(dtype_unit):
+ # PR 53817
+ dtype_var = np.dtype(dtype_unit)
+ msg = (
+ r"datetime64 and timedelta64 dtype resolutions other than "
+ r"'s', 'ms', 'us', and 'ns' are deprecated. "
+ r"In future releases passing unsupported resolutions will "
+ r"raise an exception."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=re.escape(msg)):
+ pd.array([], dtype=dtype_var)
+
+
@pytest.mark.parametrize(
"data, dtype, expected",
[
| - [X] closes #53058
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/53817 | 2023-06-23T16:12:03Z | 2023-07-11T01:51:48Z | 2023-07-11T01:51:47Z | 2024-02-17T17:20:11Z |
CLN: PDEP6 precusor | diff --git a/pandas/tests/copy_view/test_interp_fillna.py b/pandas/tests/copy_view/test_interp_fillna.py
index 71023a538a2c3..5d4fd75df830c 100644
--- a/pandas/tests/copy_view/test_interp_fillna.py
+++ b/pandas/tests/copy_view/test_interp_fillna.py
@@ -248,7 +248,10 @@ def test_fillna_inplace_reference(using_copy_on_write):
def test_fillna_interval_inplace_reference(using_copy_on_write):
- ser = Series(interval_range(start=0, end=5), name="a")
+ # Set dtype explicitly to avoid implicit cast when setting nan
+ ser = Series(
+ interval_range(start=0, end=5), name="a", dtype="interval[float64, right]"
+ )
ser.iloc[1] = np.nan
ser_orig = ser.copy()
| Another precursor to https://github.com/pandas-dev/pandas/pull/53405 | https://api.github.com/repos/pandas-dev/pandas/pulls/53815 | 2023-06-23T11:14:24Z | 2023-06-23T15:45:57Z | 2023-06-23T15:45:57Z | 2023-06-23T15:45:58Z |
Update governance.md | diff --git a/web/pandas/about/governance.md b/web/pandas/about/governance.md
index 0bb61592d7e5d..46480acc69c31 100644
--- a/web/pandas/about/governance.md
+++ b/web/pandas/about/governance.md
@@ -228,7 +228,7 @@ interactions with NumFOCUS.
Team.
- This Subcommittee shall NOT make decisions about the direction, scope or
technical direction of the Project.
-- This Subcommittee will have at least 5 members. No more than 2 Subcommitee
+- This Subcommittee will have at least 5 members. No more than 2 Subcommittee
Members can report to one person (either directly or indirectly) through
employment or contracting work (including the reportee, i.e. the reportee + 1
is the max). This avoids effective majorities resting on one person.
| Misspelling of Subcommittee
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/53814 | 2023-06-23T05:06:41Z | 2023-06-23T14:43:56Z | 2023-06-23T14:43:56Z | 2023-06-23T14:43:57Z |
TST: Add test for loc expansion GH13829. | diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index b745575876212..6060f6b5e278a 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -930,6 +930,20 @@ def test_setitem_frame_keep_ea_dtype(self, any_numeric_ea_dtype):
)
tm.assert_frame_equal(df, expected)
+ def test_loc_expansion_with_timedelta_type(self):
+ result = DataFrame(columns=list("abc"))
+ result.loc[0] = {
+ "a": pd.to_timedelta(5, unit="s"),
+ "b": pd.to_timedelta(72, unit="s"),
+ "c": "23",
+ }
+ expected = DataFrame(
+ [[pd.Timedelta("0 days 00:00:05"), pd.Timedelta("0 days 00:01:12"), "23"]],
+ index=Index([0]),
+ columns=(["a", "b", "c"]),
+ )
+ tm.assert_frame_equal(result, expected)
+
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
| - [ ] closes #13829 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/53812 | 2023-06-23T03:20:17Z | 2023-06-23T17:20:28Z | 2023-06-23T17:20:28Z | 2023-06-23T23:45:37Z |
REF: separate out cross-merge, make less stateful | diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index d406145e62ad7..66d2bf0b8991d 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -145,10 +145,79 @@ def merge(
indicator: str | bool = False,
validate: str | None = None,
) -> DataFrame:
- op = _MergeOperation(
+ if how == "cross":
+ return _cross_merge(
+ left,
+ right,
+ on=on,
+ left_on=left_on,
+ right_on=right_on,
+ left_index=left_index,
+ right_index=right_index,
+ sort=sort,
+ suffixes=suffixes,
+ indicator=indicator,
+ validate=validate,
+ copy=copy,
+ )
+ else:
+ op = _MergeOperation(
+ left,
+ right,
+ how=how,
+ on=on,
+ left_on=left_on,
+ right_on=right_on,
+ left_index=left_index,
+ right_index=right_index,
+ sort=sort,
+ suffixes=suffixes,
+ indicator=indicator,
+ validate=validate,
+ )
+ return op.get_result(copy=copy)
+
+
+def _cross_merge(
+ left: DataFrame | Series,
+ right: DataFrame | Series,
+ on: IndexLabel | None = None,
+ left_on: IndexLabel | None = None,
+ right_on: IndexLabel | None = None,
+ left_index: bool = False,
+ right_index: bool = False,
+ sort: bool = False,
+ suffixes: Suffixes = ("_x", "_y"),
+ copy: bool | None = None,
+ indicator: str | bool = False,
+ validate: str | None = None,
+) -> DataFrame:
+ """
+ See merge.__doc__ with how='cross'
+ """
+
+ if (
+ left_index
+ or right_index
+ or right_on is not None
+ or left_on is not None
+ or on is not None
+ ):
+ raise MergeError(
+ "Can not pass on, right_on, left_on or set right_index=True or "
+ "left_index=True"
+ )
+
+ cross_col = f"_cross_{uuid.uuid4()}"
+ left = left.assign(**{cross_col: 1})
+ right = right.assign(**{cross_col: 1})
+
+ left_on = right_on = [cross_col]
+
+ res = merge(
left,
right,
- how=how,
+ how="inner",
on=on,
left_on=left_on,
right_on=right_on,
@@ -158,8 +227,10 @@ def merge(
suffixes=suffixes,
indicator=indicator,
validate=validate,
+ copy=copy,
)
- return op.get_result(copy=copy)
+ del res[cross_col]
+ return res
def _groupby_and_merge(by, left: DataFrame, right: DataFrame, merge_pieces):
@@ -706,17 +777,6 @@ def __init__(
self.left_on, self.right_on = self._validate_left_right_on(left_on, right_on)
- cross_col = None
- if self.how == "cross":
- (
- self.left,
- self.right,
- self.how,
- cross_col,
- ) = self._create_cross_configuration(self.left, self.right)
- self.left_on = self.right_on = [cross_col]
- self._cross = cross_col
-
(
self.left_join_keys,
self.right_join_keys,
@@ -829,17 +889,8 @@ def get_result(self, copy: bool | None = True) -> DataFrame:
self._maybe_restore_index_levels(result)
- self._maybe_drop_cross_column(result, self._cross)
-
return result.__finalize__(self, method="merge")
- @final
- def _maybe_drop_cross_column(
- self, result: DataFrame, cross_col: str | None
- ) -> None:
- if cross_col is not None:
- del result[cross_col]
-
@final
@cache_readonly
def _indicator_name(self) -> str | None:
@@ -1448,53 +1499,12 @@ def _maybe_coerce_merge_keys(self) -> None:
self.right = self.right.copy()
self.right[name] = self.right[name].astype(typ)
- @final
- def _create_cross_configuration(
- self, left: DataFrame, right: DataFrame
- ) -> tuple[DataFrame, DataFrame, JoinHow, str]:
- """
- Creates the configuration to dispatch the cross operation to inner join,
- e.g. adding a join column and resetting parameters. Join column is added
- to a new object, no inplace modification
-
- Parameters
- ----------
- left : DataFrame
- right : DataFrame
-
- Returns
- -------
- a tuple (left, right, how, cross_col) representing the adjusted
- DataFrames with cross_col, the merge operation set to inner and the column
- to join over.
- """
- cross_col = f"_cross_{uuid.uuid4()}"
- how: JoinHow = "inner"
- return (
- left.assign(**{cross_col: 1}),
- right.assign(**{cross_col: 1}),
- how,
- cross_col,
- )
-
def _validate_left_right_on(self, left_on, right_on):
left_on = com.maybe_make_list(left_on)
right_on = com.maybe_make_list(right_on)
- if self.how == "cross":
- if (
- self.left_index
- or self.right_index
- or right_on is not None
- or left_on is not None
- or self.on is not None
- ):
- raise MergeError(
- "Can not pass on, right_on, left_on or set right_index=True or "
- "left_index=True"
- )
# Hm, any way to make this logic less complicated??
- elif self.on is None and left_on is None and right_on is None:
+ if self.on is None and left_on is None and right_on is None:
if self.left_index and self.right_index:
left_on, right_on = (), ()
elif self.left_index:
@@ -1562,7 +1572,7 @@ def _validate_left_right_on(self, left_on, right_on):
'of levels in the index of "left"'
)
left_on = [None] * n
- if self.how != "cross" and len(right_on) != len(left_on):
+ if len(right_on) != len(left_on):
raise ValueError("len(right_on) must equal len(left_on)")
return left_on, right_on
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/53810 | 2023-06-22T23:56:49Z | 2023-06-23T21:20:50Z | 2023-06-23T21:20:50Z | 2023-06-23T21:21:50Z |
DEPR: Remove literal string input for read_xml | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index ec0e7d0636b07..4d4b9e086e9e5 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -2919,6 +2919,7 @@ Read an XML string:
.. ipython:: python
+ from io import StringIO
xml = """<?xml version="1.0" encoding="UTF-8"?>
<bookstore>
<book category="cooking">
@@ -2941,7 +2942,7 @@ Read an XML string:
</book>
</bookstore>"""
- df = pd.read_xml(xml)
+ df = pd.read_xml(StringIO(xml))
df
Read a URL with no options:
@@ -2961,7 +2962,7 @@ as a string:
f.write(xml)
with open(file_path, "r") as f:
- df = pd.read_xml(f.read())
+ df = pd.read_xml(StringIO(f.read()))
df
Read in the content of the "books.xml" as instance of ``StringIO`` or
@@ -3052,7 +3053,7 @@ For example, below XML contains a namespace with prefix, ``doc``, and URI at
</doc:row>
</doc:data>"""
- df = pd.read_xml(xml,
+ df = pd.read_xml(StringIO(xml),
xpath="//doc:row",
namespaces={"doc": "https://example.com"})
df
@@ -3082,7 +3083,7 @@ But assigning *any* temporary name to correct URI allows parsing by nodes.
</row>
</data>"""
- df = pd.read_xml(xml,
+ df = pd.read_xml(StringIO(xml),
xpath="//pandas:row",
namespaces={"pandas": "https://example.com"})
df
@@ -3117,7 +3118,7 @@ However, if XPath does not reference node names such as default, ``/*``, then
</row>
</data>"""
- df = pd.read_xml(xml, xpath="./row")
+ df = pd.read_xml(StringIO(xml), xpath="./row")
df
shows the attribute ``sides`` on ``shape`` element was not parsed as
@@ -3218,7 +3219,7 @@ output (as shown below for demonstration) for easier parse into ``DataFrame``:
</row>
</response>"""
- df = pd.read_xml(xml, stylesheet=xsl)
+ df = pd.read_xml(StringIO(xml), stylesheet=xsl)
df
For very large XML files that can range in hundreds of megabytes to gigabytes, :func:`pandas.read_xml`
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 9653226b96196..44728e7e552ab 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -221,6 +221,7 @@ apply converter methods, and parse dates (:issue:`43567`).
.. ipython:: python
+ from io import StringIO
xml_dates = """<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
@@ -244,7 +245,7 @@ apply converter methods, and parse dates (:issue:`43567`).
</data>"""
df = pd.read_xml(
- xml_dates,
+ StringIO(xml_dates),
dtype={'sides': 'Int64'},
converters={'degrees': str},
parse_dates=['date']
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst
index 0f669beaa036f..716ac3b49b004 100644
--- a/doc/source/whatsnew/v2.1.0.rst
+++ b/doc/source/whatsnew/v2.1.0.rst
@@ -311,6 +311,7 @@ Deprecations
- Deprecated constructing :class:`SparseArray` from scalar data, pass a sequence instead (:issue:`53039`)
- Deprecated falling back to filling when ``value`` is not specified in :meth:`DataFrame.replace` and :meth:`Series.replace` with non-dict-like ``to_replace`` (:issue:`33302`)
- Deprecated literal json input to :func:`read_json`. Wrap literal json string input in ``io.StringIO`` instead. (:issue:`53409`)
+- Deprecated literal string input to :func:`read_xml`. Wrap literal string/bytes input in ``io.StringIO`` / ``io.BytesIO`` instead. (:issue:`53767`)
- Deprecated literal string/bytes input to :func:`read_html`. Wrap literal string/bytes input in ``io.StringIO`` / ``io.BytesIO`` instead. (:issue:`53767`)
- Deprecated option "mode.use_inf_as_na", convert inf entries to ``NaN`` before instead (:issue:`51684`)
- Deprecated parameter ``obj`` in :meth:`GroupBy.get_group` (:issue:`53545`)
diff --git a/pandas/io/xml.py b/pandas/io/xml.py
index 62bbb410dacc1..a58437fdeb8dc 100644
--- a/pandas/io/xml.py
+++ b/pandas/io/xml.py
@@ -12,6 +12,7 @@
Callable,
Sequence,
)
+import warnings
from pandas._libs import lib
from pandas.compat._optional import import_optional_dependency
@@ -20,6 +21,7 @@
ParserError,
)
from pandas.util._decorators import doc
+from pandas.util._exceptions import find_stack_level
from pandas.util._validators import check_dtype_backend
from pandas.core.dtypes.common import is_list_like
@@ -30,6 +32,7 @@
file_exists,
get_handle,
infer_compression,
+ is_file_like,
is_fsspec_url,
is_url,
stringify_path,
@@ -802,6 +805,22 @@ def _parse(
p: _EtreeFrameParser | _LxmlFrameParser
+ if isinstance(path_or_buffer, str) and not any(
+ [
+ is_file_like(path_or_buffer),
+ file_exists(path_or_buffer),
+ is_url(path_or_buffer),
+ is_fsspec_url(path_or_buffer),
+ ]
+ ):
+ warnings.warn(
+ "Passing literal xml to 'read_xml' is deprecated and "
+ "will be removed in a future version. To read from a "
+ "literal string, wrap it in a 'StringIO' object.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
if parser == "lxml":
lxml = import_optional_dependency("lxml.etree", errors="ignore")
@@ -894,6 +913,10 @@ def read_xml(
string or a path. The string can further be a URL. Valid URL schemes
include http, ftp, s3, and file.
+ .. deprecated:: 2.1.0
+ Passing xml literal strings is deprecated.
+ Wrap literal xml input in ``io.StringIO`` or ``io.BytesIO`` instead.
+
xpath : str, optional, default './\*'
The XPath to parse required set of nodes for migration to DataFrame.
XPath should return a collection of elements and not a single
@@ -1049,6 +1072,7 @@ def read_xml(
Examples
--------
+ >>> import io
>>> xml = '''<?xml version='1.0' encoding='utf-8'?>
... <data xmlns="http://example.com">
... <row>
@@ -1068,7 +1092,7 @@ def read_xml(
... </row>
... </data>'''
- >>> df = pd.read_xml(xml)
+ >>> df = pd.read_xml(io.StringIO(xml))
>>> df
shape degrees sides
0 square 360 4.0
@@ -1082,7 +1106,7 @@ def read_xml(
... <row shape="triangle" degrees="180" sides="3.0"/>
... </data>'''
- >>> df = pd.read_xml(xml, xpath=".//row")
+ >>> df = pd.read_xml(io.StringIO(xml), xpath=".//row")
>>> df
shape degrees sides
0 square 360 4.0
@@ -1108,7 +1132,7 @@ def read_xml(
... </doc:row>
... </doc:data>'''
- >>> df = pd.read_xml(xml,
+ >>> df = pd.read_xml(io.StringIO(xml),
... xpath="//doc:row",
... namespaces={{"doc": "https://example.com"}})
>>> df
diff --git a/pandas/tests/io/xml/test_xml.py b/pandas/tests/io/xml/test_xml.py
index a3a1646bc4748..1a64d9910d8bf 100644
--- a/pandas/tests/io/xml/test_xml.py
+++ b/pandas/tests/io/xml/test_xml.py
@@ -246,6 +246,19 @@
)
+@td.skip_if_no("lxml")
+def test_literal_xml_deprecation():
+ # GH 53809
+ msg = (
+ "Passing literal xml to 'read_xml' is deprecated and "
+ "will be removed in a future version. To read from a "
+ "literal string, wrap it in a 'StringIO' object."
+ )
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ read_xml(xml_default_nmsp)
+
+
@pytest.fixture(params=["rb", "r"])
def mode(request):
return request.param
@@ -300,7 +313,7 @@ def test_parser_consistency_file(xml_books):
def test_parser_consistency_url(parser, httpserver):
httpserver.serve_content(content=xml_default_nmsp)
- df_xpath = read_xml(xml_default_nmsp, parser=parser)
+ df_xpath = read_xml(StringIO(xml_default_nmsp), parser=parser)
df_iter = read_xml(
BytesIO(xml_default_nmsp.encode()),
parser=parser,
@@ -353,6 +366,11 @@ def test_file_buffered_reader_string(xml_books, parser, mode):
with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f:
xml_obj = f.read()
+ if mode == "rb":
+ xml_obj = StringIO(xml_obj.decode())
+ elif mode == "r":
+ xml_obj = StringIO(xml_obj)
+
df_str = read_xml(xml_obj, parser=parser)
df_expected = DataFrame(
@@ -373,6 +391,11 @@ def test_file_buffered_reader_no_xml_declaration(xml_books, parser, mode):
next(f)
xml_obj = f.read()
+ if mode == "rb":
+ xml_obj = StringIO(xml_obj.decode())
+ elif mode == "r":
+ xml_obj = StringIO(xml_obj)
+
df_str = read_xml(xml_obj, parser=parser)
df_expected = DataFrame(
@@ -391,7 +414,7 @@ def test_file_buffered_reader_no_xml_declaration(xml_books, parser, mode):
def test_string_charset(parser):
txt = "<中文標籤><row><c1>1</c1><c2>2</c2></row></中文標籤>"
- df_str = read_xml(txt, parser=parser)
+ df_str = read_xml(StringIO(txt), parser=parser)
df_expected = DataFrame({"c1": 1, "c2": 2}, index=[0])
@@ -449,34 +472,48 @@ def test_empty_string_lxml(val):
]
)
with pytest.raises(XMLSyntaxError, match=msg):
- read_xml(val, parser="lxml")
+ if isinstance(val, str):
+ read_xml(StringIO(val), parser="lxml")
+ else:
+ read_xml(BytesIO(val), parser="lxml")
@pytest.mark.parametrize("val", ["", b""])
def test_empty_string_etree(val):
with pytest.raises(ParseError, match="no element found"):
- read_xml(val, parser="etree")
+ if isinstance(val, str):
+ read_xml(StringIO(val), parser="etree")
+ else:
+ read_xml(BytesIO(val), parser="etree")
@td.skip_if_no("lxml")
def test_wrong_file_path_lxml():
- from lxml.etree import XMLSyntaxError
-
+ msg = (
+ "Passing literal xml to 'read_xml' is deprecated and "
+ "will be removed in a future version. To read from a "
+ "literal string, wrap it in a 'StringIO' object."
+ )
filename = os.path.join("data", "html", "books.xml")
with pytest.raises(
- XMLSyntaxError,
- match=("Start tag expected, '<' not found"),
+ FutureWarning,
+ match=msg,
):
read_xml(filename, parser="lxml")
def test_wrong_file_path_etree():
+ msg = (
+ "Passing literal xml to 'read_xml' is deprecated and "
+ "will be removed in a future version. To read from a "
+ "literal string, wrap it in a 'StringIO' object."
+ )
filename = os.path.join("data", "html", "books.xml")
with pytest.raises(
- ParseError,
- match=("not well-formed"),
+ FutureWarning,
+ match=msg,
):
read_xml(filename, parser="etree")
@@ -539,7 +576,7 @@ def test_bad_xpath_lxml(xml_books):
def test_default_namespace(parser):
df_nmsp = read_xml(
- xml_default_nmsp,
+ StringIO(xml_default_nmsp),
xpath=".//ns:row",
namespaces={"ns": "http://example.com"},
parser=parser,
@@ -565,7 +602,7 @@ def test_default_namespace(parser):
def test_prefix_namespace(parser):
df_nmsp = read_xml(
- xml_prefix_nmsp,
+ StringIO(xml_prefix_nmsp),
xpath=".//doc:row",
namespaces={"doc": "http://example.com"},
parser=parser,
@@ -589,14 +626,14 @@ def test_prefix_namespace(parser):
@td.skip_if_no("lxml")
def test_consistency_default_namespace():
df_lxml = read_xml(
- xml_default_nmsp,
+ StringIO(xml_default_nmsp),
xpath=".//ns:row",
namespaces={"ns": "http://example.com"},
parser="lxml",
)
df_etree = read_xml(
- xml_default_nmsp,
+ StringIO(xml_default_nmsp),
xpath=".//doc:row",
namespaces={"doc": "http://example.com"},
parser="etree",
@@ -608,14 +645,14 @@ def test_consistency_default_namespace():
@td.skip_if_no("lxml")
def test_consistency_prefix_namespace():
df_lxml = read_xml(
- xml_prefix_nmsp,
+ StringIO(xml_prefix_nmsp),
xpath=".//doc:row",
namespaces={"doc": "http://example.com"},
parser="lxml",
)
df_etree = read_xml(
- xml_prefix_nmsp,
+ StringIO(xml_prefix_nmsp),
xpath=".//doc:row",
namespaces={"doc": "http://example.com"},
parser="etree",
@@ -652,7 +689,7 @@ def test_none_namespace_prefix(key):
TypeError, match=("empty namespace prefix is not supported in XPath")
):
read_xml(
- xml_default_nmsp,
+ StringIO(xml_default_nmsp),
xpath=".//kml:Placemark",
namespaces={key: "http://www.opengis.net/kml/2.2"},
parser="lxml",
@@ -741,7 +778,7 @@ def test_empty_attrs_only(parser):
ValueError,
match=("xpath does not return any nodes or attributes"),
):
- read_xml(xml, xpath="./row", attrs_only=True, parser=parser)
+ read_xml(StringIO(xml), xpath="./row", attrs_only=True, parser=parser)
def test_empty_elems_only(parser):
@@ -756,7 +793,7 @@ def test_empty_elems_only(parser):
ValueError,
match=("xpath does not return any nodes or attributes"),
):
- read_xml(xml, xpath="./row", elems_only=True, parser=parser)
+ read_xml(StringIO(xml), xpath="./row", elems_only=True, parser=parser)
@td.skip_if_no("lxml")
@@ -781,8 +818,8 @@ def test_attribute_centric_xml():
</Stations>
</TrainSchedule>"""
- df_lxml = read_xml(xml, xpath=".//station")
- df_etree = read_xml(xml, xpath=".//station", parser="etree")
+ df_lxml = read_xml(StringIO(xml), xpath=".//station")
+ df_etree = read_xml(StringIO(xml), xpath=".//station", parser="etree")
df_iter_lx = read_xml_iterparse(xml, iterparse={"station": ["Name", "coords"]})
df_iter_et = read_xml_iterparse(
@@ -834,7 +871,10 @@ def test_repeat_names(parser):
</shape>
</shapes>"""
df_xpath = read_xml(
- xml, xpath=".//shape", parser=parser, names=["type_dim", "shape", "type_edge"]
+ StringIO(xml),
+ xpath=".//shape",
+ parser=parser,
+ names=["type_dim", "shape", "type_edge"],
)
df_iter = read_xml_iterparse(
@@ -876,7 +916,9 @@ def test_repeat_values_new_names(parser):
<family>ellipse</family>
</shape>
</shapes>"""
- df_xpath = read_xml(xml, xpath=".//shape", parser=parser, names=["name", "group"])
+ df_xpath = read_xml(
+ StringIO(xml), xpath=".//shape", parser=parser, names=["name", "group"]
+ )
df_iter = read_xml_iterparse(
xml,
@@ -919,7 +961,7 @@ def test_repeat_elements(parser):
</shape>
</shapes>"""
df_xpath = read_xml(
- xml,
+ StringIO(xml),
xpath=".//shape",
parser=parser,
names=["name", "family", "degrees", "sides"],
@@ -1154,8 +1196,8 @@ def test_style_charset():
</xsl:stylesheet>"""
- df_orig = read_xml(xml)
- df_style = read_xml(xml, stylesheet=xsl)
+ df_orig = read_xml(StringIO(xml))
+ df_style = read_xml(StringIO(xml), stylesheet=xsl)
tm.assert_frame_equal(df_orig, df_style)
@@ -1287,30 +1329,18 @@ def test_stylesheet_with_etree(kml_cta_rail_lines, xsl_flatten_doc):
@td.skip_if_no("lxml")
@pytest.mark.parametrize("val", ["", b""])
def test_empty_stylesheet(val):
- from lxml.etree import XMLSyntaxError
-
+ msg = (
+ "Passing literal xml to 'read_xml' is deprecated and "
+ "will be removed in a future version. To read from a "
+ "literal string, wrap it in a 'StringIO' object."
+ )
kml = os.path.join("data", "xml", "cta_rail_lines.kml")
- with pytest.raises(
- XMLSyntaxError, match=("Document is empty|Start tag expected, '<' not found")
- ):
+ with pytest.raises(FutureWarning, match=msg):
read_xml(kml, stylesheet=val)
# ITERPARSE
-
-
-def test_string_error(parser):
- with pytest.raises(
- ParserError, match=("iterparse is designed for large XML files")
- ):
- read_xml(
- xml_default_nmsp,
- parser=parser,
- iterparse={"row": ["shape", "degrees", "sides", "date"]},
- )
-
-
def test_file_like_iterparse(xml_books, parser, mode):
with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f:
if mode == "r" and parser == "lxml":
@@ -1492,7 +1522,7 @@ def test_comment(parser):
</shapes>
<!-- comment after root -->"""
- df_xpath = read_xml(xml, xpath=".//shape", parser=parser)
+ df_xpath = read_xml(StringIO(xml), xpath=".//shape", parser=parser)
df_iter = read_xml_iterparse(
xml, parser=parser, iterparse={"shape": ["name", "type"]}
@@ -1528,7 +1558,7 @@ def test_dtd(parser):
</shape>
</shapes>"""
- df_xpath = read_xml(xml, xpath=".//shape", parser=parser)
+ df_xpath = read_xml(StringIO(xml), xpath=".//shape", parser=parser)
df_iter = read_xml_iterparse(
xml, parser=parser, iterparse={"shape": ["name", "type"]}
@@ -1564,7 +1594,7 @@ def test_processing_instruction(parser):
</shape>
</shapes>"""
- df_xpath = read_xml(xml, xpath=".//shape", parser=parser)
+ df_xpath = read_xml(StringIO(xml), xpath=".//shape", parser=parser)
df_iter = read_xml_iterparse(
xml, parser=parser, iterparse={"shape": ["name", "type"]}
@@ -1842,7 +1872,7 @@ def test_online_stylesheet():
"""
df_xsl = read_xml(
- xml,
+ StringIO(xml),
xpath=".//tr[td and position() <= 6]",
names=["title", "artist"],
stylesheet=xsl,
@@ -1982,7 +2012,7 @@ def test_read_xml_nullable_dtypes(parser, string_storage, dtype_backend):
string_array_na = ArrowStringArray(pa.array(["x", None]))
with pd.option_context("mode.string_storage", string_storage):
- result = read_xml(data, parser=parser, dtype_backend=dtype_backend)
+ result = read_xml(StringIO(data), parser=parser, dtype_backend=dtype_backend)
expected = DataFrame(
{
diff --git a/pandas/tests/io/xml/test_xml_dtypes.py b/pandas/tests/io/xml/test_xml_dtypes.py
index 911b540dbc380..fb24902efc0f5 100644
--- a/pandas/tests/io/xml/test_xml_dtypes.py
+++ b/pandas/tests/io/xml/test_xml_dtypes.py
@@ -1,5 +1,7 @@
from __future__ import annotations
+from io import StringIO
+
import pytest
from pandas.errors import ParserWarning
@@ -81,7 +83,7 @@ def read_xml_iterparse(data, **kwargs):
def test_dtype_single_str(parser):
- df_result = read_xml(xml_types, dtype={"degrees": "str"}, parser=parser)
+ df_result = read_xml(StringIO(xml_types), dtype={"degrees": "str"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
@@ -102,7 +104,7 @@ def test_dtype_single_str(parser):
def test_dtypes_all_str(parser):
- df_result = read_xml(xml_dates, dtype="string", parser=parser)
+ df_result = read_xml(StringIO(xml_dates), dtype="string", parser=parser)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
@@ -126,7 +128,7 @@ def test_dtypes_all_str(parser):
def test_dtypes_with_names(parser):
df_result = read_xml(
- xml_dates,
+ StringIO(xml_dates),
names=["Col1", "Col2", "Col3", "Col4"],
dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64[ns]"},
parser=parser,
@@ -153,7 +155,7 @@ def test_dtypes_with_names(parser):
def test_dtype_nullable_int(parser):
- df_result = read_xml(xml_types, dtype={"sides": "Int64"}, parser=parser)
+ df_result = read_xml(StringIO(xml_types), dtype={"sides": "Int64"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
@@ -174,7 +176,7 @@ def test_dtype_nullable_int(parser):
def test_dtype_float(parser):
- df_result = read_xml(xml_types, dtype={"degrees": "float"}, parser=parser)
+ df_result = read_xml(StringIO(xml_types), dtype={"degrees": "float"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
@@ -214,7 +216,7 @@ def test_both_dtype_converters(parser):
with tm.assert_produces_warning(ParserWarning, match="Both a converter and dtype"):
df_result = read_xml(
- xml_types,
+ StringIO(xml_types),
dtype={"degrees": "str"},
converters={"degrees": str},
parser=parser,
@@ -235,7 +237,9 @@ def test_both_dtype_converters(parser):
def test_converters_str(parser):
- df_result = read_xml(xml_types, converters={"degrees": str}, parser=parser)
+ df_result = read_xml(
+ StringIO(xml_types), converters={"degrees": str}, parser=parser
+ )
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
@@ -258,7 +262,7 @@ def test_converters_str(parser):
def test_converters_date(parser):
convert_to_datetime = lambda x: to_datetime(x)
df_result = read_xml(
- xml_dates, converters={"date": convert_to_datetime}, parser=parser
+ StringIO(xml_dates), converters={"date": convert_to_datetime}, parser=parser
)
df_iter = read_xml_iterparse(
xml_dates,
@@ -305,7 +309,7 @@ def test_callable_str_converters(xml_books, parser, iterparse):
def test_parse_dates_column_name(parser):
- df_result = read_xml(xml_dates, parse_dates=["date"], parser=parser)
+ df_result = read_xml(StringIO(xml_dates), parse_dates=["date"], parser=parser)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
@@ -327,7 +331,7 @@ def test_parse_dates_column_name(parser):
def test_parse_dates_column_index(parser):
- df_result = read_xml(xml_dates, parse_dates=[3], parser=parser)
+ df_result = read_xml(StringIO(xml_dates), parse_dates=[3], parser=parser)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
@@ -349,7 +353,7 @@ def test_parse_dates_column_index(parser):
def test_parse_dates_true(parser):
- df_result = read_xml(xml_dates, parse_dates=True, parser=parser)
+ df_result = read_xml(StringIO(xml_dates), parse_dates=True, parser=parser)
df_iter = read_xml_iterparse(
xml_dates,
@@ -401,7 +405,7 @@ def test_parse_dates_dictionary(parser):
</data>"""
df_result = read_xml(
- xml, parse_dates={"date_end": ["year", "month", "day"]}, parser=parser
+ StringIO(xml), parse_dates={"date_end": ["year", "month", "day"]}, parser=parser
)
df_iter = read_xml_iterparse(
xml,
@@ -459,7 +463,7 @@ def test_day_first_parse_dates(parser):
with tm.assert_produces_warning(
UserWarning, match="Parsing dates in %d/%m/%Y format"
):
- df_result = read_xml(xml, parse_dates=["date"], parser=parser)
+ df_result = read_xml(StringIO(xml), parse_dates=["date"], parser=parser)
df_iter = read_xml_iterparse(
xml,
parse_dates=["date"],
| - [X] closes #53767
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/53809 | 2023-06-22T23:48:53Z | 2023-07-11T20:52:05Z | 2023-07-11T20:52:05Z | 2024-02-17T17:20:09Z |
REF: simplify merge code | diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 4d1f8bd6301d0..d406145e62ad7 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -717,13 +717,23 @@ def __init__(
self.left_on = self.right_on = [cross_col]
self._cross = cross_col
- # note this function has side effects
(
self.left_join_keys,
self.right_join_keys,
self.join_names,
+ left_drop,
+ right_drop,
) = self._get_merge_keys()
+ if left_drop:
+ self.left = self.left._drop_labels_or_levels(left_drop)
+
+ if right_drop:
+ self.right = self.right._drop_labels_or_levels(right_drop)
+
+ self._maybe_require_matching_dtypes(self.left_join_keys, self.right_join_keys)
+ self._validate_tolerance(self.left_join_keys)
+
# validate the merge keys dtypes. We may need to coerce
# to avoid incompatible dtypes
self._maybe_coerce_merge_keys()
@@ -732,7 +742,17 @@ def __init__(
# check if columns specified as unique
# are in fact unique.
if validate is not None:
- self._validate(validate)
+ self._validate_validate_kwd(validate)
+
+ def _maybe_require_matching_dtypes(
+ self, left_join_keys: list[ArrayLike], right_join_keys: list[ArrayLike]
+ ) -> None:
+ # Overridden by AsOfMerge
+ pass
+
+ def _validate_tolerance(self, left_join_keys: list[ArrayLike]) -> None:
+ # Overridden by AsOfMerge
+ pass
@final
def _reindex_and_concat(
@@ -1127,24 +1147,21 @@ def _create_join_index(
index = index.append(Index([fill_value]))
return index.take(indexer)
+ @final
def _get_merge_keys(
self,
- ) -> tuple[list[ArrayLike], list[ArrayLike], list[Hashable]]:
+ ) -> tuple[
+ list[ArrayLike],
+ list[ArrayLike],
+ list[Hashable],
+ list[Hashable],
+ list[Hashable],
+ ]:
"""
- Note: has side effects (copy/delete key columns)
-
- Parameters
- ----------
- left
- right
- on
-
Returns
-------
- left_keys, right_keys, join_names
+ left_keys, right_keys, join_names, left_drop, right_drop
"""
- # left_keys, right_keys entries can actually be anything listlike
- # with a 'dtype' attr
left_keys: list[ArrayLike] = []
right_keys: list[ArrayLike] = []
join_names: list[Hashable] = []
@@ -1264,13 +1281,7 @@ def _get_merge_keys(
else:
left_keys = [self.left.index._values]
- if left_drop:
- self.left = self.left._drop_labels_or_levels(left_drop)
-
- if right_drop:
- self.right = self.right._drop_labels_or_levels(right_drop)
-
- return left_keys, right_keys, join_names
+ return left_keys, right_keys, join_names, left_drop, right_drop
@final
def _maybe_coerce_merge_keys(self) -> None:
@@ -1556,7 +1567,8 @@ def _validate_left_right_on(self, left_on, right_on):
return left_on, right_on
- def _validate(self, validate: str) -> None:
+ @final
+ def _validate_validate_kwd(self, validate: str) -> None:
# Check uniqueness of each
if self.left_index:
left_unique = self.orig_left.index.is_unique
@@ -1811,19 +1823,14 @@ def __init__(
def get_result(self, copy: bool | None = True) -> DataFrame:
join_index, left_indexer, right_indexer = self._get_join_info()
- llabels, rlabels = _items_overlap_with_suffix(
- self.left._info_axis, self.right._info_axis, self.suffixes
- )
-
left_join_indexer: npt.NDArray[np.intp] | None
right_join_indexer: npt.NDArray[np.intp] | None
if self.fill_method == "ffill":
if left_indexer is None:
raise TypeError("left_indexer cannot be None")
- left_indexer, right_indexer = cast(np.ndarray, left_indexer), cast(
- np.ndarray, right_indexer
- )
+ left_indexer = cast("npt.NDArray[np.intp]", left_indexer)
+ right_indexer = cast("npt.NDArray[np.intp]", right_indexer)
left_join_indexer = libjoin.ffill_indexer(left_indexer)
right_join_indexer = libjoin.ffill_indexer(right_indexer)
else:
@@ -1888,6 +1895,18 @@ def __init__(
self.allow_exact_matches = allow_exact_matches
self.direction = direction
+ # check 'direction' is valid
+ if self.direction not in ["backward", "forward", "nearest"]:
+ raise MergeError(f"direction invalid: {self.direction}")
+
+ # validate allow_exact_matches
+ if not is_bool(self.allow_exact_matches):
+ msg = (
+ "allow_exact_matches must be boolean, "
+ f"passed {self.allow_exact_matches}"
+ )
+ raise MergeError(msg)
+
_OrderedMerge.__init__(
self,
left,
@@ -1975,17 +1994,12 @@ def _validate_left_right_on(self, left_on, right_on):
left_on = self.left_by + list(left_on)
right_on = self.right_by + list(right_on)
- # check 'direction' is valid
- if self.direction not in ["backward", "forward", "nearest"]:
- raise MergeError(f"direction invalid: {self.direction}")
-
return left_on, right_on
- def _get_merge_keys(
- self,
- ) -> tuple[list[ArrayLike], list[ArrayLike], list[Hashable]]:
- # note this function has side effects
- (left_join_keys, right_join_keys, join_names) = super()._get_merge_keys()
+ def _maybe_require_matching_dtypes(
+ self, left_join_keys: list[ArrayLike], right_join_keys: list[ArrayLike]
+ ) -> None:
+ # TODO: why do we do this for AsOfMerge but not the others?
# validate index types are the same
for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)):
@@ -2012,6 +2026,7 @@ def _get_merge_keys(
)
raise MergeError(msg)
+ def _validate_tolerance(self, left_join_keys: list[ArrayLike]) -> None:
# validate tolerance; datetime.timedelta or Timedelta if we have a DTI
if self.tolerance is not None:
if self.left_index:
@@ -2046,16 +2061,6 @@ def _get_merge_keys(
else:
raise MergeError("key must be integer, timestamp or float")
- # validate allow_exact_matches
- if not is_bool(self.allow_exact_matches):
- msg = (
- "allow_exact_matches must be boolean, "
- f"passed {self.allow_exact_matches}"
- )
- raise MergeError(msg)
-
- return left_join_keys, right_join_keys, join_names
-
def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
"""return the join indexers"""
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/53808 | 2023-06-22T23:40:52Z | 2023-06-23T17:18:50Z | 2023-06-23T17:18:50Z | 2023-06-23T17:21:18Z |
REF: de-duplicate Block.diff, remove axis kwd | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index c3574829f9b0e..23ed053521baf 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1441,8 +1441,8 @@ def diff(arr, n: int, axis: AxisInt = 0):
arr = arr.to_numpy()
dtype = arr.dtype
- if not isinstance(dtype, np.dtype):
- # i.e ExtensionDtype
+ if not isinstance(arr, np.ndarray):
+ # i.e ExtensionArray
if hasattr(arr, f"__{op.__name__}__"):
if axis != 0:
raise ValueError(f"cannot diff {type(arr).__name__} on axis={axis}")
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 635073efe9357..805366580bb50 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9479,7 +9479,7 @@ def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:
# With periods=0 this is equivalent to a diff with axis=0
axis = 0
- new_data = self._mgr.diff(n=periods, axis=axis)
+ new_data = self._mgr.diff(n=periods)
return self._constructor(new_data).__finalize__(self, "diff")
# ----------------------------------------------------------------------
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py
index 098a78fc54b71..ccb181c0537dc 100644
--- a/pandas/core/internals/array_manager.py
+++ b/pandas/core/internals/array_manager.py
@@ -353,9 +353,9 @@ def putmask(self, mask, new, align: bool = True) -> Self:
new=new,
)
- def diff(self, n: int, axis: AxisInt) -> Self:
- assert self.ndim == 2 and axis == 0 # caller ensures
- return self.apply(algos.diff, n=n, axis=axis)
+ def diff(self, n: int) -> Self:
+ assert self.ndim == 2 # caller ensures
+ return self.apply(algos.diff, n=n)
def interpolate(self, **kwargs) -> Self:
return self.apply_with_block("interpolate", swap_axis=False, **kwargs)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index ae820a40005df..e61d233a0ae84 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1426,10 +1426,12 @@ def interpolate(
nb = self.make_block_same_class(data, refs=refs)
return nb._maybe_downcast([nb], downcast, using_cow)
- def diff(self, n: int, axis: AxisInt = 1) -> list[Block]:
+ @final
+ def diff(self, n: int) -> list[Block]:
"""return block for the diff of the values"""
- # only reached with ndim == 2 and axis == 1
- new_values = algos.diff(self.values, n, axis=axis)
+ # only reached with ndim == 2
+ # TODO(EA2D): transpose will be unnecessary with 2D EAs
+ new_values = algos.diff(self.values.T, n, axis=0).T
return [self.make_block(values=new_values)]
def shift(
@@ -2067,12 +2069,6 @@ def slice_block_rows(self, slicer: slice) -> Self:
new_values = self.values[slicer]
return type(self)(new_values, self._mgr_locs, ndim=self.ndim, refs=self.refs)
- def diff(self, n: int, axis: AxisInt = 1) -> list[Block]:
- # only reached with ndim == 2 and axis == 1
- # TODO(EA2D): Can share with NDArrayBackedExtensionBlock
- new_values = algos.diff(self.values, n, axis=0)
- return [self.make_block(values=new_values)]
-
def shift(
self, periods: int, axis: AxisInt = 0, fill_value: Any = None
) -> list[Block]:
@@ -2191,32 +2187,6 @@ def is_view(self) -> bool:
# check the ndarray values of the DatetimeIndex values
return self.values._ndarray.base is not None
- def diff(self, n: int, axis: AxisInt = 0) -> list[Block]:
- """
- 1st discrete difference.
-
- Parameters
- ----------
- n : int
- Number of periods to diff.
- axis : int, default 0
- Axis to diff upon.
-
- Returns
- -------
- A list with a new Block.
-
- Notes
- -----
- The arguments here are mimicking shift so they are called correctly
- by apply.
- """
- # only reached with ndim == 2 and axis == 1
- values = self.values
-
- new_values = values - values.shift(n, axis=axis)
- return [self.make_block(new_values)]
-
def shift(
self, periods: int, axis: AxisInt = 0, fill_value: Any = None
) -> list[Block]:
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index bb745f61ab221..f11c909a5df41 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -411,10 +411,9 @@ def putmask(self, mask, new, align: bool = True) -> Self:
using_cow=using_copy_on_write(),
)
- def diff(self, n: int, axis: AxisInt) -> Self:
- # only reached with self.ndim == 2 and axis == 1
- axis = self._normalize_axis(axis)
- return self.apply("diff", n=n, axis=axis)
+ def diff(self, n: int) -> Self:
+ # only reached with self.ndim == 2
+ return self.apply("diff", n=n)
def interpolate(self, inplace: bool, **kwargs) -> Self:
return self.apply(
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/53807 | 2023-06-22T23:35:37Z | 2023-06-23T17:24:09Z | 2023-06-23T17:24:09Z | 2023-06-23T17:29:36Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.