title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
Revert "DEPR: ExtensionOpsMixin -> OpsMixin" | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index bb06bcc9b5aa8..873437d917515 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -492,7 +492,6 @@ Deprecations
- Deprecated :meth:`Index.asi8` for :class:`Index` subclasses other than :class:`.DatetimeIndex`, :class:`.TimedeltaIndex`, and :class:`PeriodIndex` (:issue:`37877`)
- The ``inplace`` parameter of :meth:`Categorical.remove_unused_categories` is deprecated and will be removed in a future version (:issue:`37643`)
- The ``null_counts`` parameter of :meth:`DataFrame.info` is deprecated and replaced by ``show_counts``. It will be removed in a future version (:issue:`37999`)
-- :class:`ExtensionOpsMixin` and :class:`ExtensionScalarOpsMixin` are deprecated and will be removed in a future version. Use ``pd.core.arraylike.OpsMixin`` instead (:issue:`37080`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index e3469bba23ccd..76b7877b0ac70 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -21,7 +21,6 @@
Union,
cast,
)
-import warnings
import numpy as np
@@ -1238,21 +1237,6 @@ class ExtensionOpsMixin:
with NumPy arrays.
"""
- def __init_subclass__(cls, **kwargs):
- # We use __init_subclass__ to handle deprecations
- super().__init_subclass__()
-
- if cls.__name__ != "ExtensionScalarOpsMixin":
- # We only want to warn for user-defined subclasses,
- # and cannot reference ExtensionScalarOpsMixin directly at this point.
- warnings.warn(
- "ExtensionOpsMixin and ExtensionScalarOpsMixin are deprecated "
- "and will be removed in a future version. Use "
- "pd.core.arraylike.OpsMixin instead.",
- FutureWarning,
- stacklevel=2,
- )
-
@classmethod
def _create_arithmetic_method(cls, op):
raise AbstractMethodError(cls)
diff --git a/pandas/tests/arrays/test_deprecations.py b/pandas/tests/arrays/test_deprecations.py
deleted file mode 100644
index 7e80072e8794f..0000000000000
--- a/pandas/tests/arrays/test_deprecations.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import pandas._testing as tm
-from pandas.core.arrays import (
- ExtensionArray,
- ExtensionOpsMixin,
- ExtensionScalarOpsMixin,
-)
-
-
-def test_extension_ops_mixin_deprecated():
- # GH#37080 deprecated in favor of OpsMixin
- with tm.assert_produces_warning(FutureWarning):
-
- class MySubclass(ExtensionOpsMixin, ExtensionArray):
- pass
-
- with tm.assert_produces_warning(FutureWarning):
-
- class MyOtherSubclass(ExtensionScalarOpsMixin, ExtensionArray):
- pass
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index d7bdca4b218b5..a713550dafa5c 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -7,13 +7,12 @@
import numpy as np
from pandas.core.dtypes.base import ExtensionDtype
-from pandas.core.dtypes.cast import maybe_cast_to_extension_array
from pandas.core.dtypes.common import is_dtype_equal, is_list_like, pandas_dtype
import pandas as pd
from pandas.api.extensions import no_default, register_extension_dtype
from pandas.core.arraylike import OpsMixin
-from pandas.core.arrays import ExtensionArray
+from pandas.core.arrays import ExtensionArray, ExtensionScalarOpsMixin
from pandas.core.indexers import check_array_indexer
@@ -46,7 +45,7 @@ def _is_numeric(self) -> bool:
return True
-class DecimalArray(OpsMixin, ExtensionArray):
+class DecimalArray(OpsMixin, ExtensionScalarOpsMixin, ExtensionArray):
__array_priority__ = 1000
def __init__(self, values, dtype=None, copy=False, context=None):
@@ -226,42 +225,6 @@ def convert_values(param):
return np.asarray(res, dtype=bool)
- _do_coerce = True # overriden in DecimalArrayWithoutCoercion
-
- def _arith_method(self, other, op):
- def convert_values(param):
- if isinstance(param, ExtensionArray) or is_list_like(param):
- ovalues = param
- else: # Assume its an object
- ovalues = [param] * len(self)
- return ovalues
-
- lvalues = self
- rvalues = convert_values(other)
-
- # If the operator is not defined for the underlying objects,
- # a TypeError should be raised
- res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
-
- def _maybe_convert(arr):
- if self._do_coerce:
- # https://github.com/pandas-dev/pandas/issues/22850
- # We catch all regular exceptions here, and fall back
- # to an ndarray.
- res = maybe_cast_to_extension_array(type(self), arr)
- if not isinstance(res, type(self)):
- # exception raised in _from_sequence; ensure we have ndarray
- res = np.asarray(arr)
- else:
- res = np.asarray(arr)
- return res
-
- if op.__name__ in {"divmod", "rdivmod"}:
- a, b = zip(*res)
- return _maybe_convert(a), _maybe_convert(b)
-
- return _maybe_convert(res)
-
def to_decimal(values, context=None):
return DecimalArray([decimal.Decimal(x) for x in values], context=context)
@@ -269,3 +232,6 @@ def to_decimal(values, context=None):
def make_data():
return [decimal.Decimal(random.random()) for _ in range(100)]
+
+
+DecimalArray._add_arithmetic_ops()
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index c3e84f75ebe68..233b658d29782 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -335,7 +335,12 @@ def _from_sequence(cls, scalars, dtype=None, copy=False):
class DecimalArrayWithoutCoercion(DecimalArrayWithoutFromSequence):
- _do_coerce = False
+ @classmethod
+ def _create_arithmetic_method(cls, op):
+ return cls._create_method(op, coerce_to_dtype=False)
+
+
+DecimalArrayWithoutCoercion._add_arithmetic_ops()
def test_combine_from_sequence_raises():
| Reverts pandas-dev/pandas#38142 (see discussion over there) | https://api.github.com/repos/pandas-dev/pandas/pulls/38158 | 2020-11-29T20:22:52Z | 2020-11-30T01:53:17Z | 2020-11-30T01:53:17Z | 2020-11-30T06:58:07Z |
BUG: assert_frame_equal exception for datetime #37609 | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index c9347b88f2072..6aff4da13b6e6 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -784,6 +784,9 @@ Other
- Passing an array with 2 or more dimensions to the :class:`Series` constructor now raises the more specific ``ValueError`` rather than a bare ``Exception`` (:issue:`35744`)
- Bug in ``dir`` where ``dir(obj)`` wouldn't show attributes defined on the instance for pandas objects (:issue:`37173`)
- Bug in :meth:`RangeIndex.difference` returning :class:`Int64Index` in some cases where it should return :class:`RangeIndex` (:issue:`38028`)
+- Fixed bug in :func:`assert_series_equal` when comparing a datetime-like array with an equivalent non extension dtype array (:issue:`37609`)
+
+
.. ---------------------------------------------------------------------------
diff --git a/pandas/_testing.py b/pandas/_testing.py
index bfff4301c2220..469f5e1bed6ba 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -1456,7 +1456,16 @@ def assert_series_equal(
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
- elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
+ elif is_extension_array_dtype_and_needs_i8_conversion(
+ left.dtype, right.dtype
+ ) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype):
+ assert_extension_array_equal(
+ left._values,
+ right._values,
+ check_dtype=check_dtype,
+ index_values=np.asarray(left.index),
+ )
+ elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
assert_extension_array_equal(
left._values,
@@ -1866,6 +1875,20 @@ def assert_copy(iter1, iter2, **eql_kwargs):
assert elem1 is not elem2, msg
+def is_extension_array_dtype_and_needs_i8_conversion(left_dtype, right_dtype) -> bool:
+ """
+ Checks that we have the combination of an ExtensionArraydtype and
+ a dtype that should be converted to int64
+
+ Returns
+ -------
+ bool
+
+ Related to issue #37609
+ """
+ return is_extension_array_dtype(left_dtype) and needs_i8_conversion(right_dtype)
+
+
def getCols(k):
return string.ascii_uppercase[:k]
diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py
index 40d2763a13489..8034ace479a62 100644
--- a/pandas/tests/util/test_assert_frame_equal.py
+++ b/pandas/tests/util/test_assert_frame_equal.py
@@ -272,6 +272,20 @@ def test_assert_frame_equal_ignore_extension_dtype_mismatch(right_dtype):
tm.assert_frame_equal(left, right, check_dtype=False)
+@pytest.mark.parametrize(
+ "dtype",
+ [
+ ("timedelta64[ns]"),
+ ("datetime64[ns, UTC]"),
+ ("Period[D]"),
+ ],
+)
+def test_assert_frame_equal_datetime_like_dtype_mismatch(dtype):
+ df1 = DataFrame({"a": []}, dtype=dtype)
+ df2 = DataFrame({"a": []})
+ tm.assert_frame_equal(df1, df2, check_dtype=False)
+
+
def test_allows_duplicate_labels():
left = DataFrame()
right = DataFrame().set_flags(allows_duplicate_labels=False)
| closes #37609
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Working with @richardwong8 on this issue.
This addresses issue #37609 where a check `needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)` in assert_series_equal caused an assert_extension_array on left._values and right._values when DatetimeArray or TimedeltaArray were used. This caused an exception to be raised when either the right or left was not an instance of ExtensionArray but was used with DatetimeArray or TimedeltaArray.
This would have been covered in the earlier check `is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype)`, however, DatetimeArray and TimedeltaArray are still experimental and their dtypes are not instances of ExtensionDtype subclass. | https://api.github.com/repos/pandas-dev/pandas/pulls/38157 | 2020-11-29T19:26:37Z | 2020-12-02T23:31:30Z | 2020-12-02T23:31:30Z | 2020-12-02T23:31:34Z |
CLN: remove _recast_datetimelike_result | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index b9226732d5a69..d35e6f900b87f 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -46,7 +46,6 @@
is_integer_dtype,
is_interval_dtype,
is_numeric_dtype,
- is_object_dtype,
is_scalar,
needs_i8_conversion,
)
@@ -1283,7 +1282,7 @@ def _wrap_applied_output_series(
# as we are stacking can easily have object dtypes here
so = self._selected_obj
if so.ndim == 2 and so.dtypes.apply(needs_i8_conversion).any():
- result = _recast_datetimelike_result(result)
+ result = result._convert(datetime=True)
else:
result = result._convert(datetime=True)
@@ -1836,40 +1835,3 @@ def nunique(self, dropna: bool = True) -> DataFrame:
return results
boxplot = boxplot_frame_groupby
-
-
-def _recast_datetimelike_result(result: DataFrame) -> DataFrame:
- """
- If we have date/time like in the original, then coerce dates
- as we are stacking can easily have object dtypes here.
-
- Parameters
- ----------
- result : DataFrame
-
- Returns
- -------
- DataFrame
-
- Notes
- -----
- - Assumes Groupby._selected_obj has ndim==2 and at least one
- datetimelike column
- """
- result = result.copy()
-
- obj_cols = [
- idx
- for idx in range(len(result.columns))
- if is_object_dtype(result.dtypes.iloc[idx])
- ]
-
- # See GH#26285
- for n in obj_cols:
- values = result.iloc[:, n].values
- converted = lib.maybe_convert_objects(
- values, convert_datetime=True, convert_timedelta=True
- )
-
- result.iloc[:, n] = converted
- return result
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38156 | 2020-11-29T19:14:28Z | 2020-11-30T13:21:03Z | 2020-11-30T13:21:02Z | 2020-11-30T14:51:00Z |
Backport PR #36927: BUG: Fix duplicates in intersection of multiindexes | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index 46c4ad4f35fe4..edc2f7327abfc 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -23,6 +23,7 @@ Fixed regressions
- Fixed regression in :meth:`DataFrame.groupby` aggregation with out-of-bounds datetime objects in an object-dtype column (:issue:`36003`)
- Fixed regression in ``df.groupby(..).rolling(..)`` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
- Fixed regression in :meth:`DataFrame.fillna` not filling ``NaN`` after other operations such as :meth:`DataFrame.pivot` (:issue:`36495`).
+- Fixed regression in :meth:`MultiIndex.intersection` returning duplicates when at least one of the indexes had duplicates (:issue:`36915`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index e4dee2b0a08ce..b0f64bd76a174 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2654,7 +2654,7 @@ def intersection(self, other, sort=False):
self._assert_can_do_setop(other)
other = ensure_index(other)
- if self.equals(other):
+ if self.equals(other) and not self.has_duplicates:
return self._get_reconciled_name_object(other)
if not is_dtype_equal(self.dtype, other.dtype):
@@ -2672,7 +2672,7 @@ def intersection(self, other, sort=False):
except TypeError:
pass
else:
- return self._wrap_setop_result(other, result)
+ return self._wrap_setop_result(other, algos.unique1d(result))
try:
indexer = Index(rvals).get_indexer(lvals)
@@ -2683,13 +2683,16 @@ def intersection(self, other, sort=False):
indexer = algos.unique1d(Index(rvals).get_indexer_non_unique(lvals)[0])
indexer = indexer[indexer != -1]
- taken = other.take(indexer)
+ taken = other.take(indexer).unique()
res_name = get_op_result_name(self, other)
if sort is None:
taken = algos.safe_sort(taken.values)
return self._shallow_copy(taken, name=res_name)
+ # Intersection has to be unique
+ assert algos.unique(taken._values).shape == taken._values.shape
+
taken.name = res_name
return taken
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index b9ba823ca1b0b..6ad82e81e7c30 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3398,6 +3398,8 @@ def intersection(self, other, sort=False):
other, result_names = self._convert_can_do_setop(other)
if self.equals(other):
+ if self.has_duplicates:
+ return self.unique()
return self
if not is_object_dtype(other.dtype):
@@ -3416,10 +3418,12 @@ def intersection(self, other, sort=False):
uniq_tuples = None # flag whether _inner_indexer was successful
if self.is_monotonic and other.is_monotonic:
try:
- uniq_tuples = self._inner_indexer(lvals, rvals)[0]
- sort = False # uniq_tuples is already sorted
+ inner_tuples = self._inner_indexer(lvals, rvals)[0]
+ sort = False # inner_tuples is already sorted
except TypeError:
pass
+ else:
+ uniq_tuples = algos.unique(inner_tuples)
if uniq_tuples is None:
other_uniq = set(rvals)
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 60f3d23aaed13..5e1b8cd8dc8ca 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -539,7 +539,11 @@ def _should_reindex_frame_op(
if fill_value is None and level is None and axis is default_axis:
# TODO: any other cases we should handle here?
cols = left.columns.intersection(right.columns)
- if not (cols.equals(left.columns) and cols.equals(right.columns)):
+
+ # Intersection is always unique so we have to check the unique columns
+ left_uniques = left.columns.unique()
+ right_uniques = right.columns.unique()
+ if not (cols.equals(left_uniques) and cols.equals(right_uniques)):
return True
return False
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 2349cb1dcc0c7..f43a33d088df0 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1209,7 +1209,9 @@ def _validate_specification(self):
raise MergeError("Must pass left_on or left_index=True")
else:
# use the common columns
- common_cols = self.left.columns.intersection(self.right.columns)
+ left_cols = self.left.columns
+ right_cols = self.right.columns
+ common_cols = left_cols.intersection(right_cols)
if len(common_cols) == 0:
raise MergeError(
"No common columns to perform merge on. "
@@ -1218,7 +1220,10 @@ def _validate_specification(self):
f"left_index={self.left_index}, "
f"right_index={self.right_index}"
)
- if not common_cols.is_unique:
+ if (
+ not left_cols.join(common_cols, how="inner").is_unique
+ or not right_cols.join(common_cols, how="inner").is_unique
+ ):
raise MergeError(f"Data columns not unique: {repr(common_cols)}")
self.left_on = self.right_on = common_cols
elif self.on is not None:
diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py
index d7427ee622977..8637c4cb8bffb 100644
--- a/pandas/tests/indexes/multi/test_setops.py
+++ b/pandas/tests/indexes/multi/test_setops.py
@@ -375,3 +375,26 @@ def test_setops_disallow_true(method):
with pytest.raises(ValueError, match="The 'sort' keyword only takes"):
getattr(idx1, method)(idx2, sort=True)
+
+
+@pytest.mark.parametrize(
+ ("tuples", "exp_tuples"),
+ [
+ ([("val1", "test1")], [("val1", "test1")]),
+ ([("val1", "test1"), ("val1", "test1")], [("val1", "test1")]),
+ (
+ [("val2", "test2"), ("val1", "test1")],
+ [("val2", "test2"), ("val1", "test1")],
+ ),
+ ],
+)
+def test_intersect_with_duplicates(tuples, exp_tuples):
+ # GH#36915
+ left = MultiIndex.from_tuples(tuples, names=["first", "second"])
+ right = MultiIndex.from_tuples(
+ [("val1", "test1"), ("val1", "test1"), ("val2", "test2")],
+ names=["first", "second"],
+ )
+ result = left.intersection(right)
+ expected = MultiIndex.from_tuples(exp_tuples, names=["first", "second"])
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 59ee88117a984..a8dedffae0e65 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -688,7 +688,7 @@ def test_intersection_monotonic(self, index2, keeps_name, sort):
@pytest.mark.parametrize(
"index2,expected_arr",
- [(Index(["B", "D"]), ["B"]), (Index(["B", "D", "A"]), ["A", "B", "A"])],
+ [(Index(["B", "D"]), ["B"]), (Index(["B", "D", "A"]), ["A", "B"])],
)
def test_intersection_non_monotonic_non_unique(self, index2, expected_arr, sort):
# non-monotonic non-unique
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index 1a40fe550be61..26d7c14b46e08 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -95,3 +95,13 @@ def test_union_dtypes(left, right, expected):
b = pd.Index([], dtype=right)
result = (a | b).dtype
assert result == expected
+
+
+@pytest.mark.parametrize("values", [[1, 2, 2, 3], [3, 3]])
+def test_intersection_duplicates(values):
+ # GH#31326
+ a = pd.Index(values)
+ b = pd.Index([3, 3])
+ result = a.intersection(b)
+ expected = pd.Index([3])
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 4fd3c688b8771..491ec97e5dee9 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -742,7 +742,7 @@ def test_overlapping_columns_error_message(self):
# #2649, #10639
df2.columns = ["key1", "foo", "foo"]
- msg = r"Data columns not unique: Index\(\['foo', 'foo'\], dtype='object'\)"
+ msg = r"Data columns not unique: Index\(\['foo'\], dtype='object'\)"
with pytest.raises(MergeError, match=msg):
merge(df, df2)
| Backport PR #36927 | https://api.github.com/repos/pandas-dev/pandas/pulls/38155 | 2020-11-29T19:10:15Z | 2020-11-30T12:13:25Z | 2020-11-30T12:13:25Z | 2020-11-30T12:13:29Z |
CLN: improve is_unique check in Index.intersection | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 3f89b0619e600..d1d71e584ae3d 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2379,6 +2379,10 @@ def unique(self, level=None):
"""
if level is not None:
self._validate_index_level(level)
+
+ if self.is_unique:
+ return self._shallow_copy()
+
result = super().unique()
return self._shallow_copy(result)
@@ -2864,7 +2868,7 @@ def _intersection(self, other, sort=False):
result = algos.safe_sort(result)
# Intersection has to be unique
- assert algos.unique(result).shape == result.shape
+ assert Index(result).is_unique
return result
| - [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
```
before after ratio
[e99e5ab3] [79d6a50f]
<cln_intersection~1> <cln_intersection>
- 918±20ns 819±2ns 0.89 index_object.Range.time_min_trivial
- 465±7ns 414±7ns 0.89 index_object.Range.time_get_loc_dec
- 464±6ns 411±7ns 0.89 index_object.Range.time_get_loc_inc
- 16.6±0.7ms 12.6±0.03ms 0.76 index_object.SetOperations.time_operation('date_string', 'union')
```
Seems to have no impact.
cc @jreback
| https://api.github.com/repos/pandas-dev/pandas/pulls/38154 | 2020-11-29T18:02:34Z | 2020-12-02T01:48:00Z | 2020-12-02T01:48:00Z | 2022-10-13T00:20:29Z |
BUG: loc raising KeyError for string slices in list-like indexer and DatetimeIndex | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index c9347b88f2072..e2b851d6ea81e 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -639,6 +639,7 @@ Indexing
- Bug in :meth:`DataFrame.loc` raising ``TypeError`` when non-integer slice was given to select values from :class:`MultiIndex` (:issue:`25165`, :issue:`24263`)
- Bug in :meth:`Series.at` returning :class:`Series` with one element instead of scalar when index is a :class:`MultiIndex` with one level (:issue:`38053`)
- Bug in :meth:`DataFrame.loc` returning and assigning elements in wrong order when indexer is differently ordered than the :class:`MultiIndex` to filter (:issue:`31330`, :issue:`34603`)
+- Bug in :meth:`DataFrame.loc` raising ``KeyError`` for list-like indexers with string slices for :class:`DatetimeIndex` (:issue:`27180`)
- Bug in :meth:`DataFrame.loc` and :meth:`DataFrame.__getitem__` raising ``KeyError`` when columns were :class:`MultiIndex` with only one level (:issue:`29749`)
- Bug in :meth:`Series.__getitem__` and :meth:`DataFrame.__getitem__` raising blank ``KeyError`` without missing keys for :class:`IntervalIndex` (:issue:`27365`)
- Bug in setting a new label on a :class:`DataFrame` or :class:`Series` with a :class:`CategoricalIndex` incorrectly raising ``TypeError`` when the new label is not among the index's categories (:issue:`38098`)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 8329c41a74596..80357561b32e9 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -819,6 +819,25 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None):
# --------------------------------------------------------------------
+ def _convert_listlike_indexer(self, key):
+ if not isinstance(key, list):
+ # There are no slices, so we can dispatch back
+ return super()._convert_listlike_indexer(key)
+
+ new_indexer = []
+ positions = list(range(len(self)))
+ try:
+ for k in key:
+ # Convert slices to list of integers
+ indexer = positions[self.get_loc(k)]
+ if not isinstance(indexer, list):
+ indexer = [indexer]
+ new_indexer.extend(indexer)
+ except KeyError:
+ # Dispatch to base method for handling of KeyErrors
+ return super()._convert_listlike_indexer(key)
+ return np.array(new_indexer), key
+
@property
def inferred_type(self) -> str:
# b/c datetime is represented as microseconds since the epoch, make
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index d00fe58265a2e..9bf66cfd02942 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -231,3 +231,21 @@ def test_loc_setitem_with_existing_dst(self):
dtype=object,
)
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "indexer", [["2001-01", "2001-01-30"], ["2001-01", Timestamp("2001-01-30")]]
+ )
+ def test_loc_getitem_partial_strings_in_list(self, indexer):
+ # GH#27180
+ ser = Series(1, index=date_range("2001-01-29", periods=60))
+ result = ser.loc[indexer]
+ expected = Series(
+ 1,
+ index=[
+ Timestamp("2001-01-29"),
+ Timestamp("2001-01-30"),
+ Timestamp("2001-01-31"),
+ Timestamp("2001-01-30"),
+ ],
+ )
+ tm.assert_series_equal(result, expected)
| - [x] closes #27180
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I looked into this and added an implementation for list-like indexers containing slices and DatetimeIndexes. If we want to support this we probably have to do something like this. | https://api.github.com/repos/pandas-dev/pandas/pulls/38153 | 2020-11-29T16:56:26Z | 2021-01-01T22:34:37Z | null | 2021-01-01T22:34:51Z |
CLN: remove unreachable in maybe_cast_result | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index fe40bc42887c4..cf81e6f173bdd 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -297,7 +297,9 @@ def trans(x):
return result
-def maybe_cast_result(result, obj: "Series", numeric_only: bool = False, how: str = ""):
+def maybe_cast_result(
+ result: ArrayLike, obj: "Series", numeric_only: bool = False, how: str = ""
+) -> ArrayLike:
"""
Try casting result to a different type if appropriate
@@ -320,19 +322,20 @@ def maybe_cast_result(result, obj: "Series", numeric_only: bool = False, how: st
dtype = obj.dtype
dtype = maybe_cast_result_dtype(dtype, how)
- if not is_scalar(result):
- if (
- is_extension_array_dtype(dtype)
- and not is_categorical_dtype(dtype)
- and dtype.kind != "M"
- ):
- # We have to special case categorical so as not to upcast
- # things like counts back to categorical
- cls = dtype.construct_array_type()
- result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
+ assert not is_scalar(result)
+
+ if (
+ is_extension_array_dtype(dtype)
+ and not is_categorical_dtype(dtype)
+ and dtype.kind != "M"
+ ):
+ # We have to special case categorical so as not to upcast
+ # things like counts back to categorical
+ cls = dtype.construct_array_type()
+ result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
- elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
- result = maybe_downcast_to_dtype(result, dtype)
+ elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
+ result = maybe_downcast_to_dtype(result, dtype)
return result
| https://api.github.com/repos/pandas-dev/pandas/pulls/38152 | 2020-11-29T16:50:13Z | 2020-11-29T19:37:00Z | 2020-11-29T19:37:00Z | 2020-11-29T19:41:35Z | |
CLN: remove unused coerce arg in NDFrame._convert | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index fe40bc42887c4..e6fd63cda5ed4 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1177,45 +1177,32 @@ def soft_convert_objects(
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
- coerce: bool = False,
copy: bool = True,
):
- """ if we have an object dtype, try to coerce dates and/or numbers """
+ """
+ Try to coerce datetime, timedelta, and numeric object-dtype columns
+ to inferred dtype.
+
+ Parameters
+ ----------
+ values : np.ndarray[object]
+ datetime : bool, default True
+ numeric: bool, default True
+ timedelta : bool, default True
+ copy : bool, default True
+
+ Returns
+ -------
+ np.ndarray
+ """
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
- validate_bool_kwarg(coerce, "coerce")
validate_bool_kwarg(copy, "copy")
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError("At least one of datetime, numeric or timedelta must be True.")
- elif conversion_count > 1 and coerce:
- raise ValueError(
- "Only one of 'datetime', 'numeric' or "
- "'timedelta' can be True when coerce=True."
- )
-
- if not is_object_dtype(values.dtype):
- # If not object, do not attempt conversion
- values = values.copy() if copy else values
- return values
-
- # If 1 flag is coerce, ensure 2 others are False
- if coerce:
- # Immediate return if coerce
- if datetime:
- from pandas import to_datetime
-
- return to_datetime(values, errors="coerce").to_numpy()
- elif timedelta:
- from pandas import to_timedelta
-
- return to_timedelta(values, errors="coerce").to_numpy()
- elif numeric:
- from pandas import to_numeric
-
- return to_numeric(values, errors="coerce")
# Soft conversions
if datetime:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c7448cf8f8e40..c810bbb4bfa81 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5997,7 +5997,6 @@ def _convert(
datetime: bool_t = False,
numeric: bool_t = False,
timedelta: bool_t = False,
- coerce: bool_t = False,
) -> FrameOrSeries:
"""
Attempt to infer better dtype for object columns
@@ -6011,9 +6010,6 @@ def _convert(
unconvertible values becoming NaN.
timedelta : bool, default False
If True, convert to timedelta where possible.
- coerce : bool, default False
- If True, force conversion with unconvertible values converted to
- nulls (NaN or NaT).
Returns
-------
@@ -6022,13 +6018,11 @@ def _convert(
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
- validate_bool_kwarg(coerce, "coerce")
return self._constructor(
self._mgr.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
- coerce=coerce,
copy=True,
)
).__finalize__(self)
@@ -6076,9 +6070,7 @@ def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
- self._mgr.convert(
- datetime=True, numeric=False, timedelta=True, coerce=False, copy=True
- )
+ self._mgr.convert(datetime=True, numeric=False, timedelta=True, copy=True)
).__finalize__(self, method="infer_objects")
@final
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 74b5a184df95d..3aaa376242fea 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -700,7 +700,6 @@ def convert(
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
- coerce: bool = False,
) -> List["Block"]:
"""
attempt to coerce any object types to better types return a copy
@@ -2506,12 +2505,12 @@ def convert(
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
- coerce: bool = False,
) -> List["Block"]:
"""
- attempt to coerce any object types to better types return a copy of
+ attempt to cast any object types to better types return a copy of
the block (if copy = True) by definition we ARE an ObjectBlock!!!!!
"""
+
# operate column-by-column
def f(mask, val, idx):
shape = val.shape
@@ -2520,7 +2519,6 @@ def f(mask, val, idx):
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
- coerce=coerce,
copy=copy,
)
if isinstance(values, np.ndarray):
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 15b85b3200da3..168dba25ba29c 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -636,7 +636,6 @@ def convert(
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
- coerce: bool = False,
) -> "BlockManager":
return self.apply(
"convert",
@@ -644,7 +643,6 @@ def convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
- coerce=coerce,
)
def replace(self, to_replace, value, inplace: bool, regex: bool) -> "BlockManager":
diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py
index 7babc6853aef3..fbe6d1f595874 100644
--- a/pandas/tests/frame/methods/test_to_csv.py
+++ b/pandas/tests/frame/methods/test_to_csv.py
@@ -251,7 +251,7 @@ def make_dtnat_arr(n, nnat=None):
df = DataFrame(dict(a=s1, b=s2))
df.to_csv(pth, chunksize=chunksize)
- recons = self.read_csv(pth)._convert(datetime=True, coerce=True)
+ recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 9a883aac69e6b..ba8b1a8a0679d 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -14,7 +14,15 @@
from pandas.errors import ParserError
import pandas.util._test_decorators as td
-from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range, read_csv
+from pandas import (
+ DataFrame,
+ MultiIndex,
+ Series,
+ Timestamp,
+ date_range,
+ read_csv,
+ to_datetime,
+)
import pandas._testing as tm
from pandas.io.common import file_path_to_url
@@ -610,7 +618,7 @@ def try_remove_ws(x):
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ["Closing Date", "Updated Date"]
- converted[date_cols] = converted[date_cols]._convert(datetime=True, coerce=True)
+ converted[date_cols] = converted[date_cols].apply(to_datetime)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
diff --git a/pandas/tests/series/methods/test_convert.py b/pandas/tests/series/methods/test_convert.py
index b213e4a6c4c8a..f052f4423d32a 100644
--- a/pandas/tests/series/methods/test_convert.py
+++ b/pandas/tests/series/methods/test_convert.py
@@ -3,45 +3,23 @@
import numpy as np
import pytest
-from pandas import NaT, Series, Timestamp
+from pandas import Series, Timestamp
import pandas._testing as tm
class TestConvert:
def test_convert(self):
# GH#10265
- # Tests: All to nans, coerce, true
- # Test coercion returns correct type
- ser = Series(["a", "b", "c"])
- results = ser._convert(datetime=True, coerce=True)
- expected = Series([NaT] * 3)
- tm.assert_series_equal(results, expected)
-
- results = ser._convert(numeric=True, coerce=True)
- expected = Series([np.nan] * 3)
- tm.assert_series_equal(results, expected)
-
- expected = Series([NaT] * 3, dtype=np.dtype("m8[ns]"))
- results = ser._convert(timedelta=True, coerce=True)
- tm.assert_series_equal(results, expected)
-
dt = datetime(2001, 1, 1, 0, 0)
td = dt - datetime(2000, 1, 1, 0, 0)
# Test coercion with mixed types
ser = Series(["a", "3.1415", dt, td])
- results = ser._convert(datetime=True, coerce=True)
- expected = Series([NaT, NaT, dt, NaT])
- tm.assert_series_equal(results, expected)
- results = ser._convert(numeric=True, coerce=True)
+ results = ser._convert(numeric=True)
expected = Series([np.nan, 3.1415, np.nan, np.nan])
tm.assert_series_equal(results, expected)
- results = ser._convert(timedelta=True, coerce=True)
- expected = Series([NaT, NaT, NaT, td], dtype=np.dtype("m8[ns]"))
- tm.assert_series_equal(results, expected)
-
# Test standard conversion returns original
results = ser._convert(datetime=True)
tm.assert_series_equal(results, ser)
@@ -116,19 +94,6 @@ def test_convert(self):
datetime(2001, 1, 3, 0, 0),
]
)
- s2 = Series(
- [
- datetime(2001, 1, 1, 0, 0),
- datetime(2001, 1, 2, 0, 0),
- datetime(2001, 1, 3, 0, 0),
- "foo",
- 1.0,
- 1,
- Timestamp("20010104"),
- "20010105",
- ],
- dtype="O",
- )
result = ser._convert(datetime=True)
expected = Series(
@@ -137,35 +102,12 @@ def test_convert(self):
)
tm.assert_series_equal(result, expected)
- result = ser._convert(datetime=True, coerce=True)
- tm.assert_series_equal(result, expected)
-
- expected = Series(
- [
- Timestamp("20010101"),
- Timestamp("20010102"),
- Timestamp("20010103"),
- NaT,
- NaT,
- NaT,
- Timestamp("20010104"),
- Timestamp("20010105"),
- ],
- dtype="M8[ns]",
- )
- result = s2._convert(datetime=True, numeric=False, timedelta=False, coerce=True)
- tm.assert_series_equal(result, expected)
- result = s2._convert(datetime=True, coerce=True)
- tm.assert_series_equal(result, expected)
-
- ser = Series(["foo", "bar", 1, 1.0], dtype="O")
- result = ser._convert(datetime=True, coerce=True)
- expected = Series([NaT] * 2 + [Timestamp(1)] * 2)
+ result = ser._convert(datetime=True)
tm.assert_series_equal(result, expected)
# preserver if non-object
ser = Series([1], dtype="float32")
- result = ser._convert(datetime=True, coerce=True)
+ result = ser._convert(datetime=True)
tm.assert_series_equal(result, ser)
# FIXME: dont leave commented-out
@@ -174,16 +116,6 @@ def test_convert(self):
# result = res._convert(convert_dates=True,convert_numeric=False)
# assert result.dtype == 'M8[ns]'
- # dateutil parses some single letters into today's value as a date
- expected = Series([NaT])
- for x in "abcdefghijklmnopqrstuvwxyz":
- ser = Series([x])
- result = ser._convert(datetime=True, coerce=True)
- tm.assert_series_equal(result, expected)
- ser = Series([x.upper()])
- result = ser._convert(datetime=True, coerce=True)
- tm.assert_series_equal(result, expected)
-
def test_convert_no_arg_error(self):
ser = Series(["1.0", "2"])
msg = r"At least one of datetime, numeric or timedelta must be True\."
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38151 | 2020-11-29T16:06:47Z | 2020-11-29T21:53:26Z | 2020-11-29T21:53:26Z | 2020-11-29T22:03:29Z |
BUG: loc dropping levels when df has only one row | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 83bff6d7bfb2d..d261f675f3749 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -234,7 +234,7 @@ Indexing
- Bug in :meth:`CategoricalIndex.get_indexer` failing to raise ``InvalidIndexError`` when non-unique (:issue:`38372`)
- Bug in inserting many new columns into a :class:`DataFrame` causing incorrect subsequent indexing behavior (:issue:`38380`)
- Bug in :meth:`DataFrame.iloc.__setitem__` and :meth:`DataFrame.loc.__setitem__` with mixed dtypes when setting with a dictionary value (:issue:`38335`)
--
+- Bug in :meth:`DataFrame.loc` dropping levels of :class:`MultiIndex` when :class:`DataFrame` used as input has only one row (:issue:`10521`)
-
Missing
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 693b09336fefc..60b526426d413 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -842,8 +842,12 @@ def _getitem_nested_tuple(self, tup: Tuple):
if self.name != "loc":
# This should never be reached, but lets be explicit about it
raise ValueError("Too many indices")
- with suppress(IndexingError):
- return self._handle_lowerdim_multi_index_axis0(tup)
+ if self.ndim == 1 or not any(isinstance(x, slice) for x in tup):
+ # GH#10521 Series should reduce MultiIndex dimensions instead of
+ # DataFrame, IndexingError is not raised when slice(None,None,None)
+ # with one row.
+ with suppress(IndexingError):
+ return self._handle_lowerdim_multi_index_axis0(tup)
# this is a series with a multi-index specified a tuple of
# selectors
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 42525fc575397..37153bef8d77b 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -695,3 +695,17 @@ def test_loc_getitem_index_differently_ordered_slice_none():
columns=["a", "b"],
)
tm.assert_frame_equal(result, expected)
+
+
+def test_loc_getitem_drops_levels_for_one_row_dataframe():
+ # GH#10521
+ mi = MultiIndex.from_arrays([["x"], ["y"], ["z"]], names=["a", "b", "c"])
+ df = DataFrame({"d": [0]}, index=mi)
+ expected = df.copy()
+ result = df.loc["x", :, "z"]
+ tm.assert_frame_equal(result, expected)
+
+ ser = Series([0], index=mi)
+ result = ser.loc["x", :, "z"]
+ expected = Series([0], index=Index(["y"], name="b"))
+ tm.assert_series_equal(result, expected)
| - [x] closes #10521
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Originally this case was dispatched to xs, which dispatched to ``_get_loc_level``, which raised if a slice is not ``slice(None, None)`` and went back to our original function. If the ``DataFrame`` contains only one row, ``_get_loc_level`` does not raise and hence we drop levels.
I added len(self.obj) for performance reasons becasue I am not sure if the any check for slices is faster than supressing the IndexingError in general. | https://api.github.com/repos/pandas-dev/pandas/pulls/38150 | 2020-11-29T16:01:18Z | 2020-12-30T13:50:13Z | 2020-12-30T13:50:13Z | 2021-07-04T16:24:03Z |
ENH: Improve performance for df.__setitem__ with list-like indexers | diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index 74e0a3a434cde..4fd91c8aafe4b 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -358,6 +358,14 @@ def time_assign_with_setitem(self):
for i in range(100):
self.df[i] = np.random.randn(self.N)
+ def time_assign_list_like_with_setitem(self):
+ np.random.seed(1234)
+ self.df[list(range(100))] = np.random.randn(self.N, 100)
+
+ def time_assign_list_of_columns_concat(self):
+ df = DataFrame(np.random.randn(self.N, 100))
+ concat([self.df, df], axis=1)
+
class ChainIndexing:
diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index edc2f7327abfc..d0a935bfb4e32 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -23,6 +23,7 @@ Fixed regressions
- Fixed regression in :meth:`DataFrame.groupby` aggregation with out-of-bounds datetime objects in an object-dtype column (:issue:`36003`)
- Fixed regression in ``df.groupby(..).rolling(..)`` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
- Fixed regression in :meth:`DataFrame.fillna` not filling ``NaN`` after other operations such as :meth:`DataFrame.pivot` (:issue:`36495`).
+- Fixed performance regression for :meth:`DataFrame.__setitem__` with list-like indexers (:issue:`37954`)
- Fixed regression in :meth:`MultiIndex.intersection` returning duplicates when at least one of the indexes had duplicates (:issue:`36915`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index f6cf691ea911c..28e59df995a83 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -672,17 +672,8 @@ def _ensure_listlike_indexer(self, key, axis=None, value=None):
and not com.is_bool_indexer(key)
and all(is_hashable(k) for k in key)
):
- for i, k in enumerate(key):
- if k not in self.obj:
- if value is None:
- self.obj[k] = np.nan
- elif is_array_like(value) and value.ndim == 2:
- # GH#37964 have to select columnwise in case of array
- self.obj[k] = value[:, i]
- elif is_list_like(value):
- self.obj[k] = value[i]
- else:
- self.obj[k] = value
+ keys = self.obj.columns.union(key, sort=False)
+ self.obj._mgr = self.obj._mgr.reindex_axis(keys, 0)
def __setitem__(self, key, value):
if isinstance(key, tuple):
| - [x] closes #37954
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Reindexing the Block Manager improves the performance significantly. I hope I have not missed anything, concerning the reindexing of the blocks.
Time spent in ``_ensure_listlike_indexer`` is pretty low now.
timeit result for the ops methods:
```
In [20]: %timeit setitem(x, x_col, df)
1.09 ms ± 9.65 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
In [21]: %timeit concat(x, x_col, df)
293 µs ± 4.29 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
```
Should we add tests here? I have added an asv to capture this case.
cc @jbrockmendel | https://api.github.com/repos/pandas-dev/pandas/pulls/38148 | 2020-11-29T14:07:15Z | 2020-11-29T21:52:39Z | 2020-11-29T21:52:39Z | 2020-12-01T10:54:21Z |
ENH: add "schema" kwarg to io.sql.get_schema method | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index ac930b3e77785..1cebb8bcf3b1c 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -260,6 +260,7 @@ Other enhancements
- Added :meth:`~DataFrame.set_flags` for setting table-wide flags on a Series or DataFrame (:issue:`28394`)
- :meth:`DataFrame.applymap` now supports ``na_action`` (:issue:`23803`)
- :class:`Index` with object dtype supports division and multiplication (:issue:`34160`)
+- :meth:`io.sql.get_schema` now supports a ``schema`` keyword argument that will add a schema into the create table statement (:issue:`28486`)
- :meth:`DataFrame.explode` and :meth:`Series.explode` now support exploding of sets (:issue:`35614`)
- :meth:`DataFrame.hist` now supports time series (datetime) data (:issue:`32590`)
- :meth:`.Styler.set_table_styles` now allows the direct styling of rows and columns and can be chained (:issue:`35607`)
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 1fea50ecade3c..5678133d5a706 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -7,7 +7,7 @@
from datetime import date, datetime, time
from functools import partial
import re
-from typing import Iterator, Optional, Union, overload
+from typing import Iterator, List, Optional, Union, overload
import warnings
import numpy as np
@@ -1455,9 +1455,22 @@ def drop_table(self, table_name, schema=None):
self.get_table(table_name, schema).drop()
self.meta.clear()
- def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
+ def _create_sql_schema(
+ self,
+ frame: DataFrame,
+ table_name: str,
+ keys: Optional[List[str]] = None,
+ dtype: Optional[dict] = None,
+ schema: Optional[str] = None,
+ ):
table = SQLTable(
- table_name, self, frame=frame, index=False, keys=keys, dtype=dtype
+ table_name,
+ self,
+ frame=frame,
+ index=False,
+ keys=keys,
+ dtype=dtype,
+ schema=schema,
)
return str(table.sql_schema())
@@ -1588,9 +1601,13 @@ def _create_table_setup(self):
create_tbl_stmts.append(
f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
)
-
+ if self.schema:
+ schema_name = self.schema + "."
+ else:
+ schema_name = ""
create_stmts = [
"CREATE TABLE "
+ + schema_name
+ escape(self.name)
+ " (\n"
+ ",\n ".join(create_tbl_stmts)
@@ -1845,14 +1862,20 @@ def drop_table(self, name, schema=None):
drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
self.execute(drop_sql)
- def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
+ def _create_sql_schema(self, frame, table_name, keys=None, dtype=None, schema=None):
table = SQLiteTable(
- table_name, self, frame=frame, index=False, keys=keys, dtype=dtype
+ table_name,
+ self,
+ frame=frame,
+ index=False,
+ keys=keys,
+ dtype=dtype,
+ schema=schema,
)
return str(table.sql_schema())
-def get_schema(frame, name, keys=None, con=None, dtype=None):
+def get_schema(frame, name, keys=None, con=None, dtype=None, schema=None):
"""
Get the SQL db table schema for the given frame.
@@ -1870,7 +1893,12 @@ def get_schema(frame, name, keys=None, con=None, dtype=None):
dtype : dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
+ schema: str, default: None
+ Optional specifying the schema to be used in creating the table.
+ .. versionadded:: 1.2.0
"""
pandas_sql = pandasSQL_builder(con=con)
- return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
+ return pandas_sql._create_sql_schema(
+ frame, name, keys=keys, dtype=dtype, schema=schema
+ )
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 1be6022bc0fcd..0195b61d13798 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -865,6 +865,13 @@ def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
+ def test_get_schema_with_schema(self):
+ # GH28486
+ create_sql = sql.get_schema(
+ self.test_frame1, "test", con=self.conn, schema="pypi"
+ )
+ assert "CREATE TABLE pypi." in create_sql
+
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
| - [x] closes #28486
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Picking up #33278 | https://api.github.com/repos/pandas-dev/pandas/pulls/38146 | 2020-11-29T04:41:13Z | 2020-12-08T02:44:48Z | 2020-12-08T02:44:47Z | 2020-12-08T02:45:00Z |
BUG: combine_first does not retain dtypes with Timestamp DataFrames | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 1f8fa1e2072fd..07b37de91daa4 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -536,6 +536,7 @@ Categorical
Datetimelike
^^^^^^^^^^^^
+- Bug in :meth:`DataFrame.combine_first` that would convert datetime-like column on other :class:`DataFrame` to integer when the column is not present in original :class:`DataFrame` (:issue:`28481`)
- Bug in :attr:`.DatetimeArray.date` where a ``ValueError`` would be raised with a read-only backing array (:issue:`33530`)
- Bug in ``NaT`` comparisons failing to raise ``TypeError`` on invalid inequality comparisons (:issue:`35046`)
- Bug in :class:`.DateOffset` where attributes reconstructed from pickle files differ from original objects when input values exceed normal ranges (e.g months=12) (:issue:`34511`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index c9030a0b2423a..85e35dbb86f1c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6386,7 +6386,7 @@ def combine(
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
- arr = maybe_downcast_to_dtype(arr, this_dtype)
+ arr = maybe_downcast_to_dtype(arr, new_dtype)
result[col] = arr
diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py
index 08c4293323500..934ad9eb8213a 100644
--- a/pandas/tests/frame/methods/test_combine_first.py
+++ b/pandas/tests/frame/methods/test_combine_first.py
@@ -103,6 +103,7 @@ def test_combine_first_mixed_bug(self):
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
+ def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
@@ -118,6 +119,7 @@ def test_combine_first_mixed_bug(self):
df.loc[0, "A"] = 45
tm.assert_frame_equal(result, df)
+ def test_combine_first_doc_example(self):
# doc example
df1 = DataFrame(
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
@@ -134,38 +136,56 @@ def test_combine_first_mixed_bug(self):
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
tm.assert_frame_equal(result, expected)
- # GH3552, return object dtype with bools
+ def test_combine_first_return_obj_type_with_bools(self):
+ # GH3552
+
df1 = DataFrame(
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
)
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
- result = df1.combine_first(df2)[2]
- expected = Series([True, True, False], name=2)
- tm.assert_series_equal(result, expected)
-
- # GH 3593, converting datetime64[ns] incorrectly
- df0 = DataFrame(
- {"a": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]}
- )
- df1 = DataFrame({"a": [None, None, None]})
- df2 = df1.combine_first(df0)
- tm.assert_frame_equal(df2, df0)
-
- df2 = df0.combine_first(df1)
- tm.assert_frame_equal(df2, df0)
-
- df0 = DataFrame(
- {"a": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]}
- )
- df1 = DataFrame({"a": [datetime(2000, 1, 2), None, None]})
- df2 = df1.combine_first(df0)
- result = df0.copy()
- result.iloc[0, :] = df1.iloc[0, :]
- tm.assert_frame_equal(df2, result)
+ expected = Series([True, True, False], name=2, dtype=object)
+
+ result_12 = df1.combine_first(df2)[2]
+ tm.assert_series_equal(result_12, expected)
+
+ result_21 = df2.combine_first(df1)[2]
+ tm.assert_series_equal(result_21, expected)
+
+ @pytest.mark.parametrize(
+ "data1, data2, data_expected",
+ (
+ (
+ [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
+ [None, None, None],
+ [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
+ ),
+ (
+ [None, None, None],
+ [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
+ [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
+ ),
+ (
+ [datetime(2000, 1, 2), None, None],
+ [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
+ [datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)],
+ ),
+ (
+ [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
+ [datetime(2000, 1, 2), None, None],
+ [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
+ ),
+ ),
+ )
+ def test_combine_first_convert_datatime_correctly(
+ self, data1, data2, data_expected
+ ):
+ # GH 3593
- df2 = df0.combine_first(df1)
- tm.assert_frame_equal(df2, df0)
+ df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2})
+ result = df1.combine_first(df2)
+ expected = DataFrame({"a": data_expected})
+ tm.assert_frame_equal(result, expected)
def test_combine_first_align_nan(self):
# GH 7509 (not fixed)
@@ -339,9 +359,14 @@ def test_combine_first_int(self):
df1 = DataFrame({"a": [0, 1, 3, 5]}, dtype="int64")
df2 = DataFrame({"a": [1, 4]}, dtype="int64")
- res = df1.combine_first(df2)
- tm.assert_frame_equal(res, df1)
- assert res["a"].dtype == "int64"
+ result_12 = df1.combine_first(df2)
+ expected_12 = DataFrame({"a": [0, 1, 3, 5]}, dtype="float64")
+ tm.assert_frame_equal(result_12, expected_12)
+
+ result_21 = df2.combine_first(df1)
+ expected_21 = DataFrame({"a": [1, 4, 3, 5]}, dtype="float64")
+
+ tm.assert_frame_equal(result_21, expected_21)
@pytest.mark.parametrize("val", [1, 1.0])
def test_combine_first_with_asymmetric_other(self, val):
@@ -367,6 +392,26 @@ def test_combine_first_string_dtype_only_na(self):
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize(
+ "scalar1, scalar2",
+ [
+ (datetime(2020, 1, 1), datetime(2020, 1, 2)),
+ (pd.Period("2020-01-01", "D"), pd.Period("2020-01-02", "D")),
+ (pd.Timedelta("89 days"), pd.Timedelta("60 min")),
+ (pd.Interval(left=0, right=1), pd.Interval(left=2, right=3, closed="left")),
+ ],
+)
+def test_combine_first_timestamp_bug(scalar1, scalar2, nulls_fixture):
+ # GH28481
+ na_value = nulls_fixture
+ frame = DataFrame([[na_value, na_value]], columns=["a", "b"])
+ other = DataFrame([[scalar1, scalar2]], columns=["b", "c"])
+
+ result = frame.combine_first(other)
+ expected = DataFrame([[na_value, scalar1, scalar2]], columns=["a", "b", "c"])
+ tm.assert_frame_equal(result, expected)
+
+
def test_combine_first_with_nan_multiindex():
# gh-36562
| - [x] closes #28481
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Picking up #35514 | https://api.github.com/repos/pandas-dev/pandas/pulls/38145 | 2020-11-29T04:31:12Z | 2020-11-30T13:21:49Z | 2020-11-30T13:21:49Z | 2020-12-01T06:04:26Z |
CLN: remove unnecesary cast.maybe_convert_objects | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index fe40bc42887c4..27c5527536057 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -99,7 +99,6 @@
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
- from pandas.core.indexes.datetimes import DatetimeIndex
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
@@ -1121,57 +1120,6 @@ def astype_nansafe(
return arr.view(dtype)
-def maybe_convert_objects(
- values: np.ndarray, convert_numeric: bool = True
-) -> Union[np.ndarray, "DatetimeIndex"]:
- """
- If we have an object dtype array, try to coerce dates and/or numbers.
-
- Parameters
- ----------
- values : ndarray
- convert_numeric : bool, default True
-
- Returns
- -------
- ndarray or DatetimeIndex
- """
- validate_bool_kwarg(convert_numeric, "convert_numeric")
-
- orig_values = values
-
- # convert dates
- if is_object_dtype(values.dtype):
- values = lib.maybe_convert_objects(values, convert_datetime=True)
-
- # convert timedeltas
- if is_object_dtype(values.dtype):
- values = lib.maybe_convert_objects(values, convert_timedelta=True)
-
- # convert to numeric
- if is_object_dtype(values.dtype):
- if convert_numeric:
- try:
- new_values = lib.maybe_convert_numeric(
- values, set(), coerce_numeric=True
- )
- except (ValueError, TypeError):
- pass
- else:
- # if we are all nans then leave me alone
- if not isna(new_values).all():
- values = new_values
-
- else:
- # soft-conversion
- values = lib.maybe_convert_objects(values)
-
- if values is orig_values:
- values = values.copy()
-
- return values
-
-
def soft_convert_objects(
values: np.ndarray,
datetime: bool = True,
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 244c47cd1f1ea..b9226732d5a69 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -37,7 +37,6 @@
find_common_type,
maybe_cast_result,
maybe_cast_result_dtype,
- maybe_convert_objects,
maybe_downcast_numeric,
)
from pandas.core.dtypes.common import (
@@ -1867,8 +1866,9 @@ def _recast_datetimelike_result(result: DataFrame) -> DataFrame:
# See GH#26285
for n in obj_cols:
- converted = maybe_convert_objects(
- result.iloc[:, n].values, convert_numeric=False
+ values = result.iloc[:, n].values
+ converted = lib.maybe_convert_objects(
+ values, convert_datetime=True, convert_timedelta=True
)
result.iloc[:, n] = converted
diff --git a/pandas/tests/dtypes/cast/test_convert_objects.py b/pandas/tests/dtypes/cast/test_convert_objects.py
deleted file mode 100644
index a28d554acd312..0000000000000
--- a/pandas/tests/dtypes/cast/test_convert_objects.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas.core.dtypes.cast import maybe_convert_objects
-
-
-@pytest.mark.parametrize("data", [[1, 2], ["apply", "banana"]])
-def test_maybe_convert_objects_copy(data):
- arr = np.array(data)
- out = maybe_convert_objects(arr)
-
- assert arr is not out
| https://api.github.com/repos/pandas-dev/pandas/pulls/38144 | 2020-11-29T02:57:19Z | 2020-11-29T18:23:47Z | 2020-11-29T18:23:47Z | 2020-11-29T18:26:33Z | |
REF: refactor out EA dispatch for _cython_operation | diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 50c4cc53a12bb..7fc1a053eac57 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -445,36 +445,26 @@ def _get_cython_func_and_vals(
raise
return func, values
- def _cython_operation(
- self, kind: str, values, how: str, axis: int, min_count: int = -1, **kwargs
- ) -> Tuple[np.ndarray, Optional[List[str]]]:
+ def _disallow_invalid_ops(self, values, how: str):
"""
- Returns the values of a cython operation as a Tuple of [data, names].
+ Check if this operation is valid for this dtype.
- Names is only useful when dealing with 2D results, like ohlc
- (see self._name_functions).
+ Raises
+ ------
+ NotImplementedError
"""
- assert kind in ["transform", "aggregate"]
- orig_values = values
-
- if values.ndim > 2:
- raise NotImplementedError("number of dimensions is currently limited to 2")
- elif values.ndim == 2:
- # Note: it is *not* the case that axis is always 0 for 1-dim values,
- # as we can have 1D ExtensionArrays that we need to treat as 2D
- assert axis == 1, axis
-
# can we do this operation with our cython functions
# if not raise NotImplementedError
- # we raise NotImplemented if this is an invalid operation
+ # we raise NotImplementedError if this is an invalid operation
# entirely, e.g. adding datetimes
- # categoricals are only 1d, so we
- # are not setup for dim transforming
if is_categorical_dtype(values.dtype) or is_sparse(values.dtype):
+ # categoricals are only 1d, so we
+ # are not setup for dim transforming
raise NotImplementedError(f"{values.dtype} dtype not supported")
elif is_datetime64_any_dtype(values.dtype):
+ # TODO: do we need to catch PeriodDtype explicitly?
if how in ["add", "prod", "cumsum", "cumprod"]:
raise NotImplementedError(
f"datetime64 type does not support {how} operations"
@@ -485,15 +475,88 @@ def _cython_operation(
f"timedelta64 type does not support {how} operations"
)
- if is_datetime64tz_dtype(values.dtype):
- # Cast to naive; we'll cast back at the end of the function
- # TODO: possible need to reshape?
- # TODO(EA2D):kludge can be avoided when 2D EA is allowed.
+ def _ea_wrap_cython_operation(
+ self,
+ kind: str,
+ values,
+ how: str,
+ axis: int,
+ min_count: int = -1,
+ **kwargs,
+ ) -> Tuple[np.ndarray, Optional[List[str]]]:
+ """
+ Cast ExtensionArray to numpy for op, then cast result back if appropriate.
+ """
+ # TODO: allow the EAs to implement this
+ orig_values = values
+
+ if is_datetime64tz_dtype(values.dtype) or is_period_dtype(values.dtype):
+ # All of the functions implemented here are ordinal, so we can
+ # operate on the tz-naive equivalents
values = values.view("M8[ns]")
+ res_values, names = self._cython_operation(
+ kind, values, how, axis, min_count, **kwargs
+ )
+ res_values = res_values.astype("i8", copy=False)
+ result = type(orig_values)._simple_new(res_values, dtype=orig_values.dtype)
+ return result, names
+
+ elif is_integer_dtype(values.dtype):
+ # IntegerArray
+ values = ensure_int_or_float(values)
+ res_values, names = self._cython_operation(
+ kind, values, how, axis, min_count, **kwargs
+ )
+ result = maybe_cast_result(result=res_values, obj=orig_values, how=how)
+ return result, names
+
+ elif is_bool_dtype(values.dtype):
+ # BooleanArray
+ values = ensure_int_or_float(values)
+ res_values, names = self._cython_operation(
+ kind, values, how, axis, min_count, **kwargs
+ )
+ result = maybe_cast_result(result=res_values, obj=orig_values, how=how)
+ return result, names
+
+ raise NotImplementedError(values.dtype)
+
+ def _cython_operation(
+ self,
+ kind: str,
+ values,
+ how: str,
+ axis: int,
+ min_count: int = -1,
+ **kwargs,
+ ) -> Tuple[np.ndarray, Optional[List[str]]]:
+ """
+ Returns the values of a cython operation as a Tuple of [data, names].
+
+ Names is only useful when dealing with 2D results, like ohlc
+ (see self._name_functions).
+ """
+ assert kind in ["transform", "aggregate"]
+ orig_values = values
+
+ if values.ndim > 2:
+ raise NotImplementedError("number of dimensions is currently limited to 2")
+ elif values.ndim == 2:
+ # Note: it is *not* the case that axis is always 0 for 1-dim values,
+ # as we can have 1D ExtensionArrays that we need to treat as 2D
+ assert axis == 1, axis
+
+ self._disallow_invalid_ops(values, how)
+
+ if is_extension_array_dtype(values.dtype):
+ return self._ea_wrap_cython_operation(
+ kind, values, how, axis, min_count, **kwargs
+ )
is_datetimelike = needs_i8_conversion(values.dtype)
is_numeric = is_numeric_dtype(values.dtype)
+ # TODO: overlap with stuff in algorithms._ensure_data, nanops?
if is_datetimelike:
values = values.view("int64")
is_numeric = True
@@ -573,19 +636,10 @@ def _cython_operation(
if swapped:
result = result.swapaxes(0, axis)
- if is_datetime64tz_dtype(orig_values.dtype) or is_period_dtype(
- orig_values.dtype
- ):
- # We need to use the constructors directly for these dtypes
- # since numpy won't recognize them
- # https://github.com/pandas-dev/pandas/issues/31471
- result = type(orig_values)(result.astype(np.int64), dtype=orig_values.dtype)
- elif is_datetimelike and kind == "aggregate":
+ if is_datetimelike and kind == "aggregate":
+ # i.e. not rank, for which we want to keep numeric dtype
result = result.astype(orig_values.dtype)
- if is_extension_array_dtype(orig_values.dtype):
- result = maybe_cast_result(result=result, obj=orig_values, how=how)
-
return result, names
def aggregate(
| Make our dispatching/casting explicit and self-contained. | https://api.github.com/repos/pandas-dev/pandas/pulls/38143 | 2020-11-29T01:41:20Z | 2020-11-29T16:24:08Z | null | 2021-11-20T23:22:49Z |
DEPR: ExtensionOpsMixin -> OpsMixin | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 6aff4f4bd41e2..eafd77376ac0b 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -487,6 +487,7 @@ Deprecations
- Deprecated :meth:`Index.asi8` for :class:`Index` subclasses other than :class:`.DatetimeIndex`, :class:`.TimedeltaIndex`, and :class:`PeriodIndex` (:issue:`37877`)
- The ``inplace`` parameter of :meth:`Categorical.remove_unused_categories` is deprecated and will be removed in a future version (:issue:`37643`)
- The ``null_counts`` parameter of :meth:`DataFrame.info` is deprecated and replaced by ``show_counts``. It will be removed in a future version (:issue:`37999`)
+- :class:`ExtensionOpsMixin` and :class:`ExtensionScalarOpsMixin` are deprecated and will be removed in a future version. Use ``pd.core.arraylike.OpsMixin`` instead (:issue:`37080`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 448025e05422d..32ee897d0a49e 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -21,6 +21,7 @@
Union,
cast,
)
+import warnings
import numpy as np
@@ -1219,6 +1220,21 @@ class ExtensionOpsMixin:
with NumPy arrays.
"""
+ def __init_subclass__(cls, **kwargs):
+ # We use __init_subclass__ to handle deprecations
+ super().__init_subclass__()
+
+ if cls.__name__ != "ExtensionScalarOpsMixin":
+ # We only want to warn for user-defined subclasses,
+ # and cannot reference ExtensionScalarOpsMixin directly at this point.
+ warnings.warn(
+ "ExtensionOpsMixin and ExtensionScalarOpsMixin are deprecated "
+ "and will be removed in a future version. Use "
+ "pd.core.arraylike.OpsMixin instead.",
+ FutureWarning,
+ stacklevel=2,
+ )
+
@classmethod
def _create_arithmetic_method(cls, op):
raise AbstractMethodError(cls)
diff --git a/pandas/tests/arrays/test_deprecations.py b/pandas/tests/arrays/test_deprecations.py
new file mode 100644
index 0000000000000..7e80072e8794f
--- /dev/null
+++ b/pandas/tests/arrays/test_deprecations.py
@@ -0,0 +1,19 @@
+import pandas._testing as tm
+from pandas.core.arrays import (
+ ExtensionArray,
+ ExtensionOpsMixin,
+ ExtensionScalarOpsMixin,
+)
+
+
+def test_extension_ops_mixin_deprecated():
+ # GH#37080 deprecated in favor of OpsMixin
+ with tm.assert_produces_warning(FutureWarning):
+
+ class MySubclass(ExtensionOpsMixin, ExtensionArray):
+ pass
+
+ with tm.assert_produces_warning(FutureWarning):
+
+ class MyOtherSubclass(ExtensionScalarOpsMixin, ExtensionArray):
+ pass
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 9ede9c7fbd0fd..8000584cff8c5 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -7,12 +7,13 @@
import numpy as np
from pandas.core.dtypes.base import ExtensionDtype
+from pandas.core.dtypes.cast import maybe_cast_to_extension_array
from pandas.core.dtypes.common import is_dtype_equal, is_list_like, pandas_dtype
import pandas as pd
from pandas.api.extensions import no_default, register_extension_dtype
from pandas.core.arraylike import OpsMixin
-from pandas.core.arrays import ExtensionArray, ExtensionScalarOpsMixin
+from pandas.core.arrays import ExtensionArray
from pandas.core.indexers import check_array_indexer
@@ -45,7 +46,7 @@ def _is_numeric(self) -> bool:
return True
-class DecimalArray(OpsMixin, ExtensionScalarOpsMixin, ExtensionArray):
+class DecimalArray(OpsMixin, ExtensionArray):
__array_priority__ = 1000
def __init__(self, values, dtype=None, copy=False, context=None):
@@ -217,6 +218,42 @@ def convert_values(param):
return np.asarray(res, dtype=bool)
+ _do_coerce = True # overriden in DecimalArrayWithoutCoercion
+
+ def _arith_method(self, other, op):
+ def convert_values(param):
+ if isinstance(param, ExtensionArray) or is_list_like(param):
+ ovalues = param
+ else: # Assume its an object
+ ovalues = [param] * len(self)
+ return ovalues
+
+ lvalues = self
+ rvalues = convert_values(other)
+
+ # If the operator is not defined for the underlying objects,
+ # a TypeError should be raised
+ res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
+
+ def _maybe_convert(arr):
+ if self._do_coerce:
+ # https://github.com/pandas-dev/pandas/issues/22850
+ # We catch all regular exceptions here, and fall back
+ # to an ndarray.
+ res = maybe_cast_to_extension_array(type(self), arr)
+ if not isinstance(res, type(self)):
+ # exception raised in _from_sequence; ensure we have ndarray
+ res = np.asarray(arr)
+ else:
+ res = np.asarray(arr)
+ return res
+
+ if op.__name__ in {"divmod", "rdivmod"}:
+ a, b = zip(*res)
+ return _maybe_convert(a), _maybe_convert(b)
+
+ return _maybe_convert(res)
+
def to_decimal(values, context=None):
return DecimalArray([decimal.Decimal(x) for x in values], context=context)
@@ -224,6 +261,3 @@ def to_decimal(values, context=None):
def make_data():
return [decimal.Decimal(random.random()) for _ in range(100)]
-
-
-DecimalArray._add_arithmetic_ops()
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 233b658d29782..c3e84f75ebe68 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -335,12 +335,7 @@ def _from_sequence(cls, scalars, dtype=None, copy=False):
class DecimalArrayWithoutCoercion(DecimalArrayWithoutFromSequence):
- @classmethod
- def _create_arithmetic_method(cls, op):
- return cls._create_method(op, coerce_to_dtype=False)
-
-
-DecimalArrayWithoutCoercion._add_arithmetic_ops()
+ _do_coerce = False
def test_combine_from_sequence_raises():
| - [x] closes #37080
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38142 | 2020-11-28T23:44:03Z | 2020-11-29T19:18:39Z | 2020-11-29T19:18:39Z | 2020-11-29T20:25:44Z |
BUG: do not stringify file-like objects | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index bc7f5b8174573..4906288cc07d9 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -746,6 +746,7 @@ I/O
- :meth:`read_fwf` was inferring compression with ``compression=None`` which was not consistent with the other :meth:``read_*`` functions (:issue:`37909`)
- :meth:`DataFrame.to_html` was ignoring ``formatters`` argument for ``ExtensionDtype`` columns (:issue:`36525`)
- Bumped minimum xarray version to 0.12.3 to avoid reference to the removed ``Panel`` class (:issue:`27101`)
+- :meth:`DataFrame.to_csv` was re-opening file-like handles that also implement ``os.PathLike`` (:issue:`38125`)
Period
^^^^^^
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 9fede5180e727..64c5d3173fe0a 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -152,6 +152,7 @@ def validate_header_arg(header) -> None:
def stringify_path(
filepath_or_buffer: FilePathOrBuffer[AnyStr],
+ convert_file_like: bool = False,
) -> FileOrBuffer[AnyStr]:
"""
Attempt to convert a path-like object to a string.
@@ -169,12 +170,15 @@ def stringify_path(
Objects supporting the fspath protocol (python 3.6+) are coerced
according to its __fspath__ method.
- For backwards compatibility with older pythons, pathlib.Path and
- py.path objects are specially coerced.
-
Any other object is passed through unchanged, which includes bytes,
strings, buffers, or anything else that's not even path-like.
"""
+ if not convert_file_like and is_file_like(filepath_or_buffer):
+ # GH 38125: some fsspec objects implement os.PathLike but have already opened a
+ # file. This prevents opening the file a second time. infer_compression calls
+ # this function with convert_file_like=True to infer the compression.
+ return cast(FileOrBuffer[AnyStr], filepath_or_buffer)
+
if isinstance(filepath_or_buffer, os.PathLike):
filepath_or_buffer = filepath_or_buffer.__fspath__()
return _expand_user(filepath_or_buffer)
@@ -462,7 +466,7 @@ def infer_compression(
# Infer compression
if compression == "infer":
# Convert all path types (e.g. pathlib.Path) to strings
- filepath_or_buffer = stringify_path(filepath_or_buffer)
+ filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)
if not isinstance(filepath_or_buffer, str):
# Cannot infer compression of a buffer, assume no compression
return None
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 3244b1c0f65b2..2e656edeee74a 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -74,7 +74,7 @@
from pandas.core.series import Series
from pandas.core.tools import datetimes as tools
-from pandas.io.common import IOHandles, get_handle, stringify_path, validate_header_arg
+from pandas.io.common import IOHandles, get_handle, validate_header_arg
from pandas.io.date_converters import generic_parser
# BOM character (byte order mark)
@@ -774,7 +774,7 @@ class TextFileReader(abc.Iterator):
def __init__(self, f, engine=None, **kwds):
- self.f = stringify_path(f)
+ self.f = f
if engine is not None:
engine_specified = True
@@ -859,14 +859,14 @@ def _get_options_with_defaults(self, engine):
def _check_file_or_buffer(self, f, engine):
# see gh-16530
- if is_file_like(f):
+ if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
# The C engine doesn't need the file-like to have the "__next__"
# attribute. However, the Python engine explicitly calls
# "__next__(...)" when iterating through such an object, meaning it
# needs to have that attribute
- if engine != "c" and not hasattr(f, "__next__"):
- msg = "The 'python' engine cannot iterate through this file buffer."
- raise ValueError(msg)
+ raise ValueError(
+ "The 'python' engine cannot iterate through this file buffer."
+ )
def _clean_options(self, options, engine):
result = options.copy()
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index c3b21daa0ac04..34cb00e89ea0c 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -85,6 +85,13 @@ def test_stringify_path_fspath(self):
result = icom.stringify_path(p)
assert result == "foo/bar.csv"
+ def test_stringify_file_and_path_like(self):
+ # GH 38125: do not stringify file objects that are also path-like
+ fsspec = pytest.importorskip("fsspec")
+ with tm.ensure_clean() as path:
+ with fsspec.open(f"file://{path}", mode="wb") as fsspec_obj:
+ assert fsspec_obj == icom.stringify_path(fsspec_obj)
+
@pytest.mark.parametrize(
"extension,expected",
[
| - [x] closes #38125
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I will finish this PR (fix the mypy issues) after #38018 is merged.
| https://api.github.com/repos/pandas-dev/pandas/pulls/38141 | 2020-11-28T21:23:29Z | 2020-12-14T01:07:31Z | 2020-12-14T01:07:31Z | 2020-12-14T01:09:00Z |
BUG: Categorical.unique should keep dtype unchanged | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 2b0b62ab7facf..6631a175ecb72 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -230,6 +230,38 @@ Notable bug fixes
These are bug fixes that might have notable behavior changes.
+``Categorical.unique`` now always maintains same dtype as original
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Previously, when calling :meth:`~Categorical.unique` with categorical data, unused categories in the new array
+would be removed, meaning that the dtype of the new array would be different than the
+original, if some categories are not present in the unique array (:issue:`18291`)
+
+As an example of this, given:
+
+.. ipython:: python
+
+ dtype = pd.CategoricalDtype(['bad', 'neutral', 'good'], ordered=True)
+ cat = pd.Categorical(['good', 'good', 'bad', 'bad'], dtype=dtype)
+ original = pd.Series(cat)
+ unique = original.unique()
+
+*pandas < 1.3.0*:
+
+.. code-block:: ipython
+
+ In [1]: unique
+ ['good', 'bad']
+ Categories (2, object): ['bad' < 'good']
+ In [2]: original.dtype == unique.dtype
+ False
+
+*pandas >= 1.3.0*
+
+.. ipython:: python
+
+ unique
+ original.dtype == unique.dtype
Preserve dtypes in :meth:`~pandas.DataFrame.combine_first`
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index f2b5ad447a0cf..ba36e4a630e1f 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2127,16 +2127,15 @@ def mode(self, dropna=True):
def unique(self):
"""
Return the ``Categorical`` which ``categories`` and ``codes`` are
- unique. Unused categories are NOT returned.
+ unique.
- - unordered category: values and categories are sorted by appearance
- order.
- - ordered category: values are sorted by appearance order, categories
- keeps existing order.
+ .. versionchanged:: 1.3.0
+
+ Previously, unused categories were dropped from the new categories.
Returns
-------
- unique values : ``Categorical``
+ Categorical
See Also
--------
@@ -2146,37 +2145,15 @@ def unique(self):
Examples
--------
- An unordered Categorical will return categories in the
- order of appearance.
-
>>> pd.Categorical(list("baabc")).unique()
['b', 'a', 'c']
- Categories (3, object): ['b', 'a', 'c']
-
- >>> pd.Categorical(list("baabc"), categories=list("abc")).unique()
- ['b', 'a', 'c']
- Categories (3, object): ['b', 'a', 'c']
-
- An ordered Categorical preserves the category ordering.
-
- >>> pd.Categorical(
- ... list("baabc"), categories=list("abc"), ordered=True
- ... ).unique()
- ['b', 'a', 'c']
+ Categories (3, object): ['a', 'b', 'c']
+ >>> pd.Categorical(list("baab"), categories=list("abc"), ordered=True).unique()
+ ['b', 'a']
Categories (3, object): ['a' < 'b' < 'c']
"""
- # unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
- cat = self.copy()
-
- # keep nan in codes
- cat._ndarray = unique_codes
-
- # exclude nan from indexer for categories
- take_codes = unique_codes[unique_codes != -1]
- if self.ordered:
- take_codes = np.sort(take_codes)
- return cat.set_categories(cat.categories.take(take_codes))
+ return self._from_backing_data(unique_codes)
def _values_for_factorize(self):
return self._ndarray, -1
diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py
index 6de8c1d789097..297681f1e10f5 100644
--- a/pandas/core/groupby/categorical.py
+++ b/pandas/core/groupby/categorical.py
@@ -76,6 +76,13 @@ def recode_for_groupby(
# sort=False should order groups in as-encountered order (GH-8868)
cat = c.unique()
+ # See GH-38140 for block below
+ # exclude nan from indexer for categories
+ take_codes = cat.codes[cat.codes != -1]
+ if cat.ordered:
+ take_codes = np.sort(take_codes)
+ cat = cat.set_categories(cat.categories.take(take_codes))
+
# But for groupby to work, all categories should be present,
# including those missing from the data (GH-13179), which .unique()
# above dropped
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 5c605a6b441c6..33e3bfb6ee3aa 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1993,15 +1993,12 @@ def unique(self) -> ArrayLike:
['2016-01-01 00:00:00-05:00']
Length: 1, dtype: datetime64[ns, US/Eastern]
- An unordered Categorical will return categories in the order of
- appearance.
+ An Categorical will return categories in the order of
+ appearance and with the same dtype.
>>> pd.Series(pd.Categorical(list('baabc'))).unique()
['b', 'a', 'c']
- Categories (3, object): ['b', 'a', 'c']
-
- An ordered Categorical preserves the category ordering.
-
+ Categories (3, object): ['a', 'b', 'c']
>>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'),
... ordered=True)).unique()
['b', 'a', 'c']
diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py
index 6899d821f80ad..56d474497a166 100644
--- a/pandas/tests/arrays/categorical/test_analytics.py
+++ b/pandas/tests/arrays/categorical/test_analytics.py
@@ -8,6 +8,7 @@
from pandas import (
Categorical,
+ CategoricalDtype,
Index,
NaT,
Series,
@@ -196,84 +197,49 @@ def test_searchsorted(self, ordered):
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted(["bread", "cucumber"])
- def test_unique(self):
+ def test_unique(self, ordered):
+ # GH38140
+ dtype = CategoricalDtype(["a", "b", "c"], ordered=ordered)
+
# categories are reordered based on value when ordered=False
- cat = Categorical(["a", "b"])
- exp = Index(["a", "b"])
+ cat = Categorical(["a", "b", "c"], dtype=dtype)
res = cat.unique()
- tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, cat)
- cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
+ cat = Categorical(["a", "b", "a", "a"], dtype=dtype)
res = cat.unique()
- tm.assert_index_equal(res.categories, exp)
- tm.assert_categorical_equal(res, Categorical(exp))
+ tm.assert_categorical_equal(res, Categorical(["a", "b"], dtype=dtype))
- cat = Categorical(["c", "a", "b", "a", "a"], categories=["a", "b", "c"])
- exp = Index(["c", "a", "b"])
+ cat = Categorical(["c", "a", "b", "a", "a"], dtype=dtype)
res = cat.unique()
- tm.assert_index_equal(res.categories, exp)
- exp_cat = Categorical(exp, categories=["c", "a", "b"])
+ exp_cat = Categorical(["c", "a", "b"], dtype=dtype)
tm.assert_categorical_equal(res, exp_cat)
# nan must be removed
- cat = Categorical(["b", np.nan, "b", np.nan, "a"], categories=["a", "b", "c"])
- res = cat.unique()
- exp = Index(["b", "a"])
- tm.assert_index_equal(res.categories, exp)
- exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
- tm.assert_categorical_equal(res, exp_cat)
-
- def test_unique_ordered(self):
- # keep categories order when ordered=True
- cat = Categorical(["b", "a", "b"], categories=["a", "b"], ordered=True)
+ cat = Categorical(["b", np.nan, "b", np.nan, "a"], dtype=dtype)
res = cat.unique()
- exp_cat = Categorical(["b", "a"], categories=["a", "b"], ordered=True)
+ exp_cat = Categorical(["b", np.nan, "a"], dtype=dtype)
tm.assert_categorical_equal(res, exp_cat)
- cat = Categorical(
- ["c", "b", "a", "a"], categories=["a", "b", "c"], ordered=True
- )
- res = cat.unique()
- exp_cat = Categorical(["c", "b", "a"], categories=["a", "b", "c"], ordered=True)
- tm.assert_categorical_equal(res, exp_cat)
-
- cat = Categorical(["b", "a", "a"], categories=["a", "b", "c"], ordered=True)
- res = cat.unique()
- exp_cat = Categorical(["b", "a"], categories=["a", "b"], ordered=True)
- tm.assert_categorical_equal(res, exp_cat)
+ def test_unique_index_series(self, ordered):
+ # GH38140
+ dtype = CategoricalDtype([3, 2, 1], ordered=ordered)
- cat = Categorical(
- ["b", "b", np.nan, "a"], categories=["a", "b", "c"], ordered=True
- )
- res = cat.unique()
- exp_cat = Categorical(["b", np.nan, "a"], categories=["a", "b"], ordered=True)
- tm.assert_categorical_equal(res, exp_cat)
-
- def test_unique_index_series(self):
- c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1])
+ c = Categorical([3, 1, 2, 2, 1], dtype=dtype)
# Categorical.unique sorts categories by appearance order
# if ordered=False
- exp = Categorical([3, 1, 2], categories=[3, 1, 2])
+ exp = Categorical([3, 1, 2], dtype=dtype)
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
- c = Categorical([1, 1, 2, 2], categories=[3, 2, 1])
- exp = Categorical([1, 2], categories=[1, 2])
+ c = Categorical([1, 1, 2, 2], dtype=dtype)
+ exp = Categorical([1, 2], dtype=dtype)
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
- c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1], ordered=True)
- # Categorical.unique keeps categories order if ordered=True
- exp = Categorical([3, 1, 2], categories=[3, 2, 1], ordered=True)
- tm.assert_categorical_equal(c.unique(), exp)
-
- tm.assert_index_equal(Index(c).unique(), Index(exp))
- tm.assert_categorical_equal(Series(c).unique(), exp)
-
def test_shift(self):
# GH 9416
cat = Categorical(["a", "b", "c", "d", "a"])
diff --git a/pandas/tests/base/test_unique.py b/pandas/tests/base/test_unique.py
index 4aefa4be176fb..26e785a2796b1 100644
--- a/pandas/tests/base/test_unique.py
+++ b/pandas/tests/base/test_unique.py
@@ -67,8 +67,6 @@ def test_unique_null(null_obj, index_or_series_obj):
if is_datetime64tz_dtype(obj.dtype):
result = result.normalize()
expected = expected.normalize()
- elif isinstance(obj, pd.CategoricalIndex):
- expected = expected.set_categories(unique_values_not_null)
tm.assert_index_equal(result, expected)
else:
expected = np.array(unique_values, dtype=obj.dtype)
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 3ea5c34201b5c..ca9c2acb9fd12 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -40,12 +40,16 @@ def test_value_counts_with_normalize(self, data):
# GH 33172
data = data[:10].unique()
values = np.array(data[~data.isna()])
+ ser = pd.Series(data, dtype=data.dtype)
- result = (
- pd.Series(data, dtype=data.dtype).value_counts(normalize=True).sort_index()
- )
+ result = ser.value_counts(normalize=True).sort_index()
+
+ if not isinstance(data, pd.Categorical):
+ expected = pd.Series([1 / len(values)] * len(values), index=result.index)
+ else:
+ expected = pd.Series(0.0, index=result.index)
+ expected[result > 0] = 1 / len(values)
- expected = pd.Series([1 / len(values)] * len(values), index=result.index)
self.assert_series_equal(result, expected)
def test_count(self, data_missing):
diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index d3c9b02b3ba23..678344f5b6909 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -4,7 +4,10 @@
from pandas._libs import index as libindex
import pandas as pd
-from pandas import Categorical
+from pandas import (
+ Categorical,
+ CategoricalDtype,
+)
import pandas._testing as tm
from pandas.core.indexes.api import (
CategoricalIndex,
@@ -186,18 +189,19 @@ def test_drop_duplicates(self, data, categories, expected):
tm.assert_index_equal(result, e)
@pytest.mark.parametrize(
- "data, categories, expected_data, expected_categories",
+ "data, categories, expected_data",
[
- ([1, 1, 1], [1, 2, 3], [1], [1]),
- ([1, 1, 1], list("abc"), [np.nan], []),
- ([1, 2, "a"], [1, 2, 3], [1, 2, np.nan], [1, 2]),
- ([2, "a", "b"], list("abc"), [np.nan, "a", "b"], ["a", "b"]),
+ ([1, 1, 1], [1, 2, 3], [1]),
+ ([1, 1, 1], list("abc"), [np.nan]),
+ ([1, 2, "a"], [1, 2, 3], [1, 2, np.nan]),
+ ([2, "a", "b"], list("abc"), [np.nan, "a", "b"]),
],
)
- def test_unique(self, data, categories, expected_data, expected_categories):
+ def test_unique(self, data, categories, expected_data, ordered):
+ dtype = CategoricalDtype(categories, ordered=ordered)
- idx = CategoricalIndex(data, categories=categories)
- expected = CategoricalIndex(expected_data, categories=expected_categories)
+ idx = CategoricalIndex(data, dtype=dtype)
+ expected = CategoricalIndex(expected_data, dtype=dtype)
tm.assert_index_equal(idx.unique(), expected)
def test_repr_roundtrip(self):
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 127baae6e9352..c9d034361d8c4 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -602,7 +602,7 @@ def test_categorical(self):
# we are expecting to return in the order
# of appearance
- expected = Categorical(list("bac"), categories=list("bac"))
+ expected = Categorical(list("bac"))
# we are expecting to return in the order
# of the categories
@@ -632,7 +632,7 @@ def test_categorical(self):
tm.assert_categorical_equal(result, expected)
# CI -> return CI
- ci = CategoricalIndex(Categorical(list("baabc"), categories=list("bac")))
+ ci = CategoricalIndex(Categorical(list("baabc"), categories=list("abc")))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
| - [x] closes #18291
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
We want to keep the same dtype as in the original after applying `unique`. For example:
```python
>>> dtype = pd.CategoricalDtype(['very bad', 'bad', 'neutral', 'good', 'very good'], ordered=True)
>>> cat = pd.Categorical(['good','good', 'bad', 'bad'], dtype=dtype)
>>> cat
['good', 'good', 'bad', 'bad']
Categories (5, object): ['very bad' < 'bad' < 'neutral' < 'good' < 'very good']
>>> cat.unique()
['good', 'bad']
Categories (5, object): ['bad' < 'good']
>>> cat.unique().dtype == cat.dtype
False # master
True # this PR
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/38140 | 2020-11-28T19:59:54Z | 2021-04-16T17:43:33Z | 2021-04-16T17:43:32Z | 2021-04-22T07:50:14Z |
test backportability of #38120 | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 48d4fe65942fe..1f9ba48fb4ef7 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -46,11 +46,13 @@
pandas_dtype,
)
from pandas.core.dtypes.generic import (
+ ABCDatetimeArray,
ABCExtensionArray,
ABCIndex,
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
+ ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import isna, na_value_for_dtype
@@ -191,8 +193,15 @@ def _reconstruct_data(
-------
ExtensionArray or np.ndarray
"""
- if is_extension_array_dtype(dtype):
- values = dtype.construct_array_type()._from_sequence(values)
+ if isinstance(values, ABCExtensionArray) and values.dtype == dtype:
+ # Catch DatetimeArray/TimedeltaArray
+ return values
+ elif is_extension_array_dtype(dtype):
+ cls = dtype.construct_array_type()
+ if isinstance(values, cls) and values.dtype == dtype:
+ return values
+
+ values = cls._from_sequence(values)
elif is_bool_dtype(dtype):
values = values.astype(dtype, copy=False)
@@ -654,6 +663,8 @@ def factorize(
values = _ensure_arraylike(values)
original = values
+ if not isinstance(values, ABCMultiIndex):
+ values = extract_array(values, extract_numpy=True)
# GH35667, if na_sentinel=None, we will not dropna NaNs from the uniques
# of values, assign na_sentinel=-1 to replace code value for NaN.
@@ -662,6 +673,19 @@ def factorize(
na_sentinel = -1
dropna = False
+ if (
+ isinstance(values, (ABCDatetimeArray, ABCTimedeltaArray))
+ and values.freq is not None
+ ):
+ codes, uniques = values.factorize(sort=sort)
+ if isinstance(original, ABCIndexClass):
+ uniques = original._shallow_copy(uniques, name=None)
+ elif isinstance(original, ABCSeries):
+ from pandas import Index
+
+ uniques = Index(uniques)
+ return codes, uniques
+
if is_extension_array_dtype(values.dtype):
values = extract_array(values)
codes, uniques = values.factorize(na_sentinel=na_sentinel)
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index a10912aa45baa..a9fe95c0892e6 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1660,6 +1660,20 @@ def mean(self, skipna=True):
# Don't have to worry about NA `result`, since no NA went in.
return self._box_func(result)
+ # --------------------------------------------------------------
+
+ def factorize(self, na_sentinel=-1, sort: bool = False):
+ if self.freq is not None:
+ # We must be unique, so can short-circuit (and retain freq)
+ codes = np.arange(len(self), dtype=np.intp)
+ uniques = self.copy() # TODO: copy or view?
+ if sort and self.freq.n < 0:
+ codes = codes[::-1]
+ uniques = uniques[::-1]
+ return codes, uniques
+ # FIXME: shouldn't get here; we are ignoring sort
+ return super().factorize(na_sentinel=na_sentinel)
+
DatetimeLikeArrayMixin._add_comparison_ops()
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index fe78481d99d30..4d117a31255da 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -48,6 +48,7 @@
import pandas.core.algorithms as algos
from pandas.core.arrays import datetimelike as dtl
+from pandas.core.arrays.base import ExtensionArray
import pandas.core.common as com
@@ -766,6 +767,9 @@ def _check_timedeltalike_freq_compat(self, other):
raise raise_on_incompatible(self, other)
+ def factorize(self, na_sentinel=-1):
+ return ExtensionArray.factorize(self, na_sentinel=na_sentinel)
+
def raise_on_incompatible(left, right):
"""
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 7bb1d98086a91..1b20aad542084 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -271,10 +271,12 @@ def test_factorize(self):
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
# tz must be preserved
idx1 = idx1.tz_localize("Asia/Tokyo")
@@ -283,6 +285,7 @@ def test_factorize(self):
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
idx2 = pd.DatetimeIndex(
["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"]
@@ -293,21 +296,31 @@ def test_factorize(self):
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = DatetimeIndex(["2014-03", "2014-02", "2014-01"])
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
- # freq must be preserved
+ def test_factorize_preserves_freq(self):
+ # GH#38120 freq should be preserved
idx3 = date_range("2000-01", periods=4, freq="M", tz="Asia/Tokyo")
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
+
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
+ assert idx.freq == idx3.freq
- def test_factorize_tz(self, tz_naive_fixture):
+ arr, idx = pd.factorize(idx3)
+ tm.assert_numpy_array_equal(arr, exp_arr)
+ tm.assert_index_equal(idx, idx3)
+ assert idx.freq == idx3.freq
+
+ def test_factorize_tz(self, tz_naive_fixture, index_or_series):
tz = tz_naive_fixture
# GH#13750
base = pd.date_range("2016-11-05", freq="H", periods=100, tz=tz)
@@ -315,27 +328,33 @@ def test_factorize_tz(self, tz_naive_fixture):
exp_arr = np.arange(100, dtype=np.intp).repeat(5)
- for obj in [idx, pd.Series(idx)]:
- arr, res = obj.factorize()
- tm.assert_numpy_array_equal(arr, exp_arr)
- expected = base._with_freq(None)
- tm.assert_index_equal(res, expected)
+ obj = index_or_series(idx)
- def test_factorize_dst(self):
- # GH 13750
- idx = pd.date_range("2016-11-06", freq="H", periods=12, tz="US/Eastern")
-
- for obj in [idx, pd.Series(idx)]:
- arr, res = obj.factorize()
- tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
- tm.assert_index_equal(res, idx)
-
- idx = pd.date_range("2016-06-13", freq="H", periods=12, tz="US/Eastern")
+ arr, res = obj.factorize()
+ tm.assert_numpy_array_equal(arr, exp_arr)
+ expected = base._with_freq(None)
+ tm.assert_index_equal(res, expected)
+ assert res.freq == expected.freq
- for obj in [idx, pd.Series(idx)]:
- arr, res = obj.factorize()
- tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
- tm.assert_index_equal(res, idx)
+ def test_factorize_dst(self, index_or_series):
+ # GH 13750
+ idx = date_range("2016-11-06", freq="H", periods=12, tz="US/Eastern")
+ obj = index_or_series(idx)
+
+ arr, res = obj.factorize()
+ tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
+ tm.assert_index_equal(res, idx)
+ if index_or_series is Index:
+ assert res.freq == idx.freq
+
+ idx = date_range("2016-06-13", freq="H", periods=12, tz="US/Eastern")
+ obj = index_or_series(idx)
+
+ arr, res = obj.factorize()
+ tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
+ tm.assert_index_equal(res, idx)
+ if index_or_series is Index:
+ assert res.freq == idx.freq
@pytest.mark.parametrize(
"arr, expected",
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index 4a1749ff734c1..ef1e599d13221 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -75,17 +75,26 @@ def test_factorize(self):
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
- # freq must be preserved
+ def test_factorize_preserves_freq(self):
+ # GH#38120 freq should be preserved
idx3 = timedelta_range("1 day", periods=4, freq="s")
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
+ assert idx.freq == idx3.freq
+
+ arr, idx = pd.factorize(idx3)
+ tm.assert_numpy_array_equal(arr, exp_arr)
+ tm.assert_index_equal(idx, idx3)
+ assert idx.freq == idx3.freq
def test_sort_values(self):
diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py
index 4565d79c632de..162be4e0740d6 100644
--- a/pandas/tests/indexing/multiindex/test_multiindex.py
+++ b/pandas/tests/indexing/multiindex/test_multiindex.py
@@ -91,3 +91,13 @@ def test_multiindex_get_loc_list_raises(self):
msg = "unhashable type"
with pytest.raises(TypeError, match=msg):
idx.get_loc([])
+
+ def test_multiindex_with_datatime_level_preserves_freq(self):
+ # https://github.com/pandas-dev/pandas/issues/35563
+ idx = Index(range(2), name="A")
+ dti = pd.date_range("2020-01-01", periods=7, freq="D", name="B")
+ mi = MultiIndex.from_product([idx, dti])
+ df = DataFrame(np.random.randn(14, 2), index=mi)
+ result = df.loc[0].index
+ tm.assert_index_equal(result, dti)
+ assert result.freq == dti.freq
diff --git a/pandas/tests/window/common.py b/pandas/tests/window/common.py
index 7e0be331ec8d5..d6b80a803a88d 100644
--- a/pandas/tests/window/common.py
+++ b/pandas/tests/window/common.py
@@ -12,7 +12,6 @@ def get_result(obj, obj2=None):
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = get_result(frame[1], frame[5])
- expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected, check_names=False)
| #38120 | https://api.github.com/repos/pandas-dev/pandas/pulls/38137 | 2020-11-28T19:45:13Z | 2020-11-29T20:21:42Z | null | 2020-11-30T17:34:55Z |
BUG: Categorical[dt64tz].to_numpy() losing tz | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index bb06bcc9b5aa8..3fab4850dd1ec 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -533,7 +533,7 @@ Categorical
- Bug in :meth:`Categorical.__setitem__` that incorrectly raised when trying to set a tuple value (:issue:`20439`)
- Bug in :meth:`CategoricalIndex.equals` incorrectly casting non-category entries to ``np.nan`` (:issue:`37667`)
- Bug in :meth:`CategoricalIndex.where` incorrectly setting non-category entries to ``np.nan`` instead of raising ``TypeError`` (:issue:`37977`)
--
+- Bug in :meth:`Categorical.to_numpy` and ``np.array(categorical)`` with timezone-aware ``datetime64`` categories incorrectly dropping the timezone information instead of casting to object dtype (:issue:`38136`)
Datetimelike
^^^^^^^^^^^^
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 3d9d2ba04f31b..cb5b4145855d1 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -320,6 +320,16 @@ def index_or_series(request):
index_or_series2 = index_or_series
+@pytest.fixture(
+ params=[pd.Index, pd.Series, pd.array], ids=["index", "series", "array"]
+)
+def index_or_series_or_array(request):
+ """
+ Fixture to parametrize over Index, Series, and ExtensionArray
+ """
+ return request.param
+
+
@pytest.fixture
def dict_subclass():
"""
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index fe66aae23f510..3995e7b251184 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1269,15 +1269,13 @@ def __array__(self, dtype=None) -> np.ndarray:
if dtype==None (default), the same dtype as
categorical.categories.dtype.
"""
- ret = take_1d(self.categories.values, self._codes)
+ ret = take_1d(self.categories._values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
- if is_extension_array_dtype(ret):
- # When we're a Categorical[ExtensionArray], like Interval,
- # we need to ensure __array__ get's all the way to an
- # ndarray.
- ret = np.asarray(ret)
- return ret
+ # When we're a Categorical[ExtensionArray], like Interval,
+ # we need to ensure __array__ gets all the way to an
+ # ndarray.
+ return np.asarray(ret)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# for binary ops, use our custom dunder methods
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 1f4221206e5bc..b1b5d16eaf7f0 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -84,7 +84,13 @@
from pandas.core.generic import NDFrame
from pandas.core.indexers import deprecate_ndim_indexing, unpack_1tuple
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
-from pandas.core.indexes.api import Float64Index, Index, MultiIndex, ensure_index
+from pandas.core.indexes.api import (
+ CategoricalIndex,
+ Float64Index,
+ Index,
+ MultiIndex,
+ ensure_index,
+)
import pandas.core.indexes.base as ibase
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
@@ -412,7 +418,13 @@ def _set_axis(self, axis: int, labels, fastpath: bool = False) -> None:
labels = ensure_index(labels)
if labels._is_all_dates:
- if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
+ deep_labels = labels
+ if isinstance(labels, CategoricalIndex):
+ deep_labels = labels.categories
+
+ if not isinstance(
+ deep_labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)
+ ):
try:
labels = DatetimeIndex(labels)
# need to set here because we changed the index
diff --git a/pandas/tests/base/test_conversion.py b/pandas/tests/base/test_conversion.py
index a6fdb82e48197..668954a3f4a0b 100644
--- a/pandas/tests/base/test_conversion.py
+++ b/pandas/tests/base/test_conversion.py
@@ -316,18 +316,34 @@ def test_array_multiindex_raises():
TimedeltaArray(np.array([0, 3600000000000], dtype="i8"), freq="H"),
np.array([0, 3600000000000], dtype="m8[ns]"),
),
+ # GH#26406 tz is preserved in Categorical[dt64tz]
+ (
+ pd.Categorical(pd.date_range("2016-01-01", periods=2, tz="US/Pacific")),
+ np.array(
+ [
+ Timestamp("2016-01-01", tz="US/Pacific"),
+ Timestamp("2016-01-02", tz="US/Pacific"),
+ ]
+ ),
+ ),
],
)
-def test_to_numpy(array, expected, index_or_series):
- box = index_or_series
+def test_to_numpy(array, expected, index_or_series_or_array):
+ box = index_or_series_or_array
thing = box(array)
if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index:
pytest.skip(f"No index type for {array.dtype}")
+ if array.dtype.name == "int64" and box is pd.array:
+ pytest.xfail("thing is Int64 and to_numpy() returns object")
+
result = thing.to_numpy()
tm.assert_numpy_array_equal(result, expected)
+ result = np.asarray(thing)
+ tm.assert_numpy_array_equal(result, expected)
+
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize(
| - [x] closes #26406
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38136 | 2020-11-28T18:37:50Z | 2020-11-30T00:09:10Z | 2020-11-30T00:09:09Z | 2020-11-30T00:11:37Z |
ENH: Categorical.unique can keep same dtype | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 6f046d3a9379d..61f8b7c6bcc6a 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -254,6 +254,7 @@ Other enhancements
- :func:`read_csv` supports memory-mapping for compressed files (:issue:`37621`)
- Improve error reporting for :meth:`DataFrame.merge` when invalid merge column definitions were given (:issue:`16228`)
- Improve numerical stability for :meth:`.Rolling.skew`, :meth:`.Rolling.kurt`, :meth:`Expanding.skew` and :meth:`Expanding.kurt` through implementation of Kahan summation (:issue:`6929`)
+- :meth:`Categorical.unique` has a new parameter ``remove_unused_categories``, which if set to ``False``, keeps the dtype of the original categorical (:issue:`38135`)
- Improved error reporting for subsetting columns of a :class:`.DataFrameGroupBy` with ``axis=1`` (:issue:`37725`)
- Implement method ``cross`` for :meth:`DataFrame.merge` and :meth:`DataFrame.join` (:issue:`5401`)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 62e508c491740..487a12853f61a 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2035,16 +2035,24 @@ def mode(self, dropna=True):
# ------------------------------------------------------------------
# ExtensionArray Interface
- def unique(self):
+ def unique(self, remove_unused_categories: bool = True) -> "Categorical":
"""
Return the ``Categorical`` which ``categories`` and ``codes`` are
- unique. Unused categories are NOT returned.
+ unique. By default, unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
+ Parameters
+ ----------
+ remove_unused_categories : bool, default True
+ If True, unused categories are not returned.
+ If False, the input dtype is returned unchanged.
+
+ .. versionadded:: 1.2.0
+
Returns
-------
unique values : ``Categorical``
@@ -2075,13 +2083,24 @@ def unique(self):
... ).unique()
['b', 'a', 'c']
Categories (3, object): ['a' < 'b' < 'c']
+
+ By default, unused categories are removed, but this can be changed:
+
+ >>> cat = pd.Categorical(list("baab"), categories=list("abc"), ordered=True)
+ >>> cat.unique()
+ ['b', 'a']
+ Categories (2, object): ['a' < 'b']
+ >>> cat.unique(remove_unused_categories=False)
+ ['b', 'a']
+ Categories (3, object): ['a' < 'b' < 'c']
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
- cat = self.copy()
- # keep nan in codes
- cat._codes = unique_codes
+ cat = self._constructor(unique_codes, dtype=self.dtype, fastpath=True)
+
+ if not remove_unused_categories:
+ return cat
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py
index 7bd7d29ec9703..91559d92dcd92 100644
--- a/pandas/tests/arrays/categorical/test_analytics.py
+++ b/pandas/tests/arrays/categorical/test_analytics.py
@@ -6,7 +6,7 @@
from pandas.compat import PYPY
-from pandas import Categorical, Index, NaT, Series, date_range
+from pandas import Categorical, CategoricalDtype, Index, NaT, Series, date_range
import pandas._testing as tm
from pandas.api.types import is_scalar
@@ -242,6 +242,28 @@ def test_unique_ordered(self):
exp_cat = Categorical(["b", np.nan, "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
+ @pytest.mark.parametrize(
+ "values, expected",
+ [
+ [list("abc"), list("abc")],
+ [list("bac"), list("bac")],
+ [list("ab"), list("ab")],
+ [list("bc"), list("bc")],
+ [list("aabbcc"), list("abc")],
+ [list("aabb"), list("ab")],
+ [[np.nan, "a", "b"], [np.nan, "a", "b"]],
+ [["a", "b", np.nan], ["a", "b", np.nan]],
+ [["a", "b", "a", "b", np.nan], ["a", "b", np.nan]],
+ ],
+ )
+ def test_unique_keep_unused_categories(self, values, expected, ordered):
+ # GH38135
+ dtype = CategoricalDtype(list("abc"), ordered=ordered)
+ result = Categorical(values, dtype=dtype).unique(remove_unused_categories=False)
+ expected = Categorical(expected, dtype=dtype)
+
+ tm.assert_categorical_equal(result, expected)
+
def test_unique_index_series(self):
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1])
# Categorical.unique sorts categories by appearance order
| There are situations where we want to keep the same dtype as in the original after applying `unique` For example
```python
>>> dtype = pd.categoricalDtype(['very good', 'good', 'neutral', 'bad', 'very bad'], ordered=True)
>>> cat = pd.Categorical(['good','good', 'bad', 'bad'], dtype=dtype)
>>> cat
[good, good, bad, bad]
Categories (5, object): [very good < good < neutral < bad < very bad]
>>> cat.unique().dtype == cat.dtype
False # this is a bug IMO, but others may not agree
```
Even if it's not a bug, there are situations where we want the comparison above to return True. To alleviate the above, I've added a new parameter, so we can do
```
>>> cat.unique(remove_unused_categories=False).dtype == cat.dtype
True
```
Helps #18291, but does not close the issue.
| https://api.github.com/repos/pandas-dev/pandas/pulls/38135 | 2020-11-28T16:22:31Z | 2020-11-28T19:57:05Z | null | 2020-11-28T19:57:10Z |
REF: de-duplicate Block.__init__ | diff --git a/pandas/core/internals/api.py b/pandas/core/internals/api.py
index 3fbe324417c60..9d517fe55f808 100644
--- a/pandas/core/internals/api.py
+++ b/pandas/core/internals/api.py
@@ -10,6 +10,7 @@
import numpy as np
+from pandas._libs.internals import BlockPlacement
from pandas._typing import Dtype
from pandas.core.dtypes.common import is_datetime64tz_dtype
@@ -58,4 +59,17 @@ def make_block(
# for e.g. pyarrow?
values = DatetimeArray._simple_new(values, dtype=dtype)
+ if not isinstance(placement, BlockPlacement):
+ placement = BlockPlacement(placement)
+
+ if ndim is None:
+ # GH#38134 Block constructor now assumes ndim is not None
+ if not isinstance(values.dtype, np.dtype):
+ if len(placement) != 1:
+ ndim = 1
+ else:
+ ndim = 2
+ else:
+ ndim = values.ndim
+
return klass(values, ndim=ndim, placement=placement)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index b92ef3ec3b367..0ce746465cce4 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -170,6 +170,10 @@ def __init__(self, values, placement, ndim: int):
f"placement implies {len(self.mgr_locs)}"
)
+ elif self.is_extension and self.ndim == 2 and len(self.mgr_locs) != 1:
+ # TODO(EA2D): check unnecessary with 2D EAs
+ raise AssertionError("block.size != values.size")
+
@classmethod
def _maybe_coerce_values(cls, values):
"""
@@ -185,7 +189,7 @@ def _maybe_coerce_values(cls, values):
"""
return values
- def _check_ndim(self, values, ndim):
+ def _check_ndim(self, values, ndim: int):
"""
ndim inference and validation.
@@ -196,7 +200,7 @@ def _check_ndim(self, values, ndim):
Parameters
----------
values : array-like
- ndim : int or None
+ ndim : int
Returns
-------
@@ -206,8 +210,7 @@ def _check_ndim(self, values, ndim):
------
ValueError : the number of dimensions do not match
"""
- if ndim is None:
- ndim = values.ndim
+ assert isinstance(ndim, int) # GH#38134 enforce this
if self._validate_ndim:
if values.ndim != ndim:
@@ -1481,33 +1484,6 @@ class ExtensionBlock(Block):
values: ExtensionArray
- def __init__(self, values, placement, ndim: int):
- """
- Initialize a non-consolidatable block.
-
- 'ndim' may be inferred from 'placement'.
-
- This will call continue to call __init__ for the other base
- classes mixed in with this Mixin.
- """
-
- # Placement must be converted to BlockPlacement so that we can check
- # its length
- if not isinstance(placement, libinternals.BlockPlacement):
- placement = libinternals.BlockPlacement(placement)
-
- # Maybe infer ndim from placement
- if ndim is None:
- if len(placement) != 1:
- ndim = 1
- else:
- ndim = 2
- super().__init__(values, placement, ndim=ndim)
-
- if self.ndim == 2 and len(self.mgr_locs) != 1:
- # TODO(EA2D): check unnecessary with 2D EAs
- raise AssertionError("block.size != values.size")
-
@property
def shape(self) -> Shape:
# TODO(EA2D): override unnecessary with 2D EAs
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38134 | 2020-11-28T16:10:13Z | 2021-03-09T22:07:45Z | 2021-03-09T22:07:45Z | 2021-04-15T02:28:42Z |
test backportability of #36927 | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index 46c4ad4f35fe4..edc2f7327abfc 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -23,6 +23,7 @@ Fixed regressions
- Fixed regression in :meth:`DataFrame.groupby` aggregation with out-of-bounds datetime objects in an object-dtype column (:issue:`36003`)
- Fixed regression in ``df.groupby(..).rolling(..)`` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
- Fixed regression in :meth:`DataFrame.fillna` not filling ``NaN`` after other operations such as :meth:`DataFrame.pivot` (:issue:`36495`).
+- Fixed regression in :meth:`MultiIndex.intersection` returning duplicates when at least one of the indexes had duplicates (:issue:`36915`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index e4dee2b0a08ce..7bb0d68aa6910 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2654,7 +2654,7 @@ def intersection(self, other, sort=False):
self._assert_can_do_setop(other)
other = ensure_index(other)
- if self.equals(other):
+ if self.equals(other) and not self.has_duplicates:
return self._get_reconciled_name_object(other)
if not is_dtype_equal(self.dtype, other.dtype):
@@ -2672,7 +2672,7 @@ def intersection(self, other, sort=False):
except TypeError:
pass
else:
- return self._wrap_setop_result(other, result)
+ return self._wrap_setop_result(other, algos.unique1d(result))
try:
indexer = Index(rvals).get_indexer(lvals)
@@ -2683,7 +2683,7 @@ def intersection(self, other, sort=False):
indexer = algos.unique1d(Index(rvals).get_indexer_non_unique(lvals)[0])
indexer = indexer[indexer != -1]
- taken = other.take(indexer)
+ taken = other.take(indexer).unique()
res_name = get_op_result_name(self, other)
if sort is None:
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index b9ba823ca1b0b..6ad82e81e7c30 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3398,6 +3398,8 @@ def intersection(self, other, sort=False):
other, result_names = self._convert_can_do_setop(other)
if self.equals(other):
+ if self.has_duplicates:
+ return self.unique()
return self
if not is_object_dtype(other.dtype):
@@ -3416,10 +3418,12 @@ def intersection(self, other, sort=False):
uniq_tuples = None # flag whether _inner_indexer was successful
if self.is_monotonic and other.is_monotonic:
try:
- uniq_tuples = self._inner_indexer(lvals, rvals)[0]
- sort = False # uniq_tuples is already sorted
+ inner_tuples = self._inner_indexer(lvals, rvals)[0]
+ sort = False # inner_tuples is already sorted
except TypeError:
pass
+ else:
+ uniq_tuples = algos.unique(inner_tuples)
if uniq_tuples is None:
other_uniq = set(rvals)
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 60f3d23aaed13..e938fb49c2ed4 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -539,7 +539,9 @@ def _should_reindex_frame_op(
if fill_value is None and level is None and axis is default_axis:
# TODO: any other cases we should handle here?
cols = left.columns.intersection(right.columns)
- if not (cols.equals(left.columns) and cols.equals(right.columns)):
+ if not (
+ cols.equals(left.columns.unique()) and cols.equals(right.columns.unique())
+ ):
return True
return False
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 2349cb1dcc0c7..f43a33d088df0 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1209,7 +1209,9 @@ def _validate_specification(self):
raise MergeError("Must pass left_on or left_index=True")
else:
# use the common columns
- common_cols = self.left.columns.intersection(self.right.columns)
+ left_cols = self.left.columns
+ right_cols = self.right.columns
+ common_cols = left_cols.intersection(right_cols)
if len(common_cols) == 0:
raise MergeError(
"No common columns to perform merge on. "
@@ -1218,7 +1220,10 @@ def _validate_specification(self):
f"left_index={self.left_index}, "
f"right_index={self.right_index}"
)
- if not common_cols.is_unique:
+ if (
+ not left_cols.join(common_cols, how="inner").is_unique
+ or not right_cols.join(common_cols, how="inner").is_unique
+ ):
raise MergeError(f"Data columns not unique: {repr(common_cols)}")
self.left_on = self.right_on = common_cols
elif self.on is not None:
diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py
index d7427ee622977..8637c4cb8bffb 100644
--- a/pandas/tests/indexes/multi/test_setops.py
+++ b/pandas/tests/indexes/multi/test_setops.py
@@ -375,3 +375,26 @@ def test_setops_disallow_true(method):
with pytest.raises(ValueError, match="The 'sort' keyword only takes"):
getattr(idx1, method)(idx2, sort=True)
+
+
+@pytest.mark.parametrize(
+ ("tuples", "exp_tuples"),
+ [
+ ([("val1", "test1")], [("val1", "test1")]),
+ ([("val1", "test1"), ("val1", "test1")], [("val1", "test1")]),
+ (
+ [("val2", "test2"), ("val1", "test1")],
+ [("val2", "test2"), ("val1", "test1")],
+ ),
+ ],
+)
+def test_intersect_with_duplicates(tuples, exp_tuples):
+ # GH#36915
+ left = MultiIndex.from_tuples(tuples, names=["first", "second"])
+ right = MultiIndex.from_tuples(
+ [("val1", "test1"), ("val1", "test1"), ("val2", "test2")],
+ names=["first", "second"],
+ )
+ result = left.intersection(right)
+ expected = MultiIndex.from_tuples(exp_tuples, names=["first", "second"])
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index 1a40fe550be61..26d7c14b46e08 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -95,3 +95,13 @@ def test_union_dtypes(left, right, expected):
b = pd.Index([], dtype=right)
result = (a | b).dtype
assert result == expected
+
+
+@pytest.mark.parametrize("values", [[1, 2, 2, 3], [3, 3]])
+def test_intersection_duplicates(values):
+ # GH#31326
+ a = pd.Index(values)
+ b = pd.Index([3, 3])
+ result = a.intersection(b)
+ expected = pd.Index([3])
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 4fd3c688b8771..491ec97e5dee9 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -742,7 +742,7 @@ def test_overlapping_columns_error_message(self):
# #2649, #10639
df2.columns = ["key1", "foo", "foo"]
- msg = r"Data columns not unique: Index\(\['foo', 'foo'\], dtype='object'\)"
+ msg = r"Data columns not unique: Index\(\['foo'\], dtype='object'\)"
with pytest.raises(MergeError, match=msg):
merge(df, df2)
| https://api.github.com/repos/pandas-dev/pandas/pulls/38133 | 2020-11-28T16:02:09Z | 2020-11-28T16:54:01Z | null | 2020-11-28T16:54:01Z | |
DOC: add contibutors to 1.2.0 release notes | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 5d36c52da9f0d..12a2d3aecdaf2 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -782,3 +782,5 @@ Other
Contributors
~~~~~~~~~~~~
+
+.. contributors:: v1.1.4..v1.2.0|HEAD
| https://api.github.com/repos/pandas-dev/pandas/pulls/38132 | 2020-11-28T14:58:17Z | 2020-11-28T18:05:06Z | 2020-11-28T18:05:06Z | 2020-11-28T18:25:14Z | |
REF: use np.where instead of maybe_upcast_putmask in nanops | diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 80c4cd5b44a92..88662a4fabed8 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -12,7 +12,6 @@
from pandas._typing import ArrayLike, Dtype, DtypeObj, F, Scalar
from pandas.compat._optional import import_optional_dependency
-from pandas.core.dtypes.cast import maybe_upcast_putmask
from pandas.core.dtypes.common import (
get_dtype,
is_any_int_dtype,
@@ -284,7 +283,7 @@ def _get_values(
"""
# In _get_values is only called from within nanops, and in all cases
# with scalar fill_value. This guarantee is important for the
- # maybe_upcast_putmask call below
+ # np.where call below
assert is_scalar(fill_value)
values = extract_array(values, extract_numpy=True)
@@ -292,10 +291,12 @@ def _get_values(
dtype = values.dtype
+ datetimelike = False
if needs_i8_conversion(values.dtype):
# changing timedelta64/datetime64 to int64 needs to happen after
# finding `mask` above
values = np.asarray(values.view("i8"))
+ datetimelike = True
dtype_ok = _na_ok_dtype(dtype)
@@ -306,13 +307,13 @@ def _get_values(
)
if skipna and (mask is not None) and (fill_value is not None):
- values = values.copy()
- if dtype_ok and mask.any():
- np.putmask(values, mask, fill_value)
-
- # promote if needed
- else:
- values, _ = maybe_upcast_putmask(values, mask, fill_value)
+ if mask.any():
+ if dtype_ok or datetimelike:
+ values = values.copy()
+ np.putmask(values, mask, fill_value)
+ else:
+ # np.where will promote if needed
+ values = np.where(~mask, values, fill_value)
# return a platform independent precision dtype
dtype_max = dtype
| This way we make at most one copy. Also we get rid of the only use of maybe_upcast_putmask that has fill_value of anything other than np.nan, so in a follow-up it can be simplified. | https://api.github.com/repos/pandas-dev/pandas/pulls/38130 | 2020-11-28T04:12:17Z | 2020-11-29T19:25:08Z | 2020-11-29T19:25:08Z | 2020-11-29T19:30:25Z |
REF: de-duplicate ndarray[datetimelike] wrapping | diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index efb66c9a47a97..757cea2c710b2 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -44,7 +44,11 @@
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.arrays.categorical import Categorical
import pandas.core.common as com
-from pandas.core.construction import array, extract_array
+from pandas.core.construction import (
+ array,
+ ensure_wrapped_if_datetimelike,
+ extract_array,
+)
from pandas.core.indexers import check_array_indexer
from pandas.core.indexes.base import ensure_index
from pandas.core.ops import invalid_comparison, unpack_zerodim_and_defer
@@ -251,11 +255,9 @@ def _simple_new(
raise ValueError(msg)
# For dt64/td64 we want DatetimeArray/TimedeltaArray instead of ndarray
- from pandas.core.ops.array_ops import maybe_upcast_datetimelike_array
-
- left = maybe_upcast_datetimelike_array(left)
+ left = ensure_wrapped_if_datetimelike(left)
left = extract_array(left, extract_numpy=True)
- right = maybe_upcast_datetimelike_array(right)
+ right = ensure_wrapped_if_datetimelike(right)
right = extract_array(right, extract_numpy=True)
lbase = getattr(left, "_ndarray", left).base
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index f9ebe3f1e185e..96cf1be7520fb 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -402,6 +402,24 @@ def extract_array(obj: object, extract_numpy: bool = False) -> Union[Any, ArrayL
return obj
+def ensure_wrapped_if_datetimelike(arr):
+ """
+ Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray.
+ """
+ if isinstance(arr, np.ndarray):
+ if arr.dtype.kind == "M":
+ from pandas.core.arrays import DatetimeArray
+
+ return DatetimeArray._from_sequence(arr)
+
+ elif arr.dtype.kind == "m":
+ from pandas.core.arrays import TimedeltaArray
+
+ return TimedeltaArray._from_sequence(arr)
+
+ return arr
+
+
def sanitize_array(
data,
index: Optional[Index],
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 63e3440558c75..a9355e30cd3c2 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -18,7 +18,7 @@
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.sparse import SparseArray
-from pandas.core.construction import array
+from pandas.core.construction import array, ensure_wrapped_if_datetimelike
def _get_dtype_kinds(arrays) -> Set[str]:
@@ -360,12 +360,14 @@ def _concat_datetime(to_concat, axis=0):
-------
a single array, preserving the combined dtypes
"""
- to_concat = [_wrap_datetimelike(x) for x in to_concat]
+ to_concat = [ensure_wrapped_if_datetimelike(x) for x in to_concat]
+
single_dtype = len({x.dtype for x in to_concat}) == 1
# multiple types, need to coerce to object
if not single_dtype:
- # wrap_datetimelike ensures that astype(object) wraps in Timestamp/Timedelta
+ # ensure_wrapped_if_datetimelike ensures that astype(object) wraps
+ # in Timestamp/Timedelta
return _concatenate_2d([x.astype(object) for x in to_concat], axis=axis)
if axis == 1:
@@ -379,17 +381,3 @@ def _concat_datetime(to_concat, axis=0):
assert result.shape[0] == 1
result = result[0]
return result
-
-
-def _wrap_datetimelike(arr):
- """
- Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray.
-
- DTA/TDA handle .astype(object) correctly.
- """
- from pandas.core.construction import array as pd_array, extract_array
-
- arr = extract_array(arr, extract_numpy=True)
- if isinstance(arr, np.ndarray) and arr.dtype.kind in ["m", "M"]:
- arr = pd_array(arr)
- return arr
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
index c855687552e82..41d539564d91e 100644
--- a/pandas/core/ops/array_ops.py
+++ b/pandas/core/ops/array_ops.py
@@ -30,6 +30,7 @@
from pandas.core.dtypes.generic import ABCExtensionArray, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna, notna
+from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.ops import missing
from pandas.core.ops.dispatch import should_extension_dispatch
from pandas.core.ops.invalid import invalid_comparison
@@ -175,8 +176,8 @@ def arithmetic_op(left: ArrayLike, right: Any, op):
# NB: We assume that extract_array has already been called
# on `left` and `right`.
- lvalues = maybe_upcast_datetimelike_array(left)
- rvalues = maybe_upcast_datetimelike_array(right)
+ lvalues = ensure_wrapped_if_datetimelike(left)
+ rvalues = ensure_wrapped_if_datetimelike(right)
rvalues = _maybe_upcast_for_op(rvalues, lvalues.shape)
if should_extension_dispatch(lvalues, rvalues) or isinstance(rvalues, Timedelta):
@@ -206,7 +207,7 @@ def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike:
ndarray or ExtensionArray
"""
# NB: We assume extract_array has already been called on left and right
- lvalues = maybe_upcast_datetimelike_array(left)
+ lvalues = ensure_wrapped_if_datetimelike(left)
rvalues = right
rvalues = lib.item_from_zerodim(rvalues)
@@ -331,7 +332,7 @@ def fill_bool(x, left=None):
right = construct_1d_object_array_from_listlike(right)
# NB: We assume extract_array has already been called on left and right
- lvalues = maybe_upcast_datetimelike_array(left)
+ lvalues = ensure_wrapped_if_datetimelike(left)
rvalues = right
if should_extension_dispatch(lvalues, rvalues):
@@ -400,31 +401,6 @@ def get_array_op(op):
raise NotImplementedError(op_name)
-def maybe_upcast_datetimelike_array(obj: ArrayLike) -> ArrayLike:
- """
- If we have an ndarray that is either datetime64 or timedelta64, wrap in EA.
-
- Parameters
- ----------
- obj : ndarray or ExtensionArray
-
- Returns
- -------
- ndarray or ExtensionArray
- """
- if isinstance(obj, np.ndarray):
- if obj.dtype.kind == "m":
- from pandas.core.arrays import TimedeltaArray
-
- return TimedeltaArray._from_sequence(obj)
- if obj.dtype.kind == "M":
- from pandas.core.arrays import DatetimeArray
-
- return DatetimeArray._from_sequence(obj)
-
- return obj
-
-
def _maybe_upcast_for_op(obj, shape: Shape):
"""
Cast non-pandas objects to pandas types to unify behavior of arithmetic
| https://api.github.com/repos/pandas-dev/pandas/pulls/38129 | 2020-11-28T04:05:41Z | 2020-11-29T18:15:13Z | 2020-11-29T18:15:13Z | 2020-11-29T18:19:45Z | |
CLN: remove unreachable in core.dtypes | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 0f0e82f4ad4e2..fe40bc42887c4 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -317,10 +317,7 @@ def maybe_cast_result(result, obj: "Series", numeric_only: bool = False, how: st
result : array-like
result maybe casted to the dtype.
"""
- if obj.ndim > 1:
- dtype = obj._values.dtype
- else:
- dtype = obj.dtype
+ dtype = obj.dtype
dtype = maybe_cast_result_dtype(dtype, how)
if not is_scalar(result):
@@ -452,12 +449,9 @@ def maybe_upcast_putmask(
# NaN -> NaT
# integer or integer array -> date-like array
if result.dtype.kind in ["m", "M"]:
- if is_scalar(other):
- if isna(other):
- other = result.dtype.type("nat")
- elif is_integer(other):
- other = np.array(other, dtype=result.dtype)
- elif is_integer_dtype(other):
+ if isna(other):
+ other = result.dtype.type("nat")
+ elif is_integer(other):
other = np.array(other, dtype=result.dtype)
def changeit():
@@ -510,9 +504,8 @@ def maybe_casted_values(
"""
values = index._values
- if not isinstance(index, (ABCPeriodIndex, ABCDatetimeIndex)):
- if values.dtype == np.object_:
- values = lib.maybe_convert_objects(values)
+ if values.dtype == np.object_:
+ values = lib.maybe_convert_objects(values)
# if we have the codes, extract the values with a mask
if codes is not None:
@@ -1357,9 +1350,6 @@ def maybe_infer_to_datetimelike(
value, (ABCDatetimeIndex, ABCPeriodIndex, ABCDatetimeArray, ABCPeriodArray)
):
return value
- elif isinstance(value, ABCSeries):
- if isinstance(value._values, ABCDatetimeIndex):
- return value._values
v = value
@@ -1451,9 +1441,6 @@ def maybe_cast_to_datetime(value, dtype: DtypeObj, errors: str = "raise"):
from pandas.core.tools.timedeltas import to_timedelta
if dtype is not None:
- if isinstance(dtype, str):
- dtype = np.dtype(dtype)
-
is_datetime64 = is_datetime64_dtype(dtype)
is_datetime64tz = is_datetime64tz_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
@@ -1466,18 +1453,21 @@ def maybe_cast_to_datetime(value, dtype: DtypeObj, errors: str = "raise"):
f"Please pass in '{dtype.name}[ns]' instead."
)
- if is_datetime64 and not is_dtype_equal(
- getattr(dtype, "subtype", dtype), DT64NS_DTYPE
- ):
-
- # pandas supports dtype whose granularity is less than [ns]
- # e.g., [ps], [fs], [as]
- if dtype <= np.dtype("M8[ns]"):
- if dtype.name == "datetime64":
- raise ValueError(msg)
- dtype = DT64NS_DTYPE
- else:
- raise TypeError(f"cannot convert datetimelike to dtype [{dtype}]")
+ if is_datetime64:
+ # unpack e.g. SparseDtype
+ dtype = getattr(dtype, "subtype", dtype)
+ if not is_dtype_equal(dtype, DT64NS_DTYPE):
+
+ # pandas supports dtype whose granularity is less than [ns]
+ # e.g., [ps], [fs], [as]
+ if dtype <= np.dtype("M8[ns]"):
+ if dtype.name == "datetime64":
+ raise ValueError(msg)
+ dtype = DT64NS_DTYPE
+ else:
+ raise TypeError(
+ f"cannot convert datetimelike to dtype [{dtype}]"
+ )
elif is_datetime64tz:
# our NaT doesn't support tz's
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index a9b0498081511..63e3440558c75 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -152,7 +152,7 @@ def is_nonempty(x) -> bool:
return np.concatenate(to_concat)
elif _contains_datetime or "timedelta" in typs:
- return _concat_datetime(to_concat, axis=axis, typs=typs)
+ return _concat_datetime(to_concat, axis=axis)
elif all_empty:
# we have all empties, but may need to coerce the result dtype to
@@ -346,7 +346,7 @@ def _concatenate_2d(to_concat, axis: int):
return np.concatenate(to_concat, axis=axis)
-def _concat_datetime(to_concat, axis=0, typs=None):
+def _concat_datetime(to_concat, axis=0):
"""
provide concatenation of an datetimelike array of arrays each of which is a
single M8[ns], datetime64[ns, tz] or m8[ns] dtype
@@ -355,15 +355,11 @@ def _concat_datetime(to_concat, axis=0, typs=None):
----------
to_concat : array of arrays
axis : axis to provide concatenation
- typs : set of to_concat dtypes
Returns
-------
a single array, preserving the combined dtypes
"""
- if typs is None:
- typs = _get_dtype_kinds(to_concat)
-
to_concat = [_wrap_datetimelike(x) for x in to_concat]
single_dtype = len({x.dtype for x in to_concat}) == 1
| https://api.github.com/repos/pandas-dev/pandas/pulls/38128 | 2020-11-28T03:39:58Z | 2020-11-28T17:42:26Z | 2020-11-28T17:42:26Z | 2020-11-28T18:22:09Z | |
CLN: simplify mask_missing | diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 52536583b9b0d..0afffbc1460e0 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -7,7 +7,7 @@
import numpy as np
from pandas._libs import algos, lib
-from pandas._typing import DtypeObj
+from pandas._typing import ArrayLike, DtypeObj
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from_array
@@ -15,57 +15,45 @@
ensure_float64,
is_integer_dtype,
is_numeric_v_string_like,
- is_scalar,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import isna
-def mask_missing(arr, values_to_mask):
+def mask_missing(arr: ArrayLike, values_to_mask) -> np.ndarray:
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
- """
- dtype, values_to_mask = infer_dtype_from_array(values_to_mask)
- try:
- values_to_mask = np.array(values_to_mask, dtype=dtype)
+ Parameters
+ ----------
+ arr : ArrayLike
+ values_to_mask: list, tuple, or scalar
- except Exception:
- values_to_mask = np.array(values_to_mask, dtype=object)
+ Returns
+ -------
+ np.ndarray[bool]
+ """
+ # When called from Block.replace/replace_list, values_to_mask is a scalar
+ # known to be holdable by arr.
+ # When called from Series._single_replace, values_to_mask is tuple or list
+ dtype, values_to_mask = infer_dtype_from_array(values_to_mask)
+ values_to_mask = np.array(values_to_mask, dtype=dtype)
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
- mask = None
+ # GH 21977
+ mask = np.zeros(arr.shape, dtype=bool)
for x in nonna:
- if mask is None:
- if is_numeric_v_string_like(arr, x):
- # GH#29553 prevent numpy deprecation warnings
- mask = False
- else:
- mask = arr == x
-
- # if x is a string and arr is not, then we get False and we must
- # expand the mask to size arr.shape
- if is_scalar(mask):
- mask = np.zeros(arr.shape, dtype=bool)
+ if is_numeric_v_string_like(arr, x):
+ # GH#29553 prevent numpy deprecation warnings
+ pass
else:
- if is_numeric_v_string_like(arr, x):
- # GH#29553 prevent numpy deprecation warnings
- mask |= False
- else:
- mask |= arr == x
+ mask |= arr == x
if na_mask.any():
- if mask is None:
- mask = isna(arr)
- else:
- mask |= isna(arr)
-
- # GH 21977
- if mask is None:
- mask = np.zeros(arr.shape, dtype=bool)
+ mask |= isna(arr)
return mask
| https://api.github.com/repos/pandas-dev/pandas/pulls/38127 | 2020-11-28T03:06:49Z | 2020-11-28T17:44:14Z | 2020-11-28T17:44:14Z | 2020-11-28T18:21:24Z | |
ENH: Implement dict-like support for rename and set_names in MultiIndex | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 393866b92771b..1fa6f0aec4c70 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -51,6 +51,7 @@ Other enhancements
- :func:`pandas.read_sql_query` now accepts a ``dtype`` argument to cast the columnar data from the SQL database based on user input (:issue:`10285`)
- Improved integer type mapping from pandas to SQLAlchemy when using :meth:`DataFrame.to_sql` (:issue:`35076`)
- :func:`to_numeric` now supports downcasting of nullable ``ExtensionDtype`` objects (:issue:`33013`)
+- Add support for dict-like names in :class:`MultiIndex.set_names` and :class:`MultiIndex.rename` (:issue:`20421`)
- :func:`pandas.read_excel` can now auto detect .xlsb files (:issue:`35416`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2db803e5c1b19..37212466f86f0 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -80,6 +80,7 @@
ABCSeries,
ABCTimedeltaIndex,
)
+from pandas.core.dtypes.inference import is_dict_like
from pandas.core.dtypes.missing import array_equivalent, isna
from pandas.core import missing, ops
@@ -1378,11 +1379,18 @@ def set_names(self, names, level=None, inplace: bool = False):
Parameters
----------
- names : label or list of label
+
+ names : label or list of label or dict-like for MultiIndex
Name(s) to set.
+
+ .. versionchanged:: 1.3.0
+
level : int, label or list of int or label, optional
- If the index is a MultiIndex, level(s) to set (None for all
- levels). Otherwise level must be None.
+ If the index is a MultiIndex and names is not dict-like, level(s) to set
+ (None for all levels). Otherwise level must be None.
+
+ .. versionchanged:: 1.3.0
+
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
@@ -1425,16 +1433,40 @@ def set_names(self, names, level=None, inplace: bool = False):
( 'cobra', 2018),
( 'cobra', 2019)],
names=['species', 'year'])
+
+ When renaming levels with a dict, levels can not be passed.
+
+ >>> idx.set_names({'kind': 'snake'})
+ MultiIndex([('python', 2018),
+ ('python', 2019),
+ ( 'cobra', 2018),
+ ( 'cobra', 2019)],
+ names=['snake', 'year'])
"""
if level is not None and not isinstance(self, ABCMultiIndex):
raise ValueError("Level must be None for non-MultiIndex")
- if level is not None and not is_list_like(level) and is_list_like(names):
+ elif level is not None and not is_list_like(level) and is_list_like(names):
raise TypeError("Names must be a string when a single level is provided.")
- if not is_list_like(names) and level is None and self.nlevels > 1:
+ elif not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
+ elif is_dict_like(names) and not isinstance(self, ABCMultiIndex):
+ raise TypeError("Can only pass dict-like as `names` for MultiIndex.")
+
+ elif is_dict_like(names) and level is not None:
+ raise TypeError("Can not pass level for dictlike `names`.")
+
+ if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None:
+ # Transform dict to list of new names and corresponding levels
+ level, names_adjusted = [], []
+ for i, name in enumerate(self.names):
+ if name in names.keys():
+ level.append(i)
+ names_adjusted.append(names[name])
+ names = names_adjusted
+
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
@@ -1444,6 +1476,7 @@ def set_names(self, names, level=None, inplace: bool = False):
idx = self
else:
idx = self._shallow_copy()
+
idx._set_names(names, level=level)
if not inplace:
return idx
diff --git a/pandas/tests/indexes/multi/test_names.py b/pandas/tests/indexes/multi/test_names.py
index 2689e24502b5b..de7d2b4410d42 100644
--- a/pandas/tests/indexes/multi/test_names.py
+++ b/pandas/tests/indexes/multi/test_names.py
@@ -150,3 +150,56 @@ def test_setting_names_from_levels_raises():
assert pd.Index._no_setting_name is False
assert pd.Int64Index._no_setting_name is False
assert pd.RangeIndex._no_setting_name is False
+
+
+@pytest.mark.parametrize("func", ["rename", "set_names"])
+@pytest.mark.parametrize(
+ "rename_dict, exp_names",
+ [
+ ({"x": "z"}, ["z", "y", "z"]),
+ ({"x": "z", "y": "x"}, ["z", "x", "z"]),
+ ({"y": "z"}, ["x", "z", "x"]),
+ ({}, ["x", "y", "x"]),
+ ({"z": "a"}, ["x", "y", "x"]),
+ ({"y": "z", "a": "b"}, ["x", "z", "x"]),
+ ],
+)
+def test_name_mi_with_dict_like_duplicate_names(func, rename_dict, exp_names):
+ # GH#20421
+ mi = MultiIndex.from_arrays([[1, 2], [3, 4], [5, 6]], names=["x", "y", "x"])
+ result = getattr(mi, func)(rename_dict)
+ expected = MultiIndex.from_arrays([[1, 2], [3, 4], [5, 6]], names=exp_names)
+ tm.assert_index_equal(result, expected)
+
+
+@pytest.mark.parametrize("func", ["rename", "set_names"])
+@pytest.mark.parametrize(
+ "rename_dict, exp_names",
+ [
+ ({"x": "z"}, ["z", "y"]),
+ ({"x": "z", "y": "x"}, ["z", "x"]),
+ ({"a": "z"}, ["x", "y"]),
+ ({}, ["x", "y"]),
+ ],
+)
+def test_name_mi_with_dict_like(func, rename_dict, exp_names):
+ # GH#20421
+ mi = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["x", "y"])
+ result = getattr(mi, func)(rename_dict)
+ expected = MultiIndex.from_arrays([[1, 2], [3, 4]], names=exp_names)
+ tm.assert_index_equal(result, expected)
+
+
+def test_index_name_with_dict_like_raising():
+ # GH#20421
+ ix = pd.Index([1, 2])
+ msg = "Can only pass dict-like as `names` for MultiIndex."
+ with pytest.raises(TypeError, match=msg):
+ ix.set_names({"x": "z"})
+
+
+def test_multiindex_name_and_level_raising():
+ # GH#20421
+ mi = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["x", "y"])
+ with pytest.raises(TypeError, match="Can not pass level for dictlike `names`."):
+ mi.set_names(names={"x": "z"}, level={"x": "z"})
| - [x] closes #20421
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I tried my hand in adding support for dict-like renamings for MultiIndexes. This seemed as the most straight forward way to me, because we do not have to add complex logic as long as we can cast the dict-like objects to regular configurations.
We probably must add a docstring to MultiIndex.rename now? | https://api.github.com/repos/pandas-dev/pandas/pulls/38126 | 2020-11-28T01:23:22Z | 2021-01-04T18:55:43Z | 2021-01-04T18:55:42Z | 2021-01-04T18:56:29Z |
Backport PR #38087 on branch 1.1.x (BLD: Only enable -Werror in the CI jobs) | diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index 9adb6fe674099..d725075b85fc6 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -111,6 +111,12 @@ fi
echo "activate pandas-dev"
source activate pandas-dev
+# Explicitly set an environment variable indicating that this is pandas' CI environment.
+#
+# This allows us to enable things like -Werror that shouldn't be activated in
+# downstream CI jobs that may also build pandas from source.
+export PANDAS_CI=1
+
echo
echo "remove any installed pandas package"
echo "w/o removing anything else"
diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index 29b0e99a3a356..552c8ced6b08a 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -33,6 +33,14 @@ Bug fixes
.. ---------------------------------------------------------------------------
+.. _whatsnew_115.other:
+
+Other
+~~~~~
+- Only set ``-Werror`` as a compiler flag in the CI jobs (:issue:`33315`, :issue:`33314`)
+
+.. ---------------------------------------------------------------------------
+
.. _whatsnew_115.contributors:
Contributors
diff --git a/setup.py b/setup.py
index 5555592de45e0..915847c2936bb 100755
--- a/setup.py
+++ b/setup.py
@@ -427,15 +427,16 @@ def run(self):
endian_macro = [("__LITTLE_ENDIAN__", "1")]
+extra_compile_args = []
+extra_link_args = []
if is_platform_windows():
- extra_compile_args = []
- extra_link_args = []
if debugging_symbols_requested:
extra_compile_args.append("/Z7")
extra_link_args.append("/DEBUG")
else:
- extra_compile_args = ["-Werror"]
- extra_link_args = []
+ # PANDAS_CI=1 is set by ci/setup_env.sh
+ if os.environ.get("PANDAS_CI", "0") == "1":
+ extra_compile_args.append("-Werror")
if debugging_symbols_requested:
extra_compile_args.append("-g")
| Backport PR #38087: BLD: Only enable -Werror in the CI jobs | https://api.github.com/repos/pandas-dev/pandas/pulls/38124 | 2020-11-27T20:50:06Z | 2020-11-27T23:35:32Z | 2020-11-27T23:35:32Z | 2020-11-27T23:35:32Z |
Backport PR #38094 on branch 1.1.x (REGR: fix regression in groupby aggregation with out-of-bounds datetimes) | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index 29b0e99a3a356..9be1ff7e836ea 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -20,6 +20,7 @@ Fixed regressions
- Fixed regression in inplace operations on :class:`Series` with ``ExtensionDtype`` with NumPy dtyped operand (:issue:`37910`)
- Fixed regression in metadata propagation for ``groupby`` iterator (:issue:`37343`)
- Fixed regression in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`)
+- Fixed regression in :meth:`DataFrame.groupby` aggregation with out-of-bounds datetime objects in an object-dtype column (:issue:`36003`)
- Fixed regression in ``df.groupby(..).rolling(..)`` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
- Fixed regression in :meth:`DataFrame.fillna` not filling ``NaN`` after other operations such as :meth:`DataFrame.pivot` (:issue:`36495`).
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 7b36bc8baf891..0d6f7f955b217 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -46,7 +46,9 @@ cdef class _BaseGrouper:
Slider islider, Slider vslider):
if cached_typ is None:
cached_ityp = self.ityp(islider.buf)
- cached_typ = self.typ(vslider.buf, index=cached_ityp, name=self.name)
+ cached_typ = self.typ(
+ vslider.buf, dtype=vslider.buf.dtype, index=cached_ityp, name=self.name
+ )
else:
# See the comment in indexes/base.py about _index_data.
# We need this for EA-backed indexes that have a reference
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 127d3fadee555..9d67397d2ccb9 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -1,6 +1,7 @@
"""
test .agg behavior / note that .apply is tested generally in test_groupby.py
"""
+import datetime
import functools
from functools import partial
@@ -1089,3 +1090,21 @@ def test_agg_no_suffix_index():
result = df["A"].agg(["sum", lambda x: x.sum(), lambda x: x.sum()])
expected = pd.Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"], name="A")
tm.assert_series_equal(result, expected)
+
+
+def test_aggregate_datetime_objects():
+ # https://github.com/pandas-dev/pandas/issues/36003
+ # ensure we don't raise an error but keep object dtype for out-of-bounds
+ # datetimes
+ df = DataFrame(
+ {
+ "A": ["X", "Y"],
+ "B": [
+ datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
+ datetime.datetime(3005, 1, 1, 10, 30, 23, 540000),
+ ],
+ }
+ )
+ result = df.groupby("A").B.max()
+ expected = df.set_index("A")["B"]
+ tm.assert_series_equal(result, expected)
| Backport PR #38094: REGR: fix regression in groupby aggregation with out-of-bounds datetimes | https://api.github.com/repos/pandas-dev/pandas/pulls/38123 | 2020-11-27T20:48:34Z | 2020-11-27T22:31:03Z | 2020-11-27T22:31:02Z | 2020-11-27T22:31:03Z |
BUG: Index.intersection casting to object instead of numeric | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 049ccc0e6c4df..0702cb8c70955 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -771,6 +771,7 @@ Other
- Fixed metadata propagation in the :class:`Series.dt`, :class:`Series.str` accessors, :class:`DataFrame.duplicated`, :class:`DataFrame.stack`, :class:`DataFrame.unstack`, :class:`DataFrame.pivot`, :class:`DataFrame.append`, :class:`DataFrame.diff`, :class:`DataFrame.applymap` and :class:`DataFrame.update` methods (:issue:`28283`, :issue:`37381`)
- Fixed metadata propagation when selecting columns with ``DataFrame.__getitem__`` (:issue:`28283`)
- Bug in :meth:`Index.union` behaving differently depending on whether operand is an :class:`Index` or other list-like (:issue:`36384`)
+- Bug in :meth:`Index.intersection` with non-matching numeric dtypes casting to ``object`` dtype instead of minimal common dtype (:issue:`38122`)
- Passing an array with 2 or more dimensions to the :class:`Series` constructor now raises the more specific ``ValueError`` rather than a bare ``Exception`` (:issue:`35744`)
- Bug in ``dir`` where ``dir(obj)`` wouldn't show attributes defined on the instance for pandas objects (:issue:`37173`)
- Bug in :meth:`RangeIndex.difference` returning :class:`Int64Index` in some cases where it should return :class:`RangeIndex` (:issue:`38028`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index c49f3f9457161..467250f9ee5cf 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -33,6 +33,7 @@
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.cast import (
+ find_common_type,
maybe_cast_to_integer_array,
validate_numeric_casting,
)
@@ -2826,8 +2827,9 @@ def intersection(self, other, sort=False):
return self._get_reconciled_name_object(other)
if not is_dtype_equal(self.dtype, other.dtype):
- this = self.astype("O")
- other = other.astype("O")
+ dtype = find_common_type([self.dtype, other.dtype])
+ this = self.astype(dtype)
+ other = other.astype(dtype)
return this.intersection(other, sort=sort)
result = self._intersection(other, sort=sort)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 9b4b459d9a122..8b773dbce6298 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3713,16 +3713,14 @@ def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
if len(other) == 0:
- other = MultiIndex(
- levels=[[]] * self.nlevels,
- codes=[[]] * self.nlevels,
- verify_integrity=False,
- )
+ return self[:0], self.names
else:
msg = "other must be a MultiIndex or a list of tuples"
try:
other = MultiIndex.from_tuples(other)
- except TypeError as err:
+ except (ValueError, TypeError) as err:
+ # ValueError raised by tupels_to_object_array if we
+ # have non-object dtype
raise TypeError(msg) from err
else:
result_names = get_unanimous_names(self, other)
diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py
index 4ac9a27069a3f..805da75813a4b 100644
--- a/pandas/tests/indexes/multi/test_setops.py
+++ b/pandas/tests/indexes/multi/test_setops.py
@@ -2,7 +2,7 @@
import pytest
import pandas as pd
-from pandas import MultiIndex, Series
+from pandas import Index, MultiIndex, Series
import pandas._testing as tm
@@ -294,6 +294,24 @@ def test_intersection(idx, sort):
# assert result.equals(tuples)
+def test_intersection_non_object(idx, sort):
+ other = Index(range(3), name="foo")
+
+ result = idx.intersection(other, sort=sort)
+ expected = MultiIndex(levels=idx.levels, codes=[[]] * idx.nlevels, names=None)
+ tm.assert_index_equal(result, expected, exact=True)
+
+ # if we pass a length-0 ndarray (i.e. no name, we retain our idx.name)
+ result = idx.intersection(np.asarray(other)[:0], sort=sort)
+ expected = MultiIndex(levels=idx.levels, codes=[[]] * idx.nlevels, names=idx.names)
+ tm.assert_index_equal(result, expected, exact=True)
+
+ msg = "other must be a MultiIndex or a list of tuples"
+ with pytest.raises(TypeError, match=msg):
+ # With non-zero length non-index, we try and fail to convert to tuples
+ idx.intersection(np.asarray(other), sort=sort)
+
+
def test_intersect_equal_sort():
# GH-24959
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py
index 1fd41b017221b..5623b0904c0d5 100644
--- a/pandas/tests/indexes/ranges/test_setops.py
+++ b/pandas/tests/indexes/ranges/test_setops.py
@@ -3,11 +3,40 @@
import numpy as np
import pytest
-from pandas import Index, Int64Index, RangeIndex
+from pandas import Index, Int64Index, RangeIndex, UInt64Index
import pandas._testing as tm
class TestRangeIndexSetOps:
+ @pytest.mark.parametrize("klass", [RangeIndex, Int64Index, UInt64Index])
+ def test_intersection_mismatched_dtype(self, klass):
+ # check that we cast to float, not object
+ index = RangeIndex(start=0, stop=20, step=2, name="foo")
+ index = klass(index)
+
+ flt = index.astype(np.float64)
+
+ # bc index.equals(flt), we go through fastpath and get RangeIndex back
+ result = index.intersection(flt)
+ tm.assert_index_equal(result, index, exact=True)
+
+ result = flt.intersection(index)
+ tm.assert_index_equal(result, flt, exact=True)
+
+ # neither empty, not-equals
+ result = index.intersection(flt[1:])
+ tm.assert_index_equal(result, flt[1:], exact=True)
+
+ result = flt[1:].intersection(index)
+ tm.assert_index_equal(result, flt[1:], exact=True)
+
+ # empty other
+ result = index.intersection(flt[:0])
+ tm.assert_index_equal(result, flt[:0], exact=True)
+
+ result = flt[:0].intersection(index)
+ tm.assert_index_equal(result, flt[:0], exact=True)
+
def test_intersection(self, sort):
# intersect with Int64Index
index = RangeIndex(start=0, stop=20, step=2)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38122 | 2020-11-27T20:39:46Z | 2020-11-29T18:15:59Z | 2020-11-29T18:15:59Z | 2020-12-04T13:40:48Z |
API: preserve freq in DTI/TDI.factorize | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index edc2f7327abfc..4770ab37e08d2 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -19,6 +19,7 @@ Fixed regressions
- Fixed regression in :meth:`DataFrame.loc` and :meth:`Series.loc` for ``__setitem__`` when one-dimensional tuple was given to select from :class:`MultiIndex` (:issue:`37711`)
- Fixed regression in inplace operations on :class:`Series` with ``ExtensionDtype`` with NumPy dtyped operand (:issue:`37910`)
- Fixed regression in metadata propagation for ``groupby`` iterator (:issue:`37343`)
+- Fixed regression in :class:`MultiIndex` constructed from a :class:`DatetimeIndex` not retaining frequency (:issue:`35563`)
- Fixed regression in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`)
- Fixed regression in :meth:`DataFrame.groupby` aggregation with out-of-bounds datetime objects in an object-dtype column (:issue:`36003`)
- Fixed regression in ``df.groupby(..).rolling(..)`` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 713d58b4df5be..840e79c6c9ebe 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -48,11 +48,13 @@
pandas_dtype,
)
from pandas.core.dtypes.generic import (
+ ABCDatetimeArray,
ABCExtensionArray,
ABCIndexClass,
ABCMultiIndex,
ABCRangeIndex,
ABCSeries,
+ ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import isna, na_value_for_dtype
@@ -199,8 +201,16 @@ def _reconstruct_data(
-------
ExtensionArray or np.ndarray
"""
+ if isinstance(values, ABCExtensionArray) and values.dtype == dtype:
+ # Catch DatetimeArray/TimedeltaArray
+ return values
+
if is_extension_array_dtype(dtype):
- values = dtype.construct_array_type()._from_sequence(values)
+ cls = dtype.construct_array_type()
+ if isinstance(values, cls) and values.dtype == dtype:
+ return values
+
+ values = cls._from_sequence(values)
elif is_bool_dtype(dtype):
values = values.astype(dtype, copy=False)
@@ -674,8 +684,13 @@ def factorize(
# responsible only for factorization. All data coercion, sorting and boxing
# should happen here.
+ if isinstance(values, ABCRangeIndex):
+ return values.factorize(sort=sort)
+
values = _ensure_arraylike(values)
original = values
+ if not isinstance(values, ABCMultiIndex):
+ values = extract_array(values, extract_numpy=True)
# GH35667, if na_sentinel=None, we will not dropna NaNs from the uniques
# of values, assign na_sentinel=-1 to replace code value for NaN.
@@ -684,10 +699,20 @@ def factorize(
na_sentinel = -1
dropna = False
- if isinstance(values, ABCRangeIndex):
- return values.factorize(sort=sort)
- elif is_extension_array_dtype(values.dtype):
- values = extract_array(values)
+ if (
+ isinstance(values, (ABCDatetimeArray, ABCTimedeltaArray))
+ and values.freq is not None
+ ):
+ codes, uniques = values.factorize(sort=sort)
+ if isinstance(original, ABCIndexClass):
+ uniques = original._shallow_copy(uniques, name=None)
+ elif isinstance(original, ABCSeries):
+ from pandas import Index
+
+ uniques = Index(uniques)
+ return codes, uniques
+
+ if is_extension_array_dtype(values.dtype):
codes, uniques = values.factorize(na_sentinel=na_sentinel)
dtype = original.dtype
else:
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 8fa2c734092f4..979ca70bad9b3 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1645,6 +1645,24 @@ def _with_freq(self, freq):
arr._freq = freq
return arr
+ # --------------------------------------------------------------
+
+ def factorize(self, na_sentinel=-1, sort: bool = False):
+ if self.freq is not None:
+ # We must be unique, so can short-circuit (and retain freq)
+ codes = np.arange(len(self), dtype=np.intp)
+ uniques = self.copy() # TODO: copy or view?
+ if sort and self.freq.n < 0:
+ codes = codes[::-1]
+ # TODO: overload __getitem__, a slice indexer returns same type as self
+ # error: Incompatible types in assignment (expression has type
+ # "Union[DatetimeLikeArrayMixin, Union[Any, Any]]", variable
+ # has type "TimelikeOps") [assignment]
+ uniques = uniques[::-1] # type: ignore[assignment]
+ return codes, uniques
+ # FIXME: shouldn't get here; we are ignoring sort
+ return super().factorize(na_sentinel=na_sentinel)
+
# -------------------------------------------------------------------
# Shared Constructor Helpers
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 2657fc817ec3a..789510b452969 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -265,10 +265,12 @@ def test_factorize(self):
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
# tz must be preserved
idx1 = idx1.tz_localize("Asia/Tokyo")
@@ -277,6 +279,7 @@ def test_factorize(self):
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
idx2 = DatetimeIndex(
["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"]
@@ -287,21 +290,31 @@ def test_factorize(self):
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = DatetimeIndex(["2014-03", "2014-02", "2014-01"])
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
- # freq must be preserved
+ def test_factorize_preserves_freq(self):
+ # GH#38120 freq should be preserved
idx3 = date_range("2000-01", periods=4, freq="M", tz="Asia/Tokyo")
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
+
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
+ assert idx.freq == idx3.freq
+
+ arr, idx = pd.factorize(idx3)
+ tm.assert_numpy_array_equal(arr, exp_arr)
+ tm.assert_index_equal(idx, idx3)
+ assert idx.freq == idx3.freq
- def test_factorize_tz(self, tz_naive_fixture):
+ def test_factorize_tz(self, tz_naive_fixture, index_or_series):
tz = tz_naive_fixture
# GH#13750
base = date_range("2016-11-05", freq="H", periods=100, tz=tz)
@@ -309,27 +322,33 @@ def test_factorize_tz(self, tz_naive_fixture):
exp_arr = np.arange(100, dtype=np.intp).repeat(5)
- for obj in [idx, pd.Series(idx)]:
- arr, res = obj.factorize()
- tm.assert_numpy_array_equal(arr, exp_arr)
- expected = base._with_freq(None)
- tm.assert_index_equal(res, expected)
+ obj = index_or_series(idx)
+
+ arr, res = obj.factorize()
+ tm.assert_numpy_array_equal(arr, exp_arr)
+ expected = base._with_freq(None)
+ tm.assert_index_equal(res, expected)
+ assert res.freq == expected.freq
- def test_factorize_dst(self):
+ def test_factorize_dst(self, index_or_series):
# GH 13750
idx = date_range("2016-11-06", freq="H", periods=12, tz="US/Eastern")
+ obj = index_or_series(idx)
- for obj in [idx, pd.Series(idx)]:
- arr, res = obj.factorize()
- tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
- tm.assert_index_equal(res, idx)
+ arr, res = obj.factorize()
+ tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
+ tm.assert_index_equal(res, idx)
+ if index_or_series is Index:
+ assert res.freq == idx.freq
idx = date_range("2016-06-13", freq="H", periods=12, tz="US/Eastern")
+ obj = index_or_series(idx)
- for obj in [idx, pd.Series(idx)]:
- arr, res = obj.factorize()
- tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
- tm.assert_index_equal(res, idx)
+ arr, res = obj.factorize()
+ tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
+ tm.assert_index_equal(res, idx)
+ if index_or_series is Index:
+ assert res.freq == idx.freq
@pytest.mark.parametrize(
"arr, expected",
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index 774370ed866da..f0e730eecf3d5 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -75,17 +75,26 @@ def test_factorize(self):
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
+ assert idx.freq == exp_idx.freq
- # freq must be preserved
+ def test_factorize_preserves_freq(self):
+ # GH#38120 freq should be preserved
idx3 = timedelta_range("1 day", periods=4, freq="s")
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
+ assert idx.freq == idx3.freq
+
+ arr, idx = pd.factorize(idx3)
+ tm.assert_numpy_array_equal(arr, exp_arr)
+ tm.assert_index_equal(idx, idx3)
+ assert idx.freq == idx3.freq
def test_sort_values(self):
diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py
index a3b8d66c92024..9a3039c28416c 100644
--- a/pandas/tests/indexing/multiindex/test_multiindex.py
+++ b/pandas/tests/indexing/multiindex/test_multiindex.py
@@ -83,3 +83,13 @@ def test_nested_tuples_duplicates(self):
df3 = df.copy(deep=True)
df3.loc[[(dti[0], "a")], "c2"] = 1.0
tm.assert_frame_equal(df3, expected)
+
+ def test_multiindex_with_datatime_level_preserves_freq(self):
+ # https://github.com/pandas-dev/pandas/issues/35563
+ idx = Index(range(2), name="A")
+ dti = pd.date_range("2020-01-01", periods=7, freq="D", name="B")
+ mi = MultiIndex.from_product([idx, dti])
+ df = DataFrame(np.random.randn(14, 2), index=mi)
+ result = df.loc[0].index
+ tm.assert_index_equal(result, dti)
+ assert result.freq == dti.freq
diff --git a/pandas/tests/window/moments/test_moments_consistency_ewm.py b/pandas/tests/window/moments/test_moments_consistency_ewm.py
index aa3453680190b..57665b47dea7f 100644
--- a/pandas/tests/window/moments/test_moments_consistency_ewm.py
+++ b/pandas/tests/window/moments/test_moments_consistency_ewm.py
@@ -11,7 +11,6 @@ def test_ewm_pairwise_cov_corr(func, frame):
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])
- expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected, check_names=False)
diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py
index 802ece77fd36d..53e5354340dcc 100644
--- a/pandas/tests/window/moments/test_moments_consistency_rolling.py
+++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py
@@ -51,7 +51,6 @@ def test_rolling_pairwise_cov_corr(func, frame):
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
- expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected, check_names=False)
| cc @simonjayhawkins
closes #35563
this is an old branch I resurrected and rebased, so it has some excess cruft that would need to be removed if we want to move forward.
LMK if there are any tests you'd like to see added based on the other issue (or just go ahead and push them if you like) | https://api.github.com/repos/pandas-dev/pandas/pulls/38120 | 2020-11-27T19:48:55Z | 2020-11-30T13:40:57Z | 2020-11-30T13:40:56Z | 2021-11-20T23:21:28Z |
BUG: parsed datetime string resolution incorrect | diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index f6eeb121b1ac0..53d573e7eed71 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -565,10 +565,11 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
"hour",
"minute",
"second",
- "minute",
- "second",
+ "millisecond",
"microsecond",
+ "nanosecond"
}
+
if reso.attrname not in valid_resos:
raise KeyError
While working with a datetime index today, I noticed that the reso (resolutions) are defined from year to nanosecond, but lines 560-570 repeat 'minute' and 'second', so working with millisecond or nanosecond raises a `KeyError`.
- [ ] closes #38121
- [ ] closes #38077
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38118 | 2020-11-27T17:45:38Z | 2021-01-24T01:22:07Z | null | 2021-01-30T15:32:34Z |
ENH: include conversion to nullable float in convert_dtypes() | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 0f0e82f4ad4e2..1947d681e70f4 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1255,6 +1255,7 @@ def convert_dtypes(
convert_string: bool = True,
convert_integer: bool = True,
convert_boolean: bool = True,
+ convert_floating: bool = True,
) -> Dtype:
"""
Convert objects to best possible type, and optionally,
@@ -1269,6 +1270,10 @@ def convert_dtypes(
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, defaults True
Whether object dtypes should be converted to ``BooleanDtypes()``.
+ convert_floating : bool, defaults True
+ Whether, if possible, conversion can be done to floating extension types.
+        If `convert_integer` is also True, preference will be given to integer
+        dtypes if the floats can be faithfully cast to integers.
Returns
-------
@@ -1276,7 +1281,9 @@ def convert_dtypes(
new dtype
"""
is_extension = is_extension_array_dtype(input_array.dtype)
- if (convert_string or convert_integer or convert_boolean) and not is_extension:
+ if (
+ convert_string or convert_integer or convert_boolean or convert_floating
+ ) and not is_extension:
try:
inferred_dtype = lib.infer_dtype(input_array)
except ValueError:
@@ -1304,6 +1311,29 @@ def convert_dtypes(
if is_integer_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
+ if convert_floating:
+ if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(
+ input_array.dtype
+ ):
+ from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE
+
+ inferred_float_dtype = FLOAT_STR_TO_DTYPE.get(
+ input_array.dtype.name, "Float64"
+ )
+ # if we could also convert to integer, check if all floats
+ # are actually integers
+ if convert_integer:
+ arr = input_array[notna(input_array)]
+ if (arr.astype(int) == arr).all():
+ inferred_dtype = "Int64"
+ else:
+ inferred_dtype = inferred_float_dtype
+ else:
+ inferred_dtype = inferred_float_dtype
+ else:
+ if is_float_dtype(inferred_dtype):
+ inferred_dtype = input_array.dtype
+
if convert_boolean:
if is_bool_dtype(input_array.dtype):
inferred_dtype = "boolean"
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c7448cf8f8e40..c9f862d136477 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6088,6 +6088,7 @@ def convert_dtypes(
convert_string: bool_t = True,
convert_integer: bool_t = True,
convert_boolean: bool_t = True,
+ convert_floating: bool_t = True,
) -> FrameOrSeries:
"""
Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.
@@ -6104,6 +6105,12 @@ def convert_dtypes(
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, defaults True
Whether object dtypes should be converted to ``BooleanDtypes()``.
+ convert_floating : bool, defaults True
+ Whether, if possible, conversion can be done to floating extension types.
+            If `convert_integer` is also True, preference will be given to integer
+            dtypes if the floats can be faithfully cast to integers.
+
+ .. versionadded:: 1.2.0
Returns
-------
@@ -6121,19 +6128,25 @@ def convert_dtypes(
-----
By default, ``convert_dtypes`` will attempt to convert a Series (or each
Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
- ``convert_string``, ``convert_integer``, and ``convert_boolean``, it is
- possible to turn off individual conversions to ``StringDtype``, the integer
- extension types or ``BooleanDtype``, respectively.
+ ``convert_string``, ``convert_integer``, ``convert_boolean`` and
+        ``convert_floating``, it is possible to turn off individual conversions
+ to ``StringDtype``, the integer extension types, ``BooleanDtype``
+ or floating extension types, respectively.
For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference
rules as during normal Series/DataFrame construction. Then, if possible,
- convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension
- type, otherwise leave as ``object``.
+ convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer
+ or floating extension type, otherwise leave as ``object``.
If the dtype is integer, convert to an appropriate integer extension type.
If the dtype is numeric, and consists of all integers, convert to an
- appropriate integer extension type.
+ appropriate integer extension type. Otherwise, convert to an
+ appropriate floating extension type.
+
+ .. versionchanged:: 1.2
+ Starting with pandas 1.2, this method also converts float columns
+ to the nullable floating extension type.
In the future, as new dtypes are added that support ``pd.NA``, the results
of this method will change to support those new dtypes.
@@ -6173,7 +6186,7 @@ def convert_dtypes(
>>> dfn = df.convert_dtypes()
>>> dfn
a b c d e f
- 0 1 x True h 10 NaN
+ 0 1 x True h 10 <NA>
1 2 y False i <NA> 100.5
2 3 z <NA> <NA> 20 200.0
@@ -6183,7 +6196,7 @@ def convert_dtypes(
c boolean
d string
e Int64
- f float64
+ f Float64
dtype: object
Start with a Series of strings and missing data represented by ``np.nan``.
@@ -6205,12 +6218,20 @@ def convert_dtypes(
"""
if self.ndim == 1:
return self._convert_dtypes(
- infer_objects, convert_string, convert_integer, convert_boolean
+ infer_objects,
+ convert_string,
+ convert_integer,
+ convert_boolean,
+ convert_floating,
)
else:
results = [
col._convert_dtypes(
- infer_objects, convert_string, convert_integer, convert_boolean
+ infer_objects,
+ convert_string,
+ convert_integer,
+ convert_boolean,
+ convert_floating,
)
for col_name, col in self.items()
]
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d493ac0a8c051..1f4221206e5bc 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4706,6 +4706,7 @@ def _convert_dtypes(
convert_string: bool = True,
convert_integer: bool = True,
convert_boolean: bool = True,
+ convert_floating: bool = True,
) -> "Series":
input_series = self
if infer_objects:
@@ -4713,9 +4714,13 @@ def _convert_dtypes(
if is_object_dtype(input_series):
input_series = input_series.copy()
- if convert_string or convert_integer or convert_boolean:
+ if convert_string or convert_integer or convert_boolean or convert_floating:
inferred_dtype = convert_dtypes(
- input_series._values, convert_string, convert_integer, convert_boolean
+ input_series._values,
+ convert_string,
+ convert_integer,
+ convert_boolean,
+ convert_floating,
)
try:
result = input_series.astype(inferred_dtype)
diff --git a/pandas/tests/series/methods/test_convert_dtypes.py b/pandas/tests/series/methods/test_convert_dtypes.py
index d44667b258414..920182a99e9ef 100644
--- a/pandas/tests/series/methods/test_convert_dtypes.py
+++ b/pandas/tests/series/methods/test_convert_dtypes.py
@@ -58,9 +58,17 @@
[10, np.nan, 20],
np.dtype("float"),
"Int64",
- {("convert_integer", False): np.dtype("float")},
+ {
+ ("convert_integer", False, "convert_floating", True): "Float64",
+ ("convert_integer", False, "convert_floating", False): np.dtype("float"),
+ },
+ ),
+ (
+ [np.nan, 100.5, 200],
+ np.dtype("float"),
+ "Float64",
+ {("convert_floating", False): np.dtype("float")},
),
- ([np.nan, 100.5, 200], np.dtype("float"), np.dtype("float"), {}),
(
[3, 4, 5],
"Int8",
@@ -85,20 +93,30 @@
"Int8",
{("convert_integer", False): np.dtype("i1")},
),
+ (
+ [1.2, 1.3],
+ np.dtype("float32"),
+ "Float32",
+ {("convert_floating", False): np.dtype("float32")},
+ ),
(
[1, 2.0],
object,
"Int64",
{
- ("convert_integer", False): np.dtype("float"),
+ ("convert_integer", False): "Float64",
+ ("convert_integer", False, "convert_floating", False): np.dtype("float"),
("infer_objects", False): np.dtype("object"),
},
),
(
[1, 2.5],
object,
- np.dtype("float"),
- {("infer_objects", False): np.dtype("object")},
+ "Float64",
+ {
+ ("convert_floating", False): np.dtype("float"),
+ ("infer_objects", False): np.dtype("object"),
+ },
),
(["a", "b"], pd.CategoricalDtype(), pd.CategoricalDtype(), {}),
(
@@ -134,7 +152,7 @@ class TestSeriesConvertDtypes:
"data, maindtype, expected_default, expected_other",
test_cases,
)
- @pytest.mark.parametrize("params", product(*[(True, False)] * 4))
+ @pytest.mark.parametrize("params", product(*[(True, False)] * 5))
def test_convert_dtypes(
self, data, maindtype, params, expected_default, expected_other
):
@@ -150,12 +168,13 @@ def test_convert_dtypes(
"convert_string",
"convert_integer",
"convert_boolean",
+ "convert_floating",
]
params_dict = dict(zip(param_names, params))
expected_dtype = expected_default
- for (key, val), dtype in expected_other.items():
- if params_dict[key] is val:
+ for spec, dtype in expected_other.items():
+ if all(params_dict[key] is val for key, val in zip(spec[::2], spec[1::2])):
expected_dtype = dtype
expected = pd.Series(data, dtype=expected_dtype)
| xref https://github.com/pandas-dev/pandas/issues/38110
There is one potentially corner case: what with floats that are all "integer"-like? I think we want to keep returning nullable int for that, at least by default, and that is what I did now in this PR But we might want to add a parameter controlling that behaviour? (but that can also be added later on if there is demand for it) | https://api.github.com/repos/pandas-dev/pandas/pulls/38117 | 2020-11-27T17:07:08Z | 2020-11-29T19:12:55Z | 2020-11-29T19:12:55Z | 2020-11-29T19:29:16Z |
CLN: fix flake8 C408 part 2 | diff --git a/pandas/_testing.py b/pandas/_testing.py
index 68371b782aac2..bfff4301c2220 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -2167,15 +2167,15 @@ def makeCustomIndex(
names = [names]
# specific 1D index type requested?
- idx_func = dict(
- i=makeIntIndex,
- f=makeFloatIndex,
- s=makeStringIndex,
- u=makeUnicodeIndex,
- dt=makeDateIndex,
- td=makeTimedeltaIndex,
- p=makePeriodIndex,
- ).get(idx_type)
+ idx_func = {
+ "i": makeIntIndex,
+ "f": makeFloatIndex,
+ "s": makeStringIndex,
+ "u": makeUnicodeIndex,
+ "dt": makeDateIndex,
+ "td": makeTimedeltaIndex,
+ "p": makePeriodIndex,
+ }.get(idx_type)
if idx_func:
# pandas\_testing.py:2120: error: Cannot call function of unknown type
idx = idx_func(nentries) # type: ignore[operator]
diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py
index c2e91c7877d35..c47c31fabeb70 100644
--- a/pandas/compat/numpy/function.py
+++ b/pandas/compat/numpy/function.py
@@ -71,7 +71,7 @@ def __call__(
raise ValueError(f"invalid validation method '{method}'")
-ARGMINMAX_DEFAULTS = dict(out=None)
+ARGMINMAX_DEFAULTS = {"out": None}
validate_argmin = CompatValidator(
ARGMINMAX_DEFAULTS, fname="argmin", method="both", max_fname_arg_count=1
)
@@ -151,7 +151,7 @@ def validate_argsort_with_ascending(ascending, args, kwargs):
return ascending
-CLIP_DEFAULTS: Dict[str, Any] = dict(out=None)
+CLIP_DEFAULTS: Dict[str, Any] = {"out": None}
validate_clip = CompatValidator(
CLIP_DEFAULTS, fname="clip", method="both", max_fname_arg_count=3
)
@@ -208,10 +208,10 @@ def validate_cum_func_with_skipna(skipna, args, kwargs, name):
ALLANY_DEFAULTS, fname="any", method="both", max_fname_arg_count=1
)
-LOGICAL_FUNC_DEFAULTS = dict(out=None, keepdims=False)
+LOGICAL_FUNC_DEFAULTS = {"out": None, "keepdims": False}
validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method="kwargs")
-MINMAX_DEFAULTS = dict(axis=None, out=None, keepdims=False)
+MINMAX_DEFAULTS = {"axis": None, "out": None, "keepdims": False}
validate_min = CompatValidator(
MINMAX_DEFAULTS, fname="min", method="both", max_fname_arg_count=1
)
@@ -219,17 +219,17 @@ def validate_cum_func_with_skipna(skipna, args, kwargs, name):
MINMAX_DEFAULTS, fname="max", method="both", max_fname_arg_count=1
)
-RESHAPE_DEFAULTS: Dict[str, str] = dict(order="C")
+RESHAPE_DEFAULTS: Dict[str, str] = {"order": "C"}
validate_reshape = CompatValidator(
RESHAPE_DEFAULTS, fname="reshape", method="both", max_fname_arg_count=1
)
-REPEAT_DEFAULTS: Dict[str, Any] = dict(axis=None)
+REPEAT_DEFAULTS: Dict[str, Any] = {"axis": None}
validate_repeat = CompatValidator(
REPEAT_DEFAULTS, fname="repeat", method="both", max_fname_arg_count=1
)
-ROUND_DEFAULTS: Dict[str, Any] = dict(out=None)
+ROUND_DEFAULTS: Dict[str, Any] = {"out": None}
validate_round = CompatValidator(
ROUND_DEFAULTS, fname="round", method="both", max_fname_arg_count=1
)
@@ -300,7 +300,7 @@ def validate_take_with_convert(convert, args, kwargs):
return convert
-TRANSPOSE_DEFAULTS = dict(axes=None)
+TRANSPOSE_DEFAULTS = {"axes": None}
validate_transpose = CompatValidator(
TRANSPOSE_DEFAULTS, fname="transpose", method="both", max_fname_arg_count=0
)
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 5cc6525dc3c9b..02214ff51b02a 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -162,7 +162,7 @@ def repeat(
--------
numpy.ndarray.repeat
"""
- nv.validate_repeat(tuple(), dict(axis=axis))
+ nv.validate_repeat((), {"axis": axis})
new_data = self._ndarray.repeat(repeats, axis=axis)
return self._from_backing_data(new_data)
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index c591f81390388..b8375af797b3a 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -58,7 +58,7 @@
SparseArrayT = TypeVar("SparseArrayT", bound="SparseArray")
-_sparray_doc_kwargs = dict(klass="SparseArray")
+_sparray_doc_kwargs = {"klass": "SparseArray"}
def _get_fill(arr: "SparseArray") -> np.ndarray:
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 998117cc49d50..0921c3460c626 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -373,7 +373,7 @@ def sum(
min_count: int = 0,
):
nv.validate_sum(
- (), dict(dtype=dtype, out=out, keepdims=keepdims, initial=initial)
+ (), {"dtype": dtype, "out": out, "keepdims": keepdims, "initial": initial}
)
result = nanops.nansum(
@@ -391,7 +391,7 @@ def std(
skipna: bool = True,
):
nv.validate_stat_ddof_func(
- (), dict(dtype=dtype, out=out, keepdims=keepdims), fname="std"
+ (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std"
)
result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 5f724d9e89d05..f333ee0f71e46 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -46,13 +46,13 @@
if TYPE_CHECKING:
from pandas import Categorical
-_shared_docs: Dict[str, str] = dict()
-_indexops_doc_kwargs = dict(
- klass="IndexOpsMixin",
- inplace="",
- unique="IndexOpsMixin",
- duplicated="IndexOpsMixin",
-)
+_shared_docs: Dict[str, str] = {}
+_indexops_doc_kwargs = {
+ "klass": "IndexOpsMixin",
+ "inplace": "",
+ "unique": "IndexOpsMixin",
+ "duplicated": "IndexOpsMixin",
+}
_T = TypeVar("_T", bound="IndexOpsMixin")
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py
index 0e5867809fe52..d95d8bd4b694e 100644
--- a/pandas/core/dtypes/generic.py
+++ b/pandas/core/dtypes/generic.py
@@ -18,9 +18,9 @@ def create_pandas_abc_type(name, attr, comp):
def _check(cls, inst) -> bool:
return getattr(inst, attr, "_typ") in comp
- dct = dict(__instancecheck__=_check, __subclasscheck__=_check)
+ dct = {"__instancecheck__": _check, "__subclasscheck__": _check}
meta = type("ABCBase", (type,), dct)
- return meta(name, tuple(), dct)
+ return meta(name, (), dct)
ABCInt64Index = create_pandas_abc_type("ABCInt64Index", "_typ", ("int64index",))
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index ae3612c99d5cd..7c97725f1264c 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -85,8 +85,8 @@ class providing the base-class of operations.
to each row or column of a DataFrame.
"""
-_apply_docs = dict(
- template="""
+_apply_docs = {
+ "template": """
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
@@ -123,7 +123,7 @@ class providing the base-class of operations.
Series.apply : Apply a function to a Series.
DataFrame.apply : Apply a function to each row or column of a DataFrame.
""",
- dataframe_examples="""
+ "dataframe_examples": """
>>> df = pd.DataFrame({'A': 'a a b'.split(),
'B': [1,2,3],
'C': [4,6, 5]})
@@ -163,7 +163,7 @@ class providing the base-class of operations.
b 2
dtype: int64
""",
- series_examples="""
+ "series_examples": """
>>> s = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = s.groupby(s.index)
@@ -202,7 +202,7 @@ class providing the base-class of operations.
--------
{examples}
""",
-)
+}
_groupby_agg_method_template = """
Compute {fname} of group values.
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index e2507aeaeb652..7956b3a623333 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -27,7 +27,7 @@
import pandas.core.missing as missing
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
-_index_doc_kwargs.update(dict(target_klass="CategoricalIndex"))
+_index_doc_kwargs.update({"target_klass": "CategoricalIndex"})
@inherit_names(
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 1b18f04ba603d..28ff5a8bacc71 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -200,7 +200,7 @@ def __contains__(self, key: Any) -> bool:
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
- nv.validate_take(tuple(), kwargs)
+ nv.validate_take((), kwargs)
indices = np.asarray(indices, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index f6eeb121b1ac0..f12b8f51b3bfc 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -337,7 +337,7 @@ def __reduce__(self):
# we use a special reduce here because we need
# to simply set the .tz (and not reinterpret it)
- d = dict(data=self._data)
+ d = {"data": self._data}
d.update(self._get_attributes_dict())
return _new_DatetimeIndex, (type(self), d), None
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index 6c35b882b5d67..a4642c0eb63d6 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -269,7 +269,7 @@ def _get_engine_target(self) -> np.ndarray:
return np.asarray(self._data)
def repeat(self, repeats, axis=None):
- nv.validate_repeat(tuple(), dict(axis=axis))
+ nv.validate_repeat((), {"axis": axis})
result = self._data.repeat(repeats, axis=axis)
return type(self)._simple_new(result, name=self.name)
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 5dff07ee4c6dd..b223e583d0ce0 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -43,7 +43,7 @@
from pandas.core.ops import get_op_result_name
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
-_index_doc_kwargs.update(dict(target_klass="PeriodIndex or list of Periods"))
+_index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"})
# --- Period index sketch
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 4cd7cc56144d9..15b85b3200da3 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -267,7 +267,7 @@ def __getstate__(self):
"0.14.1": {
"axes": axes_array,
"blocks": [
- dict(values=b.values, mgr_locs=b.mgr_locs.indexer)
+ {"values": b.values, "mgr_locs": b.mgr_locs.indexer}
for b in self.blocks
],
}
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index e5589b0dae837..afd189ad16b5d 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -43,7 +43,7 @@
from pandas.tseries.frequencies import is_subperiod, is_superperiod
from pandas.tseries.offsets import DateOffset, Day, Nano, Tick
-_shared_docs_kwargs: Dict[str, str] = dict()
+_shared_docs_kwargs: Dict[str, str] = {}
class Resampler(BaseGroupBy, ShallowMixin):
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index bcdb223415813..3c4c42886b396 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -22,7 +22,7 @@
from pandas import DataFrame, Series
-@Appender(_shared_docs["melt"] % dict(caller="pd.melt(df, ", other="DataFrame.melt"))
+@Appender(_shared_docs["melt"] % {"caller": "pd.melt(df, ", "other": "DataFrame.melt"})
def melt(
frame: "DataFrame",
id_vars=None,
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index 9de9d1f434a12..3aeb3b664b27f 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -1,6 +1,6 @@
from typing import Dict
-_shared_docs: Dict[str, str] = dict()
+_shared_docs: Dict[str, str] = {}
_shared_docs[
"aggregate"
diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py
index 1dd005c1602a5..ed920c174ea69 100644
--- a/pandas/core/util/numba_.py
+++ b/pandas/core/util/numba_.py
@@ -9,7 +9,7 @@
from pandas.errors import NumbaUtilError
GLOBAL_USE_NUMBA: bool = False
-NUMBA_FUNC_CACHE: Dict[Tuple[Callable, str], Callable] = dict()
+NUMBA_FUNC_CACHE: Dict[Tuple[Callable, str], Callable] = {}
def maybe_use_numba(engine: Optional[str]) -> bool:
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index fbda78a1842ca..6d14d6172aa6c 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -159,13 +159,13 @@ def _initialize_chunksize(self, chunksize: Optional[int]) -> int:
@property
def _number_format(self) -> Dict[str, Any]:
"""Dictionary used for storing number formatting settings."""
- return dict(
- na_rep=self.na_rep,
- float_format=self.float_format,
- date_format=self.date_format,
- quoting=self.quoting,
- decimal=self.decimal,
- )
+ return {
+ "na_rep": self.na_rep,
+ "float_format": self.float_format,
+ "date_format": self.date_format,
+ "quoting": self.quoting,
+ "decimal": self.decimal,
+ }
@property
def data_index(self) -> Index:
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index ac453839792f3..128e50d84657c 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -206,7 +206,7 @@ def as_escaped_string(
translate = escape_chars
escape_chars = list(escape_chars.keys())
else:
- escape_chars = escape_chars or tuple()
+ escape_chars = escape_chars or ()
result = str(thing)
for c in escape_chars:
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 0eeff44d0f74c..4557c10927a15 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -433,16 +433,16 @@ def format_attr(pair):
else:
table_attr += ' class="tex2jax_ignore"'
- return dict(
- head=head,
- cellstyle=cellstyle,
- body=body,
- uuid=uuid,
- precision=precision,
- table_styles=table_styles,
- caption=caption,
- table_attributes=table_attr,
- )
+ return {
+ "head": head,
+ "cellstyle": cellstyle,
+ "body": body,
+ "uuid": uuid,
+ "precision": precision,
+ "table_styles": table_styles,
+ "caption": caption,
+ "table_attributes": table_attr,
+ }
def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> "Styler":
"""
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index e1feb1aa3fada..b1d705439e300 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -1098,7 +1098,7 @@ def _process_converter(self, f, filt=None):
assert obj is not None # for mypy
needs_new_obj = False
- new_obj = dict()
+ new_obj = {}
for i, (col, c) in enumerate(obj.items()):
if filt(col, c):
new_data, result = f(col, c)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index d97ba6183c955..6f296d3c8d92f 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1464,7 +1464,7 @@ def _read_value_labels(self) -> None:
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
- self.value_label_dict[labname] = dict()
+ self.value_label_dict[labname] = {}
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
self.value_label_dict[labname][val[i]] = self._decode(txt[off[i] : end])
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 0202337a4389a..092a3f0d4402f 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -822,7 +822,7 @@ def test_operators_timedelta64(self):
tm.assert_series_equal(rs, xp)
assert rs.dtype == "timedelta64[ns]"
- df = DataFrame(dict(A=v1))
+ df = DataFrame({"A": v1})
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == "timedelta64[ns]"
diff --git a/pandas/tests/arrays/categorical/test_missing.py b/pandas/tests/arrays/categorical/test_missing.py
index cb0ba128c1fb7..36ed790eff63c 100644
--- a/pandas/tests/arrays/categorical/test_missing.py
+++ b/pandas/tests/arrays/categorical/test_missing.py
@@ -62,13 +62,13 @@ def test_set_item_nan(self):
"fillna_kwargs, msg",
[
(
- dict(value=1, method="ffill"),
+ {"value": 1, "method": "ffill"},
"Cannot specify both 'value' and 'method'.",
),
- (dict(), "Must specify a fill 'value' or 'method'."),
- (dict(method="bad"), "Invalid fill method. Expecting .* bad"),
+ ({}, "Must specify a fill 'value' or 'method'."),
+ ({"method": "bad"}, "Invalid fill method. Expecting .* bad"),
(
- dict(value=Series([1, 2, 3, 4, "a"])),
+ {"value": Series([1, 2, 3, 4, "a"])},
"Cannot setitem on a Categorical with a new category",
),
],
diff --git a/pandas/tests/arrays/sparse/test_libsparse.py b/pandas/tests/arrays/sparse/test_libsparse.py
index 517dc4a2c3d8b..992dff218415d 100644
--- a/pandas/tests/arrays/sparse/test_libsparse.py
+++ b/pandas/tests/arrays/sparse/test_libsparse.py
@@ -12,42 +12,47 @@
TEST_LENGTH = 20
-plain_case = dict(
- xloc=[0, 7, 15],
- xlen=[3, 5, 5],
- yloc=[2, 9, 14],
- ylen=[2, 3, 5],
- intersect_loc=[2, 9, 15],
- intersect_len=[1, 3, 4],
-)
-delete_blocks = dict(
- xloc=[0, 5], xlen=[4, 4], yloc=[1], ylen=[4], intersect_loc=[1], intersect_len=[3]
-)
-split_blocks = dict(
- xloc=[0],
- xlen=[10],
- yloc=[0, 5],
- ylen=[3, 7],
- intersect_loc=[0, 5],
- intersect_len=[3, 5],
-)
-skip_block = dict(
- xloc=[10],
- xlen=[5],
- yloc=[0, 12],
- ylen=[5, 3],
- intersect_loc=[12],
- intersect_len=[3],
-)
-
-no_intersect = dict(
- xloc=[0, 10],
- xlen=[4, 6],
- yloc=[5, 17],
- ylen=[4, 2],
- intersect_loc=[],
- intersect_len=[],
-)
+plain_case = {
+ "xloc": [0, 7, 15],
+ "xlen": [3, 5, 5],
+ "yloc": [2, 9, 14],
+ "ylen": [2, 3, 5],
+ "intersect_loc": [2, 9, 15],
+ "intersect_len": [1, 3, 4],
+}
+delete_blocks = {
+ "xloc": [0, 5],
+ "xlen": [4, 4],
+ "yloc": [1],
+ "ylen": [4],
+ "intersect_loc": [1],
+ "intersect_len": [3],
+}
+split_blocks = {
+ "xloc": [0],
+ "xlen": [10],
+ "yloc": [0, 5],
+ "ylen": [3, 7],
+ "intersect_loc": [0, 5],
+ "intersect_len": [3, 5],
+}
+skip_block = {
+ "xloc": [10],
+ "xlen": [5],
+ "yloc": [0, 12],
+ "ylen": [5, 3],
+ "intersect_loc": [12],
+ "intersect_len": [3],
+}
+
+no_intersect = {
+ "xloc": [0, 10],
+ "xlen": [4, 6],
+ "yloc": [5, 17],
+ "ylen": [4, 2],
+ "intersect_loc": [],
+ "intersect_len": [],
+}
def check_cases(_check_case):
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 2db9a9a403e1c..ce6737db44195 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -105,16 +105,16 @@ def test_period_dtype(self, dtype):
assert com.pandas_dtype(dtype) == dtype
-dtypes = dict(
- datetime_tz=com.pandas_dtype("datetime64[ns, US/Eastern]"),
- datetime=com.pandas_dtype("datetime64[ns]"),
- timedelta=com.pandas_dtype("timedelta64[ns]"),
- period=PeriodDtype("D"),
- integer=np.dtype(np.int64),
- float=np.dtype(np.float64),
- object=np.dtype(object),
- category=com.pandas_dtype("category"),
-)
+dtypes = {
+ "datetime_tz": com.pandas_dtype("datetime64[ns, US/Eastern]"),
+ "datetime": com.pandas_dtype("datetime64[ns]"),
+ "timedelta": com.pandas_dtype("timedelta64[ns]"),
+ "period": PeriodDtype("D"),
+ "integer": np.dtype(np.int64),
+ "float": np.dtype(np.float64),
+ "object": np.dtype(object),
+ "category": com.pandas_dtype("category"),
+}
@pytest.mark.parametrize("name1,dtype1", list(dtypes.items()), ids=lambda x: str(x))
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 29a59cdefbd83..1cc03d4f4f2bd 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -447,10 +447,10 @@ def test_repeat(self, data, repeats, as_series, use_numpy):
@pytest.mark.parametrize(
"repeats, kwargs, error, msg",
[
- (2, dict(axis=1), ValueError, "axis"),
- (-1, dict(), ValueError, "negative"),
- ([1, 2], dict(), ValueError, "shape"),
- (2, dict(foo="bar"), TypeError, "'foo'"),
+ (2, {"axis": 1}, ValueError, "axis"),
+ (-1, {}, ValueError, "negative"),
+ ([1, 2], {}, ValueError, "shape"),
+ (2, {"foo": "bar"}, TypeError, "'foo'"),
],
)
def test_repeat_raises(self, data, repeats, kwargs, error, msg, use_numpy):
diff --git a/pandas/tests/frame/common.py b/pandas/tests/frame/common.py
index 73e60ff389038..95ebaa4641d1b 100644
--- a/pandas/tests/frame/common.py
+++ b/pandas/tests/frame/common.py
@@ -5,7 +5,7 @@
def _check_mixed_float(df, dtype=None):
# float16 are most likely to be upcasted to float32
- dtypes = dict(A="float32", B="float32", C="float16", D="float64")
+ dtypes = {"A": "float32", "B": "float32", "C": "float16", "D": "float64"}
if isinstance(dtype, str):
dtypes = {k: dtype for k, v in dtypes.items()}
elif isinstance(dtype, dict):
@@ -21,7 +21,7 @@ def _check_mixed_float(df, dtype=None):
def _check_mixed_int(df, dtype=None):
- dtypes = dict(A="int32", B="uint64", C="uint8", D="int64")
+ dtypes = {"A": "int32", "B": "uint64", "C": "uint8", "D": "int64"}
if isinstance(dtype, str):
dtypes = {k: dtype for k, v in dtypes.items()}
elif isinstance(dtype, dict):
diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py
index f05c90f37ea8a..d79969eac0323 100644
--- a/pandas/tests/frame/methods/test_astype.py
+++ b/pandas/tests/frame/methods/test_astype.py
@@ -563,7 +563,7 @@ def test_astype_empty_dtype_dict(self):
# issue mentioned further down in the following issue's thread
# https://github.com/pandas-dev/pandas/issues/33113
df = DataFrame()
- result = df.astype(dict())
+ result = df.astype({})
tm.assert_frame_equal(result, df)
assert result is not df
diff --git a/pandas/tests/frame/methods/test_convert.py b/pandas/tests/frame/methods/test_convert.py
index 50add248f9614..a00b2b5960884 100644
--- a/pandas/tests/frame/methods/test_convert.py
+++ b/pandas/tests/frame/methods/test_convert.py
@@ -43,9 +43,9 @@ def test_convert_objects(self, float_string_frame):
converted["H"].astype("int32")
# mixed in a single column
- df = DataFrame(dict(s=Series([1, "na", 3, 4])))
+ df = DataFrame({"s": Series([1, "na", 3, 4])})
result = df._convert(datetime=True, numeric=True)
- expected = DataFrame(dict(s=Series([1, np.nan, 3, 4])))
+ expected = DataFrame({"s": Series([1, np.nan, 3, 4])})
tm.assert_frame_equal(result, expected)
def test_convert_objects_no_conversion(self):
diff --git a/pandas/tests/frame/methods/test_diff.py b/pandas/tests/frame/methods/test_diff.py
index 8affcce478cf4..b8328b43a6b13 100644
--- a/pandas/tests/frame/methods/test_diff.py
+++ b/pandas/tests/frame/methods/test_diff.py
@@ -132,10 +132,10 @@ def test_diff_datetime_axis1(self, tz):
def test_diff_timedelta(self):
# GH#4533
df = DataFrame(
- dict(
- time=[Timestamp("20130101 9:01"), Timestamp("20130101 9:02")],
- value=[1.0, 2.0],
- )
+ {
+ "time": [Timestamp("20130101 9:01"), Timestamp("20130101 9:02")],
+ "value": [1.0, 2.0],
+ }
)
res = df.diff()
diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py
index 857dd0ad7268b..1080d97b30987 100644
--- a/pandas/tests/frame/methods/test_rename.py
+++ b/pandas/tests/frame/methods/test_rename.py
@@ -76,8 +76,8 @@ def test_rename(self, float_frame):
@pytest.mark.parametrize(
"args,kwargs",
[
- ((ChainMap({"A": "a"}, {"B": "b"}),), dict(axis="columns")),
- ((), dict(columns=ChainMap({"A": "a"}, {"B": "b"}))),
+ ((ChainMap({"A": "a"}, {"B": "b"}),), {"axis": "columns"}),
+ ((), {"columns": ChainMap({"A": "a"}, {"B": "b"})}),
],
)
def test_rename_chainmap(self, args, kwargs):
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index 8e59dd959ab57..ab750bca7e069 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1123,7 +1123,7 @@ def test_replace_series_no_regex(self):
tm.assert_series_equal(result, expected)
def test_replace_dict_tuple_list_ordering_remains_the_same(self):
- df = DataFrame(dict(A=[np.nan, 1]))
+ df = DataFrame({"A": [np.nan, 1]})
res1 = df.replace(to_replace={np.nan: 0, 1: -1e8})
res2 = df.replace(to_replace=(1, np.nan), value=[-1e8, 0])
res3 = df.replace(to_replace=[1, np.nan], value=[-1e8, 0])
diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py
index 5864b547a552b..00d4a4277a42f 100644
--- a/pandas/tests/frame/methods/test_reset_index.py
+++ b/pandas/tests/frame/methods/test_reset_index.py
@@ -618,7 +618,7 @@ def test_reset_index_empty_frame_with_datetime64_multiindex():
def test_reset_index_empty_frame_with_datetime64_multiindex_from_groupby():
# https://github.com/pandas-dev/pandas/issues/35657
- df = DataFrame(dict(c1=[10.0], c2=["a"], c3=pd.to_datetime("2020-01-01")))
+ df = DataFrame({"c1": [10.0], "c2": ["a"], "c3": pd.to_datetime("2020-01-01")})
df = df.head(0).groupby(["c2", "c3"])[["c1"]].sum()
result = df.reset_index()
expected = DataFrame(
diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py
index 109561a5acb23..b42c56f256478 100644
--- a/pandas/tests/frame/test_nonunique_indexes.py
+++ b/pandas/tests/frame/test_nonunique_indexes.py
@@ -250,7 +250,7 @@ def test_column_dups_operations(self):
# operations
for op in ["__add__", "__mul__", "__sub__", "__truediv__"]:
- df = DataFrame(dict(A=np.arange(10), B=np.random.rand(10)))
+ df = DataFrame({"A": np.arange(10), "B": np.random.rand(10)})
expected = getattr(df, op)(df)
expected.columns = ["A", "A"]
df.columns = ["A", "A"]
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index f3f2bbe1d160e..af134db587306 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -127,7 +127,7 @@ def test_ops(self, op_str, op, rop, n):
def test_dataframe_sub_numexpr_path(self):
# GH7192: Note we need a large number of rows to ensure this
# goes through the numexpr path
- df = DataFrame(dict(A=np.random.randn(25000)))
+ df = DataFrame({"A": np.random.randn(25000)})
df.iloc[0:5] = np.nan
expected = 1 - np.isnan(df.iloc[0:25])
result = (1 - np.isnan(df)).iloc[0:25]
diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py
index c70bfc4a3602b..c9e737a9dcb0f 100644
--- a/pandas/tests/frame/test_stack_unstack.py
+++ b/pandas/tests/frame/test_stack_unstack.py
@@ -336,19 +336,19 @@ def test_unstack_mixed_type_name_in_multiindex(
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
- dict(
- state=["IL", "MI", "NC"],
- index=["a", "b", "c"],
- some_categories=Series(["a", "b", "c"]).astype("category"),
- A=np.random.rand(3),
- B=1,
- C="foo",
- D=pd.Timestamp("20010102"),
- E=Series([1.0, 50.0, 100.0]).astype("float32"),
- F=Series([3.0, 4.0, 5.0]).astype("float64"),
- G=False,
- H=Series([1, 200, 923442], dtype="int8"),
- )
+ {
+ "state": ["IL", "MI", "NC"],
+ "index": ["a", "b", "c"],
+ "some_categories": Series(["a", "b", "c"]).astype("category"),
+ "A": np.random.rand(3),
+ "B": 1,
+ "C": "foo",
+ "D": pd.Timestamp("20010102"),
+ "E": Series([1.0, 50.0, 100.0]).astype("float32"),
+ "F": Series([3.0, 4.0, 5.0]).astype("float64"),
+ "G": False,
+ "H": Series([1, 200, 923442], dtype="int8"),
+ }
)
def unstack_and_compare(df, column_name):
@@ -1689,7 +1689,7 @@ def test_stack_multiple_bug(self):
name = (["a"] * 3) + (["b"] * 3)
date = pd.to_datetime(["2013-01-03", "2013-01-04", "2013-01-05"] * 2)
var1 = np.random.randint(0, 100, 6)
- df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
+ df = DataFrame({"ID": id_col, "NAME": name, "DATE": date, "VAR1": var1})
multi = df.set_index(["DATE", "ID"])
multi.columns.name = "Params"
diff --git a/pandas/tests/frame/test_ufunc.py b/pandas/tests/frame/test_ufunc.py
index 7bc9aa29af3b4..81c0dc65b4e97 100644
--- a/pandas/tests/frame/test_ufunc.py
+++ b/pandas/tests/frame/test_ufunc.py
@@ -7,7 +7,7 @@
dtypes = [
"int64",
"Int64",
- dict(A="int64", B="Int64"),
+ {"A": "int64", "B": "Int64"},
]
diff --git a/pandas/tests/frame/test_validate.py b/pandas/tests/frame/test_validate.py
index c7270322b980c..e99e0a6863848 100644
--- a/pandas/tests/frame/test_validate.py
+++ b/pandas/tests/frame/test_validate.py
@@ -26,7 +26,7 @@ class TestDataFrameValidate:
@pytest.mark.parametrize("inplace", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_args(self, dataframe, func, inplace):
msg = 'For argument "inplace" expected type bool'
- kwargs = dict(inplace=inplace)
+ kwargs = {"inplace": inplace}
if func == "query":
kwargs["expr"] = "a > b"
diff --git a/pandas/tests/generic/test_duplicate_labels.py b/pandas/tests/generic/test_duplicate_labels.py
index 300f4cd72573a..1b32675ec2d35 100644
--- a/pandas/tests/generic/test_duplicate_labels.py
+++ b/pandas/tests/generic/test_duplicate_labels.py
@@ -203,7 +203,7 @@ def test_concat(self, objs, kwargs):
pd.DataFrame({"B": [0, 1]}, index=["a", "d"]).set_flags(
allows_duplicate_labels=False
),
- dict(left_index=True, right_index=True),
+ {"left_index": True, "right_index": True},
False,
marks=not_implemented,
),
@@ -213,7 +213,7 @@ def test_concat(self, objs, kwargs):
allows_duplicate_labels=False
),
pd.DataFrame({"B": [0, 1]}, index=["a", "d"]),
- dict(left_index=True, right_index=True),
+ {"left_index": True, "right_index": True},
False,
marks=not_implemented,
),
@@ -221,7 +221,7 @@ def test_concat(self, objs, kwargs):
(
pd.DataFrame({"A": [0, 1]}, index=["a", "b"]),
pd.DataFrame({"B": [0, 1]}, index=["a", "d"]),
- dict(left_index=True, right_index=True),
+ {"left_index": True, "right_index": True},
True,
),
],
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 151ec03662335..b2074dcb08c95 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -665,7 +665,7 @@ def test_apply_aggregating_timedelta_and_datetime():
df["time_delta_zero"] = df.datetime - df.datetime
result = df.groupby("clientid").apply(
lambda ddf: Series(
- dict(clientid_age=ddf.time_delta_zero.min(), date=ddf.datetime.min())
+ {"clientid_age": ddf.time_delta_zero.min(), "date": ddf.datetime.min()}
)
)
expected = DataFrame(
@@ -784,7 +784,7 @@ def test_func(x):
def test_groupby_apply_return_empty_chunk():
# GH 22221: apply filter which returns some empty groups
- df = DataFrame(dict(value=[0, 1], group=["filled", "empty"]))
+ df = DataFrame({"value": [0, 1], "group": ["filled", "empty"]})
groups = df.groupby("group")
result = groups.apply(lambda group: group[group.value != 1]["value"])
expected = Series(
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 78c438fa11a0e..7c179a79513fa 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -149,11 +149,11 @@ def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
- dict(
- A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
- B=Series(np.arange(7), dtype="int64"),
- C=date_range("20130101", periods=7),
- )
+ {
+ "A": ["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
+ "B": Series(np.arange(7), dtype="int64"),
+ "C": date_range("20130101", periods=7),
+ }
)
def f(grp):
@@ -257,7 +257,7 @@ def test_len():
assert len(grouped) == expected
# issue 11016
- df = DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
+ df = DataFrame({"a": [np.nan] * 3, "b": [1, 2, 3]})
assert len(df.groupby("a")) == 0
assert len(df.groupby("b")) == 3
assert len(df.groupby(["a", "b"])) == 3
diff --git a/pandas/tests/groupby/test_quantile.py b/pandas/tests/groupby/test_quantile.py
index bd6d33c59a48a..53729a120ae8d 100644
--- a/pandas/tests/groupby/test_quantile.py
+++ b/pandas/tests/groupby/test_quantile.py
@@ -157,7 +157,7 @@ def test_quantile_raises():
def test_quantile_out_of_bounds_q_raises():
# https://github.com/pandas-dev/pandas/issues/27470
- df = DataFrame(dict(a=[0, 0, 0, 1, 1, 1], b=range(6)))
+ df = DataFrame({"a": [0, 0, 0, 1, 1, 1], "b": range(6)})
g = df.groupby([0, 0, 0, 1, 1, 1])
with pytest.raises(ValueError, match="Got '50.0' instead"):
g.quantile(50)
@@ -169,7 +169,7 @@ def test_quantile_out_of_bounds_q_raises():
def test_quantile_missing_group_values_no_segfaults():
# GH 28662
data = np.array([1.0, np.nan, 1.0])
- df = DataFrame(dict(key=data, val=range(3)))
+ df = DataFrame({"key": data, "val": range(3)})
# Random segfaults; would have been guaranteed in loop
grp = df.groupby("key")
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index 2340168415382..28095c0b0c39f 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -651,7 +651,7 @@ def test_groupby_first_datetime64(self):
def test_groupby_max_datetime64(self):
# GH 5869
# datetimelike dtype conversion from int
- df = DataFrame(dict(A=Timestamp("20130101"), B=np.arange(5)))
+ df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)})
expected = df.groupby("A")["A"].apply(lambda x: x.max())
result = df.groupby("A")["A"].max()
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py
index c86cb4532bc26..c5d454baa7e7b 100644
--- a/pandas/tests/groupby/test_value_counts.py
+++ b/pandas/tests/groupby/test_value_counts.py
@@ -65,9 +65,13 @@ def rebuild_index(df):
df.index = MultiIndex.from_arrays(arr, names=df.index.names)
return df
- kwargs = dict(
- normalize=normalize, sort=sort, ascending=ascending, dropna=dropna, bins=bins
- )
+ kwargs = {
+ "normalize": normalize,
+ "sort": sort,
+ "ascending": ascending,
+ "dropna": dropna,
+ "bins": bins,
+ }
gr = df.groupby(keys, sort=isort)
left = gr["3rd"].value_counts(**kwargs)
diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py
index 878a89bd52cb1..f354682bf6f70 100644
--- a/pandas/tests/indexes/period/test_partial_slicing.py
+++ b/pandas/tests/indexes/period/test_partial_slicing.py
@@ -94,7 +94,7 @@ def test_range_slice_outofbounds(self, make_range):
def test_maybe_cast_slice_bound(self, make_range, frame_or_series):
idx = make_range(start="2013/10/01", freq="D", periods=10)
- obj = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx)
+ obj = DataFrame({"units": [100 + i for i in range(10)]}, index=idx)
if frame_or_series is not DataFrame:
obj = obj["units"]
diff --git a/pandas/tests/io/formats/test_to_excel.py b/pandas/tests/io/formats/test_to_excel.py
index 8529a0fb33b67..4f1af132204bb 100644
--- a/pandas/tests/io/formats/test_to_excel.py
+++ b/pandas/tests/io/formats/test_to_excel.py
@@ -278,7 +278,7 @@ def test_css_to_excel_good_colors(input_color, output_color):
f"color: {input_color}"
)
- expected = dict()
+ expected = {}
expected["fill"] = {"patternType": "solid", "fgColor": output_color}
@@ -305,7 +305,7 @@ def test_css_to_excel_bad_colors(input_color):
f"color: {input_color}"
)
- expected = dict()
+ expected = {}
if input_color is not None:
expected["fill"] = {"patternType": "solid"}
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index aaadc965aca52..a88dec84bd693 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -152,8 +152,8 @@ def test_to_html_decimal(datapath):
@pytest.mark.parametrize(
"kwargs,string,expected",
[
- (dict(), "<type 'str'>", "escaped"),
- (dict(escape=False), "<b>bold</b>", "escape_disabled"),
+ ({}, "<type 'str'>", "escaped"),
+ ({"escape": False}, "<b>bold</b>", "escape_disabled"),
],
)
def test_to_html_escaped(kwargs, string, expected, datapath):
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index 81e8e0bd2b526..ba6d7c010613b 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -92,7 +92,7 @@ def test_to_latex_tabular_without_index(self):
@pytest.mark.parametrize(
"bad_column_format",
- [5, 1.2, ["l", "r"], ("r", "c"), {"r", "c", "l"}, dict(a="r", b="l")],
+ [5, 1.2, ["l", "r"], ("r", "c"), {"r", "c", "l"}, {"a": "r", "b": "l"}],
)
def test_to_latex_bad_column_format(self, bad_column_format):
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index fdf2caa804def..7a5aca13b33f5 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -379,7 +379,7 @@ def test_frame_infinity(self, orient, inf, dtype):
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
- df = DataFrame([dict(a_float=value)])
+ df = DataFrame([{"a_float": value}])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
@@ -475,8 +475,8 @@ def test_blocks_compat_GH9037(self):
index = DatetimeIndex(list(index), freq=None)
df_mixed = DataFrame(
- dict(
- float_1=[
+ {
+ "float_1": [
-0.92077639,
0.77434435,
1.25234727,
@@ -488,7 +488,7 @@ def test_blocks_compat_GH9037(self):
0.95748401,
-1.02970536,
],
- int_1=[
+ "int_1": [
19680418,
75337055,
99973684,
@@ -500,7 +500,7 @@ def test_blocks_compat_GH9037(self):
41903419,
16008365,
],
- str_1=[
+ "str_1": [
"78c608f1",
"64a99743",
"13d2ff52",
@@ -512,7 +512,7 @@ def test_blocks_compat_GH9037(self):
"7a669144",
"8d64d068",
],
- float_2=[
+ "float_2": [
-0.0428278,
-1.80872357,
3.36042349,
@@ -524,7 +524,7 @@ def test_blocks_compat_GH9037(self):
-0.03030452,
1.43366348,
],
- str_2=[
+ "str_2": [
"14f04af9",
"d085da90",
"4bcfac83",
@@ -536,7 +536,7 @@ def test_blocks_compat_GH9037(self):
"1f6a09ba",
"4bfc4d87",
],
- int_2=[
+ "int_2": [
86967717,
98098830,
51927505,
@@ -548,7 +548,7 @@ def test_blocks_compat_GH9037(self):
24867120,
76131025,
],
- ),
+ },
index=index,
)
diff --git a/pandas/tests/io/parser/conftest.py b/pandas/tests/io/parser/conftest.py
index d03c85f65ea8d..e8893b4c02238 100644
--- a/pandas/tests/io/parser/conftest.py
+++ b/pandas/tests/io/parser/conftest.py
@@ -13,7 +13,7 @@ class BaseParser:
def update_kwargs(self, kwargs):
kwargs = kwargs.copy()
- kwargs.update(dict(engine=self.engine, low_memory=self.low_memory))
+ kwargs.update({"engine": self.engine, "low_memory": self.low_memory})
return kwargs
diff --git a/pandas/tests/io/parser/test_compression.py b/pandas/tests/io/parser/test_compression.py
index 690d3133dae5e..220d9474c6dbf 100644
--- a/pandas/tests/io/parser/test_compression.py
+++ b/pandas/tests/io/parser/test_compression.py
@@ -109,7 +109,7 @@ def test_compression(parser_and_data, compression_only, buffer, filename):
def test_infer_compression(all_parsers, csv1, buffer, ext):
# see gh-9770
parser = all_parsers
- kwargs = dict(index_col=0, parse_dates=True)
+ kwargs = {"index_col": 0, "parse_dates": True}
expected = parser.read_csv(csv1, **kwargs)
kwargs["compression"] = "infer"
@@ -144,7 +144,7 @@ def test_compression_utf_encoding(all_parsers, csv_dir_path, utf_value, encoding
@pytest.mark.parametrize("invalid_compression", ["sfark", "bz3", "zipper"])
def test_invalid_compression(all_parsers, invalid_compression):
parser = all_parsers
- compress_kwargs = dict(compression=invalid_compression)
+ compress_kwargs = {"compression": invalid_compression}
msg = f"Unrecognized compression type: {invalid_compression}"
diff --git a/pandas/tests/io/parser/test_converters.py b/pandas/tests/io/parser/test_converters.py
index 88b400d9a11df..1d2fb7fddc9dd 100644
--- a/pandas/tests/io/parser/test_converters.py
+++ b/pandas/tests/io/parser/test_converters.py
@@ -57,7 +57,7 @@ def test_converters_no_implicit_conv(all_parsers):
def test_converters_euro_decimal_format(all_parsers):
# see gh-583
- converters = dict()
+ converters = {}
parser = all_parsers
data = """Id;Number1;Number2;Text1;Text2;Number3
diff --git a/pandas/tests/io/parser/test_dtypes.py b/pandas/tests/io/parser/test_dtypes.py
index 861aeba60cab7..930ce1a695e1f 100644
--- a/pandas/tests/io/parser/test_dtypes.py
+++ b/pandas/tests/io/parser/test_dtypes.py
@@ -495,7 +495,7 @@ def test_dtype_with_converters(all_parsers):
(np.float64, DataFrame(columns=["a", "b"], dtype=np.float64)),
("category", DataFrame({"a": Categorical([]), "b": Categorical([])}, index=[])),
(
- dict(a="category", b="category"),
+ {"a": "category", "b": "category"},
DataFrame({"a": Categorical([]), "b": Categorical([])}, index=[]),
),
("datetime64[ns]", DataFrame(columns=["a", "b"], dtype="datetime64[ns]")),
@@ -510,7 +510,7 @@ def test_dtype_with_converters(all_parsers):
),
),
(
- dict(a=np.int64, b=np.int32),
+ {"a": np.int64, "b": np.int32},
DataFrame(
{"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},
index=[],
diff --git a/pandas/tests/io/parser/test_mangle_dupes.py b/pandas/tests/io/parser/test_mangle_dupes.py
index 5c4e642115798..457a6567febab 100644
--- a/pandas/tests/io/parser/test_mangle_dupes.py
+++ b/pandas/tests/io/parser/test_mangle_dupes.py
@@ -11,7 +11,7 @@
import pandas._testing as tm
-@pytest.mark.parametrize("kwargs", [dict(), dict(mangle_dupe_cols=True)])
+@pytest.mark.parametrize("kwargs", [{}, {"mangle_dupe_cols": True}])
def test_basic(all_parsers, kwargs):
# TODO: add test for condition "mangle_dupe_cols=False"
# once it is actually supported (gh-12935)
diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
index 77a4c4a8faf5e..4c6ffaf425cf7 100644
--- a/pandas/tests/plotting/frame/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -662,12 +662,12 @@ def test_scatterplot_datetime_data(self):
def test_scatterplot_object_data(self):
# GH 18755
- df = DataFrame(dict(a=["A", "B", "C"], b=[2, 3, 4]))
+ df = DataFrame({"a": ["A", "B", "C"], "b": [2, 3, 4]})
_check_plot_works(df.plot.scatter, x="a", y="b")
_check_plot_works(df.plot.scatter, x=0, y=1)
- df = DataFrame(dict(a=["A", "B", "C"], b=["a", "b", "c"]))
+ df = DataFrame({"a": ["A", "B", "C"], "b": ["a", "b", "c"]})
_check_plot_works(df.plot.scatter, x="a", y="b")
_check_plot_works(df.plot.scatter, x=0, y=1)
@@ -1266,7 +1266,7 @@ def test_line_label_none(self):
def test_specified_props_kwd_plot_box(self, props, expected):
# GH 30346
df = DataFrame({k: np.random.random(100) for k in "ABC"})
- kwd = {props: dict(color="C1")}
+ kwd = {props: {"color": "C1"}}
result = df.plot.box(return_type="dict", **kwd)
assert result[expected][0].get_color() == "C1"
@@ -2022,7 +2022,7 @@ def test_secondary_axis_font_size(self, method):
fontsize = 20
sy = ["C", "D"]
- kwargs = dict(secondary_y=sy, fontsize=fontsize, mark_right=True)
+ kwargs = {"secondary_y": sy, "fontsize": fontsize, "mark_right": True}
ax = getattr(df.plot, method)(**kwargs)
self._check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize)
diff --git a/pandas/tests/plotting/frame/test_frame_color.py b/pandas/tests/plotting/frame/test_frame_color.py
index d9fe7363a15ad..f711e9d1092a1 100644
--- a/pandas/tests/plotting/frame/test_frame_color.py
+++ b/pandas/tests/plotting/frame/test_frame_color.py
@@ -551,9 +551,12 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
_check_colors(bp, default_colors[0], default_colors[0], default_colors[2])
tm.close()
- dict_colors = dict(
- boxes="#572923", whiskers="#982042", medians="#804823", caps="#123456"
- )
+ dict_colors = {
+ "boxes": "#572923",
+ "whiskers": "#982042",
+ "medians": "#804823",
+ "caps": "#123456",
+ }
bp = df.plot.box(color=dict_colors, sym="r+", return_type="dict")
_check_colors(
bp,
@@ -566,7 +569,7 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
tm.close()
# partial colors
- dict_colors = dict(whiskers="c", medians="m")
+ dict_colors = {"whiskers": "c", "medians": "m"}
bp = df.plot.box(color=dict_colors, return_type="dict")
_check_colors(bp, default_colors[0], "c", "m")
tm.close()
@@ -594,7 +597,7 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
with pytest.raises(ValueError):
# Color contains invalid key results in ValueError
- df.plot.box(color=dict(boxes="red", xxxx="blue"))
+ df.plot.box(color={"boxes": "red", "xxxx": "blue"})
def test_default_color_cycle(self):
import cycler
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 590758bc01fbb..3db612bc4afee 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1277,7 +1277,7 @@ def test_mpl_nopandas(self):
values1 = np.arange(10.0, 11.0, 0.5)
values2 = np.arange(11.0, 12.0, 0.5)
- kw = dict(fmt="-", lw=4)
+ kw = {"fmt": "-", "lw": 4}
_, ax = self.plt.subplots()
ax.plot_date([x.toordinal() for x in dates], values1, **kw)
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 65b50e829478d..3e41dab39e71d 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -1273,7 +1273,7 @@ def test_resample_timegrouper():
dates3 = [pd.NaT] + dates1 + [pd.NaT]
for dates in [dates1, dates2, dates3]:
- df = DataFrame(dict(A=dates, B=np.arange(len(dates))))
+ df = DataFrame({"A": dates, "B": np.arange(len(dates))})
result = df.set_index("A").resample("M").count()
exp_idx = DatetimeIndex(
["2014-07-31", "2014-08-31", "2014-09-30", "2014-10-31", "2014-11-30"],
@@ -1288,7 +1288,9 @@ def test_resample_timegrouper():
result = df.groupby(Grouper(freq="M", key="A")).count()
tm.assert_frame_equal(result, expected)
- df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(len(dates))))
+ df = DataFrame(
+ {"A": dates, "B": np.arange(len(dates)), "C": np.arange(len(dates))}
+ )
result = df.set_index("A").resample("M").count()
expected = DataFrame(
{"B": [1, 0, 2, 2, 1], "C": [1, 0, 2, 2, 1]},
@@ -1728,7 +1730,7 @@ def test_resample_apply_product():
index = date_range(start="2012-01-31", freq="M", periods=12)
ts = Series(range(12), index=index)
- df = DataFrame(dict(A=ts, B=ts + 2))
+ df = DataFrame({"A": ts, "B": ts + 2})
result = df.resample("Q").apply(np.product)
expected = DataFrame(
np.array([[0, 24], [60, 210], [336, 720], [990, 1716]], dtype=np.int64),
diff --git a/pandas/tests/reshape/concat/test_empty.py b/pandas/tests/reshape/concat/test_empty.py
index 5c540124de8e6..a97e9265b4f99 100644
--- a/pandas/tests/reshape/concat/test_empty.py
+++ b/pandas/tests/reshape/concat/test_empty.py
@@ -26,7 +26,7 @@ def test_handle_empty_objects(self, sort):
# empty as first element with time series
# GH3259
df = DataFrame(
- dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
+ {"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
diff --git a/pandas/tests/reshape/concat/test_invalid.py b/pandas/tests/reshape/concat/test_invalid.py
index 3a886e0d612c6..ec8167906a60c 100644
--- a/pandas/tests/reshape/concat/test_invalid.py
+++ b/pandas/tests/reshape/concat/test_invalid.py
@@ -12,7 +12,7 @@ def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
- for obj in [1, dict(), [1, 2], (1, 2)]:
+ for obj in [1, {}, [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
diff --git a/pandas/tests/reshape/merge/test_merge_index_as_string.py b/pandas/tests/reshape/merge/test_merge_index_as_string.py
index d20d93370ec7e..c3e0a92850c07 100644
--- a/pandas/tests/reshape/merge/test_merge_index_as_string.py
+++ b/pandas/tests/reshape/merge/test_merge_index_as_string.py
@@ -8,22 +8,22 @@
@pytest.fixture
def df1():
return DataFrame(
- dict(
- outer=[1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4],
- inner=[1, 2, 3, 1, 2, 3, 4, 1, 2, 1, 2],
- v1=np.linspace(0, 1, 11),
- )
+ {
+ "outer": [1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4],
+ "inner": [1, 2, 3, 1, 2, 3, 4, 1, 2, 1, 2],
+ "v1": np.linspace(0, 1, 11),
+ }
)
@pytest.fixture
def df2():
return DataFrame(
- dict(
- outer=[1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3],
- inner=[1, 2, 2, 3, 3, 4, 2, 3, 1, 1, 2, 3],
- v2=np.linspace(10, 11, 12),
- )
+ {
+ "outer": [1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3],
+ "inner": [1, 2, 2, 3, 3, 4, 2, 3, 1, 1, 2, 3],
+ "v2": np.linspace(10, 11, 12),
+ }
)
diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py
index 8aa4012b3e77c..4786b8c35a5b1 100644
--- a/pandas/tests/reshape/test_cut.py
+++ b/pandas/tests/reshape/test_cut.py
@@ -377,10 +377,10 @@ def test_series_ret_bins():
@pytest.mark.parametrize(
"kwargs,msg",
[
- (dict(duplicates="drop"), None),
- (dict(), "Bin edges must be unique"),
- (dict(duplicates="raise"), "Bin edges must be unique"),
- (dict(duplicates="foo"), "invalid value for 'duplicates' parameter"),
+ ({"duplicates": "drop"}, None),
+ ({}, "Bin edges must be unique"),
+ ({"duplicates": "raise"}, "Bin edges must be unique"),
+ ({"duplicates": "foo"}, "invalid value for 'duplicates' parameter"),
],
)
def test_cut_duplicates_bin(kwargs, msg):
diff --git a/pandas/tests/reshape/test_qcut.py b/pandas/tests/reshape/test_qcut.py
index c436ab5d90578..e7a04bafed8e3 100644
--- a/pandas/tests/reshape/test_qcut.py
+++ b/pandas/tests/reshape/test_qcut.py
@@ -166,10 +166,10 @@ def test_qcut_list_like_labels(labels, expected):
@pytest.mark.parametrize(
"kwargs,msg",
[
- (dict(duplicates="drop"), None),
- (dict(), "Bin edges must be unique"),
- (dict(duplicates="raise"), "Bin edges must be unique"),
- (dict(duplicates="foo"), "invalid value for 'duplicates' parameter"),
+ ({"duplicates": "drop"}, None),
+ ({}, "Bin edges must be unique"),
+ ({"duplicates": "raise"}, "Bin edges must be unique"),
+ ({"duplicates": "foo"}, "invalid value for 'duplicates' parameter"),
],
)
def test_qcut_duplicates_bin(kwargs, msg):
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index 71ddf72562f36..a15ef11f9c292 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -561,7 +561,7 @@ def test_indexing():
expected = ts["2001"]
expected.name = "A"
- df = DataFrame(dict(A=ts))
+ df = DataFrame({"A": ts})
with tm.assert_produces_warning(FutureWarning):
# GH#36179 string indexing on rows for DataFrame deprecated
result = df["2001"]["A"]
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 682c057f05700..3bb87a8346c78 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -667,7 +667,9 @@ def test_underlying_data_conversion():
df
df["val"].update(s)
- expected = DataFrame(dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
+ expected = DataFrame(
+ {"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3], "val": [0, 1, 0]}
+ )
return_value = expected.set_index(["a", "b", "c"], inplace=True)
assert return_value is None
tm.assert_frame_equal(df, expected)
@@ -690,11 +692,11 @@ def test_underlying_data_conversion():
pd.set_option("chained_assignment", "raise")
# GH 3217
- df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
+ df = DataFrame({"a": [1, 3], "b": [np.nan, 2]})
df["c"] = np.nan
df["c"].update(Series(["foo"], index=[0]))
- expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=["foo", np.nan]))
+ expected = DataFrame({"a": [1, 3], "b": [np.nan, 2], "c": ["foo", np.nan]})
tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/series/methods/test_interpolate.py b/pandas/tests/series/methods/test_interpolate.py
index 1b05f72f5cf4d..7c64d10675edd 100644
--- a/pandas/tests/series/methods/test_interpolate.py
+++ b/pandas/tests/series/methods/test_interpolate.py
@@ -37,7 +37,7 @@ def nontemporal_method(request):
separately from these non-temporal methods.
"""
method = request.param
- kwargs = dict(order=1) if method in ("spline", "polynomial") else dict()
+ kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
@@ -67,7 +67,7 @@ def interp_methods_ind(request):
'values' as a parameterization
"""
method = request.param
- kwargs = dict(order=1) if method in ("spline", "polynomial") else dict()
+ kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
diff --git a/pandas/tests/tseries/holiday/test_holiday.py b/pandas/tests/tseries/holiday/test_holiday.py
index a2c146dbd65e8..0fb1da777e357 100644
--- a/pandas/tests/tseries/holiday/test_holiday.py
+++ b/pandas/tests/tseries/holiday/test_holiday.py
@@ -210,16 +210,16 @@ def test_argument_types(transform):
@pytest.mark.parametrize(
"name,kwargs",
[
- ("One-Time", dict(year=2012, month=5, day=28)),
+ ("One-Time", {"year": 2012, "month": 5, "day": 28}),
(
"Range",
- dict(
- month=5,
- day=28,
- start_date=datetime(2012, 1, 1),
- end_date=datetime(2012, 12, 31),
- offset=DateOffset(weekday=MO(1)),
- ),
+ {
+ "month": 5,
+ "day": 28,
+ "start_date": datetime(2012, 1, 1),
+ "end_date": datetime(2012, 12, 31),
+ "offset": DateOffset(weekday=MO(1)),
+ },
),
],
)
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index fca1316493e85..1ac98247780b7 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -4191,8 +4191,8 @@ class TestDST:
# test both basic names and dateutil timezones
timezone_utc_offsets = {
- "US/Eastern": dict(utc_offset_daylight=-4, utc_offset_standard=-5),
- "dateutil/US/Pacific": dict(utc_offset_daylight=-7, utc_offset_standard=-8),
+ "US/Eastern": {"utc_offset_daylight": -4, "utc_offset_standard": -5},
+ "dateutil/US/Pacific": {"utc_offset_daylight": -7, "utc_offset_standard": -8},
}
valid_date_offsets_singular = [
"weekday",
diff --git a/pandas/tests/tslibs/test_to_offset.py b/pandas/tests/tslibs/test_to_offset.py
index 93e5e2c801c09..5b1134ee85e2c 100644
--- a/pandas/tests/tslibs/test_to_offset.py
+++ b/pandas/tests/tslibs/test_to_offset.py
@@ -131,15 +131,15 @@ def test_to_offset_leading_plus(freqstr, expected):
@pytest.mark.parametrize(
"kwargs,expected",
[
- (dict(days=1, seconds=1), offsets.Second(86401)),
- (dict(days=-1, seconds=1), offsets.Second(-86399)),
- (dict(hours=1, minutes=10), offsets.Minute(70)),
- (dict(hours=1, minutes=-10), offsets.Minute(50)),
- (dict(weeks=1), offsets.Day(7)),
- (dict(hours=1), offsets.Hour(1)),
- (dict(hours=1), to_offset("60min")),
- (dict(microseconds=1), offsets.Micro(1)),
- (dict(microseconds=0), offsets.Nano(0)),
+ ({"days": 1, "seconds": 1}, offsets.Second(86401)),
+ ({"days": -1, "seconds": 1}, offsets.Second(-86399)),
+ ({"hours": 1, "minutes": 10}, offsets.Minute(70)),
+ ({"hours": 1, "minutes": -10}, offsets.Minute(50)),
+ ({"weeks": 1}, offsets.Day(7)),
+ ({"hours": 1}, offsets.Hour(1)),
+ ({"hours": 1}, to_offset("60min")),
+ ({"microseconds": 1}, offsets.Micro(1)),
+ ({"microseconds": 0}, offsets.Nano(0)),
],
)
def test_to_offset_pd_timedelta(kwargs, expected):
diff --git a/pandas/tests/util/test_assert_categorical_equal.py b/pandas/tests/util/test_assert_categorical_equal.py
index 8957e7a172666..29a0805bceb98 100644
--- a/pandas/tests/util/test_assert_categorical_equal.py
+++ b/pandas/tests/util/test_assert_categorical_equal.py
@@ -16,7 +16,7 @@ def test_categorical_equal(c):
def test_categorical_equal_order_mismatch(check_category_order):
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([1, 2, 3, 4], categories=[4, 3, 2, 1])
- kwargs = dict(check_category_order=check_category_order)
+ kwargs = {"check_category_order": check_category_order}
if check_category_order:
msg = """Categorical\\.categories are different
diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py
index d5161ce37494b..40d2763a13489 100644
--- a/pandas/tests/util/test_assert_frame_equal.py
+++ b/pandas/tests/util/test_assert_frame_equal.py
@@ -120,7 +120,7 @@ def test_frame_equal_shape_mismatch(df1, df2, obj_fixture):
],
)
def test_frame_equal_index_dtype_mismatch(df1, df2, msg, check_index_type):
- kwargs = dict(check_index_type=check_index_type)
+ kwargs = {"check_index_type": check_index_type}
if check_index_type:
with pytest.raises(AssertionError, match=msg):
@@ -134,7 +134,7 @@ def test_empty_dtypes(check_dtype):
df1 = DataFrame(columns=columns)
df2 = DataFrame(columns=columns)
- kwargs = dict(check_dtype=check_dtype)
+ kwargs = {"check_dtype": check_dtype}
df1["col1"] = df1["col1"].astype("int64")
if check_dtype:
diff --git a/pandas/tests/util/test_validate_args.py b/pandas/tests/util/test_validate_args.py
index 746d859b3322e..db532480efe07 100644
--- a/pandas/tests/util/test_validate_args.py
+++ b/pandas/tests/util/test_validate_args.py
@@ -30,7 +30,7 @@ def test_bad_arg_length_max_value_single():
def test_bad_arg_length_max_value_multiple():
args = (None, None)
- compat_args = dict(foo=None)
+ compat_args = {"foo": None}
min_fname_arg_count = 2
max_length = len(compat_args) + min_fname_arg_count
@@ -61,7 +61,7 @@ def test_not_all_defaults(i):
def test_validation():
# No exceptions should be raised.
- validate_args(_fname, (None,), 2, dict(out=None))
+ validate_args(_fname, (None,), 2, {"out": None})
compat_args = {"axis": 1, "out": None}
validate_args(_fname, (1, None), 2, compat_args)
diff --git a/pandas/tests/util/test_validate_kwargs.py b/pandas/tests/util/test_validate_kwargs.py
index 8fe2a3712bf49..c357affb6203d 100644
--- a/pandas/tests/util/test_validate_kwargs.py
+++ b/pandas/tests/util/test_validate_kwargs.py
@@ -41,7 +41,7 @@ def test_validation():
# No exceptions should be raised.
compat_args = {"f": None, "b": 1, "ba": "s"}
- kwargs = dict(f=None, b=1)
+ kwargs = {"f": None, "b": 1}
validate_kwargs(_fname, kwargs, compat_args)
diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index 72003eeddf5ee..5256cc29d5543 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -106,7 +106,7 @@ def show_versions(as_json: Union[str, bool] = False) -> None:
deps = _get_dependency_info()
if as_json:
- j = dict(system=sys_info, dependencies=deps)
+ j = {"system": sys_info, "dependencies": deps}
if as_json is True:
print(j)
| followup of #38078 | https://api.github.com/repos/pandas-dev/pandas/pulls/38116 | 2020-11-27T16:58:38Z | 2020-11-28T17:35:09Z | 2020-11-28T17:35:09Z | 2020-12-05T18:52:47Z |
Backport PR #34407 on branch 1.1.x: REGR: revert "CLN: _consolidate_inplace less" / fix regression in fillna() | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index a8bbf692a72e5..29b0e99a3a356 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -21,6 +21,7 @@ Fixed regressions
- Fixed regression in metadata propagation for ``groupby`` iterator (:issue:`37343`)
- Fixed regression in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`)
- Fixed regression in ``df.groupby(..).rolling(..)`` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
+- Fixed regression in :meth:`DataFrame.fillna` not filling ``NaN`` after other operations such as :meth:`DataFrame.pivot` (:issue:`36495`).
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index be85ab251c0c3..1c6248ad71b62 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3484,6 +3484,8 @@ class animal locomotion
if axis == 1:
return self[key]
+ self._consolidate_inplace()
+
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
@@ -6011,6 +6013,8 @@ def fillna(
inplace = validate_bool_kwarg(inplace, "inplace")
value, method = validate_fillna_kwargs(value, method)
+ self._consolidate_inplace()
+
# set the default here, so functions examining the signaure
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
@@ -6449,6 +6453,8 @@ def replace(
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")
+ self._consolidate_inplace()
+
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 4c52343d08513..67bf2584bb84e 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -417,6 +417,7 @@ def apply(self: T, f, align_keys=None, **kwargs) -> T:
def quantile(
self,
axis: int = 0,
+ consolidate: bool = True,
transposed: bool = False,
interpolation="linear",
qs=None,
@@ -430,6 +431,8 @@ def quantile(
Parameters
----------
axis: reduction axis, default 0
+ consolidate: bool, default True. Join together blocks having same
+ dtype
transposed: bool, default False
we are holding transposed data
interpolation : type of interpolation, default 'linear'
@@ -444,6 +447,9 @@ def quantile(
# simplify some of the code here and in the blocks
assert self.ndim >= 2
+ if consolidate:
+ self._consolidate_inplace()
+
def get_axe(block, qs, axes):
# Because Series dispatches to DataFrame, we will always have
# block.ndim == 2
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index b4f91590e09d1..0f5048dde3250 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -717,3 +717,14 @@ def test_fill_corner(self, float_frame, float_string_frame):
# TODO(wesm): unused?
result = empty_float.fillna(value=0) # noqa
+
+
+def test_fillna_nonconsolidated_frame():
+ # https://github.com/pandas-dev/pandas/issues/36495
+ df = DataFrame(
+ [[1, 1, 1, 1.0], [2, 2, 2, 2.0], [3, 3, 3, 3.0]],
+ columns=["i1", "i2", "i3", "f1"],
+ )
+ df_nonconsol = df.pivot("i1", "i2")
+ result = df_nonconsol.fillna(0)
+ assert result.isna().sum().sum() == 0
| Backport PR #34407 | https://api.github.com/repos/pandas-dev/pandas/pulls/38115 | 2020-11-27T16:44:12Z | 2020-11-27T19:47:55Z | 2020-11-27T19:47:55Z | 2020-11-27T19:48:01Z |
REF: implement _should_compare | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index c49f3f9457161..d3abeeebb5195 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4973,6 +4973,22 @@ def _maybe_promote(self, other: "Index"):
return self, other
+ def _get_other_deep(self, other: "Index") -> "Index":
+ dtype = other.dtype
+ if is_categorical_dtype(dtype):
+ # If there is ever a SparseIndex, this could get dispatched
+ # here too.
+ return dtype.categories
+ return other
+
+ def _should_compare(self, other: "Index") -> bool:
+ """
+ Check if `self == other` can ever have non-False entries.
+ """
+ other = self._get_other_deep(other)
+ dtype = other.dtype
+ return self._is_comparable_dtype(dtype) or is_object_dtype(dtype)
+
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
| Broken off from #38105 so I can start using it to fix inconsistent set ops. | https://api.github.com/repos/pandas-dev/pandas/pulls/38114 | 2020-11-27T16:16:58Z | 2020-11-29T15:31:15Z | null | 2020-11-29T15:31:44Z |
TST: rewrite convert_dtypes test to make it easier extendable | diff --git a/pandas/tests/series/methods/test_convert_dtypes.py b/pandas/tests/series/methods/test_convert_dtypes.py
index 8a915324a72c1..d44667b258414 100644
--- a/pandas/tests/series/methods/test_convert_dtypes.py
+++ b/pandas/tests/series/methods/test_convert_dtypes.py
@@ -8,272 +8,167 @@
import pandas as pd
import pandas._testing as tm
+# Each test case consists of a tuple with the data and dtype to create the
+# test Series, the default dtype for the expected result (which is valid
+# for most cases), and the specific cases where the result deviates from
+# this default. Those overrides are defined as a dict with (keyword, val) as
+# dictionary key. In case of multiple items, the last override takes precendence.
+test_cases = [
+ (
+ # data
+ [1, 2, 3],
+ # original dtype
+ np.dtype("int32"),
+ # default expected dtype
+ "Int32",
+ # exceptions on expected dtype
+ {("convert_integer", False): np.dtype("int32")},
+ ),
+ (
+ [1, 2, 3],
+ np.dtype("int64"),
+ "Int64",
+ {("convert_integer", False): np.dtype("int64")},
+ ),
+ (
+ ["x", "y", "z"],
+ np.dtype("O"),
+ pd.StringDtype(),
+ {("convert_string", False): np.dtype("O")},
+ ),
+ (
+ [True, False, np.nan],
+ np.dtype("O"),
+ pd.BooleanDtype(),
+ {("convert_boolean", False): np.dtype("O")},
+ ),
+ (
+ ["h", "i", np.nan],
+ np.dtype("O"),
+ pd.StringDtype(),
+ {("convert_string", False): np.dtype("O")},
+ ),
+ ( # GH32117
+ ["h", "i", 1],
+ np.dtype("O"),
+ np.dtype("O"),
+ {},
+ ),
+ (
+ [10, np.nan, 20],
+ np.dtype("float"),
+ "Int64",
+ {("convert_integer", False): np.dtype("float")},
+ ),
+ ([np.nan, 100.5, 200], np.dtype("float"), np.dtype("float"), {}),
+ (
+ [3, 4, 5],
+ "Int8",
+ "Int8",
+ {},
+ ),
+ (
+ [[1, 2], [3, 4], [5]],
+ None,
+ np.dtype("O"),
+ {},
+ ),
+ (
+ [4, 5, 6],
+ np.dtype("uint32"),
+ "UInt32",
+ {("convert_integer", False): np.dtype("uint32")},
+ ),
+ (
+ [-10, 12, 13],
+ np.dtype("i1"),
+ "Int8",
+ {("convert_integer", False): np.dtype("i1")},
+ ),
+ (
+ [1, 2.0],
+ object,
+ "Int64",
+ {
+ ("convert_integer", False): np.dtype("float"),
+ ("infer_objects", False): np.dtype("object"),
+ },
+ ),
+ (
+ [1, 2.5],
+ object,
+ np.dtype("float"),
+ {("infer_objects", False): np.dtype("object")},
+ ),
+ (["a", "b"], pd.CategoricalDtype(), pd.CategoricalDtype(), {}),
+ (
+ pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
+ pd.DatetimeTZDtype(tz="UTC"),
+ pd.DatetimeTZDtype(tz="UTC"),
+ {},
+ ),
+ (
+ pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
+ "datetime64[ns]",
+ np.dtype("datetime64[ns]"),
+ {},
+ ),
+ (
+ pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
+ object,
+ np.dtype("datetime64[ns]"),
+ {("infer_objects", False): np.dtype("object")},
+ ),
+ (pd.period_range("1/1/2011", freq="M", periods=3), None, pd.PeriodDtype("M"), {}),
+ (
+ pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]),
+ None,
+ pd.IntervalDtype("int64"),
+ {},
+ ),
+]
+
class TestSeriesConvertDtypes:
- # The answerdict has keys that have 4 tuples, corresponding to the arguments
- # infer_objects, convert_string, convert_integer, convert_boolean
- # This allows all 16 possible combinations to be tested. Since common
- # combinations expect the same answer, this provides an easy way to list
- # all the possibilities
@pytest.mark.parametrize(
- "data, maindtype, answerdict",
- [
- (
- [1, 2, 3],
- np.dtype("int32"),
- {
- ((True, False), (True, False), (True,), (True, False)): "Int32",
- ((True, False), (True, False), (False,), (True, False)): np.dtype(
- "int32"
- ),
- },
- ),
- (
- [1, 2, 3],
- np.dtype("int64"),
- {
- ((True, False), (True, False), (True,), (True, False)): "Int64",
- ((True, False), (True, False), (False,), (True, False)): np.dtype(
- "int64"
- ),
- },
- ),
- (
- ["x", "y", "z"],
- np.dtype("O"),
- {
- (
- (True, False),
- (True,),
- (True, False),
- (True, False),
- ): pd.StringDtype(),
- ((True, False), (False,), (True, False), (True, False)): np.dtype(
- "O"
- ),
- },
- ),
- (
- [True, False, np.nan],
- np.dtype("O"),
- {
- (
- (True, False),
- (True, False),
- (True, False),
- (True,),
- ): pd.BooleanDtype(),
- ((True, False), (True, False), (True, False), (False,)): np.dtype(
- "O"
- ),
- },
- ),
- (
- ["h", "i", np.nan],
- np.dtype("O"),
- {
- (
- (True, False),
- (True,),
- (True, False),
- (True, False),
- ): pd.StringDtype(),
- ((True, False), (False,), (True, False), (True, False)): np.dtype(
- "O"
- ),
- },
- ),
- ( # GH32117
- ["h", "i", 1],
- np.dtype("O"),
- {
- (
- (True, False),
- (True, False),
- (True, False),
- (True, False),
- ): np.dtype("O"),
- },
- ),
- (
- [10, np.nan, 20],
- np.dtype("float"),
- {
- ((True, False), (True, False), (True,), (True, False)): "Int64",
- ((True, False), (True, False), (False,), (True, False)): np.dtype(
- "float"
- ),
- },
- ),
- (
- [np.nan, 100.5, 200],
- np.dtype("float"),
- {
- (
- (True, False),
- (True, False),
- (True, False),
- (True, False),
- ): np.dtype("float"),
- },
- ),
- (
- [3, 4, 5],
- "Int8",
- {((True, False), (True, False), (True, False), (True, False)): "Int8"},
- ),
- (
- [[1, 2], [3, 4], [5]],
- None,
- {
- (
- (True, False),
- (True, False),
- (True, False),
- (True, False),
- ): np.dtype("O"),
- },
- ),
- (
- [4, 5, 6],
- np.dtype("uint32"),
- {
- ((True, False), (True, False), (True,), (True, False)): "UInt32",
- ((True, False), (True, False), (False,), (True, False)): np.dtype(
- "uint32"
- ),
- },
- ),
- (
- [-10, 12, 13],
- np.dtype("i1"),
- {
- ((True, False), (True, False), (True,), (True, False)): "Int8",
- ((True, False), (True, False), (False,), (True, False)): np.dtype(
- "i1"
- ),
- },
- ),
- (
- [1, 2.0],
- object,
- {
- ((True,), (True, False), (True,), (True, False)): "Int64",
- ((True,), (True, False), (False,), (True, False)): np.dtype(
- "float"
- ),
- ((False,), (True, False), (True, False), (True, False)): np.dtype(
- "object"
- ),
- },
- ),
- (
- [1, 2.5],
- object,
- {
- ((True,), (True, False), (True, False), (True, False)): np.dtype(
- "float"
- ),
- ((False,), (True, False), (True, False), (True, False)): np.dtype(
- "object"
- ),
- },
- ),
- (
- ["a", "b"],
- pd.CategoricalDtype(),
- {
- (
- (True, False),
- (True, False),
- (True, False),
- (True, False),
- ): pd.CategoricalDtype(),
- },
- ),
- (
- pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
- pd.DatetimeTZDtype(tz="UTC"),
- {
- (
- (True, False),
- (True, False),
- (True, False),
- (True, False),
- ): pd.DatetimeTZDtype(tz="UTC"),
- },
- ),
- (
- pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
- "datetime64[ns]",
- {
- (
- (True, False),
- (True, False),
- (True, False),
- (True, False),
- ): np.dtype("datetime64[ns]"),
- },
- ),
- (
- pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
- object,
- {
- ((True,), (True, False), (True, False), (True, False)): np.dtype(
- "datetime64[ns]"
- ),
- ((False,), (True, False), (True, False), (True, False)): np.dtype(
- "O"
- ),
- },
- ),
- (
- pd.period_range("1/1/2011", freq="M", periods=3),
- None,
- {
- (
- (True, False),
- (True, False),
- (True, False),
- (True, False),
- ): pd.PeriodDtype("M"),
- },
- ),
- (
- pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]),
- None,
- {
- (
- (True, False),
- (True, False),
- (True, False),
- (True, False),
- ): pd.IntervalDtype("int64"),
- },
- ),
- ],
+ "data, maindtype, expected_default, expected_other",
+ test_cases,
)
@pytest.mark.parametrize("params", product(*[(True, False)] * 4))
- def test_convert_dtypes(self, data, maindtype, params, answerdict):
+ def test_convert_dtypes(
+ self, data, maindtype, params, expected_default, expected_other
+ ):
if maindtype is not None:
series = pd.Series(data, dtype=maindtype)
else:
series = pd.Series(data)
- answers = {k: a for (kk, a) in answerdict.items() for k in product(*kk)}
- ns = series.convert_dtypes(*params)
- expected_dtype = answers[tuple(params)]
- expected = pd.Series(series.values, dtype=expected_dtype)
- tm.assert_series_equal(ns, expected)
+ result = series.convert_dtypes(*params)
+
+ param_names = [
+ "infer_objects",
+ "convert_string",
+ "convert_integer",
+ "convert_boolean",
+ ]
+ params_dict = dict(zip(param_names, params))
+
+ expected_dtype = expected_default
+ for (key, val), dtype in expected_other.items():
+ if params_dict[key] is val:
+ expected_dtype = dtype
+
+ expected = pd.Series(data, dtype=expected_dtype)
+ tm.assert_series_equal(result, expected)
# Test that it is a copy
copy = series.copy(deep=True)
- if is_interval_dtype(ns.dtype) and ns.dtype.subtype.kind in ["i", "u"]:
+ if is_interval_dtype(result.dtype) and result.dtype.subtype.kind in ["i", "u"]:
msg = "Cannot set float NaN to integer-backed IntervalArray"
with pytest.raises(ValueError, match=msg):
- ns[ns.notna()] = np.nan
+ result[result.notna()] = np.nan
else:
- ns[ns.notna()] = np.nan
+ result[result.notna()] = np.nan
# Make sure original not changed
tm.assert_series_equal(series, copy)
| I have a branch adding `convert_floating` support for ``convert_dtypes``, and the current way they are written is not really maintainable when adding more keywords (eg the tuples of 4 would become tuples of 5, making it even harder to know what is what, and making the formatting less easy to follow).
In this PR, I just refactored how the test cases are defined and didn't change any actual content. | https://api.github.com/repos/pandas-dev/pandas/pulls/38113 | 2020-11-27T16:13:11Z | 2020-11-28T14:27:21Z | 2020-11-28T14:27:21Z | 2020-11-28T14:31:45Z |
REF: IntervalIndex._assert_can_do_setop | diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index ed92b3dade6a0..bd92926941aa1 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -122,6 +122,7 @@ def setop_check(method):
@wraps(method)
def wrapped(self, other, sort=False):
+ self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other = ensure_index(other)
@@ -131,14 +132,6 @@ def wrapped(self, other, sort=False):
result = result.astype(self.dtype)
return result
- if self._is_non_comparable_own_type(other):
- # GH#19016: ensure set op will not return a prohibited dtype
- raise TypeError(
- "can only do set operations between two IntervalIndex "
- "objects that are closed on the same side "
- "and have compatible dtypes"
- )
-
return method(self, other, sort)
return wrapped
@@ -956,11 +949,27 @@ def _format_space(self) -> str:
# --------------------------------------------------------------------
# Set Operations
+ def _assert_can_do_setop(self, other):
+ super()._assert_can_do_setop(other)
+
+ if isinstance(other, IntervalIndex) and self._is_non_comparable_own_type(other):
+ # GH#19016: ensure set op will not return a prohibited dtype
+ raise TypeError(
+ "can only do set operations between two IntervalIndex "
+ "objects that are closed on the same side "
+ "and have compatible dtypes"
+ )
+
@Appender(Index.intersection.__doc__)
@setop_check
- def intersection(
- self, other: "IntervalIndex", sort: bool = False
- ) -> "IntervalIndex":
+ def intersection(self, other, sort=False) -> Index:
+ self._validate_sort_keyword(sort)
+ self._assert_can_do_setop(other)
+ other, _ = self._convert_can_do_setop(other)
+
+ if not isinstance(other, IntervalIndex):
+ return self.astype(object).intersection(other)
+
if self.left.is_unique and self.right.is_unique:
taken = self._intersection_unique(other)
elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:
| Working towards standardizing+deduplicating set ops across Index subclasses, xref #38111 | https://api.github.com/repos/pandas-dev/pandas/pulls/38112 | 2020-11-27T16:00:40Z | 2020-11-28T18:22:36Z | 2020-11-28T18:22:36Z | 2020-11-28T18:23:50Z |
BUG: name retention in Index.intersection | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 9168041a4f474..10c52c00b3c4d 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -801,6 +801,7 @@ Other
- Fixed bug in metadata propagation incorrectly copying DataFrame columns as metadata when the column name overlaps with the metadata name (:issue:`37037`)
- Fixed metadata propagation in the :class:`Series.dt`, :class:`Series.str` accessors, :class:`DataFrame.duplicated`, :class:`DataFrame.stack`, :class:`DataFrame.unstack`, :class:`DataFrame.pivot`, :class:`DataFrame.append`, :class:`DataFrame.diff`, :class:`DataFrame.applymap` and :class:`DataFrame.update` methods (:issue:`28283`, :issue:`37381`)
- Fixed metadata propagation when selecting columns with ``DataFrame.__getitem__`` (:issue:`28283`)
+- Bug in :meth:`Index.intersection` with non-:class:`Index` failing to set the correct name on the returned :class:`Index` (:issue:`38111`)
- Bug in :meth:`Index.union` behaving differently depending on whether operand is an :class:`Index` or other list-like (:issue:`36384`)
- Bug in :meth:`Index.intersection` with non-matching numeric dtypes casting to ``object`` dtype instead of minimal common dtype (:issue:`38122`)
- Passing an array with 2 or more dimensions to the :class:`Series` constructor now raises the more specific ``ValueError`` rather than a bare ``Exception`` (:issue:`35744`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 09fe885e47754..141b626d15d9d 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2821,7 +2821,7 @@ def intersection(self, other, sort=False):
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
- other = ensure_index(other)
+ other, _ = self._convert_can_do_setop(other)
if self.equals(other) and not self.has_duplicates:
return self._get_reconciled_name_object(other)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 28ff5a8bacc71..9b8703f5c2fff 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -686,10 +686,17 @@ def intersection(self, other, sort=False):
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
+ other, _ = self._convert_can_do_setop(other)
if self.equals(other):
return self._get_reconciled_name_object(other)
+ return self._intersection(other, sort=sort)
+
+ def _intersection(self, other: Index, sort=False) -> Index:
+ """
+ intersection specialized to the case with matching dtypes.
+ """
if len(self) == 0:
return self.copy()._get_reconciled_name_object(other)
if len(other) == 0:
@@ -704,10 +711,11 @@ def intersection(self, other, sort=False):
return result
elif not self._can_fast_intersect(other):
- result = Index.intersection(self, other, sort=sort)
- # We need to invalidate the freq because Index.intersection
+ result = Index._intersection(self, other, sort=sort)
+ # We need to invalidate the freq because Index._intersection
# uses _shallow_copy on a view of self._data, which will preserve
# self.freq if we're not careful.
+ result = self._wrap_setop_result(other, result)
return result._with_freq(None)._with_freq("infer")
# to make our life easier, "sort" the two ranges
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index bd92926941aa1..dd9e16cb6cd5f 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -124,7 +124,11 @@ def setop_check(method):
def wrapped(self, other, sort=False):
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
- other = ensure_index(other)
+ other, _ = self._convert_can_do_setop(other)
+
+ if op_name == "intersection":
+ if self.equals(other):
+ return self._get_reconciled_name_object(other)
if not isinstance(other, IntervalIndex):
result = getattr(self.astype(object), op_name)(other)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 4aedf03ca1800..b9acb12890ecb 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3603,7 +3603,12 @@ def intersection(self, other, sort=False):
if self.equals(other):
if self.has_duplicates:
return self.unique().rename(result_names)
- return self.rename(result_names)
+ return self._get_reconciled_name_object(other)
+
+ return self._intersection(other, sort=sort)
+
+ def _intersection(self, other, sort=False):
+ other, result_names = self._convert_can_do_setop(other)
if not is_object_dtype(other.dtype):
# The intersection is empty
@@ -3721,7 +3726,7 @@ def _convert_can_do_setop(self, other):
else:
msg = "other must be a MultiIndex or a list of tuples"
try:
- other = MultiIndex.from_tuples(other)
+ other = MultiIndex.from_tuples(other, names=self.names)
except (ValueError, TypeError) as err:
# ValueError raised by tupels_to_object_array if we
# have non-object dtype
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index b223e583d0ce0..3f70582be267c 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -639,15 +639,19 @@ def _setop(self, other, sort, opname: str):
def intersection(self, other, sort=False):
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
- other = ensure_index(other)
+ other, _ = self._convert_can_do_setop(other)
if self.equals(other):
return self._get_reconciled_name_object(other)
- elif is_object_dtype(other.dtype):
+ return self._intersection(other, sort=sort)
+
+ def _intersection(self, other, sort=False):
+
+ if is_object_dtype(other.dtype):
return self.astype("O").intersection(other, sort=sort)
- elif not is_dtype_equal(self.dtype, other.dtype):
+ elif not self._is_comparable_dtype(other.dtype):
# We can infer that the intersection is empty.
# assert_can_do_setop ensures that this is not just a mismatched freq
this = self[:0].astype("O")
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 669bf115df104..6380551fc202c 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -15,6 +15,7 @@
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
+ is_dtype_equal,
is_float,
is_integer,
is_list_like,
@@ -504,11 +505,21 @@ def intersection(self, other, sort=False):
intersection : Index
"""
self._validate_sort_keyword(sort)
+ self._assert_can_do_setop(other)
+ other, _ = self._convert_can_do_setop(other)
if self.equals(other):
return self._get_reconciled_name_object(other)
+ return self._intersection(other, sort=sort)
+
+ def _intersection(self, other, sort=False):
+
if not isinstance(other, RangeIndex):
+ if is_dtype_equal(other.dtype, self.dtype):
+ # Int64Index
+ result = super()._intersection(other, sort=sort)
+ return self._wrap_setop_result(other, result)
return super().intersection(other, sort=sort)
if not len(self) or not len(other):
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index c8edd30e3f7aa..3b6d29a15e7dc 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -471,10 +471,11 @@ def test_intersection_bug(self):
def test_intersection_list(self):
# GH#35876
+ # values is not an Index -> no name -> retain "a"
values = [pd.Timestamp("2020-01-01"), pd.Timestamp("2020-02-01")]
idx = DatetimeIndex(values, name="a")
res = idx.intersection(values)
- tm.assert_index_equal(res, idx.rename(None))
+ tm.assert_index_equal(res, idx)
def test_month_range_union_tz_pytz(self, sort):
from pytz import timezone
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index 2675c4569a8e9..b6e793ba334ff 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -98,13 +98,20 @@ def test_compatible_inconsistent_pairs(idx_fact1, idx_fact2):
("Period[D]", "float64", "object"),
],
)
-def test_union_dtypes(left, right, expected):
+@pytest.mark.parametrize("names", [("foo", "foo", "foo"), ("foo", "bar", None)])
+def test_union_dtypes(left, right, expected, names):
left = pandas_dtype(left)
right = pandas_dtype(right)
- a = pd.Index([], dtype=left)
- b = pd.Index([], dtype=right)
- result = a.union(b).dtype
- assert result == expected
+ a = pd.Index([], dtype=left, name=names[0])
+ b = pd.Index([], dtype=right, name=names[1])
+ result = a.union(b)
+ assert result.dtype == expected
+ assert result.name == names[2]
+
+ # Testing name retention
+ # TODO: pin down desired dtype; do we want it to be commutative?
+ result = a.intersection(b)
+ assert result.name == names[2]
def test_dunder_inplace_setops_deprecated(index):
@@ -388,6 +395,25 @@ def test_intersect_unequal(self, index, fname, sname, expected_name):
expected = index[1:].set_names(expected_name).sort_values()
tm.assert_index_equal(intersect, expected)
+ def test_intersection_name_retention_with_nameless(self, index):
+ if isinstance(index, MultiIndex):
+ index = index.rename(list(range(index.nlevels)))
+ else:
+ index = index.rename("foo")
+
+ other = np.asarray(index)
+
+ result = index.intersection(other)
+ assert result.name == index.name
+
+ # empty other, same dtype
+ result = index.intersection(other[:0])
+ assert result.name == index.name
+
+ # empty `self`
+ result = index[:0].intersection(other)
+ assert result.name == index.name
+
def test_difference_preserves_type_empty(self, index, sort):
# GH#20040
# If taking difference of a set and itself, it
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
its going to take a few passes to get the behavior consistent across all subclasses. once done, we can de-duplicate the boilerplate. | https://api.github.com/repos/pandas-dev/pandas/pulls/38111 | 2020-11-27T15:56:23Z | 2020-12-02T01:46:07Z | 2020-12-02T01:46:06Z | 2020-12-02T01:49:46Z |
DOC: Add behavior for Index argument in DataFrame.loc | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 2f0b6ab0662dd..e3c94fa3941c0 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -259,10 +259,11 @@ def loc(self) -> "_LocIndexer":
e.g. ``[True, False, True]``.
- An alignable boolean Series. The index of the key will be aligned before
masking.
+ - An alignable Index. The Index of the returned selection will be the input.
- A ``callable`` function with one argument (the calling Series or
DataFrame) and that returns valid output for indexing (one of the above)
- See more at :ref:`Selection by Label <indexing.label>`
+ See more at :ref:`Selection by Label <indexing.label>`.
Raises
------
@@ -332,6 +333,14 @@ def loc(self) -> "_LocIndexer":
max_speed shield
sidewinder 7 8
+ Index (same behavior as ``df.reindex``)
+
+ >>> df.loc[pd.Index(["cobra", "viper"], name="foo")]
+ max_speed shield
+ foo
+ cobra 1 2
+ viper 4 5
+
Conditional that returns a boolean Series
>>> df.loc[df['shield'] > 6]
| - [x] closes #36850
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/38109 | 2020-11-27T12:48:37Z | 2020-11-29T18:06:24Z | 2020-11-29T18:06:24Z | 2020-11-29T18:06:28Z |
BUG: calling at[] on a Series with a single-level MultiIndex returns a Series, not a scalar | diff --git a/pandas/core/series.py b/pandas/core/series.py
index d493ac0a8c051..75f62b20163b3 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -82,7 +82,11 @@
sanitize_array,
)
from pandas.core.generic import NDFrame
-from pandas.core.indexers import deprecate_ndim_indexing, unpack_1tuple
+from pandas.core.indexers import (
+ deprecate_ndim_indexing,
+ is_list_like_indexer,
+ unpack_1tuple,
+)
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
from pandas.core.indexes.api import Float64Index, Index, MultiIndex, ensure_index
import pandas.core.indexes.base as ibase
@@ -922,6 +926,8 @@ def _get_value(self, label, takeable: bool = False):
if takeable:
return self._values[label]
+ if isinstance(self.index, MultiIndex) and not is_list_like_indexer(label):
+ label = (label,)
# Similar to Index.get_value, but we do not fall back to positional
loc = self.index.get_loc(label)
return self.index._get_values_for_loc(self, loc, label)
| - [ ] closes #GH38053
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38108 | 2020-11-27T10:25:00Z | 2020-11-27T10:25:33Z | null | 2020-11-27T10:26:57Z |
BUG in Series.interpolate: limit_area/limit_direction kwargs with method="pad"/"bfill" have no effect | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 84eb3b3f15780..493e5b53a5559 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -670,6 +670,7 @@ Missing
- Bug in :meth:`.SeriesGroupBy.transform` now correctly handles missing values for ``dropna=False`` (:issue:`35014`)
- Bug in :meth:`Series.nunique` with ``dropna=True`` was returning incorrect results when both ``NA`` and ``None`` missing values were present (:issue:`37566`)
+- Bug in :meth:`Series.interpolate` where kwarg ``limit_area`` and ``limit_direction`` had no effect when using methods ``pad`` and ``backfill`` (:issue:`31048`)
-
MultiIndex
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index d912c908815f8..1c08888aa85fd 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1261,6 +1261,7 @@ def interpolate(
axis=axis,
inplace=inplace,
limit=limit,
+ limit_area=limit_area,
downcast=downcast,
)
# validate the interp method
@@ -1287,6 +1288,7 @@ def _interpolate_with_fill(
axis: int = 0,
inplace: bool = False,
limit: Optional[int] = None,
+ limit_area: Optional[str] = None,
downcast: Optional[str] = None,
) -> List["Block"]:
""" fillna but using the interpolate machinery """
@@ -1301,6 +1303,7 @@ def _interpolate_with_fill(
method=method,
axis=axis,
limit=limit,
+ limit_area=limit_area,
)
blocks = [self.make_block_same_class(values, ndim=self.ndim)]
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 0afffbc1460e0..e374ba435a0bd 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -1,13 +1,13 @@
"""
Routines for filling missing data.
"""
-
+from functools import partial
from typing import Any, List, Optional, Set, Union
import numpy as np
from pandas._libs import algos, lib
-from pandas._typing import ArrayLike, DtypeObj
+from pandas._typing import ArrayLike, Axis, DtypeObj
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from_array
@@ -528,16 +528,92 @@ def _cubicspline_interpolate(xi, yi, x, axis=0, bc_type="not-a-knot", extrapolat
return P(x)
+def _interpolate_with_limit_area(
+ values: ArrayLike, method: str, limit: Optional[int], limit_area: Optional[str]
+) -> ArrayLike:
+ """
+ Apply interpolation and limit_area logic to values along a to-be-specified axis.
+
+ Parameters
+ ----------
+ values: array-like
+ Input array.
+ method: str
+ Interpolation method. Could be "bfill" or "pad"
+ limit: int, optional
+ Index limit on interpolation.
+ limit_area: str
+ Limit area for interpolation. Can be "inside" or "outside"
+
+ Returns
+ -------
+ values: array-like
+ Interpolated array.
+ """
+
+ invalid = isna(values)
+
+ if not invalid.all():
+ first = find_valid_index(values, "first")
+ last = find_valid_index(values, "last")
+
+ values = interpolate_2d(
+ values,
+ method=method,
+ limit=limit,
+ )
+
+ if limit_area == "inside":
+ invalid[first : last + 1] = False
+ elif limit_area == "outside":
+ invalid[:first] = invalid[last + 1 :] = False
+
+ values[invalid] = np.nan
+
+ return values
+
+
def interpolate_2d(
values,
- method="pad",
- axis=0,
- limit=None,
+ method: str = "pad",
+ axis: Axis = 0,
+ limit: Optional[int] = None,
+ limit_area: Optional[str] = None,
):
"""
Perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result.
+
+ Parameters
+ ----------
+ values: array-like
+ Input array.
+ method: str, default "pad"
+ Interpolation method. Could be "bfill" or "pad"
+ axis: 0 or 1
+ Interpolation axis
+ limit: int, optional
+ Index limit on interpolation.
+ limit_area: str, optional
+ Limit area for interpolation. Can be "inside" or "outside"
+
+ Returns
+ -------
+ values: array-like
+ Interpolated array.
"""
+ if limit_area is not None:
+ return np.apply_along_axis(
+ partial(
+ _interpolate_with_limit_area,
+ method=method,
+ limit=limit,
+ limit_area=limit_area,
+ ),
+ axis,
+ values,
+ )
+
orig_values = values
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
diff --git a/pandas/tests/series/methods/test_interpolate.py b/pandas/tests/series/methods/test_interpolate.py
index 7c64d10675edd..8740a309eec13 100644
--- a/pandas/tests/series/methods/test_interpolate.py
+++ b/pandas/tests/series/methods/test_interpolate.py
@@ -458,6 +458,82 @@ def test_interp_limit_direction_raises(self, method, limit_direction, expected):
with pytest.raises(ValueError, match=msg):
s.interpolate(method=method, limit_direction=limit_direction)
+ @pytest.mark.parametrize(
+ "data, expected_data, kwargs",
+ (
+ (
+ [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
+ [np.nan, np.nan, 3.0, 3.0, 3.0, 3.0, 7.0, np.nan, np.nan],
+ {"method": "pad", "limit_area": "inside"},
+ ),
+ (
+ [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
+ [np.nan, np.nan, 3.0, 3.0, np.nan, np.nan, 7.0, np.nan, np.nan],
+ {"method": "pad", "limit_area": "inside", "limit": 1},
+ ),
+ (
+ [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
+ [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0],
+ {"method": "pad", "limit_area": "outside"},
+ ),
+ (
+ [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
+ [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan],
+ {"method": "pad", "limit_area": "outside", "limit": 1},
+ ),
+ (
+ [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
+ [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
+ {"method": "pad", "limit_area": "outside", "limit": 1},
+ ),
+ (
+ range(5),
+ range(5),
+ {"method": "pad", "limit_area": "outside", "limit": 1},
+ ),
+ ),
+ )
+ def test_interp_limit_area_with_pad(self, data, expected_data, kwargs):
+ # GH26796
+
+ s = Series(data)
+ expected = Series(expected_data)
+ result = s.interpolate(**kwargs)
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "data, expected_data, kwargs",
+ (
+ (
+ [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
+ [np.nan, np.nan, 3.0, 7.0, 7.0, 7.0, 7.0, np.nan, np.nan],
+ {"method": "bfill", "limit_area": "inside"},
+ ),
+ (
+ [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
+ [np.nan, np.nan, 3.0, np.nan, np.nan, 7.0, 7.0, np.nan, np.nan],
+ {"method": "bfill", "limit_area": "inside", "limit": 1},
+ ),
+ (
+ [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
+ [3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan],
+ {"method": "bfill", "limit_area": "outside"},
+ ),
+ (
+ [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],
+ [np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan],
+ {"method": "bfill", "limit_area": "outside", "limit": 1},
+ ),
+ ),
+ )
+ def test_interp_limit_area_with_backfill(self, data, expected_data, kwargs):
+ # GH26796
+
+ s = Series(data)
+ expected = Series(expected_data)
+ result = s.interpolate(**kwargs)
+ tm.assert_series_equal(result, expected)
+
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
| - [x] closes #26796
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Picking up #31048 | https://api.github.com/repos/pandas-dev/pandas/pulls/38106 | 2020-11-27T05:06:06Z | 2020-12-01T22:05:06Z | 2020-12-01T22:05:06Z | 2020-12-01T22:45:17Z |
REF: implement _should_compare | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 09fe885e47754..06b57662149ad 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4937,6 +4937,43 @@ def get_indexer_for(self, target, **kwargs):
indexer, _ = self.get_indexer_non_unique(target)
return indexer
+ def _get_indexer_non_comparable(self, target: "Index", method, unique: bool = True):
+ """
+ Called from get_indexer or get_indexer_non_unique when the target
+ is of a non-comparable dtype.
+
+ For get_indexer lookups with method=None, get_indexer is an _equality_
+ check, so non-comparable dtypes mean we will always have no matches.
+
+ For get_indexer lookups with a method, get_indexer is an _inequality_
+ check, so non-comparable dtypes mean we will always raise TypeError.
+
+ Parameters
+ ----------
+ target : Index
+ method : str or None
+ unique : bool, default True
+ * True if called from get_indexer.
+ * False if called from get_indexer_non_unique.
+
+ Raises
+ ------
+ TypeError
+ If doing an inequality check, i.e. method is not None.
+ """
+ if method is not None:
+ other = _unpack_nested_dtype(target)
+ raise TypeError(f"Cannot compare dtypes {self.dtype} and {other.dtype}")
+
+ no_matches = -1 * np.ones(target.shape, dtype=np.intp)
+ if unique:
+ # This is for get_indexer
+ return no_matches
+ else:
+ # This is for get_indexer_non_unique
+ missing = np.arange(len(target), dtype=np.intp)
+ return no_matches, missing
+
@property
def _index_as_unique(self):
"""
@@ -4972,6 +5009,14 @@ def _maybe_promote(self, other: "Index"):
return self, other
+ def _should_compare(self, other: "Index") -> bool:
+ """
+ Check if `self == other` can ever have non-False entries.
+ """
+ other = _unpack_nested_dtype(other)
+ dtype = other.dtype
+ return self._is_comparable_dtype(dtype) or is_object_dtype(dtype)
+
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
@@ -6119,3 +6164,24 @@ def get_unanimous_names(*indexes: Index) -> Tuple[Label, ...]:
name_sets = [{*ns} for ns in zip_longest(*name_tups)]
names = tuple(ns.pop() if len(ns) == 1 else None for ns in name_sets)
return names
+
+
+def _unpack_nested_dtype(other: Index) -> Index:
+ """
+ When checking if our dtype is comparable with another, we need
+ to unpack CategoricalDtype to look at its categories.dtype.
+
+ Parameters
+ ----------
+ other : Index
+
+ Returns
+ -------
+ Index
+ """
+ dtype = other.dtype
+ if is_categorical_dtype(dtype):
+ # If there is ever a SparseIndex, this could get dispatched
+ # here too.
+ return dtype.categories
+ return other
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index b223e583d0ce0..ae0126df4f088 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -452,13 +452,10 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
target = ensure_index(target)
- if isinstance(target, PeriodIndex):
- if not self._is_comparable_dtype(target.dtype):
- # i.e. target.freq != self.freq
- # No matches
- no_matches = -1 * np.ones(self.shape, dtype=np.intp)
- return no_matches
+ if not self._should_compare(target):
+ return self._get_indexer_non_comparable(target, method, unique=True)
+ if isinstance(target, PeriodIndex):
target = target._get_engine_target() # i.e. target.asi8
self_index = self._int64index
else:
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index 9b203e1b17517..c03c89f32f73e 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -21,6 +21,28 @@
)
import pandas._testing as tm
+dti4 = date_range("2016-01-01", periods=4)
+dti = dti4[:-1]
+rng = pd.Index(range(3))
+
+
+@pytest.fixture(
+ params=[
+ dti,
+ dti.tz_localize("UTC"),
+ dti.to_period("W"),
+ dti - dti[0],
+ rng,
+ pd.Index([1, 2, 3]),
+ pd.Index([2.0, 3.0, 4.0]),
+ pd.Index([4, 5, 6], dtype="u8"),
+ pd.IntervalIndex.from_breaks(dti4),
+ ]
+)
+def non_comparable_idx(request):
+ # All have length 3
+ return request.param
+
class TestGetItem:
def test_ellipsis(self):
@@ -438,6 +460,37 @@ def test_get_indexer_mismatched_dtype(self):
result = pi.get_indexer_non_unique(pi2)[0]
tm.assert_numpy_array_equal(result, expected)
+ def test_get_indexer_mismatched_dtype_different_length(self, non_comparable_idx):
+ # without method we arent checking inequalities, so get all-missing
+ # but do not raise
+ dti = date_range("2016-01-01", periods=3)
+ pi = dti.to_period("D")
+
+ other = non_comparable_idx
+
+ res = pi[:-1].get_indexer(other)
+ expected = -np.ones(other.shape, dtype=np.intp)
+ tm.assert_numpy_array_equal(res, expected)
+
+ @pytest.mark.parametrize("method", ["pad", "backfill", "nearest"])
+ def test_get_indexer_mismatched_dtype_with_method(self, non_comparable_idx, method):
+ dti = date_range("2016-01-01", periods=3)
+ pi = dti.to_period("D")
+
+ other = non_comparable_idx
+
+ msg = re.escape(f"Cannot compare dtypes {pi.dtype} and {other.dtype}")
+ with pytest.raises(TypeError, match=msg):
+ pi.get_indexer(other, method=method)
+
+ for dtype in ["object", "category"]:
+ other2 = other.astype(dtype)
+ if dtype == "object" and isinstance(other, PeriodIndex):
+ continue
+ # For object dtype we are liable to get a different exception message
+ with pytest.raises(TypeError):
+ pi.get_indexer(other2, method=method)
+
def test_get_indexer_non_unique(self):
# GH 17717
p1 = Period("2017-09-02")
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Index._is_comparable_dtype is limited because it misses a few cases: object dtype, and sub-dtypes within categorical. This implements Indes._should_compare, which handles those correctly. It then implements `_get_indexer_non_comparable` for cases in which we short-circuit non-comparable dtypes.
As a proof of concept, this then uses _should_compare and _get_indexer_non_comparable for `PeriodIndex.get_indexer`.
The behavior change, which this tests, is one that IIUC is a bug. That is, when we do get_indexer with a method and non-comparable dtypes, we should raise instead of return all minus-ones.
If implemented, we'll be able to use _should_compare to simplify all of the get_indexer, get_indexer_non_unique, and set op methods. | https://api.github.com/repos/pandas-dev/pandas/pulls/38105 | 2020-11-27T04:11:58Z | 2020-12-02T01:51:11Z | 2020-12-02T01:51:11Z | 2020-12-02T02:35:02Z |
PERF: Index.searchsorted | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index afd6bbb6c57e0..7bae912a070a9 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1804,7 +1804,7 @@ def func(arr, indexer, out, fill_value=np.nan):
# ------------ #
-def searchsorted(arr, value, side="left", sorter=None):
+def searchsorted(arr, value, side="left", sorter=None) -> np.ndarray:
"""
Find indices where elements should be inserted to maintain order.
@@ -1853,7 +1853,7 @@ def searchsorted(arr, value, side="left", sorter=None):
if (
isinstance(arr, np.ndarray)
- and is_integer_dtype(arr)
+ and is_integer_dtype(arr.dtype)
and (is_integer(value) or is_integer_dtype(value))
):
# if `arr` and `value` have different dtypes, `arr` would be
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index 6c35b882b5d67..3f146e273326c 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -248,6 +248,10 @@ def __getitem__(self, key):
deprecate_ndim_indexing(result)
return result
+ def searchsorted(self, value, side="left", sorter=None) -> np.ndarray:
+ # overriding IndexOpsMixin improves performance GH#38083
+ return self._data.searchsorted(value, side=side, sorter=sorter)
+
# ---------------------------------------------------------------------
def _check_indexing_method(self, method):
| - [x] closes #38083
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38103 | 2020-11-27T02:44:09Z | 2020-11-27T07:24:04Z | 2020-11-27T07:24:03Z | 2020-11-27T15:25:02Z |
REF: use _validate_fill_value in Index.insert | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 7d2e3746c4b94..3274725016b40 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -20,7 +20,7 @@
import numpy as np
-from pandas._libs import lib, tslib
+from pandas._libs import lib, missing as libmissing, tslib
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
@@ -519,6 +519,11 @@ def maybe_promote(dtype, fill_value=np.nan):
Upcasted from dtype argument if necessary.
fill_value
Upcasted from fill_value argument if necessary.
+
+ Raises
+ ------
+ ValueError
+ If fill_value is a non-scalar and dtype is not object.
"""
if not is_scalar(fill_value) and not is_object_dtype(dtype):
# with object dtype there is nothing to promote, and the user can
@@ -550,6 +555,9 @@ def maybe_promote(dtype, fill_value=np.nan):
dtype = np.dtype(np.object_)
elif is_integer(fill_value) or (is_float(fill_value) and not isna(fill_value)):
dtype = np.dtype(np.object_)
+ elif is_valid_nat_for_dtype(fill_value, dtype):
+ # e.g. pd.NA, which is not accepted by Timestamp constructor
+ fill_value = np.datetime64("NaT", "ns")
else:
try:
fill_value = Timestamp(fill_value).to_datetime64()
@@ -563,6 +571,9 @@ def maybe_promote(dtype, fill_value=np.nan):
):
# TODO: What about str that can be a timedelta?
dtype = np.dtype(np.object_)
+ elif is_valid_nat_for_dtype(fill_value, dtype):
+ # e.g pd.NA, which is not accepted by the Timedelta constructor
+ fill_value = np.timedelta64("NaT", "ns")
else:
try:
fv = Timedelta(fill_value)
@@ -636,7 +647,7 @@ def maybe_promote(dtype, fill_value=np.nan):
# e.g. mst is np.complex128 and dtype is np.complex64
dtype = mst
- elif fill_value is None:
+ elif fill_value is None or fill_value is libmissing.NA:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
@@ -646,7 +657,8 @@ def maybe_promote(dtype, fill_value=np.nan):
fill_value = dtype.type("NaT", "ns")
else:
dtype = np.dtype(np.object_)
- fill_value = np.nan
+ if fill_value is not libmissing.NA:
+ fill_value = np.nan
else:
dtype = np.dtype(np.object_)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index e83bc9c1448eb..2261a6a20e58f 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -35,6 +35,7 @@
from pandas.core.dtypes.cast import (
find_common_type,
maybe_cast_to_integer_array,
+ maybe_promote,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
@@ -4196,28 +4197,15 @@ def _string_data_error(cls, data):
"to explicitly cast to a numeric type"
)
- @final
- def _coerce_scalar_to_index(self, item):
- """
- We need to coerce a scalar to a compat for our index type.
-
- Parameters
- ----------
- item : scalar item to coerce
- """
- dtype = self.dtype
-
- if self._is_numeric_dtype and isna(item):
- # We can't coerce to the numeric dtype of "self" (unless
- # it's float) if there are NaN values in our output.
- dtype = None
-
- return Index([item], dtype=dtype, **self._get_attributes_dict())
-
def _validate_fill_value(self, value):
"""
- Check if the value can be inserted into our array, and convert
- it to an appropriate native type if necessary.
+ Check if the value can be inserted into our array without casting,
+ and convert it to an appropriate native type if necessary.
+
+ Raises
+ ------
+ TypeError
+ If the value cannot be inserted into an array of this dtype.
"""
return value
@@ -5583,8 +5571,22 @@ def insert(self, loc: int, item):
"""
# Note: this method is overridden by all ExtensionIndex subclasses,
# so self is never backed by an EA.
+
+ try:
+ item = self._validate_fill_value(item)
+ except TypeError:
+ if is_scalar(item):
+ dtype, item = maybe_promote(self.dtype, item)
+ else:
+ # maybe_promote would raise ValueError
+ dtype = np.dtype(object)
+
+ return self.astype(dtype).insert(loc, item)
+
arr = np.asarray(self)
- item = self._coerce_scalar_to_index(item)._values
+
+ # Use Index constructor to ensure we get tuples cast correctly.
+ item = Index([item], dtype=self.dtype)._values
idx = np.concatenate((arr[:loc], item, arr[loc:]))
return Index(idx, name=self.name)
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index ed76e26a57634..117200ee53116 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -16,6 +16,7 @@
is_float,
is_float_dtype,
is_integer_dtype,
+ is_number,
is_numeric_dtype,
is_scalar,
is_signed_integer_dtype,
@@ -112,23 +113,36 @@ def _shallow_copy(self, values=None, name: Label = lib.no_default):
return Float64Index._simple_new(values, name=name)
return super()._shallow_copy(values=values, name=name)
+ @doc(Index._validate_fill_value)
def _validate_fill_value(self, value):
- """
- Convert value to be insertable to ndarray.
- """
if is_bool(value) or is_bool_dtype(value):
# force conversion to object
# so we don't lose the bools
raise TypeError
- elif isinstance(value, str) or lib.is_complex(value):
- raise TypeError
elif is_scalar(value) and isna(value):
if is_valid_nat_for_dtype(value, self.dtype):
value = self._na_value
+ if self.dtype.kind != "f":
+ # raise so that caller can cast
+ raise TypeError
else:
# NaT, np.datetime64("NaT"), np.timedelta64("NaT")
raise TypeError
+ elif is_scalar(value):
+ if not is_number(value):
+ # e.g. datetime64, timedelta64, datetime, ...
+ raise TypeError
+
+ elif lib.is_complex(value):
+ # at least until we have a ComplexIndx
+ raise TypeError
+
+ elif is_float(value) and self.dtype.kind != "f":
+ if not value.is_integer():
+ raise TypeError
+ value = int(value)
+
return value
def _convert_tolerance(self, tolerance, target):
@@ -168,15 +182,6 @@ def _is_all_dates(self) -> bool:
"""
return False
- @doc(Index.insert)
- def insert(self, loc: int, item):
- try:
- item = self._validate_fill_value(item)
- except TypeError:
- return self.astype(object).insert(loc, item)
-
- return super().insert(loc, item)
-
def _union(self, other, sort):
# Right now, we treat union(int, float) a bit special.
# See https://github.com/pandas-dev/pandas/issues/26778 for discussion
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index 74a11c9f33195..294abafa86812 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -110,6 +110,8 @@ def _assert_match(result_fill_value, expected_fill_value):
assert res_type == ex_type or res_type.__name__ == ex_type.__name__
match_value = result_fill_value == expected_fill_value
+ if match_value is pd.NA:
+ match_value = False
# Note: type check above ensures that we have the _same_ NA value
# for missing values, None == None (which is checked
@@ -569,8 +571,8 @@ def test_maybe_promote_any_with_object(any_numpy_dtype_reduced, object_dtype):
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
-@pytest.mark.parametrize("fill_value", [None, np.nan, NaT])
-def test_maybe_promote_any_numpy_dtype_with_na(any_numpy_dtype_reduced, fill_value):
+def test_maybe_promote_any_numpy_dtype_with_na(any_numpy_dtype_reduced, nulls_fixture):
+ fill_value = nulls_fixture
dtype = np.dtype(any_numpy_dtype_reduced)
if is_integer_dtype(dtype) and fill_value is not NaT:
@@ -597,7 +599,10 @@ def test_maybe_promote_any_numpy_dtype_with_na(any_numpy_dtype_reduced, fill_val
else:
# all other cases cast to object, and use np.nan as missing value
expected_dtype = np.dtype(object)
- exp_val_for_scalar = np.nan
+ if fill_value is pd.NA:
+ exp_val_for_scalar = pd.NA
+ else:
+ exp_val_for_scalar = np.nan
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38102 | 2020-11-27T01:39:00Z | 2020-12-13T17:38:59Z | 2020-12-13T17:38:59Z | 2020-12-13T18:26:11Z |
BUG: Series.at returning Series with one element instead of scalar | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 27511c96faa5a..8491d939f27ac 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -625,6 +625,7 @@ Indexing
- Bug in :meth:`DataFrame.iloc` and :meth:`Series.iloc` aligning objects in ``__setitem__`` (:issue:`22046`)
- Bug in :meth:`DataFrame.loc` did not raise ``KeyError`` when missing combination was given with ``slice(None)`` for remaining levels (:issue:`19556`)
- Bug in :meth:`DataFrame.loc` raising ``TypeError`` when non-integer slice was given to select values from :class:`MultiIndex` (:issue:`25165`, :issue:`24263`)
+- Bug in :meth:`Series.at` returning :class:`Series` with one element instead of scalar when index is a :class:`MultiIndex` with one level (:issue:`38053`)
- Bug in :meth:`DataFrame.loc` returning and assigning elements in wrong order when indexer is differently ordered than the :class:`MultiIndex` to filter (:issue:`31330`, :issue:`34603`)
- Bug in :meth:`DataFrame.loc` and :meth:`DataFrame.__getitem__` raising ``KeyError`` when columns were :class:`MultiIndex` with only one level (:issue:`29749`)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 4b67deb2d102c..cc29c310aa7a2 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2527,6 +2527,10 @@ def _get_values_for_loc(self, series: "Series", loc, key):
if is_scalar(loc):
return new_values
+ if len(new_values) == 1 and not self.nlevels > 1:
+ # If more than one level left, we can not return a scalar
+ return new_values[0]
+
new_index = self[loc]
new_index = maybe_droplevels(new_index, key)
new_ser = series._constructor(new_values, index=new_index, name=series.name)
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index dd01f4e6a4f49..ce48fd1e5c905 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -268,35 +268,41 @@ def test_at_with_tuple_index_set():
assert series.at[1, 2] == 3
-def test_multiindex_at_get():
- # GH 26989
- # DataFrame.at and DataFrame.loc getter works with MultiIndex
- df = DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]])
- assert df.index.nlevels == 2
- assert df.at[(1, 3), "a"] == 1
- assert df.loc[(1, 3), "a"] == 1
-
- # Series.at and Series.loc getter works with MultiIndex
- series = df["a"]
- assert series.index.nlevels == 2
- assert series.at[1, 3] == 1
- assert series.loc[1, 3] == 1
-
-
-def test_multiindex_at_set():
- # GH 26989
- # DataFrame.at and DataFrame.loc setter works with MultiIndex
- df = DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]])
- assert df.index.nlevels == 2
- df.at[(1, 3), "a"] = 3
- assert df.at[(1, 3), "a"] == 3
- df.loc[(1, 3), "a"] = 4
- assert df.loc[(1, 3), "a"] == 4
-
- # Series.at and Series.loc setter works with MultiIndex
- series = df["a"]
- assert series.index.nlevels == 2
- series.at[1, 3] = 5
- assert series.at[1, 3] == 5
- series.loc[1, 3] = 6
- assert series.loc[1, 3] == 6
+class TestMultiIndexScalar:
+ def test_multiindex_at_get(self):
+ # GH 26989
+ # DataFrame.at and DataFrame.loc getter works with MultiIndex
+ df = DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]])
+ assert df.index.nlevels == 2
+ assert df.at[(1, 3), "a"] == 1
+ assert df.loc[(1, 3), "a"] == 1
+
+ # Series.at and Series.loc getter works with MultiIndex
+ series = df["a"]
+ assert series.index.nlevels == 2
+ assert series.at[1, 3] == 1
+ assert series.loc[1, 3] == 1
+
+ def test_multiindex_at_set(self):
+ # GH 26989
+ # DataFrame.at and DataFrame.loc setter works with MultiIndex
+ df = DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]])
+ assert df.index.nlevels == 2
+ df.at[(1, 3), "a"] = 3
+ assert df.at[(1, 3), "a"] == 3
+ df.loc[(1, 3), "a"] = 4
+ assert df.loc[(1, 3), "a"] == 4
+
+ # Series.at and Series.loc setter works with MultiIndex
+ series = df["a"]
+ assert series.index.nlevels == 2
+ series.at[1, 3] = 5
+ assert series.at[1, 3] == 5
+ series.loc[1, 3] = 6
+ assert series.loc[1, 3] == 6
+
+ def test_multiindex_at_get_one_level(self):
+ # GH#38053
+ s2 = Series((0, 1), index=[[False, True]])
+ result = s2.at[False]
+ assert result == 0
| - [x] closes #38053
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I think if we are at this place and len(values) is 1, we want to return a scalar instead of a Series. This happens only because loc is a slice (0,1) with one MultiIndex level instead of a scalar.
Should I create a file to move all MultiIndex at tests to the MultiIndex folder? | https://api.github.com/repos/pandas-dev/pandas/pulls/38101 | 2020-11-26T21:44:00Z | 2020-11-29T15:59:11Z | 2020-11-29T15:59:10Z | 2020-11-29T16:04:01Z |
BUG: fix wrong error message in deprecated 2D indexing of Series with datetime values | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 3aaa376242fea..d912c908815f8 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -2393,6 +2393,28 @@ def quantile(self, qs, interpolation="linear", axis=0):
aware = self._holder(res_blk.values.ravel(), dtype=self.dtype)
return self.make_block_same_class(aware, ndim=res_blk.ndim)
+ def _check_ndim(self, values, ndim):
+ """
+ ndim inference and validation.
+
+ This is overriden by the DatetimeTZBlock to check the case of 2D
+ data (values.ndim == 2), which should only be allowed if ndim is
+ also 2.
+ The case of 1D array is still allowed with both ndim of 1 or 2, as
+ if the case for other EAs. Therefore, we are only checking
+ `values.ndim > ndim` instead of `values.ndim != ndim` as for
+ consolidated blocks.
+ """
+ if ndim is None:
+ ndim = values.ndim
+
+ if values.ndim > ndim:
+ raise ValueError(
+ "Wrong number of dimensions. "
+ f"values.ndim != ndim [{values.ndim} != {ndim}]"
+ )
+ return ndim
+
class TimeDeltaBlock(DatetimeLikeBlockMixin):
__slots__ = ()
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b1b5d16eaf7f0..bfc8e7121aa91 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -916,7 +916,8 @@ def _get_values(self, indexer):
except ValueError:
# mpl compat if we look up e.g. ser[:, np.newaxis];
# see tests.series.timeseries.test_mpl_compat_hack
- return self._values[indexer]
+ # the asarray is needed to avoid returning a 2D DatetimeArray
+ return np.asarray(self._values[indexer])
def _get_value(self, label, takeable: bool = False):
"""
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 3686337141420..b4c30cb6d4cd2 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -389,10 +389,22 @@ def test_getitem_generator(string_series):
tm.assert_series_equal(result2, expected)
-def test_getitem_ndim_deprecated():
- s = Series([0, 1])
- with tm.assert_produces_warning(FutureWarning):
- s[:, None]
+@pytest.mark.parametrize(
+ "series",
+ [
+ Series([0, 1]),
+ Series(date_range("2012-01-01", periods=2)),
+ Series(date_range("2012-01-01", periods=2, tz="CET")),
+ ],
+)
+def test_getitem_ndim_deprecated(series):
+ with tm.assert_produces_warning(
+ FutureWarning, match="Support for multi-dimensional indexing"
+ ):
+ result = series[:, None]
+
+ expected = np.asarray(series)[:, None]
+ tm.assert_numpy_array_equal(result, expected)
def test_getitem_multilevel_scalar_slice_not_implemented(
| Closes #35527
For most Series types, this was properly raising the deprecation warning about 2D being deprecated, but for Series with datetimetz values, this started raising an AssertionError instead of only raising the warning.
| https://api.github.com/repos/pandas-dev/pandas/pulls/38099 | 2020-11-26T21:37:12Z | 2020-12-01T00:14:53Z | 2020-12-01T00:14:52Z | 2020-12-01T10:02:42Z |
API: CategoricalIndex.append fallback to concat_compat | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 6aff4f4bd41e2..61e4a6358de0b 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -454,6 +454,7 @@ Other API changes
- Passing an invalid ``fill_value`` to :meth:`Series.shift` with a ``CategoricalDtype`` now raises a ``TypeError`` instead of a ``ValueError`` (:issue:`37733`)
- Passing an invalid value to :meth:`IntervalIndex.insert` or :meth:`CategoricalIndex.insert` now raises a ``TypeError`` instead of a ``ValueError`` (:issue:`37733`)
- Attempting to reindex a Series with a :class:`CategoricalIndex` with an invalid ``fill_value`` now raises a ``TypeError`` instead of a ``ValueError`` (:issue:`37733`)
+- :meth:`CategoricalIndex.append` with an index that contains non-category values will now cast instead of raising ``TypeError`` (:issue:`38098`)
.. ---------------------------------------------------------------------------
@@ -634,6 +635,7 @@ Indexing
- Bug in :meth:`DataFrame.loc` returning and assigning elements in wrong order when indexer is differently ordered than the :class:`MultiIndex` to filter (:issue:`31330`, :issue:`34603`)
- Bug in :meth:`DataFrame.loc` and :meth:`DataFrame.__getitem__` raising ``KeyError`` when columns were :class:`MultiIndex` with only one level (:issue:`29749`)
- Bug in :meth:`Series.__getitem__` and :meth:`DataFrame.__getitem__` raising blank ``KeyError`` without missing keys for :class:`IntervalIndex` (:issue:`27365`)
+- Bug in setting a new label on a :class:`DataFrame` or :class:`Series` with a :class:`CategoricalIndex` incorrectly raising ``TypeError`` when the new label is not among the index's categories (:issue:`38098`)
Missing
^^^^^^^
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index c49f3f9457161..c86652acbcd0f 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4180,12 +4180,6 @@ def _coerce_scalar_to_index(self, item):
return Index([item], dtype=dtype, **self._get_attributes_dict())
- def _to_safe_for_reshape(self):
- """
- Convert to object if we are a categorical.
- """
- return self
-
def _validate_fill_value(self, value):
"""
Check if the value can be inserted into our array, and convert
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 7956b3a623333..abf70fd150345 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -399,10 +399,6 @@ def unique(self, level=None):
# of result, not self.
return type(self)._simple_new(result, name=self.name)
- def _to_safe_for_reshape(self):
- """ convert to object if we are a categorical """
- return self.astype("object")
-
def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
@@ -637,11 +633,19 @@ def map(self, mapper):
mapped = self._values.map(mapper)
return Index(mapped, name=self.name)
- def _concat(self, to_concat: List["Index"], name: Label) -> "CategoricalIndex":
+ def _concat(self, to_concat: List["Index"], name: Label) -> Index:
# if calling index is category, don't check dtype of others
- codes = np.concatenate([self._is_dtype_compat(c).codes for c in to_concat])
- cat = self._data._from_backing_data(codes)
- return type(self)._simple_new(cat, name=name)
+ try:
+ codes = np.concatenate([self._is_dtype_compat(c).codes for c in to_concat])
+ except TypeError:
+ # not all to_concat elements are among our categories (or NA)
+ from pandas.core.dtypes.concat import concat_compat
+
+ res = concat_compat(to_concat)
+ return Index(res, name=name)
+ else:
+ cat = self._data._from_backing_data(codes)
+ return type(self)._simple_new(cat, name=name)
def _delegate_method(self, name: str, *args, **kwargs):
""" method delegation to the ._values """
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 9b4b459d9a122..91711165c15f1 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1684,10 +1684,6 @@ def unique(self, level=None):
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
- def _to_safe_for_reshape(self):
- """ convert to object if we are a categorical """
- return self.set_levels([i._to_safe_for_reshape() for i in self.levels])
-
def to_frame(self, index=True, name=None):
"""
Create a DataFrame with the levels of the MultiIndex as columns.
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 22887cede51ed..40496a5b8671b 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -268,19 +268,13 @@ def _add_margins(
margin_dummy = DataFrame(row_margin, columns=[key]).T
row_names = result.index.names
- try:
- # check the result column and leave floats
- for dtype in set(result.dtypes):
- cols = result.select_dtypes([dtype]).columns
- margin_dummy[cols] = margin_dummy[cols].apply(
- maybe_downcast_to_dtype, args=(dtype,)
- )
- result = result.append(margin_dummy)
- except TypeError:
-
- # we cannot reshape, so coerce the axis
- result.index = result.index._to_safe_for_reshape()
- result = result.append(margin_dummy)
+ # check the result column and leave floats
+ for dtype in set(result.dtypes):
+ cols = result.select_dtypes([dtype]).columns
+ margin_dummy[cols] = margin_dummy[cols].apply(
+ maybe_downcast_to_dtype, args=(dtype,)
+ )
+ result = result.append(margin_dummy)
result.index.names = row_names
return result
@@ -328,16 +322,7 @@ def _all_key(key):
# we are going to mutate this, so need to copy!
piece = piece.copy()
- try:
- piece[all_key] = margin[key]
- except ValueError:
- # we cannot reshape, so coerce the axis
- piece.set_axis(
- piece._get_axis(cat_axis)._to_safe_for_reshape(),
- axis=cat_axis,
- inplace=True,
- )
- piece[all_key] = margin[key]
+ piece[all_key] = margin[key]
table_pieces.append(piece)
margin_keys.append(all_key)
diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index 2e03c00638a5c..3bab57e1d265e 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -57,10 +57,10 @@ def test_append(self):
expected = CategoricalIndex(list("aabbcaca"), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
- # invalid objects
- msg = "cannot append a non-category item to a CategoricalIndex"
- with pytest.raises(TypeError, match=msg):
- ci.append(Index(["a", "d"]))
+ # invalid objects -> cast to object via concat_compat
+ result = ci.append(Index(["a", "d"]))
+ expected = Index(["a", "a", "b", "b", "c", "a", "a", "d"])
+ tm.assert_index_equal(result, expected, exact=True)
# GH14298 - if base object is not categorical -> coerce to object
result = Index(["c", "a"]).append(ci)
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 6fff706e27cd2..1b9b6452b2e33 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -57,9 +57,12 @@ def test_loc_scalar(self):
with pytest.raises(KeyError, match=r"^'d'$"):
df.loc["d"]
- msg = "cannot append a non-category item to a CategoricalIndex"
- with pytest.raises(TypeError, match=msg):
- df.loc["d"] = 10
+ df2 = df.copy()
+ expected = df2.copy()
+ expected.index = expected.index.astype(object)
+ expected.loc["d"] = 10
+ df2.loc["d"] = 10
+ tm.assert_frame_equal(df2, expected)
msg = "'fill_value=d' is not present in this Categorical's categories"
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/reshape/concat/test_categorical.py b/pandas/tests/reshape/concat/test_categorical.py
index 388575c5a3b86..6dae28003d3b6 100644
--- a/pandas/tests/reshape/concat/test_categorical.py
+++ b/pandas/tests/reshape/concat/test_categorical.py
@@ -1,5 +1,4 @@
import numpy as np
-import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
@@ -137,13 +136,18 @@ def test_categorical_index_preserver(self):
).set_index("B")
tm.assert_frame_equal(result, expected)
- # wrong categories
+ # wrong categories -> uses concat_compat, which casts to object
df3 = DataFrame(
{"A": a, "B": Categorical(b, categories=list("abe"))}
).set_index("B")
- msg = "categories must match existing categories when appending"
- with pytest.raises(TypeError, match=msg):
- pd.concat([df2, df3])
+ result = pd.concat([df2, df3])
+ expected = pd.concat(
+ [
+ df2.set_axis(df2.index.astype(object), 0),
+ df3.set_axis(df3.index.astype(object), 0),
+ ]
+ )
+ tm.assert_frame_equal(result, expected)
def test_concat_categorical_tz(self):
# GH-23816
| - [x] closes #14586
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
The main thing being changed here is `CategoricalIndex._concat` to better match the behavior of all the other `FooIndex._concat` methods. Everything else being changed here is just cleanup that this makes possible.
cc @jorisvandenbossche | https://api.github.com/repos/pandas-dev/pandas/pulls/38098 | 2020-11-26T21:26:43Z | 2020-11-29T15:59:57Z | 2020-11-29T15:59:57Z | 2020-11-29T16:17:11Z |
PERF: replace_list | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index f6ff38201fdfa..74b5a184df95d 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -861,7 +861,15 @@ def _replace_list(
"""
See BlockManager._replace_list docstring.
"""
- src_len = len(src_list) - 1
+ # Exclude anything that we know we won't contain
+ pairs = [
+ (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x)
+ ]
+ if not len(pairs):
+ # shortcut, nothing to replace
+ return [self] if inplace else [self.copy()]
+
+ src_len = len(pairs) - 1
def comp(s: Scalar, mask: np.ndarray, regex: bool = False) -> np.ndarray:
"""
@@ -874,15 +882,19 @@ def comp(s: Scalar, mask: np.ndarray, regex: bool = False) -> np.ndarray:
s = maybe_box_datetimelike(s)
return compare_or_regex_search(self.values, s, regex, mask)
- # Calculate the mask once, prior to the call of comp
- # in order to avoid repeating the same computations
- mask = ~isna(self.values)
+ if self.is_object:
+ # Calculate the mask once, prior to the call of comp
+ # in order to avoid repeating the same computations
+ mask = ~isna(self.values)
+ masks = [comp(s[0], mask, regex) for s in pairs]
+ else:
+ # GH#38086 faster if we know we dont need to check for regex
+ masks = [missing.mask_missing(self.values, s[0]) for s in pairs]
- masks = [comp(s, mask, regex) for s in src_list]
masks = [_extract_bool_array(x) for x in masks]
rb = [self if inplace else self.copy()]
- for i, (src, dest) in enumerate(zip(src_list, dest_list)):
+ for i, (src, dest) in enumerate(pairs):
new_rb: List["Block"] = []
for blk in rb:
m = masks[i]
@@ -1037,7 +1049,7 @@ def _putmask_simple(self, mask: np.ndarray, value: Any):
if lib.is_scalar(value) and isinstance(values, np.ndarray):
value = convert_scalar_for_putitemlike(value, values.dtype)
- if self.is_extension or self.is_object:
+ if self.is_extension or (self.is_object and not lib.is_scalar(value)):
# GH#19266 using np.putmask gives unexpected results with listlike value
if is_list_like(value) and len(value) == len(values):
values[mask] = value[mask]
diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py
index 565debb98d8cc..713b0e86067a5 100644
--- a/pandas/tests/series/methods/test_replace.py
+++ b/pandas/tests/series/methods/test_replace.py
@@ -75,10 +75,9 @@ def test_replace(self, datetime_series):
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
- # make sure that we aren't just masking a TypeError because bools don't
- # implement indexing
- with pytest.raises(TypeError, match="Cannot compare types .+"):
- ser.replace([1, 2], [np.nan, 0])
+ # ser is dt64 so can't hold 1 or 2, so this replace is a no-op
+ result = ser.replace([1, 2], [np.nan, 0])
+ tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
closes #38086, this gets us back to (very slightly ahead of) 1.1.4 performance for the cases there, will leave to @jorisvandenbossche whether that is enough to close the issue. | https://api.github.com/repos/pandas-dev/pandas/pulls/38097 | 2020-11-26T21:19:47Z | 2020-11-27T01:01:26Z | 2020-11-27T01:01:26Z | 2020-11-27T01:20:07Z |
TST: Suppress http logs in tests | diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py
index e9f228b5973b5..bcc666a88e3be 100644
--- a/pandas/tests/io/conftest.py
+++ b/pandas/tests/io/conftest.py
@@ -1,3 +1,4 @@
+import logging
import os
import shlex
import subprocess
@@ -49,6 +50,8 @@ def s3_base(worker_id):
pytest.importorskip("s3fs")
pytest.importorskip("boto3")
requests = pytest.importorskip("requests")
+ # GH 38090: Suppress http logs in tests by moto_server
+ logging.getLogger("werkzeug").disabled = True
with tm.ensure_safe_environment_variables():
# temporary workaround as moto fails for botocore >= 1.11 otherwise,
| - [x] closes #38090
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Confirming that this does the trick. | https://api.github.com/repos/pandas-dev/pandas/pulls/38096 | 2020-11-26T19:32:25Z | 2020-11-28T17:33:57Z | 2020-11-28T17:33:56Z | 2020-11-28T18:33:18Z |
CLN: testing window namespace | diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py
index ceabf71747cb8..802ece77fd36d 100644
--- a/pandas/tests/window/moments/test_moments_consistency_rolling.py
+++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py
@@ -5,8 +5,7 @@
import pandas.util._test_decorators as td
-import pandas as pd
-from pandas import DataFrame, DatetimeIndex, Index, Series
+from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, Series
import pandas._testing as tm
from pandas.core.window.common import flex_binary_moment
@@ -240,7 +239,7 @@ def test_rolling_functions_window_non_shrinkage_binary(f):
)
df_expected = DataFrame(
columns=Index(["A", "B"], name="foo"),
- index=pd.MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),
+ index=MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),
dtype="float64",
)
df_result = f(df)
@@ -482,12 +481,10 @@ def test_moment_functions_zero_length_pairwise(f):
df2["a"] = df2["a"].astype("float64")
df1_expected = DataFrame(
- index=pd.MultiIndex.from_product([df1.index, df1.columns]), columns=Index([])
+ index=MultiIndex.from_product([df1.index, df1.columns]), columns=Index([])
)
df2_expected = DataFrame(
- index=pd.MultiIndex.from_product(
- [df2.index, df2.columns], names=["bar", "foo"]
- ),
+ index=MultiIndex.from_product([df2.index, df2.columns], names=["bar", "foo"]),
columns=Index(["a"], name="foo"),
dtype="float64",
)
diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py
index 39b3a9a630760..ac6dd0bad619a 100644
--- a/pandas/tests/window/moments/test_moments_rolling.py
+++ b/pandas/tests/window/moments/test_moments_rolling.py
@@ -3,8 +3,7 @@
import pandas.util._test_decorators as td
-import pandas as pd
-from pandas import DataFrame, Series
+from pandas import DataFrame, Series, date_range
import pandas._testing as tm
@@ -546,7 +545,7 @@ def test_rolling_quantile_np_percentile():
# is analogous to Numpy's percentile
row = 10
col = 5
- idx = pd.date_range("20100101", periods=row, freq="B")
+ idx = date_range("20100101", periods=row, freq="B")
df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py
index 6ce425e2575db..52c629f96b713 100644
--- a/pandas/tests/window/test_api.py
+++ b/pandas/tests/window/test_api.py
@@ -1,8 +1,17 @@
import numpy as np
import pytest
-import pandas as pd
-from pandas import DataFrame, Index, Series, Timestamp, concat
+from pandas import (
+ DataFrame,
+ Index,
+ MultiIndex,
+ Period,
+ Series,
+ Timestamp,
+ concat,
+ date_range,
+ timedelta_range,
+)
import pandas._testing as tm
from pandas.core.base import SpecificationError
@@ -78,7 +87,7 @@ def test_agg():
result = r.aggregate([np.mean, np.std])
expected = concat([a_mean, a_std, b_mean, b_std], axis=1)
- expected.columns = pd.MultiIndex.from_product([["A", "B"], ["mean", "std"]])
+ expected.columns = MultiIndex.from_product([["A", "B"], ["mean", "std"]])
tm.assert_frame_equal(result, expected)
result = r.aggregate({"A": np.mean, "B": np.std})
@@ -88,7 +97,7 @@ def test_agg():
result = r.aggregate({"A": ["mean", "std"]})
expected = concat([a_mean, a_std], axis=1)
- expected.columns = pd.MultiIndex.from_tuples([("A", "mean"), ("A", "std")])
+ expected.columns = MultiIndex.from_tuples([("A", "mean"), ("A", "std")])
tm.assert_frame_equal(result, expected)
result = r["A"].aggregate(["mean", "sum"])
@@ -110,7 +119,7 @@ def test_agg():
expected = concat([a_mean, a_std, b_mean, b_std], axis=1)
exp_cols = [("A", "mean"), ("A", "std"), ("B", "mean"), ("B", "std")]
- expected.columns = pd.MultiIndex.from_tuples(exp_cols)
+ expected.columns = MultiIndex.from_tuples(exp_cols)
tm.assert_frame_equal(result, expected, check_like=True)
@@ -134,7 +143,7 @@ def test_agg_consistency():
r = df.rolling(window=3)
result = r.agg([np.sum, np.mean]).columns
- expected = pd.MultiIndex.from_product([list("AB"), ["sum", "mean"]])
+ expected = MultiIndex.from_product([list("AB"), ["sum", "mean"]])
tm.assert_index_equal(result, expected)
result = r["A"].agg([np.sum, np.mean]).columns
@@ -142,7 +151,7 @@ def test_agg_consistency():
tm.assert_index_equal(result, expected)
result = r.agg({"A": [np.sum, np.mean]}).columns
- expected = pd.MultiIndex.from_tuples([("A", "sum"), ("A", "mean")])
+ expected = MultiIndex.from_tuples([("A", "sum"), ("A", "mean")])
tm.assert_index_equal(result, expected)
@@ -159,7 +168,7 @@ def test_agg_nested_dicts():
expected = concat(
[r["A"].mean(), r["A"].std(), r["B"].mean(), r["B"].std()], axis=1
)
- expected.columns = pd.MultiIndex.from_tuples(
+ expected.columns = MultiIndex.from_tuples(
[("ra", "mean"), ("ra", "std"), ("rb", "mean"), ("rb", "std")]
)
with pytest.raises(SpecificationError, match=msg):
@@ -191,21 +200,21 @@ def test_count_nonnumeric_types():
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
- "datetime": pd.date_range("20170101", periods=3),
- "timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
+ "datetime": date_range("20170101", periods=3),
+ "timedelta": timedelta_range("1 s", periods=3, freq="s"),
"periods": [
- pd.Period("2012-01"),
- pd.Period("2012-02"),
- pd.Period("2012-03"),
+ Period("2012-01"),
+ Period("2012-02"),
+ Period("2012-03"),
],
"fl_inf": [1.0, 2.0, np.Inf],
"fl_nan": [1.0, 2.0, np.NaN],
"str_nan": ["aa", "bb", np.NaN],
"dt_nat": dt_nat_col,
"periods_nat": [
- pd.Period("2012-01"),
- pd.Period("2012-02"),
- pd.Period(None),
+ Period("2012-01"),
+ Period("2012-02"),
+ Period(None),
],
},
columns=cols,
@@ -298,11 +307,11 @@ def test_multiple_agg_funcs(func, window_size, expected_vals):
else:
window = f()
- index = pd.MultiIndex.from_tuples(
+ index = MultiIndex.from_tuples(
[("A", 0), ("A", 1), ("A", 2), ("B", 3), ("B", 4), ("B", 5), ("B", 6)],
names=["stock", None],
)
- columns = pd.MultiIndex.from_tuples(
+ columns = MultiIndex.from_tuples(
[("low", "mean"), ("low", "max"), ("high", "mean"), ("high", "min")]
)
expected = DataFrame(expected_vals, index=index, columns=columns)
diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py
index 3405502e54e70..01804faad5a5e 100644
--- a/pandas/tests/window/test_expanding.py
+++ b/pandas/tests/window/test_expanding.py
@@ -3,8 +3,7 @@
from pandas.errors import UnsupportedFunctionCall
-import pandas as pd
-from pandas import DataFrame, Series
+from pandas import DataFrame, DatetimeIndex, Series
import pandas._testing as tm
from pandas.core.window import Expanding
@@ -82,8 +81,8 @@ def test_empty_df_expanding(expander):
# Verifies that datetime and integer expanding windows can be applied
# to empty DataFrames with datetime index
- expected = DataFrame(index=pd.DatetimeIndex([]))
- result = DataFrame(index=pd.DatetimeIndex([])).expanding(expander).sum()
+ expected = DataFrame(index=DatetimeIndex([]))
+ result = DataFrame(index=DatetimeIndex([])).expanding(expander).sum()
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py
index 7a75ff1cff5bc..f9b5a5fe9a3c1 100644
--- a/pandas/tests/window/test_groupby.py
+++ b/pandas/tests/window/test_groupby.py
@@ -1,9 +1,9 @@
import numpy as np
import pytest
-import pandas as pd
-from pandas import DataFrame, MultiIndex, Series
+from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range, to_datetime
import pandas._testing as tm
+from pandas.api.indexers import BaseIndexer
from pandas.core.groupby.groupby import get_groupby
@@ -133,7 +133,7 @@ def test_rolling_apply_mutability(self):
df = DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6})
g = df.groupby("A")
- mi = pd.MultiIndex.from_tuples(
+ mi = MultiIndex.from_tuples(
[("bar", 3), ("bar", 4), ("bar", 5), ("foo", 0), ("foo", 1), ("foo", 2)]
)
@@ -162,9 +162,7 @@ def foo(x):
result = df.groupby("id").value.rolling(1).apply(foo, raw=raw_value)
expected = Series(
[expected_value] * 3,
- index=pd.MultiIndex.from_tuples(
- ((1, 0), (1, 1), (1, 2)), names=["id", None]
- ),
+ index=MultiIndex.from_tuples(((1, 0), (1, 1), (1, 2)), names=["id", None]),
name="value",
)
tm.assert_series_equal(result, expected)
@@ -175,7 +173,7 @@ def test_groupby_rolling_center_center(self):
result = series.groupby(series).rolling(center=True, window=3).mean()
expected = Series(
[np.nan] * 5,
- index=pd.MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3), (5, 4))),
+ index=MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3), (5, 4))),
)
tm.assert_series_equal(result, expected)
@@ -183,7 +181,7 @@ def test_groupby_rolling_center_center(self):
result = series.groupby(series).rolling(center=True, window=3).mean()
expected = Series(
[np.nan] * 4,
- index=pd.MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3))),
+ index=MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3))),
)
tm.assert_series_equal(result, expected)
@@ -191,7 +189,7 @@ def test_groupby_rolling_center_center(self):
result = df.groupby("a").rolling(center=True, window=3).mean()
expected = DataFrame(
[np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, 9, np.nan],
- index=pd.MultiIndex.from_tuples(
+ index=MultiIndex.from_tuples(
(
("a", 0),
("a", 1),
@@ -215,7 +213,7 @@ def test_groupby_rolling_center_center(self):
result = df.groupby("a").rolling(center=True, window=3).mean()
expected = DataFrame(
[np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, np.nan],
- index=pd.MultiIndex.from_tuples(
+ index=MultiIndex.from_tuples(
(
("a", 0),
("a", 1),
@@ -238,7 +236,7 @@ def test_groupby_rolling_center_on(self):
# GH 37141
df = DataFrame(
data={
- "Date": pd.date_range("2020-01-01", "2020-01-10"),
+ "Date": date_range("2020-01-01", "2020-01-10"),
"gb": ["group_1"] * 6 + ["group_2"] * 4,
"value": range(10),
}
@@ -251,18 +249,18 @@ def test_groupby_rolling_center_on(self):
expected = Series(
[1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 7.0, 7.5, 7.5, 7.5],
name="value",
- index=pd.MultiIndex.from_tuples(
+ index=MultiIndex.from_tuples(
(
- ("group_1", pd.Timestamp("2020-01-01")),
- ("group_1", pd.Timestamp("2020-01-02")),
- ("group_1", pd.Timestamp("2020-01-03")),
- ("group_1", pd.Timestamp("2020-01-04")),
- ("group_1", pd.Timestamp("2020-01-05")),
- ("group_1", pd.Timestamp("2020-01-06")),
- ("group_2", pd.Timestamp("2020-01-07")),
- ("group_2", pd.Timestamp("2020-01-08")),
- ("group_2", pd.Timestamp("2020-01-09")),
- ("group_2", pd.Timestamp("2020-01-10")),
+ ("group_1", Timestamp("2020-01-01")),
+ ("group_1", Timestamp("2020-01-02")),
+ ("group_1", Timestamp("2020-01-03")),
+ ("group_1", Timestamp("2020-01-04")),
+ ("group_1", Timestamp("2020-01-05")),
+ ("group_1", Timestamp("2020-01-06")),
+ ("group_2", Timestamp("2020-01-07")),
+ ("group_2", Timestamp("2020-01-08")),
+ ("group_2", Timestamp("2020-01-09")),
+ ("group_2", Timestamp("2020-01-10")),
),
names=["gb", "Date"],
),
@@ -305,7 +303,7 @@ def test_groupby_subselect_rolling(self):
expected = DataFrame(
[np.nan, np.nan, 2.0, np.nan],
columns=["b"],
- index=pd.MultiIndex.from_tuples(
+ index=MultiIndex.from_tuples(
((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None]
),
)
@@ -314,7 +312,7 @@ def test_groupby_subselect_rolling(self):
result = df.groupby("a")["b"].rolling(2).max()
expected = Series(
[np.nan, np.nan, 2.0, np.nan],
- index=pd.MultiIndex.from_tuples(
+ index=MultiIndex.from_tuples(
((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None]
),
name="b",
@@ -323,7 +321,7 @@ def test_groupby_subselect_rolling(self):
def test_groupby_rolling_custom_indexer(self):
# GH 35557
- class SimpleIndexer(pd.api.indexers.BaseIndexer):
+ class SimpleIndexer(BaseIndexer):
def get_window_bounds(
self, num_values=0, min_periods=None, center=None, closed=None
):
@@ -351,7 +349,7 @@ def test_groupby_rolling_subset_with_closed(self):
"column1": range(6),
"column2": range(6),
"group": 3 * ["A", "B"],
- "date": [pd.Timestamp("2019-01-01")] * 6,
+ "date": [Timestamp("2019-01-01")] * 6,
}
)
result = (
@@ -359,9 +357,9 @@ def test_groupby_rolling_subset_with_closed(self):
)
expected = Series(
[np.nan, 0.0, 2.0, np.nan, 1.0, 4.0],
- index=pd.MultiIndex.from_tuples(
- [("A", pd.Timestamp("2019-01-01"))] * 3
- + [("B", pd.Timestamp("2019-01-01"))] * 3,
+ index=MultiIndex.from_tuples(
+ [("A", Timestamp("2019-01-01"))] * 3
+ + [("B", Timestamp("2019-01-01"))] * 3,
names=["group", "date"],
),
name="column1",
@@ -375,7 +373,7 @@ def test_groupby_subset_rolling_subset_with_closed(self):
"column1": range(6),
"column2": range(6),
"group": 3 * ["A", "B"],
- "date": [pd.Timestamp("2019-01-01")] * 6,
+ "date": [Timestamp("2019-01-01")] * 6,
}
)
@@ -386,9 +384,9 @@ def test_groupby_subset_rolling_subset_with_closed(self):
)
expected = Series(
[np.nan, 0.0, 2.0, np.nan, 1.0, 4.0],
- index=pd.MultiIndex.from_tuples(
- [("A", pd.Timestamp("2019-01-01"))] * 3
- + [("B", pd.Timestamp("2019-01-01"))] * 3,
+ index=MultiIndex.from_tuples(
+ [("A", Timestamp("2019-01-01"))] * 3
+ + [("B", Timestamp("2019-01-01"))] * 3,
names=["group", "date"],
),
name="column1",
@@ -400,7 +398,7 @@ def test_groupby_rolling_index_changed(self, func):
# GH: #36018 nlevels of MultiIndex changed
ds = Series(
[1, 2, 2],
- index=pd.MultiIndex.from_tuples(
+ index=MultiIndex.from_tuples(
[("a", "x"), ("a", "y"), ("c", "z")], names=["1", "2"]
),
name="a",
@@ -409,7 +407,7 @@ def test_groupby_rolling_index_changed(self, func):
result = getattr(ds.groupby(ds).rolling(2), func)()
expected = Series(
[np.nan, np.nan, 2.0],
- index=pd.MultiIndex.from_tuples(
+ index=MultiIndex.from_tuples(
[(1, "a", "x"), (2, "a", "y"), (2, "c", "z")], names=["a", "1", "2"]
),
name="a",
@@ -420,23 +418,23 @@ def test_groupby_rolling_empty_frame(self):
# GH 36197
expected = DataFrame({"s1": []})
result = expected.groupby("s1").rolling(window=1).sum()
- expected.index = pd.MultiIndex.from_tuples([], names=["s1", None])
+ expected.index = MultiIndex.from_tuples([], names=["s1", None])
tm.assert_frame_equal(result, expected)
expected = DataFrame({"s1": [], "s2": []})
result = expected.groupby(["s1", "s2"]).rolling(window=1).sum()
- expected.index = pd.MultiIndex.from_tuples([], names=["s1", "s2", None])
+ expected.index = MultiIndex.from_tuples([], names=["s1", "s2", None])
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_string_index(self):
# GH: 36727
df = DataFrame(
[
- ["A", "group_1", pd.Timestamp(2019, 1, 1, 9)],
- ["B", "group_1", pd.Timestamp(2019, 1, 2, 9)],
- ["Z", "group_2", pd.Timestamp(2019, 1, 3, 9)],
- ["H", "group_1", pd.Timestamp(2019, 1, 6, 9)],
- ["E", "group_2", pd.Timestamp(2019, 1, 20, 9)],
+ ["A", "group_1", Timestamp(2019, 1, 1, 9)],
+ ["B", "group_1", Timestamp(2019, 1, 2, 9)],
+ ["Z", "group_2", Timestamp(2019, 1, 3, 9)],
+ ["H", "group_1", Timestamp(2019, 1, 6, 9)],
+ ["E", "group_2", Timestamp(2019, 1, 20, 9)],
],
columns=["index", "group", "eventTime"],
).set_index("index")
@@ -447,11 +445,11 @@ def test_groupby_rolling_string_index(self):
result = rolling_groups.apply(lambda df: df.shape[0])
expected = DataFrame(
[
- ["A", "group_1", pd.Timestamp(2019, 1, 1, 9), 1.0],
- ["B", "group_1", pd.Timestamp(2019, 1, 2, 9), 2.0],
- ["H", "group_1", pd.Timestamp(2019, 1, 6, 9), 3.0],
- ["Z", "group_2", pd.Timestamp(2019, 1, 3, 9), 1.0],
- ["E", "group_2", pd.Timestamp(2019, 1, 20, 9), 1.0],
+ ["A", "group_1", Timestamp(2019, 1, 1, 9), 1.0],
+ ["B", "group_1", Timestamp(2019, 1, 2, 9), 2.0],
+ ["H", "group_1", Timestamp(2019, 1, 6, 9), 3.0],
+ ["Z", "group_2", Timestamp(2019, 1, 3, 9), 1.0],
+ ["E", "group_2", Timestamp(2019, 1, 20, 9), 1.0],
],
columns=["index", "group", "eventTime", "count_to_date"],
).set_index(["group", "index"])
@@ -468,7 +466,7 @@ def test_groupby_rolling_no_sort(self):
expected = DataFrame(
np.array([[2.0, 2.0], [1.0, 1.0]]),
columns=["foo", "bar"],
- index=pd.MultiIndex.from_tuples([(2, 0), (1, 1)], names=["foo", None]),
+ index=MultiIndex.from_tuples([(2, 0), (1, 1)], names=["foo", None]),
)
tm.assert_frame_equal(result, expected)
@@ -479,7 +477,7 @@ def test_groupby_rolling_count_closed_on(self):
"column1": range(6),
"column2": range(6),
"group": 3 * ["A", "B"],
- "date": pd.date_range(end="20190101", periods=6),
+ "date": date_range(end="20190101", periods=6),
}
)
result = (
@@ -490,14 +488,14 @@ def test_groupby_rolling_count_closed_on(self):
expected = Series(
[np.nan, 1.0, 1.0, np.nan, 1.0, 1.0],
name="column1",
- index=pd.MultiIndex.from_tuples(
+ index=MultiIndex.from_tuples(
[
- ("A", pd.Timestamp("2018-12-27")),
- ("A", pd.Timestamp("2018-12-29")),
- ("A", pd.Timestamp("2018-12-31")),
- ("B", pd.Timestamp("2018-12-28")),
- ("B", pd.Timestamp("2018-12-30")),
- ("B", pd.Timestamp("2019-01-01")),
+ ("A", Timestamp("2018-12-27")),
+ ("A", Timestamp("2018-12-29")),
+ ("A", Timestamp("2018-12-31")),
+ ("B", Timestamp("2018-12-28")),
+ ("B", Timestamp("2018-12-30")),
+ ("B", Timestamp("2019-01-01")),
],
names=["group", "date"],
),
@@ -516,7 +514,7 @@ def test_groupby_rolling_sem(self, func, kwargs):
result = getattr(df.groupby("a"), func)(**kwargs).sem()
expected = DataFrame(
{"a": [np.nan] * 5, "b": [np.nan, 0.70711, np.nan, 0.70711, 0.70711]},
- index=pd.MultiIndex.from_tuples(
+ index=MultiIndex.from_tuples(
[("a", 0), ("a", 1), ("b", 2), ("b", 3), ("b", 4)], names=["a", None]
),
)
@@ -529,7 +527,7 @@ def test_groupby_rolling_nans_in_index(self, rollings, key):
# GH: 34617
df = DataFrame(
{
- "a": pd.to_datetime(["2020-06-01 12:00", "2020-06-01 14:00", np.nan]),
+ "a": to_datetime(["2020-06-01 12:00", "2020-06-01 14:00", np.nan]),
"b": [1, 2, 3],
"c": [1, 1, 1],
}
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 1cfbb57d582a3..1658cca347786 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -5,9 +5,20 @@
from pandas.errors import UnsupportedFunctionCall
-import pandas as pd
-from pandas import DataFrame, Series, date_range
+from pandas import (
+ DataFrame,
+ DatetimeIndex,
+ MultiIndex,
+ Series,
+ Timedelta,
+ Timestamp,
+ date_range,
+ period_range,
+ to_datetime,
+ to_timedelta,
+)
import pandas._testing as tm
+from pandas.api.indexers import BaseIndexer
from pandas.core.window import Rolling
@@ -61,35 +72,35 @@ def test_invalid_constructor(frame_or_series, w):
c(window=2, min_periods=1, center=w)
-@pytest.mark.parametrize("window", [timedelta(days=3), pd.Timedelta(days=3)])
+@pytest.mark.parametrize("window", [timedelta(days=3), Timedelta(days=3)])
def test_constructor_with_timedelta_window(window):
# GH 15440
n = 10
df = DataFrame(
- {"value": np.arange(n)}, index=pd.date_range("2015-12-24", periods=n, freq="D")
+ {"value": np.arange(n)}, index=date_range("2015-12-24", periods=n, freq="D")
)
expected_data = np.append([0.0, 1.0], np.arange(3.0, 27.0, 3))
result = df.rolling(window=window).sum()
expected = DataFrame(
{"value": expected_data},
- index=pd.date_range("2015-12-24", periods=n, freq="D"),
+ index=date_range("2015-12-24", periods=n, freq="D"),
)
tm.assert_frame_equal(result, expected)
expected = df.rolling("3D").sum()
tm.assert_frame_equal(result, expected)
-@pytest.mark.parametrize("window", [timedelta(days=3), pd.Timedelta(days=3), "3D"])
+@pytest.mark.parametrize("window", [timedelta(days=3), Timedelta(days=3), "3D"])
def test_constructor_timedelta_window_and_minperiods(window, raw):
# GH 15305
n = 10
df = DataFrame(
- {"value": np.arange(n)}, index=pd.date_range("2017-08-08", periods=n, freq="D")
+ {"value": np.arange(n)}, index=date_range("2017-08-08", periods=n, freq="D")
)
expected = DataFrame(
{"value": np.append([np.NaN, 1.0], np.arange(3.0, 27.0, 3))},
- index=pd.date_range("2017-08-08", periods=n, freq="D"),
+ index=date_range("2017-08-08", periods=n, freq="D"),
)
result_roll_sum = df.rolling(window=window, min_periods=2).sum()
result_roll_generic = df.rolling(window=window, min_periods=2).apply(sum, raw=raw)
@@ -129,7 +140,7 @@ def test_closed_fixed_binary_col():
data = [0, 1, 1, 0, 0, 1, 0, 1]
df = DataFrame(
{"binary_col": data},
- index=pd.date_range(start="2020-01-01", freq="min", periods=len(data)),
+ index=date_range(start="2020-01-01", freq="min", periods=len(data)),
)
rolling = df.rolling(window=len(df), closed="left", min_periods=1)
@@ -137,7 +148,7 @@ def test_closed_fixed_binary_col():
expected = DataFrame(
[np.nan, 0, 0.5, 2 / 3, 0.5, 0.4, 0.5, 0.428571],
columns=["binary_col"],
- index=pd.date_range(start="2020-01-01", freq="min", periods=len(data)),
+ index=date_range(start="2020-01-01", freq="min", periods=len(data)),
)
tm.assert_frame_equal(result, expected)
@@ -146,7 +157,7 @@ def test_closed_fixed_binary_col():
def test_closed_empty(closed, arithmetic_win_operators):
# GH 26005
func_name = arithmetic_win_operators
- ser = Series(data=np.arange(5), index=pd.date_range("2000", periods=5, freq="2D"))
+ ser = Series(data=np.arange(5), index=date_range("2000", periods=5, freq="2D"))
roll = ser.rolling("1D", closed=closed)
result = getattr(roll, func_name)()
@@ -157,7 +168,7 @@ def test_closed_empty(closed, arithmetic_win_operators):
@pytest.mark.parametrize("func", ["min", "max"])
def test_closed_one_entry(func):
# GH24718
- ser = Series(data=[2], index=pd.date_range("2000", periods=1))
+ ser = Series(data=[2], index=date_range("2000", periods=1))
result = getattr(ser.rolling("10D", closed="left"), func)()
tm.assert_series_equal(result, Series([np.nan], index=ser.index))
@@ -166,14 +177,12 @@ def test_closed_one_entry(func):
def test_closed_one_entry_groupby(func):
# GH24718
ser = DataFrame(
- data={"A": [1, 1, 2], "B": [3, 2, 1]}, index=pd.date_range("2000", periods=3)
+ data={"A": [1, 1, 2], "B": [3, 2, 1]}, index=date_range("2000", periods=3)
)
result = getattr(
ser.groupby("A", sort=False)["B"].rolling("10D", closed="left"), func
)()
- exp_idx = pd.MultiIndex.from_arrays(
- arrays=[[1, 1, 2], ser.index], names=("A", None)
- )
+ exp_idx = MultiIndex.from_arrays(arrays=[[1, 1, 2], ser.index], names=("A", None))
expected = Series(data=[np.nan, 3, np.nan], index=exp_idx, name="B")
tm.assert_series_equal(result, expected)
@@ -195,7 +204,7 @@ def test_closed_one_entry_groupby(func):
def test_closed_min_max_datetime(input_dtype, func, closed, expected):
# see gh-21704
ser = Series(
- data=np.arange(10).astype(input_dtype), index=pd.date_range("2000", periods=10)
+ data=np.arange(10).astype(input_dtype), index=date_range("2000", periods=10)
)
result = getattr(ser.rolling("3D", closed=closed), func)()
@@ -205,7 +214,7 @@ def test_closed_min_max_datetime(input_dtype, func, closed, expected):
def test_closed_uneven():
# see gh-21704
- ser = Series(data=np.arange(10), index=pd.date_range("2000", periods=10))
+ ser = Series(data=np.arange(10), index=date_range("2000", periods=10))
# uneven
ser = ser.drop(index=ser.index[[1, 5]])
@@ -229,7 +238,7 @@ def test_closed_uneven():
)
def test_closed_min_max_minp(func, closed, expected):
# see gh-21704
- ser = Series(data=np.arange(10), index=pd.date_range("2000", periods=10))
+ ser = Series(data=np.arange(10), index=date_range("2000", periods=10))
ser[ser.index[-3:]] = np.nan
result = getattr(ser.rolling("3D", min_periods=2, closed=closed), func)()
expected = Series(expected, index=ser.index)
@@ -247,7 +256,7 @@ def test_closed_min_max_minp(func, closed, expected):
)
def test_closed_median_quantile(closed, expected):
# GH 26005
- ser = Series(data=np.arange(10), index=pd.date_range("2000", periods=10))
+ ser = Series(data=np.arange(10), index=date_range("2000", periods=10))
roll = ser.rolling("3D", closed=closed)
expected = Series(expected, index=ser.index)
@@ -268,8 +277,8 @@ def tests_empty_df_rolling(roller):
# Verifies that datetime and integer rolling windows can be applied to
# empty DataFrames with datetime index
- expected = DataFrame(index=pd.DatetimeIndex([]))
- result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum()
+ expected = DataFrame(index=DatetimeIndex([]))
+ result = DataFrame(index=DatetimeIndex([])).rolling(roller).sum()
tm.assert_frame_equal(result, expected)
@@ -303,11 +312,9 @@ def test_missing_minp_zero_variable():
# https://github.com/pandas-dev/pandas/pull/18921
x = Series(
[np.nan] * 4,
- index=pd.DatetimeIndex(
- ["2017-01-01", "2017-01-04", "2017-01-06", "2017-01-07"]
- ),
+ index=DatetimeIndex(["2017-01-01", "2017-01-04", "2017-01-06", "2017-01-07"]),
)
- result = x.rolling(pd.Timedelta("2d"), min_periods=0).sum()
+ result = x.rolling(Timedelta("2d"), min_periods=0).sum()
expected = Series(0.0, index=x.index)
tm.assert_series_equal(result, expected)
@@ -315,7 +322,7 @@ def test_missing_minp_zero_variable():
def test_multi_index_names():
# GH 16789, 16825
- cols = pd.MultiIndex.from_product([["A", "B"], ["C", "D", "E"]], names=["1", "2"])
+ cols = MultiIndex.from_product([["A", "B"], ["C", "D", "E"]], names=["1", "2"])
df = DataFrame(np.ones((10, 6)), columns=cols)
result = df.rolling(3).cov()
@@ -366,7 +373,7 @@ def test_rolling_datetime(axis_frame, tz_naive_fixture):
# GH-28192
tz = tz_naive_fixture
df = DataFrame(
- {i: [1] * 2 for i in pd.date_range("2019-8-01", "2019-08-03", freq="D", tz=tz)}
+ {i: [1] * 2 for i in date_range("2019-8-01", "2019-08-03", freq="D", tz=tz)}
)
if axis_frame in [0, "index"]:
result = df.T.rolling("2D", axis=axis_frame).sum().T
@@ -376,11 +383,11 @@ def test_rolling_datetime(axis_frame, tz_naive_fixture):
{
**{
i: [1.0] * 2
- for i in pd.date_range("2019-8-01", periods=1, freq="D", tz=tz)
+ for i in date_range("2019-8-01", periods=1, freq="D", tz=tz)
},
**{
i: [2.0] * 2
- for i in pd.date_range("2019-8-02", "2019-8-03", freq="D", tz=tz)
+ for i in date_range("2019-8-02", "2019-8-03", freq="D", tz=tz)
},
}
)
@@ -390,7 +397,7 @@ def test_rolling_datetime(axis_frame, tz_naive_fixture):
def test_rolling_window_as_string():
# see gh-22590
date_today = datetime.now()
- days = pd.date_range(date_today, date_today + timedelta(365), freq="D")
+ days = date_range(date_today, date_today + timedelta(365), freq="D")
npr = np.random.RandomState(seed=421)
@@ -683,13 +690,13 @@ def test_iter_rolling_datetime(expected, expected_index, window):
[
(
{"level": 0},
- pd.MultiIndex.from_tuples(
+ MultiIndex.from_tuples(
[(0, 0), (0, 0), (1, 1), (1, 1), (1, 1)], names=[None, None]
),
),
(
{"by": "X"},
- pd.MultiIndex.from_tuples(
+ MultiIndex.from_tuples(
[(0, 0), (1, 0), (2, 1), (3, 1), (4, 1)], names=["X", None]
),
),
@@ -717,15 +724,15 @@ def test_rolling_numerical_accuracy_kahan_mean(add):
df = DataFrame(
{"A": [3002399751580331.0 + add, -0.0, -0.0]},
index=[
- pd.Timestamp("19700101 09:00:00"),
- pd.Timestamp("19700101 09:00:03"),
- pd.Timestamp("19700101 09:00:06"),
+ Timestamp("19700101 09:00:00"),
+ Timestamp("19700101 09:00:03"),
+ Timestamp("19700101 09:00:06"),
],
)
result = (
df.resample("1s").ffill().rolling("3s", closed="left", min_periods=3).mean()
)
- dates = pd.date_range("19700101 09:00:00", periods=7, freq="S")
+ dates = date_range("19700101 09:00:00", periods=7, freq="S")
expected = DataFrame(
{
"A": [
@@ -753,8 +760,8 @@ def test_rolling_numerical_accuracy_kahan_sum():
def test_rolling_numerical_accuracy_jump():
# GH: 32761
- index = pd.date_range(start="2020-01-01", end="2020-01-02", freq="60s").append(
- pd.DatetimeIndex(["2020-01-03"])
+ index = date_range(start="2020-01-01", end="2020-01-02", freq="60s").append(
+ DatetimeIndex(["2020-01-03"])
)
data = np.random.rand(len(index))
@@ -775,7 +782,7 @@ def test_rolling_numerical_accuracy_small_values():
def test_rolling_numerical_too_large_numbers():
# GH: 11645
- dates = pd.date_range("2015-01-01", periods=10, freq="D")
+ dates = date_range("2015-01-01", periods=10, freq="D")
ds = Series(data=range(10), index=dates, dtype=np.float64)
ds[2] = -9e33
result = ds.rolling(5).mean()
@@ -823,7 +830,7 @@ def test_rolling_axis_one_with_nan():
@pytest.mark.parametrize(
"value",
- ["test", pd.to_datetime("2019-12-31"), pd.to_timedelta("1 days 06:05:01.00003")],
+ ["test", to_datetime("2019-12-31"), to_timedelta("1 days 06:05:01.00003")],
)
def test_rolling_axis_1_non_numeric_dtypes(value):
# GH: 20649
@@ -849,13 +856,11 @@ def test_rolling_on_df_transposed():
("index", "window"),
[
(
- pd.period_range(start="2020-01-01 08:00", end="2020-01-01 08:08", freq="T"),
+ period_range(start="2020-01-01 08:00", end="2020-01-01 08:08", freq="T"),
"2T",
),
(
- pd.period_range(
- start="2020-01-01 08:00", end="2020-01-01 12:00", freq="30T"
- ),
+ period_range(start="2020-01-01 08:00", end="2020-01-01 12:00", freq="30T"),
"1h",
),
],
@@ -932,18 +937,18 @@ def test_timeoffset_as_window_parameter_for_corr():
1.0000000000000002,
],
},
- index=pd.MultiIndex.from_tuples(
+ index=MultiIndex.from_tuples(
[
- (pd.Timestamp("20130101 09:00:00"), "B"),
- (pd.Timestamp("20130101 09:00:00"), "A"),
- (pd.Timestamp("20130102 09:00:02"), "B"),
- (pd.Timestamp("20130102 09:00:02"), "A"),
- (pd.Timestamp("20130103 09:00:03"), "B"),
- (pd.Timestamp("20130103 09:00:03"), "A"),
- (pd.Timestamp("20130105 09:00:05"), "B"),
- (pd.Timestamp("20130105 09:00:05"), "A"),
- (pd.Timestamp("20130106 09:00:06"), "B"),
- (pd.Timestamp("20130106 09:00:06"), "A"),
+ (Timestamp("20130101 09:00:00"), "B"),
+ (Timestamp("20130101 09:00:00"), "A"),
+ (Timestamp("20130102 09:00:02"), "B"),
+ (Timestamp("20130102 09:00:02"), "A"),
+ (Timestamp("20130103 09:00:03"), "B"),
+ (Timestamp("20130103 09:00:03"), "A"),
+ (Timestamp("20130105 09:00:05"), "B"),
+ (Timestamp("20130105 09:00:05"), "A"),
+ (Timestamp("20130106 09:00:06"), "B"),
+ (Timestamp("20130106 09:00:06"), "A"),
]
),
)
@@ -951,11 +956,11 @@ def test_timeoffset_as_window_parameter_for_corr():
df = DataFrame(
{"B": [0, 1, 2, 4, 3], "A": [7, 4, 6, 9, 3]},
index=[
- pd.Timestamp("20130101 09:00:00"),
- pd.Timestamp("20130102 09:00:02"),
- pd.Timestamp("20130103 09:00:03"),
- pd.Timestamp("20130105 09:00:05"),
- pd.Timestamp("20130106 09:00:06"),
+ Timestamp("20130101 09:00:00"),
+ Timestamp("20130102 09:00:02"),
+ Timestamp("20130103 09:00:03"),
+ Timestamp("20130105 09:00:05"),
+ Timestamp("20130106 09:00:06"),
],
)
@@ -1039,7 +1044,7 @@ def test_rolling_non_monotonic(method, expected):
use_expanding = [True, False, True, False, True, True, True, True]
df = DataFrame({"values": np.arange(len(use_expanding)) ** 2})
- class CustomIndexer(pd.api.indexers.BaseIndexer):
+ class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
start = np.empty(num_values, dtype=np.int64)
end = np.empty(num_values, dtype=np.int64)
@@ -1061,7 +1066,7 @@ def get_window_bounds(self, num_values, min_periods, center, closed):
@pytest.mark.parametrize(
("index", "window"),
- [([0, 1, 2, 3, 4], 2), (pd.date_range("2001-01-01", freq="D", periods=5), "2D")],
+ [([0, 1, 2, 3, 4], 2), (date_range("2001-01-01", freq="D", periods=5), "2D")],
)
def test_rolling_corr_timedelta_index(index, window):
# GH: 31286
@@ -1080,7 +1085,7 @@ def test_groupby_rolling_nan_included():
result = df.groupby("group", dropna=False).rolling(1, min_periods=1).mean()
expected = DataFrame(
{"B": [0.0, 2.0, 3.0, 1.0, 4.0]},
- index=pd.MultiIndex.from_tuples(
+ index=MultiIndex.from_tuples(
[("g1", 0), ("g1", 2), ("g2", 3), (np.nan, 1), (np.nan, 4)],
names=["group", None],
),
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
It appears we are heading this general direction in pandas imports in tests. | https://api.github.com/repos/pandas-dev/pandas/pulls/38095 | 2020-11-26T19:18:41Z | 2020-11-26T23:04:36Z | 2020-11-26T23:04:36Z | 2020-11-27T18:42:48Z |
REGR: fix regression in groupby aggregation with out-of-bounds datetimes | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index a8bbf692a72e5..b34df1726dd17 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -20,6 +20,7 @@ Fixed regressions
- Fixed regression in inplace operations on :class:`Series` with ``ExtensionDtype`` with NumPy dtyped operand (:issue:`37910`)
- Fixed regression in metadata propagation for ``groupby`` iterator (:issue:`37343`)
- Fixed regression in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`)
+- Fixed regression in :meth:`DataFrame.groupby` aggregation with out-of-bounds datetime objects in an object-dtype column (:issue:`36003`)
- Fixed regression in ``df.groupby(..).rolling(..)`` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 9459cd297c758..ad6329c588bbe 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -44,7 +44,9 @@ cdef class _BaseGrouper:
Slider islider, Slider vslider):
if cached_typ is None:
cached_ityp = self.ityp(islider.buf)
- cached_typ = self.typ(vslider.buf, index=cached_ityp, name=self.name)
+ cached_typ = self.typ(
+ vslider.buf, dtype=vslider.buf.dtype, index=cached_ityp, name=self.name
+ )
else:
# See the comment in indexes/base.py about _index_data.
# We need this for EA-backed indexes that have a reference
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index dba039b66d22d..820d51325b424 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -1,6 +1,7 @@
"""
test .agg behavior / note that .apply is tested generally in test_groupby.py
"""
+import datetime
import functools
from functools import partial
@@ -1156,3 +1157,21 @@ def test_agg_no_suffix_index():
result = df["A"].agg(["sum", lambda x: x.sum(), lambda x: x.sum()])
expected = Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"], name="A")
tm.assert_series_equal(result, expected)
+
+
+def test_aggregate_datetime_objects():
+ # https://github.com/pandas-dev/pandas/issues/36003
+ # ensure we don't raise an error but keep object dtype for out-of-bounds
+ # datetimes
+ df = DataFrame(
+ {
+ "A": ["X", "Y"],
+ "B": [
+ datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
+ datetime.datetime(3005, 1, 1, 10, 30, 23, 540000),
+ ],
+ }
+ )
+ result = df.groupby("A").B.max()
+ expected = df.set_index("A")["B"]
+ tm.assert_series_equal(result, expected)
| Closes #36003 | https://api.github.com/repos/pandas-dev/pandas/pulls/38094 | 2020-11-26T16:46:02Z | 2020-11-27T20:12:02Z | 2020-11-27T20:12:02Z | 2020-11-27T20:47:55Z |
BUG: merge_ordered fails with list-like left_by or right_by | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 6f046d3a9379d..6ab9f9a58cb0c 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -737,6 +737,7 @@ Reshaping
- Bug in :meth:`DataFrame.apply` not setting index of return value when ``func`` return type is ``dict`` (:issue:`37544`)
- Bug in :func:`concat` resulting in a ``ValueError`` when at least one of both inputs had a non-unique index (:issue:`36263`)
- Bug in :meth:`DataFrame.merge` and :meth:`pandas.merge` returning inconsistent ordering in result for ``how=right`` and ``how=left`` (:issue:`35382`)
+- Bug in :func:`merge_ordered` couldn't handle list-like ``left_by`` or ``right_by`` (:issue:`35269`)
Sparse
^^^^^^
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 3b755c40721fb..7b3d68500dd98 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -140,9 +140,7 @@ def _groupby_and_merge(by, on, left: "DataFrame", right: "DataFrame", merge_piec
# make sure join keys are in the merged
# TODO, should merge_pieces do this?
- for k in by:
- if k in merged:
- merged[k] = key
+ merged[by] = key
pieces.append(merged)
diff --git a/pandas/tests/reshape/merge/test_merge_ordered.py b/pandas/tests/reshape/merge/test_merge_ordered.py
index 17f2f44f45fce..8389a6bb9be10 100644
--- a/pandas/tests/reshape/merge/test_merge_ordered.py
+++ b/pandas/tests/reshape/merge/test_merge_ordered.py
@@ -115,3 +115,65 @@ def test_doc_example(self):
)
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "left, right, on, left_by, right_by, expected",
+ [
+ (
+ DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}),
+ DataFrame({"T": [2], "E": [1]}),
+ ["T"],
+ ["G", "H"],
+ None,
+ DataFrame(
+ {
+ "G": ["g"] * 3,
+ "H": ["h"] * 3,
+ "T": [1, 2, 3],
+ "E": [np.nan, 1.0, np.nan],
+ }
+ ),
+ ),
+ (
+ DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}),
+ DataFrame({"T": [2], "E": [1]}),
+ "T",
+ ["G", "H"],
+ None,
+ DataFrame(
+ {
+ "G": ["g"] * 3,
+ "H": ["h"] * 3,
+ "T": [1, 2, 3],
+ "E": [np.nan, 1.0, np.nan],
+ }
+ ),
+ ),
+ (
+ DataFrame({"T": [2], "E": [1]}),
+ DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}),
+ ["T"],
+ None,
+ ["G", "H"],
+ DataFrame(
+ {
+ "T": [1, 2, 3],
+ "E": [np.nan, 1.0, np.nan],
+ "G": ["g"] * 3,
+ "H": ["h"] * 3,
+ }
+ ),
+ ),
+ ],
+ )
+ def test_list_type_by(self, left, right, on, left_by, right_by, expected):
+ # GH 35269
+ result = merge_ordered(
+ left=left,
+ right=right,
+ on=on,
+ left_by=left_by,
+ right_by=right_by,
+ )
+
+ tm.assert_frame_equal(result, expected)
| - [x] closes #35269
- [x] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38089 | 2020-11-26T15:05:37Z | 2020-11-29T17:45:41Z | 2020-11-29T17:45:40Z | 2020-11-30T00:41:16Z |
DOC: clarify docs related to deprecation of indexing DataFrame with single partial datetime-string | diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 169c0cfbbb87e..354c510b843dd 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -588,10 +588,12 @@ would include matching times on an included date:
.. warning::
- Indexing ``DataFrame`` rows with strings is deprecated in pandas 1.2.0 and will be removed in a future version. Use ``frame.loc[dtstring]`` instead.
+ Indexing ``DataFrame`` rows with a *single* string with getitem (e.g. ``frame[dtstring]``)
+ is deprecated starting with pandas 1.2.0 (given the ambiguity whether it is indexing
+ the rows or selecting a column) and will be removed in a future version. The equivalent
+ with ``.loc`` (e.g. ``frame.loc[dtstring]``) is still supported.
.. ipython:: python
- :okwarning:
dft = pd.DataFrame(
np.random.randn(100000, 1),
@@ -599,34 +601,30 @@ would include matching times on an included date:
index=pd.date_range("20130101", periods=100000, freq="T"),
)
dft
- dft["2013"]
+ dft.loc["2013"]
This starts on the very first time in the month, and includes the last date and
time for the month:
.. ipython:: python
- :okwarning:
dft["2013-1":"2013-2"]
This specifies a stop time **that includes all of the times on the last day**:
.. ipython:: python
- :okwarning:
dft["2013-1":"2013-2-28"]
This specifies an **exact** stop time (and is not the same as the above):
.. ipython:: python
- :okwarning:
dft["2013-1":"2013-2-28 00:00:00"]
We are stopping on the included end-point as it is part of the index:
.. ipython:: python
- :okwarning:
dft["2013-1-15":"2013-1-15 12:30:00"]
@@ -652,7 +650,6 @@ We are stopping on the included end-point as it is part of the index:
Slicing with string indexing also honors UTC offset.
.. ipython:: python
- :okwarning:
df = pd.DataFrame([0], index=pd.DatetimeIndex(["2019-01-01"], tz="US/Pacific"))
df
@@ -704,15 +701,14 @@ If index resolution is second, then the minute-accurate timestamp gives a
series_second.index.resolution
series_second["2011-12-31 23:59"]
-If the timestamp string is treated as a slice, it can be used to index ``DataFrame`` with ``[]`` as well.
+If the timestamp string is treated as a slice, it can be used to index ``DataFrame`` with ``.loc[]`` as well.
.. ipython:: python
- :okwarning:
dft_minute = pd.DataFrame(
{"a": [1, 2, 3], "b": [4, 5, 6]}, index=series_minute.index
)
- dft_minute["2011-12-31 23"]
+ dft_minute.loc["2011-12-31 23"]
.. warning::
@@ -2080,7 +2076,6 @@ You can pass in dates and strings to ``Series`` and ``DataFrame`` with ``PeriodI
Passing a string representing a lower frequency than ``PeriodIndex`` returns partial sliced data.
.. ipython:: python
- :okwarning:
ps["2011"]
@@ -2090,7 +2085,7 @@ Passing a string representing a lower frequency than ``PeriodIndex`` returns par
index=pd.period_range("2013-01-01 9:00", periods=600, freq="T"),
)
dfp
- dfp["2013-01-01 10H"]
+ dfp.loc["2013-01-01 10H"]
As with ``DatetimeIndex``, the endpoints will be included in the result. The example below slices data starting from 10:00 to 11:59.
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 6f046d3a9379d..c171004f08aab 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -466,7 +466,9 @@ Deprecations
- Date parser functions :func:`~pandas.io.date_converters.parse_date_time`, :func:`~pandas.io.date_converters.parse_date_fields`, :func:`~pandas.io.date_converters.parse_all_fields` and :func:`~pandas.io.date_converters.generic_parser` from ``pandas.io.date_converters`` are deprecated and will be removed in a future version; use :func:`to_datetime` instead (:issue:`35741`)
- :meth:`DataFrame.lookup` is deprecated and will be removed in a future version, use :meth:`DataFrame.melt` and :meth:`DataFrame.loc` instead (:issue:`18682`)
- The method :meth:`Index.to_native_types` is deprecated. Use ``.astype(str)`` instead (:issue:`28867`)
-- Deprecated indexing :class:`DataFrame` rows with datetime-like strings ``df[string]``, use ``df.loc[string]`` instead (:issue:`36179`)
+- Deprecated indexing :class:`DataFrame` rows with a single datetime-like string as ``df[string]``
+ (given the ambiguity whether it is indexing the rows or selecting a column), use
+ ``df.loc[string]`` instead (:issue:`36179`)
- Deprecated casting an object-dtype index of ``datetime`` objects to :class:`.DatetimeIndex` in the :class:`Series` constructor (:issue:`23598`)
- Deprecated :meth:`Index.is_all_dates` (:issue:`27744`)
- The default value of ``regex`` for :meth:`Series.str.replace` will change from ``True`` to ``False`` in a future release. In addition, single character regular expressions will *not* be treated as literal strings when ``regex=True`` is set. (:issue:`24804`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index f6d14a1c1503c..83a28e3fb9089 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -2199,9 +2199,10 @@ def convert_to_index_sliceable(obj: "DataFrame", key):
try:
res = idx._get_string_slice(key)
warnings.warn(
- "Indexing on datetimelike rows with `frame[string]` is "
- "deprecated and will be removed in a future version. "
- "Use `frame.loc[string]` instead.",
+ "Indexing a DataFrame with a datetimelike index using a single "
+ "string to slice the rows, like `frame[string]`, is deprecated "
+ "and will be removed in a future version. Use `frame.loc[string]` "
+ "instead.",
FutureWarning,
stacklevel=3,
)
| A follow-up on https://github.com/pandas-dev/pandas/pull/36179#discussion_r494830867 (cc @jbrockmendel). Attempt to clarify what is actually deprecated, remove the `:okwarning:` for several cases in the docs that were not actually deprecated, and updated to use the preferred `.loc` for some others. | https://api.github.com/repos/pandas-dev/pandas/pulls/38088 | 2020-11-26T14:15:02Z | 2020-11-26T15:37:27Z | 2020-11-26T15:37:27Z | 2020-11-26T15:44:49Z |
BLD: Only enable -Werror in the CI jobs | diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index 78951c9def7cb..c36422884f2ec 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -108,6 +108,12 @@ fi
echo "activate pandas-dev"
source activate pandas-dev
+# Explicitly set an environment variable indicating that this is pandas' CI environment.
+#
+# This allows us to enable things like -Werror that shouldn't be activated in
+# downstream CI jobs that may also build pandas from source.
+export PANDAS_CI=1
+
echo
echo "remove any installed pandas package"
echo "w/o removing anything else"
diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index 29b0e99a3a356..552c8ced6b08a 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -33,6 +33,14 @@ Bug fixes
.. ---------------------------------------------------------------------------
+.. _whatsnew_115.other:
+
+Other
+~~~~~
+- Only set ``-Werror`` as a compiler flag in the CI jobs (:issue:`33315`, :issue:`33314`)
+
+.. ---------------------------------------------------------------------------
+
.. _whatsnew_115.contributors:
Contributors
diff --git a/setup.py b/setup.py
index 9f33c045df6ed..0b1007794bbdb 100755
--- a/setup.py
+++ b/setup.py
@@ -409,15 +409,16 @@ def run(self):
endian_macro = [("__LITTLE_ENDIAN__", "1")]
+extra_compile_args = []
+extra_link_args = []
if is_platform_windows():
- extra_compile_args = []
- extra_link_args = []
if debugging_symbols_requested:
extra_compile_args.append("/Z7")
extra_link_args.append("/DEBUG")
else:
- extra_compile_args = ["-Werror"]
- extra_link_args = []
+ # PANDAS_CI=1 is set by ci/setup_env.sh
+ if os.environ.get("PANDAS_CI", "0") == "1":
+ extra_compile_args.append("-Werror")
if debugging_symbols_requested:
extra_compile_args.append("-g")
| closes #33315
closes #33314
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38087 | 2020-11-26T13:34:40Z | 2020-11-27T20:03:58Z | 2020-11-27T20:03:58Z | 2020-12-01T16:41:28Z |
TST : Categorical DataFrame.at overwritting row | diff --git a/pandas/tests/indexing/test_at.py b/pandas/tests/indexing/test_at.py
index c721ba2e6daad..fbf33999386e6 100644
--- a/pandas/tests/indexing/test_at.py
+++ b/pandas/tests/indexing/test_at.py
@@ -3,7 +3,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, Series, Timestamp
+from pandas import CategoricalDtype, DataFrame, Series, Timestamp
import pandas._testing as tm
@@ -26,6 +26,23 @@ def test_at_setitem_mixed_index_assignment(self):
ser.at[1] = 22
assert ser.iat[3] == 22
+ def test_at_setitem_categorical_missing(self):
+ df = DataFrame(
+ index=range(3), columns=range(3), dtype=CategoricalDtype(["foo", "bar"])
+ )
+ df.at[1, 1] = "foo"
+
+ expected = DataFrame(
+ [
+ [np.nan, np.nan, np.nan],
+ [np.nan, "foo", np.nan],
+ [np.nan, np.nan, np.nan],
+ ],
+ dtype=CategoricalDtype(["foo", "bar"]),
+ )
+
+ tm.assert_frame_equal(df, expected)
+
class TestAtSetItemWithExpansion:
def test_at_setitem_expansion_series_dt64tz_value(self, tz_naive_fixture):
| - [ ] closes #37763
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38085 | 2020-11-26T13:27:48Z | 2020-11-26T18:08:12Z | 2020-11-26T18:08:12Z | 2020-12-03T13:54:21Z |
BLD: Use conda(-forge) compilers | diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index ced0554c51fdf..3c5a88333be56 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -146,7 +146,7 @@ Creating a development environment
----------------------------------
To test out code changes, you'll need to build pandas from source, which
-requires a C compiler and Python environment. If you're making documentation
+requires a C/C++ compiler and Python environment. If you're making documentation
changes, you can skip to :ref:`contributing.documentation` but you won't be able
to build the documentation locally before pushing your changes.
@@ -195,6 +195,13 @@ operations. To install pandas from source, you need to compile these C
extensions, which means you need a C compiler. This process depends on which
platform you're using.
+If you have setup your environment using ``conda``, the packages ``c-compiler``
+and ``cxx-compiler`` will install a fitting compiler for your platform that is
+compatible with the remaining conda packages. On Windows and macOS, you will
+also need to install the SDKs as they have to be distributed separately.
+These packages will be automatically installed by using ``pandas``'s
+``environment.yml``.
+
**Windows**
You will need `Build Tools for Visual Studio 2017
@@ -206,12 +213,33 @@ You will need `Build Tools for Visual Studio 2017
scrolling down to "All downloads" -> "Tools for Visual Studio 2019".
In the installer, select the "C++ build tools" workload.
+You can install the necessary components on the commandline using
+`vs_buildtools.exe <https://aka.ms/vs/16/release/vs_buildtools.exe>`_:
+
+.. code::
+
+ vs_buildtools.exe --quiet --wait --norestart --nocache ^
+ --installPath C:\BuildTools ^
+ --add "Microsoft.VisualStudio.Workload.VCTools;includeRecommended" ^
+ --add Microsoft.VisualStudio.Component.VC.v141 ^
+ --add Microsoft.VisualStudio.Component.VC.v141.x86.x64 ^
+ --add Microsoft.VisualStudio.Component.Windows10SDK.17763
+
+To setup the right paths on the commandline, call
+``"C:\BuildTools\VC\Auxiliary\Build\vcvars64.bat" -vcvars_ver=14.16 10.0.17763.0``.
+
**macOS**
-Information about compiler installation can be found here:
+To use the ``conda``-based compilers, you will need to install the
+Developer Tools using ``xcode-select --install``. Otherwise
+information about compiler installation can be found here:
https://devguide.python.org/setup/#macos
-**Unix**
+**Linux**
+
+For Linux-based ``conda`` installations, you won't have to install any
+additional components outside of the conda environment. The instructions
+below are only needed if your setup isn't based on conda environments.
Some Linux distributions will come with a pre-installed C compiler. To find out
which compilers (and versions) are installed on your system::
@@ -243,11 +271,10 @@ Let us know if you have any difficulties by opening an issue or reaching out on
Creating a Python environment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Now that you have a C compiler, create an isolated pandas development
-environment:
+Now create an isolated pandas development environment:
-* Install either `Anaconda <https://www.anaconda.com/download/>`_ or `miniconda
- <https://conda.io/miniconda.html>`_
+* Install either `Anaconda <https://www.anaconda.com/download/>`_, `miniconda
+ <https://conda.io/miniconda.html>`_, or `miniforge <https://github.com/conda-forge/miniforge>`_
* Make sure your conda is up to date (``conda update conda``)
* Make sure that you have :ref:`cloned the repository <contributing.forking>`
* ``cd`` to the pandas source directory
diff --git a/environment.yml b/environment.yml
index 77a9c5fd4822d..b99b856187fb6 100644
--- a/environment.yml
+++ b/environment.yml
@@ -12,6 +12,9 @@ dependencies:
- asv
# building
+ # The compiler packages are meta-packages and install the correct compiler (activation) packages on the respective platforms.
+ - c-compiler
+ - cxx-compiler
- cython>=0.29.21
# code checks
diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index c6d00eb58a969..1ad9ec03925a0 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -19,7 +19,7 @@
import yaml
-EXCLUDE = {"python"}
+EXCLUDE = {"python", "c-compiler", "cxx-compiler"}
RENAME = {"pytables": "tables", "pyqt": "pyqt5", "dask-core": "dask"}
@@ -48,6 +48,9 @@ def conda_package_to_pip(package):
break
+ if package in EXCLUDE:
+ return
+
if package in RENAME:
return RENAME[package]
| Use the compiler setup from conda-forge inside a conda environment. The compilers in conda-forge can be different and slightly incompatible with the system ones causing some headaches. Also this hopefully helps with initial setup of new contributors.
Is this worth a whatsnew entry?
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38084 | 2020-11-26T13:24:34Z | 2020-11-26T19:13:32Z | 2020-11-26T19:13:32Z | 2020-11-26T19:13:32Z |
Backport PR #38064 on branch 1.1.x (DOC: tidy 1.1.5 release notes) | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index dd88f79371d65..a8bbf692a72e5 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -14,10 +14,13 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
-- Regression in addition of a timedelta-like scalar to a :class:`DatetimeIndex` raising incorrectly (:issue:`37295`)
+- Fixed regression in addition of a timedelta-like scalar to a :class:`DatetimeIndex` raising incorrectly (:issue:`37295`)
- Fixed regression in :meth:`Series.groupby` raising when the :class:`Index` of the :class:`Series` had a tuple as its name (:issue:`37755`)
- Fixed regression in :meth:`DataFrame.loc` and :meth:`Series.loc` for ``__setitem__`` when one-dimensional tuple was given to select from :class:`MultiIndex` (:issue:`37711`)
- Fixed regression in inplace operations on :class:`Series` with ``ExtensionDtype`` with NumPy dtyped operand (:issue:`37910`)
+- Fixed regression in metadata propagation for ``groupby`` iterator (:issue:`37343`)
+- Fixed regression in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`)
+- Fixed regression in ``df.groupby(..).rolling(..)`` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
.. ---------------------------------------------------------------------------
@@ -25,11 +28,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
-- Bug in metadata propagation for ``groupby`` iterator (:issue:`37343`)
-- Bug in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`)
-- Bug in :class:`RollingGroupby` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
- Bug in pytables methods in python 3.9 (:issue:`38041`)
--
.. ---------------------------------------------------------------------------
| Backport PR #38064: DOC: tidy 1.1.5 release notes | https://api.github.com/repos/pandas-dev/pandas/pulls/38081 | 2020-11-26T13:04:16Z | 2020-11-26T13:56:29Z | 2020-11-26T13:56:29Z | 2020-11-26T13:56:29Z |
DOC: Clean .ix references in indexing.rst | diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index d456289c5c3f4..817ea3445f995 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -584,48 +584,20 @@ without using a temporary variable.
(bb.groupby(['year', 'team']).sum()
.loc[lambda df: df['r'] > 100])
-.. _indexing.deprecate_ix:
-IX indexer is deprecated
-------------------------
-
-.. warning::
-
- .. versionchanged:: 1.0.0
-
- The ``.ix`` indexer was removed, in favor of the more strict ``.iloc`` and ``.loc`` indexers.
+.. _combining_positional_and_label_based_indexing:
-``.ix`` offers a lot of magic on the inference of what the user wants to do. To wit, ``.ix`` can decide
-to index *positionally* OR via *labels* depending on the data type of the index. This has caused quite a
-bit of user confusion over the years.
+Combining positional and label-based indexing
+---------------------------------------------
-The recommended methods of indexing are:
-
-* ``.loc`` if you want to *label* index.
-* ``.iloc`` if you want to *positionally* index.
+If you wish to get the 0th and the 2nd elements from the index in the 'A' column, you can do:
.. ipython:: python
dfd = pd.DataFrame({'A': [1, 2, 3],
'B': [4, 5, 6]},
index=list('abc'))
-
dfd
-
-Previous behavior, where you wish to get the 0th and the 2nd elements from the index in the 'A' column.
-
-.. code-block:: ipython
-
- In [3]: dfd.ix[[0, 2], 'A']
- Out[3]:
- a 1
- c 3
- Name: A, dtype: int64
-
-Using ``.loc``. Here we will select the appropriate indexes from the index, then use *label* indexing.
-
-.. ipython:: python
-
dfd.loc[dfd.index[[0, 2]], 'A']
This can also be expressed using ``.iloc``, by explicitly getting locations on the indexers, and using
diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst
index 8ae5ea5726fe9..b1773f9dc8d16 100644
--- a/doc/source/whatsnew/v0.20.0.rst
+++ b/doc/source/whatsnew/v0.20.0.rst
@@ -1315,7 +1315,7 @@ The recommended methods of indexing are:
- ``.loc`` if you want to *label* index
- ``.iloc`` if you want to *positionally* index.
-Using ``.ix`` will now show a ``DeprecationWarning`` with a link to some examples of how to convert code :ref:`here <indexing.deprecate_ix>`.
+Using ``.ix`` will now show a ``DeprecationWarning`` with a link to some examples of how to convert code `here <https://pandas.pydata.org/pandas-docs/version/1.0/user_guide/indexing.html#ix-indexer-is-deprecated>`__.
.. ipython:: python
| `DataFrame.ix` is removed from the code base, so rephrase a section in the docs that was referencing it. | https://api.github.com/repos/pandas-dev/pandas/pulls/38080 | 2020-11-26T11:17:31Z | 2020-11-26T16:18:51Z | 2020-11-26T16:18:51Z | 2020-11-26T16:43:45Z |
TST: mark all plotting tests as slow | diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
index 77a4c4a8faf5e..dc2aaf9292040 100644
--- a/pandas/tests/plotting/frame/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -21,6 +21,8 @@
from pandas.io.formats.printing import pprint_thing
import pandas.plotting as plotting
+pytestmark = pytest.mark.slow
+
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
@@ -39,7 +41,6 @@ def setup_method(self, method):
}
)
- @pytest.mark.slow
def test_plot(self):
from pandas.plotting._matplotlib.compat import mpl_ge_3_1_0
@@ -171,13 +172,11 @@ def test_nonnumeric_exclude(self):
ax = df.plot()
assert len(ax.get_lines()) == 1 # B was plotted
- @pytest.mark.slow
def test_implicit_label(self):
df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
ax = df.plot(x="a", y="b")
self._check_text_labels(ax.xaxis.get_label(), "a")
- @pytest.mark.slow
def test_donot_overwrite_index_name(self):
# GH 8494
df = DataFrame(np.random.randn(2, 2), columns=["a", "b"])
@@ -185,7 +184,6 @@ def test_donot_overwrite_index_name(self):
df.plot(y="b", label="LABEL")
assert df.index.name == "NAME"
- @pytest.mark.slow
def test_plot_xy(self):
# columns.inferred_type == 'string'
df = self.tdf
@@ -210,7 +208,6 @@ def test_plot_xy(self):
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
- @pytest.mark.slow
@pytest.mark.parametrize(
"input_log, expected_log", [(True, "log"), ("sym", "symlog")]
)
@@ -239,7 +236,6 @@ def test_invalid_logscale(self, input_param):
with pytest.raises(ValueError, match=msg):
df.plot(**{input_param: "sm"})
- @pytest.mark.slow
def test_xcompat(self):
import pandas as pd
@@ -488,7 +484,6 @@ def test_area_sharey_dont_overwrite(self):
assert ax1._shared_y_axes.joined(ax1, ax2)
assert ax2._shared_y_axes.joined(ax1, ax2)
- @pytest.mark.slow
def test_bar_linewidth(self):
df = DataFrame(np.random.randn(5, 5))
@@ -509,7 +504,6 @@ def test_bar_linewidth(self):
for r in ax.patches:
assert r.get_linewidth() == 2
- @pytest.mark.slow
def test_bar_barwidth(self):
df = DataFrame(np.random.randn(5, 5))
@@ -547,7 +541,6 @@ def test_bar_barwidth(self):
for r in ax.patches:
assert r.get_height() == width
- @pytest.mark.slow
def test_bar_bottom_left(self):
df = DataFrame(np.random.rand(5, 5))
ax = df.plot.bar(stacked=False, bottom=1)
@@ -576,7 +569,6 @@ def test_bar_bottom_left(self):
result = [p.get_x() for p in ax.patches]
assert result == [1] * 5
- @pytest.mark.slow
def test_bar_nan(self):
df = DataFrame({"A": [10, np.nan, 20], "B": [5, 10, 20], "C": [1, 2, 3]})
ax = df.plot.bar()
@@ -592,7 +584,6 @@ def test_bar_nan(self):
expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
assert result == expected
- @pytest.mark.slow
def test_bar_categorical(self):
# GH 13019
df1 = DataFrame(
@@ -622,7 +613,6 @@ def test_bar_categorical(self):
assert ax.patches[0].get_x() == -0.25
assert ax.patches[-1].get_x() == 4.75
- @pytest.mark.slow
def test_plot_scatter(self):
df = DataFrame(
np.random.randn(6, 4),
@@ -673,14 +663,12 @@ def test_scatterplot_object_data(self):
_check_plot_works(df.plot.scatter, x=0, y=1)
@pytest.mark.parametrize("x, y", [("x", "y"), ("y", "x"), ("y", "y")])
- @pytest.mark.slow
def test_plot_scatter_with_categorical_data(self, x, y):
# after fixing GH 18755, should be able to plot categorical data
df = DataFrame({"x": [1, 2, 3, 4], "y": pd.Categorical(["a", "b", "a", "c"])})
_check_plot_works(df.plot.scatter, x=x, y=y)
- @pytest.mark.slow
def test_plot_scatter_with_c(self):
df = DataFrame(
np.random.randn(6, 4),
@@ -739,7 +727,6 @@ def test_plot_scatter_with_s(self):
ax = df.plot.scatter(x="a", y="b", s="c")
tm.assert_numpy_array_equal(df["c"].values, right=ax.collections[0].get_sizes())
- @pytest.mark.slow
def test_plot_bar(self):
df = DataFrame(
np.random.randn(6, 4),
@@ -772,7 +759,6 @@ def test_plot_bar(self):
ax = df.plot.barh(rot=55, fontsize=11)
self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
- @pytest.mark.slow
def test_boxplot(self):
df = self.hist_df
series = df["height"]
@@ -801,7 +787,6 @@ def test_boxplot(self):
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
- @pytest.mark.slow
def test_boxplot_vertical(self):
df = self.hist_df
numeric_cols = df._get_numeric_data().columns
@@ -832,7 +817,6 @@ def test_boxplot_vertical(self):
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
- @pytest.mark.slow
def test_boxplot_return_type(self):
df = DataFrame(
np.random.randn(6, 4),
@@ -854,7 +838,6 @@ def test_boxplot_return_type(self):
result = df.plot.box(return_type="both")
self._check_box_return_type(result, "both")
- @pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_df(self):
df = DataFrame(np.random.randn(100, 4))
@@ -877,14 +860,12 @@ def test_kde_df(self):
axes = df.plot(kind="kde", logy=True, subplots=True)
self._check_ax_scales(axes, yaxis="log")
- @pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_missing_vals(self):
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
_check_plot_works(df.plot, kind="kde")
- @pytest.mark.slow
def test_hist_df(self):
from matplotlib.patches import Rectangle
@@ -966,7 +947,6 @@ def _check_box_coord(
if expected_w is not None:
tm.assert_numpy_array_equal(result_width, expected_w, check_dtype=False)
- @pytest.mark.slow
def test_hist_df_coord(self):
normal_df = DataFrame(
{
@@ -1098,12 +1078,10 @@ def test_hist_df_coord(self):
expected_w=np.array([6, 7, 8, 9, 10]),
)
- @pytest.mark.slow
def test_plot_int_columns(self):
df = DataFrame(np.random.randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
- @pytest.mark.slow
def test_df_legend_labels(self):
kinds = ["line", "bar", "barh", "kde", "area", "hist"]
df = DataFrame(np.random.rand(3, 3), columns=["a", "b", "c"])
@@ -1217,7 +1195,6 @@ def test_legend_name(self):
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, "new")
- @pytest.mark.slow
def test_no_legend(self):
kinds = ["line", "bar", "barh", "kde", "area", "hist"]
df = DataFrame(np.random.rand(3, 3), columns=["a", "b", "c"])
@@ -1226,7 +1203,6 @@ def test_no_legend(self):
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
- @pytest.mark.slow
def test_style_by_column(self):
import matplotlib.pyplot as plt
@@ -1245,7 +1221,6 @@ def test_style_by_column(self):
for idx, line in enumerate(ax.get_lines()[: len(markers)]):
assert line.get_marker() == markers[idx]
- @pytest.mark.slow
def test_line_label_none(self):
s = Series([1, 2])
ax = s.plot()
@@ -1302,7 +1277,6 @@ def test_all_invalid_plot_data(self):
with pytest.raises(TypeError, match=msg):
df.plot(kind=kind)
- @pytest.mark.slow
def test_partially_invalid_plot_data(self):
with tm.RNGContext(42):
df = DataFrame(np.random.randn(10, 2), dtype=object)
@@ -1372,7 +1346,6 @@ def test_xy_args_integer(self, x, y, colnames):
df.columns = colnames
_check_plot_works(df.plot, x=x, y=y)
- @pytest.mark.slow
def test_hexbin_basic(self):
df = self.hexbin_df
@@ -1388,7 +1361,6 @@ def test_hexbin_basic(self):
# return value is single axes
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
- @pytest.mark.slow
def test_hexbin_with_c(self):
df = self.hexbin_df
@@ -1398,7 +1370,6 @@ def test_hexbin_with_c(self):
ax = df.plot.hexbin(x="A", y="B", C="C", reduce_C_function=np.std)
assert len(ax.collections) == 1
- @pytest.mark.slow
@pytest.mark.parametrize(
"kwargs, expected",
[
@@ -1412,7 +1383,6 @@ def test_hexbin_cmap(self, kwargs, expected):
ax = df.plot.hexbin(x="A", y="B", **kwargs)
assert ax.collections[0].cmap.name == expected
- @pytest.mark.slow
def test_pie_df(self):
df = DataFrame(
np.random.rand(5, 3),
@@ -1484,7 +1454,6 @@ def test_pie_df_nan(self):
expected_labels = base_expected[:i] + base_expected[i + 1 :]
assert result_labels == expected_labels
- @pytest.mark.slow
def test_errorbar_plot(self):
d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
df = DataFrame(d)
@@ -1531,7 +1500,6 @@ def test_errorbar_plot(self):
with pytest.raises((ValueError, TypeError)):
df.plot(yerr=df_err)
- @pytest.mark.slow
@pytest.mark.parametrize("kind", ["line", "bar", "barh"])
def test_errorbar_plot_different_kinds(self, kind):
d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
@@ -1565,7 +1533,6 @@ def test_errorbar_plot_different_kinds(self, kind):
self._check_has_errorbars(axes, xerr=1, yerr=1)
@pytest.mark.xfail(reason="Iterator is consumed", raises=ValueError)
- @pytest.mark.slow
def test_errorbar_plot_iterator(self):
with warnings.catch_warnings():
d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
@@ -1575,7 +1542,6 @@ def test_errorbar_plot_iterator(self):
ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
self._check_has_errorbars(ax, xerr=0, yerr=2)
- @pytest.mark.slow
def test_errorbar_with_integer_column_names(self):
# test with integer column names
df = DataFrame(np.random.randn(10, 2))
@@ -1585,7 +1551,6 @@ def test_errorbar_with_integer_column_names(self):
ax = _check_plot_works(df.plot, y=0, yerr=1)
self._check_has_errorbars(ax, xerr=0, yerr=1)
- @pytest.mark.slow
def test_errorbar_with_partial_columns(self):
df = DataFrame(np.random.randn(10, 3))
df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
@@ -1608,7 +1573,6 @@ def test_errorbar_with_partial_columns(self):
ax = _check_plot_works(df.plot, yerr=err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
- @pytest.mark.slow
@pytest.mark.parametrize("kind", ["line", "bar", "barh"])
def test_errorbar_timeseries(self, kind):
d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
@@ -1713,7 +1677,6 @@ def _check_errorbar_color(containers, expected, has_err="has_xerr"):
self._check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, "green", has_err="has_yerr")
- @pytest.mark.slow
def test_sharex_and_ax(self):
# https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
# the axis in fig.get_axis() are sorted differently than pandas
@@ -1768,7 +1731,6 @@ def _check(axes):
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
- @pytest.mark.slow
def test_sharey_and_ax(self):
# https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
# the axis in fig.get_axis() are sorted differently than pandas
@@ -1854,7 +1816,6 @@ def test_memory_leak(self):
# need to actually access something to get an error
results[key].lines
- @pytest.mark.slow
def test_df_gridspec_patterns(self):
# GH 10819
import matplotlib.gridspec as gridspec
@@ -1970,7 +1931,6 @@ def _get_boxed_grid():
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
- @pytest.mark.slow
def test_df_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(
@@ -2026,7 +1986,6 @@ def test_secondary_axis_font_size(self, method):
ax = getattr(df.plot, method)(**kwargs)
self._check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize)
- @pytest.mark.slow
def test_x_string_values_ticks(self):
# Test if string plot index have a fixed xtick position
# GH: 7612, GH: 22334
@@ -2046,7 +2005,6 @@ def test_x_string_values_ticks(self):
assert labels_position["Tuesday"] == 1.0
assert labels_position["Wednesday"] == 2.0
- @pytest.mark.slow
def test_x_multiindex_values_ticks(self):
# Test if multiindex plot index have a fixed xtick position
# GH: 15912
@@ -2190,7 +2148,6 @@ def test_xlabel_ylabel_dataframe_plane_plot(self, kind, xlabel, ylabel):
assert ax.get_xlabel() == (xcol if xlabel is None else xlabel)
assert ax.get_ylabel() == (ycol if ylabel is None else ylabel)
- @pytest.mark.slow
@pytest.mark.parametrize("method", ["bar", "barh"])
def test_bar_ticklabel_consistence(self, method):
# Draw two consecutiv bar plot with consistent ticklabels
diff --git a/pandas/tests/plotting/frame/test_frame_color.py b/pandas/tests/plotting/frame/test_frame_color.py
index d9fe7363a15ad..f6f17f35fa6d8 100644
--- a/pandas/tests/plotting/frame/test_frame_color.py
+++ b/pandas/tests/plotting/frame/test_frame_color.py
@@ -12,6 +12,8 @@
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
+pytestmark = pytest.mark.slow
+
@td.skip_if_no_mpl
class TestDataFrameColor(TestPlotBase):
@@ -98,7 +100,6 @@ def test_color_and_marker(self, color, expected):
assert all(i.get_linestyle() == "--" for i in ax.lines)
assert all(i.get_marker() == "d" for i in ax.lines)
- @pytest.mark.slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
@@ -152,7 +153,6 @@ def test_bar_user_colors(self):
]
assert result == expected
- @pytest.mark.slow
def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
# addressing issue #10611, to ensure colobar does not
# interfere with x-axis label and ticklabels with
@@ -175,7 +175,6 @@ def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible()
)
- @pytest.mark.slow
def test_if_hexbin_xaxis_label_is_visible(self):
# addressing issue #10678, to ensure colobar does not
# interfere with x-axis label and ticklabels with
@@ -188,7 +187,6 @@ def test_if_hexbin_xaxis_label_is_visible(self):
assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels())
assert ax.xaxis.get_label().get_visible()
- @pytest.mark.slow
def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
import matplotlib.pyplot as plt
@@ -250,7 +248,6 @@ def test_scatter_colorbar_different_cmap(self):
assert ax.collections[0].cmap.name == "cividis"
assert ax.collections[1].cmap.name == "magma"
- @pytest.mark.slow
def test_line_colors(self):
from matplotlib import cm
@@ -295,13 +292,11 @@ def test_line_colors(self):
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
- @pytest.mark.slow
def test_dont_modify_colors(self):
colors = ["r", "g", "b"]
DataFrame(np.random.rand(10, 2)).plot(color=colors)
assert len(colors) == 3
- @pytest.mark.slow
def test_line_colors_and_styles_subplots(self):
# GH 9894
from matplotlib import cm
@@ -370,7 +365,6 @@ def test_line_colors_and_styles_subplots(self):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
- @pytest.mark.slow
def test_area_colors(self):
from matplotlib import cm
from matplotlib.collections import PolyCollection
@@ -415,7 +409,6 @@ def test_area_colors(self):
for h in handles:
assert h.get_alpha() == 0.5
- @pytest.mark.slow
def test_hist_colors(self):
default_colors = self._unpack_cycler(self.plt.rcParams)
@@ -450,7 +443,6 @@ def test_hist_colors(self):
self._check_colors(ax.patches[::10], facecolors=["green"] * 5)
tm.close()
- @pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_colors(self):
from matplotlib import cm
@@ -471,7 +463,6 @@ def test_kde_colors(self):
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
- @pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_colors_and_styles_subplots(self):
from matplotlib import cm
@@ -528,7 +519,6 @@ def test_kde_colors_and_styles_subplots(self):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
- @pytest.mark.slow
def test_boxplot_colors(self):
def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
# TODO: outside this func?
@@ -609,13 +599,11 @@ def test_default_color_cycle(self):
expected = self._unpack_cycler(plt.rcParams)[:3]
self._check_colors(ax.get_lines(), linecolors=expected)
- @pytest.mark.slow
def test_no_color_bar(self):
df = self.hexbin_df
ax = df.plot.hexbin(x="A", y="B", colorbar=None)
assert ax.collections[0].colorbar is None
- @pytest.mark.slow
def test_mixing_cmap_and_colormap_raises(self):
df = self.hexbin_df
msg = "Only specify one of `cmap` and `colormap`"
diff --git a/pandas/tests/plotting/frame/test_frame_groupby.py b/pandas/tests/plotting/frame/test_frame_groupby.py
index 9c1676d6d97fb..bc35e02e6a581 100644
--- a/pandas/tests/plotting/frame/test_frame_groupby.py
+++ b/pandas/tests/plotting/frame/test_frame_groupby.py
@@ -9,6 +9,8 @@
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase
+pytestmark = pytest.mark.slow
+
@td.skip_if_no_mpl
class TestDataFramePlotsGroupby(TestPlotBase):
diff --git a/pandas/tests/plotting/frame/test_frame_subplots.py b/pandas/tests/plotting/frame/test_frame_subplots.py
index 413c5b8a87dc7..bb7da2b808da7 100644
--- a/pandas/tests/plotting/frame/test_frame_subplots.py
+++ b/pandas/tests/plotting/frame/test_frame_subplots.py
@@ -15,6 +15,8 @@
from pandas.io.formats.printing import pprint_thing
+pytestmark = pytest.mark.slow
+
@td.skip_if_no_mpl
class TestDataFramePlotsSubplots(TestPlotBase):
@@ -33,7 +35,6 @@ def setup_method(self, method):
}
)
- @pytest.mark.slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
@@ -72,7 +73,6 @@ def test_subplots(self):
for ax in axes:
assert ax.get_legend() is None
- @pytest.mark.slow
def test_subplots_timeseries(self):
idx = date_range(start="2014-07-01", freq="M", periods=10)
df = DataFrame(np.random.rand(10, 3), index=idx)
@@ -190,7 +190,6 @@ def test_subplots_timeseries_y_axis_not_supported(self):
== testdata["datetime_mixed_tz"].values
).all()
- @pytest.mark.slow
def test_subplots_layout_multi_column(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
@@ -224,7 +223,6 @@ def test_subplots_layout_multi_column(self):
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(-1, -1))
- @pytest.mark.slow
@pytest.mark.parametrize(
"kwargs, expected_axes_num, expected_layout, expected_shape",
[
@@ -246,7 +244,6 @@ def test_subplots_layout_single_column(
)
assert axes.shape == expected_shape
- @pytest.mark.slow
def test_subplots_warnings(self):
# GH 9464
with tm.assert_produces_warning(None):
@@ -258,7 +255,6 @@ def test_subplots_warnings(self):
)
df.plot(subplots=True, layout=(3, 2))
- @pytest.mark.slow
def test_subplots_multiple_axes(self):
# GH 5353, 6970, GH 7069
fig, axes = self.plt.subplots(2, 3)
@@ -358,7 +354,6 @@ def test_subplots_sharex_axes_existing_axes(self):
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
- @pytest.mark.slow
def test_subplots_dup_columns(self):
# GH 10962
df = DataFrame(np.random.rand(5, 5), columns=list("aaaaa"))
@@ -380,7 +375,6 @@ def test_subplots_dup_columns(self):
assert len(ax.lines) == 0
assert len(ax.right_ax.lines) == 5
- @pytest.mark.slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
@@ -391,7 +385,6 @@ def test_bar_log_no_subplots(self):
ax = df.plot.bar(grid=True, log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
- @pytest.mark.slow
def test_bar_log_subplots(self):
expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4])
@@ -402,7 +395,6 @@ def test_bar_log_subplots(self):
tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
- @pytest.mark.slow
def test_boxplot_subplots_return_type(self):
df = self.hist_df
@@ -422,7 +414,6 @@ def test_boxplot_subplots_return_type(self):
check_ax_title=False,
)
- @pytest.mark.slow
def test_df_subplots_patterns_minorticks(self):
# GH 10657
import matplotlib.pyplot as plt
@@ -513,7 +504,6 @@ def test_xlabel_ylabel_dataframe_subplots(
assert all(ax.get_ylabel() == str(new_label) for ax in axes)
assert all(ax.get_xlabel() == str(new_label) for ax in axes)
- @pytest.mark.slow
@pytest.mark.parametrize(
"kwargs",
[
@@ -552,7 +542,6 @@ def test_bar_align_multiple_columns(self, kwargs):
df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
self._check_bar_alignment(df, **kwargs)
- @pytest.mark.slow
@pytest.mark.parametrize(
"kwargs",
[
@@ -568,7 +557,6 @@ def test_bar_align_single_column(self, kwargs):
df = DataFrame(np.random.randn(5))
self._check_bar_alignment(df, **kwargs)
- @pytest.mark.slow
@pytest.mark.parametrize(
"kwargs",
[
@@ -584,7 +572,6 @@ def test_bar_barwidth_position(self, kwargs):
df = DataFrame(np.random.randn(5, 5))
self._check_bar_alignment(df, width=0.9, position=0.2, **kwargs)
- @pytest.mark.slow
def test_bar_barwidth_position_int(self):
# GH 12979
df = DataFrame(np.random.randn(5, 5))
diff --git a/pandas/tests/plotting/test_backend.py b/pandas/tests/plotting/test_backend.py
index 9025f8c361a82..567d159f723a5 100644
--- a/pandas/tests/plotting/test_backend.py
+++ b/pandas/tests/plotting/test_backend.py
@@ -12,6 +12,9 @@
setattr(dummy_backend, "plot", lambda *args, **kwargs: "used_dummy")
+pytestmark = pytest.mark.slow
+
+
@pytest.fixture
def restore_backend():
"""Restore the plotting backend to matplotlib"""
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index 9e1a8d473b9d6..438b8ceafe773 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -1,3 +1,5 @@
+""" Test cases for .boxplot method """
+
import itertools
import string
@@ -12,12 +14,11 @@
import pandas.plotting as plotting
-""" Test cases for .boxplot method """
+pytestmark = pytest.mark.slow
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
- @pytest.mark.slow
def test_boxplot_legacy1(self):
df = DataFrame(
np.random.randn(6, 4),
@@ -42,7 +43,6 @@ def test_boxplot_legacy1(self):
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="indic", notch=1)
- @pytest.mark.slow
def test_boxplot_legacy2(self):
df = DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
@@ -77,13 +77,11 @@ def test_boxplot_legacy2(self):
lines = list(itertools.chain.from_iterable(d.values()))
assert len(ax.get_lines()) == len(lines)
- @pytest.mark.slow
def test_boxplot_return_type_none(self):
# GH 12216; return_type=None & by=None -> axes
result = self.hist_df.boxplot()
assert isinstance(result, self.plt.Axes)
- @pytest.mark.slow
def test_boxplot_return_type_legacy(self):
# API change in https://github.com/pandas-dev/pandas/pull/7096
import matplotlib as mpl # noqa
@@ -111,7 +109,6 @@ def test_boxplot_return_type_legacy(self):
result = df.boxplot(return_type="both")
self._check_box_return_type(result, "both")
- @pytest.mark.slow
def test_boxplot_axis_limits(self):
def _check_ax_limits(col, ax):
y_min, y_max = ax.get_ylim()
@@ -138,13 +135,11 @@ def _check_ax_limits(col, ax):
assert age_ax._sharey == height_ax
assert dummy_ax._sharey is None
- @pytest.mark.slow
def test_boxplot_empty_column(self):
df = DataFrame(np.random.randn(20, 4))
df.loc[:, 0] = np.nan
_check_plot_works(df.boxplot, return_type="axes")
- @pytest.mark.slow
def test_figsize(self):
df = DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
result = df.boxplot(return_type="axes", figsize=(12, 8))
@@ -220,7 +215,6 @@ def test_specified_props_kwd(self, props, expected):
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
- @pytest.mark.slow
def test_boxplot_legacy1(self):
grouped = self.hist_df.groupby(by="gender")
with tm.assert_produces_warning(UserWarning):
@@ -229,7 +223,6 @@ def test_boxplot_legacy1(self):
axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
- @pytest.mark.slow
def test_boxplot_legacy2(self):
tuples = zip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
@@ -241,7 +234,6 @@ def test_boxplot_legacy2(self):
axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
- @pytest.mark.slow
def test_boxplot_legacy3(self):
tuples = zip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
@@ -252,7 +244,6 @@ def test_boxplot_legacy3(self):
axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
- @pytest.mark.slow
def test_grouped_plot_fignums(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
@@ -276,7 +267,6 @@ def test_grouped_plot_fignums(self):
res = df.groupby("gender").hist()
tm.close()
- @pytest.mark.slow
def test_grouped_box_return_type(self):
df = self.hist_df
@@ -311,7 +301,6 @@ def test_grouped_box_return_type(self):
returned = df2.boxplot(by="category", return_type=t)
self._check_box_return_type(returned, t, expected_keys=columns2)
- @pytest.mark.slow
def test_grouped_box_layout(self):
df = self.hist_df
@@ -405,7 +394,6 @@ def test_grouped_box_layout(self):
)
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))
- @pytest.mark.slow
def test_grouped_box_multiple_axes(self):
# GH 6970, GH 7069
df = self.hist_df
diff --git a/pandas/tests/plotting/test_common.py b/pandas/tests/plotting/test_common.py
index af67ed7ec215b..2664dc8e1b090 100644
--- a/pandas/tests/plotting/test_common.py
+++ b/pandas/tests/plotting/test_common.py
@@ -5,6 +5,8 @@
from pandas import DataFrame
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
+pytestmark = pytest.mark.slow
+
@td.skip_if_no_mpl
class TestCommon(TestPlotBase):
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index 583ed040c20d5..ae14318cdaa49 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -31,6 +31,9 @@
dates = pytest.importorskip("matplotlib.dates")
+pytestmark = pytest.mark.slow
+
+
def test_registry_mpl_resets():
# Check that Matplotlib converters are properly reset (see issue #27481)
code = (
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 590758bc01fbb..e047204ba1d60 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -18,6 +18,8 @@
from pandas.tseries.offsets import WeekOfMonth
+pytestmark = pytest.mark.slow
+
@td.skip_if_no_mpl
class TestTSPlot(TestPlotBase):
@@ -43,7 +45,6 @@ def setup_method(self, method):
def teardown_method(self, method):
tm.close()
- @pytest.mark.slow
def test_ts_plot_with_tz(self, tz_aware_fixture):
# GH2877, GH17173, GH31205, GH31580
tz = tz_aware_fixture
@@ -65,7 +66,6 @@ def test_fontsize_set_correctly(self):
for label in ax.get_xticklabels() + ax.get_yticklabels():
assert label.get_fontsize() == 2
- @pytest.mark.slow
def test_frame_inferred(self):
# inferred freq
idx = date_range("1/1/1987", freq="MS", periods=100)
@@ -105,7 +105,6 @@ def test_nonnumeric_exclude(self):
with pytest.raises(TypeError, match=msg):
df["A"].plot()
- @pytest.mark.slow
def test_tsplot(self):
_, ax = self.plt.subplots()
@@ -137,7 +136,6 @@ def test_both_style_and_color(self):
with pytest.raises(ValueError, match=msg):
s.plot(style="b-", color="#000099")
- @pytest.mark.slow
def test_high_freq(self):
freaks = ["ms", "us"]
for freq in freaks:
@@ -154,7 +152,6 @@ def test_get_datevalue(self):
assert get_datevalue(Period(1987, "A"), "M") == Period("1987-12", "M").ordinal
assert get_datevalue("1/1/1987", "D") == Period("1987-1-1", "D").ordinal
- @pytest.mark.slow
def test_ts_plot_format_coord(self):
def check_format_of_first_point(ax, expected_string):
first_line = ax.get_lines()[0]
@@ -179,12 +176,10 @@ def check_format_of_first_point(ax, expected_string):
check_format_of_first_point(ax, "t = 2014-01-01 y = 1.000000")
tm.close()
- @pytest.mark.slow
def test_line_plot_period_series(self):
for s in self.period_ser:
_check_plot_works(s.plot, s.index.freq)
- @pytest.mark.slow
@pytest.mark.parametrize(
"frqncy", ["1S", "3S", "5T", "7H", "4D", "8W", "11M", "3A"]
)
@@ -195,17 +190,14 @@ def test_line_plot_period_mlt_series(self, frqncy):
s = Series(np.random.randn(len(idx)), idx)
_check_plot_works(s.plot, s.index.freq.rule_code)
- @pytest.mark.slow
def test_line_plot_datetime_series(self):
for s in self.datetime_ser:
_check_plot_works(s.plot, s.index.freq.rule_code)
- @pytest.mark.slow
def test_line_plot_period_frame(self):
for df in self.period_df:
_check_plot_works(df.plot, df.index.freq)
- @pytest.mark.slow
@pytest.mark.parametrize(
"frqncy", ["1S", "3S", "5T", "7H", "4D", "8W", "11M", "3A"]
)
@@ -218,13 +210,11 @@ def test_line_plot_period_mlt_frame(self, frqncy):
freq = df.index.asfreq(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
- @pytest.mark.slow
def test_line_plot_datetime_frame(self):
for df in self.datetime_df:
freq = df.index.to_period(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
- @pytest.mark.slow
def test_line_plot_inferred_freq(self):
for ser in self.datetime_ser:
ser = Series(ser.values, Index(np.asarray(ser.index)))
@@ -241,7 +231,6 @@ def test_fake_inferred_business(self):
ts.plot(ax=ax)
assert not hasattr(ax, "freq")
- @pytest.mark.slow
def test_plot_offset_freq(self):
ser = tm.makeTimeSeries()
_check_plot_works(ser.plot)
@@ -250,13 +239,11 @@ def test_plot_offset_freq(self):
ser = Series(np.random.randn(len(dr)), index=dr)
_check_plot_works(ser.plot)
- @pytest.mark.slow
def test_plot_multiple_inferred_freq(self):
dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(2000, 1, 11)])
ser = Series(np.random.randn(len(dr)), index=dr)
_check_plot_works(ser.plot)
- @pytest.mark.slow
def test_uhf(self):
import pandas.plotting._matplotlib.converter as conv
@@ -275,7 +262,6 @@ def test_uhf(self):
if len(rs):
assert xp == rs
- @pytest.mark.slow
def test_irreg_hf(self):
idx = date_range("2012-6-22 21:59:51", freq="S", periods=100)
df = DataFrame(np.random.randn(len(idx), 2), index=idx)
@@ -322,7 +308,6 @@ def test_business_freq(self):
idx = ax.get_lines()[0].get_xdata()
assert PeriodIndex(data=idx).freqstr == "B"
- @pytest.mark.slow
def test_business_freq_convert(self):
bts = tm.makeTimeSeries(300).asfreq("BM")
ts = bts.to_period("M")
@@ -360,7 +345,6 @@ def test_dataframe(self):
idx = ax.get_lines()[0].get_xdata()
tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx))
- @pytest.mark.slow
def test_axis_limits(self):
def _test(ax):
xlim = ax.get_xlim()
@@ -411,7 +395,6 @@ def test_get_finder(self):
assert conv.get_finder(to_offset("A")) == conv._annual_finder
assert conv.get_finder(to_offset("W")) == conv._daily_finder
- @pytest.mark.slow
def test_finder_daily(self):
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
@@ -434,7 +417,6 @@ def test_finder_daily(self):
assert rs1 == xpl1
assert rs2 == xpl2
- @pytest.mark.slow
def test_finder_quarterly(self):
yrs = [3.5, 11]
@@ -457,7 +439,6 @@ def test_finder_quarterly(self):
assert rs1 == xpl1
assert rs2 == xpl2
- @pytest.mark.slow
def test_finder_monthly(self):
yrs = [1.15, 2.5, 4, 11]
@@ -490,7 +471,6 @@ def test_finder_monthly_long(self):
xp = Period("1989Q1", "M").ordinal
assert rs == xp
- @pytest.mark.slow
def test_finder_annual(self):
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
xp = [Period(x, freq="A").ordinal for x in xp]
@@ -506,7 +486,6 @@ def test_finder_annual(self):
assert rs == xp
- @pytest.mark.slow
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
rng = date_range("1/1/1999", freq="Min", periods=nminutes)
@@ -531,7 +510,6 @@ def test_finder_hourly(self):
assert rs == xp
- @pytest.mark.slow
def test_gaps(self):
ts = tm.makeTimeSeries()
ts[5:25] = np.nan
@@ -586,7 +564,6 @@ def test_gaps(self):
mask = data.mask
assert mask[2:5, 1].all()
- @pytest.mark.slow
def test_gap_upsample(self):
low = tm.makeTimeSeries()
low[5:25] = np.nan
@@ -609,7 +586,6 @@ def test_gap_upsample(self):
mask = data.mask
assert mask[5:25, 1].all()
- @pytest.mark.slow
def test_secondary_y(self):
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
@@ -638,7 +614,6 @@ def test_secondary_y(self):
assert hasattr(ax2, "left_ax")
assert not hasattr(ax2, "right_ax")
- @pytest.mark.slow
def test_secondary_y_ts(self):
idx = date_range("1/1/2000", periods=10)
ser = Series(np.random.randn(10), idx)
@@ -664,7 +639,6 @@ def test_secondary_y_ts(self):
ax2 = ser.plot(secondary_y=True)
assert ax.get_yaxis().get_visible()
- @pytest.mark.slow
@td.skip_if_no_scipy
def test_secondary_kde(self):
@@ -676,7 +650,6 @@ def test_secondary_kde(self):
axes = fig.get_axes()
assert axes[1].get_yaxis().get_ticks_position() == "right"
- @pytest.mark.slow
def test_secondary_bar(self):
ser = Series(np.random.randn(10))
fig, ax = self.plt.subplots()
@@ -684,7 +657,6 @@ def test_secondary_bar(self):
axes = fig.get_axes()
assert axes[1].get_yaxis().get_ticks_position() == "right"
- @pytest.mark.slow
def test_secondary_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=["a", "b", "c"])
axes = df.plot(secondary_y=["a", "c"], subplots=True)
@@ -692,7 +664,6 @@ def test_secondary_frame(self):
assert axes[1].get_yaxis().get_ticks_position() == self.default_tick_position
assert axes[2].get_yaxis().get_ticks_position() == "right"
- @pytest.mark.slow
def test_secondary_bar_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=["a", "b", "c"])
axes = df.plot(kind="bar", secondary_y=["a", "c"], subplots=True)
@@ -722,7 +693,6 @@ def test_mixed_freq_regular_first(self):
assert left <= pidx[0].ordinal
assert right >= pidx[-1].ordinal
- @pytest.mark.slow
def test_mixed_freq_irregular_first(self):
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
@@ -753,7 +723,6 @@ def test_mixed_freq_regular_first_df(self):
assert left <= pidx[0].ordinal
assert right >= pidx[-1].ordinal
- @pytest.mark.slow
def test_mixed_freq_irregular_first_df(self):
# GH 9852
s1 = tm.makeTimeSeries().to_frame()
@@ -779,7 +748,6 @@ def test_mixed_freq_hf_first(self):
for line in ax.get_lines():
assert PeriodIndex(data=line.get_xdata()).freq == "D"
- @pytest.mark.slow
def test_mixed_freq_alignment(self):
ts_ind = date_range("2012-01-01 13:00", "2012-01-02", freq="H")
ts_data = np.random.randn(12)
@@ -793,7 +761,6 @@ def test_mixed_freq_alignment(self):
assert ax.lines[0].get_xdata()[0] == ax.lines[1].get_xdata()[0]
- @pytest.mark.slow
def test_mixed_freq_lf_first(self):
idxh = date_range("1/1/1999", periods=365, freq="D")
@@ -873,7 +840,6 @@ def test_nat_handling(self):
assert s.index.min() <= Series(xdata).min()
assert Series(xdata).max() <= s.index.max()
- @pytest.mark.slow
def test_to_weekly_resampling(self):
idxh = date_range("1/1/1999", periods=52, freq="W")
idxl = date_range("1/1/1999", periods=12, freq="M")
@@ -885,7 +851,6 @@ def test_to_weekly_resampling(self):
for line in ax.get_lines():
assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq
- @pytest.mark.slow
def test_from_weekly_resampling(self):
idxh = date_range("1/1/1999", periods=52, freq="W")
idxl = date_range("1/1/1999", periods=12, freq="M")
@@ -909,7 +874,6 @@ def test_from_weekly_resampling(self):
tm.assert_numpy_array_equal(xdata, expected_h)
tm.close()
- @pytest.mark.slow
def test_from_resampling_area_line_mixed(self):
idxh = date_range("1/1/1999", periods=52, freq="W")
idxl = date_range("1/1/1999", periods=12, freq="M")
@@ -1001,7 +965,6 @@ def test_from_resampling_area_line_mixed(self):
expected_y += low[i].values
tm.assert_numpy_array_equal(lines.get_ydata(orig=False), expected_y)
- @pytest.mark.slow
def test_mixed_freq_second_millisecond(self):
# GH 7772, GH 7760
idxh = date_range("2014-07-01 09:00", freq="S", periods=50)
@@ -1025,7 +988,6 @@ def test_mixed_freq_second_millisecond(self):
for line in ax.get_lines():
assert PeriodIndex(data=line.get_xdata()).freq == "L"
- @pytest.mark.slow
def test_irreg_dtypes(self):
# date
idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]
@@ -1046,7 +1008,6 @@ def test_irreg_dtypes(self):
_, ax = self.plt.subplots()
_check_plot_works(df.plot, ax=ax)
- @pytest.mark.slow
def test_time(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
@@ -1071,7 +1032,6 @@ def test_time(self):
xp = time(h, m, s).strftime("%H:%M")
assert xp == rs
- @pytest.mark.slow
def test_time_change_xlim(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
@@ -1113,7 +1073,6 @@ def test_time_change_xlim(self):
xp = time(h, m, s).strftime("%H:%M")
assert xp == rs
- @pytest.mark.slow
def test_time_musec(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
@@ -1145,7 +1104,6 @@ def test_time_musec(self):
xp = time(h, m, s, us).strftime("%H:%M")
assert xp == rs
- @pytest.mark.slow
def test_secondary_upsample(self):
idxh = date_range("1/1/1999", periods=365, freq="D")
idxl = date_range("1/1/1999", periods=12, freq="M")
@@ -1161,7 +1119,6 @@ def test_secondary_upsample(self):
for line in ax.left_ax.get_lines():
assert PeriodIndex(line.get_xdata()).freq == "D"
- @pytest.mark.slow
def test_secondary_legend(self):
fig = self.plt.figure()
ax = fig.add_subplot(211)
@@ -1263,7 +1220,6 @@ def test_format_date_axis(self):
if len(line.get_text()) > 0:
assert line.get_rotation() == 30
- @pytest.mark.slow
def test_ax_plot(self):
x = date_range(start="2012-01-02", periods=10, freq="D")
y = list(range(len(x)))
@@ -1271,7 +1227,6 @@ def test_ax_plot(self):
lines = ax.plot(x, y, label="Y")
tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x)
- @pytest.mark.slow
def test_mpl_nopandas(self):
dates = [date(2008, 12, 31), date(2009, 1, 31)]
values1 = np.arange(10.0, 11.0, 0.5)
@@ -1290,7 +1245,6 @@ def test_mpl_nopandas(self):
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp)
- @pytest.mark.slow
def test_irregular_ts_shared_ax_xlim(self):
# GH 2960
from pandas.plotting._matplotlib.converter import DatetimeConverter
@@ -1308,7 +1262,6 @@ def test_irregular_ts_shared_ax_xlim(self):
assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax)
assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax)
- @pytest.mark.slow
def test_secondary_y_non_ts_xlim(self):
# GH 3490 - non-timeseries with secondary y
index_1 = [1, 2, 3, 4]
@@ -1325,7 +1278,6 @@ def test_secondary_y_non_ts_xlim(self):
assert left_before >= left_after
assert right_before < right_after
- @pytest.mark.slow
def test_secondary_y_regular_ts_xlim(self):
# GH 3490 - regular-timeseries with secondary y
index_1 = date_range(start="2000-01-01", periods=4, freq="D")
@@ -1342,7 +1294,6 @@ def test_secondary_y_regular_ts_xlim(self):
assert left_before >= left_after
assert right_before < right_after
- @pytest.mark.slow
def test_secondary_y_mixed_freq_ts_xlim(self):
# GH 3490 - mixed frequency timeseries with secondary y
rng = date_range("2000-01-01", periods=10000, freq="min")
@@ -1358,7 +1309,6 @@ def test_secondary_y_mixed_freq_ts_xlim(self):
assert left_before == left_after
assert right_before == right_after
- @pytest.mark.slow
def test_secondary_y_irregular_ts_xlim(self):
# GH 3490 - irregular-timeseries with secondary y
from pandas.plotting._matplotlib.converter import DatetimeConverter
@@ -1452,7 +1402,6 @@ def test_hist(self):
_, ax = self.plt.subplots()
ax.hist([x, x], weights=[w1, w2])
- @pytest.mark.slow
def test_overlapping_datetime(self):
# GB 6608
s1 = Series(
diff --git a/pandas/tests/plotting/test_groupby.py b/pandas/tests/plotting/test_groupby.py
index 7ed29507fe0f4..f73ceee577a18 100644
--- a/pandas/tests/plotting/test_groupby.py
+++ b/pandas/tests/plotting/test_groupby.py
@@ -11,6 +11,8 @@
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase
+pytestmark = pytest.mark.slow
+
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index ab0024559333e..f700b2934cd8c 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -9,6 +9,8 @@
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
+pytestmark = pytest.mark.slow
+
@td.skip_if_no_mpl
class TestSeriesPlots(TestPlotBase):
@@ -21,7 +23,6 @@ def setup_method(self, method):
self.ts = tm.makeTimeSeries()
self.ts.name = "ts"
- @pytest.mark.slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
@@ -45,13 +46,11 @@ def test_hist_legacy(self):
with pytest.raises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
- @pytest.mark.slow
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
assert len(ax.patches) == 2
- @pytest.mark.slow
def test_hist_layout(self):
df = self.hist_df
with pytest.raises(ValueError):
@@ -60,7 +59,6 @@ def test_hist_layout(self):
with pytest.raises(ValueError):
df.height.hist(layout=[1, 1])
- @pytest.mark.slow
def test_hist_layout_with_by(self):
df = self.hist_df
@@ -98,7 +96,6 @@ def test_hist_layout_with_by(self):
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
- @pytest.mark.slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import gcf, subplot
@@ -112,13 +109,11 @@ def test_hist_no_overlap(self):
axes = fig.axes
assert len(axes) == 2
- @pytest.mark.slow
def test_hist_by_no_extra_plots(self):
df = self.hist_df
axes = df.height.hist(by=df.gender) # noqa
assert len(self.plt.get_fignums()) == 1
- @pytest.mark.slow
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure
@@ -170,7 +165,6 @@ def test_hist_with_legend_raises(self, by):
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
- @pytest.mark.slow
def test_hist_df_legacy(self):
from matplotlib.patches import Rectangle
@@ -256,7 +250,6 @@ def test_hist_df_legacy(self):
with pytest.raises(AttributeError):
ser.hist(foo="bar")
- @pytest.mark.slow
def test_hist_non_numerical_or_datetime_raises(self):
# gh-10444, GH32590
df = DataFrame(
@@ -282,7 +275,6 @@ def test_hist_non_numerical_or_datetime_raises(self):
with pytest.raises(ValueError, match=msg):
df_o.hist()
- @pytest.mark.slow
def test_hist_layout(self):
df = DataFrame(np.random.randn(100, 2))
df[2] = to_datetime(
@@ -321,7 +313,6 @@ def test_hist_layout(self):
with pytest.raises(ValueError):
df.hist(layout=(-1, -1))
- @pytest.mark.slow
# GH 9351
def test_tight_layout(self):
df = DataFrame(np.random.randn(100, 2))
@@ -444,7 +435,6 @@ def test_hist_with_legend_raises(self, by, column):
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
- @pytest.mark.slow
def test_grouped_hist_legacy(self):
from matplotlib.patches import Rectangle
@@ -514,7 +504,6 @@ def test_grouped_hist_legacy(self):
with pytest.raises(ValueError, match=msg):
df.hist(by="C", figsize="default")
- @pytest.mark.slow
def test_grouped_hist_legacy2(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
@@ -528,7 +517,6 @@ def test_grouped_hist_legacy2(self):
assert len(self.plt.get_fignums()) == 2
tm.close()
- @pytest.mark.slow
def test_grouped_hist_layout(self):
df = self.hist_df
msg = "Layout of 1x1 must be larger than required size 2"
@@ -583,7 +571,6 @@ def test_grouped_hist_layout(self):
axes = df.hist(column=["height", "weight", "category"])
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
- @pytest.mark.slow
def test_grouped_hist_multiple_axes(self):
# GH 6970, GH 7069
df = self.hist_df
@@ -603,7 +590,6 @@ def test_grouped_hist_multiple_axes(self):
# pass different number of axes from required
axes = df.hist(column="height", ax=axes)
- @pytest.mark.slow
def test_axis_share_x(self):
df = self.hist_df
# GH4089
@@ -617,7 +603,6 @@ def test_axis_share_x(self):
assert not ax1._shared_y_axes.joined(ax1, ax2)
assert not ax2._shared_y_axes.joined(ax1, ax2)
- @pytest.mark.slow
def test_axis_share_y(self):
df = self.hist_df
ax1, ax2 = df.hist(column="height", by=df.gender, sharey=True)
@@ -630,7 +615,6 @@ def test_axis_share_y(self):
assert not ax1._shared_x_axes.joined(ax1, ax2)
assert not ax2._shared_x_axes.joined(ax1, ax2)
- @pytest.mark.slow
def test_axis_share_xy(self):
df = self.hist_df
ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True, sharey=True)
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index f37d83cd0783e..1208100ed2dce 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -11,6 +11,8 @@
import pandas.plotting as plotting
+pytestmark = pytest.mark.slow
+
@td.skip_if_mpl
def test_import_error_message():
@@ -66,7 +68,6 @@ def setup_method(self, method):
self.ts = tm.makeTimeSeries()
self.ts.name = "ts"
- @pytest.mark.slow
def test_autocorrelation_plot(self):
from pandas.plotting import autocorrelation_plot
@@ -76,14 +77,12 @@ def test_autocorrelation_plot(self):
ax = autocorrelation_plot(self.ts, label="Test")
self._check_legend_labels(ax, labels=["Test"])
- @pytest.mark.slow
def test_lag_plot(self):
from pandas.plotting import lag_plot
_check_plot_works(lag_plot, series=self.ts)
_check_plot_works(lag_plot, series=self.ts, lag=5)
- @pytest.mark.slow
def test_bootstrap_plot(self):
from pandas.plotting import bootstrap_plot
@@ -127,7 +126,6 @@ def test_scatter_matrix_axis(self):
self._check_text_labels(axes0_labels, expected)
self._check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
- @pytest.mark.slow
def test_andrews_curves(self, iris):
from matplotlib import cm
@@ -203,7 +201,6 @@ def test_andrews_curves(self, iris):
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, linecolors=colors)
- @pytest.mark.slow
def test_parallel_coordinates(self, iris):
from matplotlib import cm
@@ -277,7 +274,6 @@ def test_parallel_coordinates_with_sorted_labels(self):
# labels and colors are ordered strictly increasing
assert prev[1] < nxt[1] and prev[0] < nxt[0]
- @pytest.mark.slow
def test_radviz(self, iris):
from matplotlib import cm
@@ -310,7 +306,6 @@ def test_radviz(self, iris):
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, facecolors=colors)
- @pytest.mark.slow
def test_subplot_titles(self, iris):
df = iris.drop("Name", axis=1).head()
# Use the column names as the subplot titles
@@ -411,7 +406,6 @@ def test_get_standard_colors_no_appending(self):
p = df.A.plot.bar(figsize=(16, 7), color=color_list)
assert p.patches[1].get_facecolor() == p.patches[17].get_facecolor()
- @pytest.mark.slow
def test_dictionary_color(self):
# issue-8193
# Test plot color dictionary format
@@ -432,7 +426,6 @@ def test_dictionary_color(self):
colors = [rect.get_color() for rect in ax.get_lines()[0:2]]
assert all(color == expected[index] for index, color in enumerate(colors))
- @pytest.mark.slow
def test_has_externally_shared_axis_x_axis(self):
# GH33819
# Test _has_externally_shared_axis() works for x-axis
@@ -458,7 +451,6 @@ def test_has_externally_shared_axis_x_axis(self):
assert func(plots[0][2], "x")
assert not func(plots[0][3], "x")
- @pytest.mark.slow
def test_has_externally_shared_axis_y_axis(self):
# GH33819
# Test _has_externally_shared_axis() works for y-axis
@@ -484,7 +476,6 @@ def test_has_externally_shared_axis_y_axis(self):
assert func(plots[2][0], "y")
assert not func(plots[3][0], "y")
- @pytest.mark.slow
def test_has_externally_shared_axis_invalid_compare_axis(self):
# GH33819
# Test _has_externally_shared_axis() raises an exception when
@@ -502,7 +493,6 @@ def test_has_externally_shared_axis_invalid_compare_axis(self):
with pytest.raises(ValueError, match=msg):
func(plots[0][0], "z")
- @pytest.mark.slow
def test_externally_shared_axes(self):
# Example from GH33819
# Create data
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index b8dd2ada87506..9da2336fb9342 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -16,6 +16,8 @@
import pandas.plotting as plotting
+pytestmark = pytest.mark.slow
+
@td.skip_if_no_mpl
class TestSeriesPlots(TestPlotBase):
@@ -34,7 +36,6 @@ def setup_method(self, method):
self.iseries = tm.makePeriodSeries()
self.iseries.name = "iseries"
- @pytest.mark.slow
def test_plot(self):
_check_plot_works(self.ts.plot, label="foo")
_check_plot_works(self.ts.plot, use_index=False)
@@ -70,7 +71,6 @@ def test_plot(self):
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
- @pytest.mark.slow
def test_plot_figsize_and_title(self):
# figsize and title
_, ax = self.plt.subplots()
@@ -222,7 +222,6 @@ def test_line_use_index_false(self):
label2 = ax2.get_xlabel()
assert label2 == ""
- @pytest.mark.slow
def test_bar_log(self):
expected = np.array([1e-1, 1e0, 1e1, 1e2, 1e3, 1e4])
@@ -256,7 +255,6 @@ def test_bar_log(self):
tm.assert_almost_equal(res[1], ymax)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
- @pytest.mark.slow
def test_bar_ignore_index(self):
df = Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
_, ax = self.plt.subplots()
@@ -311,7 +309,6 @@ def test_unsorted_index_xlim(self):
assert xmin <= np.nanmin(lines[0].get_data(orig=False)[0])
assert xmax >= np.nanmax(lines[0].get_data(orig=False)[0])
- @pytest.mark.slow
def test_pie_series(self):
# if sum of values is less than 1.0, pie handle them as rate and draw
# semicircle.
@@ -368,14 +365,12 @@ def test_pie_nan(self):
result = [x.get_text() for x in ax.texts]
assert result == expected
- @pytest.mark.slow
def test_hist_df_kwargs(self):
df = DataFrame(np.random.randn(10, 2))
_, ax = self.plt.subplots()
ax = df.plot.hist(bins=5, ax=ax)
assert len(ax.patches) == 10
- @pytest.mark.slow
def test_hist_df_with_nonnumerics(self):
# GH 9853
with tm.RNGContext(1):
@@ -389,7 +384,6 @@ def test_hist_df_with_nonnumerics(self):
ax = df.plot.hist(ax=ax) # bins=10
assert len(ax.patches) == 40
- @pytest.mark.slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
@@ -413,13 +407,11 @@ def test_hist_legacy(self):
with pytest.raises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
- @pytest.mark.slow
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
assert len(ax.patches) == 2
- @pytest.mark.slow
def test_hist_layout(self):
df = self.hist_df
with pytest.raises(ValueError):
@@ -428,7 +420,6 @@ def test_hist_layout(self):
with pytest.raises(ValueError):
df.height.hist(layout=[1, 1])
- @pytest.mark.slow
def test_hist_layout_with_by(self):
df = self.hist_df
@@ -464,7 +455,6 @@ def test_hist_layout_with_by(self):
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
- @pytest.mark.slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import gcf, subplot
@@ -478,7 +468,6 @@ def test_hist_no_overlap(self):
axes = fig.axes
assert len(axes) == 2
- @pytest.mark.slow
def test_hist_secondary_legend(self):
# GH 9610
df = DataFrame(np.random.randn(30, 4), columns=list("abcd"))
@@ -517,7 +506,6 @@ def test_hist_secondary_legend(self):
assert ax.get_yaxis().get_visible()
tm.close()
- @pytest.mark.slow
def test_df_series_secondary_legend(self):
# GH 9779
df = DataFrame(np.random.randn(30, 3), columns=list("abc"))
@@ -581,7 +569,6 @@ def test_df_series_secondary_legend(self):
assert ax.get_yaxis().get_visible()
tm.close()
- @pytest.mark.slow
@pytest.mark.parametrize(
"input_logy, expected_scale", [(True, "log"), ("sym", "symlog")]
)
@@ -597,14 +584,12 @@ def test_secondary_logy(self, input_logy, expected_scale):
assert ax1.get_yscale() == expected_scale
assert ax2.get_yscale() == expected_scale
- @pytest.mark.slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(np.random.randn(2))
with pytest.raises(ValueError):
_, ax = self.plt.subplots()
x.plot(style="k--", color="k", ax=ax)
- @pytest.mark.slow
@td.skip_if_no_scipy
def test_hist_kde(self):
@@ -627,7 +612,6 @@ def test_hist_kde(self):
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [""] * len(ylabels))
- @pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_kwargs(self):
sample_points = np.linspace(-100, 100, 20)
@@ -641,7 +625,6 @@ def test_kde_kwargs(self):
self._check_ax_scales(ax, yaxis="log")
self._check_text_labels(ax.yaxis.get_label(), "Density")
- @pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_missing_vals(self):
s = Series(np.random.uniform(size=50))
@@ -651,7 +634,6 @@ def test_kde_missing_vals(self):
# gh-14821: check if the values have any missing values
assert any(~np.isnan(axes.lines[0].get_xdata()))
- @pytest.mark.slow
def test_hist_kwargs(self):
_, ax = self.plt.subplots()
ax = self.ts.plot.hist(bins=5, ax=ax)
@@ -668,7 +650,6 @@ def test_hist_kwargs(self):
ax = self.ts.plot.hist(align="left", stacked=True, ax=ax)
tm.close()
- @pytest.mark.slow
@td.skip_if_no_scipy
def test_hist_kde_color(self):
_, ax = self.plt.subplots()
@@ -684,7 +665,6 @@ def test_hist_kde_color(self):
assert len(lines) == 1
self._check_colors(lines, ["r"])
- @pytest.mark.slow
def test_boxplot_series(self):
_, ax = self.plt.subplots()
ax = self.ts.plot.box(logy=True, ax=ax)
@@ -694,7 +674,6 @@ def test_boxplot_series(self):
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [""] * len(ylabels))
- @pytest.mark.slow
def test_kind_both_ways(self):
s = Series(range(3))
kinds = (
@@ -708,7 +687,6 @@ def test_kind_both_ways(self):
getattr(s.plot, kind)()
self.plt.close()
- @pytest.mark.slow
def test_invalid_plot_data(self):
s = Series(list("abcd"))
_, ax = self.plt.subplots()
@@ -718,7 +696,6 @@ def test_invalid_plot_data(self):
with pytest.raises(TypeError, match=msg):
s.plot(kind=kind, ax=ax)
- @pytest.mark.slow
def test_valid_object_plot(self):
s = Series(range(10), dtype=object)
for kind in plotting.PlotAccessor._common_kinds:
@@ -738,7 +715,6 @@ def test_invalid_kind(self):
with pytest.raises(ValueError):
s.plot(kind="aasdf")
- @pytest.mark.slow
def test_dup_datetime_index_plot(self):
dr1 = date_range("1/1/2009", periods=4)
dr2 = date_range("1/2/2009", periods=4)
@@ -767,7 +743,6 @@ def test_errorbar_asymmetrical(self):
tm.close()
- @pytest.mark.slow
def test_errorbar_plot(self):
s = Series(np.arange(10), name="x")
@@ -813,7 +788,6 @@ def test_table(self):
_check_plot_works(self.series.plot, table=True)
_check_plot_works(self.series.plot, table=self.series)
- @pytest.mark.slow
def test_series_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(
@@ -821,7 +795,6 @@ def test_series_grid_settings(self):
plotting.PlotAccessor._series_kinds + plotting.PlotAccessor._common_kinds,
)
- @pytest.mark.slow
def test_standard_colors(self):
from pandas.plotting._matplotlib.style import get_standard_colors
@@ -838,7 +811,6 @@ def test_standard_colors(self):
result = get_standard_colors(3, color=[c])
assert result == [c] * 3
- @pytest.mark.slow
def test_standard_colors_all(self):
import matplotlib.colors as colors
diff --git a/pandas/tests/plotting/test_style.py b/pandas/tests/plotting/test_style.py
index 665bda15724fd..3c48eeaccbf34 100644
--- a/pandas/tests/plotting/test_style.py
+++ b/pandas/tests/plotting/test_style.py
@@ -5,6 +5,8 @@
pytest.importorskip("matplotlib")
from pandas.plotting._matplotlib.style import get_standard_colors
+pytestmark = pytest.mark.slow
+
class TestGetStandardColors:
@pytest.mark.parametrize(
| - [ ] xref https://github.com/pandas-dev/pandas/pull/37735#issuecomment-732530791
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Marked all plotting tests as slow globally at the top of each module. | https://api.github.com/repos/pandas-dev/pandas/pulls/38079 | 2020-11-26T04:45:52Z | 2020-11-28T17:36:37Z | 2020-11-28T17:36:37Z | 2020-11-30T03:11:16Z |
CLN: fix flake8 C406, C409, and some of C408 | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 22f002e6cb79a..6ce63ff8badca 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -486,7 +486,7 @@ def setup(self):
tmp2 = (np.random.random(10000) * 10.0).astype(np.float32)
tmp = np.concatenate((tmp1, tmp2))
arr = np.repeat(tmp, 10)
- self.df = DataFrame(dict(a=arr, b=arr))
+ self.df = DataFrame({"a": arr, "b": arr})
def time_sum(self):
self.df.groupby(["a"])["b"].sum()
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 12682a68fe177..a0ec6f96042fc 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -472,8 +472,8 @@ def index_with_missing(request):
if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]:
# For setting missing values in the top level of MultiIndex
vals = ind.tolist()
- vals[0] = tuple([None]) + vals[0][1:]
- vals[-1] = tuple([None]) + vals[-1][1:]
+ vals[0] = (None,) + vals[0][1:]
+ vals[-1] = (None,) + vals[-1][1:]
return MultiIndex.from_tuples(vals)
else:
vals[0] = None
diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py
index e48a42599a2a0..b6713bc760c5e 100644
--- a/pandas/core/indexers.py
+++ b/pandas/core/indexers.py
@@ -105,7 +105,7 @@ def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool:
return True
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
- indexer = tuple([indexer])
+ indexer = (indexer,)
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index b5900ead246f3..32792ad1796f8 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3558,7 +3558,7 @@ def _reindex_non_unique(self, target):
cur_labels = self.take(indexer[check]).values
cur_indexer = ensure_int64(length[check])
- new_labels = np.empty(tuple([len(indexer)]), dtype=object)
+ new_labels = np.empty((len(indexer),), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index f6d14a1c1503c..4874aab04738b 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1934,7 +1934,7 @@ def _align_series(self, indexer, ser: "Series", multiindex_indexer: bool = False
to the locations selected by `indexer`
"""
if isinstance(indexer, (slice, np.ndarray, list, Index)):
- indexer = tuple([indexer])
+ indexer = (indexer,)
if isinstance(indexer, tuple):
@@ -2073,7 +2073,7 @@ def __getitem__(self, key):
# we could have a convertible item here (e.g. Timestamp)
if not is_list_like_indexer(key):
- key = tuple([key])
+ key = (key,)
else:
raise ValueError("Invalid call for scalar access (getting)!")
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 760765e3a20e6..57a84ed2bc3a4 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1544,7 +1544,7 @@ def __init__(
)
self.axes = [axis]
- self.blocks = tuple([block])
+ self.blocks = (block,)
@classmethod
def from_blocks(
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 1e4c0e07de403..d493ac0a8c051 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -368,7 +368,7 @@ def _init_dict(self, data, index=None, dtype=None):
values = na_value_for_dtype(dtype)
keys = index
else:
- keys, values = tuple([]), []
+ keys, values = tuple(), []
# Input is now list-like, so rely on "standard" construction:
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index e8b61c3c40291..d97ba6183c955 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -873,30 +873,26 @@ def __init__(self):
(255, np.dtype(np.float64)),
]
)
- self.DTYPE_MAP_XML = dict(
- [
- (32768, np.dtype(np.uint8)), # Keys to GSO
- (65526, np.dtype(np.float64)),
- (65527, np.dtype(np.float32)),
- (65528, np.dtype(np.int32)),
- (65529, np.dtype(np.int16)),
- (65530, np.dtype(np.int8)),
- ]
- )
+ self.DTYPE_MAP_XML = {
+ 32768: np.dtype(np.uint8), # Keys to GSO
+ 65526: np.dtype(np.float64),
+ 65527: np.dtype(np.float32),
+ 65528: np.dtype(np.int32),
+ 65529: np.dtype(np.int16),
+ 65530: np.dtype(np.int8),
+ }
# error: Argument 1 to "list" has incompatible type "str";
# expected "Iterable[int]" [arg-type]
self.TYPE_MAP = list(range(251)) + list("bhlfd") # type: ignore[arg-type]
- self.TYPE_MAP_XML = dict(
- [
- # Not really a Q, unclear how to handle byteswap
- (32768, "Q"),
- (65526, "d"),
- (65527, "f"),
- (65528, "l"),
- (65529, "h"),
- (65530, "b"),
- ]
- )
+ self.TYPE_MAP_XML = {
+ # Not really a Q, unclear how to handle byteswap
+ 32768: "Q",
+ 65526: "d",
+ 65527: "f",
+ 65528: "l",
+ 65529: "h",
+ 65530: "b",
+ }
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
@@ -3138,24 +3134,22 @@ def _write_map(self) -> None:
all blocks have been written.
"""
if not self._map:
- self._map = dict(
- (
- ("stata_data", 0),
- ("map", self.handles.handle.tell()),
- ("variable_types", 0),
- ("varnames", 0),
- ("sortlist", 0),
- ("formats", 0),
- ("value_label_names", 0),
- ("variable_labels", 0),
- ("characteristics", 0),
- ("data", 0),
- ("strls", 0),
- ("value_labels", 0),
- ("stata_data_close", 0),
- ("end-of-file", 0),
- )
- )
+ self._map = {
+ "stata_data": 0,
+ "map": self.handles.handle.tell(),
+ "variable_types": 0,
+ "varnames": 0,
+ "sortlist": 0,
+ "formats": 0,
+ "value_label_names": 0,
+ "variable_labels": 0,
+ "characteristics": 0,
+ "data": 0,
+ "strls": 0,
+ "value_labels": 0,
+ "stata_data_close": 0,
+ "end-of-file": 0,
+ }
# Move to start of map
self.handles.handle.seek(self._map["map"])
bio = BytesIO()
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index 35ffb0a246e25..a3d30cf0bc3c6 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -1288,8 +1288,8 @@ def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
("seconds", 2),
("microseconds", 5),
]
- for i, kwd in enumerate(relative_kwargs):
- off = DateOffset(**dict([kwd]))
+ for i, (unit, value) in enumerate(relative_kwargs):
+ off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
diff --git a/pandas/tests/base/test_conversion.py b/pandas/tests/base/test_conversion.py
index 63280f5ccf8cd..a6fdb82e48197 100644
--- a/pandas/tests/base/test_conversion.py
+++ b/pandas/tests/base/test_conversion.py
@@ -109,7 +109,7 @@ def test_iterable_map(self, index_or_series, dtype, rdtype):
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
- rdtype = tuple([rdtype])
+ rdtype = (rdtype,)
assert result in rdtype
@pytest.mark.parametrize(
diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py
index f080d4b8bcc36..15952f36b0fae 100644
--- a/pandas/tests/frame/apply/test_frame_apply.py
+++ b/pandas/tests/frame/apply/test_frame_apply.py
@@ -562,11 +562,9 @@ def test_apply_dict(self):
# GH 8735
A = DataFrame([["foo", "bar"], ["spam", "eggs"]])
- A_dicts = Series(
- [dict([(0, "foo"), (1, "spam")]), dict([(0, "bar"), (1, "eggs")])]
- )
+ A_dicts = Series([{0: "foo", 1: "spam"}, {0: "bar", 1: "eggs"}])
B = DataFrame([[0, 1], [2, 3]])
- B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
+ B_dicts = Series([{0: 0, 1: 2}, {0: 1, 1: 3}])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
@@ -1221,7 +1219,7 @@ def test_agg_reduce(self, axis, float_frame):
tm.assert_frame_equal(result, expected)
# dict input with scalars
- func = dict([(name1, "mean"), (name2, "sum")])
+ func = {name1: "mean", name2: "sum"}
result = float_frame.agg(func, axis=axis)
expected = Series(
[
@@ -1233,7 +1231,7 @@ def test_agg_reduce(self, axis, float_frame):
tm.assert_series_equal(result, expected)
# dict input with lists
- func = dict([(name1, ["mean"]), (name2, ["sum"])])
+ func = {name1: ["mean"], name2: ["sum"]}
result = float_frame.agg(func, axis=axis)
expected = DataFrame(
{
@@ -1249,33 +1247,25 @@ def test_agg_reduce(self, axis, float_frame):
tm.assert_frame_equal(result, expected)
# dict input with lists with multiple
- func = dict([(name1, ["mean", "sum"]), (name2, ["sum", "max"])])
+ func = {name1: ["mean", "sum"], name2: ["sum", "max"]}
result = float_frame.agg(func, axis=axis)
expected = pd.concat(
- dict(
- [
- (
- name1,
- Series(
- [
- float_frame.loc(other_axis)[name1].mean(),
- float_frame.loc(other_axis)[name1].sum(),
- ],
- index=["mean", "sum"],
- ),
- ),
- (
- name2,
- Series(
- [
- float_frame.loc(other_axis)[name2].sum(),
- float_frame.loc(other_axis)[name2].max(),
- ],
- index=["sum", "max"],
- ),
- ),
- ]
- ),
+ {
+ name1: Series(
+ [
+ float_frame.loc(other_axis)[name1].mean(),
+ float_frame.loc(other_axis)[name1].sum(),
+ ],
+ index=["mean", "sum"],
+ ),
+ name2: Series(
+ [
+ float_frame.loc(other_axis)[name2].sum(),
+ float_frame.loc(other_axis)[name2].max(),
+ ],
+ index=["sum", "max"],
+ ),
+ },
axis=1,
)
expected = expected.T if axis in {1, "columns"} else expected
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index ff9646d45c0ac..e33009f4597f0 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -1503,10 +1503,29 @@ def test_loc_getitem_index_namedtuple(self):
result = df.loc[IndexType("foo", "bar")]["A"]
assert result == 1
- @pytest.mark.parametrize("tpl", [tuple([1]), tuple([1, 2])])
+ @pytest.mark.parametrize(
+ "tpl",
+ [
+ (1,),
+ (
+ 1,
+ 2,
+ ),
+ ],
+ )
def test_loc_getitem_index_single_double_tuples(self, tpl):
# GH 20991
- idx = Index([tuple([1]), tuple([1, 2])], name="A", tupleize_cols=False)
+ idx = Index(
+ [
+ (1,),
+ (
+ 1,
+ 2,
+ ),
+ ],
+ name="A",
+ tupleize_cols=False,
+ )
df = DataFrame(index=idx)
result = df.loc[[tpl]]
diff --git a/pandas/tests/frame/methods/test_dtypes.py b/pandas/tests/frame/methods/test_dtypes.py
index 0105eef435121..840e23604939a 100644
--- a/pandas/tests/frame/methods/test_dtypes.py
+++ b/pandas/tests/frame/methods/test_dtypes.py
@@ -32,8 +32,8 @@ def test_empty_frame_dtypes(self):
norows_int_df.dtypes, Series(np.dtype("int32"), index=list("abc"))
)
- df = DataFrame(dict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
- ex_dtypes = Series(dict([("a", np.int64), ("b", np.bool_), ("c", np.float64)]))
+ df = DataFrame({"a": 1, "b": True, "c": 1.0}, index=[1, 2, 3])
+ ex_dtypes = Series({"a": np.int64, "b": np.bool_, "c": np.float64})
tm.assert_series_equal(df.dtypes, ex_dtypes)
# same but for empty slice of df
@@ -66,12 +66,12 @@ def test_dtypes_are_correct_after_column_slice(self):
df = DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
tm.assert_series_equal(
df.dtypes,
- Series(dict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
+ Series({"a": np.float_, "b": np.float_, "c": np.float_}),
)
- tm.assert_series_equal(df.iloc[:, 2:].dtypes, Series(dict([("c", np.float_)])))
+ tm.assert_series_equal(df.iloc[:, 2:].dtypes, Series({"c": np.float_}))
tm.assert_series_equal(
df.dtypes,
- Series(dict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
+ Series({"a": np.float_, "b": np.float_, "c": np.float_}),
)
def test_dtypes_gh8722(self, float_string_frame):
@@ -90,10 +90,10 @@ def test_dtypes_gh8722(self, float_string_frame):
def test_dtypes_timedeltas(self):
df = DataFrame(
- dict(
- A=Series(date_range("2012-1-1", periods=3, freq="D")),
- B=Series([timedelta(days=i) for i in range(3)]),
- )
+ {
+ "A": Series(date_range("2012-1-1", periods=3, freq="D")),
+ "B": Series([timedelta(days=i) for i in range(3)]),
+ }
)
result = df.dtypes
expected = Series(
diff --git a/pandas/tests/frame/methods/test_select_dtypes.py b/pandas/tests/frame/methods/test_select_dtypes.py
index 4599761909c33..2a8826cedd50a 100644
--- a/pandas/tests/frame/methods/test_select_dtypes.py
+++ b/pandas/tests/frame/methods/test_select_dtypes.py
@@ -201,16 +201,14 @@ def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
def test_select_dtypes_duplicate_columns(self):
# GH20839
df = DataFrame(
- dict(
- [
- ("a", list("abc")),
- ("b", list(range(1, 4))),
- ("c", np.arange(3, 6).astype("u1")),
- ("d", np.arange(4.0, 7.0, dtype="float64")),
- ("e", [True, False, True]),
- ("f", pd.date_range("now", periods=3).values),
- ]
- )
+ {
+ "a": ["a", "b", "c"],
+ "b": [1, 2, 3],
+ "c": np.arange(3, 6).astype("u1"),
+ "d": np.arange(4.0, 7.0, dtype="float64"),
+ "e": [True, False, True],
+ "f": pd.date_range("now", periods=3).values,
+ }
)
df.columns = ["a", "a", "b", "b", "b", "c"]
@@ -268,10 +266,10 @@ def test_select_dtypes_bad_datetime64(self):
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(
- dict(
- A=Timestamp("20130102", tz="US/Eastern"),
- B=Timestamp("20130603", tz="CET"),
- ),
+ {
+ "A": Timestamp("20130102", tz="US/Eastern"),
+ "B": Timestamp("20130603", tz="CET"),
+ },
index=range(5),
)
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index d32ca454b5fb2..e279c5872da03 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -181,7 +181,7 @@ def _make_mixed_dtypes_df(typ, ad=None):
for d, a in zip(dtypes, arrays):
assert a.dtype == d
if ad is None:
- ad = dict()
+ ad = {}
ad.update({d: a for d, a in zip(dtypes, arrays)})
return DataFrame(ad)
@@ -197,7 +197,7 @@ def _check_mixed_dtypes(df, dtypes=None):
_check_mixed_dtypes(df)
# add lots of types
- df = _make_mixed_dtypes_df("float", dict(A=1, B="foo", C="bar"))
+ df = _make_mixed_dtypes_df("float", {"A": 1, "B": "foo", "C": "bar"})
_check_mixed_dtypes(df)
# GH 622
@@ -356,8 +356,8 @@ def test_constructor_dict(self):
# GH 14381
# Dict with None value
- frame_none = DataFrame(dict(a=None), index=[0])
- frame_none_list = DataFrame(dict(a=[None]), index=[0])
+ frame_none = DataFrame({"a": None}, index=[0])
+ frame_none_list = DataFrame({"a": [None]}, index=[0])
assert frame_none._get_value(0, "a") is None
assert frame_none_list._get_value(0, "a") is None
tm.assert_frame_equal(frame_none, frame_none_list)
@@ -1540,14 +1540,12 @@ def test_from_dict_columns_parameter(self):
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(
- dict([("A", [1, 2]), ("B", [4, 5])]),
+ {"A": [1, 2], "B": [4, 5]},
orient="columns",
columns=["one", "two"],
)
with pytest.raises(ValueError, match=msg):
- DataFrame.from_dict(
- dict([("A", [1, 2]), ("B", [4, 5])]), columns=["one", "two"]
- )
+ DataFrame.from_dict({"A": [1, 2], "B": [4, 5]}, columns=["one", "two"])
@pytest.mark.parametrize(
"data_dict, keys, orient",
@@ -1590,7 +1588,7 @@ def test_constructor_Series_named(self):
arr = np.random.randn(10)
s = Series(arr, name="x")
df = DataFrame(s)
- expected = DataFrame(dict(x=s))
+ expected = DataFrame({"x": s})
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index dba039b66d22d..451e063dd0f95 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -348,7 +348,7 @@ def bar(x):
# this uses column selection & renaming
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
- d = dict([["C", np.mean], ["D", dict([["foo", np.mean], ["bar", np.std]])]])
+ d = {"C": np.mean, "D": {"foo": np.mean, "bar": np.std}}
grouped.aggregate(d)
# But without renaming, these functions are OK
@@ -1047,7 +1047,7 @@ def test_groupby_get_by_index():
# GH 33439
df = DataFrame({"A": ["S", "W", "W"], "B": [1.0, 1.0, 2.0]})
res = df.groupby("A").agg({"B": lambda x: x.get(x.index[-1])})
- expected = DataFrame(dict(A=["S", "W"], B=[1.0, 2.0])).set_index("A")
+ expected = DataFrame({"A": ["S", "W"], "B": [1.0, 2.0]}).set_index("A")
pd.testing.assert_frame_equal(res, expected)
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 8271d0c45313d..8cf77ca6335f4 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -33,39 +33,37 @@ def f(a):
return result.reindex(index, fill_value=fill_value).sort_index()
-_results_for_groupbys_with_missing_categories = dict(
+_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
- [
- ("all", np.NaN),
- ("any", np.NaN),
- ("count", 0),
- ("corrwith", np.NaN),
- ("first", np.NaN),
- ("idxmax", np.NaN),
- ("idxmin", np.NaN),
- ("last", np.NaN),
- ("mad", np.NaN),
- ("max", np.NaN),
- ("mean", np.NaN),
- ("median", np.NaN),
- ("min", np.NaN),
- ("nth", np.NaN),
- ("nunique", 0),
- ("prod", np.NaN),
- ("quantile", np.NaN),
- ("sem", np.NaN),
- ("size", 0),
- ("skew", np.NaN),
- ("std", np.NaN),
- ("sum", 0),
- ("var", np.NaN),
- ]
-)
+ "all": np.NaN,
+ "any": np.NaN,
+ "count": 0,
+ "corrwith": np.NaN,
+ "first": np.NaN,
+ "idxmax": np.NaN,
+ "idxmin": np.NaN,
+ "last": np.NaN,
+ "mad": np.NaN,
+ "max": np.NaN,
+ "mean": np.NaN,
+ "median": np.NaN,
+ "min": np.NaN,
+ "nth": np.NaN,
+ "nunique": 0,
+ "prod": np.NaN,
+ "quantile": np.NaN,
+ "sem": np.NaN,
+ "size": 0,
+ "skew": np.NaN,
+ "std": np.NaN,
+ "sum": 0,
+ "var": np.NaN,
+}
def test_apply_use_categorical_name(df):
@@ -1151,7 +1149,7 @@ def df_cat(df):
@pytest.mark.parametrize(
- "operation, kwargs", [("agg", dict(dtype="category")), ("apply", dict())]
+ "operation, kwargs", [("agg", {"dtype": "category"}), ("apply", {})]
)
def test_seriesgroupby_observed_true(df_cat, operation, kwargs):
# GH 24880
diff --git a/pandas/tests/indexes/base_class/test_setops.py b/pandas/tests/indexes/base_class/test_setops.py
index 9a6a892307da8..6413b110dff2e 100644
--- a/pandas/tests/indexes/base_class/test_setops.py
+++ b/pandas/tests/indexes/base_class/test_setops.py
@@ -223,8 +223,8 @@ def test_tuple_union_bug(self, method, expected, sort):
expected = Index(expected)
tm.assert_index_equal(result, expected)
- @pytest.mark.parametrize("first_list", [list("ba"), list()])
- @pytest.mark.parametrize("second_list", [list("ab"), list()])
+ @pytest.mark.parametrize("first_list", [["b", "a"], []])
+ @pytest.mark.parametrize("second_list", [["a", "b"], []])
@pytest.mark.parametrize(
"first_name, second_name, expected_name",
[("A", "B", None), (None, "B", None), ("A", None, None)],
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index cbbe3aca9ccbe..17c072556313d 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -34,7 +34,7 @@ def test_ops_properties_basic(self, datetime_series):
getattr(datetime_series, op)
# attribute access should still work!
- s = Series(dict(year=2000, month=1, day=10))
+ s = Series({"year": 2000, "month": 1, "day": 10})
assert s.year == 2000
assert s.month == 1
assert s.day == 10
diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py
index cd063a0c3f74b..e5d178581136b 100644
--- a/pandas/tests/indexes/multi/test_sorting.py
+++ b/pandas/tests/indexes/multi/test_sorting.py
@@ -235,11 +235,11 @@ def test_remove_unused_levels_large(first_type, second_type):
size = 1 << 16
df = DataFrame(
- dict(
- first=rng.randint(0, 1 << 13, size).astype(first_type),
- second=rng.randint(0, 1 << 10, size).astype(second_type),
- third=rng.rand(size),
- )
+ {
+ "first": rng.randint(0, 1 << 13, size).astype(first_type),
+ "second": rng.randint(0, 1 << 10, size).astype(second_type),
+ "third": rng.rand(size),
+ }
)
df = df.groupby(["first", "second"]).sum()
df = df[df.third < 0.1]
diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py
index 660c32d44a7aa..45e443053410a 100644
--- a/pandas/tests/indexes/period/test_partial_slicing.py
+++ b/pandas/tests/indexes/period/test_partial_slicing.py
@@ -78,7 +78,7 @@ def test_range_slice_outofbounds(self, make_range):
# GH#5407
idx = make_range(start="2013/10/01", freq="D", periods=10)
- df = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx)
+ df = DataFrame({"units": [100 + i for i in range(10)]}, index=idx)
empty = DataFrame(index=type(idx)([], freq="D"), columns=["units"])
empty["units"] = empty["units"].astype("int64")
diff --git a/pandas/tests/indexes/ranges/test_constructors.py b/pandas/tests/indexes/ranges/test_constructors.py
index f573da44e99b3..7dd893bd16720 100644
--- a/pandas/tests/indexes/ranges/test_constructors.py
+++ b/pandas/tests/indexes/ranges/test_constructors.py
@@ -12,13 +12,13 @@ class TestRangeIndexConstructors:
@pytest.mark.parametrize(
"args, kwargs, start, stop, step",
[
- ((5,), dict(), 0, 5, 1),
- ((1, 5), dict(), 1, 5, 1),
- ((1, 5, 2), dict(), 1, 5, 2),
- ((0,), dict(), 0, 0, 1),
- ((0, 0), dict(), 0, 0, 1),
- (tuple(), dict(start=0), 0, 0, 1),
- (tuple(), dict(stop=0), 0, 0, 1),
+ ((5,), {}, 0, 5, 1),
+ ((1, 5), {}, 1, 5, 1),
+ ((1, 5, 2), {}, 1, 5, 2),
+ ((0,), {}, 0, 0, 1),
+ ((0, 0), {}, 0, 0, 1),
+ ((), {"start": 0}, 0, 0, 1),
+ ((), {"stop": 0}, 0, 0, 1),
],
)
def test_constructor(self, args, kwargs, start, stop, step, name):
diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 656d25bec2a6b..fb6f4da2a482e 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -94,7 +94,7 @@ def setup_method(self, method):
# form agglomerates
for kind in self._kinds:
- d = dict()
+ d = {}
for typ in self._typs:
d[typ] = getattr(self, f"{kind}_{typ}")
diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py
index 024cc3ad72688..d58bc4713f99f 100644
--- a/pandas/tests/indexing/multiindex/test_slice.py
+++ b/pandas/tests/indexing/multiindex/test_slice.py
@@ -23,7 +23,12 @@ def test_per_axis_per_level_getitem(self):
result = df.loc[(slice("A1", "A3"), slice(None), ["C1", "C3"]), :]
expected = df.loc[
[
- tuple([a, b, c, d])
+ (
+ a,
+ b,
+ c,
+ d,
+ )
for a, b, c, d in df.index.values
if (a == "A1" or a == "A2" or a == "A3") and (c == "C1" or c == "C3")
]
@@ -32,7 +37,12 @@ def test_per_axis_per_level_getitem(self):
expected = df.loc[
[
- tuple([a, b, c, d])
+ (
+ a,
+ b,
+ c,
+ d,
+ )
for a, b, c, d in df.index.values
if (a == "A1" or a == "A2" or a == "A3")
and (c == "C1" or c == "C2" or c == "C3")
@@ -84,7 +94,7 @@ def test_per_axis_per_level_getitem(self):
result = df.loc["A", "a"]
expected = DataFrame(
- dict(bar=[1, 5, 9], foo=[0, 4, 8]),
+ {"bar": [1, 5, 9], "foo": [0, 4, 8]},
index=Index([1, 2, 3], name="two"),
columns=Index(["bar", "foo"], name="lvl1"),
)
@@ -99,7 +109,12 @@ def test_per_axis_per_level_getitem(self):
result = s.loc["A1":"A3", :, ["C1", "C3"]]
expected = s.loc[
[
- tuple([a, b, c, d])
+ (
+ a,
+ b,
+ c,
+ d,
+ )
for a, b, c, d in s.index.values
if (a == "A1" or a == "A2" or a == "A3") and (c == "C1" or c == "C3")
]
@@ -150,19 +165,19 @@ def test_multiindex_slicers_non_unique(self):
# non-unique mi index support
df = (
DataFrame(
- dict(
- A=["foo", "foo", "foo", "foo"],
- B=["a", "a", "a", "a"],
- C=[1, 2, 1, 3],
- D=[1, 2, 3, 4],
- )
+ {
+ "A": ["foo", "foo", "foo", "foo"],
+ "B": ["a", "a", "a", "a"],
+ "C": [1, 2, 1, 3],
+ "D": [1, 2, 3, 4],
+ }
)
.set_index(["A", "B", "C"])
.sort_index()
)
assert not df.index.is_unique
expected = (
- DataFrame(dict(A=["foo", "foo"], B=["a", "a"], C=[1, 1], D=[1, 3]))
+ DataFrame({"A": ["foo", "foo"], "B": ["a", "a"], "C": [1, 1], "D": [1, 3]})
.set_index(["A", "B", "C"])
.sort_index()
)
@@ -175,19 +190,19 @@ def test_multiindex_slicers_non_unique(self):
df = (
DataFrame(
- dict(
- A=["foo", "foo", "foo", "foo"],
- B=["a", "a", "a", "a"],
- C=[1, 2, 1, 2],
- D=[1, 2, 3, 4],
- )
+ {
+ "A": ["foo", "foo", "foo", "foo"],
+ "B": ["a", "a", "a", "a"],
+ "C": [1, 2, 1, 2],
+ "D": [1, 2, 3, 4],
+ }
)
.set_index(["A", "B", "C"])
.sort_index()
)
assert not df.index.is_unique
expected = (
- DataFrame(dict(A=["foo", "foo"], B=["a", "a"], C=[1, 1], D=[1, 3]))
+ DataFrame({"A": ["foo", "foo"], "B": ["a", "a"], "C": [1, 1], "D": [1, 3]})
.set_index(["A", "B", "C"])
.sort_index()
)
@@ -393,7 +408,12 @@ def test_per_axis_per_level_doc_examples(self):
result = df.loc[(slice("A1", "A3"), slice(None), ["C1", "C3"]), :]
expected = df.loc[
[
- tuple([a, b, c, d])
+ (
+ a,
+ b,
+ c,
+ d,
+ )
for a, b, c, d in df.index.values
if (a == "A1" or a == "A2" or a == "A3") and (c == "C1" or c == "C3")
]
@@ -405,7 +425,12 @@ def test_per_axis_per_level_doc_examples(self):
result = df.loc[(slice(None), slice(None), ["C1", "C3"]), :]
expected = df.loc[
[
- tuple([a, b, c, d])
+ (
+ a,
+ b,
+ c,
+ d,
+ )
for a, b, c, d in df.index.values
if (c == "C1" or c == "C3")
]
@@ -461,7 +486,12 @@ def test_loc_axis_arguments(self):
result = df.loc(axis=0)["A1":"A3", :, ["C1", "C3"]]
expected = df.loc[
[
- tuple([a, b, c, d])
+ (
+ a,
+ b,
+ c,
+ d,
+ )
for a, b, c, d in df.index.values
if (a == "A1" or a == "A2" or a == "A3") and (c == "C1" or c == "C3")
]
@@ -471,7 +501,12 @@ def test_loc_axis_arguments(self):
result = df.loc(axis="index")[:, :, ["C1", "C3"]]
expected = df.loc[
[
- tuple([a, b, c, d])
+ (
+ a,
+ b,
+ c,
+ d,
+ )
for a, b, c, d in df.index.values
if (c == "C1" or c == "C3")
]
diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index d162468235767..90fa6e94d1bc8 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -121,13 +121,13 @@ def test_setitem_chained_setfault(self):
tm.assert_frame_equal(df, DataFrame({"response": mdata, "response1": data}))
# GH 6056
- expected = DataFrame(dict(A=[np.nan, "bar", "bah", "foo", "bar"]))
- df = DataFrame(dict(A=np.array(["foo", "bar", "bah", "foo", "bar"])))
+ expected = DataFrame({"A": [np.nan, "bar", "bah", "foo", "bar"]})
+ df = DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])})
df["A"].iloc[0] = np.nan
result = df.head()
tm.assert_frame_equal(result, expected)
- df = DataFrame(dict(A=np.array(["foo", "bar", "bah", "foo", "bar"])))
+ df = DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])})
df.A.iloc[0] = np.nan
result = df.head()
tm.assert_frame_equal(result, expected)
@@ -299,12 +299,12 @@ def random_text(nobs=100):
# Mixed type setting but same dtype & changing dtype
df = DataFrame(
- dict(
- A=date_range("20130101", periods=5),
- B=np.random.randn(5),
- C=np.arange(5, dtype="int64"),
- D=list("abcde"),
- )
+ {
+ "A": date_range("20130101", periods=5),
+ "B": np.random.randn(5),
+ "C": np.arange(5, dtype="int64"),
+ "D": ["a", "b", "c", "d", "e"],
+ }
)
with pytest.raises(com.SettingWithCopyError, match=msg):
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 9ae9566ac87ef..554b93c7cab5a 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -560,15 +560,17 @@ def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(
- dict(A=np.arange(5, dtype="int64"), B=np.arange(5, 10, dtype="int64"))
+ {"A": np.arange(5, dtype="int64"), "B": np.arange(5, 10, dtype="int64")}
)
df.iloc[2:4] = [[10, 11], [12, 13]]
- expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
+ expected = DataFrame({"A": [0, 1, 10, 12, 4], "B": [5, 6, 11, 13, 9]})
tm.assert_frame_equal(df, expected)
- df = DataFrame(dict(A=list("abcde"), B=np.arange(5, 10, dtype="int64")))
+ df = DataFrame(
+ {"A": ["a", "b", "c", "d", "e"], "B": np.arange(5, 10, dtype="int64")}
+ )
df.iloc[2:4] = [["x", 11], ["y", 13]]
- expected = DataFrame(dict(A=["a", "b", "x", "y", "e"], B=[5, 6, 11, 13, 9]))
+ expected = DataFrame({"A": ["a", "b", "x", "y", "e"], "B": [5, 6, 11, 13, 9]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [[0], slice(None, 1, None), np.array([0])])
@@ -645,7 +647,10 @@ def test_iloc_mask(self):
except (ValueError, IndexingError, NotImplementedError) as e:
ans = str(e)
- key = tuple([idx, method])
+ key = (
+ idx,
+ method,
+ )
r = expected.get(key)
if r != ans:
raise AssertionError(
@@ -859,7 +864,7 @@ def test_iloc_setitem_pure_position_based(self, indexer):
def test_iloc_setitem_dictionary_value(self):
# GH#37728
df = DataFrame({"x": [1, 2], "y": [2, 2]})
- rhs = dict(x=9, y=99)
+ rhs = {"x": 9, "y": 99}
df.iloc[1] = rhs
expected = DataFrame({"x": [1, 9], "y": [2, 99]})
tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index b52c2ebbbc584..f750b3667cec2 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -463,12 +463,12 @@ def test_multi_assign(self):
# broadcasting on the rhs is required
df = DataFrame(
- dict(
- A=[1, 2, 0, 0, 0],
- B=[0, 0, 0, 10, 11],
- C=[0, 0, 0, 10, 11],
- D=[3, 4, 5, 6, 7],
- )
+ {
+ "A": [1, 2, 0, 0, 0],
+ "B": [0, 0, 0, 10, 11],
+ "C": [0, 0, 0, 10, 11],
+ "D": [3, 4, 5, 6, 7],
+ }
)
expected = df.copy()
@@ -743,7 +743,7 @@ def test_slice_with_zero_step_raises(self):
def test_indexing_assignment_dict_already_exists(self):
df = DataFrame({"x": [1, 2, 6], "y": [2, 2, 8], "z": [-5, 0, 5]}).set_index("z")
expected = df.copy()
- rhs = dict(x=9, y=99)
+ rhs = {"x": 9, "y": 99}
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index ac3e944716486..f8ad16be607a5 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -183,17 +183,26 @@ def test_loc_setitem_dups(self):
}
).set_index("me")
- indexer = tuple(["r", ["bar", "bar2"]])
+ indexer = (
+ "r",
+ ["bar", "bar2"],
+ )
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
- indexer = tuple(["r", "bar"])
+ indexer = (
+ "r",
+ "bar",
+ )
df = df_orig.copy()
df.loc[indexer] *= 2.0
assert df.loc[indexer] == 2.0 * df_orig.loc[indexer]
- indexer = tuple(["t", ["bar", "bar2"]])
+ indexer = (
+ "t",
+ ["bar", "bar2"],
+ )
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
@@ -563,7 +572,7 @@ def test_loc_setitem_frame(self):
# setting issue
df = DataFrame(index=[3, 5, 4], columns=["A"])
df.loc[[4, 3, 5], "A"] = np.array([1, 2, 3], dtype="int64")
- expected = DataFrame(dict(A=Series([1, 2, 3], index=[4, 3, 5]))).reindex(
+ expected = DataFrame({"A": Series([1, 2, 3], index=[4, 3, 5])}).reindex(
index=[3, 5, 4]
)
tm.assert_frame_equal(df, expected)
@@ -585,7 +594,7 @@ def test_loc_setitem_frame(self):
df.loc[keys2, "B"] = val2
expected = DataFrame(
- dict(A=Series(val1, index=keys1), B=Series(val2, index=keys2))
+ {"A": Series(val1, index=keys1), "B": Series(val2, index=keys2)}
).reindex(index=index)
tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 3bf37f4cade8b..a25922cfd2a0b 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -500,17 +500,17 @@ def test_partial_set_empty_frame_empty_consistencies(self):
# consistency on empty frames
df = DataFrame(columns=["x", "y"])
df["x"] = [1, 2]
- expected = DataFrame(dict(x=[1, 2], y=[np.nan, np.nan]))
+ expected = DataFrame({"x": [1, 2], "y": [np.nan, np.nan]})
tm.assert_frame_equal(df, expected, check_dtype=False)
df = DataFrame(columns=["x", "y"])
df["x"] = ["1", "2"]
- expected = DataFrame(dict(x=["1", "2"], y=[np.nan, np.nan]), dtype=object)
+ expected = DataFrame({"x": ["1", "2"], "y": [np.nan, np.nan]}, dtype=object)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=["x", "y"])
df.loc[0, "x"] = 1
- expected = DataFrame(dict(x=[1], y=[np.nan]))
+ expected = DataFrame({"x": [1], "y": [np.nan]})
tm.assert_frame_equal(df, expected, check_dtype=False)
@pytest.mark.parametrize(
diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py
index 193baa8c3ed74..e9f228b5973b5 100644
--- a/pandas/tests/io/conftest.py
+++ b/pandas/tests/io/conftest.py
@@ -36,7 +36,7 @@ def feather_file(datapath):
@pytest.fixture
def s3so(worker_id):
worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw")
- return dict(client_kwargs={"endpoint_url": f"http://127.0.0.1:555{worker_id}/"})
+ return {"client_kwargs": {"endpoint_url": f"http://127.0.0.1:555{worker_id}/"}}
@pytest.fixture(scope="session")
diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py
index 569bc8a04862e..fca98175a0a24 100644
--- a/pandas/tests/io/test_clipboard.py
+++ b/pandas/tests/io/test_clipboard.py
@@ -199,7 +199,7 @@ def test_clipboard_copy_strings(self, sep, excel, df):
def test_read_clipboard_infer_excel(self, request, mock_clipboard):
# gh-19010: avoid warnings
- clip_kwargs = dict(engine="python")
+ clip_kwargs = {"engine": "python"}
text = dedent(
"""
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index cef5d28b8ccf0..58ae5196151c1 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -173,7 +173,7 @@ def test_path_localpath(self):
@td.skip_if_no("pyarrow", min_version="0.16.1.dev")
def test_passthrough_keywords(self):
df = tm.makeDataFrame().reset_index()
- self.check_round_trip(df, write_kwargs=dict(version=1))
+ self.check_round_trip(df, write_kwargs={"version": 1})
@td.skip_if_no("pyarrow")
@tm.network
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index e3c2f20f80ee3..1be6022bc0fcd 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -346,13 +346,13 @@ def _load_test1_data(self):
def _load_test2_data(self):
df = DataFrame(
- dict(
- A=[4, 1, 3, 6],
- B=["asd", "gsq", "ylt", "jkl"],
- C=[1.1, 3.1, 6.9, 5.3],
- D=[False, True, True, False],
- E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
- )
+ {
+ "A": [4, 1, 3, 6],
+ "B": ["asd", "gsq", "ylt", "jkl"],
+ "C": [1.1, 3.1, 6.9, 5.3],
+ "D": [False, True, True, False],
+ "E": ["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
+ }
)
df["E"] = to_datetime(df["E"])
diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py
index 79fc6bae1a9eb..e83196e9c7d56 100644
--- a/pandas/tests/resample/test_period_index.py
+++ b/pandas/tests/resample/test_period_index.py
@@ -72,7 +72,7 @@ def test_asfreq_fill_value(self, series):
@pytest.mark.parametrize("freq", ["H", "12H", "2D", "W"])
@pytest.mark.parametrize("kind", [None, "period", "timestamp"])
- @pytest.mark.parametrize("kwargs", [dict(on="date"), dict(level="d")])
+ @pytest.mark.parametrize("kwargs", [{"on": "date"}, {"level": "d"}])
def test_selection(self, index, freq, kind, kwargs):
# This is a bug, these should be implemented
# GH 14008
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 29f2aea1648ec..5588b185793cc 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -427,7 +427,7 @@ def test_agg_misc():
msg = r"Column\(s\) \['result1', 'result2'\] do not exist"
for t in cases:
with pytest.raises(pd.core.base.SpecificationError, match=msg):
- t[["A", "B"]].agg(dict([("result1", np.sum), ("result2", np.mean)]))
+ t[["A", "B"]].agg({"result1": np.sum, "result2": np.mean})
# agg with different hows
expected = pd.concat(
@@ -437,7 +437,7 @@ def test_agg_misc():
[("A", "sum"), ("A", "std"), ("B", "mean"), ("B", "std")]
)
for t in cases:
- result = t.agg(dict([("A", ["sum", "std"]), ("B", ["mean", "std"])]))
+ result = t.agg({"A": ["sum", "std"], "B": ["mean", "std"]})
tm.assert_frame_equal(result, expected, check_like=True)
# equivalent of using a selection list / or not
diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py
index a1351ce782669..db93e831e8e0b 100644
--- a/pandas/tests/reshape/concat/test_concat.py
+++ b/pandas/tests/reshape/concat/test_concat.py
@@ -330,8 +330,8 @@ def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
- result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
- expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
+ result = concat({"a": None, "b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
+ expected = concat({"b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
tm.assert_frame_equal(result, expected)
result = concat(
@@ -441,9 +441,7 @@ def test_concat_ordered_dict(self):
expected = pd.concat(
[Series(range(3)), Series(range(4))], keys=["First", "Another"]
)
- result = pd.concat(
- dict([("First", Series(range(3))), ("Another", Series(range(4)))])
- )
+ result = pd.concat({"First": Series(range(3)), "Another": Series(range(4))})
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 143bac3ad136a..f44909b61ff7a 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -1935,9 +1935,7 @@ def test_merge_index_types(index):
result = left.merge(right, on=["index_col"])
- expected = DataFrame(
- dict([("left_data", [1, 2]), ("right_data", [1.0, 2.0])]), index=index
- )
+ expected = DataFrame({"left_data": [1, 2], "right_data": [1.0, 2.0]}, index=index)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/test_get_dummies.py b/pandas/tests/reshape/test_get_dummies.py
index 537bedfd1a6b9..a32adeb612e7c 100644
--- a/pandas/tests/reshape/test_get_dummies.py
+++ b/pandas/tests/reshape/test_get_dummies.py
@@ -567,7 +567,7 @@ def test_dataframe_dummies_preserve_categorical_dtype(self, dtype, ordered):
@pytest.mark.parametrize("sparse", [True, False])
def test_get_dummies_dont_sparsify_all_columns(self, sparse):
# GH18914
- df = DataFrame.from_dict(dict([("GDP", [1, 2]), ("Nation", ["AB", "CD"])]))
+ df = DataFrame.from_dict({"GDP": [1, 2], "Nation": ["AB", "CD"]})
df = get_dummies(df, columns=["Nation"], sparse=sparse)
df2 = df.reindex(columns=["GDP"])
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index 92675f387fe1e..36d1b0911c909 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -309,24 +309,27 @@ def test_basics_nanos(self):
"value, check_kwargs",
[
[946688461000000000, {}],
- [946688461000000000 / 1000, dict(unit="us")],
- [946688461000000000 / 1_000_000, dict(unit="ms")],
- [946688461000000000 / 1_000_000_000, dict(unit="s")],
- [10957, dict(unit="D", h=0)],
+ [946688461000000000 / 1000, {"unit": "us"}],
+ [946688461000000000 / 1_000_000, {"unit": "ms"}],
+ [946688461000000000 / 1_000_000_000, {"unit": "s"}],
+ [10957, {"unit": "D", "h": 0}],
[
(946688461000000000 + 500000) / 1000000000,
- dict(unit="s", us=499, ns=964),
+ {"unit": "s", "us": 499, "ns": 964},
],
- [(946688461000000000 + 500000000) / 1000000000, dict(unit="s", us=500000)],
- [(946688461000000000 + 500000) / 1000000, dict(unit="ms", us=500)],
- [(946688461000000000 + 500000) / 1000, dict(unit="us", us=500)],
- [(946688461000000000 + 500000000) / 1000000, dict(unit="ms", us=500000)],
- [946688461000000000 / 1000.0 + 5, dict(unit="us", us=5)],
- [946688461000000000 / 1000.0 + 5000, dict(unit="us", us=5000)],
- [946688461000000000 / 1000000.0 + 0.5, dict(unit="ms", us=500)],
- [946688461000000000 / 1000000.0 + 0.005, dict(unit="ms", us=5, ns=5)],
- [946688461000000000 / 1000000000.0 + 0.5, dict(unit="s", us=500000)],
- [10957 + 0.5, dict(unit="D", h=12)],
+ [
+ (946688461000000000 + 500000000) / 1000000000,
+ {"unit": "s", "us": 500000},
+ ],
+ [(946688461000000000 + 500000) / 1000000, {"unit": "ms", "us": 500}],
+ [(946688461000000000 + 500000) / 1000, {"unit": "us", "us": 500}],
+ [(946688461000000000 + 500000000) / 1000000, {"unit": "ms", "us": 500000}],
+ [946688461000000000 / 1000.0 + 5, {"unit": "us", "us": 5}],
+ [946688461000000000 / 1000.0 + 5000, {"unit": "us", "us": 5000}],
+ [946688461000000000 / 1000000.0 + 0.5, {"unit": "ms", "us": 500}],
+ [946688461000000000 / 1000000.0 + 0.005, {"unit": "ms", "us": 5, "ns": 5}],
+ [946688461000000000 / 1000000000.0 + 0.5, {"unit": "s", "us": 500000}],
+ [10957 + 0.5, {"unit": "D", "h": 12}],
],
)
def test_unit(self, value, check_kwargs):
diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py
index 565debb98d8cc..faa7b872d9d06 100644
--- a/pandas/tests/series/methods/test_replace.py
+++ b/pandas/tests/series/methods/test_replace.py
@@ -253,7 +253,7 @@ def test_replace_with_dictlike_and_string_dtype(self):
def test_replace_with_empty_dictlike(self):
# GH 15289
s = pd.Series(list("abcd"))
- tm.assert_series_equal(s, s.replace(dict()))
+ tm.assert_series_equal(s, s.replace({}))
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = pd.Series([])
diff --git a/pandas/tests/series/methods/test_to_csv.py b/pandas/tests/series/methods/test_to_csv.py
index 72db87362584d..a22e125e68cba 100644
--- a/pandas/tests/series/methods/test_to_csv.py
+++ b/pandas/tests/series/methods/test_to_csv.py
@@ -13,7 +13,7 @@
class TestSeriesToCSV:
def read_csv(self, path, **kwargs):
- params = dict(squeeze=True, index_col=0, header=None, parse_dates=True)
+ params = {"squeeze": True, "index_col": 0, "header": None, "parse_dates": True}
params.update(**kwargs)
header = params.get("header")
diff --git a/pandas/tests/series/methods/test_to_frame.py b/pandas/tests/series/methods/test_to_frame.py
index b324fab5d97d4..6d52ab9da3f1b 100644
--- a/pandas/tests/series/methods/test_to_frame.py
+++ b/pandas/tests/series/methods/test_to_frame.py
@@ -12,13 +12,13 @@ def test_to_frame(self, datetime_series):
datetime_series.name = "testname"
rs = datetime_series.to_frame()
xp = DataFrame(
- dict(testname=datetime_series.values), index=datetime_series.index
+ {"testname": datetime_series.values}, index=datetime_series.index
)
tm.assert_frame_equal(rs, xp)
rs = datetime_series.to_frame(name="testdifferent")
xp = DataFrame(
- dict(testdifferent=datetime_series.values), index=datetime_series.index
+ {"testdifferent": datetime_series.values}, index=datetime_series.index
)
tm.assert_frame_equal(rs, xp)
diff --git a/pandas/tests/series/test_reductions.py b/pandas/tests/series/test_reductions.py
index 0e8bf8f052206..c3c58f29fcbf6 100644
--- a/pandas/tests/series/test_reductions.py
+++ b/pandas/tests/series/test_reductions.py
@@ -66,7 +66,7 @@ def test_sum_with_level():
@pytest.mark.parametrize("func", [np.any, np.all])
-@pytest.mark.parametrize("kwargs", [dict(keepdims=True), dict(out=object())])
+@pytest.mark.parametrize("kwargs", [{"keepdims": True}, {"out": object()}])
def test_validate_any_all_out_keepdims_raises(kwargs, func):
ser = Series([1, 2])
param = list(kwargs)[0]
diff --git a/pandas/tests/series/test_validate.py b/pandas/tests/series/test_validate.py
index e2f050650b298..3c867f7582b7d 100644
--- a/pandas/tests/series/test_validate.py
+++ b/pandas/tests/series/test_validate.py
@@ -17,7 +17,7 @@
def test_validate_bool_args(string_series, func, inplace):
"""Tests for error handling related to data types of method arguments."""
msg = 'For argument "inplace" expected type bool'
- kwargs = dict(inplace=inplace)
+ kwargs = {"inplace": inplace}
if func == "_set_name":
kwargs["name"] = "hello"
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index 5f85ae2ec2318..da1c91a1ad218 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -270,12 +270,24 @@ def test_int64_overflow_issues(self):
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
- vals.append(k + tuple([lv, rv]))
+ vals.append(
+ k
+ + (
+ lv,
+ rv,
+ )
+ )
for k, rval in rdict.items():
if k not in ldict:
for rv in rval:
- vals.append(k + tuple([np.nan, rv]))
+ vals.append(
+ k
+ + (
+ np.nan,
+ rv,
+ )
+ )
def align(df):
df = df.sort_values(df.columns.tolist())
diff --git a/pandas/tests/tslibs/test_array_to_datetime.py b/pandas/tests/tslibs/test_array_to_datetime.py
index a40fcd725d604..e3f586d391fc6 100644
--- a/pandas/tests/tslibs/test_array_to_datetime.py
+++ b/pandas/tests/tslibs/test_array_to_datetime.py
@@ -117,7 +117,7 @@ def test_number_looking_strings_not_into_datetime(data):
@pytest.mark.parametrize("errors", ["coerce", "raise"])
def test_coerce_outside_ns_bounds(invalid_date, errors):
arr = np.array([invalid_date], dtype="object")
- kwargs = dict(values=arr, errors=errors)
+ kwargs = {"values": arr, "errors": errors}
if errors == "raise":
msg = "Out of bounds nanosecond timestamp"
@@ -144,7 +144,7 @@ def test_coerce_outside_ns_bounds_one_valid():
@pytest.mark.parametrize("errors", ["ignore", "coerce"])
def test_coerce_of_invalid_datetimes(errors):
arr = np.array(["01-01-2013", "not_a_date", "1"], dtype=object)
- kwargs = dict(values=arr, errors=errors)
+ kwargs = {"values": arr, "errors": errors}
if errors == "ignore":
# Without coercing, the presence of any invalid
diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py
index 70fa724464226..e580b9112f3ec 100644
--- a/pandas/tests/tslibs/test_parsing.py
+++ b/pandas/tests/tslibs/test_parsing.py
@@ -75,7 +75,7 @@ def test_does_not_convert_mixed_integer(date_string, expected):
[
(
"2013Q5",
- dict(),
+ {},
(
"Incorrect quarterly string is given, "
"quarter must be between 1 and 4: 2013Q5"
@@ -84,7 +84,7 @@ def test_does_not_convert_mixed_integer(date_string, expected):
# see gh-5418
(
"2013Q1",
- dict(freq="INVLD-L-DEC-SAT"),
+ {"freq": "INVLD-L-DEC-SAT"},
(
"Unable to retrieve month information "
"from given freq: INVLD-L-DEC-SAT"
diff --git a/pandas/tests/util/test_assert_almost_equal.py b/pandas/tests/util/test_assert_almost_equal.py
index c4bc3b7ee352d..ec8cb29c6dead 100644
--- a/pandas/tests/util/test_assert_almost_equal.py
+++ b/pandas/tests/util/test_assert_almost_equal.py
@@ -228,7 +228,7 @@ def test_assert_not_almost_equal_dicts(a, b):
@pytest.mark.parametrize("val", [1, 2])
def test_assert_almost_equal_dict_like_object(val):
dict_val = 1
- real_dict = dict(a=val)
+ real_dict = {"a": val}
class DictLikeObj:
def keys(self):
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index cf618f7c828aa..779d93eb14f24 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -305,14 +305,27 @@ def test_hash_with_tuple():
expected = Series([10345501319357378243, 8331063931016360761], dtype=np.uint64)
tm.assert_series_equal(result, expected)
- df2 = DataFrame({"data": [tuple([1]), tuple([2])]})
+ df2 = DataFrame({"data": [(1,), (2,)]})
result = hash_pandas_object(df2)
expected = Series([9408946347443669104, 3278256261030523334], dtype=np.uint64)
tm.assert_series_equal(result, expected)
# require that the elements of such tuples are themselves hashable
- df3 = DataFrame({"data": [tuple([1, []]), tuple([2, {}])]})
+ df3 = DataFrame(
+ {
+ "data": [
+ (
+ 1,
+ [],
+ ),
+ (
+ 2,
+ {},
+ ),
+ ]
+ }
+ )
with pytest.raises(TypeError, match="unhashable type: 'list'"):
hash_pandas_object(df3)
diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py
index ac77bfe0dfb48..6ce425e2575db 100644
--- a/pandas/tests/window/test_api.py
+++ b/pandas/tests/window/test_api.py
@@ -307,6 +307,6 @@ def test_multiple_agg_funcs(func, window_size, expected_vals):
)
expected = DataFrame(expected_vals, index=index, columns=columns)
- result = window.agg(dict((("low", ["mean", "max"]), ("high", ["mean", "min"]))))
+ result = window.agg({"low": ["mean", "max"], "high": ["mean", "min"]})
tm.assert_frame_equal(result, expected)
diff --git a/setup.cfg b/setup.cfg
index 7b404cb294f58..244e6f18bb0ef 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -22,9 +22,7 @@ ignore =
W504, # line break after binary operator
E402, # module level import not at top of file
E731, # do not assign a lambda expression, use a def
- C406, # Unnecessary list literal - rewrite as a dict literal.
C408, # Unnecessary dict call - rewrite as a literal.
- C409, # Unnecessary list passed to tuple() - rewrite as a tuple literal.
S001 # found modulo formatter (incorrect picks up mod operations)
exclude =
doc/sphinxext/*.py,
| https://api.github.com/repos/pandas-dev/pandas/pulls/38078 | 2020-11-26T03:29:26Z | 2020-11-26T17:54:02Z | 2020-11-26T17:54:02Z | 2020-12-05T18:53:01Z | |
BUG: raise consistent exception on slicing failure | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index c074ae2c066f6..c49f3f9457161 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3433,11 +3433,11 @@ def _convert_list_indexer(self, keyarr):
return None
@final
- def _invalid_indexer(self, form: str_t, key):
+ def _invalid_indexer(self, form: str_t, key) -> TypeError:
"""
Consistent invalid indexer message.
"""
- raise TypeError(
+ return TypeError(
f"cannot do {form} indexing on {type(self).__name__} with these "
f"indexers [{key}] of type {type(key).__name__}"
)
@@ -5238,7 +5238,7 @@ def _validate_indexer(self, form: str_t, key, kind: str_t):
elif is_integer(key):
pass
else:
- self._invalid_indexer(form, key)
+ raise self._invalid_indexer(form, key)
def _maybe_cast_slice_bound(self, label, side: str_t, kind):
"""
@@ -5267,7 +5267,7 @@ def _maybe_cast_slice_bound(self, label, side: str_t, kind):
# datetimelike Indexes
# reject them, if index does not contain label
if (is_float(label) or is_integer(label)) and label not in self.values:
- self._invalid_indexer("slice", label)
+ raise self._invalid_indexer("slice", label)
return label
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index b39a36d95d27b..f6eeb121b1ac0 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -717,7 +717,11 @@ def _maybe_cast_slice_bound(self, label, side: str, kind):
if isinstance(label, str):
freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
- parsed, reso = parsing.parse_time_string(label, freq)
+ try:
+ parsed, reso = parsing.parse_time_string(label, freq)
+ except parsing.DateParseError as err:
+ raise self._invalid_indexer("slice", label) from err
+
reso = Resolution.from_attrname(reso)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
# lower, upper form the half-open interval:
@@ -732,7 +736,7 @@ def _maybe_cast_slice_bound(self, label, side: str, kind):
elif isinstance(label, (self._data._recognized_scalars, date)):
self._deprecate_mismatched_indexing(label)
else:
- self._invalid_indexer("slice", label)
+ raise self._invalid_indexer("slice", label)
return self._maybe_cast_for_get_loc(label)
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 0f9a0052c18d0..5dff07ee4c6dd 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -578,10 +578,9 @@ def _maybe_cast_slice_bound(self, label, side: str, kind: str):
return bounds[0 if side == "left" else 1]
except ValueError as err:
# string cannot be parsed as datetime-like
- # TODO: we need tests for this case
- raise KeyError(label) from err
+ raise self._invalid_indexer("slice", label) from err
elif is_integer(label) or is_float(label):
- self._invalid_indexer("slice", label)
+ raise self._invalid_indexer("slice", label)
return label
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index f44a1701bfa9b..fcab3e1f6a0a4 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -223,7 +223,7 @@ def _maybe_cast_slice_bound(self, label, side: str, kind):
else:
return lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns")
elif not isinstance(label, self._data._recognized_scalars):
- self._invalid_indexer("slice", label)
+ raise self._invalid_indexer("slice", label)
return label
diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py
index 45e443053410a..878a89bd52cb1 100644
--- a/pandas/tests/indexes/period/test_partial_slicing.py
+++ b/pandas/tests/indexes/period/test_partial_slicing.py
@@ -90,6 +90,34 @@ def test_range_slice_outofbounds(self, make_range):
tm.assert_frame_equal(df["2013-06":"2013-09"], empty)
tm.assert_frame_equal(df["2013-11":"2013-12"], empty)
+ @pytest.mark.parametrize("make_range", [date_range, period_range])
+ def test_maybe_cast_slice_bound(self, make_range, frame_or_series):
+ idx = make_range(start="2013/10/01", freq="D", periods=10)
+
+ obj = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx)
+ if frame_or_series is not DataFrame:
+ obj = obj["units"]
+
+ msg = (
+ f"cannot do slice indexing on {type(idx).__name__} with "
+ r"these indexers \[foo\] of type str"
+ )
+
+ # Check the lower-level calls are raising where expected.
+ with pytest.raises(TypeError, match=msg):
+ idx._maybe_cast_slice_bound("foo", "left", "loc")
+ with pytest.raises(TypeError, match=msg):
+ idx.get_slice_bound("foo", "left", "loc")
+
+ with pytest.raises(TypeError, match=msg):
+ obj["2013/09/30":"foo"]
+ with pytest.raises(TypeError, match=msg):
+ obj["foo":"2013/09/30"]
+ with pytest.raises(TypeError, match=msg):
+ obj.loc["2013/09/30":"foo"]
+ with pytest.raises(TypeError, match=msg):
+ obj.loc["foo":"2013/09/30"]
+
def test_partial_slice_doesnt_require_monotonicity(self):
# See also: DatetimeIndex test ofm the same name
dti = date_range("2014-01-01", periods=30, freq="30D")
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38077 | 2020-11-26T03:25:46Z | 2020-11-26T22:25:03Z | 2020-11-26T22:25:03Z | 2020-11-27T17:41:13Z |
CLN: inconsistent namespace usage in tests.indexes | diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py
index ad9a2f112caac..2f22236d55ff3 100644
--- a/pandas/tests/indexes/datetimes/test_astype.py
+++ b/pandas/tests/indexes/datetimes/test_astype.py
@@ -172,7 +172,7 @@ def test_astype_object(self):
@pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])
def test_astype_object_tz(self, tz):
- idx = pd.date_range(start="2013-01-01", periods=4, freq="M", name="idx", tz=tz)
+ idx = date_range(start="2013-01-01", periods=4, freq="M", name="idx", tz=tz)
expected_list = [
Timestamp("2013-01-31", tz=tz),
Timestamp("2013-02-28", tz=tz),
@@ -288,7 +288,7 @@ def test_dti_astype_period(self):
class TestAstype:
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_astype_category(self, tz):
- obj = pd.date_range("2000", periods=2, tz=tz, name="idx")
+ obj = date_range("2000", periods=2, tz=tz, name="idx")
result = obj.astype("category")
expected = pd.CategoricalIndex(
[Timestamp("2000-01-01", tz=tz), Timestamp("2000-01-02", tz=tz)],
@@ -302,7 +302,7 @@ def test_astype_category(self, tz):
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_astype_array_fallback(self, tz):
- obj = pd.date_range("2000", periods=2, tz=tz, name="idx")
+ obj = date_range("2000", periods=2, tz=tz, name="idx")
result = obj.astype(bool)
expected = Index(np.array([True, True]), name="idx")
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index fc59df29ef18f..3bb25adb0f36b 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -35,7 +35,7 @@ def test_freq_validation_with_nat(self, dt_cls):
@pytest.mark.parametrize(
"index",
[
- pd.date_range("2016-01-01", periods=5, tz="US/Pacific"),
+ date_range("2016-01-01", periods=5, tz="US/Pacific"),
pd.timedelta_range("1 Day", periods=5),
],
)
@@ -103,14 +103,14 @@ def test_construction_caching(self):
df = pd.DataFrame(
{
- "dt": pd.date_range("20130101", periods=3),
- "dttz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
+ "dt": date_range("20130101", periods=3),
+ "dttz": date_range("20130101", periods=3, tz="US/Eastern"),
"dt_with_null": [
Timestamp("20130101"),
pd.NaT,
Timestamp("20130103"),
],
- "dtns": pd.date_range("20130101", periods=3, freq="ns"),
+ "dtns": date_range("20130101", periods=3, freq="ns"),
}
)
assert df.dttz.dtype.tz.zone == "US/Eastern"
@@ -121,7 +121,7 @@ def test_construction_caching(self):
)
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
- i = pd.date_range("20130101", periods=5, freq="H", tz=tz)
+ i = date_range("20130101", periods=5, freq="H", tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@@ -132,7 +132,7 @@ def test_construction_with_alt(self, kwargs, tz_aware_fixture):
)
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
- i = pd.date_range("20130101", periods=5, freq="H", tz=tz)
+ i = date_range("20130101", periods=5, freq="H", tz=tz)
i = i._with_freq(None)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
@@ -754,7 +754,7 @@ def test_construction_int_rountrip(self, tz_naive_fixture):
def test_construction_from_replaced_timestamps_with_dst(self):
# GH 18785
- index = pd.date_range(
+ index = date_range(
Timestamp(2000, 1, 1),
Timestamp(2005, 1, 1),
freq="MS",
@@ -804,7 +804,7 @@ def test_constructor_with_ambiguous_keyword_arg(self):
start = Timestamp(year=2020, month=11, day=1, hour=1).tz_localize(
timezone, ambiguous=False
)
- result = pd.date_range(start=start, periods=2, ambiguous=False)
+ result = date_range(start=start, periods=2, ambiguous=False)
tm.assert_index_equal(result, expected)
# ambiguous keyword in end
@@ -812,7 +812,7 @@ def test_constructor_with_ambiguous_keyword_arg(self):
end = Timestamp(year=2020, month=11, day=2, hour=1).tz_localize(
timezone, ambiguous=False
)
- result = pd.date_range(end=end, periods=2, ambiguous=False)
+ result = date_range(end=end, periods=2, ambiguous=False)
tm.assert_index_equal(result, expected)
def test_constructor_with_nonexistent_keyword_arg(self):
@@ -824,7 +824,7 @@ def test_constructor_with_nonexistent_keyword_arg(self):
start = Timestamp("2015-03-29 02:30:00").tz_localize(
timezone, nonexistent="shift_forward"
)
- result = pd.date_range(start=start, periods=2, freq="H")
+ result = date_range(start=start, periods=2, freq="H")
expected = DatetimeIndex(
[
Timestamp("2015-03-29 03:00:00+02:00", tz=timezone),
@@ -838,7 +838,7 @@ def test_constructor_with_nonexistent_keyword_arg(self):
end = Timestamp("2015-03-29 02:30:00").tz_localize(
timezone, nonexistent="shift_forward"
)
- result = pd.date_range(end=end, periods=2, freq="H")
+ result = date_range(end=end, periods=2, freq="H")
expected = DatetimeIndex(
[
Timestamp("2015-03-29 01:00:00+01:00", tz=timezone),
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index 237c82436eb84..7c70b58318a11 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -650,10 +650,10 @@ def test_timezone_comparaison_assert(self):
def test_negative_non_tick_frequency_descending_dates(self, tz_aware_fixture):
# GH 23270
tz = tz_aware_fixture
- result = pd.date_range(start="2011-06-01", end="2011-01-01", freq="-1MS", tz=tz)
- expected = pd.date_range(
- end="2011-06-01", start="2011-01-01", freq="1MS", tz=tz
- )[::-1]
+ result = date_range(start="2011-06-01", end="2011-01-01", freq="-1MS", tz=tz)
+ expected = date_range(end="2011-06-01", start="2011-01-01", freq="1MS", tz=tz)[
+ ::-1
+ ]
tm.assert_index_equal(result, expected)
@@ -739,10 +739,10 @@ def test_3(self):
def test_precision_finer_than_offset(self):
# GH#9907
- result1 = pd.date_range(
+ result1 = date_range(
start="2015-04-15 00:00:03", end="2016-04-22 00:00:00", freq="Q"
)
- result2 = pd.date_range(
+ result2 = date_range(
start="2015-04-15 00:00:03", end="2015-06-22 00:00:04", freq="W"
)
expected1_list = [
@@ -788,9 +788,9 @@ def test_mismatching_tz_raises_err(self, start, end):
# issue 18488
msg = "Start and end cannot both be tz-aware with different timezones"
with pytest.raises(TypeError, match=msg):
- pd.date_range(start, end)
+ date_range(start, end)
with pytest.raises(TypeError, match=msg):
- pd.date_range(start, end, freq=BDay())
+ date_range(start, end, freq=BDay())
class TestBusinessDateRange:
@@ -849,18 +849,18 @@ def test_bdays_and_open_boundaries(self, closed):
# GH 6673
start = "2018-07-21" # Saturday
end = "2018-07-29" # Sunday
- result = pd.date_range(start, end, freq="B", closed=closed)
+ result = date_range(start, end, freq="B", closed=closed)
bday_start = "2018-07-23" # Monday
bday_end = "2018-07-27" # Friday
- expected = pd.date_range(bday_start, bday_end, freq="D")
+ expected = date_range(bday_start, bday_end, freq="D")
tm.assert_index_equal(result, expected)
# Note: we do _not_ expect the freqs to match here
def test_bday_near_overflow(self):
# GH#24252 avoid doing unnecessary addition that _would_ overflow
start = Timestamp.max.floor("D").to_pydatetime()
- rng = pd.date_range(start, end=None, periods=1, freq="B")
+ rng = date_range(start, end=None, periods=1, freq="B")
expected = DatetimeIndex([start], freq="B")
tm.assert_index_equal(rng, expected)
@@ -869,7 +869,7 @@ def test_bday_overflow_error(self):
msg = "Out of bounds nanosecond timestamp"
start = Timestamp.max.floor("D").to_pydatetime()
with pytest.raises(OutOfBoundsDatetime, match=msg):
- pd.date_range(start, periods=2, freq="B")
+ date_range(start, periods=2, freq="B")
class TestCustomDateRange:
@@ -995,7 +995,7 @@ def test_all_custom_freq(self, freq):
def test_range_with_millisecond_resolution(self, start_end):
# https://github.com/pandas-dev/pandas/issues/24110
start, end = start_end
- result = pd.date_range(start=start, end=end, periods=2, closed="left")
+ result = date_range(start=start, end=end, periods=2, closed="left")
expected = DatetimeIndex([start])
tm.assert_index_equal(result, expected)
@@ -1003,7 +1003,7 @@ def test_range_with_millisecond_resolution(self, start_end):
def test_date_range_with_custom_holidays():
# GH 30593
freq = pd.offsets.CustomBusinessHour(start="15:00", holidays=["2020-11-26"])
- result = pd.date_range(start="2020-11-25 15:00", periods=4, freq=freq)
+ result = date_range(start="2020-11-25 15:00", periods=4, freq=freq)
expected = DatetimeIndex(
[
"2020-11-25 15:00:00",
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index b35aa28ffc40b..2657fc817ec3a 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -68,7 +68,7 @@ def test_time_loc(self): # GH8667
step = 24 * 3600
for n in ns:
- idx = pd.date_range("2014-11-26", periods=n, freq="S")
+ idx = date_range("2014-11-26", periods=n, freq="S")
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
@@ -89,10 +89,10 @@ def test_time_overflow_for_32bit_machines(self):
# overflow.
periods = np.int_(1000)
- idx1 = pd.date_range(start="2000", periods=periods, freq="S")
+ idx1 = date_range(start="2000", periods=periods, freq="S")
assert len(idx1) == periods
- idx2 = pd.date_range(end="2000", periods=periods, freq="S")
+ idx2 = date_range(end="2000", periods=periods, freq="S")
assert len(idx2) == periods
def test_nat(self):
@@ -251,7 +251,7 @@ def test_ns_index(self):
index = DatetimeIndex(dt, freq=freq, name="time")
self.assert_index_parameters(index)
- new_index = pd.date_range(start=index[0], end=index[-1], freq=index.freq)
+ new_index = date_range(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_factorize(self):
@@ -304,7 +304,7 @@ def test_factorize(self):
def test_factorize_tz(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH#13750
- base = pd.date_range("2016-11-05", freq="H", periods=100, tz=tz)
+ base = date_range("2016-11-05", freq="H", periods=100, tz=tz)
idx = base.repeat(5)
exp_arr = np.arange(100, dtype=np.intp).repeat(5)
@@ -317,14 +317,14 @@ def test_factorize_tz(self, tz_naive_fixture):
def test_factorize_dst(self):
# GH 13750
- idx = pd.date_range("2016-11-06", freq="H", periods=12, tz="US/Eastern")
+ idx = date_range("2016-11-06", freq="H", periods=12, tz="US/Eastern")
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
- idx = pd.date_range("2016-06-13", freq="H", periods=12, tz="US/Eastern")
+ idx = date_range("2016-06-13", freq="H", periods=12, tz="US/Eastern")
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
@@ -350,7 +350,7 @@ def test_unique(self, arr, expected):
def test_asarray_tz_naive(self):
# This shouldn't produce a warning.
- idx = pd.date_range("2000", periods=2)
+ idx = date_range("2000", periods=2)
# M8[ns] by default
result = np.asarray(idx)
@@ -365,7 +365,7 @@ def test_asarray_tz_naive(self):
def test_asarray_tz_aware(self):
tz = "US/Central"
- idx = pd.date_range("2000", periods=2, tz=tz)
+ idx = date_range("2000", periods=2, tz=tz)
expected = np.array(["2000-01-01T06", "2000-01-02T06"], dtype="M8[ns]")
result = np.asarray(idx, dtype="datetime64[ns]")
@@ -393,7 +393,7 @@ def test_to_frame_datetime_tz(self):
def test_split_non_utc(self):
# GH 14042
- indices = pd.date_range("2016-01-01 00:00:00+0200", freq="S", periods=10)
+ indices = date_range("2016-01-01 00:00:00+0200", freq="S", periods=10)
result = np.split(indices, indices_or_sections=[])[0]
expected = indices._with_freq(None)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 59269b9b54ddc..232ebc608e465 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -17,7 +17,7 @@
class TestGetItem:
def test_ellipsis(self):
# GH#21282
- idx = pd.date_range(
+ idx = date_range(
"2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx"
)
@@ -29,12 +29,12 @@ def test_getitem_slice_keeps_name(self):
# GH4226
st = Timestamp("2013-07-01 00:00:00", tz="America/Los_Angeles")
et = Timestamp("2013-07-02 00:00:00", tz="America/Los_Angeles")
- dr = pd.date_range(st, et, freq="H", name="timebucket")
+ dr = date_range(st, et, freq="H", name="timebucket")
assert dr[1:].name == dr.name
def test_getitem(self):
- idx1 = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
- idx2 = pd.date_range(
+ idx1 = date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
+ idx2 = date_range(
"2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx"
)
@@ -43,21 +43,21 @@ def test_getitem(self):
assert result == Timestamp("2011-01-01", tz=idx.tz)
result = idx[0:5]
- expected = pd.date_range(
+ expected = date_range(
"2011-01-01", "2011-01-05", freq="D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
- expected = pd.date_range(
+ expected = date_range(
"2011-01-01", "2011-01-09", freq="2D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
- expected = pd.date_range(
+ expected = date_range(
"2011-01-12", "2011-01-24", freq="3D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
@@ -74,7 +74,7 @@ def test_getitem(self):
assert result.freq == expected.freq
def test_dti_business_getitem(self):
- rng = pd.bdate_range(START, END)
+ rng = bdate_range(START, END)
smaller = rng[:5]
exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="B")
tm.assert_index_equal(smaller, exp)
@@ -94,7 +94,7 @@ def test_dti_business_getitem(self):
assert rng[4] == rng[np.int_(4)]
def test_dti_business_getitem_matplotlib_hackaround(self):
- rng = pd.bdate_range(START, END)
+ rng = bdate_range(START, END)
with tm.assert_produces_warning(FutureWarning):
# GH#30588 multi-dimensional indexing deprecated
values = rng[:, None]
@@ -102,7 +102,7 @@ def test_dti_business_getitem_matplotlib_hackaround(self):
tm.assert_numpy_array_equal(values, expected)
def test_dti_custom_getitem(self):
- rng = pd.bdate_range(START, END, freq="C")
+ rng = bdate_range(START, END, freq="C")
smaller = rng[:5]
exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="C")
tm.assert_index_equal(smaller, exp)
@@ -121,7 +121,7 @@ def test_dti_custom_getitem(self):
assert rng[4] == rng[np.int_(4)]
def test_dti_custom_getitem_matplotlib_hackaround(self):
- rng = pd.bdate_range(START, END, freq="C")
+ rng = bdate_range(START, END, freq="C")
with tm.assert_produces_warning(FutureWarning):
# GH#30588 multi-dimensional indexing deprecated
values = rng[:, None]
@@ -155,7 +155,7 @@ def test_where_doesnt_retain_freq(self):
def test_where_other(self):
# other is ndarray or Index
- i = pd.date_range("20130101", periods=3, tz="US/Eastern")
+ i = date_range("20130101", periods=3, tz="US/Eastern")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
@@ -173,7 +173,7 @@ def test_where_other(self):
tm.assert_index_equal(result, i2)
def test_where_invalid_dtypes(self):
- dti = pd.date_range("20130101", periods=3, tz="US/Eastern")
+ dti = date_range("20130101", periods=3, tz="US/Eastern")
i2 = Index([pd.NaT, pd.NaT] + dti[2:].tolist())
@@ -202,7 +202,7 @@ def test_where_invalid_dtypes(self):
def test_where_mismatched_nat(self, tz_aware_fixture):
tz = tz_aware_fixture
- dti = pd.date_range("2013-01-01", periods=3, tz=tz)
+ dti = date_range("2013-01-01", periods=3, tz=tz)
cond = np.array([True, False, True])
msg = "value should be a 'Timestamp', 'NaT', or array of those. Got"
@@ -211,7 +211,7 @@ def test_where_mismatched_nat(self, tz_aware_fixture):
dti.where(cond, np.timedelta64("NaT", "ns"))
def test_where_tz(self):
- i = pd.date_range("20130101", periods=3, tz="US/Eastern")
+ i = date_range("20130101", periods=3, tz="US/Eastern")
result = i.where(notna(i))
expected = i
tm.assert_index_equal(result, expected)
@@ -226,8 +226,8 @@ def test_where_tz(self):
class TestTake:
def test_take(self):
# GH#10295
- idx1 = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
- idx2 = pd.date_range(
+ idx1 = date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
+ idx2 = date_range(
"2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx"
)
@@ -236,21 +236,21 @@ def test_take(self):
assert result == Timestamp("2011-01-01", tz=idx.tz)
result = idx.take([0, 1, 2])
- expected = pd.date_range(
+ expected = date_range(
"2011-01-01", "2011-01-03", freq="D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
- expected = pd.date_range(
+ expected = date_range(
"2011-01-01", "2011-01-05", freq="2D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
- expected = pd.date_range(
+ expected = date_range(
"2011-01-08", "2011-01-02", freq="-3D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
@@ -277,7 +277,7 @@ def test_take(self):
assert result.freq is None
def test_take_invalid_kwargs(self):
- idx = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
+ idx = date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
@@ -302,7 +302,7 @@ def test_take2(self, tz):
datetime(2010, 1, 1, 21),
]
- idx = pd.date_range(
+ idx = date_range(
start="2010-01-01 09:00",
end="2010-02-01 09:00",
freq="H",
@@ -392,7 +392,7 @@ def test_take_fill_value_with_timezone(self):
class TestGetLoc:
@pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"])
def test_get_loc_method_exact_match(self, method):
- idx = pd.date_range("2000-01-01", periods=3)
+ idx = date_range("2000-01-01", periods=3)
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
@@ -401,7 +401,7 @@ def test_get_loc_method_exact_match(self, method):
assert idx.get_loc(idx[1], method, tolerance=pd.Timedelta("0 days")) == 1
def test_get_loc(self):
- idx = pd.date_range("2000-01-01", periods=3)
+ idx = date_range("2000-01-01", periods=3)
assert idx.get_loc("2000-01-01", method="nearest") == 0
assert idx.get_loc("2000-01-01T12", method="nearest") == 1
@@ -458,7 +458,7 @@ def test_get_loc(self):
assert idx.get_loc("2000-01", method="nearest") == slice(0, 2)
# time indexing
- idx = pd.date_range("2000-01-01", periods=24, freq="H")
+ idx = date_range("2000-01-01", periods=24, freq="H")
tm.assert_numpy_array_equal(
idx.get_loc(time(12)), np.array([12]), check_dtype=False
)
@@ -481,7 +481,7 @@ def test_get_loc_time_nat(self):
def test_get_loc_tz_aware(self):
# https://github.com/pandas-dev/pandas/issues/32140
- dti = pd.date_range(
+ dti = date_range(
Timestamp("2019-12-12 00:00:00", tz="US/Eastern"),
Timestamp("2019-12-13 00:00:00", tz="US/Eastern"),
freq="5s",
@@ -509,7 +509,7 @@ def test_get_loc_nat(self):
@pytest.mark.parametrize("key", [pd.Timedelta(0), pd.Timedelta(1), timedelta(0)])
def test_get_loc_timedelta_invalid_key(self, key):
# GH#20464
- dti = pd.date_range("1970-01-01", periods=10)
+ dti = date_range("1970-01-01", periods=10)
msg = "Cannot index DatetimeIndex with [Tt]imedelta"
with pytest.raises(TypeError, match=msg):
dti.get_loc(key)
@@ -552,7 +552,7 @@ def test_get_indexer_date_objs(self):
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer(self):
- idx = pd.date_range("2000-01-01", periods=3)
+ idx = date_range("2000-01-01", periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
@@ -654,7 +654,7 @@ def test_maybe_cast_slice_duplicate_monotonic(self):
class TestDatetimeIndex:
def test_get_value(self):
# specifically make sure we have test for np.datetime64 key
- dti = pd.date_range("2016-01-01", periods=3)
+ dti = date_range("2016-01-01", periods=3)
arr = np.arange(6, 9)
ser = pd.Series(arr, index=dti)
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index 88c837e32d261..333a1ac169bb7 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -14,7 +14,7 @@
class TestTimeSeries:
def test_range_edges(self):
# GH#13672
- idx = pd.date_range(
+ idx = date_range(
start=Timestamp("1970-01-01 00:00:00.000000001"),
end=Timestamp("1970-01-01 00:00:00.000000004"),
freq="N",
@@ -30,7 +30,7 @@ def test_range_edges(self):
)
tm.assert_index_equal(idx, exp)
- idx = pd.date_range(
+ idx = date_range(
start=Timestamp("1970-01-01 00:00:00.000000004"),
end=Timestamp("1970-01-01 00:00:00.000000001"),
freq="N",
@@ -38,7 +38,7 @@ def test_range_edges(self):
exp = DatetimeIndex([], freq="N")
tm.assert_index_equal(idx, exp)
- idx = pd.date_range(
+ idx = date_range(
start=Timestamp("1970-01-01 00:00:00.000000001"),
end=Timestamp("1970-01-01 00:00:00.000000001"),
freq="N",
@@ -46,7 +46,7 @@ def test_range_edges(self):
exp = DatetimeIndex(["1970-01-01 00:00:00.000000001"], freq="N")
tm.assert_index_equal(idx, exp)
- idx = pd.date_range(
+ idx = date_range(
start=Timestamp("1970-01-01 00:00:00.000001"),
end=Timestamp("1970-01-01 00:00:00.000004"),
freq="U",
@@ -62,7 +62,7 @@ def test_range_edges(self):
)
tm.assert_index_equal(idx, exp)
- idx = pd.date_range(
+ idx = date_range(
start=Timestamp("1970-01-01 00:00:00.001"),
end=Timestamp("1970-01-01 00:00:00.004"),
freq="L",
@@ -78,7 +78,7 @@ def test_range_edges(self):
)
tm.assert_index_equal(idx, exp)
- idx = pd.date_range(
+ idx = date_range(
start=Timestamp("1970-01-01 00:00:01"),
end=Timestamp("1970-01-01 00:00:04"),
freq="S",
@@ -94,7 +94,7 @@ def test_range_edges(self):
)
tm.assert_index_equal(idx, exp)
- idx = pd.date_range(
+ idx = date_range(
start=Timestamp("1970-01-01 00:01"),
end=Timestamp("1970-01-01 00:04"),
freq="T",
@@ -110,7 +110,7 @@ def test_range_edges(self):
)
tm.assert_index_equal(idx, exp)
- idx = pd.date_range(
+ idx = date_range(
start=Timestamp("1970-01-01 01:00"),
end=Timestamp("1970-01-01 04:00"),
freq="H",
@@ -126,7 +126,7 @@ def test_range_edges(self):
)
tm.assert_index_equal(idx, exp)
- idx = pd.date_range(
+ idx = date_range(
start=Timestamp("1970-01-01"), end=Timestamp("1970-01-04"), freq="D"
)
exp = DatetimeIndex(
@@ -137,9 +137,9 @@ def test_range_edges(self):
class TestDatetime64:
def test_datetimeindex_accessors(self):
- dti_naive = pd.date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
+ dti_naive = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
# GH#13303
- dti_tz = pd.date_range(
+ dti_tz = date_range(
freq="D", start=datetime(1998, 1, 1), periods=365, tz="US/Eastern"
)
for dti in [dti_naive, dti_tz]:
@@ -227,7 +227,7 @@ def test_datetimeindex_accessors(self):
exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name")
tm.assert_index_equal(res, exp)
- dti = pd.date_range(freq="BQ-FEB", start=datetime(1998, 1, 1), periods=4)
+ dti = date_range(freq="BQ-FEB", start=datetime(1998, 1, 1), periods=4)
assert sum(dti.is_quarter_start) == 0
assert sum(dti.is_quarter_end) == 4
@@ -329,7 +329,7 @@ def test_datetime_name_accessors(self, time_locale):
expected_months = calendar.month_name[1:]
# GH#11128
- dti = pd.date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
+ dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
english_days = [
"Monday",
"Tuesday",
@@ -350,7 +350,7 @@ def test_datetime_name_accessors(self, time_locale):
assert np.isnan(ts.day_name(locale=time_locale))
# GH#12805
- dti = pd.date_range(freq="M", start="2012", end="2013")
+ dti = date_range(freq="M", start="2012", end="2013")
result = dti.month_name(locale=time_locale)
expected = Index([month.capitalize() for month in expected_months])
@@ -388,7 +388,7 @@ def test_iter_readonly():
def test_week_and_weekofyear_are_deprecated():
# GH#33595 Deprecate week and weekofyear
- idx = pd.date_range(start="2019-12-29", freq="D", periods=4)
+ idx = date_range(start="2019-12-29", freq="D", periods=4)
with tm.assert_produces_warning(FutureWarning):
idx.week
with tm.assert_produces_warning(FutureWarning):
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index cbbe3aca9ccbe..faa135d649cd9 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -50,7 +50,7 @@ def test_repeat_range(self, tz_naive_fixture):
assert result.freq is None
assert len(result) == 5 * len(rng)
- index = pd.date_range("2001-01-01", periods=2, freq="D", tz=tz)
+ index = date_range("2001-01-01", periods=2, freq="D", tz=tz)
exp = DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz
)
@@ -58,7 +58,7 @@ def test_repeat_range(self, tz_naive_fixture):
tm.assert_index_equal(res, exp)
assert res.freq is None
- index = pd.date_range("2001-01-01", periods=2, freq="2D", tz=tz)
+ index = date_range("2001-01-01", periods=2, freq="2D", tz=tz)
exp = DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz
)
@@ -90,7 +90,7 @@ def test_repeat(self, tz_naive_fixture):
reps = 2
msg = "the 'axis' parameter is not supported"
- rng = pd.date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz)
+ rng = date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz)
expected_rng = DatetimeIndex(
[
@@ -128,17 +128,17 @@ def test_resolution(self, tz_naive_fixture, freq, expected):
if freq == "A" and not IS64 and isinstance(tz, tzlocal):
pytest.xfail(reason="OverflowError inside tzlocal past 2038")
- idx = pd.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
+ idx = date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_value_counts_unique(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
- idx = pd.date_range("2011-01-01 09:00", freq="H", periods=10)
+ idx = date_range("2011-01-01 09:00", freq="H", periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
- exp_idx = pd.date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
+ exp_idx = date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
expected.index = expected.index._with_freq(None)
@@ -146,7 +146,7 @@ def test_value_counts_unique(self, tz_naive_fixture):
tm.assert_series_equal(obj.value_counts(), expected)
- expected = pd.date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz)
+ expected = date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz)
expected = expected._with_freq(None)
tm.assert_index_equal(idx.unique(), expected)
@@ -261,7 +261,7 @@ def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture)
def test_drop_duplicates_metadata(self, freq_sample):
# GH 10115
- idx = pd.date_range("2011-01-01", freq=freq_sample, periods=10, name="idx")
+ idx = date_range("2011-01-01", freq=freq_sample, periods=10, name="idx")
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
@@ -287,7 +287,7 @@ def test_drop_duplicates_metadata(self, freq_sample):
)
def test_drop_duplicates(self, freq_sample, keep, expected, index):
# to check Index/Series compat
- idx = pd.date_range("2011-01-01", freq=freq_sample, periods=10, name="idx")
+ idx = date_range("2011-01-01", freq=freq_sample, periods=10, name="idx")
idx = idx.append(idx[:5])
tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected)
@@ -301,7 +301,7 @@ def test_drop_duplicates(self, freq_sample, keep, expected, index):
def test_infer_freq(self, freq_sample):
# GH 11018
- idx = pd.date_range("2011-01-01 09:00:00", freq=freq_sample, periods=10)
+ idx = date_range("2011-01-01 09:00:00", freq=freq_sample, periods=10)
result = DatetimeIndex(idx.asi8, freq="infer")
tm.assert_index_equal(idx, result)
assert result.freq == freq_sample
@@ -361,7 +361,7 @@ def test_freq_view_safe(self):
# Setting the freq for one DatetimeIndex shouldn't alter the freq
# for another that views the same data
- dti = pd.date_range("2016-01-01", periods=5)
+ dti = date_range("2016-01-01", periods=5)
dta = dti._data
dti2 = DatetimeIndex(dta)._with_freq(None)
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index 3dbfd8b64cbba..c8edd30e3f7aa 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -54,19 +54,19 @@ def test_union3(self, sort, box):
@pytest.mark.parametrize("tz", tz)
def test_union(self, tz, sort):
- rng1 = pd.date_range("1/1/2000", freq="D", periods=5, tz=tz)
- other1 = pd.date_range("1/6/2000", freq="D", periods=5, tz=tz)
- expected1 = pd.date_range("1/1/2000", freq="D", periods=10, tz=tz)
+ rng1 = date_range("1/1/2000", freq="D", periods=5, tz=tz)
+ other1 = date_range("1/6/2000", freq="D", periods=5, tz=tz)
+ expected1 = date_range("1/1/2000", freq="D", periods=10, tz=tz)
expected1_notsorted = DatetimeIndex(list(other1) + list(rng1))
- rng2 = pd.date_range("1/1/2000", freq="D", periods=5, tz=tz)
- other2 = pd.date_range("1/4/2000", freq="D", periods=5, tz=tz)
- expected2 = pd.date_range("1/1/2000", freq="D", periods=8, tz=tz)
+ rng2 = date_range("1/1/2000", freq="D", periods=5, tz=tz)
+ other2 = date_range("1/4/2000", freq="D", periods=5, tz=tz)
+ expected2 = date_range("1/1/2000", freq="D", periods=8, tz=tz)
expected2_notsorted = DatetimeIndex(list(other2) + list(rng2[:3]))
- rng3 = pd.date_range("1/1/2000", freq="D", periods=5, tz=tz)
+ rng3 = date_range("1/1/2000", freq="D", periods=5, tz=tz)
other3 = DatetimeIndex([], tz=tz)
- expected3 = pd.date_range("1/1/2000", freq="D", periods=5, tz=tz)
+ expected3 = date_range("1/1/2000", freq="D", periods=5, tz=tz)
expected3_notsorted = rng3
for rng, other, exp, exp_notsorted in [
@@ -156,7 +156,7 @@ def test_union_freq_infer(self):
# When taking the union of two DatetimeIndexes, we infer
# a freq even if the arguments don't have freq. This matches
# TimedeltaIndex behavior.
- dti = pd.date_range("2016-01-01", periods=5)
+ dti = date_range("2016-01-01", periods=5)
left = dti[[0, 1, 3, 4]]
right = dti[[2, 3, 1]]
@@ -175,7 +175,7 @@ def test_union_dataframe_index(self):
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({"s1": s1, "s2": s2})
- exp = pd.date_range("1/1/1980", "1/1/2012", freq="MS")
+ exp = date_range("1/1/1980", "1/1/2012", freq="MS")
tm.assert_index_equal(df.index, exp)
def test_union_with_DatetimeIndex(self, sort):
@@ -309,11 +309,11 @@ def test_difference(self, tz, sort):
rng_dates = ["1/2/2000", "1/3/2000", "1/1/2000", "1/4/2000", "1/5/2000"]
rng1 = DatetimeIndex(rng_dates, tz=tz)
- other1 = pd.date_range("1/6/2000", freq="D", periods=5, tz=tz)
+ other1 = date_range("1/6/2000", freq="D", periods=5, tz=tz)
expected1 = DatetimeIndex(rng_dates, tz=tz)
rng2 = DatetimeIndex(rng_dates, tz=tz)
- other2 = pd.date_range("1/4/2000", freq="D", periods=5, tz=tz)
+ other2 = date_range("1/4/2000", freq="D", periods=5, tz=tz)
expected2 = DatetimeIndex(rng_dates[:3], tz=tz)
rng3 = DatetimeIndex(rng_dates, tz=tz)
diff --git a/pandas/tests/indexes/datetimes/test_shift.py b/pandas/tests/indexes/datetimes/test_shift.py
index 3c202005f7933..611df5d99cb9c 100644
--- a/pandas/tests/indexes/datetimes/test_shift.py
+++ b/pandas/tests/indexes/datetimes/test_shift.py
@@ -49,7 +49,7 @@ def test_dti_shift_tzaware(self, tz_naive_fixture):
def test_dti_shift_freqs(self):
# test shift for DatetimeIndex and non DatetimeIndex
# GH#8083
- drange = pd.date_range("20130101", periods=5)
+ drange = date_range("20130101", periods=5)
result = drange.shift(1)
expected = DatetimeIndex(
["2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05", "2013-01-06"],
@@ -123,7 +123,7 @@ def test_dti_shift_near_midnight(self, shift, result_time):
def test_shift_periods(self):
# GH#22458 : argument 'n' was deprecated in favor of 'periods'
- idx = pd.date_range(start=START, end=END, periods=3)
+ idx = date_range(start=START, end=END, periods=3)
tm.assert_index_equal(idx.shift(periods=0), idx)
tm.assert_index_equal(idx.shift(0), idx)
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index 8a73f564ef064..add1bd4bb3972 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -400,10 +400,10 @@ def test_dti_tz_localize_pass_dates_to_utc(self, tzstr):
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_localize(self, prefix):
tzstr = prefix + "US/Eastern"
- dti = pd.date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="L")
+ dti = date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="L")
dti2 = dti.tz_localize(tzstr)
- dti_utc = pd.date_range(
+ dti_utc = date_range(
start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="L", tz="utc"
)
@@ -412,11 +412,11 @@ def test_dti_tz_localize(self, prefix):
dti3 = dti2.tz_convert(prefix + "US/Pacific")
tm.assert_numpy_array_equal(dti3.values, dti_utc.values)
- dti = pd.date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="L")
+ dti = date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="L")
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dti.tz_localize(tzstr)
- dti = pd.date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="L")
+ dti = date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="L")
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:00:00"):
dti.tz_localize(tzstr)
@@ -606,8 +606,8 @@ def test_dti_construction_nonexistent_endpoint(self, tz, option, expected):
assert times[-1] == Timestamp(expected, tz=tz, freq="H")
def test_dti_tz_localize_bdate_range(self):
- dr = pd.bdate_range("1/1/2009", "1/1/2010")
- dr_utc = pd.bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc)
+ dr = bdate_range("1/1/2009", "1/1/2010")
+ dr_utc = bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
tm.assert_index_equal(dr_utc, localized)
@@ -805,7 +805,7 @@ def test_dti_tz_constructors(self, tzstr):
arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"]
idx1 = to_datetime(arr).tz_localize(tzstr)
- idx2 = pd.date_range(start="2005-11-10 08:00:00", freq="H", periods=2, tz=tzstr)
+ idx2 = date_range(start="2005-11-10 08:00:00", freq="H", periods=2, tz=tzstr)
idx2 = idx2._with_freq(None) # the others all have freq=None
idx3 = DatetimeIndex(arr, tz=tzstr)
idx4 = DatetimeIndex(np.array(arr), tz=tzstr)
@@ -874,7 +874,7 @@ def test_drop_dst_boundary(self):
start = Timestamp("201710290100", tz=tz)
end = Timestamp("201710290300", tz=tz)
- index = pd.date_range(start=start, end=end, freq=freq)
+ index = date_range(start=start, end=end, freq=freq)
expected = DatetimeIndex(
[
diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py
index a2ca686d0412d..85f3d17fdd0d4 100644
--- a/pandas/tests/indexes/multi/test_constructors.py
+++ b/pandas/tests/indexes/multi/test_constructors.py
@@ -190,8 +190,8 @@ def test_from_arrays_tuples(idx):
def test_from_arrays_index_series_datetimetz():
- idx1 = pd.date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern")
- idx2 = pd.date_range("2015-01-01 10:00", freq="H", periods=3, tz="Asia/Tokyo")
+ idx1 = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern")
+ idx2 = date_range("2015-01-01 10:00", freq="H", periods=3, tz="Asia/Tokyo")
result = MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
@@ -232,8 +232,8 @@ def test_from_arrays_index_series_period():
def test_from_arrays_index_datetimelike_mixed():
- idx1 = pd.date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern")
- idx2 = pd.date_range("2015-01-01 10:00", freq="H", periods=3)
+ idx1 = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern")
+ idx2 = date_range("2015-01-01 10:00", freq="H", periods=3)
idx3 = pd.timedelta_range("1 days", freq="D", periods=3)
idx4 = pd.period_range("2011-01-01", freq="D", periods=3)
@@ -667,7 +667,7 @@ def test_from_frame_dtype_fidelity():
# GH 22420
df = pd.DataFrame(
{
- "dates": pd.date_range("19910905", periods=6, tz="US/Eastern"),
+ "dates": date_range("19910905", periods=6, tz="US/Eastern"),
"a": [1, 1, 1, 2, 2, 2],
"b": pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True),
"c": ["x", "x", "y", "z", "x", "y"],
@@ -677,7 +677,7 @@ def test_from_frame_dtype_fidelity():
expected_mi = MultiIndex.from_arrays(
[
- pd.date_range("19910905", periods=6, tz="US/Eastern"),
+ date_range("19910905", periods=6, tz="US/Eastern"),
[1, 1, 1, 2, 2, 2],
pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True),
["x", "x", "y", "z", "x", "y"],
@@ -754,7 +754,7 @@ def test_datetimeindex():
idx1 = pd.DatetimeIndex(
["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"] * 2, tz="Asia/Tokyo"
)
- idx2 = pd.date_range("2010/01/01", periods=6, freq="M", tz="US/Eastern")
+ idx2 = date_range("2010/01/01", periods=6, freq="M", tz="US/Eastern")
idx = MultiIndex.from_arrays([idx1, idx2])
expected1 = pd.DatetimeIndex(
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index e0241c2c5eadd..6bce89c520ce6 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -696,7 +696,7 @@ def test_contains_top_level(self):
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(
- levels=[["C"], pd.date_range("2012-01-01", periods=5)],
+ levels=[["C"], date_range("2012-01-01", periods=5)],
codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, "B"],
)
@@ -757,7 +757,7 @@ def test_timestamp_multiindex_indexer():
# https://github.com/pandas-dev/pandas/issues/26944
idx = MultiIndex.from_product(
[
- pd.date_range("2019-01-01T00:15:33", periods=100, freq="H", name="date"),
+ date_range("2019-01-01T00:15:33", periods=100, freq="H", name="date"),
["x"],
[3],
]
@@ -766,7 +766,7 @@ def test_timestamp_multiindex_indexer():
result = df.loc[pd.IndexSlice["2019-1-2":, "x", :], "foo"]
qidx = MultiIndex.from_product(
[
- pd.date_range(
+ date_range(
start="2019-01-02T00:15:33",
end="2019-01-05T03:15:33",
freq="H",
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index 19dfa9137cc5c..9b203e1b17517 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -362,7 +362,7 @@ def test_get_loc2(self):
def test_get_loc_invalid_string_raises_keyerror(self):
# GH#34240
- pi = pd.period_range("2000", periods=3, name="A")
+ pi = period_range("2000", periods=3, name="A")
with pytest.raises(KeyError, match="A"):
pi.get_loc("A")
@@ -713,7 +713,7 @@ def test_get_value(self):
def test_loc_str(self):
# https://github.com/pandas-dev/pandas/issues/33964
- index = pd.period_range(start="2000", periods=20, freq="B")
+ index = period_range(start="2000", periods=20, freq="B")
series = Series(range(20), index=index)
assert series.loc["2000-01-14"] == 9
@@ -821,7 +821,7 @@ def test_contains_nat(self):
class TestAsOfLocs:
def test_asof_locs_mismatched_type(self):
- dti = pd.date_range("2016-01-01", periods=3)
+ dti = date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
pi2 = dti.to_period("H")
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 53467819c3ba0..a4f23741650ec 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -90,16 +90,16 @@ def test_constructor_copy(self, index):
@pytest.mark.parametrize(
"index",
[
- pd.date_range(
+ date_range(
"2015-01-01 10:00",
freq="D",
periods=3,
tz="US/Eastern",
name="Green Eggs & Ham",
), # DTI with tz
- pd.date_range("2015-01-01 10:00", freq="D", periods=3), # DTI no tz
+ date_range("2015-01-01 10:00", freq="D", periods=3), # DTI no tz
pd.timedelta_range("1 days", freq="D", periods=3), # td
- pd.period_range("2015-01-01", freq="D", periods=3), # period
+ period_range("2015-01-01", freq="D", periods=3), # period
],
)
def test_constructor_from_index_dtlike(self, cast_as_obj, index):
@@ -125,11 +125,11 @@ def test_constructor_from_index_dtlike(self, cast_as_obj, index):
"index,has_tz",
[
(
- pd.date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"),
+ date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"),
True,
), # datetimetz
(pd.timedelta_range("1 days", freq="D", periods=3), False), # td
- (pd.period_range("2015-01-01", freq="D", periods=3), False), # period
+ (period_range("2015-01-01", freq="D", periods=3), False), # period
],
)
def test_constructor_from_series_dtlike(self, index, has_tz):
@@ -341,7 +341,7 @@ def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, klass):
# .asi8 produces integers, so these are considered epoch timestamps
# ^the above will be true in a later version. Right now we `.view`
# the i8 values as NS_DTYPE, effectively treating them as wall times.
- index = pd.date_range("2011-01-01", periods=5)
+ index = date_range("2011-01-01", periods=5)
arg = getattr(index, attr)
index = index.tz_localize(tz_naive_fixture)
dtype = index.dtype
@@ -550,7 +550,7 @@ def test_asof(self, index):
assert isinstance(index.asof(d), Timestamp)
def test_asof_datetime_partial(self):
- index = pd.date_range("2010-01-01", periods=2, freq="m")
+ index = date_range("2010-01-01", periods=2, freq="m")
expected = Timestamp("2010-02-28")
result = index.asof("2010-02")
assert result == expected
@@ -718,7 +718,7 @@ def test_union_identity(self, index, sort):
def test_union_dt_as_obj(self, sort):
# TODO: Replace with fixturesult
index = self.create_index()
- date_index = pd.date_range("2019-01-01", periods=10)
+ date_index = date_range("2019-01-01", periods=10)
first_cat = index.union(date_index)
second_cat = index.union(index)
@@ -1639,7 +1639,7 @@ def test_isin_empty(self, empty):
[1.0, 2.0, 3.0, 4.0],
[True, True, True, True],
["foo", "bar", "baz", "qux"],
- pd.date_range("2018-01-01", freq="D", periods=4),
+ date_range("2018-01-01", freq="D", periods=4),
],
)
def test_boolean_cmp(self, values):
@@ -1812,8 +1812,8 @@ def test_take_bad_bounds_raises(self):
np.array(["A", "B", "C"]),
np.array(["C", "B", "A"]),
# Must preserve name even if dtype changes
- pd.date_range("20130101", periods=3).values,
- pd.date_range("20130101", periods=3).tolist(),
+ date_range("20130101", periods=3).values,
+ date_range("20130101", periods=3).tolist(),
],
)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self, name, labels):
diff --git a/pandas/tests/indexes/timedeltas/test_astype.py b/pandas/tests/indexes/timedeltas/test_astype.py
index a908cada5b5dc..6f82e77faca7a 100644
--- a/pandas/tests/indexes/timedeltas/test_astype.py
+++ b/pandas/tests/indexes/timedeltas/test_astype.py
@@ -104,7 +104,7 @@ def test_astype_raises(self, dtype):
idx.astype(dtype)
def test_astype_category(self):
- obj = pd.timedelta_range("1H", periods=2, freq="H")
+ obj = timedelta_range("1H", periods=2, freq="H")
result = obj.astype("category")
expected = pd.CategoricalIndex([Timedelta("1H"), Timedelta("2H")])
@@ -115,7 +115,7 @@ def test_astype_category(self):
tm.assert_categorical_equal(result, expected)
def test_astype_array_fallback(self):
- obj = pd.timedelta_range("1H", periods=2)
+ obj = timedelta_range("1H", periods=2)
result = obj.astype(bool)
expected = Index(np.array([True, True]))
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/timedeltas/test_constructors.py b/pandas/tests/indexes/timedeltas/test_constructors.py
index 1c0104f340f75..a07977702531e 100644
--- a/pandas/tests/indexes/timedeltas/test_constructors.py
+++ b/pandas/tests/indexes/timedeltas/test_constructors.py
@@ -27,7 +27,7 @@ def test_infer_from_tdi(self):
# GH#23539
# fast-path for inferring a frequency if the passed data already
# has one
- tdi = pd.timedelta_range("1 second", periods=10 ** 7, freq="1s")
+ tdi = timedelta_range("1 second", periods=10 ** 7, freq="1s")
result = TimedeltaIndex(tdi, freq="infer")
assert result.freq == tdi.freq
@@ -40,7 +40,7 @@ def test_infer_from_tdi_mismatch(self):
# GH#23539
# fast-path for invalidating a frequency if the passed data already
# has one and it does not match the `freq` input
- tdi = pd.timedelta_range("1 second", periods=100, freq="1s")
+ tdi = timedelta_range("1 second", periods=100, freq="1s")
msg = (
"Inferred frequency .* from passed values does "
diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py
index 37aa9653550fb..d79865c1446db 100644
--- a/pandas/tests/indexes/timedeltas/test_indexing.py
+++ b/pandas/tests/indexes/timedeltas/test_indexing.py
@@ -65,7 +65,7 @@ def test_getitem(self):
)
def test_timestamp_invalid_key(self, key):
# GH#20464
- tdi = pd.timedelta_range(0, periods=10)
+ tdi = timedelta_range(0, periods=10)
with pytest.raises(KeyError, match=re.escape(repr(key))):
tdi.get_loc(key)
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 52097dbe610ef..3578174e17141 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -69,7 +69,7 @@ def test_nonunique_contains(self):
def test_unknown_attribute(self):
# see gh-9680
- tdi = pd.timedelta_range(start=0, periods=10, freq="1s")
+ tdi = timedelta_range(start=0, periods=10, freq="1s")
ts = Series(np.random.normal(size=10), index=tdi)
assert "foo" not in ts.__dict__.keys()
msg = "'Series' object has no attribute 'foo'"
@@ -138,7 +138,7 @@ def test_order(self):
def test_drop_duplicates_metadata(self, freq_sample):
# GH 10115
- idx = pd.timedelta_range("1 day", periods=10, freq=freq_sample, name="idx")
+ idx = timedelta_range("1 day", periods=10, freq=freq_sample, name="idx")
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
@@ -164,7 +164,7 @@ def test_drop_duplicates_metadata(self, freq_sample):
)
def test_drop_duplicates(self, freq_sample, keep, expected, index):
# to check Index/Series compat
- idx = pd.timedelta_range("1 day", periods=10, freq=freq_sample, name="idx")
+ idx = timedelta_range("1 day", periods=10, freq=freq_sample, name="idx")
idx = idx.append(idx[:5])
tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected)
@@ -178,13 +178,13 @@ def test_drop_duplicates(self, freq_sample, keep, expected, index):
def test_infer_freq(self, freq_sample):
# GH#11018
- idx = pd.timedelta_range("1", freq=freq_sample, periods=10)
+ idx = timedelta_range("1", freq=freq_sample, periods=10)
result = TimedeltaIndex(idx.asi8, freq="infer")
tm.assert_index_equal(idx, result)
assert result.freq == freq_sample
def test_repeat(self):
- index = pd.timedelta_range("1 days", periods=2, freq="D")
+ index = timedelta_range("1 days", periods=2, freq="D")
exp = TimedeltaIndex(["1 days", "1 days", "2 days", "2 days"])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
diff --git a/pandas/tests/indexes/timedeltas/test_scalar_compat.py b/pandas/tests/indexes/timedeltas/test_scalar_compat.py
index 6a2238d90b590..2f9e1a88a04a8 100644
--- a/pandas/tests/indexes/timedeltas/test_scalar_compat.py
+++ b/pandas/tests/indexes/timedeltas/test_scalar_compat.py
@@ -7,7 +7,6 @@
from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG
-import pandas as pd
from pandas import Index, Series, Timedelta, TimedeltaIndex, timedelta_range
import pandas._testing as tm
@@ -43,7 +42,7 @@ def test_tdi_total_seconds(self):
)
def test_tdi_round(self):
- td = pd.timedelta_range(start="16801 days", periods=5, freq="30Min")
+ td = timedelta_range(start="16801 days", periods=5, freq="30Min")
elt = td[1]
expected_rng = TimedeltaIndex(
diff --git a/pandas/tests/indexes/timedeltas/test_setops.py b/pandas/tests/indexes/timedeltas/test_setops.py
index 94fdfefa497a3..2e4e4bfde9202 100644
--- a/pandas/tests/indexes/timedeltas/test_setops.py
+++ b/pandas/tests/indexes/timedeltas/test_setops.py
@@ -82,7 +82,7 @@ def test_union_freq_infer(self):
# When taking the union of two TimedeltaIndexes, we infer
# a freq even if the arguments don't have freq. This matches
# DatetimeIndex behavior.
- tdi = pd.timedelta_range("1 Day", periods=5)
+ tdi = timedelta_range("1 Day", periods=5)
left = tdi[[0, 1, 3, 4]]
right = tdi[[2, 3, 1]]
| Finding these annoying while doing other work in this directory.
| https://api.github.com/repos/pandas-dev/pandas/pulls/38076 | 2020-11-26T03:00:39Z | 2020-11-26T16:05:02Z | 2020-11-26T16:05:02Z | 2020-11-26T16:15:46Z |
PERF: fix regression in tz_convert_from_utc | diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index f08a86b1262e6..1049682af08e8 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -426,7 +426,7 @@ def tz_convert_from_utc(const int64_t[:] vals, tzinfo tz):
int64 ndarray of converted
"""
cdef:
- int64_t[:] converted
+ const int64_t[:] converted
if len(vals) == 0:
return np.array([], dtype=np.int64)
@@ -437,7 +437,7 @@ def tz_convert_from_utc(const int64_t[:] vals, tzinfo tz):
@cython.boundscheck(False)
@cython.wraparound(False)
-cdef int64_t[:] _tz_convert_from_utc(const int64_t[:] vals, tzinfo tz):
+cdef const int64_t[:] _tz_convert_from_utc(const int64_t[:] vals, tzinfo tz):
"""
Convert the given values (in i8) either to UTC or from UTC.
@@ -459,7 +459,7 @@ cdef int64_t[:] _tz_convert_from_utc(const int64_t[:] vals, tzinfo tz):
str typ
if is_utc(tz):
- converted = vals.copy()
+ return vals
elif is_tzlocal(tz):
converted = np.empty(n, dtype=np.int64)
for i in range(n):
| - [x] closes #35803
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry - N/A I believe
Get rid of an unnecessary copy. | https://api.github.com/repos/pandas-dev/pandas/pulls/38074 | 2020-11-25T23:10:32Z | 2020-11-26T15:35:30Z | 2020-11-26T15:35:30Z | 2020-11-26T15:35:34Z |
BUG: Series.where casting dt64 to int64 | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 77bc080892e6c..da262993ca858 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -191,6 +191,8 @@ Datetimelike
- Bug in :meth:`DataFrame.first` and :meth:`Series.first` returning two months for offset one month when first day is last calendar day (:issue:`29623`)
- Bug in constructing a :class:`DataFrame` or :class:`Series` with mismatched ``datetime64`` data and ``timedelta64`` dtype, or vice-versa, failing to raise ``TypeError`` (:issue:`38575`)
- Bug in :meth:`DatetimeIndex.intersection`, :meth:`DatetimeIndex.symmetric_difference`, :meth:`PeriodIndex.intersection`, :meth:`PeriodIndex.symmetric_difference` always returning object-dtype when operating with :class:`CategoricalIndex` (:issue:`38741`)
+- Bug in :meth:`Series.where` incorrectly casting ``datetime64`` values to ``int64`` (:issue:`37682`)
+-
Timedelta
^^^^^^^^^
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 50d12703c3a30..ae131d8a51ba1 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -161,7 +161,8 @@ def __init__(self, values: Union[np.ndarray, "PandasArray"], copy: bool = False)
f"'values' must be a NumPy array, not {type(values).__name__}"
)
- if values.ndim != 1:
+ if values.ndim == 0:
+ # Technically we support 2, but do not advertise that fact.
raise ValueError("PandasArray must be 1-dimensional.")
if copy:
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index d42039e710666..9f4596c16902f 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1332,6 +1332,22 @@ def shift(self, periods: int, axis: int = 0, fill_value=None):
return [self.make_block(new_values)]
+ def _maybe_reshape_where_args(self, values, other, cond, axis):
+ transpose = self.ndim == 2
+
+ cond = _extract_bool_array(cond)
+
+ # If the default broadcasting would go in the wrong direction, then
+ # explicitly reshape other instead
+ if getattr(other, "ndim", 0) >= 1:
+ if values.ndim - 1 == other.ndim and axis == 1:
+ other = other.reshape(tuple(other.shape + (1,)))
+ elif transpose and values.ndim == self.ndim - 1:
+ # TODO(EA2D): not neceesssary with 2D EAs
+ cond = cond.T
+
+ return other, cond
+
def where(
self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0
) -> List["Block"]:
@@ -1354,7 +1370,6 @@ def where(
"""
import pandas.core.computation.expressions as expressions
- cond = _extract_bool_array(cond)
assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame))
assert errors in ["raise", "ignore"]
@@ -1365,17 +1380,7 @@ def where(
if transpose:
values = values.T
- # If the default broadcasting would go in the wrong direction, then
- # explicitly reshape other instead
- if getattr(other, "ndim", 0) >= 1:
- if values.ndim - 1 == other.ndim and axis == 1:
- other = other.reshape(tuple(other.shape + (1,)))
- elif transpose and values.ndim == self.ndim - 1:
- # TODO(EA2D): not neceesssary with 2D EAs
- cond = cond.T
-
- if not hasattr(cond, "shape"):
- raise ValueError("where must have a condition that is ndarray like")
+ other, cond = self._maybe_reshape_where_args(values, other, cond, axis)
if cond.ravel("K").all():
result = values
@@ -2128,6 +2133,26 @@ def to_native_types(self, na_rep="NaT", **kwargs):
result = arr._format_native_types(na_rep=na_rep, **kwargs)
return self.make_block(result)
+ def where(
+ self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0
+ ) -> List["Block"]:
+ # TODO(EA2D): reshape unnecessary with 2D EAs
+ arr = self.array_values().reshape(self.shape)
+
+ other, cond = self._maybe_reshape_where_args(arr, other, cond, axis)
+
+ try:
+ res_values = arr.T.where(cond, other).T
+ except (ValueError, TypeError):
+ return super().where(
+ other, cond, errors=errors, try_cast=try_cast, axis=axis
+ )
+
+ # TODO(EA2D): reshape not needed with 2D EAs
+ res_values = res_values.reshape(self.values.shape)
+ nb = self.make_block_same_class(res_values)
+ return [nb]
+
def _can_hold_element(self, element: Any) -> bool:
arr = self.array_values()
@@ -2196,6 +2221,7 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeBlock):
fillna = DatetimeBlock.fillna # i.e. Block.fillna
fill_value = DatetimeBlock.fill_value
_can_hold_na = DatetimeBlock._can_hold_na
+ where = DatetimeBlock.where
array_values = ExtensionBlock.array_values
diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py
index 779cb7a2350ee..f14d5349dcea3 100644
--- a/pandas/tests/arrays/test_array.py
+++ b/pandas/tests/arrays/test_array.py
@@ -278,7 +278,7 @@ def test_array_inference_fails(data):
tm.assert_extension_array_equal(result, expected)
-@pytest.mark.parametrize("data", [np.array([[1, 2], [3, 4]]), [[1, 2], [3, 4]]])
+@pytest.mark.parametrize("data", [np.array(0)])
def test_nd_raises(data):
with pytest.raises(ValueError, match="PandasArray must be 1-dimensional"):
pd.array(data, dtype="int64")
diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py
index 27bbb47e1d0d1..59c68fba53e25 100644
--- a/pandas/tests/series/indexing/test_where.py
+++ b/pandas/tests/series/indexing/test_where.py
@@ -464,3 +464,35 @@ def test_where_categorical(klass):
df = klass(["A", "A", "B", "B", "C"], dtype="category")
res = df.where(df != "C")
tm.assert_equal(exp, res)
+
+
+def test_where_datetimelike_categorical(tz_naive_fixture):
+ # GH#37682
+ tz = tz_naive_fixture
+
+ dr = pd.date_range("2001-01-01", periods=3, tz=tz)._with_freq(None)
+ lvals = pd.DatetimeIndex([dr[0], dr[1], pd.NaT])
+ rvals = pd.Categorical([dr[0], pd.NaT, dr[2]])
+
+ mask = np.array([True, True, False])
+
+ # DatetimeIndex.where
+ res = lvals.where(mask, rvals)
+ tm.assert_index_equal(res, dr)
+
+ # DatetimeArray.where
+ res = lvals._data.where(mask, rvals)
+ tm.assert_datetime_array_equal(res, dr._data)
+
+ # Series.where
+ res = Series(lvals).where(mask, rvals)
+ tm.assert_series_equal(res, Series(dr))
+
+ # DataFrame.where
+ if tz is None:
+ res = pd.DataFrame(lvals).where(mask[:, None], pd.DataFrame(rvals))
+ else:
+ with pytest.xfail(reason="frame._values loses tz"):
+ res = pd.DataFrame(lvals).where(mask[:, None], pd.DataFrame(rvals))
+
+ tm.assert_frame_equal(res, pd.DataFrame(dr))
| - [x] closes #37682
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This sits on top of #38021 | https://api.github.com/repos/pandas-dev/pandas/pulls/38073 | 2020-11-25T22:43:24Z | 2020-12-29T18:09:25Z | 2020-12-29T18:09:25Z | 2020-12-29T18:13:45Z |
TST/REF: take things out of Base tests | diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index b2b3f76824b9e..2e03c00638a5c 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -28,81 +28,6 @@ def test_can_hold_identifiers(self):
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
- @pytest.mark.parametrize(
- "func,op_name",
- [
- (lambda idx: idx - idx, "__sub__"),
- (lambda idx: idx + idx, "__add__"),
- (lambda idx: idx - ["a", "b"], "__sub__"),
- (lambda idx: idx + ["a", "b"], "__add__"),
- (lambda idx: ["a", "b"] - idx, "__rsub__"),
- (lambda idx: ["a", "b"] + idx, "__radd__"),
- ],
- )
- def test_disallow_addsub_ops(self, func, op_name):
- # GH 10039
- # set ops (+/-) raise TypeError
- idx = Index(Categorical(["a", "b"]))
- cat_or_list = "'(Categorical|list)' and '(Categorical|list)'"
- msg = "|".join(
- [
- f"cannot perform {op_name} with this index type: CategoricalIndex",
- "can only concatenate list",
- rf"unsupported operand type\(s\) for [\+-]: {cat_or_list}",
- ]
- )
- with pytest.raises(TypeError, match=msg):
- func(idx)
-
- def test_method_delegation(self):
-
- ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"))
- result = ci.set_categories(list("cab"))
- tm.assert_index_equal(
- result, CategoricalIndex(list("aabbca"), categories=list("cab"))
- )
-
- ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
- result = ci.rename_categories(list("efg"))
- tm.assert_index_equal(
- result, CategoricalIndex(list("ffggef"), categories=list("efg"))
- )
-
- # GH18862 (let rename_categories take callables)
- result = ci.rename_categories(lambda x: x.upper())
- tm.assert_index_equal(
- result, CategoricalIndex(list("AABBCA"), categories=list("CAB"))
- )
-
- ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
- result = ci.add_categories(["d"])
- tm.assert_index_equal(
- result, CategoricalIndex(list("aabbca"), categories=list("cabd"))
- )
-
- ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
- result = ci.remove_categories(["c"])
- tm.assert_index_equal(
- result,
- CategoricalIndex(list("aabb") + [np.nan] + ["a"], categories=list("ab")),
- )
-
- ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"))
- result = ci.as_unordered()
- tm.assert_index_equal(result, ci)
-
- ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"))
- result = ci.as_ordered()
- tm.assert_index_equal(
- result,
- CategoricalIndex(list("aabbca"), categories=list("cabdef"), ordered=True),
- )
-
- # invalid
- msg = "cannot use inplace with CategoricalIndex"
- with pytest.raises(ValueError, match=msg):
- ci.set_categories(list("cab"), inplace=True)
-
def test_append(self):
ci = self.create_index()
@@ -387,6 +312,24 @@ def test_frame_repr(self):
expected = " A\na 1\nb 2\nc 3"
assert result == expected
+ def test_reindex_base(self):
+ # See test_reindex.py
+ pass
+
+ def test_map_str(self):
+ # See test_map.py
+ pass
+
+
+class TestCategoricalIndex2:
+ # Tests that are not overriding a test in Base
+
+ def test_format_different_scalar_lengths(self):
+ # GH35439
+ idx = CategoricalIndex(["aaaaaaaaa", "b"])
+ expected = ["aaaaaaaaa", "b"]
+ assert idx.format() == expected
+
@pytest.mark.parametrize(
"dtype, engine_type",
[
@@ -410,16 +353,77 @@ def test_engine_type(self, dtype, engine_type):
assert np.issubdtype(ci.codes.dtype, dtype)
assert isinstance(ci._engine, engine_type)
- def test_reindex_base(self):
- # See test_reindex.py
- pass
+ @pytest.mark.parametrize(
+ "func,op_name",
+ [
+ (lambda idx: idx - idx, "__sub__"),
+ (lambda idx: idx + idx, "__add__"),
+ (lambda idx: idx - ["a", "b"], "__sub__"),
+ (lambda idx: idx + ["a", "b"], "__add__"),
+ (lambda idx: ["a", "b"] - idx, "__rsub__"),
+ (lambda idx: ["a", "b"] + idx, "__radd__"),
+ ],
+ )
+ def test_disallow_addsub_ops(self, func, op_name):
+ # GH 10039
+ # set ops (+/-) raise TypeError
+ idx = Index(Categorical(["a", "b"]))
+ cat_or_list = "'(Categorical|list)' and '(Categorical|list)'"
+ msg = "|".join(
+ [
+ f"cannot perform {op_name} with this index type: CategoricalIndex",
+ "can only concatenate list",
+ rf"unsupported operand type\(s\) for [\+-]: {cat_or_list}",
+ ]
+ )
+ with pytest.raises(TypeError, match=msg):
+ func(idx)
- def test_map_str(self):
- # See test_map.py
- pass
+ def test_method_delegation(self):
- def test_format_different_scalar_lengths(self):
- # GH35439
- idx = CategoricalIndex(["aaaaaaaaa", "b"])
- expected = ["aaaaaaaaa", "b"]
- assert idx.format() == expected
+ ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"))
+ result = ci.set_categories(list("cab"))
+ tm.assert_index_equal(
+ result, CategoricalIndex(list("aabbca"), categories=list("cab"))
+ )
+
+ ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
+ result = ci.rename_categories(list("efg"))
+ tm.assert_index_equal(
+ result, CategoricalIndex(list("ffggef"), categories=list("efg"))
+ )
+
+ # GH18862 (let rename_categories take callables)
+ result = ci.rename_categories(lambda x: x.upper())
+ tm.assert_index_equal(
+ result, CategoricalIndex(list("AABBCA"), categories=list("CAB"))
+ )
+
+ ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
+ result = ci.add_categories(["d"])
+ tm.assert_index_equal(
+ result, CategoricalIndex(list("aabbca"), categories=list("cabd"))
+ )
+
+ ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
+ result = ci.remove_categories(["c"])
+ tm.assert_index_equal(
+ result,
+ CategoricalIndex(list("aabb") + [np.nan] + ["a"], categories=list("ab")),
+ )
+
+ ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"))
+ result = ci.as_unordered()
+ tm.assert_index_equal(result, ci)
+
+ ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"))
+ result = ci.as_ordered()
+ tm.assert_index_equal(
+ result,
+ CategoricalIndex(list("aabbca"), categories=list("cabdef"), ordered=True),
+ )
+
+ # invalid
+ msg = "cannot use inplace with CategoricalIndex"
+ with pytest.raises(ValueError, match=msg):
+ ci.set_categories(list("cab"), inplace=True)
diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py
index 7bf1ea7355b61..b4af1cb5859f0 100644
--- a/pandas/tests/indexes/interval/test_astype.py
+++ b/pandas/tests/indexes/interval/test_astype.py
@@ -15,7 +15,7 @@
import pandas._testing as tm
-class Base:
+class AstypeTests:
"""Tests common to IntervalIndex with any subtype"""
def test_astype_idempotent(self, index):
@@ -72,7 +72,7 @@ def test_astype_invalid_dtype(self, index):
index.astype("fake_dtype")
-class TestIntSubtype(Base):
+class TestIntSubtype(AstypeTests):
"""Tests specific to IntervalIndex with integer-like subtype"""
indexes = [
@@ -124,7 +124,7 @@ def test_subtype_integer_errors(self):
index.astype(dtype)
-class TestFloatSubtype(Base):
+class TestFloatSubtype(AstypeTests):
"""Tests specific to IntervalIndex with float subtype"""
indexes = [
@@ -179,7 +179,7 @@ def test_subtype_datetimelike(self, index, subtype):
index.astype(dtype)
-class TestDatetimelikeSubtype(Base):
+class TestDatetimelikeSubtype(AstypeTests):
"""Tests specific to IntervalIndex with datetime-like subtype"""
indexes = [
diff --git a/pandas/tests/indexes/interval/test_base.py b/pandas/tests/indexes/interval/test_base.py
index cc782a6e3bb81..738f0be2dbc86 100644
--- a/pandas/tests/indexes/interval/test_base.py
+++ b/pandas/tests/indexes/interval/test_base.py
@@ -52,6 +52,15 @@ def test_where(self, closed, klass):
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
+ def test_getitem_2d_deprecated(self):
+ # GH#30588 multi-dim indexing is deprecated, but raising is also acceptable
+ idx = self.create_index()
+ with pytest.raises(ValueError, match="multi-dimensional indexing not allowed"):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ idx[:, None]
+
+
+class TestPutmask:
@pytest.mark.parametrize("tz", ["US/Pacific", None])
def test_putmask_dt64(self, tz):
# GH#37968
@@ -75,10 +84,3 @@ def test_putmask_td64(self):
result = idx.putmask(mask, idx[-1])
expected = IntervalIndex([idx[-1]] * 3 + list(idx[3:]))
tm.assert_index_equal(result, expected)
-
- def test_getitem_2d_deprecated(self):
- # GH#30588 multi-dim indexing is deprecated, but raising is also acceptable
- idx = self.create_index()
- with pytest.raises(ValueError, match="multi-dimensional indexing not allowed"):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- idx[:, None]
diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py
index 82933a90d976e..8b4cafc17a202 100644
--- a/pandas/tests/indexes/interval/test_constructors.py
+++ b/pandas/tests/indexes/interval/test_constructors.py
@@ -29,7 +29,7 @@ def name(request):
return request.param
-class Base:
+class ConstructorTests:
"""
Common tests for all variations of IntervalIndex construction. Input data
to be supplied in breaks format, then converted by the subclass method
@@ -182,7 +182,7 @@ def test_generic_errors(self, constructor):
constructor(**decreasing_kwargs)
-class TestFromArrays(Base):
+class TestFromArrays(ConstructorTests):
"""Tests specific to IntervalIndex.from_arrays"""
@pytest.fixture
@@ -231,7 +231,7 @@ def test_mixed_float_int(self, left_subtype, right_subtype):
assert result.dtype.subtype == expected_subtype
-class TestFromBreaks(Base):
+class TestFromBreaks(ConstructorTests):
"""Tests specific to IntervalIndex.from_breaks"""
@pytest.fixture
@@ -269,7 +269,7 @@ def test_left_right_dont_share_data(self):
assert result._left.base is None or result._left.base is not result._right.base
-class TestFromTuples(Base):
+class TestFromTuples(ConstructorTests):
"""Tests specific to IntervalIndex.from_tuples"""
@pytest.fixture
@@ -316,7 +316,7 @@ def test_na_tuples(self):
tm.assert_index_equal(idx_na_tuple, idx_na_element)
-class TestClassConstructors(Base):
+class TestClassConstructors(ConstructorTests):
"""Tests specific to the IntervalIndex/Index constructors"""
@pytest.fixture(
diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py
index cd3a0e7b2241c..8c1272a6e971b 100644
--- a/pandas/tests/indexes/ranges/test_range.py
+++ b/pandas/tests/indexes/ranges/test_range.py
@@ -318,31 +318,6 @@ def test_slice_keep_name(self):
idx = RangeIndex(1, 2, name="asdf")
assert idx.name == idx[1:].name
- def test_explicit_conversions(self):
-
- # GH 8608
- # add/sub are overridden explicitly for Float/Int Index
- idx = RangeIndex(5)
-
- # float conversions
- arr = np.arange(5, dtype="int64") * 3.2
- expected = Float64Index(arr)
- fidx = idx * 3.2
- tm.assert_index_equal(fidx, expected)
- fidx = 3.2 * idx
- tm.assert_index_equal(fidx, expected)
-
- # interops with numpy arrays
- expected = Float64Index(arr)
- a = np.zeros(5, dtype="float64")
- result = fidx - a
- tm.assert_index_equal(result, expected)
-
- expected = Float64Index(-arr)
- a = np.zeros(5, dtype="float64")
- result = a - fidx
- tm.assert_index_equal(result, expected)
-
def test_has_duplicates(self, index):
assert index.is_unique
assert not index.has_duplicates
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 11f2a9f07a4c2..ff1632e33c0fb 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -11,31 +11,18 @@
from pandas.tests.indexes.common import Base
-class Numeric(Base):
- def test_where(self):
- # Tested in numeric.test_indexing
- pass
-
- def test_can_hold_identifiers(self):
- idx = self.create_index()
- key = idx[0]
- assert idx._can_hold_identifiers_and_holds_name(key) is False
-
- def test_format(self):
- # GH35439
- idx = self.create_index()
- max_width = max(len(str(x)) for x in idx)
- expected = [str(x).ljust(max_width) for x in idx]
- assert idx.format() == expected
-
- def test_numeric_compat(self):
- pass # override Base method
-
- def test_explicit_conversions(self):
+class TestArithmetic:
+ @pytest.mark.parametrize(
+ "klass", [Float64Index, Int64Index, UInt64Index, RangeIndex]
+ )
+ def test_arithmetic_explicit_conversions(self, klass):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
- idx = self._holder(np.arange(5, dtype="int64"))
+ if klass is RangeIndex:
+ idx = RangeIndex(5)
+ else:
+ idx = klass(np.arange(5, dtype="int64"))
# float conversions
arr = np.arange(5, dtype="int64") * 3.2
@@ -56,6 +43,8 @@ def test_explicit_conversions(self):
result = a - fidx
tm.assert_index_equal(result, expected)
+
+class TestNumericIndex:
def test_index_groupby(self):
int_idx = Index(range(6))
float_idx = Index(np.arange(0, 0.6, 0.1))
@@ -84,6 +73,27 @@ def test_index_groupby(self):
expected = {ex_keys[0]: idx[[0, 5]], ex_keys[1]: idx[[1, 4]]}
tm.assert_dict_equal(idx.groupby(to_groupby), expected)
+
+class Numeric(Base):
+ def test_where(self):
+ # Tested in numeric.test_indexing
+ pass
+
+ def test_can_hold_identifiers(self):
+ idx = self.create_index()
+ key = idx[0]
+ assert idx._can_hold_identifiers_and_holds_name(key) is False
+
+ def test_format(self):
+ # GH35439
+ idx = self.create_index()
+ max_width = max(len(str(x)) for x in idx)
+ expected = [str(x).ljust(max_width) for x in idx]
+ assert idx.format() == expected
+
+ def test_numeric_compat(self):
+ pass # override Base method
+
def test_insert_na(self, nulls_fixture):
# GH 18295 (test missing)
index = self.create_index()
| Trying to clear up the confusing:
common.py defines Base
test_base.py imports Base from common
test_common does not
and try to get away from the create_index usage to just use the index fixture
Small steps.
| https://api.github.com/repos/pandas-dev/pandas/pulls/38072 | 2020-11-25T22:15:45Z | 2020-11-26T17:57:57Z | 2020-11-26T17:57:57Z | 2020-11-26T18:48:31Z |
BUG: DataFrame.loc returning empty result with negative stepsize for MultiIndex | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 84ac2d0c17676..ac7a2ddd477c9 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -659,6 +659,7 @@ Indexing
- Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`MultiIndex` and a level named ``"0"`` (:issue:`37194`)
- Bug in :meth:`Series.__getitem__` when using an unsigned integer array as an indexer giving incorrect results or segfaulting instead of raising ``KeyError`` (:issue:`37218`)
- Bug in :meth:`Index.where` incorrectly casting numeric values to strings (:issue:`37591`)
+- Bug in :meth:`DataFrame.loc` returning empty result when indexer is a slice with negative step size (:issue:`38071`)
- Bug in :meth:`Series.loc` and :meth:`DataFrame.loc` raises when the index was of ``object`` dtype and the given numeric label was in the index (:issue:`26491`)
- Bug in :meth:`DataFrame.loc` returned requested key plus missing values when ``loc`` was applied to single level from a :class:`MultiIndex` (:issue:`27104`)
- Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`CategoricalIndex` using a listlike indexer containing NA values (:issue:`37722`)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index d575c67cb36aa..6af6555007c2f 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3078,8 +3078,11 @@ def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
# given the inputs and the codes/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
-
+ if step is not None and step < 0:
+ # Switch elements for negative step size
+ start, stop = stop - 1, start - 1
r = np.arange(start, stop, step)
+
if indexer is not None and len(indexer) != len(codes):
# we have an indexer which maps the locations in the labels
@@ -3342,6 +3345,8 @@ def _reorder_indexer(
k_codes = k_codes[k_codes >= 0] # Filter absent keys
# True if the given codes are not ordered
need_sort = (k_codes[:-1] > k_codes[1:]).any()
+ elif isinstance(k, slice) and k.step is not None and k.step < 0:
+ need_sort = True
# Bail out if both index and seq are sorted
if not need_sort:
return indexer
@@ -3368,6 +3373,8 @@ def _reorder_indexer(
key_order_map[level_indexer] = np.arange(len(level_indexer))
new_order = key_order_map[self.codes[i][indexer]]
+ elif isinstance(k, slice) and k.step is not None and k.step < 0:
+ new_order = np.arange(n)[k][indexer]
elif isinstance(k, slice) and k.start is None and k.stop is None:
# slice(None) should not determine order GH#31330
new_order = np.ones((n,))[indexer]
diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py
index d58bc4713f99f..51684f092aefd 100644
--- a/pandas/tests/indexing/multiindex/test_slice.py
+++ b/pandas/tests/indexing/multiindex/test_slice.py
@@ -779,3 +779,13 @@ def test_non_reducing_slice_on_multiindex(self):
result = df.loc[tslice_]
expected = DataFrame({("b", "d"): [4, 1]})
tm.assert_frame_equal(result, expected)
+
+ def test_loc_slice_negative_stepsize(self):
+ # GH#38071
+ mi = MultiIndex.from_product([["a", "b"], [0, 1]])
+ df = DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=mi)
+ result = df.loc[("a", slice(None, None, -1)), :]
+ expected = DataFrame(
+ [[3, 4], [1, 2]], index=MultiIndex.from_tuples([("a", 1), ("a", 0)])
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This was easier than thought. Should I add a whatsnew note for this? | https://api.github.com/repos/pandas-dev/pandas/pulls/38071 | 2020-11-25T21:19:03Z | 2020-12-02T18:38:54Z | 2020-12-02T18:38:54Z | 2020-12-02T18:43:00Z |
BUG: Index.drop raising Error when Index has duplicates | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 17cdb7538dad2..5479ac4df6afb 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -689,6 +689,7 @@ MultiIndex
- Bug in :meth:`DataFrame.reset_index` with ``NaT`` values in index raises ``ValueError`` with message ``"cannot convert float NaN to integer"`` (:issue:`36541`)
- Bug in :meth:`DataFrame.combine_first` when used with :class:`MultiIndex` containing string and ``NaN`` values raises ``TypeError`` (:issue:`36562`)
- Bug in :meth:`MultiIndex.drop` dropped ``NaN`` values when non existing key was given as input (:issue:`18853`)
+- Bug in :meth:`MultiIndex.drop` dropping more values than expected when index has duplicates and is not sorted (:issue:`33494`)
I/O
^^^
@@ -821,6 +822,7 @@ Other
- Bug in :meth:`Index.intersection` with non-matching numeric dtypes casting to ``object`` dtype instead of minimal common dtype (:issue:`38122`)
- Passing an array with 2 or more dimensions to the :class:`Series` constructor now raises the more specific ``ValueError`` rather than a bare ``Exception`` (:issue:`35744`)
- Bug in ``dir`` where ``dir(obj)`` wouldn't show attributes defined on the instance for pandas objects (:issue:`37173`)
+- Bug in :meth:`Index.drop` raising ``InvalidIndexError`` when index has duplicates (:issue:`38051`)
- Bug in :meth:`RangeIndex.difference` returning :class:`Int64Index` in some cases where it should return :class:`RangeIndex` (:issue:`38028`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 40fcc824992b7..52ffb1567cb2d 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5564,7 +5564,7 @@ def drop(self, labels, errors: str_t = "raise"):
"""
arr_dtype = "object" if self.dtype == "object" else None
labels = com.index_labels_to_array(labels, dtype=arr_dtype)
- indexer = self.get_indexer(labels)
+ indexer = self.get_indexer_for(labels)
mask = indexer == -1
if mask.any():
if errors != "ignore":
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index d575c67cb36aa..a28d33981bbbf 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2165,7 +2165,8 @@ def drop(self, codes, level=None, errors="raise"):
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
- inds.extend(range(loc.start, loc.stop))
+ step = loc.step if loc.step is not None else 1
+ inds.extend(range(loc.start, loc.stop, step))
elif com.is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn(
diff --git a/pandas/tests/indexes/multi/test_drop.py b/pandas/tests/indexes/multi/test_drop.py
index c39954b22b0f2..f7b1bc4729428 100644
--- a/pandas/tests/indexes/multi/test_drop.py
+++ b/pandas/tests/indexes/multi/test_drop.py
@@ -1,3 +1,5 @@
+import warnings
+
import numpy as np
import pytest
@@ -149,6 +151,16 @@ def test_drop_with_nan_in_index(nulls_fixture):
mi.drop(pd.Timestamp("2001"), level="date")
+def test_drop_with_non_monotonic_duplicates():
+ # GH#33494
+ mi = MultiIndex.from_tuples([(1, 2), (2, 3), (1, 2)])
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", PerformanceWarning)
+ result = mi.drop((1, 2))
+ expected = MultiIndex.from_tuples([(2, 3)])
+ tm.assert_index_equal(result, expected)
+
+
def test_single_level_drop_partially_missing_elements():
# GH 37820
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index ba49c51c9db8e..d5ca8a0f64fac 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -9,6 +9,7 @@
import pytest
from pandas._libs.tslib import Timestamp
+from pandas.compat import IS64
from pandas.compat.numpy import np_datetime64_compat
from pandas.util._test_decorators import async_mark
@@ -19,6 +20,7 @@
DatetimeIndex,
Float64Index,
Int64Index,
+ IntervalIndex,
PeriodIndex,
RangeIndex,
Series,
@@ -1505,6 +1507,17 @@ def test_drop_tuple(self, values, to_drop):
with pytest.raises(KeyError, match=msg):
removed.drop(drop_me)
+ def test_drop_with_duplicates_in_index(self, index):
+ # GH38051
+ if len(index) == 0 or isinstance(index, MultiIndex):
+ return
+ if isinstance(index, IntervalIndex) and not IS64:
+ pytest.skip("Cannot test IntervalIndex with int64 dtype on 32 bit platform")
+ index = index.unique().repeat(2)
+ expected = index[2:]
+ result = index.drop(index[0])
+ tm.assert_index_equal(result, expected)
+
@pytest.mark.parametrize(
"attr",
[
| - [x] closes #38051
- [x] closes #33494
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
@jbrockmendel This should deal with duplicates. For MultiIndex sometimes a slice with stepzize greater than zero was given, which dropped to many elements | https://api.github.com/repos/pandas-dev/pandas/pulls/38070 | 2020-11-25T20:11:11Z | 2020-12-02T23:28:05Z | 2020-12-02T23:28:04Z | 2020-12-03T22:14:42Z |
ENH: NDArrayBackedExtensionArray.__array_function__ | diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index a2444b7ba5a0d..671ec653d6484 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -62,9 +62,25 @@ def np_array_datetime64_compat(arr, *args, **kwargs):
return np.array(arr, *args, **kwargs)
+def _is_nep18_active():
+ # copied from dask.array.utils
+
+ class A:
+ def __array_function__(self, *args, **kwargs):
+ return True
+
+ try:
+ return np.concatenate([A()])
+ except ValueError:
+ return False
+
+
+IS_NEP18_ACTIVE = _is_nep18_active()
+
__all__ = [
"np",
"_np_version",
"np_version_under1p17",
"is_numpy_dev",
+ "IS_NEP18_ACTIVE",
]
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index 5cc6525dc3c9b..f448d08f09234 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -304,6 +304,68 @@ def __repr__(self) -> str:
# ------------------------------------------------------------------------
# __array_function__ methods
+ def __array_function__(self, func, types, args, kwargs):
+ for x in types:
+ if not issubclass(x, (np.ndarray, NDArrayBackedExtensionArray)):
+ return NotImplemented
+
+ if not args:
+ # TODO: if this fails, are we bound for a RecursionError?
+ for key, value in kwargs.items():
+ if value is self:
+ # See if we can treat self as the first arg
+ import inspect
+
+ sig = inspect.signature(func)
+ params = sig.parameters
+ first_argname = next(iter(params))
+ if first_argname == key:
+ args = (value,)
+ del kwargs[key]
+ break
+ else:
+ kwargs[key] = np.asarray(self)
+ break
+
+ if args and args[0] is self:
+
+ if func in [np.delete, np.repeat, np.atleast_2d]:
+ res_data = func(self._ndarray, *args[1:], **kwargs)
+ return self._from_backing_data(res_data)
+
+ # TODO: do we need to convert args to kwargs to ensure nv checks
+ # are correct?
+ if func is np.amin:
+ # error: "NDArrayBackedExtensionArray" has no attribute "min"
+ return self.min(*args[1:], **kwargs) # type:ignore[attr-defined]
+ if func is np.amax:
+ # error: "NDArrayBackedExtensionArray" has no attribute "max"
+ return self.max(*args[1:], **kwargs) # type:ignore[attr-defined]
+
+ if func is np.sum:
+ # Need to do explicitly otherise np.sum(TimedeltaArray)
+ # doesnt wrap in Timedelta.
+ # error: "NDArrayBackedExtensionArray" has no attribute "sum"
+ return self.sum(*args[1:], **kwargs) # type:ignore[attr-defined]
+
+ if func is np.argsort:
+ if len(args) > 1:
+ # try to make sure that we are passing kwargs along correclty
+ raise NotImplementedError
+ return self.argsort(*args[1:], **kwargs)
+
+ if not any(x is self for x in args):
+ # e.g. np.conatenate we get args[0] is a tuple containing self
+ largs = list(args)
+ for i, arg in enumerate(largs):
+ if isinstance(arg, (list, tuple)):
+ arg = type(arg)(x if x is not self else np.asarray(x) for x in arg)
+ largs[i] = arg
+ args = tuple(largs)
+
+ args = [x if x is not self else np.asarray(x) for x in args]
+ return func(*args, **kwargs)
+
def putmask(self, mask, value):
"""
Analogue to np.putmask(self, mask, value)
diff --git a/pandas/tests/arrays/test_ndarray_backed.py b/pandas/tests/arrays/test_ndarray_backed.py
new file mode 100644
index 0000000000000..03c23eb01863c
--- /dev/null
+++ b/pandas/tests/arrays/test_ndarray_backed.py
@@ -0,0 +1,112 @@
+"""
+Tests for EA subclasses subclassing NDArrayBackedExtensionArray
+"""
+
+import numpy as np
+import pytest
+
+from pandas.compat.numpy import IS_NEP18_ACTIVE
+
+from pandas import date_range
+import pandas._testing as tm
+from pandas.core.arrays import Categorical, PandasArray
+
+pytestmark = pytest.mark.skipif(
+ not IS_NEP18_ACTIVE,
+ reason="__array_function__ is not enabled by default until numpy 1.17",
+)
+
+
+class ArrayFunctionTests:
+ # Tests for subclasses that do not explicitly support 2D yet.
+ def test_delete_no_axis(self, array):
+ # with no axis, operates on flattened version
+ result = np.delete(array, 1)
+
+ backing = np.delete(array._ndarray.ravel(), 1)
+ expected = array._from_backing_data(backing)
+ tm.assert_equal(result, expected)
+
+ def test_repeat(self, array):
+ result = np.repeat(array, 2)
+
+ backing = np.repeat(array._ndarray.ravel(), 2)
+ expected = array._from_backing_data(backing)
+ tm.assert_equal(result, expected)
+
+
+class ArrayFunctionTests2D(ArrayFunctionTests):
+ @pytest.mark.parametrize("axis", [0, 1])
+ def test_delete_axis(self, array, axis):
+ result = np.delete(array, 1, axis=axis)
+ if axis == 0:
+ assert result.shape == (array.shape[0] - 1, array.shape[1])
+ else:
+ assert result.shape == (array.shape[0], array.shape[1] - 1)
+
+ backing = np.delete(array._ndarray, 1, axis=axis)
+ expected = array._from_backing_data(backing)
+ tm.assert_equal(result, expected)
+
+ # axis as an arg instead of as a kwarg
+ result = np.delete(array, 1, axis)
+ tm.assert_equal(result, expected)
+
+ @pytest.mark.parametrize("axis", [0, 1])
+ def test_repeat_axis(self, array, axis):
+ result = np.repeat(array, 2, axis=axis)
+
+ backing = np.repeat(array._ndarray, 2, axis=axis)
+ expected = array._from_backing_data(backing)
+ tm.assert_equal(result, expected)
+
+ # axis as an arg instead of a kwarg
+ result = np.repeat(array, 2, axis)
+ tm.assert_equal(result, expected)
+
+ def test_atleast_2d(self, array):
+ result = np.atleast_2d(array)
+
+ assert result.ndim >= 2
+
+ if array.ndim == 1:
+ assert result.shape == (1, array.size)
+ else:
+ assert result.shape == array.shape
+
+
+class TestDatetimeArray(ArrayFunctionTests2D):
+ @pytest.fixture(params=[1, 2])
+ def array(self):
+ dti = date_range("1994-05-12", periods=12, tz="US/Pacific")
+ dta = dti._data.reshape(3, 4)
+ return dta
+
+
+class TestTimedeltaArray(ArrayFunctionTests2D):
+ @pytest.fixture
+ def array(self):
+ dti = date_range("1994-05-12", periods=12, tz="US/Pacific")
+ dta = dti._data.reshape(3, 4)
+ return dta - dta[0, 0]
+
+
+class TestPeriodArray(ArrayFunctionTests2D):
+ @pytest.fixture
+ def array(self):
+ dti = date_range("1994-05-12", periods=12)
+ pa = dti._data.to_period("D")
+ return pa.reshape(3, 4)
+
+
+class TestPandasArray(ArrayFunctionTests):
+ @pytest.fixture
+ def array(self):
+ return PandasArray(np.arange(12))
+
+
+class TestCategorical(ArrayFunctionTests):
+ @pytest.fixture
+ def array(self):
+ dti = date_range("1994-05-12", periods=12, tz="US/Pacific")
+ return Categorical(dti)
| motivated by getting np.delete and np.repeat working, had to implement a few others to get the tests passing.
If we go down this path, I plan to incrementally add support for others, some of which (e.g. vstack) will avoid some object-dtype casting.
cc @TomAugspurger @shoyer @seberg any suggestions to clean the implementation, particularly the fallback and the unknown-args comments? | https://api.github.com/repos/pandas-dev/pandas/pulls/38068 | 2020-11-25T18:32:41Z | 2020-12-21T21:37:43Z | null | 2021-03-02T15:54:21Z |
DOC: tidy 1.1.5 release notes | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index dd88f79371d65..a8bbf692a72e5 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -14,10 +14,13 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
-- Regression in addition of a timedelta-like scalar to a :class:`DatetimeIndex` raising incorrectly (:issue:`37295`)
+- Fixed regression in addition of a timedelta-like scalar to a :class:`DatetimeIndex` raising incorrectly (:issue:`37295`)
- Fixed regression in :meth:`Series.groupby` raising when the :class:`Index` of the :class:`Series` had a tuple as its name (:issue:`37755`)
- Fixed regression in :meth:`DataFrame.loc` and :meth:`Series.loc` for ``__setitem__`` when one-dimensional tuple was given to select from :class:`MultiIndex` (:issue:`37711`)
- Fixed regression in inplace operations on :class:`Series` with ``ExtensionDtype`` with NumPy dtyped operand (:issue:`37910`)
+- Fixed regression in metadata propagation for ``groupby`` iterator (:issue:`37343`)
+- Fixed regression in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`)
+- Fixed regression in ``df.groupby(..).rolling(..)`` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
.. ---------------------------------------------------------------------------
@@ -25,11 +28,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
-- Bug in metadata propagation for ``groupby`` iterator (:issue:`37343`)
-- Bug in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`)
-- Bug in :class:`RollingGroupby` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
- Bug in pytables methods in python 3.9 (:issue:`38041`)
--
.. ---------------------------------------------------------------------------
| https://api.github.com/repos/pandas-dev/pandas/pulls/38064 | 2020-11-25T13:34:43Z | 2020-11-26T13:03:39Z | 2020-11-26T13:03:39Z | 2020-11-26T13:07:33Z | |
DOC: move info docs to DataFrameInfo | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 803d1c914c954..8c85c4e961d99 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -206,8 +206,8 @@
format as fmt,
)
from pandas.io.formats.info import (
- BaseInfo,
DataFrameInfo,
+ frame_sub_kwargs,
)
import pandas.plotting
@@ -3138,122 +3138,7 @@ def to_xml(
return xml_formatter.write_output()
# ----------------------------------------------------------------------
- @Substitution(
- klass="DataFrame",
- type_sub=" and columns",
- max_cols_sub=dedent(
- """\
- max_cols : int, optional
- When to switch from the verbose to the truncated output. If the
- DataFrame has more than `max_cols` columns, the truncated output
- is used. By default, the setting in
- ``pandas.options.display.max_info_columns`` is used."""
- ),
- show_counts_sub=dedent(
- """\
- show_counts : bool, optional
- Whether to show the non-null counts. By default, this is shown
- only if the DataFrame is smaller than
- ``pandas.options.display.max_info_rows`` and
- ``pandas.options.display.max_info_columns``. A value of True always
- shows the counts, and False never shows the counts.
- null_counts : bool, optional
- .. deprecated:: 1.2.0
- Use show_counts instead."""
- ),
- examples_sub=dedent(
- """\
- >>> int_values = [1, 2, 3, 4, 5]
- >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
- >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
- >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
- ... "float_col": float_values})
- >>> df
- int_col text_col float_col
- 0 1 alpha 0.00
- 1 2 beta 0.25
- 2 3 gamma 0.50
- 3 4 delta 0.75
- 4 5 epsilon 1.00
-
- Prints information of all columns:
-
- >>> df.info(verbose=True)
- <class 'pandas.core.frame.DataFrame'>
- RangeIndex: 5 entries, 0 to 4
- Data columns (total 3 columns):
- # Column Non-Null Count Dtype
- --- ------ -------------- -----
- 0 int_col 5 non-null int64
- 1 text_col 5 non-null object
- 2 float_col 5 non-null float64
- dtypes: float64(1), int64(1), object(1)
- memory usage: 248.0+ bytes
-
- Prints a summary of columns count and its dtypes but not per column
- information:
-
- >>> df.info(verbose=False)
- <class 'pandas.core.frame.DataFrame'>
- RangeIndex: 5 entries, 0 to 4
- Columns: 3 entries, int_col to float_col
- dtypes: float64(1), int64(1), object(1)
- memory usage: 248.0+ bytes
-
- Pipe output of DataFrame.info to buffer instead of sys.stdout, get
- buffer content and writes to a text file:
-
- >>> import io
- >>> buffer = io.StringIO()
- >>> df.info(buf=buffer)
- >>> s = buffer.getvalue()
- >>> with open("df_info.txt", "w",
- ... encoding="utf-8") as f: # doctest: +SKIP
- ... f.write(s)
- 260
-
- The `memory_usage` parameter allows deep introspection mode, specially
- useful for big DataFrames and fine-tune memory optimization:
-
- >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
- >>> df = pd.DataFrame({
- ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
- ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
- ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
- ... })
- >>> df.info()
- <class 'pandas.core.frame.DataFrame'>
- RangeIndex: 1000000 entries, 0 to 999999
- Data columns (total 3 columns):
- # Column Non-Null Count Dtype
- --- ------ -------------- -----
- 0 column_1 1000000 non-null object
- 1 column_2 1000000 non-null object
- 2 column_3 1000000 non-null object
- dtypes: object(3)
- memory usage: 22.9+ MB
-
- >>> df.info(memory_usage='deep')
- <class 'pandas.core.frame.DataFrame'>
- RangeIndex: 1000000 entries, 0 to 999999
- Data columns (total 3 columns):
- # Column Non-Null Count Dtype
- --- ------ -------------- -----
- 0 column_1 1000000 non-null object
- 1 column_2 1000000 non-null object
- 2 column_3 1000000 non-null object
- dtypes: object(3)
- memory usage: 165.9 MB"""
- ),
- see_also_sub=dedent(
- """\
- DataFrame.describe: Generate descriptive statistics of DataFrame
- columns.
- DataFrame.memory_usage: Memory usage of DataFrame columns."""
- ),
- version_added_sub="",
- )
- @doc(BaseInfo.render)
+ @doc(DataFrameInfo.render, **frame_sub_kwargs)
def info(
self,
verbose: bool | None = None,
diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py
index ddd2420731028..9340d020cd6ce 100644
--- a/pandas/io/formats/info.py
+++ b/pandas/io/formats/info.py
@@ -5,6 +5,7 @@
abstractmethod,
)
import sys
+from textwrap import dedent
from typing import (
TYPE_CHECKING,
Iterable,
@@ -19,6 +20,7 @@
Dtype,
WriteBuffer,
)
+from pandas.util._decorators import doc
from pandas.core.indexes.api import Index
@@ -32,6 +34,186 @@
)
+frame_max_cols_sub = dedent(
+ """\
+ max_cols : int, optional
+ When to switch from the verbose to the truncated output. If the
+ DataFrame has more than `max_cols` columns, the truncated output
+ is used. By default, the setting in
+ ``pandas.options.display.max_info_columns`` is used."""
+)
+
+
+show_counts_sub = dedent(
+ """\
+ show_counts : bool, optional
+ Whether to show the non-null counts. By default, this is shown
+ only if the DataFrame is smaller than
+ ``pandas.options.display.max_info_rows`` and
+ ``pandas.options.display.max_info_columns``. A value of True always
+ shows the counts, and False never shows the counts.
+ null_counts : bool, optional
+ .. deprecated:: 1.2.0
+ Use show_counts instead."""
+)
+
+
+frame_examples_sub = dedent(
+ """\
+ >>> int_values = [1, 2, 3, 4, 5]
+ >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
+ >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
+ >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
+ ... "float_col": float_values})
+ >>> df
+ int_col text_col float_col
+ 0 1 alpha 0.00
+ 1 2 beta 0.25
+ 2 3 gamma 0.50
+ 3 4 delta 0.75
+ 4 5 epsilon 1.00
+
+ Prints information of all columns:
+
+ >>> df.info(verbose=True)
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 5 entries, 0 to 4
+ Data columns (total 3 columns):
+ # Column Non-Null Count Dtype
+ --- ------ -------------- -----
+ 0 int_col 5 non-null int64
+ 1 text_col 5 non-null object
+ 2 float_col 5 non-null float64
+ dtypes: float64(1), int64(1), object(1)
+ memory usage: 248.0+ bytes
+
+ Prints a summary of columns count and its dtypes but not per column
+ information:
+
+ >>> df.info(verbose=False)
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 5 entries, 0 to 4
+ Columns: 3 entries, int_col to float_col
+ dtypes: float64(1), int64(1), object(1)
+ memory usage: 248.0+ bytes
+
+ Pipe output of DataFrame.info to buffer instead of sys.stdout, get
+ buffer content and writes to a text file:
+
+ >>> import io
+ >>> buffer = io.StringIO()
+ >>> df.info(buf=buffer)
+ >>> s = buffer.getvalue()
+ >>> with open("df_info.txt", "w",
+ ... encoding="utf-8") as f: # doctest: +SKIP
+ ... f.write(s)
+ 260
+
+ The `memory_usage` parameter allows deep introspection mode, specially
+ useful for big DataFrames and fine-tune memory optimization:
+
+ >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
+ >>> df = pd.DataFrame({
+ ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
+ ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
+ ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
+ ... })
+ >>> df.info()
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 1000000 entries, 0 to 999999
+ Data columns (total 3 columns):
+ # Column Non-Null Count Dtype
+ --- ------ -------------- -----
+ 0 column_1 1000000 non-null object
+ 1 column_2 1000000 non-null object
+ 2 column_3 1000000 non-null object
+ dtypes: object(3)
+ memory usage: 22.9+ MB
+
+ >>> df.info(memory_usage='deep')
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 1000000 entries, 0 to 999999
+ Data columns (total 3 columns):
+ # Column Non-Null Count Dtype
+ --- ------ -------------- -----
+ 0 column_1 1000000 non-null object
+ 1 column_2 1000000 non-null object
+ 2 column_3 1000000 non-null object
+ dtypes: object(3)
+ memory usage: 165.9 MB"""
+)
+
+
+frame_see_also_sub = dedent(
+ """\
+ DataFrame.describe: Generate descriptive statistics of DataFrame
+ columns.
+ DataFrame.memory_usage: Memory usage of DataFrame columns."""
+)
+
+
+frame_sub_kwargs = {
+ "klass": "DataFrame",
+ "type_sub": " and columns",
+ "max_cols_sub": frame_max_cols_sub,
+ "show_counts_sub": show_counts_sub,
+ "examples_sub": frame_examples_sub,
+ "see_also_sub": frame_see_also_sub,
+ "version_added_sub": "",
+}
+
+
+INFO_DOCSTRING = dedent(
+ """
+ Print a concise summary of a {klass}.
+
+ This method prints information about a {klass} including
+ the index dtype{type_sub}, non-null values and memory usage.
+ {version_added_sub}\
+
+ Parameters
+ ----------
+ data : {klass}
+ {klass} to print information about.
+ verbose : bool, optional
+ Whether to print the full summary. By default, the setting in
+ ``pandas.options.display.max_info_columns`` is followed.
+ buf : writable buffer, defaults to sys.stdout
+ Where to send the output. By default, the output is printed to
+ sys.stdout. Pass a writable buffer if you need to further process
+ the output.
+ {max_cols_sub}
+ memory_usage : bool, str, optional
+ Specifies whether total memory usage of the {klass}
+ elements (including the index) should be displayed. By default,
+ this follows the ``pandas.options.display.memory_usage`` setting.
+
+ True always show memory usage. False never shows memory usage.
+ A value of 'deep' is equivalent to "True with deep introspection".
+ Memory usage is shown in human-readable units (base-2
+ representation). Without deep introspection a memory estimation is
+ made based in column dtype and number of rows assuming values
+ consume the same memory amount for corresponding dtypes. With deep
+ memory introspection, a real memory usage calculation is performed
+ at the cost of computational resources.
+ {show_counts_sub}
+
+ Returns
+ -------
+ None
+ This method prints a summary of a {klass} and returns None.
+
+ See Also
+ --------
+ {see_also_sub}
+
+ Examples
+ --------
+ {examples_sub}
+ """
+)
+
+
def _put_str(s: str | Dtype, space: int) -> str:
"""
Make string of specified length, padding to the right if necessary.
@@ -178,53 +360,7 @@ def render(
verbose: bool | None,
show_counts: bool | None,
) -> None:
- """
- Print a concise summary of a %(klass)s.
-
- This method prints information about a %(klass)s including
- the index dtype%(type_sub)s, non-null values and memory usage.
- %(version_added_sub)s\
-
- Parameters
- ----------
- data : %(klass)s
- %(klass)s to print information about.
- verbose : bool, optional
- Whether to print the full summary. By default, the setting in
- ``pandas.options.display.max_info_columns`` is followed.
- buf : writable buffer, defaults to sys.stdout
- Where to send the output. By default, the output is printed to
- sys.stdout. Pass a writable buffer if you need to further process
- the output.
- %(max_cols_sub)s
- memory_usage : bool, str, optional
- Specifies whether total memory usage of the %(klass)s
- elements (including the index) should be displayed. By default,
- this follows the ``pandas.options.display.memory_usage`` setting.
-
- True always show memory usage. False never shows memory usage.
- A value of 'deep' is equivalent to "True with deep introspection".
- Memory usage is shown in human-readable units (base-2
- representation). Without deep introspection a memory estimation is
- made based in column dtype and number of rows assuming values
- consume the same memory amount for corresponding dtypes. With deep
- memory introspection, a real memory usage calculation is performed
- at the cost of computational resources.
- %(show_counts_sub)s
-
- Returns
- -------
- None
- This method prints a summary of a %(klass)s and returns None.
-
- See Also
- --------
- %(see_also_sub)s
-
- Examples
- --------
- %(examples_sub)s
- """
+ pass
class DataFrameInfo(BaseInfo):
@@ -286,6 +422,16 @@ def memory_usage_bytes(self) -> int:
deep = False
return self.data.memory_usage(index=True, deep=deep).sum()
+ @doc(
+ INFO_DOCSTRING,
+ klass="DataFrame",
+ type_sub=" and columns",
+ max_cols_sub=frame_max_cols_sub,
+ show_counts_sub=show_counts_sub,
+ examples_sub=frame_examples_sub,
+ see_also_sub=frame_see_also_sub,
+ version_added_sub="",
+ )
def render(
self,
*,
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Precursor for #37320
Move ``info`` docstring from ``pandas/core/frame.py`` to class ``DataFrameInfo`` using ``doc`` decorator.
@simonjayhawkins need your help here as discussed previously. | https://api.github.com/repos/pandas-dev/pandas/pulls/38062 | 2020-11-25T12:12:54Z | 2021-11-29T14:46:59Z | 2021-11-29T14:46:59Z | 2021-11-29T14:47:03Z |
Backport PR #38041 on branch 1.1.x: BUG: pytables in py39 (#38041) | diff --git a/ci/deps/azure-39.yaml b/ci/deps/azure-39.yaml
index 67edc83a9d738..c4c84e73fa684 100644
--- a/ci/deps/azure-39.yaml
+++ b/ci/deps/azure-39.yaml
@@ -15,3 +15,8 @@ dependencies:
- numpy
- python-dateutil
- pytz
+
+ # optional dependencies
+ - pytables
+ - scipy
+ - pyarrow=1.0
diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index 609c3650c8cc2..dd88f79371d65 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -28,6 +28,7 @@ Bug fixes
- Bug in metadata propagation for ``groupby`` iterator (:issue:`37343`)
- Bug in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`)
- Bug in :class:`RollingGroupby` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
+- Bug in pytables methods in python 3.9 (:issue:`38041`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 001eb1789007f..a3389a80b017a 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -426,6 +426,10 @@ def visit_Subscript(self, node, **kwargs):
except AttributeError:
pass
+ if isinstance(slobj, Term):
+ # In py39 np.ndarray lookups with Term containing int raise
+ slobj = slobj.value
+
try:
return self.const_type(value[slobj], self.env)
except TypeError as err:
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index df014171be817..290828daacd9c 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -4500,7 +4500,7 @@ def test_categorical(self, setup_path):
# Appending must have the same categories
df3 = df.copy()
- df3["s"].cat.remove_unused_categories(inplace=True)
+ df3["s"] = df3["s"].cat.remove_unused_categories()
with pytest.raises(ValueError):
store.append("df3", df3)
| Backport PR #38041 on branch 1.1.x | https://api.github.com/repos/pandas-dev/pandas/pulls/38061 | 2020-11-25T11:58:01Z | 2020-11-25T12:48:13Z | 2020-11-25T12:48:13Z | 2020-11-25T12:48:18Z |
Backport PR #37039: CI: move py39 build to conda #33948 | diff --git a/.travis.yml b/.travis.yml
index 1e5ea21b0f2d9..f43f4a1d16ff8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -29,14 +29,6 @@ matrix:
fast_finish: true
include:
- # In allowed failures
- - dist: bionic
- python: 3.9-dev
- env:
- - JOB="3.9-dev" PATTERN="(not slow and not network and not clipboard)"
- - env:
- - JOB="3.8" ENV_FILE="ci/deps/travis-38.yaml" PATTERN="(not slow and not network and not clipboard)"
-
- env:
- JOB="3.7" ENV_FILE="ci/deps/travis-37.yaml" PATTERN="(not slow and not network and not clipboard)"
@@ -92,7 +84,7 @@ install:
script:
- echo "script start"
- echo "$JOB"
- - if [ "$JOB" != "3.9-dev" ]; then source activate pandas-dev; fi
+ - source activate pandas-dev
- ci/run_tests.sh
after_script:
diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml
index f716974f6add1..457a1f3f507d4 100644
--- a/ci/azure/posix.yml
+++ b/ci/azure/posix.yml
@@ -65,6 +65,11 @@ jobs:
PANDAS_TESTING_MODE: "deprecate"
EXTRA_APT: "xsel"
+ py39:
+ ENV_FILE: ci/deps/azure-39.yaml
+ CONDA_PY: "39"
+ PATTERN: "not slow and not network and not clipboard"
+
steps:
- script: |
if [ "$(uname)" == "Linux" ]; then
diff --git a/ci/build39.sh b/ci/build39.sh
deleted file mode 100755
index f2ef11d5a71f4..0000000000000
--- a/ci/build39.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash -e
-# Special build for python3.9 until numpy puts its own wheels up
-
-sudo apt-get install build-essential gcc xvfb
-pip install --no-deps -U pip wheel setuptools
-pip install cython numpy python-dateutil pytz pytest pytest-xdist hypothesis
-
-python setup.py build_ext -inplace
-python -m pip install --no-build-isolation -e .
-
-python -c "import sys; print(sys.version_info)"
-python -c "import pandas as pd"
-python -c "import hypothesis"
diff --git a/ci/deps/azure-39.yaml b/ci/deps/azure-39.yaml
new file mode 100644
index 0000000000000..67edc83a9d738
--- /dev/null
+++ b/ci/deps/azure-39.yaml
@@ -0,0 +1,17 @@
+name: pandas-dev
+channels:
+ - conda-forge
+dependencies:
+ - python=3.9.*
+
+ # tools
+ - cython>=0.29.21
+ - pytest>=5.0.1
+ - pytest-xdist>=1.21
+ - hypothesis>=3.58.0
+ - pytest-azurepipelines
+
+ # pandas dependencies
+ - numpy
+ - python-dateutil
+ - pytz
diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index 065f9e56ea171..9adb6fe674099 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -1,10 +1,5 @@
#!/bin/bash -e
-if [ "$JOB" == "3.9-dev" ]; then
- /bin/bash ci/build39.sh
- exit 0
-fi
-
# edit the locale file if needed
if [[ "$(uname)" == "Linux" && -n "$LC_ALL" ]]; then
echo "Adding locale to the first line of pandas/__init__.py"
| Backport PR #37039 | https://api.github.com/repos/pandas-dev/pandas/pulls/38059 | 2020-11-25T10:33:24Z | 2020-11-25T11:51:02Z | 2020-11-25T11:51:02Z | 2020-11-25T11:51:18Z |
PERF: fix regression in creation of resulting index in RollingGroupby | diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index 79a33c437ea5c..5a36cff7908f0 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -225,6 +225,20 @@ def time_rolling_offset(self, method):
getattr(self.groupby_roll_offset, method)()
+class GroupbyLargeGroups:
+ # https://github.com/pandas-dev/pandas/issues/38038
+ # specific example where the rolling operation on a larger dataframe
+ # is relatively cheap (few but large groups), but creation of
+ # MultiIndex of result can be expensive
+
+ def setup(self):
+ N = 100000
+ self.df = pd.DataFrame({"A": [1, 2] * int(N / 2), "B": np.random.randn(N)})
+
+ def time_rolling_multiindex_creation(self):
+ self.df.groupby("A").rolling(3).mean()
+
+
class GroupbyEWM:
params = ["cython", "numba"]
diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index d0a935bfb4e32..53e7db492d8bb 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -24,6 +24,7 @@ Fixed regressions
- Fixed regression in ``df.groupby(..).rolling(..)`` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
- Fixed regression in :meth:`DataFrame.fillna` not filling ``NaN`` after other operations such as :meth:`DataFrame.pivot` (:issue:`36495`).
- Fixed performance regression for :meth:`DataFrame.__setitem__` with list-like indexers (:issue:`37954`)
+- Fixed performance regression in ``df.groupby(..).rolling(..)`` (:issue:`38038`)
- Fixed regression in :meth:`MultiIndex.intersection` returning duplicates when at least one of the indexes had duplicates (:issue:`36915`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 51a1e2102c273..e6185f8ae0679 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -50,7 +50,6 @@
from pandas.core.aggregation import aggregate
from pandas.core.base import DataError, SelectionMixin
-import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.core.groupby.base import GotItemMixin, ShallowMixin
from pandas.core.indexes.api import Index, MultiIndex
@@ -791,22 +790,29 @@ def _apply(
# Our result will have still kept the column in the result
result = result.drop(columns=column_keys, errors="ignore")
- result_index_data = []
- for key, values in self._groupby.grouper.indices.items():
- for value in values:
- data = [
- *com.maybe_make_list(key),
- *com.maybe_make_list(
- grouped_object_index[value]
- if grouped_object_index is not None
- else []
- ),
- ]
- result_index_data.append(tuple(data))
-
- result_index = MultiIndex.from_tuples(
- result_index_data, names=result_index_names
+ codes = self._groupby.grouper.codes
+ levels = self._groupby.grouper.levels
+
+ group_indices = self._groupby.grouper.indices.values()
+ if group_indices:
+ indexer = np.concatenate(list(group_indices))
+ else:
+ indexer = np.array([], dtype=np.intp)
+ codes = [c.take(indexer) for c in codes]
+
+ # if the index of the original dataframe needs to be preserved, append
+ # this index (but reordered) to the codes/levels from the groupby
+ if grouped_object_index is not None:
+ idx = grouped_object_index.take(indexer)
+ if not isinstance(idx, MultiIndex):
+ idx = MultiIndex.from_arrays([idx])
+ codes.extend(list(idx.codes))
+ levels.extend(list(idx.levels))
+
+ result_index = MultiIndex(
+ levels, codes, names=result_index_names, verify_integrity=False
)
+
result.index = result_index
return result
diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py
index f9b5a5fe9a3c1..b89fb35ac3a70 100644
--- a/pandas/tests/window/test_groupby.py
+++ b/pandas/tests/window/test_groupby.py
@@ -1,7 +1,15 @@
import numpy as np
import pytest
-from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range, to_datetime
+from pandas import (
+ DataFrame,
+ Index,
+ MultiIndex,
+ Series,
+ Timestamp,
+ date_range,
+ to_datetime,
+)
import pandas._testing as tm
from pandas.api.indexers import BaseIndexer
from pandas.core.groupby.groupby import get_groupby
@@ -418,12 +426,23 @@ def test_groupby_rolling_empty_frame(self):
# GH 36197
expected = DataFrame({"s1": []})
result = expected.groupby("s1").rolling(window=1).sum()
- expected.index = MultiIndex.from_tuples([], names=["s1", None])
+ # GH-38057 from_tuples gives empty object dtype, we now get float/int levels
+ # expected.index = MultiIndex.from_tuples([], names=["s1", None])
+ expected.index = MultiIndex.from_product(
+ [Index([], dtype="float64"), Index([], dtype="int64")], names=["s1", None]
+ )
tm.assert_frame_equal(result, expected)
expected = DataFrame({"s1": [], "s2": []})
result = expected.groupby(["s1", "s2"]).rolling(window=1).sum()
- expected.index = MultiIndex.from_tuples([], names=["s1", "s2", None])
+ expected.index = MultiIndex.from_product(
+ [
+ Index([], dtype="float64"),
+ Index([], dtype="float64"),
+ Index([], dtype="int64"),
+ ],
+ names=["s1", "s2", None],
+ )
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_string_index(self):
@@ -567,6 +586,60 @@ def test_groupby_rolling_index_level_and_column_label(self):
)
tm.assert_frame_equal(result, expected)
+ def test_groupby_rolling_resulting_multiindex(self):
+ # a few different cases checking the created MultiIndex of the result
+ # https://github.com/pandas-dev/pandas/pull/38057
+
+ # grouping by 1 columns -> 2-level MI as result
+ df = DataFrame({"a": np.arange(8.0), "b": [1, 2] * 4})
+ result = df.groupby("b").rolling(3).mean()
+ expected_index = MultiIndex.from_tuples(
+ [(1, 0), (1, 2), (1, 4), (1, 6), (2, 1), (2, 3), (2, 5), (2, 7)],
+ names=["b", None],
+ )
+ tm.assert_index_equal(result.index, expected_index)
+
+ # grouping by 2 columns -> 3-level MI as result
+ df = DataFrame({"a": np.arange(12.0), "b": [1, 2] * 6, "c": [1, 2, 3, 4] * 3})
+ result = df.groupby(["b", "c"]).rolling(2).sum()
+ expected_index = MultiIndex.from_tuples(
+ [
+ (1, 1, 0),
+ (1, 1, 4),
+ (1, 1, 8),
+ (1, 3, 2),
+ (1, 3, 6),
+ (1, 3, 10),
+ (2, 2, 1),
+ (2, 2, 5),
+ (2, 2, 9),
+ (2, 4, 3),
+ (2, 4, 7),
+ (2, 4, 11),
+ ],
+ names=["b", "c", None],
+ )
+ tm.assert_index_equal(result.index, expected_index)
+
+ # grouping with 1 level on dataframe with 2-level MI -> 3-level MI as result
+ df = DataFrame({"a": np.arange(8.0), "b": [1, 2] * 4, "c": [1, 2, 3, 4] * 2})
+ df = df.set_index("c", append=True)
+ result = df.groupby("b").rolling(3).mean()
+ expected_index = MultiIndex.from_tuples(
+ [
+ (1, 0, 1),
+ (1, 2, 3),
+ (1, 4, 1),
+ (1, 6, 3),
+ (2, 1, 2),
+ (2, 3, 4),
+ (2, 5, 2),
+ (2, 7, 4),
+ ],
+ names=["b", None, "c"],
+ )
+ tm.assert_index_equal(result.index, expected_index)
+
class TestExpanding:
def setup_method(self):
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 1658cca347786..10b23cadfe279 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -1085,8 +1085,15 @@ def test_groupby_rolling_nan_included():
result = df.groupby("group", dropna=False).rolling(1, min_periods=1).mean()
expected = DataFrame(
{"B": [0.0, 2.0, 3.0, 1.0, 4.0]},
- index=MultiIndex.from_tuples(
- [("g1", 0), ("g1", 2), ("g2", 3), (np.nan, 1), (np.nan, 4)],
+ # GH-38057 from_tuples puts the NaNs in the codes, result expects them
+ # to be in the levels, at the moment
+ # index=MultiIndex.from_tuples(
+ # [("g1", 0), ("g1", 2), ("g2", 3), (np.nan, 1), (np.nan, 4)],
+ # names=["group", None],
+ # ),
+ index=MultiIndex(
+ [["g1", "g2", np.nan], [0, 1, 2, 3, 4]],
+ [[0, 0, 1, 2, 2], [0, 2, 3, 1, 4]],
names=["group", None],
),
)
| Closes #38038
TODO: fix corner cases, add benchmark, .. | https://api.github.com/repos/pandas-dev/pandas/pulls/38057 | 2020-11-25T09:03:16Z | 2020-12-01T09:12:37Z | 2020-12-01T09:12:37Z | 2020-12-08T14:36:41Z |
CI: Renable window tests on Windows | diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index 78d24c814840a..924ece4a60b26 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -25,7 +25,7 @@ PYTEST_CMD="${XVFB}pytest -m \"$PATTERN\" -n $PYTEST_WORKERS --dist=loadfile -s
if [[ $(uname) != "Linux" && $(uname) != "Darwin" ]]; then
# GH#37455 windows py38 build appears to be running out of memory
# skip collection of window tests
- PYTEST_CMD="$PYTEST_CMD --ignore=pandas/tests/window/ --ignore=pandas/tests/plotting/"
+ PYTEST_CMD="$PYTEST_CMD --ignore=pandas/tests/plotting/"
fi
echo $PYTEST_CMD
| - [x] closes #37535
- [ ] tests added / passed
Checking if the recent cleanup of the `tests/window` directory had any impact.
| https://api.github.com/repos/pandas-dev/pandas/pulls/38056 | 2020-11-25T06:05:21Z | 2020-11-25T21:10:38Z | null | 2020-11-25T21:10:43Z |
CLN: remove ABCIndex | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 24680fc855b0d..cdcbc43055052 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -24,12 +24,7 @@
is_extension_array_dtype,
is_integer,
)
-from pandas.core.dtypes.generic import (
- ABCExtensionArray,
- ABCIndex,
- ABCIndexClass,
- ABCSeries,
-)
+from pandas.core.dtypes.generic import ABCExtensionArray, ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna, isnull, notnull # noqa
@@ -105,7 +100,7 @@ def is_bool_indexer(key: Any) -> bool:
check_array_indexer : Check that `key` is a valid array to index,
and convert to an ndarray.
"""
- if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)) or (
+ if isinstance(key, (ABCSeries, np.ndarray, ABCIndexClass)) or (
is_array_like(key) and is_extension_array_dtype(key.dtype)
):
if key.dtype == np.object_:
@@ -471,7 +466,9 @@ def convert_to_list_like(
Convert list-like or scalar input to list-like. List, numpy and pandas array-like
inputs are returned unmodified whereas others are converted to list.
"""
- if isinstance(values, (list, np.ndarray, ABCIndex, ABCSeries, ABCExtensionArray)):
+ if isinstance(
+ values, (list, np.ndarray, ABCIndexClass, ABCSeries, ABCExtensionArray)
+ ):
# np.ndarray resolving as Any gives a false positive
return values # type: ignore[return-value]
elif isinstance(values, abc.Iterable) and not isinstance(values, str):
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py
index 34891180906bb..0e5867809fe52 100644
--- a/pandas/core/dtypes/generic.py
+++ b/pandas/core/dtypes/generic.py
@@ -23,7 +23,6 @@ def _check(cls, inst) -> bool:
return meta(name, tuple(), dct)
-ABCIndex = create_pandas_abc_type("ABCIndex", "_typ", ("index",))
ABCInt64Index = create_pandas_abc_type("ABCInt64Index", "_typ", ("int64index",))
ABCUInt64Index = create_pandas_abc_type("ABCUInt64Index", "_typ", ("uint64index",))
ABCRangeIndex = create_pandas_abc_type("ABCRangeIndex", "_typ", ("rangeindex",))
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 57f6a8ea0cca5..1b18f04ba603d 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -22,7 +22,7 @@
is_scalar,
)
from pandas.core.dtypes.concat import concat_compat
-from pandas.core.dtypes.generic import ABCIndex, ABCSeries
+from pandas.core.dtypes.generic import ABCSeries
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
@@ -53,16 +53,22 @@ def _join_i8_wrapper(joinf, with_indexers: bool = True):
# error: 'staticmethod' used with a non-method
@staticmethod # type: ignore[misc]
def wrapper(left, right):
- if isinstance(left, (np.ndarray, ABCIndex, ABCSeries, DatetimeLikeArrayMixin)):
+ # Note: these only get called with left.dtype == right.dtype
+ if isinstance(
+ left, (np.ndarray, DatetimeIndexOpsMixin, ABCSeries, DatetimeLikeArrayMixin)
+ ):
left = left.view("i8")
- if isinstance(right, (np.ndarray, ABCIndex, ABCSeries, DatetimeLikeArrayMixin)):
+ if isinstance(
+ right,
+ (np.ndarray, DatetimeIndexOpsMixin, ABCSeries, DatetimeLikeArrayMixin),
+ ):
right = right.view("i8")
results = joinf(left, right)
if with_indexers:
# dtype should be timedelta64[ns] for TimedeltaIndex
# and datetime64[ns] for DatetimeIndex
- dtype = left.dtype.base
+ dtype = cast(np.dtype, left.dtype).base
join_index, left_indexer, right_indexer = results
join_index = join_index.view(dtype)
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
index 8142fc3e695a3..c855687552e82 100644
--- a/pandas/core/ops/array_ops.py
+++ b/pandas/core/ops/array_ops.py
@@ -27,7 +27,7 @@
is_object_dtype,
is_scalar,
)
-from pandas.core.dtypes.generic import ABCExtensionArray, ABCIndex, ABCSeries
+from pandas.core.dtypes.generic import ABCExtensionArray, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna, notna
from pandas.core.ops import missing
@@ -40,13 +40,11 @@ def comp_method_OBJECT_ARRAY(op, x, y):
if isinstance(y, list):
y = construct_1d_object_array_from_listlike(y)
- if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):
- # Note: these checks can be for ABCIndex and not ABCIndexClass
- # because that is the only object-dtype class.
+ if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)):
if not is_object_dtype(y.dtype):
y = y.astype(np.object_)
- if isinstance(y, (ABCSeries, ABCIndex)):
+ if isinstance(y, (ABCSeries, ABCIndexClass)):
y = y._values
if x.shape != y.shape:
diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py
index e51b0546b0cee..847daa1e6b263 100644
--- a/pandas/tests/dtypes/test_generic.py
+++ b/pandas/tests/dtypes/test_generic.py
@@ -22,7 +22,6 @@ class TestABCClasses:
timedelta_array = pd.core.arrays.TimedeltaArray(timedelta_index)
def test_abc_types(self):
- assert isinstance(pd.Index(["a", "b", "c"]), gt.ABCIndex)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCInt64Index)
assert isinstance(pd.UInt64Index([1, 2, 3]), gt.ABCUInt64Index)
assert isinstance(pd.Float64Index([1, 2, 3]), gt.ABCFloat64Index)
| It's a footgun.
BTW I expect the CI to fail until #38020 goes in, which will [demonstrate](https://github.com/pandas-dev/pandas/pull/38020#discussion_r529159757) that there is a test case that uses MultiIndex. | https://api.github.com/repos/pandas-dev/pandas/pulls/38055 | 2020-11-25T03:20:37Z | 2020-11-26T00:30:57Z | 2020-11-26T00:30:57Z | 2020-11-26T15:34:43Z |
TST: tighten assert_index_equal calls | diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py
index 56fd633f5f22b..5864b547a552b 100644
--- a/pandas/tests/frame/methods/test_reset_index.py
+++ b/pandas/tests/frame/methods/test_reset_index.py
@@ -130,7 +130,7 @@ def test_reset_index(self, float_frame):
float_frame.index.name = "index"
deleveled = float_frame.reset_index()
tm.assert_series_equal(deleveled["index"], Series(float_frame.index))
- tm.assert_index_equal(deleveled.index, Index(np.arange(len(deleveled))))
+ tm.assert_index_equal(deleveled.index, Index(range(len(deleveled))), exact=True)
# preserve column names
float_frame.columns.name = "columns"
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index e279c5872da03..a98723e9e31f8 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -352,7 +352,7 @@ def test_constructor_dict(self):
# with dict of empty list and Series
frame = DataFrame({"A": [], "B": []}, columns=["A", "B"])
- tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
+ tm.assert_index_equal(frame.index, RangeIndex(0), exact=True)
# GH 14381
# Dict with None value
@@ -802,14 +802,14 @@ def _check_basic_constructor(self, empty):
# automatic labeling
frame = DataFrame(mat)
- tm.assert_index_equal(frame.index, pd.Int64Index(range(2)))
- tm.assert_index_equal(frame.columns, pd.Int64Index(range(3)))
+ tm.assert_index_equal(frame.index, Index(range(2)), exact=True)
+ tm.assert_index_equal(frame.columns, Index(range(3)), exact=True)
frame = DataFrame(mat, index=[1, 2])
- tm.assert_index_equal(frame.columns, pd.Int64Index(range(3)))
+ tm.assert_index_equal(frame.columns, Index(range(3)), exact=True)
frame = DataFrame(mat, columns=["A", "B", "C"])
- tm.assert_index_equal(frame.index, pd.Int64Index(range(2)))
+ tm.assert_index_equal(frame.index, Index(range(2)), exact=True)
# 0-length axis
frame = DataFrame(empty((0, 3)))
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 3c0e4d83964c5..e250d8cf1b326 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -529,6 +529,7 @@ def test_equals_op(self):
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
+ # For RangeIndex we can convert to Int64Index
tm.assert_series_equal(series_a == item, Series(expected3))
def test_format(self):
@@ -656,6 +657,7 @@ def test_map(self):
expected = index
result = index.map(lambda x: x)
+ # For RangeIndex we convert to Int64Index
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
@@ -680,6 +682,7 @@ def test_map_dictlike(self, mapper):
expected = index
result = index.map(identity)
+ # For RangeIndex we convert to Int64Index
tm.assert_index_equal(result, expected)
# empty mappable
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index 1be17a9d6116a..0973cef7cfdc1 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -385,8 +385,8 @@ def test_difference_preserves_type_empty(self, index, sort):
if not index.is_unique:
return
result = index.difference(index, sort=sort)
- expected = index.drop(index)
- tm.assert_index_equal(result, expected)
+ expected = index[:0]
+ tm.assert_index_equal(result, expected, exact=True)
def test_intersection_difference_match_empty(self, index, sort):
# GH#20040
@@ -395,6 +395,6 @@ def test_intersection_difference_match_empty(self, index, sort):
# of an index with itself. Test for all types
if not index.is_unique:
return
- inter = index.intersection(index.drop(index))
+ inter = index.intersection(index[:0])
diff = index.difference(index, sort=sort)
- tm.assert_index_equal(inter, diff)
+ tm.assert_index_equal(inter, diff, exact=True)
diff --git a/pandas/tests/reshape/concat/test_series.py b/pandas/tests/reshape/concat/test_series.py
index 20838a418cfea..2d681e792914c 100644
--- a/pandas/tests/reshape/concat/test_series.py
+++ b/pandas/tests/reshape/concat/test_series.py
@@ -106,9 +106,9 @@ def test_concat_series_axis1_same_names_ignore_index(self):
s2 = Series(np.random.randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
- expected = Index([0, 1])
+ expected = Index(range(2))
- tm.assert_index_equal(result.columns, expected)
+ tm.assert_index_equal(result.columns, expected, exact=True)
@pytest.mark.parametrize(
"s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))]
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 00ef7a05f5902..ad07ced2fca66 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -504,7 +504,7 @@ def test_join_sort(self):
# smoke test
joined = left.join(right, on="key", sort=False)
- tm.assert_index_equal(joined.index, Index(list(range(4))))
+ tm.assert_index_equal(joined.index, Index(range(4)), exact=True)
def test_join_mixed_non_unique_index(self):
# GH 12814, unorderable types in py3 with a non-unique index
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index d790a85c94193..5b13091470b09 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -556,7 +556,7 @@ def test_series_ctor_plus_datetimeindex(self):
def test_constructor_default_index(self):
s = Series([0, 1, 2])
- tm.assert_index_equal(s.index, Index(np.arange(3)))
+ tm.assert_index_equal(s.index, Index(range(3)), exact=True)
@pytest.mark.parametrize(
"input",
| https://api.github.com/repos/pandas-dev/pandas/pulls/38054 | 2020-11-25T02:53:57Z | 2020-11-26T23:02:44Z | 2020-11-26T23:02:44Z | 2020-11-26T23:07:43Z | |
BUG: get_indexer_non_unique not excluding boolean like get_indexer | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index b5900ead246f3..fac4940c0b573 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4896,6 +4896,14 @@ def set_value(self, arr, key, value):
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ensure_index(target)
+
+ if target.is_boolean() and self.is_numeric():
+ # Treat boolean labels passed to a numeric index as not found. Without
+ # this fix False and True would be treated as 0 and 1 respectively.
+ # (GH #16877)
+ no_matches = -1 * np.ones(self.shape, dtype=np.intp)
+ return no_matches, no_matches
+
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 2e3a70e8c2215..ba2ac5d2e07b2 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1236,14 +1236,25 @@ def test_get_indexer_strings_raises(self):
["a", "b", "c", "d"], method="pad", tolerance=[2, 2, 2, 2]
)
- @pytest.mark.parametrize("idx_class", [Int64Index, RangeIndex, Float64Index])
- def test_get_indexer_numeric_index_boolean_target(self, idx_class):
+ @pytest.mark.parametrize(
+ "idx_class", [Int64Index, RangeIndex, Float64Index, UInt64Index]
+ )
+ @pytest.mark.parametrize("method", ["get_indexer", "get_indexer_non_unique"])
+ def test_get_indexer_numeric_index_boolean_target(self, method, idx_class):
# GH 16877
numeric_index = idx_class(RangeIndex(4))
- result = numeric_index.get_indexer([True, False, True])
+ other = Index([True, False, True])
+
+ result = getattr(numeric_index, method)(other)
expected = np.array([-1, -1, -1], dtype=np.intp)
- tm.assert_numpy_array_equal(result, expected)
+ if method == "get_indexer":
+ tm.assert_numpy_array_equal(result, expected)
+ else:
+ expected = np.array([-1, -1, -1, -1], dtype=np.intp)
+
+ tm.assert_numpy_array_equal(result[0], expected)
+ tm.assert_numpy_array_equal(result[1], expected)
def test_get_indexer_with_NA_values(
self, unique_nulls_fixture, unique_nulls_fixture2
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
There's a check in get_indexer that is missing from get_indexer_non_unique, which this adds. This is going to make some de-duplication easier.
Hard to find a user-facing example bc indexing with all-bool indexer we treat it as a mask and not a list of labels. The closest I got to a user-facing example was in Series.drop, but that ran in to #38051. | https://api.github.com/repos/pandas-dev/pandas/pulls/38052 | 2020-11-25T02:26:54Z | 2020-11-26T17:56:18Z | 2020-11-26T17:56:18Z | 2020-11-26T18:48:00Z |
TST/REF: collect Index formatting tests | diff --git a/pandas/tests/indexes/base_class/test_formats.py b/pandas/tests/indexes/base_class/test_formats.py
new file mode 100644
index 0000000000000..f07b06acbfbdb
--- /dev/null
+++ b/pandas/tests/indexes/base_class/test_formats.py
@@ -0,0 +1,134 @@
+import numpy as np
+import pytest
+
+import pandas._config.config as cf
+
+from pandas import Index
+
+
+class TestIndexRendering:
+ @pytest.mark.parametrize(
+ "index,expected",
+ [
+ # ASCII
+ # short
+ (
+ Index(["a", "bb", "ccc"]),
+ """Index(['a', 'bb', 'ccc'], dtype='object')""",
+ ),
+ # multiple lines
+ (
+ Index(["a", "bb", "ccc"] * 10),
+ "Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', "
+ "'bb', 'ccc', 'a', 'bb', 'ccc',\n"
+ " 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', "
+ "'bb', 'ccc', 'a', 'bb', 'ccc',\n"
+ " 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n"
+ " dtype='object')",
+ ),
+ # truncated
+ (
+ Index(["a", "bb", "ccc"] * 100),
+ "Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n"
+ " ...\n"
+ " 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n"
+ " dtype='object', length=300)",
+ ),
+ # Non-ASCII
+ # short
+ (
+ Index(["あ", "いい", "ううう"]),
+ """Index(['あ', 'いい', 'ううう'], dtype='object')""",
+ ),
+ # multiple lines
+ (
+ Index(["あ", "いい", "ううう"] * 10),
+ (
+ "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
+ "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
+ " 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
+ "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
+ " 'あ', 'いい', 'ううう', 'あ', 'いい', "
+ "'ううう'],\n"
+ " dtype='object')"
+ ),
+ ),
+ # truncated
+ (
+ Index(["あ", "いい", "ううう"] * 100),
+ (
+ "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
+ "'あ', 'いい', 'ううう', 'あ',\n"
+ " ...\n"
+ " 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
+ "'ううう', 'あ', 'いい', 'ううう'],\n"
+ " dtype='object', length=300)"
+ ),
+ ),
+ ],
+ )
+ def test_string_index_repr(self, index, expected):
+ result = repr(index)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "index,expected",
+ [
+ # short
+ (
+ Index(["あ", "いい", "ううう"]),
+ ("Index(['あ', 'いい', 'ううう'], dtype='object')"),
+ ),
+ # multiple lines
+ (
+ Index(["あ", "いい", "ううう"] * 10),
+ (
+ "Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
+ "'ううう', 'あ', 'いい', 'ううう',\n"
+ " 'あ', 'いい', 'ううう', 'あ', 'いい', "
+ "'ううう', 'あ', 'いい', 'ううう',\n"
+ " 'あ', 'いい', 'ううう', 'あ', 'いい', "
+ "'ううう', 'あ', 'いい', 'ううう',\n"
+ " 'あ', 'いい', 'ううう'],\n"
+ " dtype='object')"
+ ""
+ ),
+ ),
+ # truncated
+ (
+ Index(["あ", "いい", "ううう"] * 100),
+ (
+ "Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
+ "'ううう', 'あ', 'いい', 'ううう',\n"
+ " 'あ',\n"
+ " ...\n"
+ " 'ううう', 'あ', 'いい', 'ううう', 'あ', "
+ "'いい', 'ううう', 'あ', 'いい',\n"
+ " 'ううう'],\n"
+ " dtype='object', length=300)"
+ ),
+ ),
+ ],
+ )
+ def test_string_index_repr_with_unicode_option(self, index, expected):
+ # Enable Unicode option -----------------------------------------
+ with cf.option_context("display.unicode.east_asian_width", True):
+ result = repr(index)
+ assert result == expected
+
+ def test_repr_summary(self):
+ with cf.option_context("display.max_seq_items", 10):
+ result = repr(Index(np.arange(1000)))
+ assert len(result) < 200
+ assert "..." in result
+
+ def test_index_repr_bool_nan(self):
+ # GH32146
+ arr = Index([True, False, np.nan], dtype=object)
+ exp1 = arr.format()
+ out1 = ["True", "False", "NaN"]
+ assert out1 == exp1
+
+ exp2 = repr(arr)
+ out2 = "Index([True, False, nan], dtype='object')"
+ assert out2 == exp2
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 2e3a70e8c2215..53467819c3ba0 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -8,8 +8,6 @@
import numpy as np
import pytest
-import pandas._config.config as cf
-
from pandas._libs.tslib import Timestamp
from pandas.compat.numpy import np_datetime64_compat
from pandas.util._test_decorators import async_mark
@@ -1907,115 +1905,6 @@ def test_dt_conversion_preserves_name(self, dt_conv):
index = Index(["01:02:03", "01:02:04"], name="label")
assert index.name == dt_conv(index).name
- @pytest.mark.parametrize(
- "index,expected",
- [
- # ASCII
- # short
- (
- Index(["a", "bb", "ccc"]),
- """Index(['a', 'bb', 'ccc'], dtype='object')""",
- ),
- # multiple lines
- (
- Index(["a", "bb", "ccc"] * 10),
- """\
-Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
- 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
- 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
- dtype='object')""",
- ),
- # truncated
- (
- Index(["a", "bb", "ccc"] * 100),
- """\
-Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
- ...
- 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
- dtype='object', length=300)""",
- ),
- # Non-ASCII
- # short
- (
- Index(["あ", "いい", "ううう"]),
- """Index(['あ', 'いい', 'ううう'], dtype='object')""",
- ),
- # multiple lines
- (
- Index(["あ", "いい", "ううう"] * 10),
- (
- "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
- "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
- " 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
- "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
- " 'あ', 'いい', 'ううう', 'あ', 'いい', "
- "'ううう'],\n"
- " dtype='object')"
- ),
- ),
- # truncated
- (
- Index(["あ", "いい", "ううう"] * 100),
- (
- "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
- "'あ', 'いい', 'ううう', 'あ',\n"
- " ...\n"
- " 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
- "'ううう', 'あ', 'いい', 'ううう'],\n"
- " dtype='object', length=300)"
- ),
- ),
- ],
- )
- def test_string_index_repr(self, index, expected):
- result = repr(index)
- assert result == expected
-
- @pytest.mark.parametrize(
- "index,expected",
- [
- # short
- (
- Index(["あ", "いい", "ううう"]),
- ("Index(['あ', 'いい', 'ううう'], dtype='object')"),
- ),
- # multiple lines
- (
- Index(["あ", "いい", "ううう"] * 10),
- (
- "Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
- "'ううう', 'あ', 'いい', 'ううう',\n"
- " 'あ', 'いい', 'ううう', 'あ', 'いい', "
- "'ううう', 'あ', 'いい', 'ううう',\n"
- " 'あ', 'いい', 'ううう', 'あ', 'いい', "
- "'ううう', 'あ', 'いい', 'ううう',\n"
- " 'あ', 'いい', 'ううう'],\n"
- " dtype='object')"
- ""
- ),
- ),
- # truncated
- (
- Index(["あ", "いい", "ううう"] * 100),
- (
- "Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
- "'ううう', 'あ', 'いい', 'ううう',\n"
- " 'あ',\n"
- " ...\n"
- " 'ううう', 'あ', 'いい', 'ううう', 'あ', "
- "'いい', 'ううう', 'あ', 'いい',\n"
- " 'ううう'],\n"
- " dtype='object', length=300)"
- ),
- ),
- ],
- )
- def test_string_index_repr_with_unicode_option(self, index, expected):
- # Enable Unicode option -----------------------------------------
- with cf.option_context("display.unicode.east_asian_width", True):
- result = repr(index)
- assert result == expected
-
def test_cached_properties_not_settable(self):
index = Index([1, 2, 3])
with pytest.raises(AttributeError, match="Can't set attribute"):
@@ -2236,12 +2125,6 @@ def test_is_monotonic_na(self, index):
assert index._is_strictly_monotonic_increasing is False
assert index._is_strictly_monotonic_decreasing is False
- def test_repr_summary(self):
- with cf.option_context("display.max_seq_items", 10):
- result = repr(Index(np.arange(1000)))
- assert len(result) < 200
- assert "..." in result
-
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_int_name_format(self, klass):
index = Index(["a", "b", "c"], name=0)
@@ -2265,17 +2148,6 @@ def test_intersect_str_dates(self):
expected = Index([], dtype=object)
tm.assert_index_equal(result, expected)
- def test_index_repr_bool_nan(self):
- # GH32146
- arr = Index([True, False, np.nan], dtype=object)
- exp1 = arr.format()
- out1 = ["True", "False", "NaN"]
- assert out1 == exp1
-
- exp2 = repr(arr)
- out2 = "Index([True, False, nan], dtype='object')"
- assert out2 == exp2
-
@pytest.mark.filterwarnings("ignore:elementwise comparison failed:FutureWarning")
def test_index_with_tuple_bool(self):
# GH34123
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38050 | 2020-11-25T01:36:22Z | 2020-11-25T21:01:58Z | 2020-11-25T21:01:58Z | 2020-11-25T21:37:08Z |
REF: share argmax axis validation test across all indexes | diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 62e508c491740..5353969d9a556 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1956,6 +1956,7 @@ def min(self, *, skipna=True, **kwargs):
-------
min : the minimum of this `Categorical`
"""
+ nv.validate_minmax_axis(kwargs.get("axis", 0))
nv.validate_min((), kwargs)
self.check_for_ordered("min")
@@ -1992,6 +1993,7 @@ def max(self, *, skipna=True, **kwargs):
-------
max : the maximum of this `Categorical`
"""
+ nv.validate_minmax_axis(kwargs.get("axis", 0))
nv.validate_max((), kwargs)
self.check_for_ordered("max")
diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py
index 7bd7d29ec9703..abf4ddd681d69 100644
--- a/pandas/tests/arrays/categorical/test_analytics.py
+++ b/pandas/tests/arrays/categorical/test_analytics.py
@@ -113,6 +113,8 @@ def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg):
f"the '{kwarg}' parameter is not supported in the pandas implementation "
f"of {method}"
)
+ if kwarg == "axis":
+ msg = r"`axis` must be fewer than the number of dimensions \(1\)"
kwargs = {kwarg: 42}
method = getattr(np, method)
with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index 7ce8640d09777..14f9c2f9de284 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -10,19 +10,6 @@
class DatetimeLike(Base):
- def test_argmax_axis_invalid(self):
- # GH#23081
- msg = r"`axis` must be fewer than the number of dimensions \(1\)"
- rng = self.create_index()
- with pytest.raises(ValueError, match=msg):
- rng.argmax(axis=1)
- with pytest.raises(ValueError, match=msg):
- rng.argmin(axis=2)
- with pytest.raises(ValueError, match=msg):
- rng.min(axis=-2)
- with pytest.raises(ValueError, match=msg):
- rng.max(axis=-3)
-
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py
index 5e7065f785309..afeeb63217489 100644
--- a/pandas/tests/indexes/test_any_index.py
+++ b/pandas/tests/indexes/test_any_index.py
@@ -89,3 +89,17 @@ def test_str(self, index):
index.name = "foo"
assert "'foo'" in str(index)
assert type(index).__name__ in str(index)
+
+
+class TestReductions:
+ def test_argmax_axis_invalid(self, index):
+ # GH#23081
+ msg = r"`axis` must be fewer than the number of dimensions \(1\)"
+ with pytest.raises(ValueError, match=msg):
+ index.argmax(axis=1)
+ with pytest.raises(ValueError, match=msg):
+ index.argmin(axis=2)
+ with pytest.raises(ValueError, match=msg):
+ index.min(axis=-2)
+ with pytest.raises(ValueError, match=msg):
+ index.max(axis=-3)
| https://api.github.com/repos/pandas-dev/pandas/pulls/38049 | 2020-11-25T00:10:51Z | 2020-11-26T17:55:27Z | 2020-11-26T17:55:27Z | 2020-11-26T18:52:17Z | |
ENH: adding support for Py3.6+ memory tracing for khash-maps | diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx
index cc080a87cfb5b..963fddd4d5af9 100644
--- a/pandas/_libs/hashtable.pyx
+++ b/pandas/_libs/hashtable.pyx
@@ -13,10 +13,14 @@ cnp.import_array()
from pandas._libs cimport util
-from pandas._libs.khash cimport kh_str_t, khiter_t
+from pandas._libs.khash cimport KHASH_TRACE_DOMAIN, kh_str_t, khiter_t
from pandas._libs.missing cimport checknull
+def get_hashtable_trace_domain():
+ return KHASH_TRACE_DOMAIN
+
+
cdef int64_t NPY_NAT = util.get_nat()
SIZE_HINT_LIMIT = (1 << 20) + 7
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index f7001c165870e..b582ed1533a8e 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -344,9 +344,11 @@ cdef class {{name}}HashTable(HashTable):
def sizeof(self, deep=False):
""" return the size of my table in bytes """
- return self.table.n_buckets * (sizeof({{dtype}}_t) + # keys
- sizeof(Py_ssize_t) + # vals
- sizeof(uint32_t)) # flags
+ overhead = 4 * sizeof(uint32_t) + 3 * sizeof(uint32_t*)
+ for_flags = max(1, self.table.n_buckets >> 5) * sizeof(uint32_t)
+ for_pairs = self.table.n_buckets * (sizeof({{dtype}}_t) + # keys
+ sizeof(Py_ssize_t)) # vals
+ return overhead + for_flags + for_pairs
cpdef get_item(self, {{dtype}}_t val):
cdef:
@@ -669,10 +671,11 @@ cdef class StringHashTable(HashTable):
self.table = NULL
def sizeof(self, deep=False):
- """ return the size of my table in bytes """
- return self.table.n_buckets * (sizeof(char *) + # keys
- sizeof(Py_ssize_t) + # vals
- sizeof(uint32_t)) # flags
+ overhead = 4 * sizeof(uint32_t) + 3 * sizeof(uint32_t*)
+ for_flags = max(1, self.table.n_buckets >> 5) * sizeof(uint32_t)
+ for_pairs = self.table.n_buckets * (sizeof(char *) + # keys
+ sizeof(Py_ssize_t)) # vals
+ return overhead + for_flags + for_pairs
cpdef get_item(self, str val):
cdef:
@@ -994,9 +997,11 @@ cdef class PyObjectHashTable(HashTable):
def sizeof(self, deep=False):
""" return the size of my table in bytes """
- return self.table.n_buckets * (sizeof(PyObject *) + # keys
- sizeof(Py_ssize_t) + # vals
- sizeof(uint32_t)) # flags
+ overhead = 4 * sizeof(uint32_t) + 3 * sizeof(uint32_t*)
+ for_flags = max(1, self.table.n_buckets >> 5) * sizeof(uint32_t)
+ for_pairs = self.table.n_buckets * (sizeof(PyObject *) + # keys
+ sizeof(Py_ssize_t)) # vals
+ return overhead + for_flags + for_pairs
cpdef get_item(self, object val):
cdef:
diff --git a/pandas/_libs/khash.pxd b/pandas/_libs/khash.pxd
index 8b082747bf22b..0d0c5ae058b21 100644
--- a/pandas/_libs/khash.pxd
+++ b/pandas/_libs/khash.pxd
@@ -14,6 +14,8 @@ from numpy cimport (
cdef extern from "khash_python.h":
+ const int KHASH_TRACE_DOMAIN
+
ctypedef uint32_t khint_t
ctypedef khint_t khiter_t
diff --git a/pandas/_libs/src/klib/khash.h b/pandas/_libs/src/klib/khash.h
index ecd15d1893c23..bb56b2fe2d145 100644
--- a/pandas/_libs/src/klib/khash.h
+++ b/pandas/_libs/src/klib/khash.h
@@ -115,6 +115,24 @@ int main() {
#include "../inline_helper.h"
+// hooks for memory allocator, C-runtime allocator used per default
+#ifndef KHASH_MALLOC
+#define KHASH_MALLOC malloc
+#endif
+
+#ifndef KHASH_REALLOC
+#define KHASH_REALLOC realloc
+#endif
+
+#ifndef KHASH_CALLOC
+#define KHASH_CALLOC calloc
+#endif
+
+#ifndef KHASH_FREE
+#define KHASH_FREE free
+#endif
+
+
#if UINT_MAX == 0xffffffffu
typedef unsigned int khint32_t;
#elif ULONG_MAX == 0xffffffffu
@@ -138,7 +156,7 @@ typedef unsigned char khint8_t;
#endif
typedef double khfloat64_t;
-typedef double khfloat32_t;
+typedef float khfloat32_t;
typedef khint32_t khint_t;
typedef khint_t khiter_t;
@@ -265,14 +283,14 @@ static const double __ac_HASH_UPPER = 0.77;
khval_t *vals; \
} kh_##name##_t; \
SCOPE kh_##name##_t *kh_init_##name(void) { \
- return (kh_##name##_t*)calloc(1, sizeof(kh_##name##_t)); \
+ return (kh_##name##_t*)KHASH_CALLOC(1, sizeof(kh_##name##_t)); \
} \
SCOPE void kh_destroy_##name(kh_##name##_t *h) \
{ \
if (h) { \
- free(h->keys); free(h->flags); \
- free(h->vals); \
- free(h); \
+ KHASH_FREE(h->keys); KHASH_FREE(h->flags); \
+ KHASH_FREE(h->vals); \
+ KHASH_FREE(h); \
} \
} \
SCOPE void kh_clear_##name(kh_##name##_t *h) \
@@ -305,11 +323,11 @@ static const double __ac_HASH_UPPER = 0.77;
if (new_n_buckets < 4) new_n_buckets = 4; \
if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0; /* requested size is too small */ \
else { /* hash table size to be changed (shrink or expand); rehash */ \
- new_flags = (khint32_t*)malloc(__ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
+ new_flags = (khint32_t*)KHASH_MALLOC(__ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
memset(new_flags, 0xff, __ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
if (h->n_buckets < new_n_buckets) { /* expand */ \
- h->keys = (khkey_t*)realloc(h->keys, new_n_buckets * sizeof(khkey_t)); \
- if (kh_is_map) h->vals = (khval_t*)realloc(h->vals, new_n_buckets * sizeof(khval_t)); \
+ h->keys = (khkey_t*)KHASH_REALLOC(h->keys, new_n_buckets * sizeof(khkey_t)); \
+ if (kh_is_map) h->vals = (khval_t*)KHASH_REALLOC(h->vals, new_n_buckets * sizeof(khval_t)); \
} /* otherwise shrink */ \
} \
} \
@@ -342,10 +360,10 @@ static const double __ac_HASH_UPPER = 0.77;
} \
} \
if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \
- h->keys = (khkey_t*)realloc(h->keys, new_n_buckets * sizeof(khkey_t)); \
- if (kh_is_map) h->vals = (khval_t*)realloc(h->vals, new_n_buckets * sizeof(khval_t)); \
+ h->keys = (khkey_t*)KHASH_REALLOC(h->keys, new_n_buckets * sizeof(khkey_t)); \
+ if (kh_is_map) h->vals = (khval_t*)KHASH_REALLOC(h->vals, new_n_buckets * sizeof(khval_t)); \
} \
- free(h->flags); /* free the working space */ \
+ KHASH_FREE(h->flags); /* free the working space */ \
h->flags = new_flags; \
h->n_buckets = new_n_buckets; \
h->n_occupied = h->size; \
@@ -691,8 +709,8 @@ KHASH_MAP_INIT_INT64(int64, size_t)
KHASH_MAP_INIT_UINT64(uint64, size_t)
KHASH_MAP_INIT_INT16(int16, size_t)
KHASH_MAP_INIT_UINT16(uint16, size_t)
-KHASH_MAP_INIT_INT16(int8, size_t)
-KHASH_MAP_INIT_UINT16(uint8, size_t)
+KHASH_MAP_INIT_INT8(int8, size_t)
+KHASH_MAP_INIT_UINT8(uint8, size_t)
#endif /* __AC_KHASH_H */
diff --git a/pandas/_libs/src/klib/khash_python.h b/pandas/_libs/src/klib/khash_python.h
index aea6d05745633..8e4e61b4f3077 100644
--- a/pandas/_libs/src/klib/khash_python.h
+++ b/pandas/_libs/src/klib/khash_python.h
@@ -1,6 +1,59 @@
#include <string.h>
#include <Python.h>
+// khash should report usage to tracemalloc
+#if PY_VERSION_HEX >= 0x03060000
+#include <pymem.h>
+#if PY_VERSION_HEX < 0x03070000
+#define PyTraceMalloc_Track _PyTraceMalloc_Track
+#define PyTraceMalloc_Untrack _PyTraceMalloc_Untrack
+#endif
+#else
+#define PyTraceMalloc_Track(...)
+#define PyTraceMalloc_Untrack(...)
+#endif
+
+
+static const int KHASH_TRACE_DOMAIN = 424242;
+void *traced_malloc(size_t size){
+ void * ptr = malloc(size);
+ if(ptr!=NULL){
+ PyTraceMalloc_Track(KHASH_TRACE_DOMAIN, (uintptr_t)ptr, size);
+ }
+ return ptr;
+}
+
+void *traced_calloc(size_t num, size_t size){
+ void * ptr = calloc(num, size);
+ if(ptr!=NULL){
+ PyTraceMalloc_Track(KHASH_TRACE_DOMAIN, (uintptr_t)ptr, num*size);
+ }
+ return ptr;
+}
+
+void *traced_realloc(void* old_ptr, size_t size){
+ void * ptr = realloc(old_ptr, size);
+ if(ptr!=NULL){
+ if(old_ptr != ptr){
+ PyTraceMalloc_Untrack(KHASH_TRACE_DOMAIN, (uintptr_t)old_ptr);
+ }
+ PyTraceMalloc_Track(KHASH_TRACE_DOMAIN, (uintptr_t)ptr, size);
+ }
+ return ptr;
+}
+
+void traced_free(void* ptr){
+ if(ptr!=NULL){
+ PyTraceMalloc_Untrack(KHASH_TRACE_DOMAIN, (uintptr_t)ptr);
+ }
+ free(ptr);
+}
+
+
+#define KHASH_MALLOC traced_malloc
+#define KHASH_REALLOC traced_realloc
+#define KHASH_CALLOC traced_calloc
+#define KHASH_FREE traced_free
#include "khash.h"
// Previously we were using the built in cpython hash function for doubles
@@ -128,7 +181,7 @@ typedef struct {
typedef kh_str_starts_t* p_kh_str_starts_t;
p_kh_str_starts_t PANDAS_INLINE kh_init_str_starts(void) {
- kh_str_starts_t *result = (kh_str_starts_t*)calloc(1, sizeof(kh_str_starts_t));
+ kh_str_starts_t *result = (kh_str_starts_t*)KHASH_CALLOC(1, sizeof(kh_str_starts_t));
result->table = kh_init_str();
return result;
}
@@ -151,7 +204,7 @@ khint_t PANDAS_INLINE kh_get_str_starts_item(const kh_str_starts_t* table, const
void PANDAS_INLINE kh_destroy_str_starts(kh_str_starts_t* table) {
kh_destroy_str(table->table);
- free(table);
+ KHASH_FREE(table);
}
void PANDAS_INLINE kh_resize_str_starts(kh_str_starts_t* table, khint_t val) {
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 27713b5bde201..4dcc08f21ba19 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2724,7 +2724,7 @@ def memory_usage(self, index=True, deep=False) -> Series:
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
- 5216
+ 5244
"""
result = self._constructor_sliced(
[c.memory_usage(index=False, deep=deep) for col, c in self.items()],
diff --git a/pandas/tests/base/test_misc.py b/pandas/tests/base/test_misc.py
index 8d5bda6291747..d02078814f60f 100644
--- a/pandas/tests/base/test_misc.py
+++ b/pandas/tests/base/test_misc.py
@@ -77,7 +77,7 @@ def test_memory_usage(index_or_series_obj):
if isinstance(obj, Index):
expected = 0
else:
- expected = 80 if IS64 else 48
+ expected = 108 if IS64 else 64
assert res_deep == res == expected
elif is_object or is_categorical:
# only deep will pick them up
diff --git a/pandas/tests/libs/test_hashtable.py b/pandas/tests/libs/test_hashtable.py
index 5ef110e9672f0..a6fd421911d3e 100644
--- a/pandas/tests/libs/test_hashtable.py
+++ b/pandas/tests/libs/test_hashtable.py
@@ -1,3 +1,6 @@
+from contextlib import contextmanager
+import tracemalloc
+
import numpy as np
import pytest
@@ -6,9 +9,27 @@
import pandas._testing as tm
+@contextmanager
+def activated_tracemalloc():
+ tracemalloc.start()
+ try:
+ yield
+ finally:
+ tracemalloc.stop()
+
+
+def get_allocated_khash_memory():
+ snapshot = tracemalloc.take_snapshot()
+ snapshot = snapshot.filter_traces(
+ (tracemalloc.DomainFilter(True, ht.get_hashtable_trace_domain()),)
+ )
+ return sum(map(lambda x: x.size, snapshot.traces))
+
+
@pytest.mark.parametrize(
"table_type, dtype",
[
+ (ht.PyObjectHashTable, np.object_),
(ht.Int64HashTable, np.int64),
(ht.UInt64HashTable, np.uint64),
(ht.Float64HashTable, np.float64),
@@ -53,13 +74,15 @@ def test_get_set_contains_len(self, table_type, dtype):
assert str(index + 2) in str(excinfo.value)
def test_map(self, table_type, dtype):
- N = 77
- table = table_type()
- keys = np.arange(N).astype(dtype)
- vals = np.arange(N).astype(np.int64) + N
- table.map(keys, vals)
- for i in range(N):
- assert table.get_item(keys[i]) == i + N
+ # PyObjectHashTable has no map-method
+ if table_type != ht.PyObjectHashTable:
+ N = 77
+ table = table_type()
+ keys = np.arange(N).astype(dtype)
+ vals = np.arange(N).astype(np.int64) + N
+ table.map(keys, vals)
+ for i in range(N):
+ assert table.get_item(keys[i]) == i + N
def test_map_locations(self, table_type, dtype):
N = 8
@@ -101,6 +124,53 @@ def test_unique(self, table_type, dtype):
unique = table.unique(keys)
tm.assert_numpy_array_equal(unique, expected)
+ def test_tracemalloc_works(self, table_type, dtype):
+ if dtype in (np.int8, np.uint8):
+ N = 256
+ else:
+ N = 30000
+ keys = np.arange(N).astype(dtype)
+ with activated_tracemalloc():
+ table = table_type()
+ table.map_locations(keys)
+ used = get_allocated_khash_memory()
+ my_size = table.sizeof()
+ assert used == my_size
+ del table
+ assert get_allocated_khash_memory() == 0
+
+ def test_tracemalloc_for_empty(self, table_type, dtype):
+ with activated_tracemalloc():
+ table = table_type()
+ used = get_allocated_khash_memory()
+ my_size = table.sizeof()
+ assert used == my_size
+ del table
+ assert get_allocated_khash_memory() == 0
+
+
+def test_tracemalloc_works_for_StringHashTable():
+ N = 1000
+ keys = np.arange(N).astype(np.compat.unicode).astype(np.object_)
+ with activated_tracemalloc():
+ table = ht.StringHashTable()
+ table.map_locations(keys)
+ used = get_allocated_khash_memory()
+ my_size = table.sizeof()
+ assert used == my_size
+ del table
+ assert get_allocated_khash_memory() == 0
+
+
+def test_tracemalloc_for_empty_StringHashTable():
+ with activated_tracemalloc():
+ table = ht.StringHashTable()
+ used = get_allocated_khash_memory()
+ my_size = table.sizeof()
+ assert used == my_size
+ del table
+ assert get_allocated_khash_memory() == 0
+
@pytest.mark.parametrize(
"table_type, dtype",
@@ -157,6 +227,7 @@ def get_ht_function(fun_name, type_suffix):
@pytest.mark.parametrize(
"dtype, type_suffix",
[
+ (np.object_, "object"),
(np.int64, "int64"),
(np.uint64, "uint64"),
(np.float64, "float64"),
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Tracemalloc is a standard tool for tracing of memory consumption in Python. Until now khash-maps were invisible for this tool.
This PR follows more or less numpy's approach (see https://github.com/numpy/numpy/commit/03534ec90dba2bcdcd649be64be57939dde4c6f5) for adding tracemalloc-support.
This PR fixes also silly mistakes from #37920 (see https://github.com/pandas-dev/pandas/pull/38048/commits/3291ed198432a80eb559382fbf9a35cb6a8ad749) and the somewhat imprecise implementation of `sizeof` for `XXXHashTable`-classes.
| https://api.github.com/repos/pandas-dev/pandas/pulls/38048 | 2020-11-24T22:48:40Z | 2020-11-26T17:43:44Z | 2020-11-26T17:43:44Z | 2020-11-27T08:15:07Z |
DOC: Remove repeated words and wrong /it's/ usage | diff --git a/doc/source/development/policies.rst b/doc/source/development/policies.rst
index ced5b686b8246..f8e6bda2085d8 100644
--- a/doc/source/development/policies.rst
+++ b/doc/source/development/policies.rst
@@ -35,7 +35,7 @@ We will not introduce new deprecations in patch releases.
Deprecations will only be enforced in **major** releases. For example, if a
behavior is deprecated in pandas 1.2.0, it will continue to work, with a
warning, for all releases in the 1.x series. The behavior will change and the
-deprecation removed in the next next major release (2.0.0).
+deprecation removed in the next major release (2.0.0).
.. note::
diff --git a/doc/source/user_guide/dsintro.rst b/doc/source/user_guide/dsintro.rst
index 905877cca61db..f2bb99dd2ebc0 100644
--- a/doc/source/user_guide/dsintro.rst
+++ b/doc/source/user_guide/dsintro.rst
@@ -439,7 +439,7 @@ Data Classes as introduced in `PEP557 <https://www.python.org/dev/peps/pep-0557>
can be passed into the DataFrame constructor.
Passing a list of dataclasses is equivalent to passing a list of dictionaries.
-Please be aware, that that all values in the list should be dataclasses, mixing
+Please be aware, that all values in the list should be dataclasses, mixing
types in the list would result in a TypeError.
.. ipython:: python
diff --git a/doc/source/user_guide/integer_na.rst b/doc/source/user_guide/integer_na.rst
index be38736f493b5..2d5673fe53be3 100644
--- a/doc/source/user_guide/integer_na.rst
+++ b/doc/source/user_guide/integer_na.rst
@@ -117,7 +117,7 @@ dtype if needed.
# coerce when needed
s + 0.01
-These dtypes can operate as part of of ``DataFrame``.
+These dtypes can operate as part of ``DataFrame``.
.. ipython:: python
diff --git a/doc/source/whatsnew/v0.12.0.rst b/doc/source/whatsnew/v0.12.0.rst
index 4de76510c6bc1..c12adb2f1334f 100644
--- a/doc/source/whatsnew/v0.12.0.rst
+++ b/doc/source/whatsnew/v0.12.0.rst
@@ -419,7 +419,7 @@ Bug fixes
~~~~~~~~~
- Plotting functions now raise a ``TypeError`` before trying to plot anything
- if the associated objects have have a dtype of ``object`` (:issue:`1818`,
+ if the associated objects have a dtype of ``object`` (:issue:`1818`,
:issue:`3572`, :issue:`3911`, :issue:`3912`), but they will try to convert object arrays to
numeric arrays if possible so that you can still plot, for example, an
object array with floats. This happens before any drawing takes place which
@@ -430,8 +430,8 @@ Bug fixes
- ``Series.str`` now supports iteration (:issue:`3638`). You can iterate over the
individual elements of each string in the ``Series``. Each iteration yields
- yields a ``Series`` with either a single character at each index of the
- original ``Series`` or ``NaN``. For example,
+ a ``Series`` with either a single character at each index of the original
+ ``Series`` or ``NaN``. For example,
.. ipython:: python
:okwarning:
diff --git a/doc/source/whatsnew/v0.14.0.rst b/doc/source/whatsnew/v0.14.0.rst
index 5b279a4973963..b59938a9b9c9b 100644
--- a/doc/source/whatsnew/v0.14.0.rst
+++ b/doc/source/whatsnew/v0.14.0.rst
@@ -923,7 +923,7 @@ Bug fixes
- ``HDFStore.select_as_multiple`` handles start and stop the same way as ``select`` (:issue:`6177`)
- ``HDFStore.select_as_coordinates`` and ``select_column`` works with a ``where`` clause that results in filters (:issue:`6177`)
- Regression in join of non_unique_indexes (:issue:`6329`)
-- Issue with groupby ``agg`` with a single function and a a mixed-type frame (:issue:`6337`)
+- Issue with groupby ``agg`` with a single function and a mixed-type frame (:issue:`6337`)
- Bug in ``DataFrame.replace()`` when passing a non- ``bool``
``to_replace`` argument (:issue:`6332`)
- Raise when trying to align on different levels of a MultiIndex assignment (:issue:`3738`)
diff --git a/doc/source/whatsnew/v0.15.2.rst b/doc/source/whatsnew/v0.15.2.rst
index 95ca925f18692..b5b25796fea73 100644
--- a/doc/source/whatsnew/v0.15.2.rst
+++ b/doc/source/whatsnew/v0.15.2.rst
@@ -136,7 +136,7 @@ Enhancements
- Added ability to export Categorical data to Stata (:issue:`8633`). See :ref:`here <io.stata-categorical>` for limitations of categorical variables exported to Stata data files.
- Added flag ``order_categoricals`` to ``StataReader`` and ``read_stata`` to select whether to order imported categorical data (:issue:`8836`). See :ref:`here <io.stata-categorical>` for more information on importing categorical variables from Stata data files.
-- Added ability to export Categorical data to to/from HDF5 (:issue:`7621`). Queries work the same as if it was an object array. However, the ``category`` dtyped data is stored in a more efficient manner. See :ref:`here <io.hdf5-categorical>` for an example and caveats w.r.t. prior versions of pandas.
+- Added ability to export Categorical data to/from HDF5 (:issue:`7621`). Queries work the same as if it was an object array. However, the ``category`` dtyped data is stored in a more efficient manner. See :ref:`here <io.hdf5-categorical>` for an example and caveats w.r.t. prior versions of pandas.
- Added support for ``searchsorted()`` on ``Categorical`` class (:issue:`8420`).
Other enhancements:
diff --git a/doc/source/whatsnew/v0.16.1.rst b/doc/source/whatsnew/v0.16.1.rst
index 39767684c01d0..269854111373f 100644
--- a/doc/source/whatsnew/v0.16.1.rst
+++ b/doc/source/whatsnew/v0.16.1.rst
@@ -6,7 +6,7 @@ Version 0.16.1 (May 11, 2015)
{{ header }}
-This is a minor bug-fix release from 0.16.0 and includes a a large number of
+This is a minor bug-fix release from 0.16.0 and includes a large number of
bug fixes along several new features, enhancements, and performance improvements.
We recommend that all users upgrade to this version.
@@ -72,7 +72,7 @@ setting the index of a ``DataFrame/Series`` with a ``category`` dtype would conv
Out[4]: Index(['c', 'a', 'b'], dtype='object')
-setting the index, will create create a ``CategoricalIndex``
+setting the index, will create a ``CategoricalIndex``
.. code-block:: ipython
diff --git a/doc/source/whatsnew/v0.16.2.rst b/doc/source/whatsnew/v0.16.2.rst
index 194bb61f2c1c8..37e8c64ea9ced 100644
--- a/doc/source/whatsnew/v0.16.2.rst
+++ b/doc/source/whatsnew/v0.16.2.rst
@@ -6,7 +6,7 @@ Version 0.16.2 (June 12, 2015)
{{ header }}
-This is a minor bug-fix release from 0.16.1 and includes a a large number of
+This is a minor bug-fix release from 0.16.1 and includes a large number of
bug fixes along some new features (:meth:`~DataFrame.pipe` method), enhancements, and performance improvements.
We recommend that all users upgrade to this version.
diff --git a/doc/source/whatsnew/v0.18.0.rst b/doc/source/whatsnew/v0.18.0.rst
index 636414cdab8d8..829c04dac9f2d 100644
--- a/doc/source/whatsnew/v0.18.0.rst
+++ b/doc/source/whatsnew/v0.18.0.rst
@@ -610,7 +610,7 @@ Subtraction by ``Timedelta`` in a ``Series`` by a ``Timestamp`` works (:issue:`1
pd.Timestamp('2012-01-01') - ser
-``NaT.isoformat()`` now returns ``'NaT'``. This change allows allows
+``NaT.isoformat()`` now returns ``'NaT'``. This change allows
``pd.Timestamp`` to rehydrate any timestamp like object from its isoformat
(:issue:`12300`).
diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst
index 8ae5ea5726fe9..6239c37174534 100644
--- a/doc/source/whatsnew/v0.20.0.rst
+++ b/doc/source/whatsnew/v0.20.0.rst
@@ -1167,7 +1167,7 @@ Other API changes
- ``.loc`` has compat with ``.ix`` for accepting iterators, and NamedTuples (:issue:`15120`)
- ``interpolate()`` and ``fillna()`` will raise a ``ValueError`` if the ``limit`` keyword argument is not greater than 0. (:issue:`9217`)
- ``pd.read_csv()`` will now issue a ``ParserWarning`` whenever there are conflicting values provided by the ``dialect`` parameter and the user (:issue:`14898`)
-- ``pd.read_csv()`` will now raise a ``ValueError`` for the C engine if the quote character is larger than than one byte (:issue:`11592`)
+- ``pd.read_csv()`` will now raise a ``ValueError`` for the C engine if the quote character is larger than one byte (:issue:`11592`)
- ``inplace`` arguments now require a boolean value, else a ``ValueError`` is thrown (:issue:`14189`)
- ``pandas.api.types.is_datetime64_ns_dtype`` will now report ``True`` on a tz-aware dtype, similar to ``pandas.api.types.is_datetime64_any_dtype``
- ``DataFrame.asof()`` will return a null filled ``Series`` instead the scalar ``NaN`` if a match is not found (:issue:`15118`)
@@ -1663,11 +1663,11 @@ Indexing
- Bug in ``.reset_index()`` when an all ``NaN`` level of a ``MultiIndex`` would fail (:issue:`6322`)
- Bug in ``.reset_index()`` when raising error for index name already present in ``MultiIndex`` columns (:issue:`16120`)
- Bug in creating a ``MultiIndex`` with tuples and not passing a list of names; this will now raise ``ValueError`` (:issue:`15110`)
-- Bug in the HTML display with with a ``MultiIndex`` and truncation (:issue:`14882`)
+- Bug in the HTML display with a ``MultiIndex`` and truncation (:issue:`14882`)
- Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`)
- Bug in ``pd.concat()`` where the names of ``MultiIndex`` of resulting ``DataFrame`` are not handled correctly when ``None`` is presented in the names of ``MultiIndex`` of input ``DataFrame`` (:issue:`15787`)
- Bug in ``DataFrame.sort_index()`` and ``Series.sort_index()`` where ``na_position`` doesn't work with a ``MultiIndex`` (:issue:`14784`, :issue:`16604`)
-- Bug in in ``pd.concat()`` when combining objects with a ``CategoricalIndex`` (:issue:`16111`)
+- Bug in ``pd.concat()`` when combining objects with a ``CategoricalIndex`` (:issue:`16111`)
- Bug in indexing with a scalar and a ``CategoricalIndex`` (:issue:`16123`)
IO
diff --git a/doc/source/whatsnew/v0.21.0.rst b/doc/source/whatsnew/v0.21.0.rst
index 6035b89aa8643..1bbbbdc7e5410 100644
--- a/doc/source/whatsnew/v0.21.0.rst
+++ b/doc/source/whatsnew/v0.21.0.rst
@@ -50,7 +50,7 @@ Parquet is designed to faithfully serialize and de-serialize ``DataFrame`` s, su
dtypes, including extension dtypes such as datetime with timezones.
This functionality depends on either the `pyarrow <http://arrow.apache.org/docs/python/>`__ or `fastparquet <https://fastparquet.readthedocs.io/en/latest/>`__ library.
-For more details, see see :ref:`the IO docs on Parquet <io.parquet>`.
+For more details, see :ref:`the IO docs on Parquet <io.parquet>`.
.. _whatsnew_0210.enhancements.infer_objects:
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 9ef50045d5b5e..ce784231a47d2 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1622,7 +1622,7 @@ Timedelta
- Bug in :class:`DataFrame` with ``timedelta64[ns]`` dtype division by ``Timedelta``-like scalar incorrectly returning ``timedelta64[ns]`` dtype instead of ``float64`` dtype (:issue:`20088`, :issue:`22163`)
- Bug in adding a :class:`Index` with object dtype to a :class:`Series` with ``timedelta64[ns]`` dtype incorrectly raising (:issue:`22390`)
- Bug in multiplying a :class:`Series` with numeric dtype against a ``timedelta`` object (:issue:`22390`)
-- Bug in :class:`Series` with numeric dtype when adding or subtracting an an array or ``Series`` with ``timedelta64`` dtype (:issue:`22390`)
+- Bug in :class:`Series` with numeric dtype when adding or subtracting an array or ``Series`` with ``timedelta64`` dtype (:issue:`22390`)
- Bug in :class:`Index` with numeric dtype when multiplying or dividing an array with dtype ``timedelta64`` (:issue:`22390`)
- Bug in :class:`TimedeltaIndex` incorrectly allowing indexing with ``Timestamp`` object (:issue:`20464`)
- Fixed bug where subtracting :class:`Timedelta` from an object-dtyped array would raise ``TypeError`` (:issue:`21980`)
@@ -1868,7 +1868,7 @@ Reshaping
- :func:`pandas.core.groupby.GroupBy.rank` now raises a ``ValueError`` when an invalid value is passed for argument ``na_option`` (:issue:`22124`)
- Bug in :func:`get_dummies` with Unicode attributes in Python 2 (:issue:`22084`)
- Bug in :meth:`DataFrame.replace` raises ``RecursionError`` when replacing empty lists (:issue:`22083`)
-- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` when dict is used as the ``to_replace`` value and one key in the dict is is another key's value, the results were inconsistent between using integer key and using string key (:issue:`20656`)
+- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` when dict is used as the ``to_replace`` value and one key in the dict is another key's value, the results were inconsistent between using integer key and using string key (:issue:`20656`)
- Bug in :meth:`DataFrame.drop_duplicates` for empty ``DataFrame`` which incorrectly raises an error (:issue:`20516`)
- Bug in :func:`pandas.wide_to_long` when a string is passed to the stubnames argument and a column name is a substring of that stubname (:issue:`22468`)
- Bug in :func:`merge` when merging ``datetime64[ns, tz]`` data that contained a DST transition (:issue:`18885`)
diff --git a/doc/source/whatsnew/v0.6.0.rst b/doc/source/whatsnew/v0.6.0.rst
index 8ff688eaa91e7..253ca4d4188e5 100644
--- a/doc/source/whatsnew/v0.6.0.rst
+++ b/doc/source/whatsnew/v0.6.0.rst
@@ -15,7 +15,7 @@ New features
~~~~~~~~~~~~
- :ref:`Added <reshaping.melt>` ``melt`` function to ``pandas.core.reshape``
- :ref:`Added <groupby.multiindex>` ``level`` parameter to group by level in Series and DataFrame descriptive statistics (:issue:`313`)
-- :ref:`Added <basics.head_tail>` ``head`` and ``tail`` methods to Series, analogous to to DataFrame (:issue:`296`)
+- :ref:`Added <basics.head_tail>` ``head`` and ``tail`` methods to Series, analogous to DataFrame (:issue:`296`)
- :ref:`Added <indexing.boolean>` ``Series.isin`` function which checks if each value is contained in a passed sequence (:issue:`289`)
- :ref:`Added <io.formatting>` ``float_format`` option to ``Series.to_string``
- :ref:`Added <io.parse_dates>` ``skip_footer`` (:issue:`291`) and ``converters`` (:issue:`343`) options to ``read_csv`` and ``read_table``
diff --git a/doc/source/whatsnew/v0.8.0.rst b/doc/source/whatsnew/v0.8.0.rst
index b34c2a5c6a07c..781054fc4de7c 100644
--- a/doc/source/whatsnew/v0.8.0.rst
+++ b/doc/source/whatsnew/v0.8.0.rst
@@ -81,7 +81,7 @@ Time Series changes and improvements
timestamps are stored as UTC; Timestamps from DatetimeIndex objects with time
zone set will be localized to local time. Time zone conversions are therefore
essentially free. User needs to know very little about pytz library now; only
- time zone names as as strings are required. Time zone-aware timestamps are
+ time zone names as strings are required. Time zone-aware timestamps are
equal if and only if their UTC timestamps match. Operations between time
zone-aware time series with different time zones will result in a UTC-indexed
time series.
diff --git a/pandas/_testing.py b/pandas/_testing.py
index da2963e167767..68371b782aac2 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -1768,7 +1768,7 @@ def box_expected(expected, box_cls, transpose=True):
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
- # for vector operations, we we need a DataFrame to be a single-row,
+ # for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index fcd47b37268cc..ecc94dd58066e 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -446,7 +446,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
# Albeit hashmap has O(1) look-up (vs. O(logn) in sorted array),
# in1d is faster for small sizes
if len(comps) > 1_000_000 and len(values) <= 26 and not is_object_dtype(comps):
- # If the the values include nan we need to check for nan explicitly
+ # If the values include nan we need to check for nan explicitly
# since np.nan it not equal to np.nan
if isna(values).any():
f = lambda c, v: np.logical_or(np.in1d(c, v), np.isnan(c))
@@ -1551,7 +1551,7 @@ def take(arr, indices, axis: int = 0, allow_fill: bool = False, fill_value=None)
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
- other negative values raise a ``ValueError``.
+ negative values raise a ``ValueError``.
fill_value : any, optional
Fill value to use for NA-indices when `allow_fill` is True.
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 62e508c491740..5b230c175eaef 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -77,7 +77,7 @@ def func(self, other):
"Unordered Categoricals can only compare equality or not"
)
if isinstance(other, Categorical):
- # Two Categoricals can only be be compared if the categories are
+ # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
diff --git a/pandas/core/arrays/floating.py b/pandas/core/arrays/floating.py
index a5ebdd8d963e2..4aed39d7edb92 100644
--- a/pandas/core/arrays/floating.py
+++ b/pandas/core/arrays/floating.py
@@ -120,7 +120,7 @@ def coerce_to_array(
-------
tuple of (values, mask)
"""
- # if values is floating numpy array, preserve it's dtype
+ # if values is floating numpy array, preserve its dtype
if dtype is None and hasattr(values, "dtype"):
if is_float_dtype(values.dtype):
dtype = values.dtype
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index c9d7632e39228..2897c18acfb09 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -183,7 +183,7 @@ def coerce_to_array(
-------
tuple of (values, mask)
"""
- # if values is integer numpy array, preserve it's dtype
+ # if values is integer numpy array, preserve its dtype
if dtype is None and hasattr(values, "dtype"):
if is_integer_dtype(values.dtype):
dtype = values.dtype
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 0cdce1eabccc6..4eb67dcd12728 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -144,7 +144,7 @@ class PandasArray(
# If you're wondering why pd.Series(cls) doesn't put the array in an
# ExtensionBlock, search for `ABCPandasArray`. We check for
- # that _typ to ensure that that users don't unnecessarily use EAs inside
+ # that _typ to ensure that users don't unnecessarily use EAs inside
# pandas internals, which turns off things like block consolidation.
_typ = "npy_extension"
__array_priority__ = 1000
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 8630867c64f88..c2be81cd46b3b 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -99,9 +99,8 @@ def __eq__(self, other: Any) -> bool:
By default, 'other' is considered equal if either
* it's a string matching 'self.name'.
- * it's an instance of this type and all of the
- the attributes in ``self._metadata`` are equal between
- `self` and `other`.
+ * it's an instance of this type and all of the attributes
+ in ``self._metadata`` are equal between `self` and `other`.
Parameters
----------
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 465ec821400e7..0f0e82f4ad4e2 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -391,7 +391,7 @@ def maybe_cast_to_extension_array(
assertion_msg = f"must pass a subclass of ExtensionArray: {cls}"
assert issubclass(cls, ABCExtensionArray), assertion_msg
- # Everything can be be converted to StringArrays, but we may not want to convert
+ # Everything can be converted to StringArrays, but we may not want to convert
if (
issubclass(cls, (StringArray, ArrowStringArray))
and lib.infer_dtype(obj) != "string"
@@ -1200,7 +1200,7 @@ def soft_convert_objects(
elif conversion_count > 1 and coerce:
raise ValueError(
"Only one of 'datetime', 'numeric' or "
- "'timedelta' can be True when when coerce=True."
+ "'timedelta' can be True when coerce=True."
)
if not is_object_dtype(values.dtype):
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 14184f044ae95..b4f6d587c6642 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1727,7 +1727,7 @@ def _validate_date_like_dtype(dtype) -> None:
------
TypeError : The dtype could not be casted to a date-like dtype.
ValueError : The dtype is an illegal date-like dtype (e.g. the
- the frequency provided is too specific)
+ frequency provided is too specific)
"""
try:
typ = np.datetime_data(dtype)[0]
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 01b34187997cb..07280702cf06f 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -47,7 +47,7 @@ class PandasExtensionDtype(ExtensionDtype):
type: Any
kind: Any
# The Any type annotations above are here only because mypy seems to have a
- # problem dealing with with multiple inheritance from PandasExtensionDtype
+ # problem dealing with multiple inheritance from PandasExtensionDtype
# and ExtensionDtype's @properties in the subclasses below. The kind and
# type variables in those subclasses are explicitly typed below.
subdtype = None
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6f6d94f0e9f8e..b14a80beb9f8c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6473,7 +6473,7 @@ def update(
1 b e
2 c f
- For Series, it's name attribute must be set.
+ For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 7e8012d76fe1b..8f057a98eed2d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1114,7 +1114,7 @@ def rename_axis(self, mapper=lib.no_default, **kwargs):
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of the
- the corresponding index if mapper is a list or a scalar.
+ corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
@@ -2717,7 +2717,7 @@ def to_sql(
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
- An `sqlalchemy.engine.Connection` can also be passed to to `con`:
+ An `sqlalchemy.engine.Connection` can also be passed to `con`:
>>> with engine.begin() as connection:
... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
@@ -5483,7 +5483,7 @@ def __setattr__(self, name: str, value) -> None:
def _dir_additions(self) -> Set[str]:
"""
add the string-like attributes from the info_axis.
- If info_axis is a MultiIndex, it's first level values are used.
+ If info_axis is a MultiIndex, its first level values are used.
"""
additions = super()._dir_additions()
if self._info_axis._can_hold_strings:
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 3395b9d36fd0c..244c47cd1f1ea 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -262,7 +262,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
return self._python_agg_general(func, *args, **kwargs)
except (ValueError, KeyError):
# TODO: KeyError is raised in _python_agg_general,
- # see see test_groupby.test_basic
+ # see test_groupby.test_basic
result = self._aggregate_named(func, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
@@ -1390,8 +1390,7 @@ def _transform_fast(self, result: DataFrame) -> DataFrame:
"""
obj = self._obj_with_exclusions
- # for each col, reshape to to size of original frame
- # by take operation
+ # for each col, reshape to size of original frame by take operation
ids, _, ngroup = self.grouper.group_info
result = result.reindex(self.grouper.result_index, copy=False)
output = [
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index fc80852f00c95..e9aab79d810e6 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -148,7 +148,7 @@ def _get_splitter(self, data: FrameOrSeries, axis: int = 0) -> "DataSplitter":
-------
Generator yielding subsetted objects
- __finalize__ has not been called for the the subsetted objects returned.
+ __finalize__ has not been called for the subsetted objects returned.
"""
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index b5900ead246f3..c4f6dac1915ec 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1486,7 +1486,7 @@ def _get_level_number(self, level) -> int:
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
- For internal compatibility with with the Index API.
+ For internal compatibility with the Index API.
Sort the Index. This is for compat with MultiIndex
@@ -4451,7 +4451,7 @@ def equals(self, other: object) -> bool:
if not isinstance(other, Index):
return False
- # If other is a subclass of self and defines it's own equals method, we
+ # If other is a subclass of self and defines its own equals method, we
# dispatch to the subclass method. For instance for a MultiIndex,
# a d-level MultiIndex can equal d-tuple Index.
# Note: All EA-backed Index subclasses override equals
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 98752a21e44a2..8e7d429ce426d 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -479,7 +479,7 @@ def _needs_i8_conversion(self, key) -> bool:
"""
Check if a given key needs i8 conversion. Conversion is necessary for
Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An
- Interval-like requires conversion if it's endpoints are one of the
+ Interval-like requires conversion if its endpoints are one of the
aforementioned types.
Assumes that any list-like data has already been cast to an Index.
@@ -501,7 +501,7 @@ def _needs_i8_conversion(self, key) -> bool:
def _maybe_convert_i8(self, key):
"""
- Maybe convert a given key to it's equivalent i8 value(s). Used as a
+ Maybe convert a given key to its equivalent i8 value(s). Used as a
preprocessing step prior to IntervalTree queries (self._engine), which
expects numeric data.
@@ -540,7 +540,7 @@ def _maybe_convert_i8(self, key):
# DatetimeIndex/TimedeltaIndex
key_dtype, key_i8 = key.dtype, Index(key.asi8)
if key.hasnans:
- # convert NaT from it's i8 value to np.nan so it's not viewed
+ # convert NaT from its i8 value to np.nan so it's not viewed
# as a valid value, maybe causing errors (e.g. is_overlapping)
key_i8 = key_i8.where(~key._isnan)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 11dd3598b4864..aef8855df6b03 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2311,7 +2311,7 @@ def reorder_levels(self, order):
def _get_codes_for_sorting(self):
"""
- we categorizing our codes by using the
+ we are categorizing our codes by using the
available categories (all, not just observed)
excluding any missing ones (-1); this is in preparation
for sorting, where we need to disambiguate that -1 is not
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index f6d14a1c1503c..080c307ac895f 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1021,7 +1021,7 @@ def _multi_take(self, tup: Tuple):
def _getitem_iterable(self, key, axis: int):
"""
- Index current object with an an iterable collection of keys.
+ Index current object with an iterable collection of keys.
Parameters
----------
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index d38974839394d..80c4cd5b44a92 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -1646,7 +1646,7 @@ def nanpercentile(
interpolation=interpolation,
)
- # Note: we have to do do `astype` and not view because in general we
+ # Note: we have to do `astype` and not view because in general we
# have float result at this point, not i8
return result.astype(values.dtype)
diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py
index f9a08bf862644..0bea19bec2cdd 100644
--- a/pandas/io/excel/_odswriter.py
+++ b/pandas/io/excel/_odswriter.py
@@ -182,7 +182,7 @@ def _process_style(self, style: Dict[str, Any]) -> str:
Returns
-------
style_key : str
- Unique style key for for later reference in sheet
+ Unique style key for later reference in sheet
"""
from odf.style import (
ParagraphProperties,
diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py
index ab9c9fe995008..ea291bcbfa44c 100644
--- a/pandas/io/formats/console.py
+++ b/pandas/io/formats/console.py
@@ -78,7 +78,7 @@ def check_main():
def in_ipython_frontend():
"""
- Check if we're inside an an IPython zmq frontend.
+ Check if we're inside an IPython zmq frontend.
Returns
-------
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index cbe2ed1ed838d..fbda78a1842ca 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -144,7 +144,7 @@ def _initialize_columns(self, cols: Optional[Sequence[Label]]) -> Sequence[Label
self.obj = self.obj.loc[:, cols]
# update columns to include possible multiplicity of dupes
- # and make sure sure cols is just a list of labels
+ # and make sure cols is just a list of labels
new_cols = self.obj.columns
if isinstance(new_cols, ABCIndexClass):
return new_cols._format_native_types(**self._number_format)
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index 72b07000146b2..ac453839792f3 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -308,7 +308,7 @@ def format_object_summary(
name : name, optional
defaults to the class name of the obj
indent_for_name : bool, default True
- Whether subsequent lines should be be indented to
+ Whether subsequent lines should be indented to
align with the name.
line_break_each_value : bool, default False
If True, inserts a line break for each value of ``obj``.
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index f80c5317598e7..0eeff44d0f74c 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -903,7 +903,7 @@ def set_table_attributes(self, attributes: str) -> "Styler":
Set the table attributes.
These are the items that show up in the opening ``<table>`` tag
- in addition to to automatic (by default) id.
+ in addition to automatic (by default) id.
Parameters
----------
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 51888e5021d80..1fea50ecade3c 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -212,7 +212,7 @@ def read_sql_table(
table_name : str
Name of SQL table in database.
con : SQLAlchemy connectable or str
- A database URI could be provided as as str.
+ A database URI could be provided as str.
SQLite DBAPI connection mode not supported.
schema : str, default None
Name of SQL schema in database to query (if database flavor
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 014094923185f..27fac95a16b7a 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -126,7 +126,7 @@ def test_is_list_like_disallow_sets(maybe_list_like):
def test_is_list_like_recursion():
# GH 33721
- # interpreter would crash with with SIGABRT
+ # interpreter would crash with SIGABRT
def foo():
inference.is_list_like([])
foo()
diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py
index f77b7cd4a6c3b..b7692eee16bf8 100644
--- a/pandas/tests/frame/methods/test_describe.py
+++ b/pandas/tests/frame/methods/test_describe.py
@@ -117,7 +117,7 @@ def test_describe_categorical(self):
def test_describe_empty_categorical_column(self):
# GH#26397
- # Ensure the index of an an empty categorical DataFrame column
+ # Ensure the index of an empty categorical DataFrame column
# also contains (count, unique, top, freq)
df = DataFrame({"empty_col": Categorical([])})
result = df.describe()
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index da556523a3341..78c438fa11a0e 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1615,7 +1615,7 @@ def test_groupby_multiindex_not_lexsorted():
def test_index_label_overlaps_location():
# checking we don't have any label/location confusion in the
- # the wake of GH5375
+ # wake of GH5375
df = DataFrame(list("ABCDE"), index=[2, 0, 2, 1, 1])
g = df.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py
index fb17e1df6341b..ac4477e60d5dc 100644
--- a/pandas/tests/indexes/conftest.py
+++ b/pandas/tests/indexes/conftest.py
@@ -13,7 +13,7 @@ def sort(request):
parameters [True, False].
We can't combine them as sort=True is not permitted
- in in the Index setops methods.
+ in the Index setops methods.
"""
return request.param
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 3bf37f4cade8b..353dfcf37c28d 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -208,7 +208,7 @@ def test_series_partial_set(self):
result = ser.reindex([2, 2, "x", 1])
tm.assert_series_equal(result, expected, check_index_type=True)
- # raises as nothing in in the index
+ # raises as nothing is in the index
msg = (
r"\"None of \[Int64Index\(\[3, 3, 3\], dtype='int64'\)\] are "
r"in the \[index\]\""
@@ -289,7 +289,7 @@ def test_series_partial_set_with_name(self):
with pytest.raises(KeyError, match="with any missing labels"):
ser.loc[[2, 2, "x", 1]]
- # raises as nothing in in the index
+ # raises as nothing is in the index
msg = (
r"\"None of \[Int64Index\(\[3, 3, 3\], dtype='int64', "
r"name='idx'\)\] are in the \[index\]\""
diff --git a/versioneer.py b/versioneer.py
index 288464f1efa44..e7fed874ae20f 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -1541,7 +1541,7 @@ def get_cmdclass(cmdclass=None):
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
- # sandbox that restores sys.modules to it's pre-build state, so the
+ # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md
index 515d23afb93ec..7cf78958370ac 100644
--- a/web/pandas/community/ecosystem.md
+++ b/web/pandas/community/ecosystem.md
@@ -6,7 +6,7 @@ encouraging because it means pandas is not only helping users to handle
their data tasks but also that it provides a better starting point for
developers to build powerful and more focused data tools. The creation
of libraries that complement pandas' functionality also allows pandas
-development to remain focused around it's original requirements.
+development to remain focused around its original requirements.
This is an inexhaustive list of projects that build on pandas in order
to provide tools in the PyData space. For a list of projects that depend
| - [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Remove (manually reviewed) repeated words from documentation/comments as well as wrong usage of `it's` (replaced by `its` where appropriated). | https://api.github.com/repos/pandas-dev/pandas/pulls/38047 | 2020-11-24T19:30:11Z | 2020-11-26T17:37:58Z | 2020-11-26T17:37:58Z | 2020-11-26T18:55:15Z |
CLN: dont mix Int64Index into PeriodIndex | diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index e25119162368f..0f9a0052c18d0 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -65,7 +65,7 @@ def _new_PeriodIndex(cls, **d):
wrap=True,
)
@inherit_names(["is_leap_year", "_format_native_types"], PeriodArray)
-class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
+class PeriodIndex(DatetimeIndexOpsMixin):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
@@ -436,8 +436,7 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False)
)
# _assert_can_do_setop ensures we have matching dtype
- result = Int64Index.join(
- self,
+ result = super().join(
other,
how=how,
level=level,
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 21f7899f24b51..d7ee4acc2e670 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -44,6 +44,7 @@
is_list_like,
is_string_dtype,
is_timedelta64_dtype,
+ needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
@@ -4771,7 +4772,7 @@ def _convert_index(name: str, index: Index, encoding: str, errors: str) -> Index
kind = _dtype_to_kind(dtype_name)
atom = DataIndexableCol._get_atom(converted)
- if isinstance(index, Int64Index):
+ if isinstance(index, Int64Index) or needs_i8_conversion(index.dtype):
# Includes Int64Index, RangeIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex,
# in which case "kind" is "integer", "integer", "datetime64",
# "timedelta64", and "integer", respectively.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38045 | 2020-11-24T18:07:05Z | 2020-11-26T17:34:48Z | 2020-11-26T17:34:48Z | 2020-11-26T18:50:07Z |
CLN: Remove .values from groupby.sem | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 6e26e9a43bb2a..ae3612c99d5cd 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1600,10 +1600,8 @@ def sem(self, ddof: int = 1):
cols = result.columns.get_indexer_for(
result.columns.difference(self.exclusions).unique()
)
- # TODO(GH-22046) - setting with iloc broken if labels are not unique
- # .values to remove labels
- result.iloc[:, cols] = (
- result.iloc[:, cols].values / np.sqrt(self.count().iloc[:, cols]).values
+ result.iloc[:, cols] = result.iloc[:, cols] / np.sqrt(
+ self.count().iloc[:, cols]
)
return result
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Closure of #22046 allows removing the workaround. | https://api.github.com/repos/pandas-dev/pandas/pulls/38044 | 2020-11-24T17:58:15Z | 2020-11-26T16:17:44Z | 2020-11-26T16:17:44Z | 2020-12-06T14:03:54Z |
TYP: Add cast to ABC Index-like types | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 7bae912a070a9..713d58b4df5be 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -168,6 +168,7 @@ def _ensure_data(
elif is_categorical_dtype(values.dtype) and (
is_categorical_dtype(dtype) or dtype is None
):
+ values = cast("Categorical", values)
values = values.codes
dtype = pandas_dtype("category")
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py
index 0e5867809fe52..ee46a123d03c4 100644
--- a/pandas/core/dtypes/generic.py
+++ b/pandas/core/dtypes/generic.py
@@ -4,7 +4,20 @@
from typing import TYPE_CHECKING, Type, cast
if TYPE_CHECKING:
- from pandas import DataFrame, Series
+ from pandas import (
+ CategoricalIndex,
+ DataFrame,
+ DatetimeIndex,
+ Float64Index,
+ Int64Index,
+ IntervalIndex,
+ MultiIndex,
+ PeriodIndex,
+ RangeIndex,
+ Series,
+ TimedeltaIndex,
+ UInt64Index,
+ )
from pandas.core.generic import NDFrame
@@ -23,23 +36,45 @@ def _check(cls, inst) -> bool:
return meta(name, tuple(), dct)
-ABCInt64Index = create_pandas_abc_type("ABCInt64Index", "_typ", ("int64index",))
-ABCUInt64Index = create_pandas_abc_type("ABCUInt64Index", "_typ", ("uint64index",))
-ABCRangeIndex = create_pandas_abc_type("ABCRangeIndex", "_typ", ("rangeindex",))
-ABCFloat64Index = create_pandas_abc_type("ABCFloat64Index", "_typ", ("float64index",))
-ABCMultiIndex = create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",))
-ABCDatetimeIndex = create_pandas_abc_type(
- "ABCDatetimeIndex", "_typ", ("datetimeindex",)
-)
-ABCTimedeltaIndex = create_pandas_abc_type(
- "ABCTimedeltaIndex", "_typ", ("timedeltaindex",)
-)
-ABCPeriodIndex = create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",))
-ABCCategoricalIndex = create_pandas_abc_type(
- "ABCCategoricalIndex", "_typ", ("categoricalindex",)
-)
-ABCIntervalIndex = create_pandas_abc_type(
- "ABCIntervalIndex", "_typ", ("intervalindex",)
+ABCInt64Index = cast(
+ "Type[Int64Index]",
+ create_pandas_abc_type("ABCInt64Index", "_typ", ("int64index",)),
+)
+ABCUInt64Index = cast(
+ "Type[UInt64Index]",
+ create_pandas_abc_type("ABCUInt64Index", "_typ", ("uint64index",)),
+)
+ABCRangeIndex = cast(
+ "Type[RangeIndex]",
+ create_pandas_abc_type("ABCRangeIndex", "_typ", ("rangeindex",)),
+)
+ABCFloat64Index = cast(
+ "Type[Float64Index]",
+ create_pandas_abc_type("ABCFloat64Index", "_typ", ("float64index",)),
+)
+ABCMultiIndex = cast(
+ "Type[MultiIndex]",
+ create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",)),
+)
+ABCDatetimeIndex = cast(
+ "Type[DatetimeIndex]",
+ create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",)),
+)
+ABCTimedeltaIndex = cast(
+ "Type[TimedeltaIndex]",
+ create_pandas_abc_type("ABCTimedeltaIndex", "_typ", ("timedeltaindex",)),
+)
+ABCPeriodIndex = cast(
+ "Type[PeriodIndex]",
+ create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",)),
+)
+ABCCategoricalIndex = cast(
+ "Type[CategoricalIndex]",
+ create_pandas_abc_type("ABCCategoricalIndex", "_typ", ("categoricalindex",)),
+)
+ABCIntervalIndex = cast(
+ "Type[IntervalIndex]",
+ create_pandas_abc_type("ABCIntervalIndex", "_typ", ("intervalindex",)),
)
ABCIndexClass = create_pandas_abc_type(
"ABCIndexClass",
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index f6eeb121b1ac0..57c86c332bb97 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -227,6 +227,7 @@ class DatetimeIndex(DatetimeTimedeltaMixin):
_is_numeric_dtype = False
_data: DatetimeArray
+ inferred_freq: Optional[str]
tz: Optional[tzinfo]
# --------------------------------------------------------------------
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py
index 0212fd6f695cb..f6f3571955e6e 100644
--- a/pandas/io/formats/latex.py
+++ b/pandas/io/formats/latex.py
@@ -153,11 +153,11 @@ def pad_empties(x):
break
return [x[0]] + [i if i else " " * len(pad) for i in x[1:]]
- out = (pad_empties(i) for i in out)
+ gen = (pad_empties(i) for i in out)
# Add empty spaces for each column level
clevels = self.frame.columns.nlevels
- out = [[" " * len(i[-1])] * clevels + i for i in out]
+ out = [[" " * len(i[-1])] * clevels + i for i in gen]
# Add the column names to the last index column
cnames = self.frame.columns.names
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py
index 64cd43c230f28..ae4fff7b495d0 100644
--- a/pandas/plotting/_matplotlib/timeseries.py
+++ b/pandas/plotting/_matplotlib/timeseries.py
@@ -1,7 +1,7 @@
# TODO: Use the fact that axis can have units to simplify the process
import functools
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional, cast
import numpy as np
@@ -26,7 +26,7 @@
if TYPE_CHECKING:
from matplotlib.axes import Axes
- from pandas import Index, Series
+ from pandas import DatetimeIndex, Index, Series
# ---------------------------------------------------------------------
# Plotting functions and monkey patches
@@ -243,6 +243,7 @@ def maybe_convert_index(ax: "Axes", data):
if freq is None:
# We only get here for DatetimeIndex
+ data.index = cast("DatetimeIndex", data.index)
freq = data.index.inferred_freq
freq = to_offset(freq)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
cc @simonjayhawkins
For the added type-ignore, the attribute `inferred_freq` is added dynamically onto the index instance. | https://api.github.com/repos/pandas-dev/pandas/pulls/38043 | 2020-11-24T17:40:31Z | 2020-11-28T17:46:03Z | 2020-11-28T17:46:03Z | 2020-12-06T14:03:54Z |
BUG: pytables in py39 | diff --git a/ci/deps/azure-39.yaml b/ci/deps/azure-39.yaml
index 67edc83a9d738..c4c84e73fa684 100644
--- a/ci/deps/azure-39.yaml
+++ b/ci/deps/azure-39.yaml
@@ -15,3 +15,8 @@ dependencies:
- numpy
- python-dateutil
- pytz
+
+ # optional dependencies
+ - pytables
+ - scipy
+ - pyarrow=1.0
diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index 609c3650c8cc2..dd88f79371d65 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -28,6 +28,7 @@ Bug fixes
- Bug in metadata propagation for ``groupby`` iterator (:issue:`37343`)
- Bug in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`)
- Bug in :class:`RollingGroupby` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`)
+- Bug in pytables methods in python 3.9 (:issue:`38041`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 6ec637a8b4845..0498d4d171c00 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -430,6 +430,10 @@ def visit_Subscript(self, node, **kwargs):
except AttributeError:
pass
+ if isinstance(slobj, Term):
+ # In py39 np.ndarray lookups with Term containing int raise
+ slobj = slobj.value
+
try:
return self.const_type(value[slobj], self.env)
except TypeError as err:
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 32d1cf82c4330..afd2f56efb935 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -4462,7 +4462,7 @@ def test_categorical(self, setup_path):
# Appending must have the same categories
df3 = df.copy()
- df3["s"].cat.remove_unused_categories(inplace=True)
+ df3["s"] = df3["s"].cat.remove_unused_categories()
with pytest.raises(ValueError):
store.append("df3", df3)
| There are failures I'm getting locally since homebrew bumped up to py39. I'm not seeing them on the CI, but they _did_ appear around the same time as these [different](https://dev.azure.com/pandas-dev/pandas/_build/results?buildId=48423&view=logs&j=2d7fb38a-2053-50f3-a67c-09f6e91d3121&t=449937cc-3d50-56b5-5662-e489f41f1268) unexplained pytables failures started showing up. | https://api.github.com/repos/pandas-dev/pandas/pulls/38041 | 2020-11-24T17:26:35Z | 2020-11-25T03:58:19Z | 2020-11-25T03:58:19Z | 2020-11-25T15:28:59Z |
BUG: Series constructor drops nanoseconds of Timedelta scalar | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 9168041a4f474..9f6ef102b6a29 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -575,6 +575,7 @@ Datetimelike
- Bug in :meth:`Series.isin` with ``datetime64[ns]`` dtype and :meth:`.DatetimeIndex.isin` incorrectly casting integers to datetimes (:issue:`36621`)
- Bug in :meth:`Series.isin` with ``datetime64[ns]`` dtype and :meth:`.DatetimeIndex.isin` failing to consider timezone-aware and timezone-naive datetimes as always different (:issue:`35728`)
- Bug in :meth:`Series.isin` with ``PeriodDtype`` dtype and :meth:`PeriodIndex.isin` failing to consider arguments with different ``PeriodDtype`` as always different (:issue:`37528`)
+- Bug in :class:`DataFrame` and :class:`Series` constructors sometimes dropping nanoseconds from :class:`Timestamp` (resp. :class:`Timedelta`) ``data``, with ``dtype=datetime64[ns]`` (resp. ``timedelta64[ns]``) (:issue:`38032`)
Timedelta
^^^^^^^^^
diff --git a/pandas/conftest.py b/pandas/conftest.py
index cb5b4145855d1..f155f768f6929 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -712,6 +712,7 @@ def float_frame():
DatetimeTZDtype(tz="US/Eastern"),
),
(Timedelta(seconds=500), "timedelta64[ns]"),
+ (Timedelta(nanoseconds=1), "timedelta64[ns]"), # GH38032
]
)
def ea_scalar_and_dtype(request):
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 12974d56dacdc..326db73264d19 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1611,13 +1611,42 @@ def cast_scalar_to_array(
ndarray of shape, filled with value, of specified / inferred dtype
"""
+ # that's what the type annotation indicates
+ assert isinstance(dtype, (type(None), str, np.dtype))
+
if dtype is None:
- dtype, fill_value = infer_dtype_from_scalar(value)
+ dtype, value = infer_dtype_from_scalar(value)
else:
- fill_value = value
+ if not isinstance(dtype, np.dtype):
+ dtype = np.dtype(dtype)
+ empty = shape and not any(shape)
+ # dtype coercion when empty: sometimes yes, sometimes no?
+
+ if not empty and is_integer_dtype(dtype) and isna(value):
+ # coerce if we have nan for an integer dtype
+ dtype = np.dtype("float64")
+ elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "S"):
+ # we need to coerce to object dtype to avoid
+ # to allow numpy to take our string as a scalar value
+ dtype = np.dtype("object")
+ if not isna(value):
+ value = ensure_str(value)
+ elif dtype.kind == "m":
+ # GH38032: filling in Timedelta/Timestamp drops nanoseconds
+ if isinstance(value, Timedelta):
+ value = value.to_numpy()
+ # GH36541: filling datetime-like array directly with pd.NaT
+ # raises ValueError: cannot convert float NaN to integer
+ elif is_valid_nat_for_dtype(value, dtype):
+ value = np.timedelta64("NaT")
+ elif dtype.kind == "M":
+ if isinstance(value, Timestamp):
+ value = value.to_numpy()
+ elif is_valid_nat_for_dtype(value, dtype):
+ value = np.datetime64("NaT")
values = np.empty(shape, dtype=dtype)
- values.fill(fill_value)
+ values.fill(value)
return values
@@ -1643,26 +1672,8 @@ def construct_1d_arraylike_from_scalar(
if is_extension_array_dtype(dtype):
cls = dtype.construct_array_type()
subarr = cls._from_sequence([value] * length, dtype=dtype)
-
else:
-
- if length and is_integer_dtype(dtype) and isna(value):
- # coerce if we have nan for an integer dtype
- dtype = np.dtype("float64")
- elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "S"):
- # we need to coerce to object dtype to avoid
- # to allow numpy to take our string as a scalar value
- dtype = np.dtype("object")
- if not isna(value):
- value = ensure_str(value)
- elif dtype.kind in ["M", "m"] and is_valid_nat_for_dtype(value, dtype):
- # GH36541: can't fill array directly with pd.NaT
- # > np.empty(10, dtype="datetime64[64]").fill(pd.NaT)
- # ValueError: cannot convert float NaN to integer
- value = np.datetime64("NaT")
-
- subarr = np.empty(length, dtype=dtype)
- subarr.fill(value)
+ subarr = cast_scalar_to_array((length,), value, dtype)
return subarr
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 85e35dbb86f1c..5c3eceaaf4ffd 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3923,12 +3923,7 @@ def reindexer(value):
value, len(self.index), infer_dtype
)
else:
- # pandas\core\frame.py:3827: error: Argument 1 to
- # "cast_scalar_to_array" has incompatible type "int"; expected
- # "Tuple[Any, ...]" [arg-type]
- value = cast_scalar_to_array(
- len(self.index), value # type: ignore[arg-type]
- )
+ value = cast_scalar_to_array((len(self.index),), value)
value = maybe_cast_to_datetime(value, infer_dtype)
diff --git a/pandas/tests/dtypes/cast/test_infer_dtype.py b/pandas/tests/dtypes/cast/test_infer_dtype.py
index 157adacbdfdf7..178f9a0343e00 100644
--- a/pandas/tests/dtypes/cast/test_infer_dtype.py
+++ b/pandas/tests/dtypes/cast/test_infer_dtype.py
@@ -13,6 +13,7 @@
from pandas import (
Categorical,
Interval,
+ NaT,
Period,
Series,
Timedelta,
@@ -188,11 +189,32 @@ def test_infer_dtype_from_array(arr, expected, pandas_dtype):
(Period("2011-01-01", freq="D"), object),
],
)
-def test_cast_scalar_to_array(obj, dtype):
- shape = (3, 2)
-
+@pytest.mark.parametrize("shape", [(), (5,), (3, 2)])
+def test_cast_scalar_to_array(obj, dtype, shape):
exp = np.empty(shape, dtype=dtype)
exp.fill(obj)
- arr = cast_scalar_to_array(shape, obj, dtype=dtype)
+ arr = cast_scalar_to_array(shape, obj, dtype=np.dtype(dtype))
tm.assert_numpy_array_equal(arr, exp)
+
+
+@pytest.mark.parametrize(
+ "obj_in,dtype_in,obj_out,dtype_out",
+ [
+ (NaT, "timedelta64[ns]", np.timedelta64("NaT"), "timedelta64[ns]"),
+ (Timedelta(1), "timedelta64[ns]", 1, "timedelta64[ns]"),
+ (NaT, "datetime64[ns]", np.datetime64("NaT"), "datetime64[ns]"),
+ (Timestamp(1), "datetime64[ns]", 1, "datetime64[ns]"),
+ (Timestamp(1, tz="US/Eastern"), "datetime64[ns]", 1, "datetime64[ns]"),
+ (np.nan, np.int64, np.nan, np.float64),
+ ("hello", "U", "hello", object),
+ ("hello", "S", "hello", object),
+ ],
+)
+@pytest.mark.parametrize("shape", [(), (5,), (3, 2)])
+def test_cast_scalar_to_array_conversion_needed(
+ obj_in, dtype_in, obj_out, dtype_out, shape
+):
+ result = cast_scalar_to_array(shape, obj_in, dtype=np.dtype(dtype_in))
+ expected = np.full(shape, obj_out, dtype=dtype_out)
+ tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index a98723e9e31f8..f850cc140beb1 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -1934,6 +1934,21 @@ def test_constructor_datetimes_with_nulls(self, arr):
expected = Series([np.dtype("datetime64[ns]")])
tm.assert_series_equal(result, expected)
+ @pytest.mark.parametrize(
+ "scalar,dtype",
+ [
+ (Timedelta(1), "timedelta64[ns]"),
+ (Timestamp(1), "datetime64[ns]"),
+ (Timestamp(1, tz="US/Eastern"), "datetime64[ns]"),
+ ],
+ )
+ def test_constructor_timelike_nanoseconds(self, scalar, dtype):
+ # GH38032
+ df = DataFrame(scalar, index=[0], columns=[0], dtype=dtype)
+ result = df.at[0, 0].value
+ expected = scalar.value
+ assert result == expected
+
def test_constructor_for_list_with_dtypes(self):
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 5b13091470b09..3d17a797d486e 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -24,6 +24,7 @@
Period,
RangeIndex,
Series,
+ Timedelta,
Timestamp,
date_range,
isna,
@@ -1319,6 +1320,21 @@ def test_constructor_dtype_timedelta64(self):
s = Series([pd.NaT, np.nan, "1 Day"])
assert s.dtype == "timedelta64[ns]"
+ @pytest.mark.parametrize(
+ "scalar,dtype",
+ [
+ (Timedelta(1), "timedelta64[ns]"),
+ (Timestamp(1), "datetime64[ns]"),
+ (Timestamp(1, tz="US/Eastern"), "datetime64[ns]"),
+ ],
+ )
+ def test_constructor_timelike_nanoseconds(self, scalar, dtype):
+ # GH38032
+ ser = Series(scalar, index=[0], dtype=dtype)
+ result = ser[0].value
+ expected = scalar.value
+ assert result == expected
+
# GH 16406
def test_constructor_mixed_tz(self):
s = Series([Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")])
| - [x] closes #38032
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38040 | 2020-11-24T16:22:30Z | 2020-12-11T08:49:23Z | null | 2021-03-22T20:24:55Z |
Backport PR #37986 on branch 1.1.x: REGR: fix inplace operations for EAs with non-EA arg | diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst
index 323342cb43950..609c3650c8cc2 100644
--- a/doc/source/whatsnew/v1.1.5.rst
+++ b/doc/source/whatsnew/v1.1.5.rst
@@ -17,7 +17,7 @@ Fixed regressions
- Regression in addition of a timedelta-like scalar to a :class:`DatetimeIndex` raising incorrectly (:issue:`37295`)
- Fixed regression in :meth:`Series.groupby` raising when the :class:`Index` of the :class:`Series` had a tuple as its name (:issue:`37755`)
- Fixed regression in :meth:`DataFrame.loc` and :meth:`Series.loc` for ``__setitem__`` when one-dimensional tuple was given to select from :class:`MultiIndex` (:issue:`37711`)
--
+- Fixed regression in inplace operations on :class:`Series` with ``ExtensionDtype`` with NumPy dtyped operand (:issue:`37910`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py
index 6a44178e3c704..17223d6a54d4a 100644
--- a/pandas/core/ops/methods.py
+++ b/pandas/core/ops/methods.py
@@ -3,6 +3,7 @@
"""
import operator
+from pandas.core.dtypes.common import is_dtype_equal
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.ops.roperator import (
@@ -97,7 +98,7 @@ def f(self, other):
if (
self.ndim == 1
and result._indexed_same(self)
- and result.dtype == self.dtype
+ and is_dtype_equal(result.dtype, self.dtype)
):
# GH#36498 this inplace op can _actually_ be inplace.
self._values[:] = result._values
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index cb33f99d9bd91..2757766724156 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -429,6 +429,7 @@ def test_td_div_numeric_scalar(self):
_is_numpy_dev and not compat.PY39,
raises=RuntimeWarning,
reason="https://github.com/pandas-dev/pandas/issues/31992",
+ strict=False,
),
),
float("nan"),
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index ef2bafd4ea2ad..7394f15555f7b 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -687,3 +687,30 @@ def test_datetime_understood(self):
result = series - offset
expected = pd.Series(pd.to_datetime(["2011-12-26", "2011-12-27", "2011-12-28"]))
tm.assert_series_equal(result, expected)
+
+
+class TestInplaceOperations:
+ @pytest.mark.parametrize(
+ "dtype1, dtype2, dtype_expected, dtype_mul",
+ (
+ ("Int64", "Int64", "Int64", "Int64"),
+ ("float", "float", "float", "float"),
+ ("Int64", "float", "float", "float"),
+ ),
+ )
+ def test_series_inplace_ops(self, dtype1, dtype2, dtype_expected, dtype_mul):
+ # GH 37910
+
+ ser1 = Series([1], dtype=dtype1)
+ ser2 = Series([2], dtype=dtype2)
+ ser1 += ser2
+ expected = Series([3], dtype=dtype_expected)
+ tm.assert_series_equal(ser1, expected)
+
+ ser1 -= ser2
+ expected = Series([1], dtype=dtype_expected)
+ tm.assert_series_equal(ser1, expected)
+
+ ser1 *= ser2
+ expected = Series([2], dtype=dtype_mul)
+ tm.assert_series_equal(ser1, expected)
| Backport PR #37986 | https://api.github.com/repos/pandas-dev/pandas/pulls/38035 | 2020-11-24T09:36:48Z | 2020-11-25T10:52:56Z | 2020-11-25T10:52:56Z | 2020-11-25T10:53:11Z |
ENH: preserve RangeIndex in factorize | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 4c9817f3c3dc6..afd6bbb6c57e0 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -51,6 +51,7 @@
ABCExtensionArray,
ABCIndexClass,
ABCMultiIndex,
+ ABCRangeIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, na_value_for_dtype
@@ -682,7 +683,9 @@ def factorize(
na_sentinel = -1
dropna = False
- if is_extension_array_dtype(values.dtype):
+ if isinstance(values, ABCRangeIndex):
+ return values.factorize(sort=sort)
+ elif is_extension_array_dtype(values.dtype):
values = extract_array(values)
codes, uniques = values.factorize(na_sentinel=na_sentinel)
dtype = original.dtype
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 8e12f84895361..3222f44dd4415 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -1,7 +1,7 @@
from datetime import timedelta
import operator
from sys import getsizeof
-from typing import Any, List
+from typing import Any, List, Optional, Tuple
import warnings
import numpy as np
@@ -461,6 +461,16 @@ def argsort(self, *args, **kwargs) -> np.ndarray:
else:
return np.arange(len(self) - 1, -1, -1)
+ def factorize(
+ self, sort: bool = False, na_sentinel: Optional[int] = -1
+ ) -> Tuple[np.ndarray, "RangeIndex"]:
+ codes = np.arange(len(self), dtype=np.intp)
+ uniques = self
+ if sort and self.step < 0:
+ codes = codes[::-1]
+ uniques = uniques[::-1]
+ return codes, uniques
+
def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index dab7fc51f2537..753c15bde6bba 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -290,6 +290,16 @@ def test_constructor_with_generator(self):
cat = Categorical([0, 1, 2], categories=range(3))
tm.assert_categorical_equal(cat, exp)
+ def test_constructor_with_rangeindex(self):
+ # RangeIndex is preserved in Categories
+ rng = Index(range(3))
+
+ cat = Categorical(rng)
+ tm.assert_index_equal(cat.categories, rng, exact=True)
+
+ cat = Categorical([1, 2, 0], categories=rng)
+ tm.assert_index_equal(cat.categories, rng, exact=True)
+
@pytest.mark.parametrize(
"dtl",
[
diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py
index 85f3d17fdd0d4..ca6387938d747 100644
--- a/pandas/tests/indexes/multi/test_constructors.py
+++ b/pandas/tests/indexes/multi/test_constructors.py
@@ -477,6 +477,14 @@ def test_from_product_datetimeindex():
tm.assert_numpy_array_equal(mi.values, etalon)
+def test_from_product_rangeindex():
+ # RangeIndex is preserved by factorize, so preserved in levels
+ rng = Index(range(5))
+ other = ["a", "b"]
+ mi = MultiIndex.from_product([rng, other])
+ tm.assert_index_equal(mi._levels[0], rng, exact=True)
+
+
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize("f", [lambda x: x, lambda x: Series(x), lambda x: x.values])
def test_from_product_index_series_categorical(ordered, f):
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 89d0a6723c890..d836ca7a53249 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -307,6 +307,39 @@ def test_datetime64_factorize(self, writable):
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
+ @pytest.mark.parametrize("sort", [True, False])
+ def test_factorize_rangeindex(self, sort):
+ # increasing -> sort doesn't matter
+ ri = pd.RangeIndex.from_range(range(10))
+ expected = np.arange(10, dtype=np.intp), ri
+
+ result = algos.factorize(ri, sort=sort)
+ tm.assert_numpy_array_equal(result[0], expected[0])
+ tm.assert_index_equal(result[1], expected[1], exact=True)
+
+ result = ri.factorize(sort=sort)
+ tm.assert_numpy_array_equal(result[0], expected[0])
+ tm.assert_index_equal(result[1], expected[1], exact=True)
+
+ @pytest.mark.parametrize("sort", [True, False])
+ def test_factorize_rangeindex_decreasing(self, sort):
+ # decreasing -> sort matters
+ ri = pd.RangeIndex.from_range(range(10))
+ expected = np.arange(10, dtype=np.intp), ri
+
+ ri2 = ri[::-1]
+ expected = expected[0], ri2
+ if sort:
+ expected = expected[0][::-1], expected[1][::-1]
+
+ result = algos.factorize(ri2, sort=sort)
+ tm.assert_numpy_array_equal(result[0], expected[0])
+ tm.assert_index_equal(result[1], expected[1], exact=True)
+
+ result = ri2.factorize(sort=sort)
+ tm.assert_numpy_array_equal(result[0], expected[0])
+ tm.assert_index_equal(result[1], expected[1], exact=True)
+
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38034 | 2020-11-24T05:17:15Z | 2020-11-26T22:24:31Z | 2020-11-26T22:24:31Z | 2020-11-26T22:30:49Z |
BUG: NumericIndex.insert(0, False) casting to int | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 0c7cd31a10acb..d0a00e286aad5 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -621,6 +621,7 @@ Indexing
- Bug in :meth:`DataFrame.xs` ignored ``droplevel=False`` for columns (:issue:`19056`)
- Bug in :meth:`DataFrame.reindex` raising ``IndexingError`` wrongly for empty DataFrame with ``tolerance`` not None or ``method="nearest"`` (:issue:`27315`)
- Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`CategoricalIndex` using listlike indexer that contains elements that are in the index's ``categories`` but not in the index itself failing to raise ``KeyError`` (:issue:`37901`)
+- Bug on inserting a boolean label into a :class:`DataFrame` with a numeric :class:`Index` columns incorrectly casting to integer (:issue:`36319`)
- Bug in :meth:`DataFrame.iloc` and :meth:`Series.iloc` aligning objects in ``__setitem__`` (:issue:`22046`)
- Bug in :meth:`DataFrame.loc` did not raise ``KeyError`` when missing combination was given with ``slice(None)`` for remaining levels (:issue:`19556`)
- Bug in :meth:`DataFrame.loc` raising ``TypeError`` when non-integer slice was given to select values from :class:`MultiIndex` (:issue:`25165`, :issue:`24263`)
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 7778b1e264cd8..1ec0284f0620a 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -123,6 +123,12 @@ def _validate_fill_value(self, value):
raise TypeError
elif isinstance(value, str) or lib.is_complex(value):
raise TypeError
+ elif is_scalar(value) and isna(value):
+ if is_valid_nat_for_dtype(value, self.dtype):
+ value = self._na_value
+ else:
+ # NaT, np.datetime64("NaT"), np.timedelta64("NaT")
+ raise TypeError
return value
@@ -161,13 +167,10 @@ def _is_all_dates(self) -> bool:
@doc(Index.insert)
def insert(self, loc: int, item):
- # treat NA values as nans:
- if is_scalar(item) and isna(item):
- if is_valid_nat_for_dtype(item, self.dtype):
- item = self._na_value
- else:
- # NaT, np.datetime64("NaT"), np.timedelta64("NaT")
- return self.astype(object).insert(loc, item)
+ try:
+ item = self._validate_fill_value(item)
+ except TypeError:
+ return self.astype(object).insert(loc, item)
return super().insert(loc, item)
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index cd3102836422f..e4a66ea9133dd 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -304,6 +304,20 @@ def test_setitem_complete_column_with_array(self):
)
tm.assert_frame_equal(df, expected)
+ @pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
+ def test_setitem_bool_with_numeric_index(self, dtype):
+ # GH#36319
+ cols = Index([1, 2, 3], dtype=dtype)
+ df = DataFrame(np.random.randn(3, 3), columns=cols)
+
+ df[False] = ["a", "b", "c"]
+
+ expected_cols = Index([1, 2, 3, False], dtype=object)
+ if dtype == "f8":
+ expected_cols = Index([1.0, 2.0, 3.0, False], dtype=object)
+
+ tm.assert_index_equal(df.columns, expected_cols)
+
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index fd6f6fbc6a4ba..bde7e9991bbed 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -393,7 +393,7 @@ def test_insert_index_object(self, insert, coerced_val, coerced_dtype):
[
(1, 1, np.int64),
(1.1, 1.1, np.float64),
- (False, 0, np.int64),
+ (False, False, object), # GH#36319
("x", "x", object),
],
)
@@ -409,7 +409,7 @@ def test_insert_index_int64(self, insert, coerced_val, coerced_dtype):
[
(1, 1.0, np.float64),
(1.1, 1.1, np.float64),
- (False, 0.0, np.float64),
+ (False, False, object), # GH#36319
("x", "x", object),
],
)
| - [x] closes #36319
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
xref #16277
| https://api.github.com/repos/pandas-dev/pandas/pulls/38030 | 2020-11-24T04:02:27Z | 2020-11-26T17:26:00Z | 2020-11-26T17:26:00Z | 2020-11-26T19:00:04Z |
BUG: unstack with missing levels results in incorrect index names | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 53f254aee2e0e..811b31edf301a 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -267,7 +267,7 @@ Groupby/resample/rolling
Reshaping
^^^^^^^^^
--
+- Bug in :meth:`DataFrame.unstack` with missing levels led to incorrect index names (:issue:`37510`)
-
Sparse
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 5312dfe84cfd8..bb8f344d4f0f8 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1979,6 +1979,9 @@ def remove_unused_levels(self):
has_na = int(len(uniques) and (uniques[0] == -1))
if len(uniques) != len(lev) + has_na:
+
+ if lev.isna().any() and len(uniques) == len(lev):
+ break
# We have unused levels
changed = True
diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py
index 9825bcb0b5d57..e8ae9f6584ad6 100644
--- a/pandas/tests/frame/test_stack_unstack.py
+++ b/pandas/tests/frame/test_stack_unstack.py
@@ -1907,3 +1907,27 @@ def test_unstack_with_missing_int_cast_to_float(self):
),
)
tm.assert_frame_equal(result, expected)
+
+ def test_unstack_with_level_has_nan(self):
+ # GH 37510
+ df1 = DataFrame(
+ {
+ "L1": [1, 2, 3, 4],
+ "L2": [3, 4, 1, 2],
+ "L3": [1, 1, 1, 1],
+ "x": [1, 2, 3, 4],
+ }
+ )
+ df1 = df1.set_index(["L1", "L2", "L3"])
+ new_levels = ["n1", "n2", "n3", None]
+ df1.index = df1.index.set_levels(levels=new_levels, level="L1")
+ df1.index = df1.index.set_levels(levels=new_levels, level="L2")
+
+ result = df1.unstack("L3")[("x", 1)].sort_index().index
+ expected = MultiIndex(
+ levels=[["n1", "n2", "n3", None], ["n1", "n2", "n3", None]],
+ codes=[[0, 1, 2, 3], [2, 3, 0, 1]],
+ names=["L1", "L2"],
+ )
+
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py
index e5d178581136b..3de78c5e982d3 100644
--- a/pandas/tests/indexes/multi/test_sorting.py
+++ b/pandas/tests/indexes/multi/test_sorting.py
@@ -7,6 +7,7 @@
from pandas import CategoricalIndex, DataFrame, Index, MultiIndex, RangeIndex
import pandas._testing as tm
+from pandas.core.indexes.frozen import FrozenList
def test_sortlevel(idx):
@@ -271,3 +272,13 @@ def test_argsort(idx):
result = idx.argsort()
expected = idx.values.argsort()
tm.assert_numpy_array_equal(result, expected)
+
+
+def test_remove_unused_levels_with_nan():
+ # GH 37510
+ idx = Index([(1, np.nan), (3, 4)]).rename(["id1", "id2"])
+ idx = idx.set_levels(["a", np.nan], level="id1")
+ idx = idx.remove_unused_levels()
+ result = idx.levels
+ expected = FrozenList([["a", np.nan], [4]])
+ assert str(result) == str(expected)
| - [x] closes #37510
- [x] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38029 | 2020-11-24T03:07:32Z | 2020-12-31T19:11:28Z | 2020-12-31T19:11:28Z | 2021-01-01T01:35:55Z |
BUG: RangeIndex.difference with mismatched step | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 08edc7531bcd6..4f1f54af2dd6a 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -772,6 +772,7 @@ Other
- Bug in :meth:`Index.union` behaving differently depending on whether operand is an :class:`Index` or other list-like (:issue:`36384`)
- Passing an array with 2 or more dimensions to the :class:`Series` constructor now raises the more specific ``ValueError`` rather than a bare ``Exception`` (:issue:`35744`)
- Bug in ``dir`` where ``dir(obj)`` wouldn't show attributes defined on the instance for pandas objects (:issue:`37173`)
+- Bug in :meth:`RangeIndex.difference` returning :class:`Int64Index` in some cases where it should return :class:`RangeIndex` (:issue:`38028`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 8e12f84895361..8e5725fd72544 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -660,13 +660,17 @@ def difference(self, other, sort=None):
if not isinstance(overlap, RangeIndex):
# We wont end up with RangeIndex, so fall back
return super().difference(other, sort=sort)
+ if overlap.step != first.step:
+ # In some cases we might be able to get a RangeIndex back,
+ # but not worth the effort.
+ return super().difference(other, sort=sort)
if overlap[0] == first.start:
# The difference is everything after the intersection
new_rng = range(overlap[-1] + first.step, first.stop, first.step)
- elif overlap[-1] == first.stop:
+ elif overlap[-1] == first[-1]:
# The difference is everything before the intersection
- new_rng = range(first.start, overlap[0] - first.step, first.step)
+ new_rng = range(first.start, overlap[0], first.step)
else:
# The difference is not range-like
return super().difference(other, sort=sort)
diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py
index 9c9f5dbdf7e7f..1fd41b017221b 100644
--- a/pandas/tests/indexes/ranges/test_setops.py
+++ b/pandas/tests/indexes/ranges/test_setops.py
@@ -247,21 +247,38 @@ def test_difference(self):
result = obj.difference(obj)
expected = RangeIndex.from_range(range(0), name="foo")
- tm.assert_index_equal(result, expected)
+ tm.assert_index_equal(result, expected, exact=True)
result = obj.difference(expected.rename("bar"))
- tm.assert_index_equal(result, obj.rename(None))
+ tm.assert_index_equal(result, obj.rename(None), exact=True)
result = obj.difference(obj[:3])
- tm.assert_index_equal(result, obj[3:])
+ tm.assert_index_equal(result, obj[3:], exact=True)
result = obj.difference(obj[-3:])
- tm.assert_index_equal(result, obj[:-3])
+ tm.assert_index_equal(result, obj[:-3], exact=True)
+
+ result = obj[::-1].difference(obj[-3:])
+ tm.assert_index_equal(result, obj[:-3][::-1], exact=True)
+
+ result = obj[::-1].difference(obj[-3:][::-1])
+ tm.assert_index_equal(result, obj[:-3][::-1], exact=True)
result = obj.difference(obj[2:6])
expected = Int64Index([1, 2, 7, 8, 9], name="foo")
tm.assert_index_equal(result, expected)
+ def test_difference_mismatched_step(self):
+ obj = RangeIndex.from_range(range(1, 10), name="foo")
+
+ result = obj.difference(obj[::2])
+ expected = obj[1::2]._int64index
+ tm.assert_index_equal(result, expected, exact=True)
+
+ result = obj.difference(obj[1::2])
+ expected = obj[::2]._int64index
+ tm.assert_index_equal(result, expected, exact=True)
+
def test_symmetric_difference(self):
# GH#12034 Cases where we operate against another RangeIndex and may
# get back another RangeIndex
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38028 | 2020-11-24T02:59:02Z | 2020-11-26T22:26:51Z | 2020-11-26T22:26:51Z | 2020-11-26T22:33:20Z |
REF: Share code between NumericIndex subclasses | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index b5900ead246f3..7827cd9bfe332 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5523,6 +5523,17 @@ def _cmp_method(self, other, op):
"""
Wrapper used to dispatch comparison operations.
"""
+ if self.is_(other):
+ # fastpath
+ if op in {operator.eq, operator.le, operator.ge}:
+ arr = np.ones(len(self), dtype=bool)
+ if self._can_hold_na and not isinstance(self, ABCMultiIndex):
+ # TODO: should set MultiIndex._can_hold_na = False?
+ arr[self.isna()] = False
+ return arr
+ elif op in {operator.ne, operator.lt, operator.gt}:
+ return np.zeros(len(self), dtype=bool)
+
if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)):
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 7778b1e264cd8..8a1bcc7146616 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -1,4 +1,3 @@
-import operator
from typing import Any
import warnings
@@ -6,7 +5,7 @@
from pandas._libs import index as libindex, lib
from pandas._typing import Dtype, Label
-from pandas.util._decorators import cache_readonly, doc
+from pandas.util._decorators import doc
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
@@ -188,18 +187,6 @@ def _union(self, other, sort):
else:
return super()._union(other, sort)
- def _cmp_method(self, other, op):
- if self.is_(other): # fastpath
- if op in {operator.eq, operator.le, operator.ge}:
- arr = np.ones(len(self), dtype=bool)
- if self._can_hold_na:
- arr[self.isna()] = False
- return arr
- elif op in {operator.ne, operator.lt, operator.gt}:
- return np.zeros(len(self), dtype=bool)
-
- return super()._cmp_method(other, op)
-
_num_index_shared_docs[
"class_descr"
@@ -243,6 +230,20 @@ class IntegerIndex(NumericIndex):
"""
_default_dtype: np.dtype
+ _can_hold_na = False
+
+ @classmethod
+ def _assert_safe_casting(cls, data, subarr):
+ """
+ Ensure incoming data can be represented with matching signed-ness.
+ """
+ if data.dtype.kind != cls._default_dtype.kind:
+ if not np.array_equal(data, subarr):
+ raise TypeError("Unsafe NumPy casting, you must explicitly cast")
+
+ def _can_union_without_object_cast(self, other) -> bool:
+ # See GH#26778, further casting may occur in NumericIndex._union
+ return other.dtype == "f8" or other.dtype == self.dtype
def __contains__(self, key) -> bool:
"""
@@ -278,23 +279,9 @@ class Int64Index(IntegerIndex):
__doc__ = _num_index_shared_docs["class_descr"] % _int64_descr_args
_typ = "int64index"
- _can_hold_na = False
_engine_type = libindex.Int64Engine
_default_dtype = np.dtype(np.int64)
- @classmethod
- def _assert_safe_casting(cls, data, subarr):
- """
- Ensure incoming data can be represented as ints.
- """
- if not issubclass(data.dtype.type, np.signedinteger):
- if not np.array_equal(data, subarr):
- raise TypeError("Unsafe NumPy casting, you must explicitly cast")
-
- def _can_union_without_object_cast(self, other) -> bool:
- # See GH#26778, further casting may occur in NumericIndex._union
- return other.dtype == "f8" or other.dtype == self.dtype
-
_uint64_descr_args = dict(
klass="UInt64Index", ltype="unsigned integer", dtype="uint64", extra=""
@@ -305,7 +292,6 @@ class UInt64Index(IntegerIndex):
__doc__ = _num_index_shared_docs["class_descr"] % _uint64_descr_args
_typ = "uint64index"
- _can_hold_na = False
_engine_type = libindex.UInt64Engine
_default_dtype = np.dtype(np.uint64)
@@ -324,21 +310,6 @@ def _convert_arr_indexer(self, keyarr):
return com.asarray_tuplesafe(keyarr, dtype=dtype)
- # ----------------------------------------------------------------
-
- @classmethod
- def _assert_safe_casting(cls, data, subarr):
- """
- Ensure incoming data can be represented as uints.
- """
- if not issubclass(data.dtype.type, np.unsignedinteger):
- if not np.array_equal(data, subarr):
- raise TypeError("Unsafe NumPy casting, you must explicitly cast")
-
- def _can_union_without_object_cast(self, other) -> bool:
- # See GH#26778, further casting may occur in NumericIndex._union
- return other.dtype == "f8" or other.dtype == self.dtype
-
_float64_descr_args = dict(
klass="Float64Index", dtype="float64", ltype="float", extra=""
@@ -350,7 +321,7 @@ class Float64Index(NumericIndex):
_typ = "float64index"
_engine_type = libindex.Float64Engine
- _default_dtype = np.float64
+ _default_dtype = np.dtype(np.float64)
@property
def inferred_type(self) -> str:
@@ -429,10 +400,6 @@ def __contains__(self, other: Any) -> bool:
return is_float(other) and np.isnan(other) and self.hasnans
- @cache_readonly
- def is_unique(self) -> bool:
- return super().is_unique and self._nan_idxs.size < 2
-
def _can_union_without_object_cast(self, other) -> bool:
# See GH#26778, further casting may occur in NumericIndex._union
return is_numeric_dtype(other.dtype)
| https://api.github.com/repos/pandas-dev/pandas/pulls/38027 | 2020-11-24T00:03:27Z | 2020-11-26T16:20:06Z | 2020-11-26T16:20:06Z | 2020-11-26T16:26:45Z | |
Bug in loc raising KeyError when MultiIndex columns has only one level | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 48561b50f66ae..ad5af5df710ba 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -621,6 +621,7 @@ Indexing
- Bug in :meth:`DataFrame.iloc` and :meth:`Series.iloc` aligning objects in ``__setitem__`` (:issue:`22046`)
- Bug in :meth:`DataFrame.loc` did not raise ``KeyError`` when missing combination was given with ``slice(None)`` for remaining levels (:issue:`19556`)
- Bug in :meth:`DataFrame.loc` raising ``TypeError`` when non-integer slice was given to select values from :class:`MultiIndex` (:issue:`25165`, :issue:`24263`)
+- Bug in :meth:`DataFrame.loc` and :meth:`DataFrame.__getitem__` raising ``KeyError`` when columns were :class:`MultiIndex` with only one level (:issue:`29749`)
Missing
^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6f6d94f0e9f8e..5b87c4ea8b9cc 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2935,7 +2935,7 @@ def __getitem__(self, key):
if is_hashable(key):
# shortcut if the key is in columns
if self.columns.is_unique and key in self.columns:
- if self.columns.nlevels > 1:
+ if isinstance(self.columns, MultiIndex):
return self._getitem_multilevel(key)
return self._get_item_cache(key)
diff --git a/pandas/tests/frame/indexing/test_getitem.py b/pandas/tests/frame/indexing/test_getitem.py
index 868df82a43a91..6c6b4e002644c 100644
--- a/pandas/tests/frame/indexing/test_getitem.py
+++ b/pandas/tests/frame/indexing/test_getitem.py
@@ -96,6 +96,17 @@ def test_getitem_callable(self, float_frame):
expected = float_frame.iloc[[0, 2], :]
tm.assert_frame_equal(result, expected)
+ def test_loc_multiindex_columns_one_level(self):
+ # GH#29749
+ df = DataFrame([[1, 2]], columns=[["a", "b"]])
+ expected = DataFrame([1], columns=[["a"]])
+
+ result = df["a"]
+ tm.assert_frame_equal(result, expected)
+
+ result = df.loc[:, "a"]
+ tm.assert_frame_equal(result, expected)
+
class TestGetitemBooleanMask:
def test_getitem_bool_mask_categorical_index(self):
| - [x] closes #29749
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
@jbrockmendel I think we should check for MultiIndex instead of level number here | https://api.github.com/repos/pandas-dev/pandas/pulls/38026 | 2020-11-23T23:07:42Z | 2020-11-25T03:18:36Z | 2020-11-25T03:18:36Z | 2020-11-25T18:50:52Z |
Revert "REF: back IntervalArray by a single ndarray (#37047)" | diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 46a7351443883..efb66c9a47a97 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1,7 +1,7 @@
import operator
from operator import le, lt
import textwrap
-from typing import TYPE_CHECKING, Optional, Sequence, Tuple, Type, TypeVar, Union, cast
+from typing import Sequence, Type, TypeVar
import numpy as np
@@ -14,7 +14,6 @@
intervals_to_interval_bounds,
)
from pandas._libs.missing import NA
-from pandas._typing import ArrayLike, Dtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender
@@ -22,9 +21,7 @@
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_any_dtype,
- is_dtype_equal,
is_float_dtype,
- is_integer,
is_integer_dtype,
is_interval_dtype,
is_list_like,
@@ -52,10 +49,6 @@
from pandas.core.indexes.base import ensure_index
from pandas.core.ops import invalid_comparison, unpack_zerodim_and_defer
-if TYPE_CHECKING:
- from pandas import Index
- from pandas.core.arrays import DatetimeArray, TimedeltaArray
-
IntervalArrayT = TypeVar("IntervalArrayT", bound="IntervalArray")
_interval_shared_docs = {}
@@ -182,17 +175,6 @@ def __new__(
left = data._left
right = data._right
closed = closed or data.closed
-
- if dtype is None or data.dtype == dtype:
- # This path will preserve id(result._combined)
- # TODO: could also validate dtype before going to simple_new
- combined = data._combined
- if copy:
- combined = combined.copy()
- result = cls._simple_new(combined, closed=closed)
- if verify_integrity:
- result._validate()
- return result
else:
# don't allow scalars
@@ -210,22 +192,83 @@ def __new__(
)
closed = closed or infer_closed
- closed = closed or "right"
- left, right = _maybe_cast_inputs(left, right, copy, dtype)
- combined = _get_combined_data(left, right)
- result = cls._simple_new(combined, closed=closed)
- if verify_integrity:
- result._validate()
- return result
+ return cls._simple_new(
+ left,
+ right,
+ closed,
+ copy=copy,
+ dtype=dtype,
+ verify_integrity=verify_integrity,
+ )
@classmethod
- def _simple_new(cls, data, closed="right"):
+ def _simple_new(
+ cls, left, right, closed=None, copy=False, dtype=None, verify_integrity=True
+ ):
result = IntervalMixin.__new__(cls)
- result._combined = data
- result._left = data[:, 0]
- result._right = data[:, 1]
+ closed = closed or "right"
+ left = ensure_index(left, copy=copy)
+ right = ensure_index(right, copy=copy)
+
+ if dtype is not None:
+ # GH 19262: dtype must be an IntervalDtype to override inferred
+ dtype = pandas_dtype(dtype)
+ if not is_interval_dtype(dtype):
+ msg = f"dtype must be an IntervalDtype, got {dtype}"
+ raise TypeError(msg)
+ elif dtype.subtype is not None:
+ left = left.astype(dtype.subtype)
+ right = right.astype(dtype.subtype)
+
+ # coerce dtypes to match if needed
+ if is_float_dtype(left) and is_integer_dtype(right):
+ right = right.astype(left.dtype)
+ elif is_float_dtype(right) and is_integer_dtype(left):
+ left = left.astype(right.dtype)
+
+ if type(left) != type(right):
+ msg = (
+ f"must not have differing left [{type(left).__name__}] and "
+ f"right [{type(right).__name__}] types"
+ )
+ raise ValueError(msg)
+ elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype):
+ # GH 19016
+ msg = (
+ "category, object, and string subtypes are not supported "
+ "for IntervalArray"
+ )
+ raise TypeError(msg)
+ elif isinstance(left, ABCPeriodIndex):
+ msg = "Period dtypes are not supported, use a PeriodIndex instead"
+ raise ValueError(msg)
+ elif isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz):
+ msg = (
+ "left and right must have the same time zone, got "
+ f"'{left.tz}' and '{right.tz}'"
+ )
+ raise ValueError(msg)
+
+ # For dt64/td64 we want DatetimeArray/TimedeltaArray instead of ndarray
+ from pandas.core.ops.array_ops import maybe_upcast_datetimelike_array
+
+ left = maybe_upcast_datetimelike_array(left)
+ left = extract_array(left, extract_numpy=True)
+ right = maybe_upcast_datetimelike_array(right)
+ right = extract_array(right, extract_numpy=True)
+
+ lbase = getattr(left, "_ndarray", left).base
+ rbase = getattr(right, "_ndarray", right).base
+ if lbase is not None and lbase is rbase:
+ # If these share data, then setitem could corrupt our IA
+ right = right.copy()
+
+ result._left = left
+ result._right = right
result._closed = closed
+ if verify_integrity:
+ result._validate()
return result
@classmethod
@@ -360,16 +403,10 @@ def from_breaks(cls, breaks, closed="right", copy=False, dtype=None):
def from_arrays(cls, left, right, closed="right", copy=False, dtype=None):
left = maybe_convert_platform_interval(left)
right = maybe_convert_platform_interval(right)
- if len(left) != len(right):
- raise ValueError("left and right must have the same length")
- closed = closed or "right"
- left, right = _maybe_cast_inputs(left, right, copy, dtype)
- combined = _get_combined_data(left, right)
-
- result = cls._simple_new(combined, closed)
- result._validate()
- return result
+ return cls._simple_new(
+ left, right, closed, copy=copy, dtype=dtype, verify_integrity=True
+ )
_interval_shared_docs["from_tuples"] = textwrap.dedent(
"""
@@ -475,6 +512,19 @@ def _validate(self):
msg = "left side of interval must be <= right side"
raise ValueError(msg)
+ def _shallow_copy(self, left, right):
+ """
+ Return a new IntervalArray with the replacement attributes
+
+ Parameters
+ ----------
+ left : Index
+ Values to be used for the left-side of the intervals.
+ right : Index
+ Values to be used for the right-side of the intervals.
+ """
+ return self._simple_new(left, right, closed=self.closed, verify_integrity=False)
+
# ---------------------------------------------------------------------
# Descriptive
@@ -502,20 +552,18 @@ def __len__(self) -> int:
def __getitem__(self, key):
key = check_array_indexer(self, key)
+ left = self._left[key]
+ right = self._right[key]
- result = self._combined[key]
-
- if is_integer(key):
- left, right = result[0], result[1]
- if isna(left):
+ if not isinstance(left, (np.ndarray, ExtensionArray)):
+ # scalar
+ if is_scalar(left) and isna(left):
return self._fill_value
return Interval(left, right, self.closed)
-
- # TODO: need to watch out for incorrectly-reducing getitem
- if np.ndim(result) > 2:
+ if np.ndim(left) > 1:
# GH#30588 multi-dimensional indexer disallowed
raise ValueError("multi-dimensional indexing not allowed")
- return type(self)._simple_new(result, closed=self.closed)
+ return self._shallow_copy(left, right)
def __setitem__(self, key, value):
value_left, value_right = self._validate_setitem_value(value)
@@ -673,8 +721,7 @@ def fillna(self, value=None, method=None, limit=None):
left = self.left.fillna(value=value_left)
right = self.right.fillna(value=value_right)
- combined = _get_combined_data(left, right)
- return type(self)._simple_new(combined, closed=self.closed)
+ return self._shallow_copy(left, right)
def astype(self, dtype, copy=True):
"""
@@ -716,9 +763,7 @@ def astype(self, dtype, copy=True):
f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible"
)
raise TypeError(msg) from err
- # TODO: do astype directly on self._combined
- combined = _get_combined_data(new_left, new_right)
- return type(self)._simple_new(combined, closed=self.closed)
+ return self._shallow_copy(new_left, new_right)
elif is_categorical_dtype(dtype):
return Categorical(np.asarray(self), dtype=dtype)
elif isinstance(dtype, StringDtype):
@@ -761,11 +806,9 @@ def _concat_same_type(
raise ValueError("Intervals must all be closed on the same side.")
closed = closed.pop()
- # TODO: will this mess up on dt64tz?
left = np.concatenate([interval.left for interval in to_concat])
right = np.concatenate([interval.right for interval in to_concat])
- combined = _get_combined_data(left, right) # TODO: 1-stage concat
- return cls._simple_new(combined, closed=closed)
+ return cls._simple_new(left, right, closed=closed, copy=False)
def copy(self: IntervalArrayT) -> IntervalArrayT:
"""
@@ -775,8 +818,11 @@ def copy(self: IntervalArrayT) -> IntervalArrayT:
-------
IntervalArray
"""
- combined = self._combined.copy()
- return type(self)._simple_new(combined, closed=self.closed)
+ left = self._left.copy()
+ right = self._right.copy()
+ closed = self.closed
+ # TODO: Could skip verify_integrity here.
+ return type(self).from_arrays(left, right, closed=closed)
def isna(self) -> np.ndarray:
return isna(self._left)
@@ -869,8 +915,7 @@ def take(self, indices, *, allow_fill=False, fill_value=None, axis=None, **kwarg
self._right, indices, allow_fill=allow_fill, fill_value=fill_right
)
- combined = _get_combined_data(left_take, right_take)
- return type(self)._simple_new(combined, closed=self.closed)
+ return self._shallow_copy(left_take, right_take)
def _validate_listlike(self, value):
# list-like of intervals
@@ -1183,7 +1228,10 @@ def set_closed(self, closed):
if closed not in VALID_CLOSED:
msg = f"invalid option for 'closed': {closed}"
raise ValueError(msg)
- return type(self)._simple_new(self._combined, closed=closed)
+
+ return type(self)._simple_new(
+ left=self._left, right=self._right, closed=closed, verify_integrity=False
+ )
_interval_shared_docs[
"is_non_overlapping_monotonic"
@@ -1324,8 +1372,9 @@ def to_tuples(self, na_tuple=True):
@Appender(_extension_array_shared_docs["repeat"] % _shared_docs_kwargs)
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
- combined = self._combined.repeat(repeats, 0)
- return type(self)._simple_new(combined, closed=self.closed)
+ left_repeat = self.left.repeat(repeats)
+ right_repeat = self.right.repeat(repeats)
+ return self._shallow_copy(left=left_repeat, right=right_repeat)
_interval_shared_docs["contains"] = textwrap.dedent(
"""
@@ -1408,101 +1457,3 @@ def maybe_convert_platform_interval(values):
values = np.asarray(values)
return maybe_convert_platform(values)
-
-
-def _maybe_cast_inputs(
- left_orig: Union["Index", ArrayLike],
- right_orig: Union["Index", ArrayLike],
- copy: bool,
- dtype: Optional[Dtype],
-) -> Tuple["Index", "Index"]:
- left = ensure_index(left_orig, copy=copy)
- right = ensure_index(right_orig, copy=copy)
-
- if dtype is not None:
- # GH#19262: dtype must be an IntervalDtype to override inferred
- dtype = pandas_dtype(dtype)
- if not is_interval_dtype(dtype):
- msg = f"dtype must be an IntervalDtype, got {dtype}"
- raise TypeError(msg)
- dtype = cast(IntervalDtype, dtype)
- if dtype.subtype is not None:
- left = left.astype(dtype.subtype)
- right = right.astype(dtype.subtype)
-
- # coerce dtypes to match if needed
- if is_float_dtype(left) and is_integer_dtype(right):
- right = right.astype(left.dtype)
- elif is_float_dtype(right) and is_integer_dtype(left):
- left = left.astype(right.dtype)
-
- if type(left) != type(right):
- msg = (
- f"must not have differing left [{type(left).__name__}] and "
- f"right [{type(right).__name__}] types"
- )
- raise ValueError(msg)
- elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype):
- # GH#19016
- msg = (
- "category, object, and string subtypes are not supported "
- "for IntervalArray"
- )
- raise TypeError(msg)
- elif isinstance(left, ABCPeriodIndex):
- msg = "Period dtypes are not supported, use a PeriodIndex instead"
- raise ValueError(msg)
- elif isinstance(left, ABCDatetimeIndex) and not is_dtype_equal(
- left.dtype, right.dtype
- ):
- left_arr = cast("DatetimeArray", left._data)
- right_arr = cast("DatetimeArray", right._data)
- msg = (
- "left and right must have the same time zone, got "
- f"'{left_arr.tz}' and '{right_arr.tz}'"
- )
- raise ValueError(msg)
-
- return left, right
-
-
-def _get_combined_data(
- left: Union["Index", ArrayLike], right: Union["Index", ArrayLike]
-) -> Union[np.ndarray, "DatetimeArray", "TimedeltaArray"]:
- # For dt64/td64 we want DatetimeArray/TimedeltaArray instead of ndarray
- from pandas.core.ops.array_ops import maybe_upcast_datetimelike_array
-
- left = maybe_upcast_datetimelike_array(left)
- left = extract_array(left, extract_numpy=True)
- right = maybe_upcast_datetimelike_array(right)
- right = extract_array(right, extract_numpy=True)
-
- lbase = getattr(left, "_ndarray", left).base
- rbase = getattr(right, "_ndarray", right).base
- if lbase is not None and lbase is rbase:
- # If these share data, then setitem could corrupt our IA
- right = right.copy()
-
- if isinstance(left, np.ndarray):
- assert isinstance(right, np.ndarray) # for mypy
- combined = np.concatenate(
- [left.reshape(-1, 1), right.reshape(-1, 1)],
- axis=1,
- )
- else:
- # error: Item "type" of "Union[Type[Index], Type[ExtensionArray]]" has
- # no attribute "_concat_same_type" [union-attr]
-
- # error: Unexpected keyword argument "axis" for "_concat_same_type" of
- # "ExtensionArray" [call-arg]
-
- # error: Item "Index" of "Union[Index, ExtensionArray]" has no
- # attribute "reshape" [union-attr]
-
- # error: Item "ExtensionArray" of "Union[Index, ExtensionArray]" has no
- # attribute "reshape" [union-attr]
- combined = type(left)._concat_same_type( # type: ignore[union-attr,call-arg]
- [left.reshape(-1, 1), right.reshape(-1, 1)], # type: ignore[union-attr]
- axis=1,
- )
- return combined
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 0a10191bfac52..98752a21e44a2 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -872,7 +872,7 @@ def delete(self, loc):
"""
new_left = self.left.delete(loc)
new_right = self.right.delete(loc)
- result = IntervalArray.from_arrays(new_left, new_right, closed=self.closed)
+ result = self._data._shallow_copy(new_left, new_right)
return type(self)._simple_new(result, name=self.name)
def insert(self, loc, item):
@@ -894,7 +894,7 @@ def insert(self, loc, item):
new_left = self.left.insert(loc, left_insert)
new_right = self.right.insert(loc, right_insert)
- result = IntervalArray.from_arrays(new_left, new_right, closed=self.closed)
+ result = self._data._shallow_copy(new_left, new_right)
return type(self)._simple_new(result, name=self.name)
# --------------------------------------------------------------------
diff --git a/pandas/tests/base/test_conversion.py b/pandas/tests/base/test_conversion.py
index 24e88824088be..63280f5ccf8cd 100644
--- a/pandas/tests/base/test_conversion.py
+++ b/pandas/tests/base/test_conversion.py
@@ -241,7 +241,7 @@ def test_numpy_array_all_dtypes(any_numpy_dtype):
(pd.Categorical(["a", "b"]), "_codes"),
(pd.core.arrays.period_array(["2000", "2001"], freq="D"), "_data"),
(pd.core.arrays.integer_array([0, np.nan]), "_data"),
- (IntervalArray.from_breaks([0, 1]), "_combined"),
+ (IntervalArray.from_breaks([0, 1]), "_left"),
(SparseArray([0, 1]), "_sparse_values"),
(DatetimeArray(np.array([1, 2], dtype="datetime64[ns]")), "_data"),
# tz-aware Datetime
diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py
index c0ca0b415ba8e..aec7de549744f 100644
--- a/pandas/tests/indexes/interval/test_constructors.py
+++ b/pandas/tests/indexes/interval/test_constructors.py
@@ -266,11 +266,7 @@ def test_left_right_dont_share_data(self):
# GH#36310
breaks = np.arange(5)
result = IntervalIndex.from_breaks(breaks)._data
- left = result._left
- right = result._right
-
- left[:] = 10000
- assert not (right == 10000).any()
+ assert result._left.base is None or result._left.base is not result._right.base
class TestFromTuples(Base):
| This reverts commit 9cb372376fc78aa66e2559de919592007b74cfaa.
See discussion in https://github.com/pandas-dev/pandas/pull/37047/
@jbrockmendel can you give this a check? (there were already several conflicts) | https://api.github.com/repos/pandas-dev/pandas/pulls/38024 | 2020-11-23T20:32:49Z | 2020-11-24T17:35:58Z | 2020-11-24T17:35:58Z | 2020-11-24T19:22:32Z |
ENH: support 2D in DatetimeArray._from_sequence | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 7c6b38d9114ab..effce6fc464b0 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -2065,20 +2065,24 @@ def objects_to_datetime64ns(
# if str-dtype, convert
data = np.array(data, copy=False, dtype=np.object_)
+ flags = data.flags
+ order = "F" if flags.f_contiguous else "C"
try:
result, tz_parsed = tslib.array_to_datetime(
- data,
+ data.ravel("K"),
errors=errors,
utc=utc,
dayfirst=dayfirst,
yearfirst=yearfirst,
require_iso8601=require_iso8601,
)
+ result = result.reshape(data.shape, order=order)
except ValueError as e:
try:
- values, tz_parsed = conversion.datetime_to_datetime64(data)
+ values, tz_parsed = conversion.datetime_to_datetime64(data.ravel("K"))
# If tzaware, these values represent unix timestamps, so we
# return them as i8 to distinguish from wall times
+ values = values.reshape(data.shape, order=order)
return values.view("i8"), tz_parsed
except (ValueError, TypeError):
raise e
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 1d8ee9cf2b73b..4addc0536848f 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -465,6 +465,24 @@ def test_tz_dtype_matches(self):
result, _, _ = sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="US/Central"))
tm.assert_numpy_array_equal(arr._data, result)
+ @pytest.mark.parametrize("order", ["F", "C"])
+ def test_2d(self, order):
+ dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific")
+ arr = np.array(dti, dtype=object).reshape(3, 2)
+ if order == "F":
+ arr = arr.T
+
+ res = sequence_to_dt64ns(arr)
+ expected = sequence_to_dt64ns(arr.ravel())
+
+ tm.assert_numpy_array_equal(res[0].ravel(), expected[0])
+ assert res[1] == expected[1]
+ assert res[2] == expected[2]
+
+ res = DatetimeArray._from_sequence(arr)
+ expected = DatetimeArray._from_sequence(arr.ravel()).reshape(arr.shape)
+ tm.assert_datetime_array_equal(res, expected)
+
class TestReductions:
@pytest.fixture
| Broken off from a branch that fixes #37682, the constraint on which is that _validate_setitem_value needs to handle 2D. | https://api.github.com/repos/pandas-dev/pandas/pulls/38021 | 2020-11-23T18:01:58Z | 2020-12-17T13:52:44Z | 2020-12-17T13:52:44Z | 2020-12-17T15:44:46Z |
BUG: algos.isin numeric vs datetimelike | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index b79905796f7cd..c315a5c03256c 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -49,7 +49,6 @@
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
- ABCIndex,
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
@@ -60,7 +59,7 @@
from pandas.core.indexers import validate_indices
if TYPE_CHECKING:
- from pandas import Categorical, DataFrame, Series
+ from pandas import Categorical, DataFrame, Index, Series
_shared_docs: Dict[str, str] = {}
@@ -69,7 +68,7 @@
# dtype access #
# --------------- #
def _ensure_data(
- values, dtype: Optional[DtypeObj] = None
+ values: ArrayLike, dtype: Optional[DtypeObj] = None
) -> Tuple[np.ndarray, DtypeObj]:
"""
routine to ensure that our data is of the correct
@@ -95,6 +94,12 @@ def _ensure_data(
pandas_dtype : np.dtype or ExtensionDtype
"""
+ if dtype is not None:
+ # We only have non-None dtype when called from `isin`, and
+ # both Datetimelike and Categorical dispatch before getting here.
+ assert not needs_i8_conversion(dtype)
+ assert not is_categorical_dtype(dtype)
+
if not isinstance(values, ABCMultiIndex):
# extract_array would raise
values = extract_array(values, extract_numpy=True)
@@ -131,21 +136,20 @@ def _ensure_data(
return ensure_object(values), np.dtype("object")
# datetimelike
- vals_dtype = getattr(values, "dtype", None)
- if needs_i8_conversion(vals_dtype) or needs_i8_conversion(dtype):
- if is_period_dtype(vals_dtype) or is_period_dtype(dtype):
+ if needs_i8_conversion(values.dtype) or needs_i8_conversion(dtype):
+ if is_period_dtype(values.dtype) or is_period_dtype(dtype):
from pandas import PeriodIndex
- values = PeriodIndex(values)
+ values = PeriodIndex(values)._data
dtype = values.dtype
- elif is_timedelta64_dtype(vals_dtype) or is_timedelta64_dtype(dtype):
+ elif is_timedelta64_dtype(values.dtype) or is_timedelta64_dtype(dtype):
from pandas import TimedeltaIndex
- values = TimedeltaIndex(values)
+ values = TimedeltaIndex(values)._data
dtype = values.dtype
else:
# Datetime
- if values.ndim > 1 and is_datetime64_ns_dtype(vals_dtype):
+ if values.ndim > 1 and is_datetime64_ns_dtype(values.dtype):
# Avoid calling the DatetimeIndex constructor as it is 1D only
# Note: this is reached by DataFrame.rank calls GH#27027
# TODO(EA2D): special case not needed with 2D EAs
@@ -155,12 +159,12 @@ def _ensure_data(
from pandas import DatetimeIndex
- values = DatetimeIndex(values)
+ values = DatetimeIndex(values)._data
dtype = values.dtype
return values.asi8, dtype
- elif is_categorical_dtype(vals_dtype) and (
+ elif is_categorical_dtype(values.dtype) and (
is_categorical_dtype(dtype) or dtype is None
):
values = values.codes
@@ -237,11 +241,11 @@ def _ensure_arraylike(values):
}
-def _get_hashtable_algo(values):
+def _get_hashtable_algo(values: np.ndarray):
"""
Parameters
----------
- values : arraylike
+ values : np.ndarray
Returns
-------
@@ -255,15 +259,15 @@ def _get_hashtable_algo(values):
return htable, values
-def _get_values_for_rank(values):
+def _get_values_for_rank(values: ArrayLike):
if is_categorical_dtype(values):
- values = values._values_for_rank()
+ values = cast("Categorical", values)._values_for_rank()
values, _ = _ensure_data(values)
return values
-def get_data_algo(values):
+def get_data_algo(values: ArrayLike):
values = _get_values_for_rank(values)
ndtype = _check_object_for_strings(values)
@@ -421,20 +425,28 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
f"to isin(), you passed a [{type(values).__name__}]"
)
- if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
+ if not isinstance(
+ values, (ABCIndexClass, ABCSeries, ABCExtensionArray, np.ndarray)
+ ):
values = construct_1d_object_array_from_listlike(list(values))
# TODO: could use ensure_arraylike here
+ elif isinstance(values, ABCMultiIndex):
+ # Avoid raising in extract_array
+ values = np.array(values)
comps = _ensure_arraylike(comps)
comps = extract_array(comps, extract_numpy=True)
- if is_categorical_dtype(comps):
+ if is_categorical_dtype(comps.dtype):
# TODO(extension)
# handle categoricals
return cast("Categorical", comps).isin(values)
- if needs_i8_conversion(comps):
+ if needs_i8_conversion(comps.dtype):
# Dispatch to DatetimeLikeArrayMixin.isin
return array(comps).isin(values)
+ elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps.dtype):
+ # e.g. comps are integers and values are datetime64s
+ return np.zeros(comps.shape, dtype=bool)
comps, dtype = _ensure_data(comps)
values, _ = _ensure_data(values, dtype=dtype)
@@ -474,7 +486,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
def factorize_array(
- values, na_sentinel: int = -1, size_hint=None, na_value=None, mask=None
+ values: np.ndarray, na_sentinel: int = -1, size_hint=None, na_value=None, mask=None
) -> Tuple[np.ndarray, np.ndarray]:
"""
Factorize an array-like to codes and uniques.
@@ -540,7 +552,7 @@ def factorize(
sort: bool = False,
na_sentinel: Optional[int] = -1,
size_hint: Optional[int] = None,
-) -> Tuple[np.ndarray, Union[np.ndarray, ABCIndex]]:
+) -> Tuple[np.ndarray, Union[np.ndarray, "Index"]]:
"""
Encode the object as an enumerated type or categorical variable.
@@ -838,7 +850,7 @@ def value_counts_arraylike(values, dropna: bool):
return keys, counts
-def duplicated(values, keep="first") -> np.ndarray:
+def duplicated(values: ArrayLike, keep: str = "first") -> np.ndarray:
"""
Return boolean ndarray denoting duplicate values.
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index c76369c213a70..89d0a6723c890 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -842,6 +842,27 @@ def test_i8(self):
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
+ @pytest.mark.parametrize("dtype1", ["m8[ns]", "M8[ns]", "M8[ns, UTC]", "period[D]"])
+ @pytest.mark.parametrize("dtype", ["i8", "f8", "u8"])
+ def test_isin_datetimelike_values_numeric_comps(self, dtype, dtype1):
+ # Anything but object and we get all-False shortcut
+
+ dta = date_range("2013-01-01", periods=3)._values
+ if dtype1 == "period[D]":
+ # TODO: fix Series.view to get this on its own
+ arr = dta.to_period("D")
+ elif dtype1 == "M8[ns, UTC]":
+ # TODO: fix Series.view to get this on its own
+ arr = dta.tz_localize("UTC")
+ else:
+ arr = Series(dta.view("i8")).view(dtype1)._values
+
+ comps = arr.view("i8").astype(dtype)
+
+ result = algos.isin(comps, arr)
+ expected = np.zeros(comps.shape, dtype=bool)
+ tm.assert_numpy_array_equal(result, expected)
+
def test_large(self):
s = date_range("20000101", periods=2000000, freq="s").values
result = algos.isin(s, s[0:2])
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38020 | 2020-11-23T16:28:22Z | 2020-11-25T22:05:48Z | 2020-11-25T22:05:48Z | 2020-11-25T22:11:40Z |
TST/REF: collect Index setops tests | diff --git a/pandas/tests/indexes/base_class/test_setops.py b/pandas/tests/indexes/base_class/test_setops.py
index b571ff7f63f58..9a6a892307da8 100644
--- a/pandas/tests/indexes/base_class/test_setops.py
+++ b/pandas/tests/indexes/base_class/test_setops.py
@@ -1,3 +1,5 @@
+from datetime import datetime
+
import numpy as np
import pytest
@@ -83,7 +85,7 @@ def test_union_sort_other_incomparable(self):
result = idx.union(idx[:1], sort=False)
tm.assert_index_equal(result, idx)
- @pytest.mark.xfail(reason="Not implemented")
+ @pytest.mark.xfail(reason="GH#25151 need to decide on True behavior")
def test_union_sort_other_incomparable_true(self):
# TODO decide on True behaviour
# sort=True
@@ -91,6 +93,13 @@ def test_union_sort_other_incomparable_true(self):
with pytest.raises(TypeError, match=".*"):
idx.union(idx[:1], sort=True)
+ @pytest.mark.xfail(reason="GH#25151 need to decide on True behavior")
+ def test_intersection_equal_sort_true(self):
+ # TODO decide on True behaviour
+ idx = Index(["c", "a", "b"])
+ sorted_ = Index(["a", "b", "c"])
+ tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)
+
def test_intersection_base(self, sort):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = Index([0, "a", 1, "b", 2, "c"])
@@ -111,7 +120,7 @@ def test_intersection_different_type_base(self, klass, sort):
result = first.intersection(klass(second.values), sort=sort)
assert tm.equalContents(result, second)
- def test_intersect_nosort(self):
+ def test_intersection_nosort(self):
result = Index(["c", "b", "a"]).intersection(["b", "a"])
expected = Index(["b", "a"])
tm.assert_index_equal(result, expected)
@@ -121,6 +130,28 @@ def test_intersection_equal_sort(self):
tm.assert_index_equal(idx.intersection(idx, sort=False), idx)
tm.assert_index_equal(idx.intersection(idx, sort=None), idx)
+ def test_intersection_str_dates(self, sort):
+ dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
+
+ i1 = Index(dt_dates, dtype=object)
+ i2 = Index(["aa"], dtype=object)
+ result = i2.intersection(i1, sort=sort)
+
+ assert len(result) == 0
+
+ @pytest.mark.parametrize(
+ "index2,expected_arr",
+ [(Index(["B", "D"]), ["B"]), (Index(["B", "D", "A"]), ["A", "B", "A"])],
+ )
+ def test_intersection_non_monotonic_non_unique(self, index2, expected_arr, sort):
+ # non-monotonic non-unique
+ index1 = Index(["A", "B", "A", "C"])
+ expected = Index(expected_arr, dtype="object")
+ result = index1.intersection(index2, sort=sort)
+ if sort is None:
+ expected = expected.sort_values()
+ tm.assert_index_equal(result, expected)
+
def test_difference_base(self, sort):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = Index([0, "a", 1, "b", 2, "c"])
@@ -142,3 +173,74 @@ def test_symmetric_difference(self):
result = first.symmetric_difference(second)
expected = Index([0, 1, 2, "a", "c"])
tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "method,expected,sort",
+ [
+ (
+ "intersection",
+ np.array(
+ [(1, "A"), (2, "A"), (1, "B"), (2, "B")],
+ dtype=[("num", int), ("let", "a1")],
+ ),
+ False,
+ ),
+ (
+ "intersection",
+ np.array(
+ [(1, "A"), (1, "B"), (2, "A"), (2, "B")],
+ dtype=[("num", int), ("let", "a1")],
+ ),
+ None,
+ ),
+ (
+ "union",
+ np.array(
+ [(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")],
+ dtype=[("num", int), ("let", "a1")],
+ ),
+ None,
+ ),
+ ],
+ )
+ def test_tuple_union_bug(self, method, expected, sort):
+ index1 = Index(
+ np.array(
+ [(1, "A"), (2, "A"), (1, "B"), (2, "B")],
+ dtype=[("num", int), ("let", "a1")],
+ )
+ )
+ index2 = Index(
+ np.array(
+ [(1, "A"), (2, "A"), (1, "B"), (2, "B"), (1, "C"), (2, "C")],
+ dtype=[("num", int), ("let", "a1")],
+ )
+ )
+
+ result = getattr(index1, method)(index2, sort=sort)
+ assert result.ndim == 1
+
+ expected = Index(expected)
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize("first_list", [list("ba"), list()])
+ @pytest.mark.parametrize("second_list", [list("ab"), list()])
+ @pytest.mark.parametrize(
+ "first_name, second_name, expected_name",
+ [("A", "B", None), (None, "B", None), ("A", None, None)],
+ )
+ def test_union_name_preservation(
+ self, first_list, second_list, first_name, second_name, expected_name, sort
+ ):
+ first = Index(first_list, name=first_name)
+ second = Index(second_list, name=second_name)
+ union = first.union(second, sort=sort)
+
+ vals = set(first_list).union(second_list)
+
+ if sort is None and len(first_list) > 0 and len(second_list) > 0:
+ expected = Index(sorted(vals), name=expected_name)
+ tm.assert_index_equal(union, expected)
+ else:
+ expected = Index(vals, name=expected_name)
+ tm.equalContents(union, expected)
diff --git a/pandas/tests/indexes/numeric/test_setops.py b/pandas/tests/indexes/numeric/test_setops.py
new file mode 100644
index 0000000000000..6cde3e2366062
--- /dev/null
+++ b/pandas/tests/indexes/numeric/test_setops.py
@@ -0,0 +1,139 @@
+from datetime import datetime, timedelta
+
+import numpy as np
+import pytest
+
+from pandas import Float64Index, Index, Int64Index, RangeIndex, UInt64Index
+import pandas._testing as tm
+
+
+@pytest.fixture
+def index_large():
+ # large values used in TestUInt64Index where no compat needed with Int64/Float64
+ large = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25]
+ return UInt64Index(large)
+
+
+class TestSetOps:
+ @pytest.mark.parametrize("dtype", ["f8", "u8", "i8"])
+ def test_union_non_numeric(self, dtype):
+ # corner case, non-numeric
+ index = Index(np.arange(5, dtype=dtype), dtype=dtype)
+ assert index.dtype == dtype
+
+ other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object)
+ result = index.union(other)
+ expected = Index(np.concatenate((index, other)))
+ tm.assert_index_equal(result, expected)
+
+ result = other.union(index)
+ expected = Index(np.concatenate((other, index)))
+ tm.assert_index_equal(result, expected)
+
+ def test_intersection(self):
+ index = Int64Index(range(5))
+
+ other = Index([1, 2, 3, 4, 5])
+ result = index.intersection(other)
+ expected = Index(np.sort(np.intersect1d(index.values, other.values)))
+ tm.assert_index_equal(result, expected)
+
+ result = other.intersection(index)
+ expected = Index(
+ np.sort(np.asarray(np.intersect1d(index.values, other.values)))
+ )
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize("dtype", ["int64", "uint64"])
+ def test_int_float_union_dtype(self, dtype):
+ # https://github.com/pandas-dev/pandas/issues/26778
+ # [u]int | float -> float
+ index = Index([0, 2, 3], dtype=dtype)
+ other = Float64Index([0.5, 1.5])
+ expected = Float64Index([0.0, 0.5, 1.5, 2.0, 3.0])
+ result = index.union(other)
+ tm.assert_index_equal(result, expected)
+
+ result = other.union(index)
+ tm.assert_index_equal(result, expected)
+
+ def test_range_float_union_dtype(self):
+ # https://github.com/pandas-dev/pandas/issues/26778
+ index = RangeIndex(start=0, stop=3)
+ other = Float64Index([0.5, 1.5])
+ result = index.union(other)
+ expected = Float64Index([0.0, 0.5, 1, 1.5, 2.0])
+ tm.assert_index_equal(result, expected)
+
+ result = other.union(index)
+ tm.assert_index_equal(result, expected)
+
+ def test_float64_index_difference(self):
+ # https://github.com/pandas-dev/pandas/issues/35217
+ float_index = Index([1.0, 2, 3])
+ string_index = Index(["1", "2", "3"])
+
+ result = float_index.difference(string_index)
+ tm.assert_index_equal(result, float_index)
+
+ result = string_index.difference(float_index)
+ tm.assert_index_equal(result, string_index)
+
+ def test_intersection_uint64_outside_int64_range(self, index_large):
+ other = Index([2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20])
+ result = index_large.intersection(other)
+ expected = Index(np.sort(np.intersect1d(index_large.values, other.values)))
+ tm.assert_index_equal(result, expected)
+
+ result = other.intersection(index_large)
+ expected = Index(
+ np.sort(np.asarray(np.intersect1d(index_large.values, other.values)))
+ )
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "index2,keeps_name",
+ [
+ (Index([4, 7, 6, 5, 3], name="index"), True),
+ (Index([4, 7, 6, 5, 3], name="other"), False),
+ ],
+ )
+ def test_intersection_monotonic(self, index2, keeps_name, sort):
+ index1 = Index([5, 3, 2, 4, 1], name="index")
+ expected = Index([5, 3, 4])
+
+ if keeps_name:
+ expected.name = "index"
+
+ result = index1.intersection(index2, sort=sort)
+ if sort is None:
+ expected = expected.sort_values()
+ tm.assert_index_equal(result, expected)
+
+
+class TestSetOpsSort:
+ @pytest.mark.parametrize("slice_", [slice(None), slice(0)])
+ def test_union_sort_other_special(self, slice_):
+ # https://github.com/pandas-dev/pandas/issues/24959
+
+ idx = Index([1, 0, 2])
+ # default, sort=None
+ other = idx[slice_]
+ tm.assert_index_equal(idx.union(other), idx)
+ tm.assert_index_equal(other.union(idx), idx)
+
+ # sort=False
+ tm.assert_index_equal(idx.union(other, sort=False), idx)
+
+ @pytest.mark.xfail(reason="Not implemented")
+ @pytest.mark.parametrize("slice_", [slice(None), slice(0)])
+ def test_union_sort_special_true(self, slice_):
+ # TODO: decide on True behaviour
+ # sort=True
+ idx = Index([1, 0, 2])
+ # default, sort=None
+ other = idx[slice_]
+
+ result = idx.union(other, sort=True)
+ expected = Index([0, 1, 2])
+ tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index ec03d5466d1f0..2e3a70e8c2215 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -660,54 +660,6 @@ def test_intersection_name_preservation2(
intersect = first.intersection(second, sort=sort)
assert intersect.name == expected_name
- @pytest.mark.parametrize(
- "index2,keeps_name",
- [
- (Index([4, 7, 6, 5, 3], name="index"), True),
- (Index([4, 7, 6, 5, 3], name="other"), False),
- ],
- )
- def test_intersection_monotonic(self, index2, keeps_name, sort):
- index1 = Index([5, 3, 2, 4, 1], name="index")
- expected = Index([5, 3, 4])
-
- if keeps_name:
- expected.name = "index"
-
- result = index1.intersection(index2, sort=sort)
- if sort is None:
- expected = expected.sort_values()
- tm.assert_index_equal(result, expected)
-
- @pytest.mark.parametrize(
- "index2,expected_arr",
- [(Index(["B", "D"]), ["B"]), (Index(["B", "D", "A"]), ["A", "B", "A"])],
- )
- def test_intersection_non_monotonic_non_unique(self, index2, expected_arr, sort):
- # non-monotonic non-unique
- index1 = Index(["A", "B", "A", "C"])
- expected = Index(expected_arr, dtype="object")
- result = index1.intersection(index2, sort=sort)
- if sort is None:
- expected = expected.sort_values()
- tm.assert_index_equal(result, expected)
-
- def test_intersect_str_dates(self, sort):
- dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
-
- i1 = Index(dt_dates, dtype=object)
- i2 = Index(["aa"], dtype=object)
- result = i2.intersection(i1, sort=sort)
-
- assert len(result) == 0
-
- @pytest.mark.xfail(reason="Not implemented")
- def test_intersection_equal_sort_true(self):
- # TODO decide on True behaviour
- idx = Index(["c", "a", "b"])
- sorted_ = Index(["a", "b", "c"])
- tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)
-
def test_chained_union(self, sort):
# Chained unions handles names correctly
i1 = Index([1, 2], name="i1")
@@ -735,32 +687,6 @@ def test_union(self, index, sort):
tm.assert_index_equal(union, everything.sort_values())
assert tm.equalContents(union, everything)
- @pytest.mark.parametrize("slice_", [slice(None), slice(0)])
- def test_union_sort_other_special(self, slice_):
- # https://github.com/pandas-dev/pandas/issues/24959
-
- idx = Index([1, 0, 2])
- # default, sort=None
- other = idx[slice_]
- tm.assert_index_equal(idx.union(other), idx)
- tm.assert_index_equal(other.union(idx), idx)
-
- # sort=False
- tm.assert_index_equal(idx.union(other, sort=False), idx)
-
- @pytest.mark.xfail(reason="Not implemented")
- @pytest.mark.parametrize("slice_", [slice(None), slice(0)])
- def test_union_sort_special_true(self, slice_):
- # TODO decide on True behaviour
- # sort=True
- idx = Index([1, 0, 2])
- # default, sort=None
- other = idx[slice_]
-
- result = idx.union(other, sort=True)
- expected = Index([0, 1, 2])
- tm.assert_index_equal(result, expected)
-
@pytest.mark.parametrize("klass", [np.array, Series, list])
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_union_from_iterables(self, index, klass, sort):
@@ -791,28 +717,6 @@ def test_union_identity(self, index, sort):
union = Index([]).union(first, sort=sort)
assert (union is first) is (not sort)
- @pytest.mark.parametrize("first_list", [list("ba"), list()])
- @pytest.mark.parametrize("second_list", [list("ab"), list()])
- @pytest.mark.parametrize(
- "first_name, second_name, expected_name",
- [("A", "B", None), (None, "B", None), ("A", None, None)],
- )
- def test_union_name_preservation(
- self, first_list, second_list, first_name, second_name, expected_name, sort
- ):
- first = Index(first_list, name=first_name)
- second = Index(second_list, name=second_name)
- union = first.union(second, sort=sort)
-
- vals = set(first_list).union(second_list)
-
- if sort is None and len(first_list) > 0 and len(second_list) > 0:
- expected = Index(sorted(vals), name=expected_name)
- tm.assert_index_equal(union, expected)
- else:
- expected = Index(vals, name=expected_name)
- assert tm.equalContents(union, expected)
-
def test_union_dt_as_obj(self, sort):
# TODO: Replace with fixturesult
index = self.create_index()
@@ -820,10 +724,7 @@ def test_union_dt_as_obj(self, sort):
first_cat = index.union(date_index)
second_cat = index.union(index)
- if date_index.dtype == np.object_:
- appended = np.append(index, date_index)
- else:
- appended = np.append(index, date_index.astype("O"))
+ appended = np.append(index, date_index.astype("O"))
assert tm.equalContents(first_cat, appended)
assert tm.equalContents(second_cat, index)
@@ -1595,55 +1496,6 @@ def test_drop_tuple(self, values, to_drop):
with pytest.raises(KeyError, match=msg):
removed.drop(drop_me)
- @pytest.mark.parametrize(
- "method,expected,sort",
- [
- (
- "intersection",
- np.array(
- [(1, "A"), (2, "A"), (1, "B"), (2, "B")],
- dtype=[("num", int), ("let", "a1")],
- ),
- False,
- ),
- (
- "intersection",
- np.array(
- [(1, "A"), (1, "B"), (2, "A"), (2, "B")],
- dtype=[("num", int), ("let", "a1")],
- ),
- None,
- ),
- (
- "union",
- np.array(
- [(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")],
- dtype=[("num", int), ("let", "a1")],
- ),
- None,
- ),
- ],
- )
- def test_tuple_union_bug(self, method, expected, sort):
- index1 = Index(
- np.array(
- [(1, "A"), (2, "A"), (1, "B"), (2, "B")],
- dtype=[("num", int), ("let", "a1")],
- )
- )
- index2 = Index(
- np.array(
- [(1, "A"), (2, "A"), (1, "B"), (2, "B"), (1, "C"), (2, "C")],
- dtype=[("num", int), ("let", "a1")],
- )
- )
-
- result = getattr(index1, method)(index2, sort=sort)
- assert result.ndim == 1
-
- expected = Index(expected)
- tm.assert_index_equal(result, expected)
-
@pytest.mark.parametrize(
"attr",
[
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index d69cbeac31a32..11f2a9f07a4c2 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -1,4 +1,4 @@
-from datetime import datetime, timedelta
+from datetime import datetime
import numpy as np
import pytest
@@ -408,18 +408,6 @@ def test_identical(self):
assert not index.astype(dtype=object).identical(index.astype(dtype=self._dtype))
- def test_union_noncomparable(self):
- # corner case, non-Int64Index
- index = self.create_index()
- other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object)
- result = index.union(other)
- expected = Index(np.concatenate((index, other)))
- tm.assert_index_equal(result, expected)
-
- result = other.union(index)
- expected = Index(np.concatenate((other, index)))
- tm.assert_index_equal(result, expected)
-
def test_cant_or_shouldnt_cast(self):
msg = (
"String dtype not supported, "
@@ -535,19 +523,6 @@ def test_coerce_list(self):
arr = Index([1, 2, 3, 4], dtype=object)
assert isinstance(arr, Index)
- def test_intersection(self):
- index = self.create_index()
- other = Index([1, 2, 3, 4, 5])
- result = index.intersection(other)
- expected = Index(np.sort(np.intersect1d(index.values, other.values)))
- tm.assert_index_equal(result, expected)
-
- result = other.intersection(index)
- expected = Index(
- np.sort(np.asarray(np.intersect1d(index.values, other.values)))
- )
- tm.assert_index_equal(result, expected)
-
class TestUInt64Index(NumericInt):
@@ -564,14 +539,8 @@ class TestUInt64Index(NumericInt):
def index(self, request):
return UInt64Index(request.param)
- @pytest.fixture
- def index_large(self):
- # large values used in TestUInt64Index where no compat needed with Int64/Float64
- large = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25]
- return UInt64Index(large)
-
def create_index(self) -> UInt64Index:
- # compat with shared Int64/Float64 tests; use index_large for UInt64 only tests
+ # compat with shared Int64/Float64 tests
return UInt64Index(np.arange(5, dtype="uint64"))
def test_constructor(self):
@@ -596,44 +565,6 @@ def test_constructor(self):
res = Index([1, 2 ** 63 + 1], dtype=np.uint64)
tm.assert_index_equal(res, idx)
- def test_intersection(self, index_large):
- other = Index([2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20])
- result = index_large.intersection(other)
- expected = Index(np.sort(np.intersect1d(index_large.values, other.values)))
- tm.assert_index_equal(result, expected)
-
- result = other.intersection(index_large)
- expected = Index(
- np.sort(np.asarray(np.intersect1d(index_large.values, other.values)))
- )
- tm.assert_index_equal(result, expected)
-
-
-@pytest.mark.parametrize("dtype", ["int64", "uint64"])
-def test_int_float_union_dtype(dtype):
- # https://github.com/pandas-dev/pandas/issues/26778
- # [u]int | float -> float
- index = Index([0, 2, 3], dtype=dtype)
- other = Float64Index([0.5, 1.5])
- expected = Float64Index([0.0, 0.5, 1.5, 2.0, 3.0])
- result = index.union(other)
- tm.assert_index_equal(result, expected)
-
- result = other.union(index)
- tm.assert_index_equal(result, expected)
-
-
-def test_range_float_union_dtype():
- # https://github.com/pandas-dev/pandas/issues/26778
- index = pd.RangeIndex(start=0, stop=3)
- other = Float64Index([0.5, 1.5])
- result = index.union(other)
- expected = Float64Index([0.0, 0.5, 1, 1.5, 2.0])
- tm.assert_index_equal(result, expected)
-
- result = other.union(index)
- tm.assert_index_equal(result, expected)
-
@pytest.mark.parametrize(
"box",
@@ -675,15 +606,3 @@ def test_float64_index_equals():
result = string_index.equals(float_index)
assert result is False
-
-
-def test_float64_index_difference():
- # https://github.com/pandas-dev/pandas/issues/35217
- float_index = Index([1.0, 2, 3])
- string_index = Index(["1", "2", "3"])
-
- result = float_index.difference(string_index)
- tm.assert_index_equal(result, float_index)
-
- result = string_index.difference(float_index)
- tm.assert_index_equal(result, string_index)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38019 | 2020-11-23T16:13:30Z | 2020-11-24T13:26:16Z | 2020-11-24T13:26:16Z | 2020-11-24T15:26:27Z |
Using os.PathLike instead of pathlib.Path (#37979) | diff --git a/pandas/_typing.py b/pandas/_typing.py
index 7f01bcaa1c50e..09c490e64957d 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -1,7 +1,7 @@
from datetime import datetime, timedelta, tzinfo
from io import BufferedIOBase, RawIOBase, TextIOBase, TextIOWrapper
from mmap import mmap
-from pathlib import Path
+from os import PathLike
from typing import (
IO,
TYPE_CHECKING,
@@ -135,7 +135,7 @@
# filenames and file-like-objects
Buffer = Union[IO[AnyStr], RawIOBase, BufferedIOBase, TextIOBase, TextIOWrapper, mmap]
FileOrBuffer = Union[str, Buffer[T]]
-FilePathOrBuffer = Union[Path, FileOrBuffer[T]]
+FilePathOrBuffer = Union["PathLike[str]", FileOrBuffer[T]]
# for arbitrary kwargs passed during reading/writing files
StorageOptions = Optional[Dict[str, Any]]
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 8ec0a869c7042..9fede5180e727 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -7,7 +7,6 @@
from io import BufferedIOBase, BytesIO, RawIOBase, TextIOWrapper
import mmap
import os
-import pathlib
from typing import IO, Any, AnyStr, Dict, List, Mapping, Optional, Tuple, cast
from urllib.parse import (
urljoin,
@@ -176,19 +175,8 @@ def stringify_path(
Any other object is passed through unchanged, which includes bytes,
strings, buffers, or anything else that's not even path-like.
"""
- if hasattr(filepath_or_buffer, "__fspath__"):
- # https://github.com/python/mypy/issues/1424
- # error: Item "str" of "Union[str, Path, IO[str]]" has no attribute
- # "__fspath__" [union-attr]
- # error: Item "IO[str]" of "Union[str, Path, IO[str]]" has no attribute
- # "__fspath__" [union-attr]
- # error: Item "str" of "Union[str, Path, IO[bytes]]" has no attribute
- # "__fspath__" [union-attr]
- # error: Item "IO[bytes]" of "Union[str, Path, IO[bytes]]" has no
- # attribute "__fspath__" [union-attr]
- filepath_or_buffer = filepath_or_buffer.__fspath__() # type: ignore[union-attr]
- elif isinstance(filepath_or_buffer, pathlib.Path):
- filepath_or_buffer = str(filepath_or_buffer)
+ if isinstance(filepath_or_buffer, os.PathLike):
+ filepath_or_buffer = filepath_or_buffer.__fspath__()
return _expand_user(filepath_or_buffer)
| - [x] closes #37979
- [x] tests passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Replaces `pathlib.Path` with `os.PathLike` to bring behaviour further in-line with the [documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) | https://api.github.com/repos/pandas-dev/pandas/pulls/38018 | 2020-11-23T16:07:29Z | 2020-12-02T02:02:32Z | 2020-12-02T02:02:32Z | 2020-12-02T02:02:37Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.