title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
REF: collect validator methods for DTA/TDA/PA | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 430f20b359f8b..27b2ed822a49f 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -588,9 +588,6 @@ def __setitem__(
# to a period in from_sequence). For DatetimeArray, it's Timestamp...
# I don't know if mypy can do that, possibly with Generics.
# https://mypy.readthedocs.io/en/latest/generics.html
- if lib.is_scalar(value) and not isna(value):
- value = com.maybe_box_datetimelike(value)
-
if is_list_like(value):
is_slice = isinstance(key, slice)
@@ -609,21 +606,7 @@ def __setitem__(
elif not len(key):
return
- value = type(self)._from_sequence(value, dtype=self.dtype)
- self._check_compatible_with(value, setitem=True)
- value = value.asi8
- elif isinstance(value, self._scalar_type):
- self._check_compatible_with(value, setitem=True)
- value = self._unbox_scalar(value)
- elif is_valid_nat_for_dtype(value, self.dtype):
- value = iNaT
- else:
- msg = (
- f"'value' should be a '{self._scalar_type.__name__}', 'NaT', "
- f"or array of those. Got '{type(value).__name__}' instead."
- )
- raise TypeError(msg)
-
+ value = self._validate_setitem_value(value)
key = check_array_indexer(self, key)
self._data[key] = value
self._maybe_clear_freq()
@@ -682,35 +665,6 @@ def unique(self):
result = unique1d(self.asi8)
return type(self)(result, dtype=self.dtype)
- def _validate_fill_value(self, fill_value):
- """
- If a fill_value is passed to `take` convert it to an i8 representation,
- raising ValueError if this is not possible.
-
- Parameters
- ----------
- fill_value : object
-
- Returns
- -------
- fill_value : np.int64
-
- Raises
- ------
- ValueError
- """
- if isna(fill_value):
- fill_value = iNaT
- elif isinstance(fill_value, self._recognized_scalars):
- self._check_compatible_with(fill_value)
- fill_value = self._scalar_type(fill_value)
- fill_value = self._unbox_scalar(fill_value)
- else:
- raise ValueError(
- f"'fill_value' should be a {self._scalar_type}. Got '{fill_value}'."
- )
- return fill_value
-
def take(self, indices, allow_fill=False, fill_value=None):
if allow_fill:
fill_value = self._validate_fill_value(fill_value)
@@ -769,6 +723,45 @@ def shift(self, periods=1, fill_value=None, axis=0):
if not self.size or periods == 0:
return self.copy()
+ fill_value = self._validate_shift_value(fill_value)
+ new_values = shift(self._data, periods, axis, fill_value)
+
+ return type(self)._simple_new(new_values, dtype=self.dtype)
+
+ # ------------------------------------------------------------------
+ # Validation Methods
+ # TODO: try to de-duplicate these, ensure identical behavior
+
+ def _validate_fill_value(self, fill_value):
+ """
+ If a fill_value is passed to `take` convert it to an i8 representation,
+ raising ValueError if this is not possible.
+
+ Parameters
+ ----------
+ fill_value : object
+
+ Returns
+ -------
+ fill_value : np.int64
+
+ Raises
+ ------
+ ValueError
+ """
+ if isna(fill_value):
+ fill_value = iNaT
+ elif isinstance(fill_value, self._recognized_scalars):
+ self._check_compatible_with(fill_value)
+ fill_value = self._scalar_type(fill_value)
+ fill_value = self._unbox_scalar(fill_value)
+ else:
+ raise ValueError(
+ f"'fill_value' should be a {self._scalar_type}. Got '{fill_value}'."
+ )
+ return fill_value
+
+ def _validate_shift_value(self, fill_value):
# TODO(2.0): once this deprecation is enforced, used _validate_fill_value
if is_valid_nat_for_dtype(fill_value, self.dtype):
fill_value = NaT
@@ -787,15 +780,104 @@ def shift(self, periods=1, fill_value=None, axis=0):
"will raise in a future version, pass "
f"{self._scalar_type.__name__} instead.",
FutureWarning,
- stacklevel=9,
+ stacklevel=10,
)
fill_value = new_fill
fill_value = self._unbox_scalar(fill_value)
+ return fill_value
- new_values = shift(self._data, periods, axis, fill_value)
+ def _validate_searchsorted_value(self, value):
+ if isinstance(value, str):
+ try:
+ value = self._scalar_from_string(value)
+ except ValueError as err:
+ raise TypeError(
+ "searchsorted requires compatible dtype or scalar"
+ ) from err
- return type(self)._simple_new(new_values, dtype=self.dtype)
+ elif is_valid_nat_for_dtype(value, self.dtype):
+ value = NaT
+
+ elif isinstance(value, self._recognized_scalars):
+ value = self._scalar_type(value)
+
+ elif is_list_like(value) and not isinstance(value, type(self)):
+ value = array(value)
+
+ if not type(self)._is_recognized_dtype(value):
+ raise TypeError(
+ "searchsorted requires compatible dtype or scalar, "
+ f"not {type(value).__name__}"
+ )
+
+ if not (isinstance(value, (self._scalar_type, type(self))) or (value is NaT)):
+ raise TypeError(f"Unexpected type for 'value': {type(value)}")
+
+ if isinstance(value, type(self)):
+ self._check_compatible_with(value)
+ value = value.asi8
+ else:
+ value = self._unbox_scalar(value)
+
+ return value
+
+ def _validate_setitem_value(self, value):
+ if lib.is_scalar(value) and not isna(value):
+ value = com.maybe_box_datetimelike(value)
+
+ if is_list_like(value):
+ value = type(self)._from_sequence(value, dtype=self.dtype)
+ self._check_compatible_with(value, setitem=True)
+ value = value.asi8
+ elif isinstance(value, self._scalar_type):
+ self._check_compatible_with(value, setitem=True)
+ value = self._unbox_scalar(value)
+ elif is_valid_nat_for_dtype(value, self.dtype):
+ value = iNaT
+ else:
+ msg = (
+ f"'value' should be a '{self._scalar_type.__name__}', 'NaT', "
+ f"or array of those. Got '{type(value).__name__}' instead."
+ )
+ raise TypeError(msg)
+
+ return value
+
+ def _validate_insert_value(self, value):
+ if isinstance(value, self._recognized_scalars):
+ value = self._scalar_type(value)
+ elif is_valid_nat_for_dtype(value, self.dtype):
+ # GH#18295
+ value = NaT
+ elif lib.is_scalar(value) and isna(value):
+ raise TypeError(
+ f"cannot insert {type(self).__name__} with incompatible label"
+ )
+
+ return value
+
+ def _validate_where_value(self, other):
+ if lib.is_scalar(other) and isna(other):
+ other = NaT.value
+
+ else:
+ # Do type inference if necessary up front
+ # e.g. we passed PeriodIndex.values and got an ndarray of Periods
+ from pandas import Index
+
+ other = Index(other)
+
+ if is_categorical_dtype(other):
+ # e.g. we have a Categorical holding self.dtype
+ if is_dtype_equal(other.categories.dtype, self.dtype):
+ other = other._internal_get_values()
+
+ if not is_dtype_equal(self.dtype, other.dtype):
+ raise TypeError(f"Where requires matching dtype, not {other.dtype}")
+
+ other = other.view("i8")
+ return other
# ------------------------------------------------------------------
# Additional array methods
@@ -827,37 +909,7 @@ def searchsorted(self, value, side="left", sorter=None):
indices : array of ints
Array of insertion points with the same shape as `value`.
"""
- if isinstance(value, str):
- try:
- value = self._scalar_from_string(value)
- except ValueError as e:
- raise TypeError(
- "searchsorted requires compatible dtype or scalar"
- ) from e
-
- elif is_valid_nat_for_dtype(value, self.dtype):
- value = NaT
-
- elif isinstance(value, self._recognized_scalars):
- value = self._scalar_type(value)
-
- elif is_list_like(value) and not isinstance(value, type(self)):
- value = array(value)
-
- if not type(self)._is_recognized_dtype(value):
- raise TypeError(
- "searchsorted requires compatible dtype or scalar, "
- f"not {type(value).__name__}"
- )
-
- if not (isinstance(value, (self._scalar_type, type(self))) or (value is NaT)):
- raise TypeError(f"Unexpected type for 'value': {type(value)}")
-
- if isinstance(value, type(self)):
- self._check_compatible_with(value)
- value = value.asi8
- else:
- value = self._unbox_scalar(value)
+ value = self._validate_searchsorted_value(value)
# TODO: Use datetime64 semantics for sorting, xref GH#29844
return self.asi8.searchsorted(value, side=side, sorter=sorter)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 067ff32b85862..3a721d8c8c320 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -17,7 +17,6 @@
ensure_int64,
ensure_platform_int,
is_bool_dtype,
- is_categorical_dtype,
is_dtype_equal,
is_integer,
is_list_like,
@@ -26,7 +25,6 @@
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
-from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
from pandas.core import algorithms
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
@@ -494,23 +492,7 @@ def isin(self, values, level=None):
def where(self, cond, other=None):
values = self.view("i8")
- if is_scalar(other) and isna(other):
- other = NaT.value
-
- else:
- # Do type inference if necessary up front
- # e.g. we passed PeriodIndex.values and got an ndarray of Periods
- other = Index(other)
-
- if is_categorical_dtype(other):
- # e.g. we have a Categorical holding self.dtype
- if is_dtype_equal(other.categories.dtype, self.dtype):
- other = other._internal_get_values()
-
- if not is_dtype_equal(self.dtype, other.dtype):
- raise TypeError(f"Where requires matching dtype, not {other.dtype}")
-
- other = other.view("i8")
+ other = self._data._validate_where_value(other)
result = np.where(cond, values, other).astype("i8")
arr = type(self._data)._simple_new(result, dtype=self.dtype)
@@ -923,15 +905,7 @@ def insert(self, loc, item):
-------
new_index : Index
"""
- if isinstance(item, self._data._recognized_scalars):
- item = self._data._scalar_type(item)
- elif is_valid_nat_for_dtype(item, self.dtype):
- # GH 18295
- item = self._na_value
- elif is_scalar(item) and isna(item):
- raise TypeError(
- f"cannot insert {type(self).__name__} with incompatible label"
- )
+ item = self._data._validate_insert_value(item)
freq = None
if isinstance(item, self._data._scalar_type) or item is NaT:
| Zero logic changes in this PR.
xref #33685
We have a _lot_ of methods for converting arguments to DTA/TDA/PA/DTI/TDI/PI methods to/from i8 representations. ATM these have slightly different behavior, which we should try to bring into sync (and then de-duplicate the code).
This PR just collects all those methods in one place to make the upcoming steps easier.
Two more methods that I think/hope can eventually be added to this grouping are the comparisons and get_loc, but those are more complicated, but those are more complicated, so holding off on those for now. | https://api.github.com/repos/pandas-dev/pandas/pulls/33689 | 2020-04-20T23:36:19Z | 2020-04-21T12:51:41Z | 2020-04-21T12:51:41Z | 2020-04-21T14:29:17Z |
Grammatically updated HTML Footer | diff --git a/web/pandas/_templates/layout.html b/web/pandas/_templates/layout.html
index 92126a7b5a2f2..023bfe9e26b78 100644
--- a/web/pandas/_templates/layout.html
+++ b/web/pandas/_templates/layout.html
@@ -86,7 +86,7 @@
</li>
</ul>
<p>
- pandas is a fiscally sponsored project of <a href="https://numfocus.org">NumFOCUS</a>
+ pandas is a fiscally sponsored project of <a href="https://numfocus.org">NumFOCUS.</a>
</p>
</footer>
| Grammatically updated HTML Footer, Adding a period in the 89th line.
- [x] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33688 | 2020-04-20T23:16:53Z | 2020-04-22T02:34:25Z | 2020-04-22T02:34:25Z | 2020-04-22T02:34:33Z |
CI/DEP: Use numba.extending.is_jitted for numba version > 0.49.0 | diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py
index af24189adbc27..29e74747881ae 100644
--- a/pandas/core/util/numba_.py
+++ b/pandas/core/util/numba_.py
@@ -1,4 +1,5 @@
"""Common utilities for Numba operations"""
+from distutils.version import LooseVersion
import inspect
import types
from typing import Callable, Dict, Optional, Tuple
@@ -90,7 +91,12 @@ def jit_user_function(
"""
numba = import_optional_dependency("numba")
- if isinstance(func, numba.targets.registry.CPUDispatcher):
+ if LooseVersion(numba.__version__) >= LooseVersion("0.49.0"):
+ is_jitted = numba.extending.is_jitted(func)
+ else:
+ is_jitted = isinstance(func, numba.targets.registry.CPUDispatcher)
+
+ if is_jitted:
# Don't jit a user passed jitted function
numba_func = func
else:
| - [x] xref #33686
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/33687 | 2020-04-20T21:37:24Z | 2020-04-20T22:50:06Z | 2020-04-20T22:50:06Z | 2020-05-26T09:38:20Z |
BLD: Set max version of numba to 0.48.0 | diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml
index 548660cabaa67..92f9bbe662790 100644
--- a/ci/deps/azure-windows-36.yaml
+++ b/ci/deps/azure-windows-36.yaml
@@ -17,7 +17,7 @@ dependencies:
- bottleneck
- fastparquet>=0.3.2
- matplotlib=3.0.2
- - numba
+ - numba<0.49.0
- numexpr
- numpy=1.15.*
- openpyxl
diff --git a/environment.yml b/environment.yml
index 8893302b4c9b2..afeebe71127ad 100644
--- a/environment.yml
+++ b/environment.yml
@@ -76,7 +76,7 @@ dependencies:
- matplotlib>=2.2.2 # pandas.plotting, Series.plot, DataFrame.plot
- numexpr>=2.6.8
- scipy>=1.1
- - numba>=0.46.0
+ - numba>=0.46.0,<0.49.0
# optional for io
# ---------------
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 8a954fabd2d8d..2761d5c5e8b23 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -51,7 +51,7 @@ jinja2
matplotlib>=2.2.2
numexpr>=2.6.8
scipy>=1.1
-numba>=0.46.0
+numba>=0.46.0,<0.49.0
beautifulsoup4>=4.6.0
html5lib
lxml
| This should fix the errors in the CI (Travis) builds, e.g.
```
E AttributeError: module 'numba' has no attribute 'targets'
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/33686 | 2020-04-20T21:20:36Z | 2020-04-20T21:43:13Z | null | 2020-05-27T08:23:27Z |
BUG: arg validation in DTA/TDA/PA.take, DTI/TDI/PI.where | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 27b2ed822a49f..6489d07ebdc4b 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -749,7 +749,7 @@ def _validate_fill_value(self, fill_value):
------
ValueError
"""
- if isna(fill_value):
+ if is_valid_nat_for_dtype(fill_value, self.dtype):
fill_value = iNaT
elif isinstance(fill_value, self._recognized_scalars):
self._check_compatible_with(fill_value)
@@ -757,7 +757,8 @@ def _validate_fill_value(self, fill_value):
fill_value = self._unbox_scalar(fill_value)
else:
raise ValueError(
- f"'fill_value' should be a {self._scalar_type}. Got '{fill_value}'."
+ f"'fill_value' should be a {self._scalar_type}. "
+ f"Got '{str(fill_value)}'."
)
return fill_value
@@ -858,8 +859,11 @@ def _validate_insert_value(self, value):
return value
def _validate_where_value(self, other):
- if lib.is_scalar(other) and isna(other):
+ if is_valid_nat_for_dtype(other, self.dtype):
other = NaT.value
+ elif not is_list_like(other):
+ # TODO: what about own-type scalars?
+ raise TypeError(f"Where requires matching dtype, not {type(other)}")
else:
# Do type inference if necessary up front
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 5b703cfe8fae5..80739b9512953 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -508,6 +508,12 @@ def test_take_fill_valid(self, datetime_index, tz_naive_fixture):
# require NaT, not iNaT, as it could be confused with an integer
arr.take([-1, 1], allow_fill=True, fill_value=value)
+ value = np.timedelta64("NaT", "ns")
+ msg = f"'fill_value' should be a {self.dtype}. Got '{str(value)}'."
+ with pytest.raises(ValueError, match=msg):
+ # require appropriate-dtype if we have a NA value
+ arr.take([-1, 1], allow_fill=True, fill_value=value)
+
def test_concat_same_type_invalid(self, datetime_index):
# different timezones
dti = datetime_index
@@ -669,6 +675,12 @@ def test_take_fill_valid(self, timedelta_index):
# fill_value Period invalid
arr.take([0, 1], allow_fill=True, fill_value=value)
+ value = np.datetime64("NaT", "ns")
+ msg = f"'fill_value' should be a {self.dtype}. Got '{str(value)}'."
+ with pytest.raises(ValueError, match=msg):
+ # require appropriate-dtype if we have a NA value
+ arr.take([-1, 1], allow_fill=True, fill_value=value)
+
class TestPeriodArray(SharedTests):
index_cls = pd.PeriodIndex
@@ -697,6 +709,22 @@ def test_astype_object(self, period_index):
assert asobj.dtype == "O"
assert list(asobj) == list(pi)
+ def test_take_fill_valid(self, period_index):
+ pi = period_index
+ arr = PeriodArray(pi)
+
+ value = pd.NaT.value
+ msg = f"'fill_value' should be a {self.dtype}. Got '{value}'."
+ with pytest.raises(ValueError, match=msg):
+ # require NaT, not iNaT, as it could be confused with an integer
+ arr.take([-1, 1], allow_fill=True, fill_value=value)
+
+ value = np.timedelta64("NaT", "ns")
+ msg = f"'fill_value' should be a {self.dtype}. Got '{str(value)}'."
+ with pytest.raises(ValueError, match=msg):
+ # require appropriate-dtype if we have a NA value
+ arr.take([-1, 1], allow_fill=True, fill_value=value)
+
@pytest.mark.parametrize("how", ["S", "E"])
def test_to_timestamp(self, how, period_index):
pi = period_index
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 08b8e710237c5..f9b8bd27b7f5a 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -174,7 +174,6 @@ def test_where_other(self):
def test_where_invalid_dtypes(self):
dti = pd.date_range("20130101", periods=3, tz="US/Eastern")
- i2 = dti.copy()
i2 = Index([pd.NaT, pd.NaT] + dti[2:].tolist())
with pytest.raises(TypeError, match="Where requires matching dtype"):
@@ -194,6 +193,14 @@ def test_where_invalid_dtypes(self):
with pytest.raises(TypeError, match="Where requires matching dtype"):
dti.where(notna(i2), i2.asi8)
+ with pytest.raises(TypeError, match="Where requires matching dtype"):
+ # non-matching scalar
+ dti.where(notna(i2), pd.Timedelta(days=4))
+
+ with pytest.raises(TypeError, match="Where requires matching dtype"):
+ # non-matching NA value
+ dti.where(notna(i2), np.timedelta64("NaT", "ns"))
+
def test_where_tz(self):
i = pd.date_range("20130101", periods=3, tz="US/Eastern")
result = i.where(notna(i))
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index c4aaf6332ba15..bd71c04a9ab03 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -526,7 +526,6 @@ def test_where_other(self):
def test_where_invalid_dtypes(self):
pi = period_range("20130101", periods=5, freq="D")
- i2 = pi.copy()
i2 = PeriodIndex([NaT, NaT] + pi[2:].tolist(), freq="D")
with pytest.raises(TypeError, match="Where requires matching dtype"):
@@ -538,6 +537,14 @@ def test_where_invalid_dtypes(self):
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.to_timestamp("S"))
+ with pytest.raises(TypeError, match="Where requires matching dtype"):
+ # non-matching scalar
+ pi.where(notna(i2), Timedelta(days=4))
+
+ with pytest.raises(TypeError, match="Where requires matching dtype"):
+ # non-matching NA value
+ pi.where(notna(i2), np.timedelta64("NaT", "ns"))
+
class TestTake:
def test_take(self):
diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py
index 8c39a9c40a69b..17feed3fd7a68 100644
--- a/pandas/tests/indexes/timedeltas/test_indexing.py
+++ b/pandas/tests/indexes/timedeltas/test_indexing.py
@@ -148,7 +148,6 @@ def test_where_doesnt_retain_freq(self):
def test_where_invalid_dtypes(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
- i2 = tdi.copy()
i2 = Index([pd.NaT, pd.NaT] + tdi[2:].tolist())
with pytest.raises(TypeError, match="Where requires matching dtype"):
@@ -160,6 +159,14 @@ def test_where_invalid_dtypes(self):
with pytest.raises(TypeError, match="Where requires matching dtype"):
tdi.where(notna(i2), (i2 + pd.Timestamp.now()).to_period("D"))
+ with pytest.raises(TypeError, match="Where requires matching dtype"):
+ # non-matching scalar
+ tdi.where(notna(i2), pd.Timestamp.now())
+
+ with pytest.raises(TypeError, match="Where requires matching dtype"):
+ # non-matching NA value
+ tdi.where(notna(i2), np.datetime64("NaT", "ns"))
+
class TestTake:
def test_take(self):
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index c390347236ad3..6cb73823adabb 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -700,7 +700,7 @@ def test_where_index_datetime(self):
assert obj.dtype == "datetime64[ns]"
cond = pd.Index([True, False, True, False])
- msg = "Index\\(\\.\\.\\.\\) must be called with a collection of some kind"
+ msg = "Where requires matching dtype, not .*Timestamp"
with pytest.raises(TypeError, match=msg):
obj.where(cond, fill_val)
| We have a _lot_ of really similar validation/casting methods in these classes. This is a step in smoothing out the differences between them so we can share them. | https://api.github.com/repos/pandas-dev/pandas/pulls/33685 | 2020-04-20T21:18:24Z | 2020-04-25T21:06:22Z | 2020-04-25T21:06:22Z | 2020-04-25T21:10:13Z |
REF: update unbox_scalar for better shareability | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 430f20b359f8b..8f94212779e02 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -75,9 +75,9 @@ def wrapper(self, other):
other = self._scalar_type(other)
self._check_compatible_with(other)
- other_i8 = self._unbox_scalar(other)
+ other_val = self._unbox_scalar(other)
- result = op(self.view("i8"), other_i8)
+ result = op(self._data, other_val)
if isna(other):
result.fill(nat_result)
@@ -177,7 +177,7 @@ def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) ->
Returns
-------
- int
+ int, np.datetime64, or np.timedelta64
Examples
--------
@@ -611,7 +611,7 @@ def __setitem__(
value = type(self)._from_sequence(value, dtype=self.dtype)
self._check_compatible_with(value, setitem=True)
- value = value.asi8
+ value = value._data
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value, setitem=True)
value = self._unbox_scalar(value)
@@ -693,22 +693,22 @@ def _validate_fill_value(self, fill_value):
Returns
-------
- fill_value : np.int64
+ fill_value : np.int64, np.datetime64, or np.timedelta64
Raises
------
ValueError
"""
if isna(fill_value):
- fill_value = iNaT
+ fill_value = NaT
elif isinstance(fill_value, self._recognized_scalars):
self._check_compatible_with(fill_value)
fill_value = self._scalar_type(fill_value)
- fill_value = self._unbox_scalar(fill_value)
else:
raise ValueError(
f"'fill_value' should be a {self._scalar_type}. Got '{fill_value}'."
)
+ fill_value = self._unbox_scalar(fill_value)
return fill_value
def take(self, indices, allow_fill=False, fill_value=None):
@@ -716,7 +716,7 @@ def take(self, indices, allow_fill=False, fill_value=None):
fill_value = self._validate_fill_value(fill_value)
new_values = take(
- self.asi8, indices, allow_fill=allow_fill, fill_value=fill_value
+ self._data, indices, allow_fill=allow_fill, fill_value=fill_value
)
return type(self)(new_values, dtype=self.dtype)
@@ -855,12 +855,11 @@ def searchsorted(self, value, side="left", sorter=None):
if isinstance(value, type(self)):
self._check_compatible_with(value)
- value = value.asi8
+ value = value._data
else:
value = self._unbox_scalar(value)
- # TODO: Use datetime64 semantics for sorting, xref GH#29844
- return self.asi8.searchsorted(value, side=side, sorter=sorter)
+ return self._data.searchsorted(value, side=side, sorter=sorter)
def repeat(self, repeats, *args, **kwargs):
"""
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index d34bba68da49c..978f881935e62 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -451,7 +451,7 @@ def _unbox_scalar(self, value):
raise ValueError("'value' should be a Timestamp.")
if not isna(value):
self._check_compatible_with(value)
- return value.value
+ return np.datetime64(value.value, "ns")
def _scalar_from_string(self, value):
return Timestamp(value, tz=self.tz)
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 8c93dca783113..aa57ec950b5a4 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -277,7 +277,7 @@ def _unbox_scalar(self, value):
if not isinstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timedelta.")
self._check_compatible_with(value)
- return value.value
+ return np.timedelta64(value.value, "ns")
def _scalar_from_string(self, value):
return Timedelta(value)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 203ea2152886a..710929cbf51e8 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -406,7 +406,7 @@ def _partial_date_slice(
self._validate_partial_date_slice(reso)
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
- i8vals = self.asi8
+ vals = self._data._data
unbox = self._data._unbox_scalar
if self.is_monotonic:
@@ -421,14 +421,14 @@ def _partial_date_slice(
# TODO: does this depend on being monotonic _increasing_?
# a monotonic (sorted) series can be sliced
- # Use asi8.searchsorted to avoid re-validating Periods/Timestamps
- left = i8vals.searchsorted(unbox(t1), side="left") if use_lhs else None
- right = i8vals.searchsorted(unbox(t2), side="right") if use_rhs else None
+ # Use vals.searchsorted to avoid re-validating Periods/Timestamps
+ left = vals.searchsorted(unbox(t1), side="left") if use_lhs else None
+ right = vals.searchsorted(unbox(t2), side="right") if use_rhs else None
return slice(left, right)
else:
- lhs_mask = (i8vals >= unbox(t1)) if use_lhs else True
- rhs_mask = (i8vals <= unbox(t2)) if use_rhs else True
+ lhs_mask = (vals >= unbox(t1)) if use_lhs else True
+ rhs_mask = (vals <= unbox(t2)) if use_rhs else True
# try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 5b703cfe8fae5..d8a1151c8c94a 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -140,11 +140,19 @@ def test_concat_same_type(self):
def test_unbox_scalar(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
+
+ if self.array_cls is PeriodArray:
+ ex_cls = int
+ elif self.array_cls is TimedeltaArray:
+ ex_cls = np.timedelta64
+ else:
+ ex_cls = np.datetime64
+
result = arr._unbox_scalar(arr[0])
- assert isinstance(result, int)
+ assert isinstance(result, ex_cls)
result = arr._unbox_scalar(pd.NaT)
- assert isinstance(result, int)
+ assert isinstance(result, ex_cls)
msg = f"'value' should be a {self.dtype.__name__}."
with pytest.raises(ValueError, match=msg):
@@ -208,7 +216,11 @@ def test_searchsorted(self):
# Following numpy convention, NaT goes at the beginning
# (unlike NaN which goes at the end)
result = arr.searchsorted(pd.NaT)
- assert result == 0
+ if _np_version_under1p18 or self.array_cls is PeriodArray:
+ # TODO: PeriodArray shouldn't be special-cased here
+ assert result == 0
+ else:
+ assert result == 10
def test_getitem_2d(self, arr1d):
# 2d slicing on a 1D array
| I'm finding this will make it easier to share a bunch of unpacking/validation code | https://api.github.com/repos/pandas-dev/pandas/pulls/33684 | 2020-04-20T20:30:16Z | 2020-04-20T21:32:53Z | null | 2021-03-02T15:51:39Z |
TST: Remove some old xfails | diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py
index bf7d275e4ff7b..4ae1c1e6b63ce 100644
--- a/pandas/tests/arrays/sparse/test_arithmetics.py
+++ b/pandas/tests/arrays/sparse/test_arithmetics.py
@@ -31,11 +31,6 @@ def _assert(self, a, b):
def _check_numeric_ops(self, a, b, a_dense, b_dense, mix, op):
with np.errstate(invalid="ignore", divide="ignore"):
- if op in [operator.floordiv, ops.rfloordiv]:
- # FIXME: GH#13843
- if self._base == pd.Series and a.dtype.subtype == np.dtype("int64"):
- pytest.xfail("Not defined/working. See GH#13843")
-
if mix:
result = op(a, b_dense).to_dense()
else:
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 346de55f551df..93dd1bf23c308 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -16,7 +16,6 @@
NaT,
Series,
Timestamp,
- _is_numpy_dev,
date_range,
isna,
)
@@ -698,11 +697,6 @@ def test_numpy_compat(func):
getattr(g, func)(foo=1)
-@pytest.mark.xfail(
- _is_numpy_dev,
- reason="https://github.com/pandas-dev/pandas/issues/31992",
- strict=False,
-)
def test_cummin(numpy_dtypes_for_minmax):
dtype = numpy_dtypes_for_minmax[0]
min_val = numpy_dtypes_for_minmax[1]
@@ -751,11 +745,6 @@ def test_cummin(numpy_dtypes_for_minmax):
tm.assert_series_equal(result, expected)
-@pytest.mark.xfail(
- _is_numpy_dev,
- reason="https://github.com/pandas-dev/pandas/issues/31992",
- strict=False,
-)
def test_cummin_all_nan_column():
base_df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
@@ -766,11 +755,6 @@ def test_cummin_all_nan_column():
tm.assert_frame_equal(expected, result)
-@pytest.mark.xfail(
- _is_numpy_dev,
- reason="https://github.com/pandas-dev/pandas/issues/31992",
- strict=False,
-)
def test_cummax(numpy_dtypes_for_minmax):
dtype = numpy_dtypes_for_minmax[0]
max_val = numpy_dtypes_for_minmax[2]
@@ -819,11 +803,6 @@ def test_cummax(numpy_dtypes_for_minmax):
tm.assert_series_equal(result, expected)
-@pytest.mark.xfail(
- _is_numpy_dev,
- reason="https://github.com/pandas-dev/pandas/issues/31992",
- strict=False,
-)
def test_cummax_all_nan_column():
base_df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index 2295eb2297fa6..e1042bf35acc4 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -15,7 +15,6 @@
MultiIndex,
Series,
Timestamp,
- _is_numpy_dev,
concat,
date_range,
)
@@ -330,8 +329,6 @@ def test_transform_transformation_func(transformation_func):
if transformation_func in ["pad", "backfill", "tshift", "cumcount"]:
# These transformation functions are not yet covered in this test
pytest.xfail("See GH 31269")
- elif _is_numpy_dev and transformation_func in ["cummin"]:
- pytest.xfail("https://github.com/pandas-dev/pandas/issues/31992")
elif transformation_func == "fillna":
test_op = lambda x: x.transform("fillna", value=0)
mock_op = lambda x: x.fillna(value=0)
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 584a545769c4c..42b4ea5ad9aac 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -196,7 +196,6 @@ def test_pickle_path_localpath():
tm.assert_frame_equal(df, result)
-@pytest.mark.xfail(reason="GitHub issue #31310", strict=False)
def test_legacy_sparse_warning(datapath):
"""
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index 304033f82c7a2..620fc1c006d93 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -1,7 +1,5 @@
from datetime import date, datetime, timedelta
-from distutils.version import StrictVersion
-import dateutil
import numpy as np
import pytest
import pytz
@@ -1437,11 +1435,6 @@ def test_period_immutable():
per.freq = 2 * freq
-@pytest.mark.xfail(
- StrictVersion(dateutil.__version__.split(".dev")[0]) < StrictVersion("2.7.0"),
- reason="Bug in dateutil < 2.7.0 when parsing old dates: Period('0001-01-07', 'D')",
- strict=False,
-)
def test_small_year_parsing():
per1 = Period("0001-01-07", "D")
assert per1.year == 1
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index 7baeb8f5673bc..eb22b715f9f4d 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -417,6 +417,7 @@ def test_td_div_numeric_scalar(self):
np.float64("NaN"),
marks=pytest.mark.xfail(
_is_numpy_dev,
+ raises=RuntimeWarning,
reason="https://github.com/pandas-dev/pandas/issues/31992",
strict=False,
),
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/33680 | 2020-04-20T18:04:52Z | 2020-04-20T21:35:28Z | 2020-04-20T21:35:28Z | 2020-04-20T21:43:52Z |
CLN: Remove redundant mixin to TimedeltaIndex | diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 3e3591ee42c30..d2e743739db3d 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -51,7 +51,7 @@
],
TimedeltaArray,
)
-class TimedeltaIndex(DatetimeTimedeltaMixin, dtl.TimelikeOps):
+class TimedeltaIndex(DatetimeTimedeltaMixin):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects.
| we already pass-through the relevant attributes | https://api.github.com/repos/pandas-dev/pandas/pulls/33679 | 2020-04-20T16:50:25Z | 2020-04-20T22:14:17Z | 2020-04-20T22:14:17Z | 2020-04-20T22:16:42Z |
REF: remove single-tuple special case for Categorical.__hash__ | diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 8fe2b3c60d6d0..0224895774c8e 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -482,31 +482,23 @@ def _hash_categories(categories, ordered: Ordered = True) -> int:
from pandas.core.util.hashing import (
hash_array,
_combine_hash_arrays,
- hash_tuples,
)
- from pandas.core.dtypes.common import is_datetime64tz_dtype, DT64NS_DTYPE
-
- if len(categories) and isinstance(categories[0], tuple):
- # assumes if any individual category is a tuple, then all our. ATM
- # I don't really want to support just some of the categories being
- # tuples.
- categories = list(categories) # breaks if a np.array of categories
- cat_array = hash_tuples(categories)
- else:
- if categories.dtype == "O":
- if len({type(x) for x in categories}) != 1:
- # TODO: hash_array doesn't handle mixed types. It casts
- # everything to a str first, which means we treat
- # {'1', '2'} the same as {'1', 2}
- # find a better solution
- hashed = hash((tuple(categories), ordered))
- return hashed
-
- if is_datetime64tz_dtype(categories.dtype):
- # Avoid future warning.
- categories = categories.astype(DT64NS_DTYPE)
-
- cat_array = hash_array(np.asarray(categories), categorize=False)
+
+ if categories.dtype == "O":
+ if len({type(x) for x in categories}) != 1:
+ # TODO: hash_array doesn't handle mixed types. It casts
+ # everything to a str first, which means we treat
+ # {'1', '2'} the same as {'1', 2}
+ # find a better solution
+ hashed = hash((tuple(categories), ordered))
+ return hashed
+
+ if isinstance(categories.dtype, DatetimeTZDtype):
+ # Avoid future warning.
+ categories = categories.astype("datetime64[ns]")
+
+ cat_array = hash_array(np.asarray(categories), categorize=False)
+
if ordered:
cat_array = np.vstack(
[cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)]
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index 1d6e02254e44a..57c1357df44d7 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -264,8 +264,6 @@ def hash_array(
# First, turn whatever array this is into unsigned 64-bit ints, if we can
# manage it.
- elif isinstance(dtype, np.bool):
- vals = vals.astype("u8")
elif issubclass(dtype.type, (np.datetime64, np.timedelta64)):
vals = vals.view("i8").astype("u8", copy=False)
elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8:
| https://api.github.com/repos/pandas-dev/pandas/pulls/33678 | 2020-04-20T16:45:14Z | 2020-04-20T19:20:12Z | null | 2020-04-21T22:58:49Z | |
BUG: value_counts not working correctly on ExtensionArrays | diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 22e53dbc89f01..d8ba6ced187cb 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -28,6 +28,18 @@ def test_value_counts(self, all_data, dropna):
self.assert_series_equal(result, expected)
+ def test_value_counts_with_normalize(self, data):
+ # GH 33172
+ data = data[:10].unique()
+ values = np.array(data[~data.isna()])
+
+ result = (
+ pd.Series(data, dtype=data.dtype).value_counts(normalize=True).sort_index()
+ )
+
+ expected = pd.Series([1 / len(values)] * len(values), index=result.index)
+ self.assert_series_equal(result, expected)
+
def test_count(self, data_missing):
df = pd.DataFrame({"A": data_missing})
result = df.count(axis="columns")
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index f4ffcb8d0f109..233b658d29782 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -181,6 +181,10 @@ def test_value_counts(self, all_data, dropna):
tm.assert_series_equal(result, expected)
+ @pytest.mark.xfail(reason="value_counts not implemented yet.")
+ def test_value_counts_with_normalize(self, data):
+ return super().test_value_counts_with_normalize(data)
+
class TestCasting(BaseDecimal, base.BaseCastingTests):
pass
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index d086896fb09c3..85a2ec8bc70ac 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -193,6 +193,10 @@ class TestMethods(BaseJSON, base.BaseMethodsTests):
def test_value_counts(self, all_data, dropna):
pass
+ @unhashable
+ def test_value_counts_with_normalize(self, data):
+ pass
+
@unhashable
def test_sort_values_frame(self):
# TODO (EA.factorize): see if _values_for_factorize allows this.
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index aa5a99282131a..1c887cc4371b6 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -199,6 +199,10 @@ class TestMethods(BaseNumPyTests, base.BaseMethodsTests):
def test_value_counts(self, all_data, dropna):
pass
+ @pytest.mark.xfail(reason="not working. will be covered by #32028")
+ def test_value_counts_with_normalize(self, data):
+ return super().test_value_counts_with_normalize(data)
+
@pytest.mark.skip(reason="Incorrect expected")
# We have a bool dtype, so the result is an ExtensionArray
# but expected is not
| - [ ] closes #33172
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33674 | 2020-04-20T13:10:13Z | 2020-05-01T00:34:44Z | 2020-05-01T00:34:43Z | 2020-05-01T00:58:27Z |
CLN,TYP: Use subsitutions in info docstrings | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 85bb47485a2e7..57c315af09e8d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2326,9 +2326,116 @@ def to_html(
)
# ----------------------------------------------------------------------
+ @Substitution(
+ klass="DataFrame",
+ type_sub=" and columns",
+ max_cols_sub=(
+ """max_cols : int, optional
+ When to switch from the verbose to the truncated output. If the
+ DataFrame has more than `max_cols` columns, the truncated output
+ is used. By default, the setting in
+ ``pandas.options.display.max_info_columns`` is used.
+ """
+ ),
+ examples_sub=(
+ """
+ >>> int_values = [1, 2, 3, 4, 5]
+ >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
+ >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
+ >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
+ ... "float_col": float_values})
+ >>> df
+ int_col text_col float_col
+ 0 1 alpha 0.00
+ 1 2 beta 0.25
+ 2 3 gamma 0.50
+ 3 4 delta 0.75
+ 4 5 epsilon 1.00
+
+ Prints information of all columns:
+
+ >>> df.info(verbose=True)
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 5 entries, 0 to 4
+ Data columns (total 3 columns):
+ # Column Non-Null Count Dtype
+ --- ------ -------------- -----
+ 0 int_col 5 non-null int64
+ 1 text_col 5 non-null object
+ 2 float_col 5 non-null float64
+ dtypes: float64(1), int64(1), object(1)
+ memory usage: 248.0+ bytes
+
+ Prints a summary of columns count and its dtypes but not per column
+ information:
+
+ >>> df.info(verbose=False)
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 5 entries, 0 to 4
+ Columns: 3 entries, int_col to float_col
+ dtypes: float64(1), int64(1), object(1)
+ memory usage: 248.0+ bytes
+
+ Pipe output of DataFrame.info to buffer instead of sys.stdout, get
+ buffer content and writes to a text file:
+
+ >>> import io
+ >>> buffer = io.StringIO()
+ >>> df.info(buf=buffer)
+ >>> s = buffer.getvalue()
+ >>> with open("df_info.txt", "w",
+ ... encoding="utf-8") as f: # doctest: +SKIP
+ ... f.write(s)
+ 260
+
+ The `memory_usage` parameter allows deep introspection mode, specially
+ useful for big DataFrames and fine-tune memory optimization:
+
+ >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
+ >>> df = pd.DataFrame({
+ ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
+ ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
+ ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
+ ... })
+ >>> df.info()
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 1000000 entries, 0 to 999999
+ Data columns (total 3 columns):
+ # Column Non-Null Count Dtype
+ --- ------ -------------- -----
+ 0 column_1 1000000 non-null object
+ 1 column_2 1000000 non-null object
+ 2 column_3 1000000 non-null object
+ dtypes: object(3)
+ memory usage: 22.9+ MB
+
+ >>> df.info(memory_usage='deep')
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 1000000 entries, 0 to 999999
+ Data columns (total 3 columns):
+ # Column Non-Null Count Dtype
+ --- ------ -------------- -----
+ 0 column_1 1000000 non-null object
+ 1 column_2 1000000 non-null object
+ 2 column_3 1000000 non-null object
+ dtypes: object(3)
+ memory usage: 188.8 MB"""
+ ),
+ see_also_sub=(
+ """
+ DataFrame.describe: Generate descriptive statistics of DataFrame
+ columns.
+ DataFrame.memory_usage: Memory usage of DataFrame columns."""
+ ),
+ )
@doc(info)
def info(
- self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
+ self,
+ verbose: Optional[bool] = None,
+ buf: Optional[IO[str]] = None,
+ max_cols: Optional[int] = None,
+ memory_usage: Optional[Union[bool, str]] = None,
+ null_counts: Optional[bool] = None,
) -> None:
return info(self, verbose, buf, max_cols, memory_usage, null_counts)
diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py
index 7b5e553cf394e..d68a1fdde8da9 100644
--- a/pandas/io/formats/info.py
+++ b/pandas/io/formats/info.py
@@ -1,7 +1,10 @@
import sys
+from typing import IO, Optional, Union
from pandas._config import get_option
+from pandas._typing import FrameOrSeries
+
from pandas.io.formats import format as fmt
from pandas.io.formats.printing import pprint_thing
@@ -11,18 +14,23 @@ def _put_str(s, space):
def info(
- data, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
+ data: FrameOrSeries,
+ verbose: Optional[bool] = None,
+ buf: Optional[IO[str]] = None,
+ max_cols: Optional[int] = None,
+ memory_usage: Optional[Union[bool, str]] = None,
+ null_counts: Optional[bool] = None,
) -> None:
"""
- Print a concise summary of a DataFrame.
+ Print a concise summary of a %(klass)s.
- This method prints information about a DataFrame including
- the index dtype and column dtypes, non-null values and memory usage.
+ This method prints information about a %(klass)s including
+ the index dtype%(type_sub)s, non-null values and memory usage.
Parameters
----------
- data : DataFrame
- DataFrame to print information about.
+ data : %(klass)s
+ %(klass)s to print information about.
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
@@ -30,13 +38,9 @@ def info(
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
- max_cols : int, optional
- When to switch from the verbose to the truncated output. If the
- DataFrame has more than `max_cols` columns, the truncated output
- is used. By default, the setting in
- ``pandas.options.display.max_info_columns`` is used.
+ %(max_cols_sub)s
memory_usage : bool, str, optional
- Specifies whether total memory usage of the DataFrame
+ Specifies whether total memory usage of the %(klass)s
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
@@ -50,7 +54,7 @@ def info(
at the cost of computational resources.
null_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
- only if the frame is smaller than
+ only if the %(klass)s is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
@@ -58,97 +62,15 @@ def info(
Returns
-------
None
- This method prints a summary of a DataFrame and returns None.
+ This method prints a summary of a %(klass)s and returns None.
See Also
--------
- DataFrame.describe: Generate descriptive statistics of DataFrame
- columns.
- DataFrame.memory_usage: Memory usage of DataFrame columns.
+ %(see_also_sub)s
Examples
--------
- >>> int_values = [1, 2, 3, 4, 5]
- >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
- >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
- >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
- ... "float_col": float_values})
- >>> df
- int_col text_col float_col
- 0 1 alpha 0.00
- 1 2 beta 0.25
- 2 3 gamma 0.50
- 3 4 delta 0.75
- 4 5 epsilon 1.00
-
- Prints information of all columns:
-
- >>> df.info(verbose=True)
- <class 'pandas.core.frame.DataFrame'>
- RangeIndex: 5 entries, 0 to 4
- Data columns (total 3 columns):
- # Column Non-Null Count Dtype
- --- ------ -------------- -----
- 0 int_col 5 non-null int64
- 1 text_col 5 non-null object
- 2 float_col 5 non-null float64
- dtypes: float64(1), int64(1), object(1)
- memory usage: 248.0+ bytes
-
- Prints a summary of columns count and its dtypes but not per column
- information:
-
- >>> df.info(verbose=False)
- <class 'pandas.core.frame.DataFrame'>
- RangeIndex: 5 entries, 0 to 4
- Columns: 3 entries, int_col to float_col
- dtypes: float64(1), int64(1), object(1)
- memory usage: 248.0+ bytes
-
- Pipe output of DataFrame.info to buffer instead of sys.stdout, get
- buffer content and writes to a text file:
-
- >>> import io
- >>> buffer = io.StringIO()
- >>> df.info(buf=buffer)
- >>> s = buffer.getvalue()
- >>> with open("df_info.txt", "w",
- ... encoding="utf-8") as f: # doctest: +SKIP
- ... f.write(s)
- 260
-
- The `memory_usage` parameter allows deep introspection mode, specially
- useful for big DataFrames and fine-tune memory optimization:
-
- >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
- >>> df = pd.DataFrame({
- ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
- ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
- ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
- ... })
- >>> df.info()
- <class 'pandas.core.frame.DataFrame'>
- RangeIndex: 1000000 entries, 0 to 999999
- Data columns (total 3 columns):
- # Column Non-Null Count Dtype
- --- ------ -------------- -----
- 0 column_1 1000000 non-null object
- 1 column_2 1000000 non-null object
- 2 column_3 1000000 non-null object
- dtypes: object(3)
- memory usage: 22.9+ MB
-
- >>> df.info(memory_usage='deep')
- <class 'pandas.core.frame.DataFrame'>
- RangeIndex: 1000000 entries, 0 to 999999
- Data columns (total 3 columns):
- # Column Non-Null Count Dtype
- --- ------ -------------- -----
- 0 column_1 1000000 non-null object
- 1 column_2 1000000 non-null object
- 2 column_3 1000000 non-null object
- dtypes: object(3)
- memory usage: 188.8 MB
+ %(examples_sub)s
"""
if buf is None: # pragma: no cover
buf = sys.stdout
| Another precursor to #31796, which should make it easier to review/merge
----
screenshots of docs




| https://api.github.com/repos/pandas-dev/pandas/pulls/33673 | 2020-04-20T10:59:19Z | 2020-04-22T14:13:21Z | 2020-04-22T14:13:21Z | 2020-04-22T16:11:58Z |
Solve missing interpolation method (cubicspline) | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 07849702c646d..cd1cb0b64f74a 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -99,6 +99,7 @@ Other enhancements
``df.to_csv(path, compression={'method': 'gzip', 'compresslevel': 1}``
(:issue:`33196`)
- :meth:`~pandas.core.groupby.GroupBy.transform` has gained ``engine`` and ``engine_kwargs`` arguments that supports executing functions with ``Numba`` (:issue:`32854`)
+- :meth:`~pandas.core.resample.Resampler.interpolate` now supports SciPy interpolation method :class:`scipy.interpolate.CubicSpline` as method ``cubicspline`` (:issue:`33670`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2f35a5b6f9a7e..9b4854d8024a8 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6671,9 +6671,9 @@ def replace(
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
- * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
- Wrappers around the SciPy interpolation methods of similar
- names. See `Notes`.
+ * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima',
+ 'cubicspline': Wrappers around the SciPy interpolation methods of
+ similar names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index c46aed999f45a..2acaa808d8324 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -112,6 +112,7 @@ def clean_interp_method(method, **kwargs):
"akima",
"spline",
"from_derivatives",
+ "cubicspline",
]
if method in ("spline", "polynomial") and order is None:
raise ValueError("You must specify the order of the spline or polynomial.")
@@ -293,6 +294,7 @@ def interpolate_1d(
"piecewise_polynomial",
"pchip",
"akima",
+ "cubicspline",
]
if method in sp_methods:
@@ -341,14 +343,11 @@ def _interpolate_scipy_wrapper(
x, new_x = x._values.astype("i8"), new_x.astype("i8")
if method == "pchip":
- try:
- alt_methods["pchip"] = interpolate.pchip_interpolate
- except AttributeError as err:
- raise ImportError(
- "Your version of Scipy does not support PCHIP interpolation."
- ) from err
+ alt_methods["pchip"] = interpolate.pchip_interpolate
elif method == "akima":
alt_methods["akima"] = _akima_interpolate
+ elif method == "cubicspline":
+ alt_methods["cubicspline"] = _cubicspline_interpolate
interp1d_methods = [
"nearest",
@@ -406,7 +405,7 @@ def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
- list of derivatives to extract. This numberincludes the function
+ list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
Whether to extrapolate to ouf-of-bounds points based on first and last
@@ -446,8 +445,7 @@ def _akima_interpolate(xi, yi, x, der=0, axis=0):
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
- x : scalar or array_like
- Of length M.
+ x : scalar or array_like of length M.
der : int or list, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
@@ -478,6 +476,85 @@ def _akima_interpolate(xi, yi, x, der=0, axis=0):
return [P(x, nu) for nu in der]
+def _cubicspline_interpolate(xi, yi, x, axis=0, bc_type="not-a-knot", extrapolate=None):
+ """
+ Convenience function for cubic spline data interpolator.
+
+ See `scipy.interpolate.CubicSpline` for details.
+
+ Parameters
+ ----------
+ xi : array_like, shape (n,)
+ 1-d array containing values of the independent variable.
+ Values must be real, finite and in strictly increasing order.
+ yi : array_like
+ Array containing values of the dependent variable. It can have
+ arbitrary number of dimensions, but the length along ``axis``
+ (see below) must match the length of ``x``. Values must be finite.
+ x : scalar or array_like, shape (m,)
+ axis : int, optional
+ Axis along which `y` is assumed to be varying. Meaning that for
+ ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
+ Default is 0.
+ bc_type : string or 2-tuple, optional
+ Boundary condition type. Two additional equations, given by the
+ boundary conditions, are required to determine all coefficients of
+ polynomials on each segment [2]_.
+ If `bc_type` is a string, then the specified condition will be applied
+ at both ends of a spline. Available conditions are:
+ * 'not-a-knot' (default): The first and second segment at a curve end
+ are the same polynomial. It is a good default when there is no
+ information on boundary conditions.
+ * 'periodic': The interpolated functions is assumed to be periodic
+ of period ``x[-1] - x[0]``. The first and last value of `y` must be
+ identical: ``y[0] == y[-1]``. This boundary condition will result in
+ ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
+ * 'clamped': The first derivative at curves ends are zero. Assuming
+ a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
+ * 'natural': The second derivative at curve ends are zero. Assuming
+ a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
+ If `bc_type` is a 2-tuple, the first and the second value will be
+ applied at the curve start and end respectively. The tuple values can
+ be one of the previously mentioned strings (except 'periodic') or a
+ tuple `(order, deriv_values)` allowing to specify arbitrary
+ derivatives at curve ends:
+ * `order`: the derivative order, 1 or 2.
+ * `deriv_value`: array_like containing derivative values, shape must
+ be the same as `y`, excluding ``axis`` dimension. For example, if
+ `y` is 1D, then `deriv_value` must be a scalar. If `y` is 3D with
+ the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
+ and have the shape (n0, n1).
+ extrapolate : {bool, 'periodic', None}, optional
+ If bool, determines whether to extrapolate to out-of-bounds points
+ based on first and last intervals, or to return NaNs. If 'periodic',
+ periodic extrapolation is used. If None (default), ``extrapolate`` is
+ set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
+
+ See Also
+ --------
+ scipy.interpolate.CubicHermiteSpline
+
+ Returns
+ -------
+ y : scalar or array_like
+ The result, of shape (m,)
+
+ References
+ ----------
+ .. [1] `Cubic Spline Interpolation
+ <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
+ on Wikiversity.
+ .. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
+ """
+ from scipy import interpolate
+
+ P = interpolate.CubicSpline(
+ xi, yi, axis=axis, bc_type=bc_type, extrapolate=extrapolate
+ )
+
+ return P(x)
+
+
def interpolate_2d(
values, method="pad", axis=0, limit=None, fill_value=None, dtype=None
):
diff --git a/pandas/tests/series/methods/test_interpolate.py b/pandas/tests/series/methods/test_interpolate.py
index 6844225a81a8f..b26cb21bc5f3d 100644
--- a/pandas/tests/series/methods/test_interpolate.py
+++ b/pandas/tests/series/methods/test_interpolate.py
@@ -26,6 +26,7 @@
"from_derivatives",
"pchip",
"akima",
+ "cubicspline",
]
)
def nontemporal_method(request):
@@ -55,6 +56,7 @@ def nontemporal_method(request):
"from_derivatives",
"pchip",
"akima",
+ "cubicspline",
]
)
def interp_methods_ind(request):
@@ -97,6 +99,22 @@ def test_interpolate_time_raises_for_non_timeseries(self):
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method="time")
+ @td.skip_if_no_scipy
+ def test_interpolate_cubicspline(self):
+
+ ser = Series([10, 11, 12, 13])
+
+ expected = Series(
+ [11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
+ index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
+ )
+ # interpolate at new_index
+ new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
+ float
+ )
+ result = ser.reindex(new_index).interpolate(method="cubicspline")[1:3]
+ tm.assert_series_equal(result, expected)
+
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
| By commit 8bb2cc1 scipy.interpolate.CubicSpline method is
referenced in the pandas documentation (see pandas/core/generic.py)
but it is not wrapped by any interpolation method. This PR solves
this by adding the corresponding wrapper. SciPy's CubicSpline
is a cubic spline data interpolator that allows explicit control
of the boundary conditions for the interval.
Changes to be committed:
modified: pandas/core/missing.py
modified: pandas/tests/series/methods/test_interpolate.py
- [x] closes #xxxx (no issue opened but PR #20270 commented instead)
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33670 | 2020-04-20T06:35:41Z | 2020-04-23T17:39:03Z | 2020-04-23T17:39:03Z | 2020-04-23T20:10:28Z |
CLN: make Categorical.codes a simpler property | diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index c5cac0cfeef6c..cdd0717849e96 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -199,17 +199,6 @@ def contains(cat, key, container):
return any(loc_ in container for loc_ in loc)
-_codes_doc = """
-The category codes of this categorical.
-
-Level codes are an array if integer which are the positions of the real
-values in the categories array.
-
-There is not setter, use the other categorical methods and the normal item
-setter to change values in the categorical.
-"""
-
-
class Categorical(ExtensionArray, PandasObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
@@ -652,27 +641,26 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
return cls(codes, dtype=dtype, fastpath=True)
- def _get_codes(self):
+ @property
+ def codes(self) -> np.ndarray:
"""
- Get the codes.
+ The category codes of this categorical.
+
+ Codes are an array of integers which are the positions of the actual
+ values in the categories array.
+
+ There is no setter, use the other categorical methods and the normal item
+ setter to change values in the categorical.
Returns
-------
- codes : integer array view
- A non writable view of the `codes` array.
+ ndarray[int]
+ A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
- def _set_codes(self, codes):
- """
- Not settable by the user directly
- """
- raise ValueError("cannot set Categorical codes directly")
-
- codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
-
def _set_categories(self, categories, fastpath=False):
"""
Sets new categories inplace
diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py
index 691230620c2e8..6fce4b4145ff2 100644
--- a/pandas/tests/arrays/categorical/test_api.py
+++ b/pandas/tests/arrays/categorical/test_api.py
@@ -464,7 +464,7 @@ def test_codes_immutable(self):
tm.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
- with pytest.raises(ValueError, match="cannot set Categorical codes directly"):
+ with pytest.raises(AttributeError, match="can't set attribute"):
c.codes = np.array([0, 1, 2, 0, 1], dtype="int8")
# changes in the codes array should raise
| https://api.github.com/repos/pandas-dev/pandas/pulls/33667 | 2020-04-20T03:10:11Z | 2020-04-20T15:20:52Z | 2020-04-20T15:20:52Z | 2020-04-20T15:29:47Z | |
CLN: make DTA/TDA check clearer | diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 203ea2152886a..067ff32b85862 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -80,8 +80,7 @@ def wrapper(left, right):
cache=True,
)
@inherit_names(
- ["mean", "freq", "freqstr", "asi8", "_box_values", "_box_func"],
- DatetimeLikeArrayMixin,
+ ["mean", "freq", "freqstr", "asi8", "_box_func"], DatetimeLikeArrayMixin,
)
class DatetimeIndexOpsMixin(ExtensionIndex):
"""
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index d411867af2ef8..ce5008630e53d 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -37,7 +37,7 @@
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
-from pandas.core.dtypes.generic import ABCDataFrame
+from pandas.core.dtypes.generic import ABCDataFrame, ABCDatetimeIndex, ABCTimedeltaIndex
from pandas.core.dtypes.missing import array_equivalent, isna
import pandas.core.algorithms as algos
@@ -653,7 +653,9 @@ def values(self):
vals = self._get_level_values(i)
if is_categorical_dtype(vals):
vals = vals._internal_get_values()
- if isinstance(vals.dtype, ExtensionDtype) or hasattr(vals, "_box_values"):
+ if isinstance(vals.dtype, ExtensionDtype) or isinstance(
+ vals, (ABCDatetimeIndex, ABCTimedeltaIndex)
+ ):
vals = vals.astype(object)
vals = np.array(vals, copy=False)
values.append(vals)
| https://api.github.com/repos/pandas-dev/pandas/pulls/33665 | 2020-04-20T01:13:28Z | 2020-04-20T22:38:27Z | 2020-04-20T22:38:27Z | 2021-04-22T21:01:22Z | |
TST: CI checks against direct imports from conftest.py | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 45b7db74fa409..427be459d9edc 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -150,7 +150,13 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
# Check for imports from pandas._testing instead of `import pandas._testing as tm`
invgrep -R --include="*.py*" -E "from pandas._testing import" pandas/tests
RET=$(($RET + $?)) ; echo $MSG "DONE"
- invgrep -R --include="*.py*" -E "from pandas.util import testing as tm" pandas/tests
+ invgrep -R --include="*.py*" -E "from pandas import _testing as tm" pandas/tests
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
+
+ # No direct imports from conftest
+ invgrep -R --include="*.py*" -E "conftest import" pandas/tests
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
+ invgrep -R --include="*.py*" -E "import conftest" pandas/tests
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Check for use of exec' ; echo $MSG
| - [x] closes #30914
| https://api.github.com/repos/pandas-dev/pandas/pulls/33664 | 2020-04-20T00:47:16Z | 2020-04-20T22:34:37Z | 2020-04-20T22:34:36Z | 2020-04-20T22:34:40Z |
REF: use array_algos shift for Categorical.shift | diff --git a/pandas/core/array_algos/transforms.py b/pandas/core/array_algos/transforms.py
index f775b6d733d9c..b8b234d937292 100644
--- a/pandas/core/array_algos/transforms.py
+++ b/pandas/core/array_algos/transforms.py
@@ -10,6 +10,10 @@
def shift(values: np.ndarray, periods: int, axis: int, fill_value) -> np.ndarray:
new_values = values
+ if periods == 0:
+ # TODO: should we copy here?
+ return new_values
+
# make sure array sent to np.roll is c_contiguous
f_ordered = values.flags.f_contiguous
if f_ordered:
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index af07dee3b6838..c5cac0cfeef6c 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -27,7 +27,6 @@
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
- ensure_platform_int,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
@@ -51,6 +50,7 @@
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import _get_data_algo, factorize, take, take_1d, unique1d
+from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
@@ -1241,23 +1241,41 @@ def shift(self, periods, fill_value=None):
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
- if np.prod(codes.shape) and (periods != 0):
- codes = np.roll(codes, ensure_platform_int(periods), axis=0)
- if isna(fill_value):
- fill_value = -1
- elif fill_value in self.categories:
- fill_value = self.categories.get_loc(fill_value)
- else:
- raise ValueError(
- f"'fill_value={fill_value}' is not present "
- "in this Categorical's categories"
- )
- if periods > 0:
- codes[:periods] = fill_value
- else:
- codes[periods:] = fill_value
- return self.from_codes(codes, dtype=self.dtype)
+ fill_value = self._validate_fill_value(fill_value)
+
+ codes = shift(codes.copy(), periods, axis=0, fill_value=fill_value)
+
+ return self._constructor(codes, dtype=self.dtype, fastpath=True)
+
+ def _validate_fill_value(self, fill_value):
+ """
+ Convert a user-facing fill_value to a representation to use with our
+ underlying ndarray, raising ValueError if this is not possible.
+
+ Parameters
+ ----------
+ fill_value : object
+
+ Returns
+ -------
+ fill_value : int
+
+ Raises
+ ------
+ ValueError
+ """
+
+ if isna(fill_value):
+ fill_value = -1
+ elif fill_value in self.categories:
+ fill_value = self.categories.get_loc(fill_value)
+ else:
+ raise ValueError(
+ f"'fill_value={fill_value}' is not present "
+ "in this Categorical's categories"
+ )
+ return fill_value
def __array__(self, dtype=None) -> np.ndarray:
"""
@@ -1835,24 +1853,12 @@ def take(self, indexer, allow_fill: bool = False, fill_value=None):
"""
indexer = np.asarray(indexer, dtype=np.intp)
- dtype = self.dtype
-
- if isna(fill_value):
- fill_value = -1
- elif allow_fill:
+ if allow_fill:
# convert user-provided `fill_value` to codes
- if fill_value in self.categories:
- fill_value = self.categories.get_loc(fill_value)
- else:
- msg = (
- f"'fill_value' ('{fill_value}') is not in this "
- "Categorical's categories."
- )
- raise TypeError(msg)
+ fill_value = self._validate_fill_value(fill_value)
codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
- result = type(self).from_codes(codes, dtype=dtype)
- return result
+ return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill: bool = False, fill_value=None):
# GH#27745 deprecate alias that other EAs dont have
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index e3cdc898a88bf..430f20b359f8b 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -769,6 +769,7 @@ def shift(self, periods=1, fill_value=None, axis=0):
if not self.size or periods == 0:
return self.copy()
+ # TODO(2.0): once this deprecation is enforced, used _validate_fill_value
if is_valid_nat_for_dtype(fill_value, self.dtype):
fill_value = NaT
elif not isinstance(fill_value, self._recognized_scalars):
diff --git a/pandas/tests/arrays/categorical/test_algos.py b/pandas/tests/arrays/categorical/test_algos.py
index 325fa476d70e6..45e0d503f30e7 100644
--- a/pandas/tests/arrays/categorical/test_algos.py
+++ b/pandas/tests/arrays/categorical/test_algos.py
@@ -184,8 +184,8 @@ def test_take_fill_value(self):
def test_take_fill_value_new_raises(self):
# https://github.com/pandas-dev/pandas/issues/23296
cat = pd.Categorical(["a", "b", "c"])
- xpr = r"'fill_value' \('d'\) is not in this Categorical's categories."
- with pytest.raises(TypeError, match=xpr):
+ xpr = r"'fill_value=d' is not present in this Categorical's categories"
+ with pytest.raises(ValueError, match=xpr):
cat.take([0, 1, -1], fill_value="d", allow_fill=True)
def test_take_nd_deprecated(self):
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index 9d3c40ce926d7..2e707342a0793 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -320,9 +320,9 @@ def test_unstack_fill_frame_categorical(self):
)
tm.assert_frame_equal(result, expected)
- # Fill with non-category results in a TypeError
- msg = r"'fill_value' \('d'\) is not in"
- with pytest.raises(TypeError, match=msg):
+ # Fill with non-category results in a ValueError
+ msg = r"'fill_value=d' is not present in"
+ with pytest.raises(ValueError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
| We have duplicate code in Categorical.shift and Categorical.take that can be refactored out into Categorical._validate_fill_value (which is the name that DTA/TDA/PA use for the same method, which I intend to share eventually, xref #33660). From there the rest of Categorical.shift can dispatch to array_algos.transforms.shift.
The logic changed here is a TypeError becoming a ValueError (which matches what DTA/TDA/PA do) | https://api.github.com/repos/pandas-dev/pandas/pulls/33663 | 2020-04-19T23:51:41Z | 2020-04-20T01:21:05Z | 2020-04-20T01:21:05Z | 2020-04-20T01:35:37Z |
TST: Dtype constants into pandas._testing to avoid direct imports from conftest | diff --git a/pandas/_testing.py b/pandas/_testing.py
index 1f6b645c821c8..4f957b7a55e3a 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -22,7 +22,7 @@
)
import pandas._libs.testing as _testing
-from pandas._typing import FilePathOrBuffer, FrameOrSeries
+from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
@@ -73,6 +73,37 @@
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
+UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
+UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
+SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
+SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
+ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
+ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
+
+FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
+COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
+STRING_DTYPES: List[Dtype] = [str, "str", "U"]
+
+DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
+TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
+
+BOOL_DTYPES = [bool, "bool"]
+BYTES_DTYPES = [bytes, "bytes"]
+OBJECT_DTYPES = [object, "object"]
+
+ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
+ALL_NUMPY_DTYPES = (
+ ALL_REAL_DTYPES
+ + COMPLEX_DTYPES
+ + STRING_DTYPES
+ + DATETIME64_DTYPES
+ + TIMEDELTA64_DTYPES
+ + BOOL_DTYPES
+ + OBJECT_DTYPES
+ + BYTES_DTYPES
+)
+
+
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
diff --git a/pandas/conftest.py b/pandas/conftest.py
index e1088dae3925a..70be6b5d9fcbc 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -23,7 +23,6 @@
from decimal import Decimal
import operator
import os
-from typing import List
from dateutil.tz import tzlocal, tzutc
import hypothesis
@@ -32,7 +31,6 @@
import pytest
from pytz import FixedOffset, utc
-from pandas._typing import Dtype
import pandas.util._test_decorators as td
import pandas as pd
@@ -864,39 +862,7 @@ def utc_fixture(request):
# ----------------------------------------------------------------
# Dtypes
# ----------------------------------------------------------------
-
-UNSIGNED_INT_DTYPES = ["uint8", "uint16", "uint32", "uint64"]
-UNSIGNED_EA_INT_DTYPES = ["UInt8", "UInt16", "UInt32", "UInt64"]
-SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
-SIGNED_EA_INT_DTYPES = ["Int8", "Int16", "Int32", "Int64"]
-ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
-ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
-
-FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
-COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
-STRING_DTYPES: List[Dtype] = [str, "str", "U"]
-
-DATETIME64_DTYPES = ["datetime64[ns]", "M8[ns]"]
-TIMEDELTA64_DTYPES = ["timedelta64[ns]", "m8[ns]"]
-
-BOOL_DTYPES = [bool, "bool"]
-BYTES_DTYPES = [bytes, "bytes"]
-OBJECT_DTYPES = [object, "object"]
-
-ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
-ALL_NUMPY_DTYPES = (
- ALL_REAL_DTYPES
- + COMPLEX_DTYPES
- + STRING_DTYPES
- + DATETIME64_DTYPES
- + TIMEDELTA64_DTYPES
- + BOOL_DTYPES
- + OBJECT_DTYPES
- + BYTES_DTYPES
-)
-
-
-@pytest.fixture(params=STRING_DTYPES)
+@pytest.fixture(params=tm.STRING_DTYPES)
def string_dtype(request):
"""
Parametrized fixture for string dtypes.
@@ -908,7 +874,7 @@ def string_dtype(request):
return request.param
-@pytest.fixture(params=BYTES_DTYPES)
+@pytest.fixture(params=tm.BYTES_DTYPES)
def bytes_dtype(request):
"""
Parametrized fixture for bytes dtypes.
@@ -919,7 +885,7 @@ def bytes_dtype(request):
return request.param
-@pytest.fixture(params=OBJECT_DTYPES)
+@pytest.fixture(params=tm.OBJECT_DTYPES)
def object_dtype(request):
"""
Parametrized fixture for object dtypes.
@@ -930,7 +896,7 @@ def object_dtype(request):
return request.param
-@pytest.fixture(params=DATETIME64_DTYPES)
+@pytest.fixture(params=tm.DATETIME64_DTYPES)
def datetime64_dtype(request):
"""
Parametrized fixture for datetime64 dtypes.
@@ -941,7 +907,7 @@ def datetime64_dtype(request):
return request.param
-@pytest.fixture(params=TIMEDELTA64_DTYPES)
+@pytest.fixture(params=tm.TIMEDELTA64_DTYPES)
def timedelta64_dtype(request):
"""
Parametrized fixture for timedelta64 dtypes.
@@ -952,7 +918,7 @@ def timedelta64_dtype(request):
return request.param
-@pytest.fixture(params=FLOAT_DTYPES)
+@pytest.fixture(params=tm.FLOAT_DTYPES)
def float_dtype(request):
"""
Parameterized fixture for float dtypes.
@@ -964,7 +930,7 @@ def float_dtype(request):
return request.param
-@pytest.fixture(params=COMPLEX_DTYPES)
+@pytest.fixture(params=tm.COMPLEX_DTYPES)
def complex_dtype(request):
"""
Parameterized fixture for complex dtypes.
@@ -976,7 +942,7 @@ def complex_dtype(request):
return request.param
-@pytest.fixture(params=SIGNED_INT_DTYPES)
+@pytest.fixture(params=tm.SIGNED_INT_DTYPES)
def sint_dtype(request):
"""
Parameterized fixture for signed integer dtypes.
@@ -990,7 +956,7 @@ def sint_dtype(request):
return request.param
-@pytest.fixture(params=UNSIGNED_INT_DTYPES)
+@pytest.fixture(params=tm.UNSIGNED_INT_DTYPES)
def uint_dtype(request):
"""
Parameterized fixture for unsigned integer dtypes.
@@ -1003,7 +969,7 @@ def uint_dtype(request):
return request.param
-@pytest.fixture(params=ALL_INT_DTYPES)
+@pytest.fixture(params=tm.ALL_INT_DTYPES)
def any_int_dtype(request):
"""
Parameterized fixture for any integer dtype.
@@ -1021,7 +987,7 @@ def any_int_dtype(request):
return request.param
-@pytest.fixture(params=ALL_EA_INT_DTYPES)
+@pytest.fixture(params=tm.ALL_EA_INT_DTYPES)
def any_nullable_int_dtype(request):
"""
Parameterized fixture for any nullable integer dtype.
@@ -1038,7 +1004,7 @@ def any_nullable_int_dtype(request):
return request.param
-@pytest.fixture(params=ALL_REAL_DTYPES)
+@pytest.fixture(params=tm.ALL_REAL_DTYPES)
def any_real_dtype(request):
"""
Parameterized fixture for any (purely) real numeric dtype.
@@ -1059,7 +1025,7 @@ def any_real_dtype(request):
return request.param
-@pytest.fixture(params=ALL_NUMPY_DTYPES)
+@pytest.fixture(params=tm.ALL_NUMPY_DTYPES)
def any_numpy_dtype(request):
"""
Parameterized fixture for all numpy dtypes.
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 6e73e1542bb80..b9c8f3a8dd494 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -20,14 +20,6 @@
import pandas as pd
import pandas._testing as tm
from pandas.arrays import SparseArray
-from pandas.conftest import (
- ALL_EA_INT_DTYPES,
- ALL_INT_DTYPES,
- SIGNED_EA_INT_DTYPES,
- SIGNED_INT_DTYPES,
- UNSIGNED_EA_INT_DTYPES,
- UNSIGNED_INT_DTYPES,
-)
# EA & Actual Dtypes
@@ -295,10 +287,10 @@ def test_is_string_dtype():
"dtype",
integer_dtypes
+ [pd.Series([1, 2])]
- + ALL_INT_DTYPES
- + to_numpy_dtypes(ALL_INT_DTYPES)
- + ALL_EA_INT_DTYPES
- + to_ea_dtypes(ALL_EA_INT_DTYPES),
+ + tm.ALL_INT_DTYPES
+ + to_numpy_dtypes(tm.ALL_INT_DTYPES)
+ + tm.ALL_EA_INT_DTYPES
+ + to_ea_dtypes(tm.ALL_EA_INT_DTYPES),
)
def test_is_integer_dtype(dtype):
assert com.is_integer_dtype(dtype)
@@ -327,10 +319,10 @@ def test_is_not_integer_dtype(dtype):
"dtype",
signed_integer_dtypes
+ [pd.Series([1, 2])]
- + SIGNED_INT_DTYPES
- + to_numpy_dtypes(SIGNED_INT_DTYPES)
- + SIGNED_EA_INT_DTYPES
- + to_ea_dtypes(SIGNED_EA_INT_DTYPES),
+ + tm.SIGNED_INT_DTYPES
+ + to_numpy_dtypes(tm.SIGNED_INT_DTYPES)
+ + tm.SIGNED_EA_INT_DTYPES
+ + to_ea_dtypes(tm.SIGNED_EA_INT_DTYPES),
)
def test_is_signed_integer_dtype(dtype):
assert com.is_integer_dtype(dtype)
@@ -347,10 +339,10 @@ def test_is_signed_integer_dtype(dtype):
np.array(["a", "b"]),
np.array([], dtype=np.timedelta64),
]
- + UNSIGNED_INT_DTYPES
- + to_numpy_dtypes(UNSIGNED_INT_DTYPES)
- + UNSIGNED_EA_INT_DTYPES
- + to_ea_dtypes(UNSIGNED_EA_INT_DTYPES),
+ + tm.UNSIGNED_INT_DTYPES
+ + to_numpy_dtypes(tm.UNSIGNED_INT_DTYPES)
+ + tm.UNSIGNED_EA_INT_DTYPES
+ + to_ea_dtypes(tm.UNSIGNED_EA_INT_DTYPES),
)
def test_is_not_signed_integer_dtype(dtype):
assert not com.is_signed_integer_dtype(dtype)
@@ -363,10 +355,10 @@ def test_is_not_signed_integer_dtype(dtype):
"dtype",
unsigned_integer_dtypes
+ [pd.Series([1, 2], dtype=np.uint32)]
- + UNSIGNED_INT_DTYPES
- + to_numpy_dtypes(UNSIGNED_INT_DTYPES)
- + UNSIGNED_EA_INT_DTYPES
- + to_ea_dtypes(UNSIGNED_EA_INT_DTYPES),
+ + tm.UNSIGNED_INT_DTYPES
+ + to_numpy_dtypes(tm.UNSIGNED_INT_DTYPES)
+ + tm.UNSIGNED_EA_INT_DTYPES
+ + to_ea_dtypes(tm.UNSIGNED_EA_INT_DTYPES),
)
def test_is_unsigned_integer_dtype(dtype):
assert com.is_unsigned_integer_dtype(dtype)
@@ -383,10 +375,10 @@ def test_is_unsigned_integer_dtype(dtype):
np.array(["a", "b"]),
np.array([], dtype=np.timedelta64),
]
- + SIGNED_INT_DTYPES
- + to_numpy_dtypes(SIGNED_INT_DTYPES)
- + SIGNED_EA_INT_DTYPES
- + to_ea_dtypes(SIGNED_EA_INT_DTYPES),
+ + tm.SIGNED_INT_DTYPES
+ + to_numpy_dtypes(tm.SIGNED_INT_DTYPES)
+ + tm.SIGNED_EA_INT_DTYPES
+ + to_ea_dtypes(tm.SIGNED_EA_INT_DTYPES),
)
def test_is_not_unsigned_integer_dtype(dtype):
assert not com.is_unsigned_integer_dtype(dtype)
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 829ee61197ff2..c9634c4c90809 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -14,7 +14,6 @@
Series,
Timedelta,
Timestamp,
- conftest,
)
import pandas._testing as tm
from pandas.api.types import CategoricalDtype as CDT
@@ -752,9 +751,9 @@ def test_map_with_dict_or_series(self):
[1.5, 2.5, 3.5],
[-1.5, -2.5, -3.5],
# numpy int/uint
- *[np.array([1, 2, 3], dtype=dtype) for dtype in conftest.ALL_INT_DTYPES],
+ *[np.array([1, 2, 3], dtype=dtype) for dtype in tm.ALL_INT_DTYPES],
# numpy floats
- *[np.array([1.5, 2.5, 3.5], dtype=dtyp) for dtyp in conftest.FLOAT_DTYPES],
+ *[np.array([1.5, 2.5, 3.5], dtype=dtyp) for dtyp in tm.FLOAT_DTYPES],
# numpy object
np.array([1, "b", 3.5], dtype=object),
# pandas scalars
@@ -762,7 +761,7 @@ def test_map_with_dict_or_series(self):
[Timestamp(2019, 1, 1), Timestamp(2019, 2, 1), Timestamp(2019, 3, 1)],
[Timedelta(1, "d"), Timedelta(2, "d"), Timedelta(3, "D")],
# pandas Integer arrays
- *[pd.array([1, 2, 3], dtype=dtype) for dtype in conftest.ALL_EA_INT_DTYPES],
+ *[pd.array([1, 2, 3], dtype=dtype) for dtype in tm.ALL_EA_INT_DTYPES],
# other pandas arrays
pd.IntervalIndex.from_breaks([1, 4, 6, 9]).array,
pd.date_range("2019-01-01", periods=3).array,
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index ad7028702ec8c..5f904241da485 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -31,7 +31,6 @@
compat,
)
import pandas._testing as tm
-from pandas.conftest import BYTES_DTYPES, STRING_DTYPES
import pandas.core.algorithms as algos
from pandas.core.arrays import DatetimeArray
import pandas.core.common as com
@@ -362,7 +361,7 @@ def test_on_index_object(self):
def test_dtype_preservation(self, any_numpy_dtype):
# GH 15442
- if any_numpy_dtype in (BYTES_DTYPES + STRING_DTYPES):
+ if any_numpy_dtype in (tm.BYTES_DTYPES + tm.STRING_DTYPES):
pytest.skip("skip string dtype")
elif is_integer_dtype(any_numpy_dtype):
data = [1, 2, 2]
| part of #30914
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
---------------------------------------------------------------------------------------
Took a naive approach to move these constants into `pandas._testing`. Could also move them to a separate module instead. Let me know what you think is best. | https://api.github.com/repos/pandas-dev/pandas/pulls/33661 | 2020-04-19T22:17:43Z | 2020-04-20T00:38:22Z | 2020-04-20T00:38:22Z | 2020-04-20T00:38:26Z |
REF: Implement NDArrayBackedExtensionArray | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 07849702c646d..53432bbc84781 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -190,6 +190,7 @@ Backwards incompatible API changes
Previously a ``UnsupportedFunctionCall`` was raised (``AssertionError`` if ``min_count`` passed into :meth:`~DataFrameGroupby.median`) (:issue:`31485`)
- :meth:`DataFrame.at` and :meth:`Series.at` will raise a ``TypeError`` instead of a ``ValueError`` if an incompatible key is passed, and ``KeyError`` if a missing key is passed, matching the behavior of ``.loc[]`` (:issue:`31722`)
- Passing an integer dtype other than ``int64`` to ``np.array(period_index, dtype=...)`` will now raise ``TypeError`` instead of incorrectly using ``int64`` (:issue:`32255`)
+- Passing an invalid ``fill_value`` to :meth:`Categorical.take` raises a ``ValueError`` instead of ``TypeError`` (:issue:`33660`)
``MultiIndex.get_indexer`` interprets `method` argument differently
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
new file mode 100644
index 0000000000000..0ed9de804c55e
--- /dev/null
+++ b/pandas/core/arrays/_mixins.py
@@ -0,0 +1,62 @@
+from typing import Any, Sequence, TypeVar
+
+import numpy as np
+
+from pandas.errors import AbstractMethodError
+
+from pandas.core.algorithms import take
+from pandas.core.arrays.base import ExtensionArray
+
+_T = TypeVar("_T", bound="NDArrayBackedExtensionArray")
+
+
+class NDArrayBackedExtensionArray(ExtensionArray):
+ """
+ ExtensionArray that is backed by a single NumPy ndarray.
+ """
+
+ _ndarray: np.ndarray
+
+ def _from_backing_data(self: _T, arr: np.ndarray) -> _T:
+ """
+ Construct a new ExtensionArray `new_array` with `arr` as its _ndarray.
+
+ This should round-trip:
+ self == self._from_backing_data(self._ndarray)
+ """
+ raise AbstractMethodError(self)
+
+ # ------------------------------------------------------------------------
+
+ def take(
+ self: _T,
+ indices: Sequence[int],
+ allow_fill: bool = False,
+ fill_value: Any = None,
+ ) -> _T:
+ if allow_fill:
+ fill_value = self._validate_fill_value(fill_value)
+
+ new_data = take(
+ self._ndarray, indices, allow_fill=allow_fill, fill_value=fill_value,
+ )
+ return self._from_backing_data(new_data)
+
+ def _validate_fill_value(self, fill_value):
+ """
+ If a fill_value is passed to `take` convert it to a representation
+ suitable for self._ndarray, raising ValueError if this is not possible.
+
+ Parameters
+ ----------
+ fill_value : object
+
+ Returns
+ -------
+ fill_value : native representation
+
+ Raises
+ ------
+ ValueError
+ """
+ raise AbstractMethodError(self)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index cdd0717849e96..0bb6ca8315a3a 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -49,9 +49,10 @@
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
-from pandas.core.algorithms import _get_data_algo, factorize, take, take_1d, unique1d
+from pandas.core.algorithms import _get_data_algo, factorize, take_1d, unique1d
from pandas.core.array_algos.transforms import shift
-from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
+from pandas.core.arrays._mixins import _T, NDArrayBackedExtensionArray
+from pandas.core.arrays.base import _extension_array_shared_docs
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
from pandas.core.construction import array, extract_array, sanitize_array
@@ -199,7 +200,7 @@ def contains(cat, key, container):
return any(loc_ in container for loc_ in loc)
-class Categorical(ExtensionArray, PandasObject):
+class Categorical(NDArrayBackedExtensionArray, PandasObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
@@ -1238,7 +1239,7 @@ def shift(self, periods, fill_value=None):
def _validate_fill_value(self, fill_value):
"""
- Convert a user-facing fill_value to a representation to use with our
+ Convert a user-facing fill_value to a representation to use with our
underlying ndarray, raising ValueError if this is not possible.
Parameters
@@ -1768,7 +1769,7 @@ def fillna(self, value=None, method=None, limit=None):
return self._constructor(codes, dtype=self.dtype, fastpath=True)
- def take(self, indexer, allow_fill: bool = False, fill_value=None):
+ def take(self: _T, indexer, allow_fill: bool = False, fill_value=None) -> _T:
"""
Take elements from the Categorical.
@@ -1837,16 +1838,23 @@ def take(self, indexer, allow_fill: bool = False, fill_value=None):
Categories (2, object): [a, b]
Specifying a fill value that's not in ``self.categories``
- will raise a ``TypeError``.
+ will raise a ``ValueError``.
"""
- indexer = np.asarray(indexer, dtype=np.intp)
+ return NDArrayBackedExtensionArray.take(
+ self, indexer, allow_fill=allow_fill, fill_value=fill_value
+ )
- if allow_fill:
- # convert user-provided `fill_value` to codes
- fill_value = self._validate_fill_value(fill_value)
+ # ------------------------------------------------------------------
+ # NDArrayBackedExtensionArray compat
- codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
- return self._constructor(codes, dtype=self.dtype, fastpath=True)
+ @property
+ def _ndarray(self) -> np.ndarray:
+ return self._codes
+
+ def _from_backing_data(self, arr: np.ndarray) -> "Categorical":
+ return self._constructor(arr, dtype=self.dtype, fastpath=True)
+
+ # ------------------------------------------------------------------
def take_nd(self, indexer, allow_fill: bool = False, fill_value=None):
# GH#27745 deprecate alias that other EAs dont have
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 27b2ed822a49f..f41964cc7b0c8 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -39,8 +39,9 @@
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
from pandas.core import missing, nanops, ops
-from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
+from pandas.core.algorithms import checked_add_with_arr, unique1d, value_counts
from pandas.core.array_algos.transforms import shift
+from pandas.core.arrays._mixins import _T, NDArrayBackedExtensionArray
from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import pandas.core.common as com
from pandas.core.construction import array, extract_array
@@ -425,7 +426,9 @@ def _with_freq(self, freq):
return self
-class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray):
+class DatetimeLikeArrayMixin(
+ ExtensionOpsMixin, AttributesMixin, NDArrayBackedExtensionArray
+):
"""
Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
@@ -437,6 +440,20 @@ class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray)
_generate_range
"""
+ # ------------------------------------------------------------------
+ # NDArrayBackedExtensionArray compat
+
+ @property
+ def _ndarray(self) -> np.ndarray:
+ # NB: A bunch of Interval tests fail if we use ._data
+ return self.asi8
+
+ def _from_backing_data(self: _T, arr: np.ndarray) -> _T:
+ # Note: we do not retain `freq`
+ return type(self)(arr, dtype=self.dtype) # type: ignore
+
+ # ------------------------------------------------------------------
+
@property
def ndim(self) -> int:
return self._data.ndim
@@ -665,16 +682,6 @@ def unique(self):
result = unique1d(self.asi8)
return type(self)(result, dtype=self.dtype)
- def take(self, indices, allow_fill=False, fill_value=None):
- if allow_fill:
- fill_value = self._validate_fill_value(fill_value)
-
- new_values = take(
- self.asi8, indices, allow_fill=allow_fill, fill_value=fill_value
- )
-
- return type(self)(new_values, dtype=self.dtype)
-
@classmethod
def _concat_same_type(cls, to_concat, axis: int = 0):
| Many EAs are thin wrappers around np.ndarray. We can both de-duplicate a bunch of code and make things easier on downstream authors by implementing `NDArrayBackedExtensionArray` as a base class for such EAs.
This PR only implements `NDArrayBackedExtensionArray.take`, but there is quite a bit more that can be shared in follow-ups:
- copy, delete, repeat
- mix in to PandasArray
- with small changes, `__getitem__`, `__setitem__`, reductions, ...
The only change in logic this PR makes is to make `Categorical.take` raise a ValueError instead of a `TypeError`, matching DTA/TDA/PA. | https://api.github.com/repos/pandas-dev/pandas/pulls/33660 | 2020-04-19T21:04:25Z | 2020-04-25T20:58:51Z | 2020-04-25T20:58:51Z | 2020-04-25T21:30:13Z |
ENH: Add optional argument index to pd.melt to maintain index values | diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst
index c476e33b8ddde..aa6bf44547040 100644
--- a/doc/source/user_guide/reshaping.rst
+++ b/doc/source/user_guide/reshaping.rst
@@ -296,6 +296,22 @@ For instance,
cheese.melt(id_vars=['first', 'last'])
cheese.melt(id_vars=['first', 'last'], var_name='quantity')
+When transforming a DataFrame using :func:`~pandas.melt`, the index will be ignored. The original index values can be kept around by setting the ``ignore_index`` parameter to ``False`` (default is ``True``). This will however duplicate them.
+
+.. versionadded:: 1.1.0
+
+.. ipython:: python
+
+ index = pd.MultiIndex.from_tuples([('person', 'A'), ('person', 'B')])
+ cheese = pd.DataFrame({'first': ['John', 'Mary'],
+ 'last': ['Doe', 'Bo'],
+ 'height': [5.5, 6.0],
+ 'weight': [130, 150]},
+ index=index)
+ cheese
+ cheese.melt(id_vars=['first', 'last'])
+ cheese.melt(id_vars=['first', 'last'], ignore_index=False)
+
Another way to transform is to use the :func:`~pandas.wide_to_long` panel data
convenience function. It is less flexible than :func:`~pandas.melt`, but more
user-friendly.
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 75f406d908c73..73b14b83d8444 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -287,6 +287,7 @@ Other enhancements
This can be used to set a custom compression level, e.g.,
``df.to_csv(path, compression={'method': 'gzip', 'compresslevel': 1}``
(:issue:`33196`)
+- :meth:`melt` has gained an ``ignore_index`` (default ``True``) argument that, if set to ``False``, prevents the method from dropping the index (:issue:`17440`).
- :meth:`Series.update` now accepts objects that can be coerced to a :class:`Series`,
such as ``dict`` and ``list``, mirroring the behavior of :meth:`DataFrame.update` (:issue:`33215`)
- :meth:`~pandas.core.groupby.GroupBy.transform` and :meth:`~pandas.core.groupby.GroupBy.aggregate` has gained ``engine`` and ``engine_kwargs`` arguments that supports executing functions with ``Numba`` (:issue:`32854`, :issue:`33388`)
@@ -1143,3 +1144,4 @@ Other
Contributors
~~~~~~~~~~~~
+
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 102c457f94a95..9c223d66b727b 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -6,7 +6,6 @@
from pandas._config import option_context
-from pandas._libs import reduction as libreduction
from pandas._typing import Axis
from pandas.util._decorators import cache_readonly
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 521d16ac0b905..b40c6b4927e97 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2140,7 +2140,7 @@ def to_stata(
from pandas.io.stata import StataWriter117 as statawriter # type: ignore
else: # versions 118 and 119
# mypy: Name 'statawriter' already defined (possibly by an import)
- from pandas.io.stata import StataWriterUTF8 as statawriter # type:ignore
+ from pandas.io.stata import StataWriterUTF8 as statawriter # type: ignore
kwargs: Dict[str, Any] = {}
if version is None or version >= 117:
@@ -7086,6 +7086,7 @@ def melt(
var_name=None,
value_name="value",
col_level=None,
+ ignore_index=True,
) -> "DataFrame":
return melt(
@@ -7095,6 +7096,7 @@ def melt(
var_name=var_name,
value_name=value_name,
col_level=col_level,
+ ignore_index=ignore_index,
)
# ----------------------------------------------------------------------
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index cd0619738677d..caf9e2fd7be57 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -13,6 +13,7 @@
import pandas.core.common as com
from pandas.core.indexes.api import Index, MultiIndex
from pandas.core.reshape.concat import concat
+from pandas.core.reshape.util import _tile_compat
from pandas.core.shared_docs import _shared_docs
from pandas.core.tools.numeric import to_numeric
@@ -31,8 +32,8 @@ def melt(
var_name=None,
value_name="value",
col_level=None,
+ ignore_index: bool = True,
) -> "DataFrame":
- # TODO: what about the existing index?
# If multiindex, gather names of columns on all level for checking presence
# of `id_vars` and `value_vars`
if isinstance(frame.columns, MultiIndex):
@@ -121,7 +122,12 @@ def melt(
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(N)
- return frame._constructor(mdata, columns=mcolumns)
+ result = frame._constructor(mdata, columns=mcolumns)
+
+ if not ignore_index:
+ result.index = _tile_compat(frame.index, K)
+
+ return result
@deprecate_kwarg(old_arg_name="label", new_arg_name=None)
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index 1894f551afea5..b81942f062b19 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -28,6 +28,11 @@
Name to use for the 'value' column.
col_level : int or str, optional
If columns are a MultiIndex then use this level to melt.
+ ignore_index : bool, default True
+ If True, original index is ignored. If False, the original index is retained.
+ Index labels will be repeated as necessary.
+
+ .. versionadded:: 1.1.0
Returns
-------
@@ -78,6 +83,17 @@
1 b B 3
2 c B 5
+ Original index values can be kept around:
+
+ >>> %(caller)sid_vars=['A'], value_vars=['B', 'C'], ignore_index=False)
+ A variable value
+ 0 a B 1
+ 1 b B 3
+ 2 c B 5
+ 0 a C 2
+ 1 b C 4
+ 2 c C 6
+
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index 000a6354277ab..923595038c5eb 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -357,6 +357,47 @@ def test_melt_mixed_int_str_value_vars(self):
expected = DataFrame({"variable": [0, "a"], "value": ["foo", "bar"]})
tm.assert_frame_equal(result, expected)
+ def test_ignore_index(self):
+ # GH 17440
+ df = DataFrame({"foo": [0], "bar": [1]}, index=["first"])
+ result = melt(df, ignore_index=False)
+ expected = DataFrame(
+ {"variable": ["foo", "bar"], "value": [0, 1]}, index=["first", "first"]
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_ignore_multiindex(self):
+ # GH 17440
+ index = pd.MultiIndex.from_tuples(
+ [("first", "second"), ("first", "third")], names=["baz", "foobar"]
+ )
+ df = DataFrame({"foo": [0, 1], "bar": [2, 3]}, index=index)
+ result = melt(df, ignore_index=False)
+
+ expected_index = pd.MultiIndex.from_tuples(
+ [("first", "second"), ("first", "third")] * 2, names=["baz", "foobar"]
+ )
+ expected = DataFrame(
+ {"variable": ["foo"] * 2 + ["bar"] * 2, "value": [0, 1, 2, 3]},
+ index=expected_index,
+ )
+
+ tm.assert_frame_equal(result, expected)
+
+ def test_ignore_index_name_and_type(self):
+ # GH 17440
+ index = pd.Index(["foo", "bar"], dtype="category", name="baz")
+ df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index)
+ result = melt(df, ignore_index=False)
+
+ expected_index = pd.Index(["foo", "bar"] * 2, dtype="category", name="baz")
+ expected = DataFrame(
+ {"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]},
+ index=expected_index,
+ )
+
+ tm.assert_frame_equal(result, expected)
+
class TestLreshape:
def test_pairs(self):
| Finishing up a stale PR idea: #28859 and #17459
Has some tests and better code.
I think it's fair to duplicate the index values and not bend over backwards to maintain uniqueness like in previous iterations.
Apologies for the mess, it was a quick job and I didn't want to spend an hour fiddling with the commits.
Finally, I deleted some ignore type comments for mypy because the commits weren't going on my system. Is there some other fix for that? Other than that I think it's good to go.
- [X] closes #17440
- [X] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
- [x] Reconsider API design
- [x] Add usage examples in whatsnew, docstring, and reshaping.rst.
| https://api.github.com/repos/pandas-dev/pandas/pulls/33659 | 2020-04-19T20:49:20Z | 2020-07-09T23:34:45Z | 2020-07-09T23:34:45Z | 2020-07-10T07:44:49Z |
BUG: Fix StringArray use_inf_as_na bug | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 84ad478226175..c14bcb6c6d8c5 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -796,13 +796,13 @@ ExtensionArray
^^^^^^^^^^^^^^
- Fixed bug where :meth:`Series.value_counts` would raise on empty input of ``Int64`` dtype (:issue:`33317`)
+- Fixed bug where :meth:`StringArray.isna` would return ``False`` for NA values when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`33655`)
- Fixed bug in :class:`Series` construction with EA dtype and index but no data or scalar data fails (:issue:`26469`)
- Fixed bug that caused :meth:`Series.__repr__()` to crash for extension types whose elements are multidimensional arrays (:issue:`33770`).
- Fixed bug where :meth:`Series.update` would raise a ``ValueError`` for ``ExtensionArray`` dtypes with missing values (:issue:`33980`)
- Fixed bug where :meth:`StringArray.memory_usage` was not implemented (:issue:`33963`)
- Fixed bug that `DataFrame(columns=.., dtype='string')` would fail (:issue:`27953`, :issue:`33623`)
-
Other
^^^^^
- Appending a dictionary to a :class:`DataFrame` without passing ``ignore_index=True`` will raise ``TypeError: Can only append a dict if ignore_index=True``
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index 62a8bfccaa432..6bca5e370ac89 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -90,11 +90,6 @@ cpdef bint checknull_old(object val):
return False
-cdef inline bint _check_none_nan_inf_neginf(object val):
- return val is None or (isinstance(val, float) and
- (val != val or val == INF or val == NEGINF))
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
cpdef ndarray[uint8_t] isnaobj(ndarray arr):
@@ -141,6 +136,7 @@ def isnaobj_old(arr: ndarray) -> ndarray:
- INF
- NEGINF
- NaT
+ - NA
Parameters
----------
@@ -161,7 +157,7 @@ def isnaobj_old(arr: ndarray) -> ndarray:
result = np.zeros(n, dtype=np.uint8)
for i in range(n):
val = arr[i]
- result[i] = val is NaT or _check_none_nan_inf_neginf(val)
+ result[i] = checknull(val) or val == INF or val == NEGINF
return result.view(np.bool_)
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 048fd7adf55b1..ab8df492f1c01 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -17,6 +17,7 @@
TD64NS_DTYPE,
ensure_object,
is_bool_dtype,
+ is_categorical_dtype,
is_complex_dtype,
is_datetimelike_v_numeric,
is_dtype_equal,
@@ -209,8 +210,8 @@ def _isna_ndarraylike(obj, inf_as_na: bool = False):
dtype = values.dtype
if is_extension_array_dtype(dtype):
- if inf_as_na:
- result = values.isna() | (values == -np.inf) | (values == np.inf)
+ if inf_as_na and is_categorical_dtype(dtype):
+ result = libmissing.isnaobj_old(values.to_numpy())
else:
result = values.isna()
elif is_string_dtype(dtype):
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index b681abf03a2b3..6f9a1a5be4c43 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -305,6 +305,28 @@ def test_value_counts_na():
tm.assert_series_equal(result, expected)
+@pytest.mark.parametrize(
+ "values, expected",
+ [
+ (pd.array(["a", "b", "c"]), np.array([False, False, False])),
+ (pd.array(["a", "b", None]), np.array([False, False, True])),
+ ],
+)
+def test_use_inf_as_na(values, expected):
+ # https://github.com/pandas-dev/pandas/issues/33655
+ with pd.option_context("mode.use_inf_as_na", True):
+ result = values.isna()
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = pd.Series(values).isna()
+ expected = pd.Series(expected)
+ tm.assert_series_equal(result, expected)
+
+ result = pd.DataFrame(values).isna()
+ expected = pd.DataFrame(expected)
+ tm.assert_frame_equal(result, expected)
+
+
def test_memory_usage():
# GH 33963
series = pd.Series(["a", "b", "c"], dtype="string")
diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py
index 2393d2edcd2c6..a5969ef961bab 100644
--- a/pandas/tests/extension/base/missing.py
+++ b/pandas/tests/extension/base/missing.py
@@ -127,3 +127,10 @@ def test_fillna_fill_other(self, data):
expected = pd.DataFrame({"A": data, "B": [0.0] * len(result)})
self.assert_frame_equal(result, expected)
+
+ def test_use_inf_as_na_no_effect(self, data_missing):
+ ser = pd.Series(data_missing)
+ expected = ser.isna()
+ with pd.option_context("mode.use_inf_as_na", True):
+ result = ser.isna()
+ self.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 3b8c2e595148e..162778e372426 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -509,12 +509,12 @@ def test_fillna_nat(self):
tm.assert_frame_equal(filled2, expected)
def test_isna_for_inf(self):
- s = Series(["a", np.inf, np.nan, 1.0])
+ s = Series(["a", np.inf, np.nan, pd.NA, 1.0])
with pd.option_context("mode.use_inf_as_na", True):
r = s.isna()
dr = s.dropna()
- e = Series([False, True, True, False])
- de = Series(["a", 1.0], index=[0, 3])
+ e = Series([False, True, True, True, False])
+ de = Series(["a", 1.0], index=[0, 4])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
| - [x] closes #33655
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33656 | 2020-04-19T15:54:24Z | 2020-05-10T17:12:46Z | 2020-05-10T17:12:46Z | 2020-05-10T17:40:00Z |
DOC: Fix heading capitalization in doc/source/whatsnew - part6 (#32550) | diff --git a/doc/source/whatsnew/v0.22.0.rst b/doc/source/whatsnew/v0.22.0.rst
index 75949a90d09a6..66d3ab3305565 100644
--- a/doc/source/whatsnew/v0.22.0.rst
+++ b/doc/source/whatsnew/v0.22.0.rst
@@ -1,7 +1,7 @@
.. _whatsnew_0220:
-v0.22.0 (December 29, 2017)
----------------------------
+Version 0.22.0 (December 29, 2017)
+----------------------------------
{{ header }}
@@ -96,7 +96,7 @@ returning ``1`` instead.
These changes affect :meth:`DataFrame.sum` and :meth:`DataFrame.prod` as well.
Finally, a few less obvious places in pandas are affected by this change.
-Grouping by a categorical
+Grouping by a Categorical
^^^^^^^^^^^^^^^^^^^^^^^^^
Grouping by a ``Categorical`` and summing now returns ``0`` instead of
diff --git a/doc/source/whatsnew/v0.23.0.rst b/doc/source/whatsnew/v0.23.0.rst
index b9e1b5060d1da..f91d89679dad1 100644
--- a/doc/source/whatsnew/v0.23.0.rst
+++ b/doc/source/whatsnew/v0.23.0.rst
@@ -86,8 +86,8 @@ Please note that the string `index` is not supported with the round trip format,
.. _whatsnew_0230.enhancements.assign_dependent:
-``.assign()`` accepts dependent arguments
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``.assign()`` accepts dependent arguments
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The :func:`DataFrame.assign` now accepts dependent keyword arguments for python version later than 3.6 (see also `PEP 468
<https://www.python.org/dev/peps/pep-0468/>`_). Later keyword arguments may now refer to earlier ones if the argument is a callable. See the
@@ -244,7 +244,7 @@ documentation. If you build an extension array, publicize it on our
.. _whatsnew_0230.enhancements.categorical_grouping:
-New ``observed`` keyword for excluding unobserved categories in ``groupby``
+New ``observed`` keyword for excluding unobserved categories in ``GroupBy``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Grouping by a categorical includes the unobserved categories in the output.
@@ -360,8 +360,8 @@ Fill all consecutive outside values in both directions
.. _whatsnew_0210.enhancements.get_dummies_dtype:
-``get_dummies`` now supports ``dtype`` argument
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Function ``get_dummies`` now supports ``dtype`` argument
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The :func:`get_dummies` now accepts a ``dtype`` argument, which specifies a dtype for the new columns. The default remains uint8. (:issue:`18330`)
@@ -388,8 +388,8 @@ See the :ref:`documentation here <timedeltas.mod_divmod>`. (:issue:`19365`)
.. _whatsnew_0230.enhancements.ran_inf:
-``.rank()`` handles ``inf`` values when ``NaN`` are present
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``.rank()`` handles ``inf`` values when ``NaN`` are present
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In previous versions, ``.rank()`` would assign ``inf`` elements ``NaN`` as their ranks. Now ranks are calculated properly. (:issue:`6945`)
@@ -587,7 +587,7 @@ If installed, we now require:
.. _whatsnew_0230.api_breaking.dict_insertion_order:
-Instantiation from dicts preserves dict insertion order for python 3.6+
+Instantiation from dicts preserves dict insertion order for Python 3.6+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Until Python 3.6, dicts in Python had no formally defined ordering. For Python
@@ -1365,8 +1365,8 @@ MultiIndex
- Bug in indexing where nested indexers having only numpy arrays are handled incorrectly (:issue:`19686`)
-I/O
-^^^
+IO
+^^
- :func:`read_html` now rewinds seekable IO objects after parse failure, before attempting to parse with a new parser. If a parser errors and the object is non-seekable, an informative error is raised suggesting the use of a different parser (:issue:`17975`)
- :meth:`DataFrame.to_html` now has an option to add an id to the leading `<table>` tag (:issue:`8496`)
@@ -1403,7 +1403,7 @@ Plotting
- :func:`DataFrame.plot` now supports multiple columns to the ``y`` argument (:issue:`19699`)
-Groupby/resample/rolling
+GroupBy/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug when grouping by a single column and aggregating with a class like ``list`` or ``tuple`` (:issue:`18079`)
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 45399792baecf..5bfaa7a5a3e6b 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -277,8 +277,8 @@ For earlier versions this can be done using the following.
.. _whatsnew_0240.enhancements.read_html:
-``read_html`` Enhancements
-^^^^^^^^^^^^^^^^^^^^^^^^^^
+Function ``read_html`` enhancements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:func:`read_html` previously ignored ``colspan`` and ``rowspan`` attributes.
Now it understands them, treating them as sequences of cells with the same
@@ -1371,7 +1371,7 @@ the object's ``freq`` attribute (:issue:`21939`, :issue:`23878`).
.. _whatsnew_0240.deprecations.integer_tz:
-Passing integer data and a timezone to datetimeindex
+Passing integer data and a timezone to DatetimeIndex
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The behavior of :class:`DatetimeIndex` when passed integer data and
@@ -1769,8 +1769,8 @@ MultiIndex
- :class:`MultiIndex` has gained the :meth:`MultiIndex.from_frame`, it allows constructing a :class:`MultiIndex` object from a :class:`DataFrame` (:issue:`22420`)
- Fix ``TypeError`` in Python 3 when creating :class:`MultiIndex` in which some levels have mixed types, e.g. when some labels are tuples (:issue:`15457`)
-I/O
-^^^
+IO
+^^
- Bug in :func:`read_csv` in which a column specified with ``CategoricalDtype`` of boolean categories was not being correctly coerced from string values to booleans (:issue:`20498`)
- Bug in :func:`read_csv` in which unicode column names were not being properly recognized with Python 2.x (:issue:`13253`)
@@ -1827,7 +1827,7 @@ Plotting
- Bug in :func:`DataFrame.plot.bar` caused bars to use multiple colors instead of a single one (:issue:`20585`)
- Bug in validating color parameter caused extra color to be appended to the given color array. This happened to multiple plotting functions using matplotlib. (:issue:`20726`)
-Groupby/resample/rolling
+GroupBy/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in :func:`pandas.core.window.Rolling.min` and :func:`pandas.core.window.Rolling.max` with ``closed='left'``, a datetime-like index and only one entry in the series leading to segfault (:issue:`24718`)
diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index d1a893f99cff4..27e84bf0a7cd7 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -51,7 +51,6 @@ Bug fixes
- Bug where calling :meth:`Series.replace` on categorical data could return a ``Series`` with incorrect dimensions (:issue:`24971`)
-
--
**Reshaping**
diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py
index 5de2a07381ae5..62ec6b9ef07af 100755
--- a/scripts/validate_rst_title_capitalization.py
+++ b/scripts/validate_rst_title_capitalization.py
@@ -138,6 +138,9 @@
"CategoricalDtype",
"UTC",
"Panel",
+ "False",
+ "Styler",
+ "os",
}
CAP_EXCEPTIONS_DICT = {word.lower(): word for word in CAPITALIZATION_EXCEPTIONS}
| - [ ] Modify files v0.22.0.rst, v0.23.1.rst, v0.19.0.rst, v0.24.0.rst, v0.24.2.rst
-[ ] Add exceptions in 'validate_rst_title_capitalization.py' | https://api.github.com/repos/pandas-dev/pandas/pulls/33653 | 2020-04-19T12:54:28Z | 2020-07-21T20:15:16Z | null | 2020-07-21T20:15:17Z |
Value counts normalize | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index bce6a735b7b07..fc2aaba6e9c0c 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -247,7 +247,8 @@ Numeric
^^^^^^^
- Bug in :func:`to_numeric` where float precision was incorrect (:issue:`31364`)
- Bug in :meth:`DataFrame.any` with ``axis=1`` and ``bool_only=True`` ignoring the ``bool_only`` keyword (:issue:`32432`)
--
+- Bug in :meth:`Series.value_counts` with ``dropna=True`` and ``normalize=True`` where value counts did not sum to 1. (:issue:`25970`)
+
Conversion
^^^^^^^^^^
@@ -315,7 +316,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrameGroupby.tshift` failing to raise ``ValueError`` when a frequency cannot be inferred for the index of a group (:issue:`35937`)
- Bug in :meth:`DataFrame.groupby` does not always maintain column index name for ``any``, ``all``, ``bfill``, ``ffill``, ``shift`` (:issue:`29764`)
- Bug in :meth:`DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`)
--
+- Bug in :meth:`DataframeGroupBy.value_counts` outputs wrong index labels with bins (:issue:`32471`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 872c51c7dfa75..5e2944c80de92 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -720,17 +720,23 @@ def value_counts(
ascending : bool, default False
Sort in ascending order
normalize: bool, default False
- If True then compute a relative histogram
- bins : integer, optional
- Rather than count values, group them into half-open bins,
- convenience for pd.cut, only works with numeric data
+ If True, then compute a relative histogram that outputs the
+ proportion of each value.
+ bins : integer or iterable of numeric, optional
+ Rather than count values, group them into half-open bins.
+ Only works with numeric data.
+ If int, interpreted as number of bins.
+ If interable of numeric, will use provided numbers as bin endpoints.
dropna : bool, default True
- Don't include counts of NaN
+ Don't include counts of NaN.
+ If False and NaNs are present, NaN will be a key in the output.
+ .. versionchanged:: 1.2
Returns
-------
Series
"""
+
from pandas.core.series import Series
name = getattr(values, "name", None)
@@ -744,39 +750,30 @@ def value_counts(
except TypeError as err:
raise TypeError("bins argument only works with numeric data.") from err
- # count, remove nulls (from the index), and but the bins
+ # count, remove nulls (from the index), and use the bins
result = ii.value_counts(dropna=dropna)
- result = result[result.index.notna()]
result.index = result.index.astype("interval")
result = result.sort_index()
- # if we are dropna and we have NO values
- if dropna and (result._values == 0).all():
- result = result.iloc[0:0]
-
- # normalizing is by len of all (regardless of dropna)
- counts = np.array([len(ii)])
-
else:
if is_extension_array_dtype(values):
- # handle Categorical and sparse,
+ # handle Categorical and sparse data,
result = Series(values)._values.value_counts(dropna=dropna)
result.name = name
- counts = result._values
else:
keys, counts = value_counts_arraylike(values, dropna)
result = Series(counts, index=keys, name=name)
- if sort:
- result = result.sort_values(ascending=ascending)
-
if normalize:
- result = result / float(counts.sum())
+ counts = result._values
+ result = result / float(max(counts.sum(), 1))
+ if sort:
+ result = result.sort_values(ascending=ascending)
return result
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 1926803d8f04b..55ca1259bb188 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1174,17 +1174,20 @@ def value_counts(
Parameters
----------
normalize : bool, default False
- If True then the object returned will contain the relative
- frequencies of the unique values.
+ If True, outputs the relative frequencies of the unique values.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
- bins : int, optional
- Rather than count values, group them into half-open bins,
- a convenience for ``pd.cut``, only works with numeric data.
+ bins : integer or iterable of numeric, optional
+ Rather than count individual values, group them into half-open bins.
+ Only works with numeric data.
+ If int, interpreted as number of bins.
+ If interable of numeric, will use provided numbers as bin endpoints.
dropna : bool, default True
Don't include counts of NaN.
+ If False and NaNs are present, NaN will be a key in the output.
+ .. versionchanged:: 1.1.2
Returns
-------
@@ -1221,8 +1224,10 @@ def value_counts(
Bins can be useful for going from a continuous variable to a
categorical variable; instead of counting unique
- apparitions of values, divide the index in the specified
- number of half-open bins.
+ instances of values, count the number of values that fall
+ into half-open intervals.
+
+ Bins can be an int.
>>> s.value_counts(bins=3)
(2.0, 3.0] 2
@@ -1230,6 +1235,15 @@ def value_counts(
(3.0, 4.0] 1
dtype: int64
+ Bins can also be an iterable of numbers. These numbers are treated
+ as endpoints for the intervals.
+
+ >>> s.value_counts(bins=[0, 2, 4, 9])
+ (2.0, 4.0] 3
+ (-0.001, 2.0] 2
+ (4.0, 9.0] 0
+ dtype: int64
+
**dropna**
With `dropna` set to `False` we can also see NaN index values.
@@ -1242,6 +1256,7 @@ def value_counts(
1.0 1
dtype: int64
"""
+
result = value_counts(
self,
sort=sort,
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index e870187fc7952..cff9a24bd1540 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -45,7 +45,6 @@
ensure_platform_int,
is_bool,
is_integer_dtype,
- is_interval_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
@@ -59,6 +58,7 @@
validate_func_kwargs,
)
import pandas.core.algorithms as algorithms
+from pandas.core.algorithms import unique
from pandas.core.arrays import ExtensionArray
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
@@ -79,6 +79,7 @@
import pandas.core.indexes.base as ibase
from pandas.core.internals import BlockManager
from pandas.core.series import Series
+from pandas.core.sorting import compress_group_index
from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, maybe_use_numba
from pandas.plotting import boxplot_frame_groupby
@@ -685,7 +686,6 @@ def value_counts(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
):
- from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut
if bins is not None and not np.iterable(bins):
@@ -701,111 +701,111 @@ def value_counts(
ids, _, _ = self.grouper.group_info
val = self.obj._values
+ codes = self.grouper.reconstructed_codes # this will track the groups
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
+ if dropna:
+ mask = ~isna(val)
+ if not mask.all():
+ ids, val = ids[mask], val[mask]
if bins is None:
- lab, lev = algorithms.factorize(val, sort=True)
- llab = lambda lab, inc: lab[inc]
+ val_lab, val_lev = algorithms.factorize(
+ val, sort=True, na_sentinel=(None if dropna else -1)
+ )
else:
+ # val_lab is a Categorical with categories an IntervalIndex
+ val_lab = cut(Series(val), bins, include_lowest=True)
+ val_lev = val_lab.cat.categories
+ val_lab = val_lab.cat.codes.values
- # lab is a Categorical with categories an IntervalIndex
- lab = cut(Series(val), bins, include_lowest=True)
- lev = lab.cat.categories
- lab = lev.take(lab.cat.codes)
- llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
-
- if is_interval_dtype(lab.dtype):
- # TODO: should we do this inside II?
- sorter = np.lexsort((lab.left, lab.right, ids))
- else:
- sorter = np.lexsort((lab, ids))
+ if dropna:
+ included = val_lab != -1
+ ids, val_lab = ids[included], val_lab[included]
- ids, lab = ids[sorter], lab[sorter]
+ sorter = np.lexsort((val_lab, ids))
+ ids, val_lab = ids[sorter], val_lab[sorter]
+ used_ids = unique(ids)
+ if max(used_ids) >= len(codes[0]):
+ # this means we had something skipped from the start
+ used_ids = compress_group_index(used_ids)[0]
+ codes = [code[used_ids] for code in codes] # drop what was taken out for n/a
# group boundaries are where group ids change
- idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
-
# new values are where sorted labels change
- lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
- inc = np.r_[True, lchanges]
- inc[idx] = True # group boundaries are also new values
- out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
-
- # num. of times each group should be repeated
- rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
-
- # multi-index components
- codes = self.grouper.reconstructed_codes
- codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
- levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
+ change_ids = ids[1:] != ids[:-1]
+ changes = np.logical_or(change_ids, (val_lab[1:] != val_lab[:-1]))
+ changes = np.r_[True, changes]
+ val_lab = val_lab[changes]
+ ids = ids[changes]
+ cts = np.diff(np.nonzero(np.r_[changes, True]))[0]
+ idx = np.r_[0, 1 + np.nonzero(change_ids)[0]]
+ # how many times each index gets repeated
+ rep = partial(np.repeat, repeats=np.add.reduceat(changes, idx))
+
+ if (not dropna) and (-1 in val_lab):
+ # in this case we need to explicitly add NaN as a level
+ val_lev = np.r_[Index([np.nan]), val_lev]
+ val_lab += 1
+
+ levels = [ping.group_index for ping in self.grouper.groupings] + [
+ Index(val_lev)
+ ]
names = self.grouper.names + [self._selection_name]
- if dropna:
- mask = codes[-1] != -1
- if mask.all():
- dropna = False
- else:
- out, codes = out[mask], [level_codes[mask] for level_codes in codes]
-
if normalize:
- out = out.astype("float")
- d = np.diff(np.r_[idx, len(ids)])
- if dropna:
- m = ids[lab == -1]
- np.add.at(d, m, -1)
- acc = rep(d)[mask]
- else:
- acc = rep(d)
- out /= acc
-
- if sort and bins is None:
- cat = ids[inc][mask] if dropna else ids[inc]
- sorter = np.lexsort((out if ascending else -out, cat))
- out, codes[-1] = out[sorter], codes[-1][sorter]
+ num_repeats = np.diff(idx, append=len(change_ids) + 1)
+ cts = cts.astype("float") / rep(num_repeats)
+ # each divisor is the number of repeats for that index
if bins is None:
+ codes = [rep(level_codes) for level_codes in codes] + [val_lab]
+
+ if sort:
+ indices = tuple(reversed(codes[:-1]))
+ sorter = np.lexsort(
+ np.r_[[val_lab], [cts if ascending else -cts], indices]
+ ) # sorts using right columns first
+ cts = cts[sorter]
+ codes = [code[sorter] for code in codes]
+
mi = MultiIndex(
levels=levels, codes=codes, names=names, verify_integrity=False
)
-
- if is_integer_dtype(out):
- out = ensure_int64(out)
- return self.obj._constructor(out, index=mi, name=self._selection_name)
+ if is_integer_dtype(cts):
+ cts = ensure_int64(cts)
+ return self.obj._constructor(cts, index=mi, name=self._selection_name)
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
- diff = np.zeros(len(out), dtype="bool")
- for level_codes in codes[:-1]:
- diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
-
- ncat, nbin = diff.sum(), len(levels[-1])
-
- left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
-
- right = [diff.cumsum() - 1, codes[-1]]
-
- _, idx = get_join_indexers(left, right, sort=False, how="left")
- out = np.where(idx != -1, out[idx], 0)
+ nbin = len(levels[-1])
+ ncat = len(codes[0])
+ fout = np.zeros((ncat * nbin), dtype=float if normalize else np.int64)
+ id = 0
+ change_ids = np.r_[ # need to update now that we removed full repeats
+ ids[1:] != ids[:-1], True
+ ]
+ for i, ct in enumerate(cts): # fill in nonzero values of fout
+ fout[id * nbin + val_lab[i]] = cts[i]
+ id += change_ids[i]
+ ncodes = [np.repeat(code, nbin) for code in codes]
+ ncodes.append(np.tile(range(nbin), len(codes[0])))
if sort:
- sorter = np.lexsort((out if ascending else -out, left[0]))
- out, left[-1] = out[sorter], left[-1][sorter]
-
- # build the multi-index w/ full levels
- def build_codes(lev_codes: np.ndarray) -> np.ndarray:
- return np.repeat(lev_codes[diff], nbin)
-
- codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
- codes.append(left[-1])
-
- mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
-
- if is_integer_dtype(out):
- out = ensure_int64(out)
- return self.obj._constructor(out, index=mi, name=self._selection_name)
+ indices = tuple(reversed(ncodes[:-1]))
+ sorter = np.lexsort(
+ np.r_[[fout if ascending else -fout], indices]
+ ) # sorts using right columns first
+ fout = fout[sorter]
+ ncodes = [code[sorter] for code in ncodes]
+ mi = MultiIndex(
+ levels=levels, codes=ncodes, names=names, verify_integrity=False
+ )
+ if is_integer_dtype(fout):
+ fout = ensure_int64(fout)
+ return self.obj._constructor(fout, index=mi, name=self._selection_name)
def count(self) -> Series:
"""
diff --git a/pandas/tests/base/test_value_counts.py b/pandas/tests/base/test_value_counts.py
old mode 100644
new mode 100755
index de04c30432e6f..8ea7f0fe3fc98
--- a/pandas/tests/base/test_value_counts.py
+++ b/pandas/tests/base/test_value_counts.py
@@ -191,6 +191,37 @@ def test_value_counts_bins(index_or_series):
assert s.nunique() == 0
+@pytest.mark.parametrize("dropna", [True, False])
+@pytest.mark.parametrize("bins", [None, 3, [0, 1, 3, 6]])
+def test_value_counts_bins_nas(dropna, bins):
+ # GH25970, handle normalizing bins with NA's properly
+ # First test that NA's are included appropriately
+ rand_data = np.append(
+ np.random.randint(1, 5, 50), [np.nan] * np.random.randint(1, 20)
+ )
+ s = Series(rand_data)
+ if dropna:
+ assert not s.value_counts(dropna=dropna, bins=bins).index.hasnans
+ else:
+ assert s.value_counts(dropna=dropna, bins=bins).index.hasnans
+
+
+def test_value_counts_bins_specific_na():
+ # GH25970 case where proportions were incorrect for dropna and normalize=True
+ s2 = Series([1, 2, 2, 3, 3, 3, np.nan, np.nan, 4, 5])
+ intervals = IntervalIndex.from_breaks([0.995, 2.333, 3.667, 5.0])
+ expected_dropna = Series([0.375, 0.375, 0.25], intervals.take([1, 0, 2]))
+ tm.assert_series_equal(
+ s2.value_counts(dropna=True, normalize=True, bins=3), expected_dropna
+ )
+ keys = list(intervals.take([1, 0, 2]))
+ keys.insert(2, np.nan)
+ expected_keepna = Series([0.3, 0.3, 0.2, 0.2], keys)
+ tm.assert_series_equal(
+ s2.value_counts(dropna=False, normalize=True, bins=3), expected_keepna
+ )
+
+
def test_value_counts_datetime64(index_or_series):
klass = index_or_series
diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py
index c86cb4532bc26..94e19b93368d8 100644
--- a/pandas/tests/groupby/test_value_counts.py
+++ b/pandas/tests/groupby/test_value_counts.py
@@ -41,13 +41,12 @@ def seed_df(seed_nans, n, m):
ids = []
for seed_nans in [True, False]:
for n, m in product((100, 1000), (5, 20)):
-
df = seed_df(seed_nans, n, m)
bins = None, np.arange(0, max(5, df["3rd"].max()) + 1, 2)
keys = "1st", "2nd", ["1st", "2nd"]
for k, b in product(keys, bins):
binned.append((df, k, b, n, m))
- ids.append(f"{k}-{n}-{m}")
+ ids.append(f"{k}-{n}-{m}-{seed_nans} ")
@pytest.mark.slow
@@ -71,16 +70,41 @@ def rebuild_index(df):
gr = df.groupby(keys, sort=isort)
left = gr["3rd"].value_counts(**kwargs)
+ left.index.names = left.index.names[:-1] + ["3rd"]
- gr = df.groupby(keys, sort=isort)
right = gr["3rd"].apply(Series.value_counts, **kwargs)
right.index.names = right.index.names[:-1] + ["3rd"]
# have to sort on index because of unstable sort on values
left, right = map(rebuild_index, (left, right)) # xref GH9212
+
+ # have to ignore 0 counts to be consistent with individual column value_counts
+ left = left[left.astype(bool)]
+ right = right[right.astype(bool)]
tm.assert_series_equal(left.sort_index(), right.sort_index())
+def test_groubpy_value_counts_bins():
+ # GH32471
+ BINS = [0, 20, 80, 100]
+ values = [
+ [0, 5, 0],
+ [1, 5, 100],
+ [0, 5, 100],
+ [2, 5, 0],
+ [3, 6, 100],
+ [3, 5, 100],
+ [1, 5, 100],
+ ]
+ df = DataFrame(values, columns=["key1", "key2", "score"])
+ result = df.groupby(["key1", "key2"])["score"].value_counts(bins=BINS)
+ result.sort_index(inplace=True)
+ expected = Series(
+ [1, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0, 1, 0, 0, 1], result.index, name="score"
+ )
+ tm.assert_series_equal(result, expected)
+
+
def test_series_groupby_value_counts_with_grouper():
# GH28479
df = DataFrame(
diff --git a/pandas/tests/series/methods/test_value_counts.py b/pandas/tests/series/methods/test_value_counts.py
index f97362ce9c2a9..270f29914442a 100644
--- a/pandas/tests/series/methods/test_value_counts.py
+++ b/pandas/tests/series/methods/test_value_counts.py
@@ -179,6 +179,12 @@ def test_value_counts_categorical_with_nan(self):
res = ser.value_counts(dropna=False, sort=False)
tm.assert_series_equal(res, exp)
+ def test_value_counts_interval_bins(self):
+ ser = Series([1, 2, 3, 0, 1, 4], ["a", "a", "a", "b", "b", "c"])
+ res = ser.value_counts(bins=[0, 1, 2])
+ exp = Series([3, 1], res.index)
+ tm.assert_series_equal(res, exp)
+
@pytest.mark.parametrize(
"ser, dropna, exp",
[
| - [x] closes #25970
- [x] closes #32471
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This pull request resolves issues with binning and NA values in both ``Series.value_counts`` and ``SeriesGroupBy.value_counts,`` adding new tests to check the problematic cases. | https://api.github.com/repos/pandas-dev/pandas/pulls/33652 | 2020-04-19T12:48:00Z | 2021-06-16T13:59:03Z | null | 2021-06-16T13:59:03Z |
BUG: Create empty dataframe with string dtype fails | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 816ef4e5c9eb6..3cef9fbe49a9b 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -759,6 +759,7 @@ ExtensionArray
- Fixed bug in :class:`Series` construction with EA dtype and index but no data or scalar data fails (:issue:`26469`)
- Fixed bug that caused :meth:`Series.__repr__()` to crash for extension types whose elements are multidimensional arrays (:issue:`33770`).
- Fixed bug where :meth:`StringArray.memory_usage` was not implemented (:issue:`33963`)
+- Fixed bug that `DataFrame(columns=.., dtype='string')` would fail (:issue:`27953`, :issue:`33623`)
Other
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index ce3f07d06d6a2..b2af149ccf14c 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -257,7 +257,10 @@ def init_dict(data: Dict, index, columns, dtype: Optional[DtypeObj] = None):
# no obvious "empty" int column
if missing.any() and not is_integer_dtype(dtype):
- if dtype is None or np.issubdtype(dtype, np.flexible):
+ if dtype is None or (
+ not is_extension_array_dtype(dtype)
+ and np.issubdtype(dtype, np.flexible)
+ ):
# GH#1783
nan_dtype = np.dtype(object)
else:
diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py
index 681c6f9a19dc5..48f1c34764313 100644
--- a/pandas/tests/extension/arrow/test_bool.py
+++ b/pandas/tests/extension/arrow/test_bool.py
@@ -69,6 +69,10 @@ def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
# pyarrow.lib.ArrowInvalid: only handle 1-dimensional arrays
super().test_series_constructor_scalar_na_with_index(dtype, na_value)
+ @pytest.mark.xfail(reason="raises AssertionError")
+ def test_construct_empty_dataframe(self, dtype):
+ super().test_construct_empty_dataframe(dtype)
+
class TestReduce(base.BaseNoReduceTests):
def test_reduce_series_boolean(self):
diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py
index 52e29cffc79c4..5c9e5dcf3ae24 100644
--- a/pandas/tests/extension/base/constructors.py
+++ b/pandas/tests/extension/base/constructors.py
@@ -108,3 +108,11 @@ def test_pandas_array_dtype(self, data):
result = pd.array(data, dtype=np.dtype(object))
expected = pd.arrays.PandasArray(np.asarray(data, dtype=object))
self.assert_equal(result, expected)
+
+ def test_construct_empty_dataframe(self, dtype):
+ # GH 33623
+ result = pd.DataFrame(columns=["a"], dtype=dtype)
+ expected = pd.DataFrame(
+ {"a": pd.array([], dtype=dtype)}, index=pd.Index([], dtype="object")
+ )
+ self.assert_frame_equal(result, expected)
| - [ ] closes #33623, closes #27953
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33651 | 2020-04-19T12:10:29Z | 2020-05-09T19:56:15Z | 2020-05-09T19:56:15Z | 2020-05-09T19:56:19Z |
TST: add messages to pytest.raises | diff --git a/pandas/tests/arrays/boolean/test_function.py b/pandas/tests/arrays/boolean/test_function.py
index c2987dc37b960..49a832f8dda20 100644
--- a/pandas/tests/arrays/boolean/test_function.py
+++ b/pandas/tests/arrays/boolean/test_function.py
@@ -54,7 +54,8 @@ def test_ufuncs_binary(ufunc):
tm.assert_extension_array_equal(result, expected)
# not handled types
- with pytest.raises(TypeError):
+ msg = r"operand type\(s\) all returned NotImplemented from __array_ufunc__"
+ with pytest.raises(TypeError, match=msg):
ufunc(a, "test")
@@ -76,7 +77,8 @@ def test_ufuncs_unary(ufunc):
@pytest.mark.parametrize("values", [[True, False], [True, None]])
def test_ufunc_reduce_raises(values):
a = pd.array(values, dtype="boolean")
- with pytest.raises(NotImplementedError):
+ msg = "The 'reduce' method is not supported"
+ with pytest.raises(NotImplementedError, match=msg):
np.add.reduce(a)
diff --git a/pandas/tests/indexes/ranges/test_constructors.py b/pandas/tests/indexes/ranges/test_constructors.py
index b7f673428ae38..f573da44e99b3 100644
--- a/pandas/tests/indexes/ranges/test_constructors.py
+++ b/pandas/tests/indexes/ranges/test_constructors.py
@@ -149,9 +149,9 @@ def test_constructor_corner(self):
tm.assert_index_equal(index, Index(arr))
# non-int raise Exception
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=r"Wrong type \<class 'str'\>"):
RangeIndex("1", "10", "1")
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=r"Wrong type \<class 'float'\>"):
RangeIndex(1.1, 10.2, 1.3)
# invalid passed type
diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py
index 05422e7b4419f..2438cd352f86f 100644
--- a/pandas/tests/indexes/ranges/test_range.py
+++ b/pandas/tests/indexes/ranges/test_range.py
@@ -117,7 +117,8 @@ def test_delete(self):
tm.assert_index_equal(result, expected)
assert result.name == expected.name
- with pytest.raises((IndexError, ValueError)):
+ msg = "index 5 is out of bounds for axis 0 with size 5"
+ with pytest.raises((IndexError, ValueError), match=msg):
# either depending on numpy version
result = idx.delete(len(idx))
diff --git a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
index 8bfba8c12e934..d3b13336e2a44 100644
--- a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
@@ -23,7 +23,8 @@ def test_detect_chained_assignment():
multiind = MultiIndex.from_tuples(tuples, names=["part", "side"])
zed = DataFrame(events, index=["a", "b"], columns=multiind)
- with pytest.raises(com.SettingWithCopyError):
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(com.SettingWithCopyError, match=msg):
zed["eyes"]["right"].fillna(value=555, inplace=True)
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 2ce07ec41758f..2e691c6fd76d8 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -43,10 +43,12 @@ def test_partial_setting(self):
# iloc/iat raise
s = s_orig.copy()
- with pytest.raises(IndexError):
+ msg = "iloc cannot enlarge its target object"
+ with pytest.raises(IndexError, match=msg):
s.iloc[3] = 5.0
- with pytest.raises(IndexError):
+ msg = "index 3 is out of bounds for axis 0 with size 3"
+ with pytest.raises(IndexError, match=msg):
s.iat[3] = 5.0
# ## frame ##
@@ -58,10 +60,12 @@ def test_partial_setting(self):
# iloc/iat raise
df = df_orig.copy()
- with pytest.raises(IndexError):
+ msg = "iloc cannot enlarge its target object"
+ with pytest.raises(IndexError, match=msg):
df.iloc[4, 2] = 5.0
- with pytest.raises(IndexError):
+ msg = "index 2 is out of bounds for axis 0 with size 2"
+ with pytest.raises(IndexError, match=msg):
df.iat[4, 2] = 5.0
# row setting where it exists
@@ -162,7 +166,8 @@ def test_partial_setting_mixed_dtype(self):
# list-like must conform
df = DataFrame(columns=["A", "B"])
- with pytest.raises(ValueError):
+ msg = "cannot set a row with mismatched columns"
+ with pytest.raises(ValueError, match=msg):
df.loc[0] = [1, 2, 3]
# TODO: #15657, these are left as object and not coerced
@@ -330,10 +335,12 @@ def test_partial_set_invalid(self):
df = orig.copy()
# don't allow not string inserts
- with pytest.raises(TypeError):
+ msg = "cannot insert DatetimeIndex with incompatible label"
+
+ with pytest.raises(TypeError, match=msg):
df.loc[100.0, :] = df.iloc[0]
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
df.loc[100, :] = df.iloc[0]
# allow object conversion here
@@ -375,13 +382,16 @@ def test_partial_set_empty_frame(self):
# frame
df = DataFrame()
- with pytest.raises(ValueError):
+ msg = "cannot set a frame with no defined columns"
+
+ with pytest.raises(ValueError, match=msg):
df.loc[1] = 1
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
df.loc[1] = Series([1], index=["foo"])
- with pytest.raises(ValueError):
+ msg = "cannot set a frame with no defined index and a scalar"
+ with pytest.raises(ValueError, match=msg):
df.loc[:, 1] = 1
# these work as they don't really change
diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py
index f2969e15fad8a..558f10d967df6 100644
--- a/pandas/tests/series/indexing/test_alter_index.py
+++ b/pandas/tests/series/indexing/test_alter_index.py
@@ -283,7 +283,8 @@ def test_reindex_datetimeindexes_tz_naive_and_aware():
idx = date_range("20131101", tz="America/Chicago", periods=7)
newidx = date_range("20131103", periods=10, freq="H")
s = Series(range(7), index=idx)
- with pytest.raises(TypeError):
+ msg = "Cannot compare tz-naive and tz-aware timestamps"
+ with pytest.raises(TypeError, match=msg):
s.reindex(newidx, method="ffill")
| - [x] xref #30999
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33650 | 2020-04-19T10:12:48Z | 2020-04-19T18:39:40Z | 2020-04-19T18:39:39Z | 2020-04-20T01:31:58Z |
BUG/API: getitem behavior with list match ndarray/index/series | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 03a547fadd7ca..9d40f9b6ffa2c 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -529,6 +529,7 @@ Indexing
- Bug in :meth:`DataFrame.iloc` when slicing a single column-:class:`DataFrame`` with ``ExtensionDtype`` (e.g. ``df.iloc[:, :1]``) returning an invalid result (:issue:`32957`)
- Bug in :meth:`DatetimeIndex.insert` and :meth:`TimedeltaIndex.insert` causing index ``freq`` to be lost when setting an element into an empty :class:`Series` (:issue:33573`)
- Bug in :meth:`Series.__setitem__` with an :class:`IntervalIndex` and a list-like key of integers (:issue:`33473`)
+- Bug in :meth:`Series.__getitem__` allowing missing labels with ``np.ndarray``, :class:`Index`, :class:`Series` indexers but not ``list``, these now all raise ``KeyError`` (:issue:`33646`)
Missing
^^^^^^^
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 9182e378fbaeb..256c6959fe6fb 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -949,11 +949,8 @@ def _get_with(self, key):
else:
return self.iloc[key]
- if isinstance(key, list):
- # handle the dup indexing case GH#4246
- return self.loc[key]
-
- return self.reindex(key)
+ # handle the dup indexing case GH#4246
+ return self.loc[key]
def _get_values_tuple(self, key):
# mpl hackaround
diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py
index 8878a4a6526af..e2b71b1f2f412 100644
--- a/pandas/tests/series/indexing/test_boolean.py
+++ b/pandas/tests/series/indexing/test_boolean.py
@@ -28,11 +28,6 @@ def test_getitem_boolean_empty():
# GH5877
# indexing with empty series
- s = Series(["A", "B"])
- expected = Series(np.nan, index=["C"], dtype=object)
- result = s[Series(["C"], dtype=object)]
- tm.assert_series_equal(result, expected)
-
s = Series(["A", "B"])
expected = Series(dtype=object, index=Index([], dtype="int64"))
result = s[Series([], dtype=object)]
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 2922f3c741320..9ce31f5f6decf 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -78,6 +78,18 @@ def test_getitem_median_slice_bug(self):
class TestSeriesGetitemListLike:
+ @pytest.mark.parametrize("box", [list, np.array, pd.Index, pd.Series])
+ def test_getitem_no_matches(self, box):
+ # GH#33462 we expect the same behavior for list/ndarray/Index/Series
+ ser = Series(["A", "B"])
+
+ key = Series(["C"], dtype=object)
+ key = box(key)
+
+ msg = r"None of \[Index\(\['C'\], dtype='object'\)\] are in the \[index\]"
+ with pytest.raises(KeyError, match=msg):
+ ser[key]
+
def test_getitem_intlist_intindex_periodvalues(self):
ser = Series(period_range("2000-01-01", periods=10, freq="D"))
| - [x] closes #33642
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33646 | 2020-04-19T02:18:36Z | 2020-04-19T21:14:41Z | 2020-04-19T21:14:40Z | 2020-04-19T21:30:44Z |
IO: Fix S3 Error Handling | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 03a547fadd7ca..77acf02bade21 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -582,6 +582,8 @@ I/O
- Bug in :func:`pandas.io.json.json_normalize` where location specified by `record_path` doesn't point to an array. (:issue:`26284`)
- :func:`pandas.read_hdf` has a more explicit error message when loading an
unsupported HDF file (:issue:`9539`)
+- Bug in :meth:`~DataFrame.to_parquet` was not raising ``PermissionError`` when writing to a private s3 bucket with invalid creds. (:issue:`27679`)
+- Bug in :meth:`~DataFrame.to_csv` was silently failing when writing to an invalid s3 bucket. (:issue:`32486`)
Plotting
^^^^^^^^
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 091f7662630ff..dcd764bec7426 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -62,7 +62,7 @@ def __init__(
# Extract compression mode as given, if dict
compression, self.compression_args = get_compression_method(compression)
- self.path_or_buf, _, _, _ = get_filepath_or_buffer(
+ self.path_or_buf, _, _, self.should_close = get_filepath_or_buffer(
path_or_buf, encoding=encoding, compression=compression, mode=mode
)
self.sep = sep
@@ -223,6 +223,8 @@ def save(self) -> None:
f.close()
for _fh in handles:
_fh.close()
+ elif self.should_close:
+ f.close()
def _save_header(self):
writer = self.writer
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 33747d2a6dd83..068210eddcc1b 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -92,7 +92,7 @@ def write(
**kwargs,
):
self.validate_dataframe(df)
- path, _, _, _ = get_filepath_or_buffer(path, mode="wb")
+ path, _, _, should_close = get_filepath_or_buffer(path, mode="wb")
from_pandas_kwargs: Dict[str, Any] = {"schema": kwargs.pop("schema", None)}
if index is not None:
@@ -109,6 +109,8 @@ def write(
)
else:
self.api.parquet.write_table(table, path, compression=compression, **kwargs)
+ if should_close:
+ path.close()
def read(self, path, columns=None, **kwargs):
path, _, _, should_close = get_filepath_or_buffer(path)
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index b7164477c31f2..0f09659a24936 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -54,8 +54,8 @@ def tips_df(datapath):
@pytest.mark.usefixtures("s3_resource")
@td.skip_if_not_us_locale()
class TestS3:
+ @td.skip_if_no("s3fs")
def test_parse_public_s3_bucket(self, tips_df):
- pytest.importorskip("s3fs")
# more of an integration test due to the not-public contents portion
# can probably mock this though.
@@ -159,7 +159,7 @@ def test_parse_public_s3_bucket_nrows_python(self, tips_df):
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
- def test_s3_fails(self):
+ def test_read_s3_fails(self):
with pytest.raises(IOError):
read_csv("s3://nyqpug/asdf.csv")
@@ -168,6 +168,22 @@ def test_s3_fails(self):
with pytest.raises(IOError):
read_csv("s3://cant_get_it/file.csv")
+ def test_write_s3_csv_fails(self, tips_df):
+ # GH 32486
+ # Attempting to write to an invalid S3 path should raise
+ with pytest.raises(
+ FileNotFoundError, match="The specified bucket does not exist"
+ ):
+ tips_df.to_csv("s3://an_s3_bucket_data_doesnt_exit/not_real.csv")
+
+ @td.skip_if_no("pyarrow")
+ def test_write_s3_parquet_fails(self, tips_df):
+ # GH 27679
+ with pytest.raises(
+ FileNotFoundError, match="The specified bucket does not exist"
+ ):
+ tips_df.to_parquet("s3://an_s3_bucket_data_doesnt_exit/not_real.parquet")
+
def test_read_csv_handles_boto_s3_object(self, s3_resource, tips_file):
# see gh-16135
diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py
index 557a9d5c13987..cf745fcc492a1 100644
--- a/pandas/tests/io/test_gcs.py
+++ b/pandas/tests/io/test_gcs.py
@@ -56,7 +56,15 @@ def open(*args):
monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
df1.to_csv("gs://test/test.csv", index=True)
- df2 = read_csv(StringIO(s.getvalue()), parse_dates=["dt"], index_col=0)
+
+ def mock_get_filepath_or_buffer(*args, **kwargs):
+ return StringIO(df1.to_csv()), None, None, False
+
+ monkeypatch.setattr(
+ "pandas.io.gcs.get_filepath_or_buffer", mock_get_filepath_or_buffer
+ )
+
+ df2 = read_csv("gs://test/test.csv", parse_dates=["dt"], index_col=0)
tm.assert_frame_equal(df1, df2)
@@ -86,28 +94,6 @@ def open(self, path, mode="r", *args):
)
-@td.skip_if_no("gcsfs")
-def test_gcs_get_filepath_or_buffer(monkeypatch):
- df1 = DataFrame(
- {
- "int": [1, 3],
- "float": [2.0, np.nan],
- "str": ["t", "s"],
- "dt": date_range("2018-06-18", periods=2),
- }
- )
-
- def mock_get_filepath_or_buffer(*args, **kwargs):
- return (StringIO(df1.to_csv(index=False)), None, None, False)
-
- monkeypatch.setattr(
- "pandas.io.gcs.get_filepath_or_buffer", mock_get_filepath_or_buffer
- )
- df2 = read_csv("gs://test/test.csv", parse_dates=["dt"])
-
- tm.assert_frame_equal(df1, df2)
-
-
@td.skip_if_installed("gcsfs")
def test_gcs_not_present_exception():
with pytest.raises(ImportError) as e:
| closes #27679
closes #32486
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33645 | 2020-04-19T00:40:51Z | 2020-04-21T12:39:06Z | 2020-04-21T12:39:06Z | 2020-05-26T09:30:12Z |
BUG: Groupby quantiles incorrect bins #33200 | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 4605c14643fa2..5cbc01a4ba67f 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -815,6 +815,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrame.resample` where an ``AmbiguousTimeError`` would be raised when the resulting timezone aware :class:`DatetimeIndex` had a DST transition at midnight (:issue:`25758`)
- Bug in :meth:`DataFrame.groupby` where a ``ValueError`` would be raised when grouping by a categorical column with read-only categories and ``sort=False`` (:issue:`33410`)
- Bug in :meth:`GroupBy.first` and :meth:`GroupBy.last` where None is not preserved in object dtype (:issue:`32800`)
+- Bug in :meth:`GroupBy.quantile` causes the quantiles to be shifted when the ``by`` axis contains ``NaN`` (:issue:`33200`, :issue:`33569`)
- Bug in :meth:`Rolling.min` and :meth:`Rolling.max`: Growing memory usage after multiple calls when using a fixed window (:issue:`30726`)
- Bug in :meth:`Series.groupby` would raise ``ValueError`` when grouping by :class:`PeriodIndex` level (:issue:`34010`)
- Bug in :meth:`GroupBy.agg`, :meth:`GroupBy.transform`, and :meth:`GroupBy.resample` where subclasses are not preserved (:issue:`28330`)
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index d5d706650bb34..4e792da31e1d5 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -777,7 +777,13 @@ def group_quantile(ndarray[float64_t] out,
non_na_counts[lab] += 1
# Get an index of values sorted by labels and then values
- order = (values, labels)
+ if labels.any():
+ # Put '-1' (NaN) labels as the last group so it does not interfere
+ # with the calculations.
+ labels_for_lexsort = np.where(labels == -1, labels.max() + 1, labels)
+ else:
+ labels_for_lexsort = labels
+ order = (values, labels_for_lexsort)
sort_arr = np.lexsort(order).astype(np.int64, copy=False)
with nogil:
diff --git a/pandas/tests/groupby/test_quantile.py b/pandas/tests/groupby/test_quantile.py
index 87347fe1293ef..8cfd8035502c3 100644
--- a/pandas/tests/groupby/test_quantile.py
+++ b/pandas/tests/groupby/test_quantile.py
@@ -181,15 +181,32 @@ def test_quantile_missing_group_values_no_segfaults():
grp.quantile()
-def test_quantile_missing_group_values_correct_results():
- # GH 28662
- data = np.array([1.0, np.nan, 3.0, np.nan])
- df = pd.DataFrame(dict(key=data, val=range(4)))
+@pytest.mark.parametrize(
+ "key, val, expected_key, expected_val",
+ [
+ ([1.0, np.nan, 3.0, np.nan], range(4), [1.0, 3.0], [0.0, 2.0]),
+ ([1.0, np.nan, 2.0, 2.0], range(4), [1.0, 2.0], [0.0, 2.5]),
+ (["a", "b", "b", np.nan], range(4), ["a", "b"], [0, 1.5]),
+ ([0], [42], [0], [42.0]),
+ ([], [], np.array([], dtype="float64"), np.array([], dtype="float64")),
+ ],
+)
+def test_quantile_missing_group_values_correct_results(
+ key, val, expected_key, expected_val
+):
+ # GH 28662, GH 33200, GH 33569
+ df = pd.DataFrame({"key": key, "val": val})
- result = df.groupby("key").quantile()
expected = pd.DataFrame(
- [1.0, 3.0], index=pd.Index([1.0, 3.0], name="key"), columns=["val"]
+ expected_val, index=pd.Index(expected_key, name="key"), columns=["val"]
)
+
+ grp = df.groupby("key")
+
+ result = grp.quantile(0.5)
+ tm.assert_frame_equal(result, expected)
+
+ result = grp.quantile()
tm.assert_frame_equal(result, expected)
| Maintain the order of the bins in group_quantile. Updated tests #33200
- [x] closes #33200, closes #33569
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33644 | 2020-04-19T00:34:00Z | 2020-05-25T21:58:07Z | 2020-05-25T21:58:07Z | 2020-05-26T08:31:01Z |
REF: dispatch Series.__setitem__ to loc/iloc, remove redundant helpers | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index d100cb0bb70d8..303365f50c546 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -616,6 +616,8 @@ def _get_setitem_indexer(self, key):
# invalid indexer type vs 'other' indexing errors
if "cannot do" in str(e):
raise
+ elif "unhashable type" in str(e):
+ raise
raise IndexingError(key) from e
def _ensure_listlike_indexer(self, key, axis=None):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 9182e378fbaeb..854c87071e24a 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1031,7 +1031,7 @@ def __setitem__(self, key, value):
try:
self._where(~key, value, inplace=True)
except InvalidIndexError:
- self._set_values(key.astype(np.bool_), value)
+ self.iloc[key] = value
return
else:
@@ -1049,8 +1049,10 @@ def _set_with_engine(self, key, value):
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
+ # extract_array so that if we set e.g. ser[-5:] = ser[:5]
+ # we get the first five values, and not 5 NaNs
indexer = self.index._convert_slice_indexer(key, kind="getitem")
- return self._set_values(indexer, value)
+ self.iloc[indexer] = extract_array(value, extract_numpy=True)
else:
assert not isinstance(key, tuple)
@@ -1068,25 +1070,11 @@ def _set_with(self, key, value):
# should be caught by the is_bool_indexer check in __setitem__
if key_type == "integer":
if not self.index._should_fallback_to_positional():
- self._set_labels(key, value)
+ self.loc[key] = value
else:
- self._set_values(key, value)
+ self.iloc[key] = value
else:
- self._set_labels(key, value)
-
- def _set_labels(self, key, value):
- key = com.asarray_tuplesafe(key)
- indexer: np.ndarray = self.index.get_indexer(key)
- mask = indexer == -1
- if mask.any():
- raise ValueError(f"{key[mask]} not contained in the index")
- self._set_values(indexer, value)
-
- def _set_values(self, key, value):
- if isinstance(key, Series):
- key = key._values
- self._mgr = self._mgr.setitem(indexer=key, value=value)
- self._maybe_update_cacher()
+ self.loc[key] = value
def _set_value(self, label, value, takeable: bool = False):
"""
diff --git a/pandas/tests/series/indexing/test_loc.py b/pandas/tests/series/indexing/test_loc.py
index 7d6b6c78cc492..368adcfb32215 100644
--- a/pandas/tests/series/indexing/test_loc.py
+++ b/pandas/tests/series/indexing/test_loc.py
@@ -131,8 +131,8 @@ def test_basic_setitem_with_labels(datetime_series):
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
- msg = r"\[5\] not contained in the index"
- with pytest.raises(ValueError, match=msg):
+ msg = r"\[5\] not in index"
+ with pytest.raises(KeyError, match=msg):
s[inds_notfound] = 0
with pytest.raises(Exception, match=msg):
s[arr_inds_notfound] = 0
| This will entail some extra overhead, but we get a) de-duplication and b) the structure of `__setitem__` comes to parallel the structure of `__getitem__`, which i find helpful grok-wise. | https://api.github.com/repos/pandas-dev/pandas/pulls/33643 | 2020-04-18T23:56:25Z | 2020-04-19T21:12:39Z | 2020-04-19T21:12:39Z | 2020-04-19T21:30:23Z |
CLN: remove unnecessary non-scalar code in maybe_upcast_putmask | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index df70e73c6aadb..c9419fded5de9 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -350,6 +350,7 @@ def maybe_cast_to_extension_array(cls: Type["ExtensionArray"], obj, dtype=None):
def maybe_upcast_putmask(result: np.ndarray, mask: np.ndarray, other):
"""
A safe version of putmask that potentially upcasts the result.
+
The result is replaced with the first N elements of other,
where N is the number of True values in mask.
If the length of other is shorter than N, other will be repeated.
@@ -399,24 +400,6 @@ def maybe_upcast_putmask(result: np.ndarray, mask: np.ndarray, other):
other = np.array(other, dtype=result.dtype)
def changeit():
-
- # try to directly set by expanding our array to full
- # length of the boolean
- try:
- om = other[mask]
- except (IndexError, TypeError):
- # IndexError occurs in test_upcast when we have a boolean
- # mask of the wrong shape
- # TypeError occurs in test_upcast when `other` is a bool
- pass
- else:
- om_at = om.astype(result.dtype)
- if (om == om_at).all():
- new_result = result.values.copy()
- new_result[mask] = om_at
- result[:] = new_result
- return result, False
-
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
@@ -434,15 +417,8 @@ def changeit():
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
- if is_scalar(other) or (isinstance(other, np.ndarray) and other.ndim < 1):
- if isna(other):
- return changeit()
-
- # we have an ndarray and the masking has nans in it
- else:
-
- if isna(other).any():
- return changeit()
+ if isna(other):
+ return changeit()
try:
np.place(result, mask, other)
| A few months ago we got rid of the non-scalar case, so some of this became unnecessary | https://api.github.com/repos/pandas-dev/pandas/pulls/33641 | 2020-04-18T23:42:08Z | 2020-04-19T18:40:54Z | 2020-04-19T18:40:54Z | 2020-04-19T18:44:57Z |
DOC: Fix: single | diff --git a/doc/source/getting_started/index.rst b/doc/source/getting_started/index.rst
index 3f15c91f83c6a..eb7ee000a9a86 100644
--- a/doc/source/getting_started/index.rst
+++ b/doc/source/getting_started/index.rst
@@ -398,7 +398,7 @@ data set, a sliding window of the data or grouped by categories. The latter is a
<div class="card-body">
Change the structure of your data table in multiple ways. You can :func:`~pandas.melt` your data table from wide to long/tidy form or :func:`~pandas.pivot`
-from long to wide format. With aggregations built-in, a pivot table is created with a sinlge command.
+from long to wide format. With aggregations built-in, a pivot table is created with a single command.
.. image:: ../_static/schemas/07_melt.svg
:align: center
| One word in documentation was misspelled: single | https://api.github.com/repos/pandas-dev/pandas/pulls/33640 | 2020-04-18T22:42:49Z | 2020-04-18T23:27:41Z | 2020-04-18T23:27:41Z | 2020-04-19T15:02:59Z |
TST: groupby-reindex on DTI | diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 9fcbabb07857e..e2b5118922a5a 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -491,6 +491,26 @@ def test_apply_with_duplicated_non_sorted_axis(test_series):
tm.assert_frame_equal(result, expected)
+def test_apply_reindex_values():
+ # GH: 26209
+ # reindexing from a single column of a groupby object with duplicate indices caused
+ # a ValueError (cannot reindex from duplicate axis) in 0.24.2, the problem was
+ # solved in #30679
+ values = [1, 2, 3, 4]
+ indices = [1, 1, 2, 2]
+ df = pd.DataFrame(
+ {"group": ["Group1", "Group2"] * 2, "value": values}, index=indices
+ )
+ expected = pd.Series(values, index=indices, name="value")
+
+ def reindex_helper(x):
+ return x.reindex(np.arange(x.index.min(), x.index.max() + 1))
+
+ # the following group by raised a ValueError
+ result = df.groupby("group").value.apply(reindex_helper)
+ tm.assert_series_equal(expected, result)
+
+
def test_apply_corner_cases():
# #535, can't use sliding iterator
| Add test to check whether reindexing works correctly.
- [ ] closes #26209
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33638 | 2020-04-18T21:12:01Z | 2020-04-25T22:05:11Z | 2020-04-25T22:05:11Z | 2020-04-25T22:05:14Z |
REF: _AXIS_TO_AXIS_NUMBER to simplify axis access | diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst
index 992cdfa5d7332..56ef6fc479f2c 100644
--- a/doc/source/user_guide/cookbook.rst
+++ b/doc/source/user_guide/cookbook.rst
@@ -1333,33 +1333,6 @@ Values can be set to NaT using np.nan, similar to datetime
y[1] = np.nan
y
-Aliasing axis names
--------------------
-
-To globally provide aliases for axis names, one can define these 2 functions:
-
-.. ipython:: python
-
- def set_axis_alias(cls, axis, alias):
- if axis not in cls._AXIS_NUMBERS:
- raise Exception("invalid axis [%s] for alias [%s]" % (axis, alias))
- cls._AXIS_ALIASES[alias] = axis
-
-.. ipython:: python
-
- def clear_axis_alias(cls, axis, alias):
- if axis not in cls._AXIS_NUMBERS:
- raise Exception("invalid axis [%s] for alias [%s]" % (axis, alias))
- cls._AXIS_ALIASES.pop(alias, None)
-
-.. ipython:: python
-
- set_axis_alias(pd.DataFrame, 'columns', 'myaxis2')
- df2 = pd.DataFrame(np.random.randn(3, 2), columns=['c1', 'c2'],
- index=['i1', 'i2', 'i3'])
- df2.sum(axis='myaxis2')
- clear_axis_alias(pd.DataFrame, 'columns', 'myaxis2')
-
Creating example data
---------------------
diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py
index e45d3ca66b6ec..82867cf9dcd29 100644
--- a/pandas/core/computation/align.py
+++ b/pandas/core/computation/align.py
@@ -38,7 +38,7 @@ def _align_core_single_unary_op(
def _zip_axes_from_type(
typ: Type[FrameOrSeries], new_axes: Sequence[int]
) -> Dict[str, int]:
- axes = {name: new_axes[i] for i, name in typ._AXIS_NAMES.items()}
+ axes = {name: new_axes[i] for i, name in enumerate(typ._AXIS_ORDERS)}
return axes
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 85bb47485a2e7..dc0ec4294942b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8787,8 +8787,11 @@ def isin(self, values) -> "DataFrame":
# ----------------------------------------------------------------------
# Add index and columns
_AXIS_ORDERS = ["index", "columns"]
- _AXIS_NUMBERS = {"index": 0, "columns": 1}
- _AXIS_NAMES = {0: "index", 1: "columns"}
+ _AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = {
+ **NDFrame._AXIS_TO_AXIS_NUMBER,
+ 1: 1,
+ "columns": 1,
+ }
_AXIS_REVERSED = True
_AXIS_LEN = len(_AXIS_ORDERS)
_info_axis_number = 1
@@ -8801,6 +8804,18 @@ def isin(self, values) -> "DataFrame":
axis=0, doc="The column labels of the DataFrame."
)
+ @property
+ def _AXIS_NUMBERS(self) -> Dict[str, int]:
+ """.. deprecated:: 1.1.0"""
+ super()._AXIS_NUMBERS
+ return {"index": 0, "columns": 1}
+
+ @property
+ def _AXIS_NAMES(self) -> Dict[int, str]:
+ """.. deprecated:: 1.1.0"""
+ super()._AXIS_NAMES
+ return {0: "index", 1: "columns"}
+
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 673d661e9eff4..ccf344a0a1ece 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -67,7 +67,6 @@
is_dict_like,
is_extension_array_dtype,
is_float,
- is_integer,
is_list_like,
is_number,
is_numeric_dtype,
@@ -302,19 +301,32 @@ def _data(self):
# ----------------------------------------------------------------------
# Axis
- _AXIS_ALIASES = {"rows": 0}
- _AXIS_IALIASES = {0: "rows"}
_stat_axis_number = 0
_stat_axis_name = "index"
_ix = None
_AXIS_ORDERS: List[str]
- _AXIS_NUMBERS: Dict[str, int]
- _AXIS_NAMES: Dict[int, str]
+ _AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = {0: 0, "index": 0, "rows": 0}
_AXIS_REVERSED: bool
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
+ @property
+ def _AXIS_NUMBERS(self) -> Dict[str, int]:
+ """.. deprecated:: 1.1.0"""
+ warnings.warn(
+ "_AXIS_NUMBERS has been deprecated.", FutureWarning, stacklevel=3,
+ )
+ return {"index": 0}
+
+ @property
+ def _AXIS_NAMES(self) -> Dict[int, str]:
+ """.. deprecated:: 1.1.0"""
+ warnings.warn(
+ "_AXIS_NAMES has been deprecated.", FutureWarning, stacklevel=3,
+ )
+ return {0: "index"}
+
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
@@ -353,37 +365,24 @@ def _construct_axes_from_arguments(
return axes, kwargs
@classmethod
- def _get_axis_number(cls, axis) -> int:
- axis = cls._AXIS_ALIASES.get(axis, axis)
- if is_integer(axis):
- if axis in cls._AXIS_NAMES:
- return axis
- else:
- try:
- return cls._AXIS_NUMBERS[axis]
- except KeyError:
- pass
- raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
+ def _get_axis_number(cls, axis: Axis) -> int:
+ try:
+ return cls._AXIS_TO_AXIS_NUMBER[axis]
+ except KeyError:
+ raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
@classmethod
- def _get_axis_name(cls, axis) -> str:
- axis = cls._AXIS_ALIASES.get(axis, axis)
- if isinstance(axis, str):
- if axis in cls._AXIS_NUMBERS:
- return axis
- else:
- try:
- return cls._AXIS_NAMES[axis]
- except KeyError:
- pass
- raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
+ def _get_axis_name(cls, axis: Axis) -> str:
+ axis_number = cls._get_axis_number(axis)
+ return cls._AXIS_ORDERS[axis_number]
- def _get_axis(self, axis) -> Index:
- name = self._get_axis_name(axis)
- return getattr(self, name)
+ def _get_axis(self, axis: Axis) -> Index:
+ axis_number = self._get_axis_number(axis)
+ assert axis_number in {0, 1}
+ return self.index if axis_number == 0 else self.columns
@classmethod
- def _get_block_manager_axis(cls, axis) -> int:
+ def _get_block_manager_axis(cls, axis: Axis) -> int:
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
@@ -448,11 +447,11 @@ def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:
}
@property
- def _info_axis(self):
+ def _info_axis(self) -> Index:
return getattr(self, self._info_axis_name)
@property
- def _stat_axis(self):
+ def _stat_axis(self) -> Index:
return getattr(self, self._stat_axis_name)
@property
@@ -813,7 +812,7 @@ def squeeze(self, axis=None):
>>> df_0a.squeeze()
1
"""
- axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)
+ axis = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),)
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
@@ -1156,7 +1155,7 @@ class name
result = self if inplace else self.copy(deep=copy)
for axis in range(self._AXIS_LEN):
- v = axes.get(self._AXIS_NAMES[axis])
+ v = axes.get(self._get_axis_name(axis))
if v is lib.no_default:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 5a1d7f3b90bd9..9ef865a964123 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4599,8 +4599,6 @@ def to_period(self, freq=None, copy=True) -> "Series":
# ----------------------------------------------------------------------
# Add index
_AXIS_ORDERS = ["index"]
- _AXIS_NUMBERS = {"index": 0}
- _AXIS_NAMES = {0: "index"}
_AXIS_REVERSED = False
_AXIS_LEN = len(_AXIS_ORDERS)
_info_axis_number = 0
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 20724a498b397..eb7f15c78b671 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -867,12 +867,15 @@ def _convert_axes(self):
"""
Try to convert axes.
"""
- for axis in self.obj._AXIS_NUMBERS.keys():
+ for axis_name in self.obj._AXIS_ORDERS:
new_axis, result = self._try_convert_data(
- axis, self.obj._get_axis(axis), use_dtypes=False, convert_dates=True
+ name=axis_name,
+ data=self.obj._get_axis(axis_name),
+ use_dtypes=False,
+ convert_dates=True,
)
if result:
- setattr(self.obj, axis, new_axis)
+ setattr(self.obj, axis_name, new_axis)
def _try_convert_types(self):
raise AbstractMethodError(self)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 425118694fa02..311d8d0d55341 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3712,7 +3712,7 @@ def _create_axes(
# Now we can construct our new index axis
idx = axes[0]
a = obj.axes[idx]
- axis_name = obj._AXIS_NAMES[idx]
+ axis_name = obj._get_axis_name(idx)
new_index = _convert_index(axis_name, a, self.encoding, self.errors)
new_index.axis = idx
@@ -3919,7 +3919,7 @@ def process_axes(self, obj, selection: "Selection", columns=None):
def process_filter(field, filt):
- for axis_name in obj._AXIS_NAMES.values():
+ for axis_name in obj._AXIS_ORDERS:
axis_number = obj._get_axis_number(axis_name)
axis_values = obj._get_axis(axis_name)
assert axis_number is not None
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 2c8261a6dcc5a..05588ead54be4 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -86,7 +86,9 @@ def test_rename(self):
def test_get_numeric_data(self):
n = 4
- kwargs = {self._typ._AXIS_NAMES[i]: list(range(n)) for i in range(self._ndim)}
+ kwargs = {
+ self._typ._get_axis_name(i): list(range(n)) for i in range(self._ndim)
+ }
# get the numeric data
o = self._construct(n, **kwargs)
@@ -901,12 +903,22 @@ def test_pipe_tuple_error(self):
@pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
def test_axis_classmethods(self, box):
obj = box(dtype=object)
- values = (
- list(box._AXIS_NAMES.keys())
- + list(box._AXIS_NUMBERS.keys())
- + list(box._AXIS_ALIASES.keys())
- )
+ values = box._AXIS_TO_AXIS_NUMBER.keys()
for v in values:
assert obj._get_axis_number(v) == box._get_axis_number(v)
assert obj._get_axis_name(v) == box._get_axis_name(v)
assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v)
+
+ @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
+ def test_axis_names_deprecated(self, box):
+ # GH33637
+ obj = box(dtype=object)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ obj._AXIS_NAMES
+
+ @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
+ def test_axis_numbers_deprecated(self, box):
+ # GH33637
+ obj = box(dtype=object)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ obj._AXIS_NUMBERS
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index 682575cc9ed48..53a25eb321b73 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -257,7 +257,7 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
# like out = {'index': foo, 'columns': bar}
# Start by validating for consistency
- if "axis" in kwargs and any(x in kwargs for x in data._AXIS_NUMBERS):
+ if "axis" in kwargs and any(x in kwargs for x in data._AXIS_TO_AXIS_NUMBER):
msg = "Cannot specify both 'axis' and any of 'index' or 'columns'."
raise TypeError(msg)
@@ -302,8 +302,8 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
"a 'TypeError'."
)
warnings.warn(msg.format(method_name=method_name), FutureWarning, stacklevel=4)
- out[data._AXIS_NAMES[0]] = args[0]
- out[data._AXIS_NAMES[1]] = args[1]
+ out[data._get_axis_name(0)] = args[0]
+ out[data._get_axis_name(1)] = args[1]
else:
msg = f"Cannot specify all of '{arg_name}', 'index', 'columns'."
raise TypeError(msg)
| This adds a dict called ``_AXIS_TO_AXIS_NUMBER `` to NDFrame/DataFrame where the keys are the allowed parameter values for the ``axis`` parameter in various ndframe methods and the dict values are the related axis number. This makes getting to the correct axis more straight forward, see for example the new ``_get_axis_number``, and makes adding type hints to the ``axis`` parameter easier. | https://api.github.com/repos/pandas-dev/pandas/pulls/33637 | 2020-04-18T20:09:29Z | 2020-04-21T12:43:17Z | 2020-04-21T12:43:17Z | 2020-04-21T13:32:25Z |
ENH: allow passing freq=None to DatetimeIndex/TimedeltaIndex | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index af5834f01c24c..00e24478b09dd 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1790,6 +1790,7 @@ def maybe_infer_freq(freq):
-------
freq : {DateOffset, None}
freq_infer : bool
+ Whether we should inherit the freq of passed data.
"""
freq_infer = False
if not isinstance(freq, DateOffset):
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index d34bba68da49c..e3fbb906ed6b1 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -302,11 +302,13 @@ def _from_sequence(
dtype=None,
copy=False,
tz=None,
- freq=None,
+ freq=lib.no_default,
dayfirst=False,
yearfirst=False,
ambiguous="raise",
):
+ explicit_none = freq is None
+ freq = freq if freq is not lib.no_default else None
freq, freq_infer = dtl.maybe_infer_freq(freq)
@@ -321,6 +323,8 @@ def _from_sequence(
)
freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
+ if explicit_none:
+ freq = None
dtype = tz_to_dtype(tz)
result = cls._simple_new(subarr, freq=freq, dtype=dtype)
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 8c93dca783113..96b529d489b3b 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -141,13 +141,18 @@ def dtype(self):
# ----------------------------------------------------------------
# Constructors
- def __init__(self, values, dtype=TD64NS_DTYPE, freq=None, copy=False):
+ def __init__(self, values, dtype=TD64NS_DTYPE, freq=lib.no_default, copy=False):
values = extract_array(values)
inferred_freq = getattr(values, "_freq", None)
+ explicit_none = freq is None
+ freq = freq if freq is not lib.no_default else None
if isinstance(values, type(self)):
- if freq is None:
+ if explicit_none:
+ # dont inherit from values
+ pass
+ elif freq is None:
freq = values.freq
elif freq and values.freq:
freq = to_offset(freq)
@@ -206,13 +211,21 @@ def _simple_new(cls, values, freq=None, dtype=TD64NS_DTYPE):
return result
@classmethod
- def _from_sequence(cls, data, dtype=TD64NS_DTYPE, copy=False, freq=None, unit=None):
+ def _from_sequence(
+ cls, data, dtype=TD64NS_DTYPE, copy=False, freq=lib.no_default, unit=None
+ ):
if dtype:
_validate_td64_dtype(dtype)
+
+ explicit_none = freq is None
+ freq = freq if freq is not lib.no_default else None
+
freq, freq_infer = dtl.maybe_infer_freq(freq)
data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit)
freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
+ if explicit_none:
+ freq = None
result = cls._simple_new(data, freq=freq)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 1b43176d4b3e3..5a89c45a3e425 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -224,7 +224,7 @@ class DatetimeIndex(DatetimeTimedeltaMixin):
def __new__(
cls,
data=None,
- freq=None,
+ freq=lib.no_default,
tz=None,
normalize=False,
closed=None,
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 741951d480d18..df6a7fc170eb1 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -1,6 +1,6 @@
""" implement the TimedeltaIndex """
-from pandas._libs import NaT, Timedelta, index as libindex
+from pandas._libs import NaT, Timedelta, index as libindex, lib
from pandas._typing import DtypeObj, Label
from pandas.util._decorators import doc
@@ -121,7 +121,7 @@ def __new__(
cls,
data=None,
unit=None,
- freq=None,
+ freq=lib.no_default,
closed=None,
dtype=TD64NS_DTYPE,
copy=False,
@@ -141,12 +141,12 @@ def __new__(
"represent unambiguous timedelta values durations."
)
- if isinstance(data, TimedeltaArray) and freq is None:
+ if isinstance(data, TimedeltaArray) and freq is lib.no_default:
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
- if isinstance(data, TimedeltaIndex) and freq is None and name is None:
+ if isinstance(data, TimedeltaIndex) and freq is lib.no_default and name is None:
if copy:
return data.copy()
else:
@@ -340,6 +340,6 @@ def timedelta_range(
if freq is None and com.any_none(periods, start, end):
freq = "D"
- freq, freq_infer = dtl.maybe_infer_freq(freq)
+ freq, _ = dtl.maybe_infer_freq(freq)
tdarr = TimedeltaArray._generate_range(start, end, periods, freq, closed=closed)
return TimedeltaIndex._simple_new(tdarr, name=name)
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index 691f542fc2084..b15549839de03 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -816,6 +816,16 @@ def test_dti_constructor_preserve_dti_freq(self):
rng2 = DatetimeIndex(rng)
assert rng.freq == rng2.freq
+ def test_explicit_none_freq(self):
+ # Explicitly passing freq=None is respected
+ rng = date_range("1/1/2000", "1/2/2000", freq="5min")
+
+ result = DatetimeIndex(rng, freq=None)
+ assert result.freq is None
+
+ result = DatetimeIndex(rng._data, freq=None)
+ assert result.freq is None
+
def test_dti_constructor_years_only(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 6961
diff --git a/pandas/tests/indexes/timedeltas/test_constructors.py b/pandas/tests/indexes/timedeltas/test_constructors.py
index 3e5bb56c3e58e..acc68dfe7301f 100644
--- a/pandas/tests/indexes/timedeltas/test_constructors.py
+++ b/pandas/tests/indexes/timedeltas/test_constructors.py
@@ -227,3 +227,14 @@ def test_constructor_wrong_precision_raises(self):
msg = r"dtype timedelta64\[us\] cannot be converted to timedelta64\[ns\]"
with pytest.raises(ValueError, match=msg):
pd.TimedeltaIndex(["2000"], dtype="timedelta64[us]")
+
+ def test_explicit_none_freq(self):
+ # Explicitly passing freq=None is respected
+ tdi = timedelta_range(1, periods=5)
+ assert tdi.freq is not None
+
+ result = TimedeltaIndex(tdi, freq=None)
+ assert result.freq is None
+
+ result = TimedeltaIndex(tdi._data, freq=None)
+ assert result.freq is None
| cc @jreback discussed elsewhere | https://api.github.com/repos/pandas-dev/pandas/pulls/33635 | 2020-04-18T17:00:30Z | 2020-04-25T23:54:10Z | 2020-04-25T23:54:10Z | 2020-04-26T00:11:17Z |
CLN: replaced Appender with doc | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 9ed9db801d0a8..f0b690bc56d04 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -54,12 +54,7 @@
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, InvalidIndexError
-from pandas.util._decorators import (
- Appender,
- Substitution,
- doc,
- rewrite_axis_style_signature,
-)
+from pandas.util._decorators import Appender, doc, rewrite_axis_style_signature
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
@@ -2975,7 +2970,7 @@ class (index) object 'bird' 'bird' 'mammal' 'mammal'
else:
return xarray.Dataset.from_dataframe(self)
- @Substitution(returns=fmt.return_docstring)
+ @doc(returns=fmt.return_docstring)
def to_latex(
self,
buf=None,
@@ -3004,9 +2999,9 @@ def to_latex(
r"""
Render object to a LaTeX tabular, longtable, or nested table/tabular.
- Requires ``\usepackage{booktabs}``. The output can be copy/pasted
+ Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted
into a main LaTeX document or read from an external file
- with ``\input{table.tex}``.
+ with ``\input{{table.tex}}``.
.. versionchanged:: 0.20.2
Added to Series.
@@ -3029,13 +3024,13 @@ def to_latex(
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
- formatters : list of functions or dict of {str: function}, optional
+ formatters : list of functions or dict of {{str: function}}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function or str, optional, default None
Formatter for floating point numbers. For example
- ``float_format="%%.2f"`` and ``float_format="{:0.2f}".format`` will
+ ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will
both result in 0.1234 being formatted as 0.12.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
@@ -3053,7 +3048,7 @@ def to_latex(
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
- adding a \usepackage{longtable} to your LaTeX preamble.
+ adding a \usepackage{{longtable}} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
module. When set to False prevents from escaping latex special
@@ -3071,24 +3066,24 @@ def to_latex(
The default will be read from the config module.
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
- \usepackage{multirow} to your LaTeX preamble. Will print
+ \usepackage{{multirow}} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
caption : str, optional
- The LaTeX caption to be placed inside ``\caption{}`` in the output.
+ The LaTeX caption to be placed inside ``\caption{{}}`` in the output.
.. versionadded:: 1.0.0
label : str, optional
- The LaTeX label to be placed inside ``\label{}`` in the output.
- This is used with ``\ref{}`` in the main ``.tex`` file.
+ The LaTeX label to be placed inside ``\label{{}}`` in the output.
+ This is used with ``\ref{{}}`` in the main ``.tex`` file.
.. versionadded:: 1.0.0
position : str, optional
The LaTeX positional argument for tables, to be placed after
- ``\begin{}`` in the output.
- %(returns)s
+ ``\begin{{}}`` in the output.
+ {returns}
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
@@ -3097,18 +3092,18 @@ def to_latex(
Examples
--------
- >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
- ... 'mask': ['red', 'purple'],
- ... 'weapon': ['sai', 'bo staff']})
+ >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'],
+ ... mask=['red', 'purple'],
+ ... weapon=['sai', 'bo staff']))
>>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
- \begin{tabular}{lll}
+ \begin{{tabular}}{{lll}}
\toprule
name & mask & weapon \\
\midrule
Raphael & red & sai \\
Donatello & purple & bo staff \\
\bottomrule
- \end{tabular}
+ \end{{tabular}}
"""
# Get defaults from the pandas config
if self.ndim == 1:
@@ -6799,6 +6794,7 @@ def interpolate(
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
+
axis : {{0 or 'index', 1 or 'columns', None}}, default None
Axis to interpolate along.
limit : int, optional
@@ -6837,7 +6833,7 @@ def interpolate(
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
- **kwargs
+ ``**kwargs`` : optional
Keyword arguments to pass on to the interpolating function.
Returns
@@ -7253,11 +7249,11 @@ def isna(self: FrameOrSeries) -> FrameOrSeries:
--------
Show which entries in a DataFrame are NA.
- >>> df = pd.DataFrame({{'age': [5, 6, np.NaN],
- ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
+ >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
+ ... born=[pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
- ... 'name': ['Alfred', 'Batman', ''],
- ... 'toy': [None, 'Batmobile', 'Joker']}})
+ ... name=['Alfred', 'Batman', ''],
+ ... toy=[None, 'Batmobile', 'Joker']))
>>> df
age born name toy
0 5.0 NaT Alfred None
@@ -7320,11 +7316,11 @@ def notna(self: FrameOrSeries) -> FrameOrSeries:
--------
Show which entries in a DataFrame are not NA.
- >>> df = pd.DataFrame({{'age': [5, 6, np.NaN],
- ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
+ >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
+ ... born=[pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
- ... 'name': ['Alfred', 'Batman', ''],
- ... 'toy': [None, 'Batmobile', 'Joker']}})
+ ... name=['Alfred', 'Batman', ''],
+ ... toy=[None, 'Batmobile', 'Joker']))
>>> df
age born name toy
0 5.0 NaT Alfred None
@@ -10262,10 +10258,10 @@ def pct_change(
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
- >>> df = pd.DataFrame({
- ... 'FR': [4.0405, 4.0963, 4.3149],
- ... 'GR': [1.7246, 1.7482, 1.8519],
- ... 'IT': [804.74, 810.01, 860.13]},
+ >>> df = pd.DataFrame(dict(
+ ... FR=[4.0405, 4.0963, 4.3149],
+ ... GR=[1.7246, 1.7482, 1.8519],
+ ... IT=[804.74, 810.01, 860.13]),
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
@@ -10282,10 +10278,10 @@ def pct_change(
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
- >>> df = pd.DataFrame({
- ... '2016': [1769950, 30586265],
- ... '2015': [1500923, 40912316],
- ... '2014': [1371819, 41403351]},
+ >>> df = pd.DataFrame(dict([
+ ... ('2016', [1769950, 30586265]),
+ ... ('2015', [1500923, 40912316]),
+ ... ('2014', [1371819, 41403351])]),
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
@@ -10701,43 +10697,43 @@ def _doc_parms(cls):
_num_doc = """
-%(desc)s
+{desc}
Parameters
----------
-axis : %(axis_descr)s
+axis : {axis_descr}
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
- particular level, collapsing into a %(name1)s.
+ particular level, collapsing into a {name1}.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
-%(min_count)s\
+{min_count}\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
-%(name1)s or %(name2)s (if level specified)\
-%(see_also)s\
-%(examples)s
+{name1} or {name2} (if level specified)\
+{see_also}\
+{examples}
"""
_num_ddof_doc = """
-%(desc)s
+{desc}
Parameters
----------
-axis : %(axis_descr)s
+axis : {axis_descr}
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
- particular level, collapsing into a %(name1)s.
+ particular level, collapsing into a {name1}.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
@@ -10747,7 +10743,7 @@ def _doc_parms(cls):
Returns
-------
-%(name1)s or %(name2)s (if level specified)
+{name1} or {name2} (if level specified)
Notes
-----
@@ -10755,11 +10751,11 @@ def _doc_parms(cls):
default `ddof=1`)\n"""
_bool_doc = """
-%(desc)s
+{desc}
Parameters
----------
-axis : {0 or 'index', 1 or 'columns', None}, default 0
+axis : {{0 or 'index', 1 or 'columns', None}}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
@@ -10773,24 +10769,24 @@ def _doc_parms(cls):
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
- True, then the result will be %(empty_value)s, as for an empty row/column.
+ True, then the result will be {empty_value}, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
- particular level, collapsing into a %(name1)s.
+ particular level, collapsing into a {name1}.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
-%(name1)s or %(name2)s
- If level is specified, then, %(name2)s is returned; otherwise, %(name1)s
+{name1} or {name2}
+ If level is specified, then, {name2} is returned; otherwise, {name1}
is returned.
-%(see_also)s
-%(examples)s"""
+{see_also}
+{examples}"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
@@ -10853,14 +10849,14 @@ def _doc_parms(cls):
"""
_cnum_doc = """
-Return cumulative %(desc)s over a DataFrame or Series axis.
+Return cumulative {desc} over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
-%(desc)s.
+{desc}.
Parameters
----------
-axis : {0 or 'index', 1 or 'columns'}, default 0
+axis : {{0 or 'index', 1 or 'columns'}}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
@@ -10871,21 +10867,21 @@ def _doc_parms(cls):
Returns
-------
-%(name1)s or %(name2)s
- Return cumulative %(desc)s of %(name1)s or %(name2)s.
+{name1} or {name2}
+ Return cumulative {desc} of {name1} or {name2}.
See Also
--------
-core.window.Expanding.%(accum_func_name)s : Similar functionality
+core.window.Expanding.{accum_func_name} : Similar functionality
but ignores ``NaN`` values.
-%(name2)s.%(accum_func_name)s : Return the %(desc)s over
- %(name2)s axis.
-%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
-%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
-%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
-%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
+{name2}.{accum_func_name} : Return the {desc} over
+ {name2} axis.
+{name2}.cummax : Return cumulative maximum over {name2} axis.
+{name2}.cummin : Return cumulative minimum over {name2} axis.
+{name2}.cumsum : Return cumulative sum over {name2} axis.
+{name2}.cumprod : Return cumulative product over {name2} axis.
-%(examples)s"""
+{examples}"""
_cummin_examples = """\
Examples
@@ -11366,7 +11362,8 @@ def _make_min_count_stat_function(
see_also: str = "",
examples: str = "",
) -> Callable:
- @Substitution(
+ @doc(
+ _num_doc,
desc=desc,
name1=name1,
name2=name2,
@@ -11375,7 +11372,6 @@ def _make_min_count_stat_function(
see_also=see_also,
examples=examples,
)
- @Appender(_num_doc)
def stat_func(
self,
axis=None,
@@ -11422,7 +11418,8 @@ def _make_stat_function(
see_also: str = "",
examples: str = "",
) -> Callable:
- @Substitution(
+ @doc(
+ _num_doc,
desc=desc,
name1=name1,
name2=name2,
@@ -11431,7 +11428,6 @@ def _make_stat_function(
see_also=see_also,
examples=examples,
)
- @Appender(_num_doc)
def stat_func(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
@@ -11455,8 +11451,7 @@ def stat_func(
def _make_stat_function_ddof(
cls, name: str, name1: str, name2: str, axis_descr: str, desc: str, func: Callable
) -> Callable:
- @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
- @Appender(_num_ddof_doc)
+ @doc(_num_ddof_doc, desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
def stat_func(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
@@ -11487,7 +11482,8 @@ def _make_cum_function(
accum_func_name: str,
examples: str,
) -> Callable:
- @Substitution(
+ @doc(
+ _cnum_doc,
desc=desc,
name1=name1,
name2=name2,
@@ -11495,7 +11491,6 @@ def _make_cum_function(
accum_func_name=accum_func_name,
examples=examples,
)
- @Appender(_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
@@ -11533,7 +11528,8 @@ def _make_logical_function(
examples: str,
empty_value: bool,
) -> Callable:
- @Substitution(
+ @doc(
+ _bool_doc,
desc=desc,
name1=name1,
name2=name2,
@@ -11542,7 +11538,6 @@ def _make_logical_function(
examples=examples,
empty_value=empty_value,
)
- @Appender(_bool_doc)
def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if level is not None:
| - [x] ref #31942
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] replaced the Appender/Substitute decorators with doc
| https://api.github.com/repos/pandas-dev/pandas/pulls/33633 | 2020-04-18T16:20:35Z | 2020-09-13T22:20:56Z | 2020-09-13T22:20:56Z | 2020-09-13T22:21:03Z |
IO: Fix parquet read from s3 directory | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index e06b586926a3e..5b9749aac495c 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -625,6 +625,8 @@ I/O
- Bug in :meth:`~DataFrame.to_parquet` was not raising ``PermissionError`` when writing to a private s3 bucket with invalid creds. (:issue:`27679`)
- Bug in :meth:`~DataFrame.to_csv` was silently failing when writing to an invalid s3 bucket. (:issue:`32486`)
- Bug in :meth:`~DataFrame.read_feather` was raising an `ArrowIOError` when reading an s3 or http file path (:issue:`29055`)
+- Bug in :meth:`read_parquet` was raising a ``FileNotFoundError`` when passed an s3 directory path. (:issue:`26388`)
+- Bug in :meth:`~DataFrame.to_parquet` was throwing an ``AttributeError`` when writing a partitioned parquet file to s3 (:issue:`27596`)
Plotting
^^^^^^^^
diff --git a/pandas/io/common.py b/pandas/io/common.py
index dd3d205ca90eb..8349acafca1e3 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -150,6 +150,33 @@ def urlopen(*args, **kwargs):
return urllib.request.urlopen(*args, **kwargs)
+def get_fs_for_path(filepath: str):
+ """
+ Get appropriate filesystem given a filepath.
+ Supports s3fs, gcs and local file system.
+
+ Parameters
+ ----------
+ filepath : str
+ File path. e.g s3://bucket/object, /local/path, gcs://pandas/obj
+
+ Returns
+ -------
+ s3fs.S3FileSystem, gcsfs.GCSFileSystem, None
+ Appropriate FileSystem to use. None for local filesystem.
+ """
+ if is_s3_url(filepath):
+ from pandas.io import s3
+
+ return s3.get_fs()
+ elif is_gcs_url(filepath):
+ from pandas.io import gcs
+
+ return gcs.get_fs()
+ else:
+ return None
+
+
def get_filepath_or_buffer(
filepath_or_buffer: FilePathOrBuffer,
encoding: Optional[str] = None,
diff --git a/pandas/io/gcs.py b/pandas/io/gcs.py
index 1f5e0faedc6d2..d2d8fc2d2139f 100644
--- a/pandas/io/gcs.py
+++ b/pandas/io/gcs.py
@@ -6,6 +6,10 @@
)
+def get_fs():
+ return gcsfs.GCSFileSystem()
+
+
def get_filepath_or_buffer(
filepath_or_buffer, encoding=None, compression=None, mode=None
):
@@ -13,6 +17,6 @@ def get_filepath_or_buffer(
if mode is None:
mode = "rb"
- fs = gcsfs.GCSFileSystem()
+ fs = get_fs()
filepath_or_buffer = fs.open(filepath_or_buffer, mode)
return filepath_or_buffer, None, compression, True
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 068210eddcc1b..0a9daea105b64 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -8,7 +8,12 @@
from pandas import DataFrame, get_option
-from pandas.io.common import get_filepath_or_buffer, is_gcs_url, is_s3_url
+from pandas.io.common import (
+ get_filepath_or_buffer,
+ get_fs_for_path,
+ is_gcs_url,
+ is_s3_url,
+)
def get_engine(engine: str) -> "BaseImpl":
@@ -92,13 +97,15 @@ def write(
**kwargs,
):
self.validate_dataframe(df)
- path, _, _, should_close = get_filepath_or_buffer(path, mode="wb")
+ file_obj_or_path, _, _, should_close = get_filepath_or_buffer(path, mode="wb")
from_pandas_kwargs: Dict[str, Any] = {"schema": kwargs.pop("schema", None)}
if index is not None:
from_pandas_kwargs["preserve_index"] = index
table = self.api.Table.from_pandas(df, **from_pandas_kwargs)
+ # write_to_dataset does not support a file-like object when
+ # a dircetory path is used, so just pass the path string.
if partition_cols is not None:
self.api.parquet.write_to_dataset(
table,
@@ -108,20 +115,18 @@ def write(
**kwargs,
)
else:
- self.api.parquet.write_table(table, path, compression=compression, **kwargs)
+ self.api.parquet.write_table(
+ table, file_obj_or_path, compression=compression, **kwargs
+ )
if should_close:
- path.close()
+ file_obj_or_path.close()
def read(self, path, columns=None, **kwargs):
- path, _, _, should_close = get_filepath_or_buffer(path)
-
- kwargs["use_pandas_metadata"] = True
- result = self.api.parquet.read_table(
- path, columns=columns, **kwargs
- ).to_pandas()
- if should_close:
- path.close()
-
+ parquet_ds = self.api.parquet.ParquetDataset(
+ path, filesystem=get_fs_for_path(path), **kwargs
+ )
+ kwargs["columns"] = columns
+ result = parquet_ds.read_pandas(**kwargs).to_pandas()
return result
@@ -273,7 +278,7 @@ def read_parquet(path, engine: str = "auto", columns=None, **kwargs):
A file URL can also be a path to a directory that contains multiple
partitioned parquet files. Both pyarrow and fastparquet support
paths to directories as well as file URLs. A directory path could be:
- ``file://localhost/path/to/tables``
+ ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
diff --git a/pandas/io/s3.py b/pandas/io/s3.py
index 976c319f89d47..329c861d2386a 100644
--- a/pandas/io/s3.py
+++ b/pandas/io/s3.py
@@ -16,6 +16,10 @@ def _strip_schema(url):
return result.netloc + result.path
+def get_fs():
+ return s3fs.S3FileSystem(anon=False)
+
+
def get_file_and_filesystem(
filepath_or_buffer: FilePathOrBuffer, mode: Optional[str] = None
) -> Tuple[IO, Any]:
@@ -24,7 +28,7 @@ def get_file_and_filesystem(
if mode is None:
mode = "rb"
- fs = s3fs.S3FileSystem(anon=False)
+ fs = get_fs()
try:
file = fs.open(_strip_schema(filepath_or_buffer), mode)
except (FileNotFoundError, NoCredentialsError):
@@ -34,7 +38,7 @@ def get_file_and_filesystem(
# aren't valid for that bucket.
# A NoCredentialsError is raised if you don't have creds
# for that bucket.
- fs = s3fs.S3FileSystem(anon=True)
+ fs = get_fs()
file = fs.open(_strip_schema(filepath_or_buffer), mode)
return file, fs
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 280424c68297f..8a43d4079159b 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -1,7 +1,6 @@
""" test parquet compat """
import datetime
from distutils.version import LooseVersion
-import locale
import os
from warnings import catch_warnings
@@ -131,6 +130,7 @@ def check_round_trip(
read_kwargs=None,
expected=None,
check_names=True,
+ check_like=False,
repeat=2,
):
"""Verify parquet serializer and deserializer produce the same results.
@@ -150,6 +150,8 @@ def check_round_trip(
Expected deserialization result, otherwise will be equal to `df`
check_names: list of str, optional
Closed set of column names to be compared
+ check_like: bool, optional
+ If True, ignore the order of index & columns.
repeat: int, optional
How many times to repeat the test
"""
@@ -169,7 +171,9 @@ def compare(repeat):
with catch_warnings(record=True):
actual = read_parquet(path, **read_kwargs)
- tm.assert_frame_equal(expected, actual, check_names=check_names)
+ tm.assert_frame_equal(
+ expected, actual, check_names=check_names, check_like=check_like
+ )
if path is None:
with tm.ensure_clean() as path:
@@ -532,15 +536,37 @@ def test_categorical(self, pa):
expected = df.astype(object)
check_round_trip(df, pa, expected=expected)
- # GH#33077 2020-03-27
- @pytest.mark.xfail(
- locale.getlocale()[0] == "zh_CN",
- reason="dateutil cannot parse e.g. '五, 27 3月 2020 21:45:38 GMT'",
- )
def test_s3_roundtrip(self, df_compat, s3_resource, pa):
# GH #19134
check_round_trip(df_compat, pa, path="s3://pandas-test/pyarrow.parquet")
+ @td.skip_if_no("s3fs")
+ @pytest.mark.parametrize("partition_col", [["A"], []])
+ def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col):
+ from pandas.io.s3 import get_fs as get_s3_fs
+
+ # GH #26388
+ # https://github.com/apache/arrow/blob/master/python/pyarrow/tests/test_parquet.py#L2716
+ # As per pyarrow partitioned columns become 'categorical' dtypes
+ # and are added to back of dataframe on read
+
+ expected_df = df_compat.copy()
+ if partition_col:
+ expected_df[partition_col] = expected_df[partition_col].astype("category")
+ check_round_trip(
+ df_compat,
+ pa,
+ expected=expected_df,
+ path="s3://pandas-test/parquet_dir",
+ write_kwargs={
+ "partition_cols": partition_col,
+ "compression": None,
+ "filesystem": get_s3_fs(),
+ },
+ check_like=True,
+ repeat=1,
+ )
+
def test_partition_cols_supported(self, pa, df_full):
# GH #23283
partition_cols = ["bool", "int"]
| - [x] closes https://github.com/pandas-dev/pandas/issues/26388
- [x] closes https://github.com/pandas-dev/pandas/issues/27596
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
(Seems to have also fixed the xfailing test in https://github.com/pandas-dev/pandas/issues/33077)
NOTE: lets merge https://github.com/pandas-dev/pandas/pull/33645 first - since that fixes up a crucial bit of error handling around this functionality. | https://api.github.com/repos/pandas-dev/pandas/pulls/33632 | 2020-04-18T16:01:56Z | 2020-04-26T21:41:16Z | 2020-04-26T21:41:15Z | 2020-05-29T22:14:08Z |
CLN: Clean missing.py | diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 08a6d42042c1c..d329f4337de2e 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -208,11 +208,10 @@ def _use_inf_as_na(key):
def _isna_ndarraylike(obj):
- is_extension = is_extension_array_dtype(obj.dtype)
values = getattr(obj, "_values", obj)
dtype = values.dtype
- if is_extension:
+ if is_extension_array_dtype(dtype):
result = values.isna()
elif is_string_dtype(dtype):
result = _isna_string_dtype(values, dtype, old=False)
| Random nitpick here | https://api.github.com/repos/pandas-dev/pandas/pulls/33631 | 2020-04-18T15:21:01Z | 2020-04-19T22:24:25Z | 2020-04-19T22:24:24Z | 2020-04-19T22:25:25Z |
BUG: DataFrameGroupby std/sem modify grouped column when as_index=False | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 73892da2cbf71..15adb74e27ba7 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -817,7 +817,7 @@ Groupby/resample/rolling
- Bug in :meth:`Rolling.min` and :meth:`Rolling.max`: Growing memory usage after multiple calls when using a fixed window (:issue:`30726`)
- Bug in :meth:`GroupBy.agg`, :meth:`GroupBy.transform`, and :meth:`GroupBy.resample` where subclasses are not preserved (:issue:`28330`)
- Bug in :meth:`GroupBy.rolling.apply` ignores args and kwargs parameters (:issue:`33433`)
-
+- Bug in :meth:`DataFrameGroupby.std` and :meth:`DataFrameGroupby.sem` would modify grouped-by columns when ``as_index=False`` (:issue:`10355`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index d9b65f92ac0e1..b630aed69be10 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -649,11 +649,11 @@ def _set_group_selection(self):
):
return
- ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis]
if len(groupers):
# GH12839 clear selected obj cache when group selection changes
+ ax = self.obj._info_axis
self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
self._reset_cache("_selected_obj")
@@ -1368,8 +1368,18 @@ def std(self, ddof: int = 1):
Series or DataFrame
Standard deviation of values within each group.
"""
- # TODO: implement at Cython level?
- return np.sqrt(self.var(ddof=ddof))
+ result = self.var(ddof=ddof)
+ if result.ndim == 1:
+ result = np.sqrt(result)
+ else:
+ cols = result.columns.get_indexer_for(
+ result.columns.difference(self.exclusions).unique()
+ )
+ # TODO(GH-22046) - setting with iloc broken if labels are not unique
+ # .values to remove labels
+ result.iloc[:, cols] = np.sqrt(result.iloc[:, cols]).values
+
+ return result
@Substitution(name="groupby")
@Appender(_common_see_also)
@@ -1416,7 +1426,19 @@ def sem(self, ddof: int = 1):
Series or DataFrame
Standard error of the mean of values within each group.
"""
- return self.std(ddof=ddof) / np.sqrt(self.count())
+ result = self.std(ddof=ddof)
+ if result.ndim == 1:
+ result /= np.sqrt(self.count())
+ else:
+ cols = result.columns.get_indexer_for(
+ result.columns.difference(self.exclusions).unique()
+ )
+ # TODO(GH-22046) - setting with iloc broken if labels are not unique
+ # .values to remove labels
+ result.iloc[:, cols] = (
+ result.iloc[:, cols].values / np.sqrt(self.count().iloc[:, cols]).values
+ )
+ return result
@Substitution(name="groupby")
@Appender(_common_see_also)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 68c8b86250e06..9d7bc749d6e89 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -573,6 +573,28 @@ def test_ops_general(op, targop):
tm.assert_frame_equal(result, expected)
+def test_ops_not_as_index(reduction_func):
+ # GH 10355
+ # Using as_index=False should not modify grouped column
+
+ if reduction_func in ("nth", "ngroup", "size",):
+ pytest.skip("Skip until behavior is determined (GH #5755)")
+
+ if reduction_func in ("corrwith", "idxmax", "idxmin", "mad", "nunique", "skew",):
+ pytest.xfail(
+ "_GroupBy._python_apply_general incorrectly modifies grouping columns"
+ )
+
+ df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
+ expected = getattr(df.groupby("a"), reduction_func)().reset_index()
+
+ result = getattr(df.groupby("a", as_index=False), reduction_func)()
+ tm.assert_frame_equal(result, expected)
+
+ result = getattr(df.groupby("a", as_index=False)["b"], reduction_func)()
+ tm.assert_frame_equal(result, expected)
+
+
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
| - [x] closes #10355
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
When working on this, I noticed an unrelated line of code that could be moved inside an if-block and made the change. Should unrelated cleanups like this be left to a separate PR? Can revert if that's the case. | https://api.github.com/repos/pandas-dev/pandas/pulls/33630 | 2020-04-18T15:18:20Z | 2020-05-19T12:56:49Z | 2020-05-19T12:56:49Z | 2020-07-11T16:02:09Z |
BUG: Fix Categorical use_inf_as_na bug | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 719178a67459d..e7b79bed148dd 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -480,6 +480,7 @@ Categorical
- :meth:`Categorical.fillna` now accepts :class:`Categorical` ``other`` argument (:issue:`32420`)
- Bug where :meth:`Categorical.replace` would replace with ``NaN`` whenever the new value and replacement value were equal (:issue:`33288`)
- Bug where an ordered :class:`Categorical` containing only ``NaN`` values would raise rather than returning ``NaN`` when taking the minimum or maximum (:issue:`33450`)
+- Bug where :meth:`Series.isna` and :meth:`DataFrame.isna` would raise for categorical dtype when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`33594`)
Datetimelike
^^^^^^^^^^^^
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index d329f4337de2e..92e1b17c41694 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -134,13 +134,13 @@ def _isna_new(obj):
elif isinstance(obj, type):
return False
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass, ABCExtensionArray)):
- return _isna_ndarraylike(obj)
+ return _isna_ndarraylike(obj, old=False)
elif isinstance(obj, ABCDataFrame):
return obj.isna()
elif isinstance(obj, list):
- return _isna_ndarraylike(np.asarray(obj, dtype=object))
+ return _isna_ndarraylike(np.asarray(obj, dtype=object), old=False)
elif hasattr(obj, "__array__"):
- return _isna_ndarraylike(np.asarray(obj))
+ return _isna_ndarraylike(np.asarray(obj), old=False)
else:
return False
@@ -165,13 +165,13 @@ def _isna_old(obj):
elif isinstance(obj, type):
return False
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass, ABCExtensionArray)):
- return _isna_ndarraylike_old(obj)
+ return _isna_ndarraylike(obj, old=True)
elif isinstance(obj, ABCDataFrame):
return obj.isna()
elif isinstance(obj, list):
- return _isna_ndarraylike_old(np.asarray(obj, dtype=object))
+ return _isna_ndarraylike(np.asarray(obj, dtype=object), old=True)
elif hasattr(obj, "__array__"):
- return _isna_ndarraylike_old(np.asarray(obj))
+ return _isna_ndarraylike(np.asarray(obj), old=True)
else:
return False
@@ -207,40 +207,40 @@ def _use_inf_as_na(key):
globals()["_isna"] = _isna_new
-def _isna_ndarraylike(obj):
- values = getattr(obj, "_values", obj)
- dtype = values.dtype
-
- if is_extension_array_dtype(dtype):
- result = values.isna()
- elif is_string_dtype(dtype):
- result = _isna_string_dtype(values, dtype, old=False)
-
- elif needs_i8_conversion(dtype):
- # this is the NaT pattern
- result = values.view("i8") == iNaT
- else:
- result = np.isnan(values)
-
- # box
- if isinstance(obj, ABCSeries):
- result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)
-
- return result
+def _isna_ndarraylike(obj, old: bool = False):
+ """
+ Return an array indicating which values of the input array are NaN / NA.
+ Parameters
+ ----------
+ obj: array-like
+ The input array whose elements are to be checked.
+ old: bool
+ Whether or not to treat infinite values as NA.
-def _isna_ndarraylike_old(obj):
+ Returns
+ -------
+ array-like
+ Array of boolean values denoting the NA status of each element.
+ """
values = getattr(obj, "_values", obj)
dtype = values.dtype
- if is_string_dtype(dtype):
- result = _isna_string_dtype(values, dtype, old=True)
-
+ if is_extension_array_dtype(dtype):
+ if old:
+ result = values.isna() | (values == -np.inf) | (values == np.inf)
+ else:
+ result = values.isna()
+ elif is_string_dtype(dtype):
+ result = _isna_string_dtype(values, dtype, old=old)
elif needs_i8_conversion(dtype):
# this is the NaT pattern
result = values.view("i8") == iNaT
else:
- result = ~np.isfinite(values)
+ if old:
+ result = ~np.isfinite(values)
+ else:
+ result = np.isnan(values)
# box
if isinstance(obj, ABCSeries):
diff --git a/pandas/tests/arrays/categorical/test_missing.py b/pandas/tests/arrays/categorical/test_missing.py
index 9eb3c8b3a8c48..5309b8827e3f0 100644
--- a/pandas/tests/arrays/categorical/test_missing.py
+++ b/pandas/tests/arrays/categorical/test_missing.py
@@ -5,7 +5,8 @@
from pandas.core.dtypes.dtypes import CategoricalDtype
-from pandas import Categorical, Index, Series, isna
+import pandas as pd
+from pandas import Categorical, DataFrame, Index, Series, isna
import pandas._testing as tm
@@ -97,3 +98,53 @@ def test_fillna_array(self):
expected = Categorical(["A", "B", "C", "B", "A"], dtype=cat.dtype)
tm.assert_categorical_equal(result, expected)
assert isna(cat[-1]) # didnt modify original inplace
+
+ @pytest.mark.parametrize(
+ "values, expected",
+ [
+ ([1, 2, 3], np.array([False, False, False])),
+ ([1, 2, np.nan], np.array([False, False, True])),
+ ([1, 2, np.inf], np.array([False, False, True])),
+ ([1, 2, pd.NA], np.array([False, False, True])),
+ ],
+ )
+ def test_use_inf_as_na(self, values, expected):
+ # https://github.com/pandas-dev/pandas/issues/33594
+ with pd.option_context("mode.use_inf_as_na", True):
+ cat = Categorical(values)
+ result = cat.isna()
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = Series(cat).isna()
+ expected = Series(expected)
+ tm.assert_series_equal(result, expected)
+
+ result = DataFrame(cat).isna()
+ expected = DataFrame(expected)
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "values, expected",
+ [
+ ([1, 2, 3], np.array([False, False, False])),
+ ([1, 2, np.nan], np.array([False, False, True])),
+ ([1, 2, np.inf], np.array([False, False, True])),
+ ([1, 2, pd.NA], np.array([False, False, True])),
+ ],
+ )
+ def test_use_inf_as_na_outside_context(self, values, expected):
+ # https://github.com/pandas-dev/pandas/issues/33594
+ # Using isna directly for Categorical will fail in general here
+ cat = Categorical(values)
+
+ with pd.option_context("mode.use_inf_as_na", True):
+ result = pd.isna(cat)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = pd.isna(Series(cat))
+ expected = Series(expected)
+ tm.assert_series_equal(result, expected)
+
+ result = pd.isna(DataFrame(cat))
+ expected = DataFrame(expected)
+ tm.assert_frame_equal(result, expected)
| - [x] closes #33594
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33629 | 2020-04-18T14:25:58Z | 2020-04-27T21:07:23Z | 2020-04-27T21:07:23Z | 2020-05-26T09:34:25Z |
inspect-safety for DataFrame._constructor_expanddim | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 85bb47485a2e7..56a9b57a8e720 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -422,7 +422,12 @@ def _constructor(self) -> Type["DataFrame"]:
@property
def _constructor_expanddim(self):
- raise NotImplementedError("Not supported for DataFrames!")
+ # GH#31549 raising NotImplementedError on a property causes trouble
+ # for `inspect`
+ def constructor(*args, **kwargs):
+ raise NotImplementedError("Not supported for DataFrames!")
+
+ return constructor
# ----------------------------------------------------------------------
# Constructors
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index ec8613faaa663..5cf74d3205a13 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -1,12 +1,13 @@
from copy import deepcopy
import datetime
+import inspect
import pydoc
import numpy as np
import pytest
from pandas.compat import PY37
-from pandas.util._test_decorators import async_mark
+from pandas.util._test_decorators import async_mark, skip_if_no
import pandas as pd
from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range
@@ -569,3 +570,14 @@ def test_cache_on_copy(self):
assert df["a"].values[0] == -1
tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0], "y": [0]}))
+
+ @skip_if_no("jinja2")
+ def test_constructor_expanddim_lookup(self):
+ # GH#33628 accessing _constructor_expanddim should not
+ # raise NotImplementedError
+ df = DataFrame()
+
+ inspect.getmembers(df)
+
+ with pytest.raises(NotImplementedError, match="Not supported for DataFrames!"):
+ df._constructor_expanddim(np.arange(27).reshape(3, 3, 3))
| - [x] closes #31549, closes #31474
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33628 | 2020-04-18T14:07:48Z | 2020-04-21T12:53:51Z | 2020-04-21T12:53:51Z | 2020-04-21T14:37:23Z |
TST: Groupby first/last/nth nan column test | diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py
index 947907caf5cbc..0cbfbad85a8b6 100644
--- a/pandas/tests/groupby/test_nth.py
+++ b/pandas/tests/groupby/test_nth.py
@@ -140,6 +140,18 @@ def test_first_last_nth_dtypes(df_mixed_floats):
assert f.dtype == "int64"
+def test_first_last_nth_nan_dtype():
+ # GH 33591
+ df = pd.DataFrame({"data": ["A"], "nans": pd.Series([np.nan], dtype=object)})
+
+ grouped = df.groupby("data")
+ expected = df.set_index("data").nans
+ tm.assert_series_equal(grouped.nans.first(), expected)
+ tm.assert_series_equal(grouped.nans.last(), expected)
+ tm.assert_series_equal(grouped.nans.nth(-1), expected)
+ tm.assert_series_equal(grouped.nans.nth(0), expected)
+
+
def test_first_strings_timestamps():
# GH 11244
test = pd.DataFrame(
| - [X] xref https://github.com/pandas-dev/pandas/issues/33591#issuecomment-615365647
- [X] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
The first two asserts (`first()`/`last()`) fail on `1.0.3`, as expected. | https://api.github.com/repos/pandas-dev/pandas/pulls/33627 | 2020-04-18T11:37:32Z | 2020-04-23T21:30:56Z | 2020-04-23T21:30:56Z | 2020-04-23T21:31:06Z |
BUG: support median function for custom BaseIndexer rolling windows | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index a797090a83444..915827660a110 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -175,7 +175,7 @@ Other API changes
- :meth:`Groupby.groups` now returns an abbreviated representation when called on large dataframes (:issue:`1135`)
- ``loc`` lookups with an object-dtype :class:`Index` and an integer key will now raise ``KeyError`` instead of ``TypeError`` when key is missing (:issue:`31905`)
- Using a :func:`pandas.api.indexers.BaseIndexer` with ``skew``, ``cov``, ``corr`` will now raise a ``NotImplementedError`` (:issue:`32865`)
-- Using a :func:`pandas.api.indexers.BaseIndexer` with ``count``, ``min``, ``max`` will now return correct results for any monotonic :func:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`)
+- Using a :func:`pandas.api.indexers.BaseIndexer` with ``count``, ``min``, ``max``, ``median`` will now return correct results for any monotonic :func:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`)
- Added a :func:`pandas.api.indexers.FixedForwardWindowIndexer` class to support forward-looking windows during ``rolling`` operations.
-
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index f3889039c095e..673820fd8464a 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -843,7 +843,8 @@ def roll_kurt_variable(ndarray[float64_t] values, ndarray[int64_t] start,
def roll_median_c(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp, int64_t win):
+ ndarray[int64_t] end, int64_t minp, int64_t win=0):
+ # GH 32865. win argument kept for compatibility
cdef:
float64_t val, res, prev
bint err = False
@@ -858,7 +859,7 @@ def roll_median_c(ndarray[float64_t] values, ndarray[int64_t] start,
# actual skiplist ops outweigh any window computation costs
output = np.empty(N, dtype=float)
- if win == 0 or (end - start).max() == 0:
+ if (end - start).max() == 0:
output[:] = NaN
return output
win = (end - start).max()
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 62f470060b039..d4234770617b3 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -1429,7 +1429,8 @@ def mean(self, *args, **kwargs):
def median(self, **kwargs):
window_func = self._get_roll_func("roll_median_c")
- window_func = partial(window_func, win=self._get_window())
+ # GH 32865. Move max window size calculation to
+ # the median function implementation
return self._apply(window_func, center=self.center, name="median", **kwargs)
def std(self, ddof=1, *args, **kwargs):
diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
index 1a3fe865d2a7a..aee47a085eb9c 100644
--- a/pandas/tests/window/test_base_indexer.py
+++ b/pandas/tests/window/test_base_indexer.py
@@ -141,6 +141,12 @@ def get_window_bounds(self, num_values, min_periods, center, closed):
],
{"ddof": 1},
),
+ (
+ "median",
+ np.median,
+ [1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 8.5, np.nan],
+ {},
+ ),
],
)
def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs):
@@ -162,7 +168,19 @@ def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs)
rolling = constructor(values).rolling(window=indexer, min_periods=2)
result = getattr(rolling, func)()
+
+ # Check that the function output matches the explicitly provided array
expected = constructor(expected)
tm.assert_equal(result, expected)
+
+ # Check that the rolling function output matches applying an alternative
+ # function to the rolling window object
expected2 = constructor(rolling.apply(lambda x: np_func(x, **np_kwargs)))
tm.assert_equal(result, expected2)
+
+ # Check that the function output matches applying an alternative function
+ # if min_periods isn't specified
+ rolling3 = constructor(values).rolling(window=indexer)
+ result3 = getattr(rolling3, func)()
+ expected3 = constructor(rolling3.apply(lambda x: np_func(x, **np_kwargs)))
+ tm.assert_equal(result3, expected3)
| - [X] xref #32865
- [X] 1 tests added / 1 passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
## Scope of PR
This PR makes sure that when we calculate the median in `roll_median_c`, we override the maximum window width with the correct value, and the function doesn't shortcut by returning all NaNs.
## Details
The `median` function eventually calls `roll_median_c` which accepts a `win` parameter (maximum window width) to correctly allocate memory and initialize the skiplist data structure which is the backbone of the rolling median algorithm. Currently, `win` is determined by the `_get_window` function which returns `min_periods or 0` for custom `BaseIndexer` subclasses:
```python
def _get_window(self, other=None, win_type: Optional[str] = None) -> int:
"""
Return window length.
Parameters
----------
other :
ignored, exists for compatibility
win_type :
ignored, exists for compatibility
Returns
-------
window : int
"""
if isinstance(self.window, BaseIndexer):
return self.min_periods or 0
return self.window
```
Thus, `roll_median_c` either shortcuts or initializes to incorrect depth:
```python
...
if win == 0 or (end - start).max() == 0:
output[:] = NaN
return output
win = (end - start).max()
sl = skiplist_init(<int>win)
...
```
<s>I propose we determine max window length directly in the `median` function. This means that `start` and `end` arrays get calculated twice: here and in `_apply`. However, I belive this is better than injecting a median-specific crutch into `_apply` or messing with the shortcut in `roll_median_c` (we could attempt to override `win` if `(end - start).max() > 0`. This other option is explored below.</s>
After discussing with @mroeschke , we decided to implement the bugfix directly in `roll_median_c`. Details below.
Please say if you think another approach would be preferable.
## Background on the wider issue
We currently don't support several rolling window functions when building a rolling window object using a custom class descended from `pandas.api.indexers.Baseindexer`. The implementations were written with backward-looking windows in mind, and this led to these functions breaking.
Currently, using these functions returns a `NotImplemented` error thanks to #33057, but ideally we want to update the implementations, so that they will work without a performance hit. This is what I aim to do over a series of PRs.
## Perf notes
The function currently shortcuts because of the bug or initializes the main datastructure incorrectly. For this reason, benchmarks are meaningless.
Ran benchmarks vs. the shortcut anyway:
```
asv continuous -f 1.1 master HEAD -b ^rolling.ForwardWindowMethods
...
before after ratio
[b630cdbc] [ce82372f]
<master> <rolling-median>
+ 3.55▒0.2ms 75.2▒0.9ms 21.19 rolling.ForwardWindowMethods.time_rolling('DataFrame', 1000, 'float', 'median')
+ 4.25▒0.08ms 76.7▒2ms 18.06 rolling.ForwardWindowMethods.time_rolling('Series', 1000, 'float', 'median')
+ 4.02▒0.4ms 60.3▒0.5ms 14.99 rolling.ForwardWindowMethods.time_rolling('DataFrame', 1000, 'int', 'median')
+ 3.42▒0.2ms 47.4▒1ms 13.86 rolling.ForwardWindowMethods.time_rolling('DataFrame', 10, 'float', 'median')
+ 4.80▒0.1ms 61.2▒0.5ms 12.74 rolling.ForwardWindowMethods.time_rolling('Series', 1000, 'int', 'median')
+ 4.58▒0.06ms 49.7▒0.9ms 10.85 rolling.ForwardWindowMethods.time_rolling('Series', 10, 'float', 'median')
+ 4.45▒0.5ms 45.0▒0.8ms 10.10 rolling.ForwardWindowMethods.time_rolling('DataFrame', 10, 'int', 'median')
+ 5.02▒0.07ms 45.9▒0.6ms 9.14 rolling.ForwardWindowMethods.time_rolling('Series', 10, 'int', 'median')
SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.
PERFORMANCE DECREASED.
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/33626 | 2020-04-18T10:56:16Z | 2020-04-21T12:56:43Z | 2020-04-21T12:56:43Z | 2020-04-26T11:34:35Z |
CLN: simplify info | diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py
index 1fbc321160120..7b5e553cf394e 100644
--- a/pandas/io/formats/info.py
+++ b/pandas/io/formats/info.py
@@ -158,17 +158,18 @@ def info(
lines.append(str(type(data)))
lines.append(data.index._summary())
- if len(data.columns) == 0:
+ cols = data.columns
+ col_count = len(cols)
+ dtypes = data.dtypes
+
+ if col_count == 0:
lines.append(f"Empty {type(data).__name__}")
fmt.buffer_put_lines(buf, lines)
return
- cols = data.columns
- col_count = len(data.columns)
-
# hack
if max_cols is None:
- max_cols = get_option("display.max_info_columns", len(data.columns) + 1)
+ max_cols = get_option("display.max_info_columns", col_count + 1)
max_rows = get_option("display.max_info_rows", len(data) + 1)
@@ -179,7 +180,7 @@ def info(
exceeds_info_cols = col_count > max_cols
def _verbose_repr():
- lines.append(f"Data columns (total {len(data.columns)} columns):")
+ lines.append(f"Data columns (total {col_count} columns):")
id_head = " # "
column_head = "Column"
@@ -196,9 +197,9 @@ def _verbose_repr():
header = _put_str(id_head, space_num) + _put_str(column_head, space)
if show_counts:
counts = data.count()
- if len(cols) != len(counts): # pragma: no cover
+ if col_count != len(counts): # pragma: no cover
raise AssertionError(
- f"Columns must equal counts ({len(cols)} != {len(counts)})"
+ f"Columns must equal counts ({col_count} != {len(counts)})"
)
count_header = "Non-Null Count"
len_count = len(count_header)
@@ -214,7 +215,7 @@ def _verbose_repr():
dtype_header = "Dtype"
len_dtype = len(dtype_header)
- max_dtypes = max(len(pprint_thing(k)) for k in data.dtypes)
+ max_dtypes = max(len(pprint_thing(k)) for k in dtypes)
space_dtype = max(len_dtype, max_dtypes)
header += _put_str(count_header, space_count) + _put_str(
dtype_header, space_dtype
@@ -228,14 +229,14 @@ def _verbose_repr():
+ _put_str("-" * len_dtype, space_dtype)
)
- for i, col in enumerate(data.columns):
- dtype = data.dtypes.iloc[i]
+ for i, col in enumerate(cols):
+ dtype = dtypes[i]
col = pprint_thing(col)
line_no = _put_str(f" {i}", space_num)
count = ""
if show_counts:
- count = counts.iloc[i]
+ count = counts[i]
lines.append(
line_no
@@ -245,7 +246,7 @@ def _verbose_repr():
)
def _non_verbose_repr():
- lines.append(data.columns._summary(name="Columns"))
+ lines.append(cols._summary(name="Columns"))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
@@ -266,7 +267,7 @@ def _sizeof_fmt(num, size_qualifier):
_verbose_repr()
# groupby dtype.name to collect e.g. Categorical columns
- counts = data.dtypes.value_counts().groupby(lambda x: x.name).sum()
+ counts = dtypes.value_counts().groupby(lambda x: x.name).sum()
dtypes = [f"{k[0]}({k[1]:d})" for k in sorted(counts.items())]
lines.append(f"dtypes: {', '.join(dtypes)}")
| precursor to #31796 | https://api.github.com/repos/pandas-dev/pandas/pulls/33625 | 2020-04-18T10:43:15Z | 2020-04-19T18:59:29Z | 2020-04-19T18:59:29Z | 2020-04-19T19:02:02Z |
BUG: pd.Series.replace does not preserve the original dtype | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 366ea54a510ef..ad2599f765380 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1010,7 +1010,12 @@ def coerce_to_target_dtype(self, other):
if is_dtype_equal(self.dtype, dtype):
return self
- if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):
+ if is_extension_array_dtype(self.dtype) and not is_categorical_dtype(
+ self.dtype
+ ):
+ dtype = self.dtype
+
+ elif self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):
# we don't upcast to bool
return self.astype(object)
@@ -1053,6 +1058,8 @@ def coerce_to_target_dtype(self, other):
raise AssertionError(
f"possible recursion in coerce_to_target_dtype: {self} {other}"
)
+ if is_categorical_dtype(dtype) or self.is_datetime:
+ return self.astype(object)
try:
return self.astype(dtype)
diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py
index 330c682216f53..de1674d8a9ce6 100644
--- a/pandas/tests/series/methods/test_replace.py
+++ b/pandas/tests/series/methods/test_replace.py
@@ -3,6 +3,7 @@
import pandas as pd
import pandas._testing as tm
+from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
@@ -250,7 +251,7 @@ def test_replace2(self):
def test_replace_with_dictlike_and_string_dtype(self):
# GH 32621
s = pd.Series(["one", "two", np.nan], dtype="string")
- expected = pd.Series(["1", "2", np.nan])
+ expected = pd.Series(["1", "2", np.nan], dtype="string")
result = s.replace({"one": "1", "two": "2"})
tm.assert_series_equal(expected, result)
@@ -402,3 +403,52 @@ def test_replace_only_one_dictlike_arg(self):
msg = "Series.replace cannot use dict-value and non-None to_replace"
with pytest.raises(ValueError, match=msg):
ser.replace(to_replace, value)
+
+ @pytest.mark.parametrize(
+ "series, to_replace, expected",
+ [
+ (
+ pd.Series(["one", "two"], dtype="string"),
+ {"one": "1", "two": "2"},
+ "string",
+ ),
+ (pd.Series([1, 2], dtype="int64"), {1: 10, 2: 20}, "int64"),
+ (pd.Series([True, False], dtype="bool"), {True: False}, "bool"),
+ (
+ pd.Series(IntervalArray.from_breaks([1, 2, 3, 4]), dtype=pd.IntervalDtype("int64")),
+ {pd.Interval(1, 2): pd.Interval(10, 20)},
+ "interval[int64]",
+ ),
+ (
+ pd.Series(IntervalArray.from_breaks([1, 2, 3, 4]), dtype=pd.IntervalDtype("float64")),
+ {pd.Interval(1, 2): pd.Interval(0.2, 0.3)},
+ "interval[float64]",
+ ),
+ (
+ pd.Series([pd.Period("2020-05", freq="M")], dtype=pd.PeriodDtype("M")),
+ {pd.Period("2020-05", freq="M"): pd.Period("2020-06", freq="M")},
+ "period[M]",
+ ),
+ (
+ pd.Series(
+ pd.arrays.DatetimeArray(
+ np.array(
+ ["2000-01-01T12:00:00", "2000-01-02T12:00:00"],
+ dtype="M8[ns]",
+ ),
+ dtype=pd.DatetimeTZDtype(tz="US/Central"),
+ )
+ ),
+ {
+ pd.Timestamp(
+ "2000-01-01 06:00:00-0600", tz="US/Central"
+ ): pd.Timestamp("2000-01-01 12:00:00-0600", tz="US/Central")
+ },
+ "datetime64[ns, US/Central]",
+ ),
+ ],
+ )
+ def test_replace_dtype(self, series, to_replace, expected):
+ # GH 33484
+ result = series.replace(to_replace).dtype
+ assert expected == result
| - [ ] closes #33484
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33622 | 2020-04-18T06:57:53Z | 2020-09-25T09:33:29Z | null | 2020-09-25T09:33:30Z |
REF: Make numba function cache globally accessible | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index c007d4920cbe7..504de404b2509 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -76,6 +76,7 @@
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.util.numba_ import (
+ NUMBA_FUNC_CACHE,
check_kwargs_and_nopython,
get_jit_arguments,
jit_user_function,
@@ -161,8 +162,6 @@ def pinner(cls):
class SeriesGroupBy(GroupBy[Series]):
_apply_whitelist = base.series_apply_whitelist
- _numba_func_cache: Dict[Callable, Callable] = {}
-
def _iterate_slices(self) -> Iterable[Series]:
yield self._selected_obj
@@ -504,8 +503,9 @@ def _transform_general(
nopython, nogil, parallel = get_jit_arguments(engine_kwargs)
check_kwargs_and_nopython(kwargs, nopython)
validate_udf(func)
- numba_func = self._numba_func_cache.get(
- func, jit_user_function(func, nopython, nogil, parallel)
+ cache_key = (func, "groupby_transform")
+ numba_func = NUMBA_FUNC_CACHE.get(
+ cache_key, jit_user_function(func, nopython, nogil, parallel)
)
klass = type(self._selected_obj)
@@ -516,8 +516,8 @@ def _transform_general(
if engine == "numba":
values, index = split_for_numba(group)
res = numba_func(values, index, *args)
- if func not in self._numba_func_cache:
- self._numba_func_cache[func] = numba_func
+ if cache_key not in NUMBA_FUNC_CACHE:
+ NUMBA_FUNC_CACHE[cache_key] = numba_func
else:
res = func(group, *args, **kwargs)
@@ -847,8 +847,6 @@ class DataFrameGroupBy(GroupBy[DataFrame]):
_apply_whitelist = base.dataframe_apply_whitelist
- _numba_func_cache: Dict[Callable, Callable] = {}
-
_agg_see_also_doc = dedent(
"""
See Also
@@ -1397,8 +1395,9 @@ def _transform_general(
nopython, nogil, parallel = get_jit_arguments(engine_kwargs)
check_kwargs_and_nopython(kwargs, nopython)
validate_udf(func)
- numba_func = self._numba_func_cache.get(
- func, jit_user_function(func, nopython, nogil, parallel)
+ cache_key = (func, "groupby_transform")
+ numba_func = NUMBA_FUNC_CACHE.get(
+ cache_key, jit_user_function(func, nopython, nogil, parallel)
)
else:
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
@@ -1409,8 +1408,8 @@ def _transform_general(
if engine == "numba":
values, index = split_for_numba(group)
res = numba_func(values, index, *args)
- if func not in self._numba_func_cache:
- self._numba_func_cache[func] = numba_func
+ if cache_key not in NUMBA_FUNC_CACHE:
+ NUMBA_FUNC_CACHE[cache_key] = numba_func
# Return the result as a DataFrame for concatenation later
res = DataFrame(res, index=group.index, columns=group.columns)
else:
diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py
index c5b27b937a05b..af24189adbc27 100644
--- a/pandas/core/util/numba_.py
+++ b/pandas/core/util/numba_.py
@@ -8,6 +8,8 @@
from pandas._typing import FrameOrSeries
from pandas.compat._optional import import_optional_dependency
+NUMBA_FUNC_CACHE: Dict[Tuple[Callable, str], Callable] = dict()
+
def check_kwargs_and_nopython(
kwargs: Optional[Dict] = None, nopython: Optional[bool] = None
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index 40f17126fa163..ebc67d0a0e819 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -78,6 +78,7 @@ def _apply(
performing the original function call on the grouped object.
"""
kwargs.pop("floor", None)
+ kwargs.pop("original_func", None)
# TODO: can we de-duplicate with _dispatch?
def f(x, name=name, *args):
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 3fdf81c4bb570..7dfc210eab901 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -38,6 +38,7 @@
from pandas.core.base import DataError, PandasObject, SelectionMixin, ShallowMixin
import pandas.core.common as com
from pandas.core.indexes.api import Index, ensure_index
+from pandas.core.util.numba_ import NUMBA_FUNC_CACHE
from pandas.core.window.common import (
WindowGroupByMixin,
_doc_template,
@@ -93,7 +94,6 @@ def __init__(
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
- self._numba_func_cache: Dict[Optional[str], Callable] = dict()
@property
def _constructor(self):
@@ -505,7 +505,7 @@ def calc(x):
result = np.asarray(result)
if use_numba_cache:
- self._numba_func_cache[name] = func
+ NUMBA_FUNC_CACHE[(kwargs["original_func"], "rolling_apply")] = func
if center:
result = self._center_window(result, window)
@@ -1278,9 +1278,10 @@ def apply(
elif engine == "numba":
if raw is False:
raise ValueError("raw must be `True` when using the numba engine")
- if func in self._numba_func_cache:
+ cache_key = (func, "rolling_apply")
+ if cache_key in NUMBA_FUNC_CACHE:
# Return an already compiled version of roll_apply if available
- apply_func = self._numba_func_cache[func]
+ apply_func = NUMBA_FUNC_CACHE[cache_key]
else:
apply_func = generate_numba_apply_func(
args, kwargs, func, engine_kwargs
@@ -1297,6 +1298,7 @@ def apply(
name=func,
use_numba_cache=engine == "numba",
raw=raw,
+ original_func=func,
)
def _generate_cython_apply_func(self, args, kwargs, raw, offset, func):
diff --git a/pandas/tests/groupby/transform/test_numba.py b/pandas/tests/groupby/transform/test_numba.py
index 96078d0aa3662..28904b669ae56 100644
--- a/pandas/tests/groupby/transform/test_numba.py
+++ b/pandas/tests/groupby/transform/test_numba.py
@@ -4,6 +4,7 @@
from pandas import DataFrame
import pandas._testing as tm
+from pandas.core.util.numba_ import NUMBA_FUNC_CACHE
@td.skip_if_no("numba", "0.46.0")
@@ -98,13 +99,13 @@ def func_2(values, index):
expected = grouped.transform(lambda x: x + 1, engine="cython")
tm.assert_equal(result, expected)
# func_1 should be in the cache now
- assert func_1 in grouped._numba_func_cache
+ assert (func_1, "groupby_transform") in NUMBA_FUNC_CACHE
# Add func_2 to the cache
result = grouped.transform(func_2, engine="numba", engine_kwargs=engine_kwargs)
expected = grouped.transform(lambda x: x * 5, engine="cython")
tm.assert_equal(result, expected)
- assert func_2 in grouped._numba_func_cache
+ assert (func_2, "groupby_transform") in NUMBA_FUNC_CACHE
# Retest func_1 which should use the cache
result = grouped.transform(func_1, engine="numba", engine_kwargs=engine_kwargs)
diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py
index cc8aef1779b46..8ecf64b171df4 100644
--- a/pandas/tests/window/test_numba.py
+++ b/pandas/tests/window/test_numba.py
@@ -5,6 +5,7 @@
from pandas import Series
import pandas._testing as tm
+from pandas.core.util.numba_ import NUMBA_FUNC_CACHE
@td.skip_if_no("numba", "0.46.0")
@@ -59,7 +60,7 @@ def func_2(x):
tm.assert_series_equal(result, expected)
# func_1 should be in the cache now
- assert func_1 in roll._numba_func_cache
+ assert (func_1, "rolling_apply") in NUMBA_FUNC_CACHE
result = roll.apply(
func_2, engine="numba", engine_kwargs=engine_kwargs, raw=True
| - [x] tests passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
For all the numba accessible operations, we use a cache to store compiled JIT'd functions for performance.
This `_numba_func_cache` used to be an attribute of `Rolling` and `Groupby` objects before, but while working on https://github.com/pandas-dev/pandas/pull/33388, I cannot easily rely on having access to those objects.
This PR moves `_numba_func_cache` to `pandas/core/util/numba_.py`. Since this cache is now globally shared, the key has changed from `_numba_func_cache[func] = compiled_func` to `_numba_func_cache[(func, op)] = compiled_func`
| https://api.github.com/repos/pandas-dev/pandas/pulls/33621 | 2020-04-18T05:03:47Z | 2020-04-19T22:23:37Z | 2020-04-19T22:23:37Z | 2020-04-20T00:45:53Z |
CLN: isort core/api.py | diff --git a/setup.cfg b/setup.cfg
index 6c42b27c7b015..f7370b6cef8d6 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -119,7 +119,7 @@ combine_as_imports = True
line_length = 88
force_sort_within_sections = True
skip_glob = env,
-skip = pandas/__init__.py,pandas/core/api.py
+skip = pandas/__init__.py
[mypy]
ignore_missing_imports=True
| ```isort --quiet --recursive --check-only pandas asv_bench scripts``` runs fine | https://api.github.com/repos/pandas-dev/pandas/pulls/33619 | 2020-04-18T01:43:11Z | 2020-04-18T03:40:13Z | 2020-04-18T03:40:13Z | 2020-04-18T03:40:18Z |
CLN: Remove unrequired imports | diff --git a/pandas/io/common.py b/pandas/io/common.py
index ff527de79c387..dd3d205ca90eb 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -20,8 +20,7 @@
Type,
Union,
)
-from urllib.parse import ( # noqa
- urlencode,
+from urllib.parse import (
urljoin,
urlparse as parse_url,
uses_netloc,
@@ -32,13 +31,6 @@
from pandas._typing import FilePathOrBuffer
from pandas.compat import _get_lzma_file, _import_lzma
-from pandas.errors import ( # noqa
- AbstractMethodError,
- DtypeWarning,
- EmptyDataError,
- ParserError,
- ParserWarning,
-)
from pandas.core.dtypes.common import is_file_like
| Looks like these are no longer needed.
| https://api.github.com/repos/pandas-dev/pandas/pulls/33618 | 2020-04-18T01:40:11Z | 2020-04-19T18:46:52Z | 2020-04-19T18:46:52Z | 2020-04-19T18:47:01Z |
CLN: Cython warnings for type declartions | diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index d3d8bead88d08..1e53b789aa05c 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -141,10 +141,10 @@ cdef class BlockPlacement:
return BlockPlacement(val)
- def delete(self, loc) -> "BlockPlacement":
+ def delete(self, loc) -> BlockPlacement:
return BlockPlacement(np.delete(self.as_array, loc, axis=0))
- def append(self, others) -> "BlockPlacement":
+ def append(self, others) -> BlockPlacement:
if not len(others):
return self
@@ -185,7 +185,7 @@ cdef class BlockPlacement:
val = newarr
return BlockPlacement(val)
- def add(self, other) -> "BlockPlacement":
+ def add(self, other) -> BlockPlacement:
# We can get here with int or ndarray
return self.iadd(other)
| ```
warning: pandas/_libs/internals.pyx:144:29: Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.
warning: pandas/_libs/internals.pyx:147:32: Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.
warning: pandas/_libs/internals.pyx:188:28: Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.
```
(Against cython master)
| https://api.github.com/repos/pandas-dev/pandas/pulls/33617 | 2020-04-18T01:37:29Z | 2020-04-19T21:15:19Z | 2020-04-19T21:15:19Z | 2020-05-01T22:03:19Z |
REF: get .items out of BlockManager.apply | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index a28f89a79a880..ac0dffa8f6aa8 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8665,6 +8665,11 @@ def _where(
else:
align = self._get_axis_number(axis) == 1
+ if align and isinstance(other, NDFrame):
+ other = other.reindex(self._info_axis, axis=self._info_axis_number)
+ if isinstance(cond, NDFrame):
+ cond = cond.reindex(self._info_axis, axis=self._info_axis_number)
+
block_axis = self._get_block_manager_axis(axis)
if inplace:
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index dd950c0276646..6368a2498b04c 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -373,23 +373,20 @@ def apply(self: T, f, align_keys=None, **kwargs) -> T:
self._consolidate_inplace()
- align_copy = False
- if f == "where":
- align_copy = True
-
aligned_args = {k: kwargs[k] for k in align_keys}
for b in self.blocks:
if aligned_args:
- b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
if isinstance(obj, (ABCSeries, ABCDataFrame)):
- axis = obj._info_axis_number
- kwargs[k] = obj.reindex(
- b_items, axis=axis, copy=align_copy
- )._values
+ # The caller is responsible for ensuring that
+ # obj.axes[-1].equals(self.items)
+ if obj.ndim == 1:
+ kwargs[k] = obj.iloc[b.mgr_locs.indexer]._values
+ else:
+ kwargs[k] = obj.iloc[:, b.mgr_locs.indexer]._values
else:
# otherwise we have an ndarray
kwargs[k] = obj[b.mgr_locs.indexer]
@@ -1125,6 +1122,7 @@ def insert(self, loc: int, item: Label, value, allow_duplicates: bool = False):
new_axis = self.items.insert(loc, item)
if value.ndim == self.ndim - 1 and not is_extension_array_dtype(value.dtype):
+ # TODO(EA2D): special case not needed with 2D EAs
value = _safe_reshape(value, (1,) + value.shape)
block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
| https://api.github.com/repos/pandas-dev/pandas/pulls/33616 | 2020-04-18T01:28:13Z | 2020-04-19T23:30:51Z | 2020-04-19T23:30:51Z | 2020-04-19T23:53:03Z | |
CLN: avoid catching AssertionError, AttributeError in NDFrame methods | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index a28f89a79a880..490d3b8f4e003 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -701,10 +701,8 @@ def pop(self: FrameOrSeries, item) -> FrameOrSeries:
"""
result = self[item]
del self[item]
- try:
+ if self.ndim == 2:
result._reset_cacher()
- except AttributeError:
- pass
return result
@@ -3255,14 +3253,9 @@ def _maybe_update_cacher(
if ref is None:
del self._cacher
else:
- # Note: we need to call ref._maybe_cache_changed even in the
- # case where it will raise. (Uh, not clear why)
- try:
+ if len(self) == len(ref):
+ # otherwise, either self or ref has swapped in new arrays
ref._maybe_cache_changed(cacher[0], self)
- except AssertionError:
- # ref._mgr.setitem can raise
- # AssertionError because of shape mismatch
- pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t="referant")
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 71efde1cc5380..366ea54a510ef 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -51,7 +51,6 @@
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
- ABCExtensionArray,
ABCIndexClass,
ABCPandasArray,
ABCSeries,
@@ -2765,9 +2764,10 @@ def _safe_reshape(arr, new_shape):
"""
if isinstance(arr, ABCSeries):
arr = arr._values
- if not isinstance(arr, ABCExtensionArray):
- # TODO(EA2D): special case not needed with 2D EAs
- arr = arr.reshape(new_shape)
+ if not is_extension_array_dtype(arr.dtype):
+ # Note: this will include TimedeltaArray and tz-naive DatetimeArray
+ # TODO(EA2D): special case will be unnecessary with 2D EAs
+ arr = np.asarray(arr).reshape(new_shape)
return arr
| @WillAyd when i run `mypy` manually it passes, but when run via the pre-commit hook I get
```
pandas/core/internals/blocks.py:2739: error: unused 'type: ignore' comment
pandas/core/generic.py:4034: error: unused 'type: ignore' comment
pandas/core/generic.py:4093: error: unused 'type: ignore' comment
Found 3 errors in 2 files (checked 2 source files)
```
Something similar is happening with most branches these days. Have you seen this? | https://api.github.com/repos/pandas-dev/pandas/pulls/33615 | 2020-04-17T20:56:51Z | 2020-04-21T14:47:58Z | 2020-04-21T14:47:58Z | 2020-04-21T14:56:55Z |
CLN: assorted cleanups | diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index e4aeb7ad69792..9bb5e10348e47 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -55,9 +55,7 @@ from pandas.core.dtypes.common import (
is_bool_dtype, is_object_dtype,
is_datetime64_dtype,
pandas_dtype, is_extension_array_dtype)
-from pandas.core.arrays import Categorical
from pandas.core.dtypes.concat import union_categoricals
-import pandas.io.common as icom
from pandas.compat import _import_lzma, _get_lzma_file
from pandas.errors import (ParserError, DtypeWarning,
@@ -1149,7 +1147,8 @@ cdef class TextReader:
# Method accepts list of strings, not encoded ones.
true_values = [x.decode() for x in self.true_values]
- cat = Categorical._from_inferred_categories(
+ array_type = dtype.construct_array_type()
+ cat = array_type._from_inferred_categories(
cats, codes, dtype, true_values=true_values)
return cat, na_count
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 9db9805e09b50..e6967630b97ac 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -29,7 +29,6 @@
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
- is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_extension_array_dtype,
@@ -122,12 +121,7 @@ def _ensure_data(values, dtype=None):
return ensure_object(values), "object"
# datetimelike
- if (
- needs_i8_conversion(values)
- or is_period_dtype(dtype)
- or is_datetime64_any_dtype(dtype)
- or is_timedelta64_dtype(dtype)
- ):
+ if needs_i8_conversion(values) or needs_i8_conversion(dtype):
if is_period_dtype(values) or is_period_dtype(dtype):
from pandas import PeriodIndex
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index ece92acae6461..e12083dae5035 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -638,8 +638,6 @@ def astype(self, dtype, copy=True):
# 1. PeriodArray.astype handles period -> period
# 2. DatetimeArray.astype handles conversion between tz.
# 3. DatetimeArray.astype handles datetime -> period
- from pandas import Categorical
-
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
@@ -667,7 +665,8 @@ def astype(self, dtype, copy=True):
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
elif is_categorical_dtype(dtype):
- return Categorical(self, dtype=dtype)
+ arr_cls = dtype.construct_array_type()
+ return arr_cls(self, dtype=dtype)
else:
return np.asarray(self, dtype=dtype)
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index f777f52f56c9c..1316e15334fa6 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -22,8 +22,8 @@
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.common import (
- _INT64_DTYPE,
DT64NS_DTYPE,
+ INT64_DTYPE,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
@@ -404,7 +404,7 @@ def _generate_range(
start = start.tz_localize(None)
if end is not None:
end = end.tz_localize(None)
- # TODO: consider re-implementing _cached_range; GH#17914
+
values, _tz = generate_regular_range(start, end, periods, freq)
index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz))
@@ -1963,7 +1963,7 @@ def sequence_to_dt64ns(
if tz:
tz = timezones.maybe_get_tz(tz)
- if data.dtype != _INT64_DTYPE:
+ if data.dtype != INT64_DTYPE:
data = data.astype(np.int64, copy=False)
result = data.view(DT64NS_DTYPE)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 5945d8a4b432d..ee514888c6331 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1143,8 +1143,7 @@ def _map_values(self, mapper, na_action=None):
raise NotImplementedError
map_f = lambda values, f: values.map(f)
else:
- values = self.astype(object)
- values = getattr(values, "values", values)
+ values = self.astype(object)._values
if na_action == "ignore":
def map_f(values, f):
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 7dda6850ba4f7..df70e73c6aadb 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -21,9 +21,9 @@
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
- _INT64_DTYPE,
_POSSIBLY_CAST_DTYPES,
DT64NS_DTYPE,
+ INT64_DTYPE,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
@@ -954,7 +954,7 @@ def astype_nansafe(arr, dtype, copy: bool = True, skipna: bool = False):
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
- if dtype not in [_INT64_DTYPE, TD64NS_DTYPE]:
+ if dtype not in [INT64_DTYPE, TD64NS_DTYPE]:
# allow frequency conversions
# we return a float here!
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 5ea3ca09862c1..abfbe8d783325 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -60,7 +60,7 @@
DT64NS_DTYPE = conversion.DT64NS_DTYPE
TD64NS_DTYPE = conversion.TD64NS_DTYPE
-_INT64_DTYPE = np.dtype(np.int64)
+INT64_DTYPE = np.dtype(np.int64)
# oh the troubles to reduce import time
_is_scipy_sparse = None
@@ -68,9 +68,6 @@
ensure_float64 = algos.ensure_float64
ensure_float32 = algos.ensure_float32
-_ensure_datetime64ns = conversion.ensure_datetime64ns
-_ensure_timedelta64ns = conversion.ensure_timedelta64ns
-
def ensure_float(arr):
"""
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2878204f5ee79..25a3b14120537 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3548,8 +3548,6 @@ class animal locomotion
result._set_is_copy(self, copy=not result._is_view)
return result
- _xs: Callable = xs
-
def __getitem__(self, item):
raise AbstractMethodError(self)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index dd072cf00ed20..d100cb0bb70d8 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1046,7 +1046,7 @@ def _getitem_tuple(self, tup: Tuple):
def _get_label(self, label, axis: int):
# GH#5667 this will fail if the label is not present in the axis.
- return self.obj._xs(label, axis=axis)
+ return self.obj.xs(label, axis=axis)
def _handle_lowerdim_multi_index_axis0(self, tup: Tuple):
# we have an axis0 multi-index, handle or raise
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 1bcbcb61ddde4..71efde1cc5380 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -585,8 +585,7 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"):
newb = self.copy() if copy else self
return newb
- # TODO(extension)
- # should we make this attribute?
+ # TODO(EA2D): special case not needed with 2D EAs
if isinstance(values, np.ndarray):
values = values.reshape(self.shape)
@@ -1554,6 +1553,7 @@ def __init__(self, values, placement, ndim=None):
@property
def shape(self):
+ # TODO(EA2D): override unnecessary with 2D EAs
if self.ndim == 1:
return ((len(self.values)),)
return (len(self.mgr_locs), len(self.values))
@@ -1561,6 +1561,7 @@ def shape(self):
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
+ # TODO(EA2D): unnecessary with 2D EAs
col, loc = col
if not com.is_null_slice(col) and col != 0:
raise IndexError(f"{self} only contains one item")
@@ -1669,6 +1670,7 @@ def setitem(self, indexer, value):
be a compatible shape.
"""
if isinstance(indexer, tuple):
+ # TODO(EA2D): not needed with 2D EAs
# we are always 1-D
indexer = indexer[0]
@@ -1678,6 +1680,7 @@ def setitem(self, indexer, value):
def get_values(self, dtype=None):
# ExtensionArrays must be iterable, so this works.
+ # TODO(EA2D): reshape not needed with 2D EAs
return np.asarray(self.values).reshape(self.shape)
def array_values(self) -> ExtensionArray:
@@ -1691,6 +1694,7 @@ def to_native_types(self, na_rep="nan", quoting=None, **kwargs):
values = np.asarray(values.astype(object))
values[mask] = na_rep
+ # TODO(EA2D): reshape not needed with 2D EAs
# we are expected to return a 2-d ndarray
return values.reshape(1, len(values))
@@ -1703,6 +1707,7 @@ def take_nd(
if fill_value is lib.no_default:
fill_value = None
+ # TODO(EA2D): special case not needed with 2D EAs
# axis doesn't matter; we are really a single-dim object
# but are passed the axis depending on the calling routing
# if its REALLY axis 0, then this will be a reindex and not a take
@@ -2229,6 +2234,7 @@ def diff(self, n: int, axis: int = 0) -> List["Block"]:
by apply.
"""
if axis == 0:
+ # TODO(EA2D): special case not needed with 2D EAs
# Cannot currently calculate diff across multiple blocks since this
# function is invoked via apply
raise NotImplementedError
@@ -2280,7 +2286,7 @@ def quantile(self, qs, interpolation="linear", axis=0):
blk = self.make_block(naive)
res_blk = blk.quantile(qs, interpolation=interpolation, axis=axis)
- # ravel is kludge for 2D block with 1D values, assumes column-like
+ # TODO(EA2D): ravel is kludge for 2D block with 1D values, assumes column-like
aware = self._holder(res_blk.values.ravel(), dtype=self.dtype)
return self.make_block_same_class(aware, ndim=res_blk.ndim)
@@ -2693,6 +2699,7 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None):
if isinstance(values, ABCPandasArray):
values = values.to_numpy()
if ndim and ndim > 1:
+ # TODO(EA2D): special case not needed with 2D EAs
values = np.atleast_2d(values)
if isinstance(dtype, PandasDtype):
@@ -2759,6 +2766,7 @@ def _safe_reshape(arr, new_shape):
if isinstance(arr, ABCSeries):
arr = arr._values
if not isinstance(arr, ABCExtensionArray):
+ # TODO(EA2D): special case not needed with 2D EAs
arr = arr.reshape(new_shape)
return arr
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index c8f4ec14545c7..743dd6db348b4 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -251,6 +251,7 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
):
if self.block is None:
array = empty_dtype.construct_array_type()
+ # TODO(EA2D): special case unneeded with 2D EAs
return array(
np.full(self.shape[1], fill_value.value), dtype=empty_dtype
)
| Trying to clear out my local CLN branches | https://api.github.com/repos/pandas-dev/pandas/pulls/33614 | 2020-04-17T20:52:26Z | 2020-04-17T21:39:09Z | 2020-04-17T21:39:08Z | 2020-04-17T22:06:38Z |
Fixed bug. Added in check for ufunc and evaluates inner expression be… | diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index c59952bea8dc0..ec7e4c107edf5 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -670,19 +670,26 @@ def visit_Call(self, node, side=None, **kwargs):
)
return res(*new_args)
+ elif isinstance(res, np.floor()):
+ new_args = [self.visit(arg) for arg in node.args]
+ new_args = str(*new_args)
+ new_args = [eval(new_args)]
+ if node.keywords:
+ raise TypeError(
+ f'Function "{res.name}" does not support keyword arguments'
+ )
else:
-
new_args = [self.visit(arg).value for arg in node.args]
- for key in node.keywords:
- if not isinstance(key, ast.keyword):
- raise ValueError(f"keyword error in function call '{node.func.id}'")
+ for key in node.keywords:
+ if not isinstance(key, ast.keyword):
+ raise ValueError(f"keyword error in function call '{node.func.id}'")
- if key.arg:
- kwargs[key.arg] = self.visit(key.value).value
+ if key.arg:
+ kwargs[key.arg] = self.visit(key.value).value
- return self.const_type(res(*new_args, **kwargs), self.env)
+ return self.const_type(res(*new_args, **kwargs), self.env)
def translate_In(self, op):
return op
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 08d8d5ca342b7..7059113759759 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -727,6 +727,10 @@ def test_line_continuation(self):
result = pd.eval(exp, engine=self.engine, parser=self.parser)
assert result == 12
+ def test_floor_expression(self):
+ assert pd.eval("floor(0.9 + floor(1.2+2.3))") == 3.0
+ assert pd.eval("floor(1.2+2.3)") == 3.0
+
def test_float_truncation(self):
# GH 14241
exp = "1000000000.006"
| …fore evaluating outer expression
- [x] closes #24670
- [x] passes `black pandas`
| https://api.github.com/repos/pandas-dev/pandas/pulls/33613 | 2020-04-17T20:43:11Z | 2020-08-07T22:56:51Z | null | 2020-08-07T22:56:51Z |
DOC: Remove extra backtick in example in documentation. | diff --git a/doc/source/getting_started/intro_tutorials/10_text_data.rst b/doc/source/getting_started/intro_tutorials/10_text_data.rst
index 4c03a276090d7..a7f3bdc9abcc6 100644
--- a/doc/source/getting_started/intro_tutorials/10_text_data.rst
+++ b/doc/source/getting_started/intro_tutorials/10_text_data.rst
@@ -199,7 +199,7 @@ names in the ``Name`` column. By using pandas string methods, the
Next, we need to get the corresponding location, preferably the index
label, in the table for which the name length is the largest. The
-:meth:`~Series.idxmax`` method does exactly that. It is not a string method and is
+:meth:`~Series.idxmax` method does exactly that. It is not a string method and is
applied to integers, so no ``str`` is used.
.. ipython:: python
| :meth:`~Series.idxmax`` --> :meth:`~Series.idxmax`
- [x] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33612 | 2020-04-17T17:16:41Z | 2020-04-17T17:55:09Z | 2020-04-17T17:55:09Z | 2020-04-25T17:06:31Z |
BUG: set_levels set wrong order levels for MutiIndex | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 4e2d07ddf9225..a046d60e79ed0 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -741,7 +741,14 @@ def _set_levels(
self._tuples = None
self._reset_cache()
- def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
+ def set_levels(
+ self,
+ levels,
+ level=None,
+ inplace=False,
+ verify_integrity=True,
+ change_codes=False,
+ ):
"""
Set new levels on MultiIndex. Defaults to returning new index.
@@ -755,6 +762,8 @@ def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
If True, mutates in place.
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
+ change_codes : bool, default False
+ If True, resets the codes for the levels specified.
Returns
-------
@@ -845,6 +854,29 @@ def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
idx._set_levels(
levels, level=level, validate=True, verify_integrity=verify_integrity
)
+ # reset codes
+ if change_codes:
+ all_codes = []
+ for lev_nums, lev in zip(levels, level):
+ # for each lev, construct a code
+ codes = []
+ code = 0
+ for num in lev_nums:
+ if num not in codes:
+ codes.append(code)
+ code += 1
+ else:
+ codes.append(code)
+ new_codes = []
+ index = 0
+ for i in range(len(self._codes[lev])):
+ if self._codes[lev][i] != -1:
+ new_codes.append(codes[index % len(codes)])
+ else:
+ new_codes.append(-1)
+ index += 1
+ all_codes.append(new_codes)
+ idx._set_codes(all_codes, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py
index 8a3deca0236e4..7f9c3e58baad6 100644
--- a/pandas/tests/indexes/multi/test_get_set.py
+++ b/pandas/tests/indexes/multi/test_get_set.py
@@ -329,3 +329,25 @@ def test_set_levels_with_iterable():
[expected_sizes, colors], names=["size", "color"]
)
tm.assert_index_equal(result, expected)
+
+
+def test_set_levels_with_changed_multiindex():
+ # GH33420
+ np.random.seed(seed=0)
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "one", "one", "one", "one", "one", "one", "one"],
+ ["three", "four", "three", "four", "three", "four", "three", "four"],
+ ]
+ ran_array = np.random.rand(8, 4)
+ test_df = pd.DataFrame(ran_array, index=arrays)
+ test_df.index.set_levels([3, 4], level=2, inplace=True, change_codes=True)
+ correct_df = pd.DataFrame(ran_array, index=arrays)
+ correct_df.index = pd.MultiIndex.from_arrays(
+ [
+ test_df.index.get_level_values(0),
+ test_df.index.get_level_values(1),
+ np.tile([3, 4], 4),
+ ]
+ )
+ tm.assert_equal(test_df, correct_df)
| - [ ] closes #33420
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Added parameter that allows user to reset codes | https://api.github.com/repos/pandas-dev/pandas/pulls/33611 | 2020-04-17T16:13:30Z | 2020-05-25T22:43:57Z | null | 2020-05-25T22:43:58Z |
TYP: type NDFrame.(_get_axis|_get_axis_name|_get_axis_number) | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 2f4e961ff433f..dabe589f86ee9 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -137,7 +137,9 @@ Other API changes
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- :meth:`DataFrame.swaplevels` now raises a ``TypeError`` if the axis is not a :class:`MultiIndex`.
- Previously a ``AttributeError`` was raised (:issue:`31126`)
+ Previously an ``AttributeError`` was raised (:issue:`31126`)
+- :meth:`DataFrame.xs` now raises a ``TypeError`` if a ``level`` keyword is supplied and the axis is not a :class:`MultiIndex`.
+ Previously an ``AttributeError`` was raised (:issue:`33610`)
- :meth:`DataFrameGroupby.mean` and :meth:`SeriesGroupby.mean` (and similarly for :meth:`~DataFrameGroupby.median`, :meth:`~DataFrameGroupby.std` and :meth:`~DataFrameGroupby.var`)
now raise a ``TypeError`` if a not-accepted keyword argument is passed into it.
Previously a ``UnsupportedFunctionCall`` was raised (``AssertionError`` if ``min_count`` passed into :meth:`~DataFrameGroupby.median`) (:issue:`31485`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6a4f83427310e..c897827502eda 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -353,7 +353,7 @@ def _construct_axes_from_arguments(
return axes, kwargs
@classmethod
- def _get_axis_number(cls, axis):
+ def _get_axis_number(cls, axis) -> int:
axis = cls._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in cls._AXIS_NAMES:
@@ -366,7 +366,7 @@ def _get_axis_number(cls, axis):
raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
@classmethod
- def _get_axis_name(cls, axis):
+ def _get_axis_name(cls, axis) -> str:
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, str):
if axis in cls._AXIS_NUMBERS:
@@ -378,12 +378,12 @@ def _get_axis_name(cls, axis):
pass
raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
- def _get_axis(self, axis):
+ def _get_axis(self, axis) -> Index:
name = self._get_axis_name(axis)
return getattr(self, name)
@classmethod
- def _get_block_manager_axis(cls, axis):
+ def _get_block_manager_axis(cls, axis) -> int:
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
@@ -590,7 +590,9 @@ def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
if copy:
new_values = new_values.copy()
- return self._constructor(new_values, *new_axes).__finalize__(
+ # ignore needed because of NDFrame constructor is different than
+ # DataFrame/Series constructors.
+ return self._constructor(new_values, *new_axes).__finalize__( # type: ignore
self, method="swapaxes"
)
@@ -3490,6 +3492,8 @@ class animal locomotion
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
+ if not isinstance(labels, MultiIndex):
+ raise TypeError("Index must be a MultiIndex")
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
@@ -7621,11 +7625,11 @@ def at_time(
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
- try:
- indexer = index.indexer_at_time(time, asof=asof)
- except AttributeError as err:
- raise TypeError("Index must be DatetimeIndex") from err
+ if not isinstance(index, DatetimeIndex):
+ raise TypeError("Index must be DatetimeIndex")
+
+ indexer = index.indexer_at_time(time, asof=asof)
return self._take_with_is_copy(indexer, axis=axis)
def between_time(
@@ -7704,16 +7708,12 @@ def between_time(
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
- try:
- indexer = index.indexer_between_time(
- start_time,
- end_time,
- include_start=include_start,
- include_end=include_end,
- )
- except AttributeError as err:
- raise TypeError("Index must be DatetimeIndex") from err
+ if not isinstance(index, DatetimeIndex):
+ raise TypeError("Index must be DatetimeIndex")
+ indexer = index.indexer_between_time(
+ start_time, end_time, include_start=include_start, include_end=include_end,
+ )
return self._take_with_is_copy(indexer, axis=axis)
def resample(
diff --git a/pandas/tests/indexing/multiindex/test_xs.py b/pandas/tests/indexing/multiindex/test_xs.py
index db8c0c643a623..ff748d755c063 100644
--- a/pandas/tests/indexing/multiindex/test_xs.py
+++ b/pandas/tests/indexing/multiindex/test_xs.py
@@ -243,3 +243,15 @@ def test_series_getitem_multiindex_xs_by_label():
result = s.xs("one", level="L2")
tm.assert_series_equal(result, expected)
+
+
+def test_xs_levels_raises():
+ df = DataFrame({"A": [1, 2, 3]})
+
+ msg = "Index must be a MultiIndex"
+ with pytest.raises(TypeError, match=msg):
+ df.xs(0, level="as")
+
+ s = df.A
+ with pytest.raises(TypeError, match=msg):
+ s.xs(0, level="as")
| Gives return types to ``NDFrame._get_axis`` etc. | https://api.github.com/repos/pandas-dev/pandas/pulls/33610 | 2020-04-17T15:31:35Z | 2020-04-18T19:07:45Z | 2020-04-18T19:07:45Z | 2020-04-18T19:07:49Z |
TST: make expected DTI/TDI results more specific | diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index bb228eadccc6c..42a72125ba411 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -25,7 +25,8 @@ def test_range_edges(self):
"1970-01-01 00:00:00.000000002",
"1970-01-01 00:00:00.000000003",
"1970-01-01 00:00:00.000000004",
- ]
+ ],
+ freq="N",
)
tm.assert_index_equal(idx, exp)
@@ -34,7 +35,7 @@ def test_range_edges(self):
end=Timestamp("1970-01-01 00:00:00.000000001"),
freq="N",
)
- exp = DatetimeIndex([])
+ exp = DatetimeIndex([], freq="N")
tm.assert_index_equal(idx, exp)
idx = pd.date_range(
@@ -42,7 +43,7 @@ def test_range_edges(self):
end=Timestamp("1970-01-01 00:00:00.000000001"),
freq="N",
)
- exp = DatetimeIndex(["1970-01-01 00:00:00.000000001"])
+ exp = DatetimeIndex(["1970-01-01 00:00:00.000000001"], freq="N")
tm.assert_index_equal(idx, exp)
idx = pd.date_range(
@@ -56,7 +57,8 @@ def test_range_edges(self):
"1970-01-01 00:00:00.000002",
"1970-01-01 00:00:00.000003",
"1970-01-01 00:00:00.000004",
- ]
+ ],
+ freq="U",
)
tm.assert_index_equal(idx, exp)
@@ -71,7 +73,8 @@ def test_range_edges(self):
"1970-01-01 00:00:00.002",
"1970-01-01 00:00:00.003",
"1970-01-01 00:00:00.004",
- ]
+ ],
+ freq="L",
)
tm.assert_index_equal(idx, exp)
@@ -86,7 +89,8 @@ def test_range_edges(self):
"1970-01-01 00:00:02",
"1970-01-01 00:00:03",
"1970-01-01 00:00:04",
- ]
+ ],
+ freq="S",
)
tm.assert_index_equal(idx, exp)
@@ -101,7 +105,8 @@ def test_range_edges(self):
"1970-01-01 00:02",
"1970-01-01 00:03",
"1970-01-01 00:04",
- ]
+ ],
+ freq="T",
)
tm.assert_index_equal(idx, exp)
@@ -116,14 +121,17 @@ def test_range_edges(self):
"1970-01-01 02:00",
"1970-01-01 03:00",
"1970-01-01 04:00",
- ]
+ ],
+ freq="H",
)
tm.assert_index_equal(idx, exp)
idx = pd.date_range(
start=Timestamp("1970-01-01"), end=Timestamp("1970-01-04"), freq="D"
)
- exp = DatetimeIndex(["1970-01-01", "1970-01-02", "1970-01-03", "1970-01-04"])
+ exp = DatetimeIndex(
+ ["1970-01-01", "1970-01-02", "1970-01-03", "1970-01-04"], freq="D"
+ )
tm.assert_index_equal(idx, exp)
diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py
index ddde30d0f8fbf..028a713a8af81 100644
--- a/pandas/tests/indexes/datetimes/test_partial_slicing.py
+++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py
@@ -26,9 +26,11 @@ def test_slice_with_negative_step(self):
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
- tm.assert_series_equal(ts[l_slc], ts.iloc[i_slc])
- tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
- tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
+ expected = ts.iloc[i_slc]
+
+ tm.assert_series_equal(ts[l_slc], expected)
+ tm.assert_series_equal(ts.loc[l_slc], expected)
+ tm.assert_series_equal(ts.loc[l_slc], expected)
assert_slices_equivalent(SLC[Timestamp("2014-10-01") :: -1], SLC[9::-1])
assert_slices_equivalent(SLC["2014-10-01"::-1], SLC[9::-1])
@@ -47,7 +49,7 @@ def assert_slices_equivalent(l_slc, i_slc):
SLC[Timestamp("2015-02-01") : "2014-10-01" : -1], SLC[13:8:-1]
)
- assert_slices_equivalent(SLC["2014-10-01":"2015-02-01":-1], SLC[:0])
+ assert_slices_equivalent(SLC["2014-10-01":"2015-02-01":-1], SLC[0:0:-1])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20), date_range("2014-01-01", periods=20, freq="MS"))
@@ -79,7 +81,9 @@ def test_monotone_DTI_indexing_bug(self):
df = pd.DataFrame(
{"A": [1, 2, 3]}, index=pd.date_range("20170101", periods=3)[::-1]
)
- expected = pd.DataFrame({"A": 1}, index=pd.date_range("20170103", periods=1))
+ expected = pd.DataFrame(
+ {"A": 1}, index=pd.date_range("20170103", periods=1)[::-1]
+ )
tm.assert_frame_equal(df.loc["2017-01-03"], expected)
def test_slice_year(self):
diff --git a/pandas/tests/indexes/datetimes/test_shift.py b/pandas/tests/indexes/datetimes/test_shift.py
index 6e53492b71578..8724bfeb05c4d 100644
--- a/pandas/tests/indexes/datetimes/test_shift.py
+++ b/pandas/tests/indexes/datetimes/test_shift.py
@@ -28,18 +28,21 @@ def test_dti_shift_tzaware(self, tz_naive_fixture):
["2011-01-01 10:00", "2011-01-01 11:00", "2011-01-01 12:00"],
name="xxx",
tz=tz,
+ freq="H",
)
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
exp = pd.DatetimeIndex(
["2011-01-01 13:00", "2011-01-01 14:00", "2011-01-01 15:00"],
name="xxx",
tz=tz,
+ freq="H",
)
tm.assert_index_equal(idx.shift(3, freq="H"), exp)
exp = pd.DatetimeIndex(
["2011-01-01 07:00", "2011-01-01 08:00", "2011-01-01 09:00"],
name="xxx",
tz=tz,
+ freq="H",
)
tm.assert_index_equal(idx.shift(-3, freq="H"), exp)
diff --git a/pandas/tests/indexes/period/test_to_timestamp.py b/pandas/tests/indexes/period/test_to_timestamp.py
index 23787586cb3d3..a7846d1864d40 100644
--- a/pandas/tests/indexes/period/test_to_timestamp.py
+++ b/pandas/tests/indexes/period/test_to_timestamp.py
@@ -60,7 +60,7 @@ def test_to_timestamp_quarterly_bug(self):
pindex = PeriodIndex(year=years, quarter=quarters)
stamps = pindex.to_timestamp("D", "end")
- expected = DatetimeIndex([x.to_timestamp("D", "end") for x in pindex])
+ expected = DatetimeIndex([x.to_timestamp("D", "end") for x in pindex], freq="Q")
tm.assert_index_equal(stamps, expected)
def test_to_timestamp_pi_mult(self):
diff --git a/pandas/tests/indexes/timedeltas/test_partial_slicing.py b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
index a0ef953db3600..10ad521ce4f76 100644
--- a/pandas/tests/indexes/timedeltas/test_partial_slicing.py
+++ b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
@@ -52,9 +52,11 @@ def test_slice_with_negative_step(self):
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
- tm.assert_series_equal(ts[l_slc], ts.iloc[i_slc])
- tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
- tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
+ expected = ts.iloc[i_slc]
+
+ tm.assert_series_equal(ts[l_slc], expected)
+ tm.assert_series_equal(ts.loc[l_slc], expected)
+ tm.assert_series_equal(ts.loc[l_slc], expected)
assert_slices_equivalent(SLC[Timedelta(hours=7) :: -1], SLC[7::-1])
assert_slices_equivalent(SLC["7 hours"::-1], SLC[7::-1])
@@ -73,7 +75,7 @@ def assert_slices_equivalent(l_slc, i_slc):
SLC[Timedelta(hours=15) : "7 hours" : -1], SLC[15:6:-1]
)
- assert_slices_equivalent(SLC["7 hours":"15 hours":-1], SLC[:0])
+ assert_slices_equivalent(SLC["7 hours":"15 hours":-1], SLC[0:0:-1])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20), timedelta_range("0", periods=20, freq="H"))
diff --git a/pandas/tests/series/methods/test_append.py b/pandas/tests/series/methods/test_append.py
index 158c759fdaef3..82bde7c233626 100644
--- a/pandas/tests/series/methods/test_append.py
+++ b/pandas/tests/series/methods/test_append.py
@@ -175,7 +175,7 @@ def test_series_append_aware(self):
ts_result = ser1.append(ser2)
exp_index = DatetimeIndex(
- ["2011-01-01 01:00", "2011-01-01 02:00"], tz="US/Eastern"
+ ["2011-01-01 01:00", "2011-01-01 02:00"], tz="US/Eastern", freq="H"
)
exp = Series([1, 2], index=exp_index)
tm.assert_series_equal(ts_result, exp)
@@ -187,7 +187,9 @@ def test_series_append_aware(self):
ser2 = Series([2], index=rng2)
ts_result = ser1.append(ser2)
- exp_index = DatetimeIndex(["2011-01-01 01:00", "2011-01-01 02:00"], tz="UTC")
+ exp_index = DatetimeIndex(
+ ["2011-01-01 01:00", "2011-01-01 02:00"], tz="UTC", freq="H"
+ )
exp = Series([1, 2], index=exp_index)
tm.assert_series_equal(ts_result, exp)
utc = rng1.tz
diff --git a/pandas/tests/series/methods/test_asfreq.py b/pandas/tests/series/methods/test_asfreq.py
index d94b60384a07c..cd61c510c75f5 100644
--- a/pandas/tests/series/methods/test_asfreq.py
+++ b/pandas/tests/series/methods/test_asfreq.py
@@ -39,11 +39,14 @@ def test_tz_aware_asfreq(self, tz):
def test_asfreq(self):
ts = Series(
[0.0, 1.0, 2.0],
- index=[
- datetime(2009, 10, 30),
- datetime(2009, 11, 30),
- datetime(2009, 12, 31),
- ],
+ index=DatetimeIndex(
+ [
+ datetime(2009, 10, 30),
+ datetime(2009, 11, 30),
+ datetime(2009, 12, 31),
+ ],
+ freq="BM",
+ ),
)
daily_ts = ts.asfreq("B")
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 2f00a58fe80be..044dfa703c081 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -3501,7 +3501,7 @@ def test_offset_whole_year(self):
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SM")
- exp = DatetimeIndex(dates)
+ exp = DatetimeIndex(dates, freq="SM")
tm.assert_index_equal(result, exp)
offset_cases = []
@@ -3760,7 +3760,7 @@ def test_offset_whole_year(self):
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SMS")
- exp = DatetimeIndex(dates)
+ exp = DatetimeIndex(dates, freq="SMS")
tm.assert_index_equal(result, exp)
offset_cases = []
diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py
index f3a14971ef2e7..3c5352fcd997d 100644
--- a/pandas/tests/window/moments/test_moments_rolling.py
+++ b/pandas/tests/window/moments/test_moments_rolling.py
@@ -9,7 +9,7 @@
import pandas.util._test_decorators as td
import pandas as pd
-from pandas import DataFrame, Index, Series, isna, notna
+from pandas import DataFrame, DatetimeIndex, Index, Series, isna, notna
import pandas._testing as tm
from pandas.core.window.common import _flex_binary_moment
from pandas.tests.window.common import Base, ConsistencyBase
@@ -1346,7 +1346,9 @@ def test_rolling_max_gh6297(self):
expected = Series(
[1.0, 2.0, 6.0, 4.0, 5.0],
- index=[datetime(1975, 1, i, 0) for i in range(1, 6)],
+ index=DatetimeIndex(
+ [datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"
+ ),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
@@ -1366,7 +1368,9 @@ def test_rolling_max_resample(self):
# Default how should be max
expected = Series(
[0.0, 1.0, 2.0, 3.0, 20.0],
- index=[datetime(1975, 1, i, 0) for i in range(1, 6)],
+ index=DatetimeIndex(
+ [datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"
+ ),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
@@ -1374,7 +1378,9 @@ def test_rolling_max_resample(self):
# Now specify median (10.0)
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10.0],
- index=[datetime(1975, 1, i, 0) for i in range(1, 6)],
+ index=DatetimeIndex(
+ [datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"
+ ),
)
x = series.resample("D").median().rolling(window=1).max()
tm.assert_series_equal(expected, x)
@@ -1383,7 +1389,9 @@ def test_rolling_max_resample(self):
v = (4.0 + 10.0 + 20.0) / 3.0
expected = Series(
[0.0, 1.0, 2.0, 3.0, v],
- index=[datetime(1975, 1, i, 0) for i in range(1, 6)],
+ index=DatetimeIndex(
+ [datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"
+ ),
)
x = series.resample("D").mean().rolling(window=1).max()
tm.assert_series_equal(expected, x)
@@ -1403,7 +1411,9 @@ def test_rolling_min_resample(self):
# Default how should be min
expected = Series(
[0.0, 1.0, 2.0, 3.0, 4.0],
- index=[datetime(1975, 1, i, 0) for i in range(1, 6)],
+ index=DatetimeIndex(
+ [datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"
+ ),
)
r = series.resample("D").min().rolling(window=1)
tm.assert_series_equal(expected, r.min())
@@ -1423,7 +1433,9 @@ def test_rolling_median_resample(self):
# Default how should be median
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10],
- index=[datetime(1975, 1, i, 0) for i in range(1, 6)],
+ index=DatetimeIndex(
+ [datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"
+ ),
)
x = series.resample("D").median().rolling(window=1).median()
tm.assert_series_equal(expected, x)
| assert_index_equal doesnt check for matching freq. I've got a branch that changes that, but the diff is massive, so i am first going through the tests and updating the `expected`s so that they will pass once that check is in place.
Also in this sequence: xref #33604 | https://api.github.com/repos/pandas-dev/pandas/pulls/33609 | 2020-04-17T15:22:30Z | 2020-04-17T21:46:25Z | 2020-04-17T21:46:25Z | 2020-04-17T22:00:29Z |
DOC/CLN: Clean/Fix documentation for Window module | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index c8d08277e9a26..4f85a89faf24c 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -322,8 +322,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (GL03, GL04, GL05, GL06, GL07, GL09, GL10, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA02, SA03, SA05)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=GL03,GL04,GL05,GL06,GL07,GL09,GL10,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA02,SA03,SA05
+ MSG='Validate docstrings (GL03, GL04, GL05, GL06, GL07, GL09, GL10, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA02, SA03)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=GL03,GL04,GL05,GL06,GL07,GL09,GL10,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA02,SA03
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Validate correct capitalization among titles in documentation' ; echo $MSG
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index 8abc47886d261..8b09bacaeb8a4 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -22,8 +22,10 @@
See Also
--------
- Series.%(name)s : Series %(name)s.
- DataFrame.%(name)s : DataFrame %(name)s.
+ pandas.Series.%(name)s : Calling object with Series data.
+ pandas.DataFrame.%(name)s : Calling object with DataFrame data.
+ pandas.Series.%(func_name)s : Similar method for Series.
+ pandas.DataFrame.%(func_name)s : Similar method for DataFrame.
"""
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index 0ec876583dcde..27ba05a6702ff 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -265,7 +265,7 @@ def func(arg):
return self._wrap_results(results, block_list, obj, exclude)
- @Substitution(name="ewm")
+ @Substitution(name="ewm", func_name="mean")
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""
@@ -279,7 +279,7 @@ def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("ewma", **kwargs)
- @Substitution(name="ewm")
+ @Substitution(name="ewm", func_name="std")
@Appender(_doc_template)
@Appender(_bias_template)
def std(self, bias=False, *args, **kwargs):
@@ -291,7 +291,7 @@ def std(self, bias=False, *args, **kwargs):
vol = std
- @Substitution(name="ewm")
+ @Substitution(name="ewm", func_name="var")
@Appender(_doc_template)
@Appender(_bias_template)
def var(self, bias=False, *args, **kwargs):
@@ -313,7 +313,7 @@ def f(arg):
return self._apply(f, **kwargs)
- @Substitution(name="ewm")
+ @Substitution(name="ewm", func_name="cov")
@Appender(_doc_template)
def cov(self, other=None, pairwise=None, bias=False, **kwargs):
"""
@@ -360,7 +360,7 @@ def _get_cov(X, Y):
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
- @Substitution(name="ewm")
+ @Substitution(name="ewm", func_name="corr")
@Appender(_doc_template)
def corr(self, other=None, pairwise=None, **kwargs):
"""
diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py
index 140e0144d0a2d..99d16142f2993 100644
--- a/pandas/core/window/expanding.py
+++ b/pandas/core/window/expanding.py
@@ -88,9 +88,8 @@ def _get_window(self, other=None, **kwargs):
"""
See Also
--------
- DataFrame.expanding.aggregate
- DataFrame.rolling.aggregate
- DataFrame.aggregate
+ pandas.DataFrame.aggregate : Similar DataFrame method.
+ pandas.Series.aggregate : Similar Series method.
"""
)
@@ -172,7 +171,7 @@ def sum(self, *args, **kwargs):
nv.validate_expanding_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
- @Substitution(name="expanding")
+ @Substitution(name="expanding", func_name="max")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
@@ -208,7 +207,7 @@ def var(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
- @Substitution(name="expanding")
+ @Substitution(name="expanding", func_name="skew")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
@@ -252,7 +251,7 @@ def quantile(self, quantile, interpolation="linear", **kwargs):
quantile=quantile, interpolation=interpolation, **kwargs
)
- @Substitution(name="expanding")
+ @Substitution(name="expanding", func_name="cov")
@Appender(_doc_template)
@Appender(_shared_docs["cov"])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index dc8cf839d0bcb..a3e1504a68fbe 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -540,8 +540,8 @@ def aggregate(self, func, *args, **kwargs):
See Also
--------
- Series.sum : Reducing sum for Series.
- DataFrame.sum : Reducing sum for DataFrame.
+ pandas.Series.sum : Reducing sum for Series.
+ pandas.DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
@@ -618,10 +618,10 @@ def aggregate(self, func, *args, **kwargs):
See Also
--------
- Series.%(name)s : Calling object with Series data.
- DataFrame.%(name)s : Calling object with DataFrames.
- Series.mean : Equivalent method for Series.
- DataFrame.mean : Equivalent method for DataFrame.
+ pandas.Series.%(name)s : Calling object with Series data.
+ pandas.DataFrame.%(name)s : Calling object with DataFrames.
+ pandas.Series.mean : Equivalent method for Series.
+ pandas.DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
@@ -667,10 +667,10 @@ def aggregate(self, func, *args, **kwargs):
See Also
--------
- Series.%(name)s : Calling object with Series data.
- DataFrame.%(name)s : Calling object with DataFrames.
- Series.var : Equivalent method for Series.
- DataFrame.var : Equivalent method for DataFrame.
+ pandas.Series.%(name)s : Calling object with Series data.
+ pandas.DataFrame.%(name)s : Calling object with DataFrames.
+ pandas.Series.var : Equivalent method for Series.
+ pandas.DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
@@ -727,10 +727,10 @@ def aggregate(self, func, *args, **kwargs):
See Also
--------
- Series.%(name)s : Calling object with Series data.
- DataFrame.%(name)s : Calling object with DataFrames.
- Series.std : Equivalent method for Series.
- DataFrame.std : Equivalent method for DataFrame.
+ pandas.Series.%(name)s : Calling object with Series data.
+ pandas.DataFrame.%(name)s : Calling object with DataFrames.
+ pandas.Series.std : Equivalent method for Series.
+ pandas.DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
@@ -1030,8 +1030,8 @@ def _get_window(
"""
See Also
--------
- pandas.DataFrame.rolling.aggregate
- pandas.DataFrame.aggregate
+ pandas.DataFrame.aggregate : Similar DataFrame method.
+ pandas.Series.aggregate : Similar Series method.
"""
)
@@ -1146,9 +1146,9 @@ class _Rolling_and_Expanding(_Rolling):
See Also
--------
- Series.%(name)s : Calling object with Series data.
- DataFrame.%(name)s : Calling object with DataFrames.
- DataFrame.count : Count of the full DataFrame.
+ pandas.Series.%(name)s : Calling object with Series data.
+ pandas.DataFrame.%(name)s : Calling object with DataFrames.
+ pandas.DataFrame.count : Count of the full DataFrame.
Examples
--------
@@ -1243,8 +1243,10 @@ def count(self):
See Also
--------
- Series.%(name)s : Series %(name)s.
- DataFrame.%(name)s : DataFrame %(name)s.
+ pandas.Series.%(name)s : Calling object with Series data.
+ pandas.DataFrame.%(name)s : Calling object with DataFrame data.
+ pandas.Series.apply : Similar method for Series.
+ pandas.DataFrame.apply : Similar method for DataFrame.
Notes
-----
@@ -1363,10 +1365,10 @@ def max(self, *args, **kwargs):
See Also
--------
- Series.%(name)s : Calling object with a Series.
- DataFrame.%(name)s : Calling object with a DataFrame.
- Series.min : Similar method for Series.
- DataFrame.min : Similar method for DataFrame.
+ pandas.Series.%(name)s : Calling object with a Series.
+ pandas.DataFrame.%(name)s : Calling object with a DataFrame.
+ pandas.Series.min : Similar method for Series.
+ pandas.DataFrame.min : Similar method for DataFrame.
Examples
--------
@@ -1410,10 +1412,10 @@ def mean(self, *args, **kwargs):
See Also
--------
- Series.%(name)s : Calling object with Series data.
- DataFrame.%(name)s : Calling object with DataFrames.
- Series.median : Equivalent method for Series.
- DataFrame.median : Equivalent method for DataFrame.
+ pandas.Series.%(name)s : Calling object with Series data.
+ pandas.DataFrame.%(name)s : Calling object with DataFrames.
+ pandas.Series.median : Equivalent method for Series.
+ pandas.DataFrame.median : Equivalent method for DataFrame.
Examples
--------
@@ -1508,10 +1510,10 @@ def skew(self, **kwargs):
See Also
--------
- Series.%(name)s : Calling object with Series data.
- DataFrame.%(name)s : Calling object with DataFrames.
- Series.kurt : Equivalent method for Series.
- DataFrame.kurt : Equivalent method for DataFrame.
+ pandas.Series.%(name)s : Calling object with Series data.
+ pandas.DataFrame.%(name)s : Calling object with DataFrames.
+ pandas.Series.kurt : Equivalent method for Series.
+ pandas.DataFrame.kurt : Equivalent method for DataFrame.
scipy.stats.skew : Third moment of a probability density.
scipy.stats.kurtosis : Reference SciPy method.
@@ -1564,9 +1566,9 @@ def kurt(self, **kwargs):
See Also
--------
- Series.quantile : Computes value at the given quantile over all data
+ pandas.Series.quantile : Computes value at the given quantile over all data
in Series.
- DataFrame.quantile : Computes values at the given quantile over
+ pandas.DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
Examples
@@ -1690,11 +1692,11 @@ def _get_cov(X, Y):
See Also
--------
- Series.%(name)s : Calling object with Series data.
- DataFrame.%(name)s : Calling object with DataFrames.
- Series.corr : Equivalent method for Series.
- DataFrame.corr : Equivalent method for DataFrame.
- %(name)s.cov : Similar method to calculate covariance.
+ pandas.Series.%(name)s : Calling object with Series data.
+ pandas.DataFrame.%(name)s : Calling object with DataFrames.
+ pandas.Series.corr : Equivalent method for Series.
+ pandas.DataFrame.corr : Equivalent method for DataFrame.
+ cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Notes
@@ -1895,8 +1897,8 @@ def _validate_freq(self):
"""
See Also
--------
- Series.rolling
- DataFrame.rolling
+ pandas.Series.rolling : Calling object with Series data.
+ pandas.DataFrame.rolling : Calling object with DataFrame data.
"""
)
@@ -1997,7 +1999,7 @@ def sum(self, *args, **kwargs):
nv.validate_rolling_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
- @Substitution(name="rolling")
+ @Substitution(name="rolling", func_name="max")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
@@ -2033,7 +2035,7 @@ def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
- @Substitution(name="rolling")
+ @Substitution(name="rolling", func_name="skew")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
@@ -2077,7 +2079,7 @@ def quantile(self, quantile, interpolation="linear", **kwargs):
quantile=quantile, interpolation=interpolation, **kwargs
)
- @Substitution(name="rolling")
+ @Substitution(name="rolling", func_name="cov")
@Appender(_doc_template)
@Appender(_shared_docs["cov"])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
| - xref #31661
- xref #28792
- Added '.' before dataframe/series reference to fix missing link problem. Don't know if it's the best approach.
- Changed base template that is used by some functions to return better See Also section. Hope it's ok.
- Also question about Window class. It doesn't has its counterpart from Series/DF modules, its doc containing examples from Rolling part and there're only 4 functions. Is it internal base class for all windows and maybe shoul be excluded from docs or is it used somewhere maybe for creating custom windows? | https://api.github.com/repos/pandas-dev/pandas/pulls/33608 | 2020-04-17T12:58:55Z | 2020-05-07T03:29:09Z | 2020-05-07T03:29:09Z | 2020-05-07T07:59:10Z |
ENH: general concat with ExtensionArrays through find_common_type | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 7ad7e8f5a27b0..e4ef752a33635 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -241,6 +241,9 @@ Backwards incompatible API changes
- :meth:`DataFrame.at` and :meth:`Series.at` will raise a ``TypeError`` instead of a ``ValueError`` if an incompatible key is passed, and ``KeyError`` if a missing key is passed, matching the behavior of ``.loc[]`` (:issue:`31722`)
- Passing an integer dtype other than ``int64`` to ``np.array(period_index, dtype=...)`` will now raise ``TypeError`` instead of incorrectly using ``int64`` (:issue:`32255`)
- Passing an invalid ``fill_value`` to :meth:`Categorical.take` raises a ``ValueError`` instead of ``TypeError`` (:issue:`33660`)
+- Combining a ``Categorical`` with integer categories and which contains missing values
+ with a float dtype column in operations such as :func:`concat` or :meth:`~DataFrame.append`
+ will now result in a float column instead of an object dtyped column (:issue:`33607`)
``MultiIndex.get_indexer`` interprets `method` argument differently
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 7447d593a7ff0..bd903d9b1fae3 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -1004,7 +1004,7 @@ def _concat_same_type(
cls, to_concat: Sequence["ExtensionArray"]
) -> "ExtensionArray":
"""
- Concatenate multiple array.
+ Concatenate multiple array of this dtype.
Parameters
----------
@@ -1014,6 +1014,11 @@ def _concat_same_type(
-------
ExtensionArray
"""
+ # Implementer note: this method will only be called with a sequence of
+ # ExtensionArrays of this class and with the same dtype as self. This
+ # should allow "easy" concatenation (no upcasting needed), and result
+ # in a new ExtensionArray of the same dtype.
+ # Note: this strict behaviour is only guaranteed starting with pandas 1.1
raise AbstractMethodError(cls)
# The _can_hold_na attribute is set to True so that pandas internals
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index de401368d55d7..a7a0df3908268 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2296,9 +2296,9 @@ def _can_hold_na(self):
@classmethod
def _concat_same_type(self, to_concat):
- from pandas.core.dtypes.concat import concat_categorical
+ from pandas.core.dtypes.concat import union_categoricals
- return concat_categorical(to_concat)
+ return union_categoricals(to_concat)
def isin(self, values):
"""
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 9d41071755e6f..743267534bfaa 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -1,11 +1,11 @@
import numbers
-from typing import TYPE_CHECKING, Tuple, Type, Union
+from typing import TYPE_CHECKING, List, Optional, Tuple, Type, Union
import warnings
import numpy as np
from pandas._libs import lib, missing as libmissing
-from pandas._typing import ArrayLike
+from pandas._typing import ArrayLike, DtypeObj
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.util._decorators import cache_readonly
@@ -96,6 +96,17 @@ def construct_array_type(cls) -> Type["IntegerArray"]:
"""
return IntegerArray
+ def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
+ # for now only handle other integer types
+ if not all(isinstance(t, _IntegerDtype) for t in dtypes):
+ return None
+ np_dtype = np.find_common_type(
+ [t.numpy_dtype for t in dtypes], [] # type: ignore
+ )
+ if np.issubdtype(np_dtype, np.integer):
+ return _dtypes[str(np_dtype)]
+ return None
+
def __from_arrow__(
self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"]
) -> "IntegerArray":
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 72b6e07942d5e..e327e11a17f4f 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -952,27 +952,7 @@ def copy(self):
@classmethod
def _concat_same_type(cls, to_concat):
- fill_values = [x.fill_value for x in to_concat]
-
- fill_value = fill_values[0]
-
- # np.nan isn't a singleton, so we may end up with multiple
- # NaNs here, so we ignore tha all NA case too.
- if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
- warnings.warn(
- "Concatenating sparse arrays with multiple fill "
- f"values: '{fill_values}'. Picking the first and "
- "converting the rest.",
- PerformanceWarning,
- stacklevel=6,
- )
- keep = to_concat[0]
- to_concat2 = [keep]
-
- for arr in to_concat[1:]:
- to_concat2.append(cls(np.asarray(arr), fill_value=fill_value))
-
- to_concat = to_concat2
+ fill_value = to_concat[0].fill_value
values = []
length = 0
diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py
index afa11586fda04..156a90f6ce600 100644
--- a/pandas/core/arrays/sparse/dtype.py
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -1,11 +1,13 @@
"""Sparse Dtype"""
import re
-from typing import TYPE_CHECKING, Any, Tuple, Type
+from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type
+import warnings
import numpy as np
-from pandas._typing import Dtype
+from pandas._typing import Dtype, DtypeObj
+from pandas.errors import PerformanceWarning
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import astype_nansafe
@@ -352,3 +354,23 @@ def _subtype_with_str(self):
if isinstance(self.fill_value, str):
return type(self.fill_value)
return self.subtype
+
+ def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
+
+ fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)]
+ fill_value = fill_values[0]
+
+ # np.nan isn't a singleton, so we may end up with multiple
+ # NaNs here, so we ignore tha all NA case too.
+ if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
+ warnings.warn(
+ "Concatenating sparse arrays with multiple fill "
+ f"values: '{fill_values}'. Picking the first and "
+ "converting the rest.",
+ PerformanceWarning,
+ stacklevel=6,
+ )
+
+ # TODO also handle non-numpy other dtypes
+ np_dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
+ return SparseDtype(np.find_common_type(np_dtypes, []), fill_value=fill_value)
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index a4f0ccc2016c0..2d81dd4d884a3 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -6,6 +6,7 @@
import numpy as np
+from pandas._typing import DtypeObj
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
@@ -33,11 +34,12 @@ class ExtensionDtype:
* type
* name
- The following attributes influence the behavior of the dtype in
+ The following attributes and methods influence the behavior of the dtype in
pandas operations
* _is_numeric
* _is_boolean
+ * _get_common_dtype
Optionally one can override construct_array_type for construction
with the name of this dtype via the Registry. See
@@ -322,3 +324,31 @@ def _is_boolean(self) -> bool:
bool
"""
return False
+
+ def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
+ """
+ Return the common dtype, if one exists.
+
+ Used in `find_common_type` implementation. This is for example used
+ to determine the resulting dtype in a concat operation.
+
+ If no common dtype exists, return None (which gives the other dtypes
+ the chance to determine a common dtype). If all dtypes in the list
+ return None, then the common dtype will be "object" dtype (this means
+ it is never needed to return "object" dtype from this method itself).
+
+ Parameters
+ ----------
+ dtypes : list of dtypes
+ The dtypes for which to determine a common dtype. This is a list
+ of np.dtype or ExtensionDtype instances.
+
+ Returns
+ -------
+ Common dtype (np.dtype or ExtensionDtype) or None
+ """
+ if len(set(dtypes)) == 1:
+ # only itself
+ return self
+ else:
+ return None
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index e50d635a1ba6c..ad307fd99ec9c 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -3,7 +3,7 @@
"""
from datetime import date, datetime, timedelta
-from typing import TYPE_CHECKING, Any, Optional, Tuple, Type
+from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type
import numpy as np
@@ -1423,7 +1423,7 @@ def maybe_cast_to_datetime(value, dtype, errors: str = "raise"):
return value
-def find_common_type(types):
+def find_common_type(types: List[DtypeObj]) -> DtypeObj:
"""
Find a common data type among the given dtypes.
@@ -1450,8 +1450,16 @@ def find_common_type(types):
if all(is_dtype_equal(first, t) for t in types[1:]):
return first
+ # get unique types (dict.fromkeys is used as order-preserving set())
+ types = list(dict.fromkeys(types).keys())
+
if any(isinstance(t, ExtensionDtype) for t in types):
- return np.object
+ for t in types:
+ if isinstance(t, ExtensionDtype):
+ res = t._get_common_dtype(types)
+ if res is not None:
+ return res
+ return np.dtype("object")
# take lowest unit
if all(is_datetime64_dtype(t) for t in types):
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 2c560a1ed8c62..82b2795582ff1 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -4,6 +4,9 @@
import numpy as np
+from pandas._typing import ArrayLike, DtypeObj
+
+from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import (
is_bool_dtype,
is_categorical_dtype,
@@ -17,6 +20,9 @@
)
from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCRangeIndex, ABCSeries
+from pandas.core.arrays import ExtensionArray
+from pandas.core.construction import array
+
def get_dtype_kinds(l):
"""
@@ -58,6 +64,40 @@ def get_dtype_kinds(l):
return typs
+def _cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike:
+ """
+ Helper function for `arr.astype(common_dtype)` but handling all special
+ cases.
+ """
+ if (
+ is_categorical_dtype(arr.dtype)
+ and isinstance(dtype, np.dtype)
+ and np.issubdtype(dtype, np.integer)
+ ):
+ # problem case: categorical of int -> gives int as result dtype,
+ # but categorical can contain NAs -> fall back to object dtype
+ try:
+ return arr.astype(dtype, copy=False)
+ except ValueError:
+ return arr.astype(object, copy=False)
+
+ if (
+ isinstance(arr, np.ndarray)
+ and arr.dtype.kind in ["m", "M"]
+ and dtype is np.dtype("object")
+ ):
+ # wrap datetime-likes in EA to ensure astype(object) gives Timestamp/Timedelta
+ # this can happen when concat_compat is called directly on arrays (when arrays
+ # are not coming from Index/Series._values), eg in BlockManager.quantile
+ arr = array(arr)
+
+ if is_extension_array_dtype(dtype):
+ if isinstance(arr, np.ndarray):
+ # numpy's astype cannot handle ExtensionDtypes
+ return array(arr, dtype=dtype, copy=False)
+ return arr.astype(dtype, copy=False)
+
+
def concat_compat(to_concat, axis: int = 0):
"""
provide concatenation of an array of arrays each of which is a single
@@ -93,28 +133,25 @@ def is_nonempty(x) -> bool:
typs = get_dtype_kinds(to_concat)
_contains_datetime = any(typ.startswith("datetime") for typ in typs)
- _contains_period = any(typ.startswith("period") for typ in typs)
all_empty = not len(non_empties)
single_dtype = len({x.dtype for x in to_concat}) == 1
any_ea = any(is_extension_array_dtype(x.dtype) for x in to_concat)
- if any_ea and single_dtype and axis == 0:
- cls = type(to_concat[0])
- return cls._concat_same_type(to_concat)
+ if any_ea and axis == 0:
+ if not single_dtype:
+ target_dtype = find_common_type([x.dtype for x in to_concat])
+ to_concat = [_cast_to_common_type(arr, target_dtype) for arr in to_concat]
- elif "category" in typs:
- # this must be prior to concat_datetime,
- # to support Categorical + datetime-like
- return concat_categorical(to_concat, axis=axis)
+ if isinstance(to_concat[0], ExtensionArray):
+ cls = type(to_concat[0])
+ return cls._concat_same_type(to_concat)
+ else:
+ return np.concatenate(to_concat)
- elif _contains_datetime or "timedelta" in typs or _contains_period:
+ elif _contains_datetime or "timedelta" in typs:
return concat_datetime(to_concat, axis=axis, typs=typs)
- # these are mandated to handle empties as well
- elif "sparse" in typs:
- return _concat_sparse(to_concat, axis=axis, typs=typs)
-
elif any_ea and axis == 1:
to_concat = [np.atleast_2d(x.astype("object")) for x in to_concat]
return np.concatenate(to_concat, axis=axis)
@@ -136,53 +173,6 @@ def is_nonempty(x) -> bool:
return np.concatenate(to_concat, axis=axis)
-def concat_categorical(to_concat, axis: int = 0):
- """
- Concatenate an object/categorical array of arrays, each of which is a
- single dtype
-
- Parameters
- ----------
- to_concat : array of arrays
- axis : int
- Axis to provide concatenation in the current implementation this is
- always 0, e.g. we only have 1D categoricals
-
- Returns
- -------
- Categorical
- A single array, preserving the combined dtypes
- """
- # we could have object blocks and categoricals here
- # if we only have a single categoricals then combine everything
- # else its a non-compat categorical
- categoricals = [x for x in to_concat if is_categorical_dtype(x.dtype)]
-
- # validate the categories
- if len(categoricals) != len(to_concat):
- pass
- else:
- # when all categories are identical
- first = to_concat[0]
- if all(first.is_dtype_equal(other) for other in to_concat[1:]):
- return union_categoricals(categoricals)
-
- # extract the categoricals & coerce to object if needed
- to_concat = [
- x._internal_get_values()
- if is_categorical_dtype(x.dtype)
- else np.asarray(x).ravel()
- if not is_datetime64tz_dtype(x)
- else np.asarray(x.astype(object))
- for x in to_concat
- ]
- result = concat_compat(to_concat)
- if axis == 1:
- # TODO(EA2D): not necessary with 2D EAs
- result = result.reshape(1, len(result))
- return result
-
-
def union_categoricals(
to_union, sort_categories: bool = False, ignore_order: bool = False
):
@@ -415,34 +405,3 @@ def _wrap_datetimelike(arr):
if isinstance(arr, np.ndarray) and arr.dtype.kind in ["m", "M"]:
arr = pd_array(arr)
return arr
-
-
-def _concat_sparse(to_concat, axis=0, typs=None):
- """
- provide concatenation of an sparse/dense array of arrays each of which is a
- single dtype
-
- Parameters
- ----------
- to_concat : array of arrays
- axis : axis to provide concatenation
- typs : set of to_concat dtypes
-
- Returns
- -------
- a single array, preserving the combined dtypes
- """
- from pandas.core.arrays import SparseArray
-
- fill_values = [x.fill_value for x in to_concat if isinstance(x, SparseArray)]
- fill_value = fill_values[0]
-
- # TODO: Fix join unit generation so we aren't passed this.
- to_concat = [
- x
- if isinstance(x, SparseArray)
- else SparseArray(x.squeeze(), fill_value=fill_value)
- for x in to_concat
- ]
-
- return SparseArray._concat_same_type(to_concat)
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 8fe2b3c60d6d0..ceed7e29e4a35 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -21,7 +21,7 @@
from pandas._libs.interval import Interval
from pandas._libs.tslibs import NaT, Period, Timestamp, timezones
-from pandas._typing import Ordered
+from pandas._typing import DtypeObj, Ordered
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCDateOffset, ABCIndexClass
@@ -640,6 +640,32 @@ def _is_boolean(self) -> bool:
return is_bool_dtype(self.categories)
+ def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
+ # check if we have all categorical dtype with identical categories
+ if all(isinstance(x, CategoricalDtype) for x in dtypes):
+ first = dtypes[0]
+ if all(first == other for other in dtypes[1:]):
+ return first
+
+ # special case non-initialized categorical
+ # TODO we should figure out the expected return value in general
+ non_init_cats = [
+ isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes
+ ]
+ if all(non_init_cats):
+ return self
+ elif any(non_init_cats):
+ return None
+
+ # extract the categories' dtype
+ non_cat_dtypes = [
+ x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in dtypes
+ ]
+ # TODO should categorical always give an answer?
+ from pandas.core.dtypes.cast import find_common_type
+
+ return find_common_type(non_cat_dtypes)
+
@register_extension_dtype
class DatetimeTZDtype(PandasExtensionDtype):
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index e9bbd915df768..bd07fefd03d2a 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -24,6 +24,7 @@
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algos
+from pandas.core.arrays import ExtensionArray
from pandas.core.internals.blocks import make_block
from pandas.core.internals.managers import BlockManager
@@ -65,13 +66,13 @@ def concatenate_block_managers(
blk = join_units[0].block
vals = [ju.block.values for ju in join_units]
- if not blk.is_extension or blk.is_datetimetz or blk.is_categorical:
- # datetimetz and categorical can have the same type but multiple
- # dtypes, concatting does not necessarily preserve dtype
+ if not blk.is_extension:
values = concat_compat(vals, axis=blk.ndim - 1)
else:
# TODO(EA2D): special-casing not needed with 2D EAs
values = concat_compat(vals)
+ if not isinstance(values, ExtensionArray):
+ values = values.reshape(1, len(values))
b = make_block(values, placement=placement, ndim=blk.ndim)
else:
diff --git a/pandas/tests/arrays/integer/test_concat.py b/pandas/tests/arrays/integer/test_concat.py
new file mode 100644
index 0000000000000..3ace35700bd3e
--- /dev/null
+++ b/pandas/tests/arrays/integer/test_concat.py
@@ -0,0 +1,26 @@
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+
+@pytest.mark.parametrize(
+ "to_concat_dtypes, result_dtype",
+ [
+ (["Int64", "Int64"], "Int64"),
+ (["UInt64", "UInt64"], "UInt64"),
+ (["Int8", "Int8"], "Int8"),
+ (["Int8", "Int16"], "Int16"),
+ (["UInt8", "Int8"], "Int16"),
+ (["Int32", "UInt32"], "Int64"),
+ # this still gives object (awaiting float extension dtype)
+ (["Int64", "UInt64"], "object"),
+ ],
+)
+def test_concat_series(to_concat_dtypes, result_dtype):
+
+ result = pd.concat([pd.Series([1, 2, pd.NA], dtype=t) for t in to_concat_dtypes])
+ expected = pd.concat([pd.Series([1, 2, pd.NA], dtype=object)] * 2).astype(
+ result_dtype
+ )
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py
index ee4e199fbfe45..65e32d716a4db 100644
--- a/pandas/tests/extension/base/dtype.py
+++ b/pandas/tests/extension/base/dtype.py
@@ -112,3 +112,10 @@ def test_construct_from_string_wrong_type_raises(self, dtype):
match="'construct_from_string' expects a string, got <class 'int'>",
):
type(dtype).construct_from_string(0)
+
+ def test_get_common_dtype(self, dtype):
+ # in practice we will not typically call this with a 1-length list
+ # (we shortcut to just use that dtype as the common dtype), but
+ # still testing as good practice to have this working (and it is the
+ # only case we can test in general)
+ assert dtype._get_common_dtype([dtype]) == dtype
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index b2239c077bd69..5fd44d7cd74a9 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -584,7 +584,7 @@ def test_interleave_dtype(self, mgr_string, dtype):
mgr = create_mgr("a: complex")
assert mgr.as_array().dtype == "complex"
mgr = create_mgr("a: f8; b: category")
- assert mgr.as_array().dtype == "object"
+ assert mgr.as_array().dtype == "f8"
mgr = create_mgr("a: M8[ns]; b: category")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: M8[ns]; b: bool")
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index 7c01664df0607..ac3d83c29cdc4 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -610,11 +610,11 @@ def test_concat_categorical_3elem_coercion(self):
s2 = pd.Series([2, 1, 2], dtype="category")
s3 = pd.Series([1, 2, 1, 2, np.nan])
- exp = pd.Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="object")
+ exp = pd.Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float")
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)
- exp = pd.Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="object")
+ exp = pd.Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float")
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)
@@ -698,7 +698,7 @@ def test_concat_categorical_coercion_nan(self):
s1 = pd.Series([1, np.nan], dtype="category")
s2 = pd.Series([np.nan, np.nan])
- exp = pd.Series([1, np.nan, np.nan, np.nan], dtype="object")
+ exp = pd.Series([1, np.nan, np.nan, np.nan], dtype="float")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
| Exploring what is being discussed in https://github.com/pandas-dev/pandas/issues/22994. @TomAugspurger your idea seems to be working nicely! (it almost removes as much code than it adds (apart from tests/docs), ànd fixes the EA concat bugs ;))
Few notes compared to proposal in #22994:
- Since we already have a `find_common_type` function, decided to use this as the "get_concat_dtype", since it seems this does what is needed for concat
- Extending the EA interface with a `ExensionDtype._get_common_type` method that is used in pandas' `find_common_type` function to dispatch the logic to the extension type
What I already handled:
- general protocol, using this for concat with axis=0 (eg concatting series) and using it as example for IntegerDtype
- handling categoricals this way (which removes the `concat_categorical` helper). This turned up a few cases where we have value-dependent behaviour right now, which we can't easily preserve (mainly regarding NaNs and int dtype, see below)
Still need to handle sparse (those has failing tests now) and maybe datetime, and check other failures. | https://api.github.com/repos/pandas-dev/pandas/pulls/33607 | 2020-04-17T10:05:51Z | 2020-05-02T16:29:44Z | 2020-05-02T16:29:44Z | 2020-05-02T17:26:21Z |
BUG: support count function for custom BaseIndexer rolling windows | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 2a641a37b46d8..50d97284b9023 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -174,8 +174,8 @@ Other API changes
- Added :meth:`DataFrame.value_counts` (:issue:`5377`)
- :meth:`Groupby.groups` now returns an abbreviated representation when called on large dataframes (:issue:`1135`)
- ``loc`` lookups with an object-dtype :class:`Index` and an integer key will now raise ``KeyError`` instead of ``TypeError`` when key is missing (:issue:`31905`)
-- Using a :func:`pandas.api.indexers.BaseIndexer` with ``count``, ``skew``, ``cov``, ``corr`` will now raise a ``NotImplementedError`` (:issue:`32865`)
-- Using a :func:`pandas.api.indexers.BaseIndexer` with ``min``, ``max`` will now return correct results for any monotonic :func:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`)
+- Using a :func:`pandas.api.indexers.BaseIndexer` with ``skew``, ``cov``, ``corr`` will now raise a ``NotImplementedError`` (:issue:`32865`)
+- Using a :func:`pandas.api.indexers.BaseIndexer` with ``count``, ``min``, ``max`` will now return correct results for any monotonic :func:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`)
- Added a :func:`pandas.api.indexers.FixedForwardWindowIndexer` class to support forward-looking windows during ``rolling`` operations.
-
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index 40f17126fa163..436585fe221dd 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -328,6 +328,7 @@ def func(arg, window, min_periods=None):
def validate_baseindexer_support(func_name: Optional[str]) -> None:
# GH 32865: These functions work correctly with a BaseIndexer subclass
BASEINDEXER_WHITELIST = {
+ "count",
"min",
"max",
"mean",
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 3fdf81c4bb570..62f470060b039 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -1171,8 +1171,9 @@ class _Rolling_and_Expanding(_Rolling):
)
def count(self):
- if isinstance(self.window, BaseIndexer):
- validate_baseindexer_support("count")
+ # GH 32865. Using count with custom BaseIndexer subclass
+ # implementations shouldn't end up here
+ assert not isinstance(self.window, BaseIndexer)
blocks, obj = self._create_blocks()
results = []
@@ -1939,7 +1940,9 @@ def aggregate(self, func, *args, **kwargs):
def count(self):
# different impl for freq counting
- if self.is_freq_type:
+ # GH 32865. Use a custom count function implementation
+ # when using a BaseIndexer subclass as a window
+ if self.is_freq_type or isinstance(self.window, BaseIndexer):
window_func = self._get_roll_func("roll_count")
return self._apply(window_func, center=self.center, name="count")
diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
index 43489e310bb93..1a3fe865d2a7a 100644
--- a/pandas/tests/window/test_base_indexer.py
+++ b/pandas/tests/window/test_base_indexer.py
@@ -82,7 +82,7 @@ def get_window_bounds(self, num_values, min_periods, center, closed):
df.rolling(indexer, win_type="boxcar")
-@pytest.mark.parametrize("func", ["count", "skew", "cov", "corr"])
+@pytest.mark.parametrize("func", ["skew", "cov", "corr"])
def test_notimplemented_functions(func):
# GH 32865
class CustomIndexer(BaseIndexer):
@@ -99,6 +99,7 @@ def get_window_bounds(self, num_values, min_periods, center, closed):
@pytest.mark.parametrize(
"func,np_func,expected,np_kwargs",
[
+ ("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {},),
("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {},),
(
"max",
| - [X] xref #32865
- [X] 1 tests added / 1 passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
## Scope of PR
This PR makes sure that when we call `count` with a `BaseIndexer` subclass, custom `start` and `end` arrays get calculated, and the algorithm in the `aggregations.pyx` gets called instead of the `_Window.create_blocks`.
Turns out that we were sending the calculation into `aggregations.pyx` only for frequency-based windows, and custom `BaseIndexer` subclasses got ignored. Fixed it and cleaned the code up a bit.
## Background on the wider issue
We currently don't support several rolling window functions when building a rolling window object using a custom class descended from `pandas.api.indexers.Baseindexer`. The implementations were written with backward-looking windows in mind, and this led to these functions breaking.
Currently, using these functions returns a `NotImplemented` error thanks to #33057, but ideally we want to update the implementations, so that they will work without a performance hit. This is what I aim to do over a series of PRs.
## Perf notes
No changes to the algorithms necessary.
| https://api.github.com/repos/pandas-dev/pandas/pulls/33605 | 2020-04-17T07:28:21Z | 2020-04-17T21:33:07Z | 2020-04-17T21:33:07Z | 2020-04-18T05:45:10Z |
BUG: DatetimeIndex.intersection losing freq and tz | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 0682e179a7640..8c65a7268aa92 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -458,6 +458,7 @@ Datetimelike
- Bug in :class:`Timestamp` arithmetic when adding or subtracting a ``np.ndarray`` with ``timedelta64`` dtype (:issue:`33296`)
- Bug in :meth:`DatetimeIndex.to_period` not infering the frequency when called with no arguments (:issue:`33358`)
- Bug in :meth:`DatetimeIndex.tz_localize` incorrectly retaining ``freq`` in some cases where the original freq is no longer valid (:issue:`30511`)
+- Bug in :meth:`DatetimeIndex.intersection` losing ``freq`` and timezone in some cases (:issue:`33604`)
Timedelta
^^^^^^^^^
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index b83b64c144681..53205d3b402d0 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -724,10 +724,10 @@ def intersection(self, other, sort=False):
start = right[0]
if end < start:
- return type(self)(data=[])
+ return type(self)(data=[], dtype=self.dtype, freq=self.freq)
else:
lslice = slice(*left.slice_locs(start, end))
- left_chunk = left.values[lslice]
+ left_chunk = left._values[lslice]
return self._shallow_copy(left_chunk)
def _can_fast_union(self, other) -> bool:
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index ff15cded19b1c..08b8e710237c5 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -75,8 +75,9 @@ def test_getitem(self):
def test_dti_business_getitem(self):
rng = pd.bdate_range(START, END)
smaller = rng[:5]
- exp = DatetimeIndex(rng.view(np.ndarray)[:5])
+ exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="B")
tm.assert_index_equal(smaller, exp)
+ assert smaller.freq == exp.freq
assert smaller.freq == rng.freq
@@ -102,8 +103,9 @@ def test_dti_business_getitem_matplotlib_hackaround(self):
def test_dti_custom_getitem(self):
rng = pd.bdate_range(START, END, freq="C")
smaller = rng[:5]
- exp = DatetimeIndex(rng.view(np.ndarray)[:5])
+ exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="C")
tm.assert_index_equal(smaller, exp)
+ assert smaller.freq == exp.freq
assert smaller.freq == rng.freq
sliced = rng[::5]
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index c088301097beb..7182d05b77be3 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -269,14 +269,34 @@ def test_intersection(self, tz, sort):
assert result.freq is None
assert result.tz == expected.tz
- def test_intersection_empty(self):
+ # parametrize over both anchored and non-anchored freqs, as they
+ # have different code paths
+ @pytest.mark.parametrize("freq", ["T", "B"])
+ def test_intersection_empty(self, tz_aware_fixture, freq):
# empty same freq GH2129
- rng = date_range("6/1/2000", "6/15/2000", freq="T")
+ tz = tz_aware_fixture
+ rng = date_range("6/1/2000", "6/15/2000", freq=freq, tz=tz)
result = rng[0:0].intersection(rng)
assert len(result) == 0
+ assert result.freq == rng.freq
result = rng.intersection(rng[0:0])
assert len(result) == 0
+ assert result.freq == rng.freq
+
+ # no overlap GH#33604
+ result = rng[:3].intersection(rng[-3:])
+ tm.assert_index_equal(result, rng[:0])
+ if freq != "T":
+ # We don't preserve freq on non-anchored offsets
+ assert result.freq == rng.freq
+
+ # swapped left and right
+ result = rng[-3:].intersection(rng[:3])
+ tm.assert_index_equal(result, rng[:0])
+ if freq != "T":
+ # We don't preserve freq on non-anchored offsets
+ assert result.freq == rng.freq
def test_intersection_bug_1708(self):
from pandas import DateOffset
@@ -450,6 +470,7 @@ def test_intersection_bug(self):
b = bdate_range("12/10/2011", "12/20/2011")
result = a.intersection(b)
tm.assert_index_equal(result, b)
+ assert result.freq == b.freq
def test_month_range_union_tz_pytz(self, sort):
from pytz import timezone
@@ -527,3 +548,4 @@ def test_intersection_bug(self):
b = bdate_range("12/10/2011", "12/20/2011", freq="C")
result = a.intersection(b)
tm.assert_index_equal(result, b)
+ assert result.freq == b.freq
diff --git a/pandas/tests/indexes/timedeltas/test_setops.py b/pandas/tests/indexes/timedeltas/test_setops.py
index 4808950f17b52..d7576697435a0 100644
--- a/pandas/tests/indexes/timedeltas/test_setops.py
+++ b/pandas/tests/indexes/timedeltas/test_setops.py
@@ -106,6 +106,7 @@ def test_intersection_bug_1708(self):
result = index_1 & index_2
expected = timedelta_range("1 day 01:00:00", periods=3, freq="h")
tm.assert_index_equal(result, expected)
+ assert result.freq == expected.freq
def test_intersection_equal(self, sort):
# GH 24471 Test intersection outcome given the sort keyword
@@ -182,7 +183,7 @@ def test_intersection(self, rng, expected, sort):
TimedeltaIndex(["2 hour", "5 hour", "5 hour", "1 hour"], name="other"),
TimedeltaIndex(["1 hour", "2 hour"], name=None),
),
- # reveresed index
+ # reversed index
(
TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")[
::-1
@@ -200,7 +201,7 @@ def test_intersection_non_monotonic(self, rng, expected, sort):
tm.assert_index_equal(result, expected)
assert result.name == expected.name
- # if reveresed order, frequency is still the same
+ # if reversed order, frequency is still the same
if all(base == rng[::-1]) and sort is None:
assert isinstance(result.freq, Hour)
else:
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33604 | 2020-04-17T02:37:56Z | 2020-04-17T21:24:56Z | 2020-04-17T21:24:56Z | 2020-04-17T22:07:16Z |
.format changed to f string | diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 607a1b75dcfcd..e3f4a80ecce7c 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1727,20 +1727,19 @@ def flip(xs) -> np.ndarray:
tolerance = self.tolerance
# we require sortedness and non-null values in the join keys
- msg_sorted = "{side} keys must be sorted"
- msg_missings = "Merge keys contain null values on {side} side"
-
if not Index(left_values).is_monotonic:
+ side = "left"
if isna(left_values).any():
- raise ValueError(msg_missings.format(side="left"))
+ raise ValueError(f"Merge keys contain null values on {side} side")
else:
- raise ValueError(msg_sorted.format(side="left"))
+ raise ValueError(f"{side} keys must be sorted")
if not Index(right_values).is_monotonic:
+ side = "right"
if isna(right_values).any():
- raise ValueError(msg_missings.format(side="right"))
+ raise ValueError(f"Merge keys contain null values on {side} side")
else:
- raise ValueError(msg_sorted.format(side="right"))
+ raise ValueError(f"{side} keys must be sorted")
# initial type conversion as needed
if needs_i8_conversion(left_values):
| .format changed to f string
https://github.com/pandas-dev/pandas/issues/29547
- [ ] closes #xxxx
- [x] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33602 | 2020-04-17T00:59:56Z | 2020-04-17T02:58:37Z | 2020-04-17T02:58:36Z | 2020-04-17T02:58:45Z |
PERF: op(frame, series) when series is not EA | diff --git a/asv_bench/benchmarks/arithmetic.py b/asv_bench/benchmarks/arithmetic.py
index 2745db58e83e3..8aa29468559b2 100644
--- a/asv_bench/benchmarks/arithmetic.py
+++ b/asv_bench/benchmarks/arithmetic.py
@@ -67,7 +67,7 @@ def time_series_op_with_fill_value_no_nas(self):
self.ser.add(self.ser, fill_value=4)
-class MixedFrameWithSeriesAxis0:
+class MixedFrameWithSeriesAxis:
params = [
[
"eq",
@@ -78,7 +78,7 @@ class MixedFrameWithSeriesAxis0:
"gt",
"add",
"sub",
- "div",
+ "truediv",
"floordiv",
"mul",
"pow",
@@ -87,15 +87,19 @@ class MixedFrameWithSeriesAxis0:
param_names = ["opname"]
def setup(self, opname):
- arr = np.arange(10 ** 6).reshape(100, -1)
+ arr = np.arange(10 ** 6).reshape(1000, -1)
df = DataFrame(arr)
df["C"] = 1.0
self.df = df
self.ser = df[0]
+ self.row = df.iloc[0]
def time_frame_op_with_series_axis0(self, opname):
getattr(self.df, opname)(self.ser, axis=0)
+ def time_frame_op_with_series_axis1(self, opname):
+ getattr(operator, opname)(self.df, self.ser)
+
class Ops:
diff --git a/asv_bench/benchmarks/stat_ops.py b/asv_bench/benchmarks/stat_ops.py
index ebbd3c9eddfdb..5639d6702a92c 100644
--- a/asv_bench/benchmarks/stat_ops.py
+++ b/asv_bench/benchmarks/stat_ops.py
@@ -11,8 +11,8 @@ class FrameOps:
param_names = ["op", "dtype", "axis"]
def setup(self, op, dtype, axis):
- if op == "mad" and dtype == "Int64" and axis == 1:
- # GH-33036
+ if op == "mad" and dtype == "Int64":
+ # GH-33036, GH#33600
raise NotImplementedError
values = np.random.randn(100000, 4)
if dtype == "Int64":
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 9d40f9b6ffa2c..873ca5b54dcd7 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -418,6 +418,7 @@ Performance improvements
- Performance improvement in :class:`Timedelta` constructor (:issue:`30543`)
- Performance improvement in :class:`Timestamp` constructor (:issue:`30543`)
- Performance improvement in flex arithmetic ops between :class:`DataFrame` and :class:`Series` with ``axis=0`` (:issue:`31296`)
+- Performance improvement in arithmetic ops between :class:`DataFrame` and :class:`Series` with ``axis=1`` (:issue:`33600`)
- The internal index method :meth:`~Index._shallow_copy` now copies cached attributes over to the new index,
avoiding creating these again on the new index. This can speed up many operations that depend on creating copies of
existing indexes (:issue:`28584`, :issue:`32640`, :issue:`32669`)
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 8c93dca783113..d161501138162 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -518,11 +518,22 @@ def __truediv__(self, other):
return self._data / other
elif is_object_dtype(other.dtype):
- # Note: we do not do type inference on the result, so either
- # an object array or numeric-dtyped (if numpy does inference)
- # will be returned. GH#23829
- result = [self[n] / other[n] for n in range(len(self))]
- result = np.array(result)
+ # We operate on raveled arrays to avoid problems in inference
+ # on NaT
+ srav = self.ravel()
+ orav = other.ravel()
+ result = [srav[n] / orav[n] for n in range(len(srav))]
+ result = np.array(result).reshape(self.shape)
+
+ # We need to do dtype inference in order to keep DataFrame ops
+ # behavior consistent with Series behavior
+ inferred = lib.infer_dtype(result)
+ if inferred == "timedelta":
+ flat = result.ravel()
+ result = type(self)._from_sequence(flat).reshape(result.shape)
+ elif inferred == "floating":
+ result = result.astype(float)
+
return result
else:
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 9a7c9fdadf90d..57ca582384f39 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -522,6 +522,16 @@ def _combine_series_frame(left, right, func, axis: int, str_rep: str):
new_data = dispatch_to_series(left, right, func)
else:
+ rvalues = right._values
+ if isinstance(rvalues, np.ndarray):
+ # We can operate block-wise
+ rvalues = rvalues.reshape(1, -1)
+ rvalues = np.broadcast_to(rvalues, left.shape)
+
+ array_op = get_array_op(func, str_rep=str_rep)
+ bm = left._mgr.apply(array_op, right=rvalues.T, align_keys=["right"])
+ return type(left)(bm)
+
new_data = dispatch_to_series(left, right, func, axis="columns")
return left._construct_result(new_data)
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index a8e9ad9ff7cc9..cfe2a27a1b5eb 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -1473,7 +1473,10 @@ def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture, box_with_array):
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
- warn = None if box_with_array is pd.DataFrame else PerformanceWarning
+ warn = PerformanceWarning
+ if box_with_array is pd.DataFrame and tz is not None:
+ warn = None
+
with tm.assert_produces_warning(warn):
res = dtarr + other
expected = DatetimeIndex(
@@ -2434,7 +2437,10 @@ def test_dti_addsub_object_arraylike(
expected = pd.DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
- warn = None if box_with_array is pd.DataFrame else PerformanceWarning
+ warn = PerformanceWarning
+ if box_with_array is pd.DataFrame and tz is not None:
+ warn = None
+
with tm.assert_produces_warning(warn):
result = dtarr + other
tm.assert_equal(result, expected)
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 8387e4d708662..0dc97f0780085 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -1323,14 +1323,11 @@ def test_td64arr_add_offset_index(self, names, box):
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
- # The DataFrame operation is transposed and so operates as separate
- # scalar operations, which do not issue a PerformanceWarning
- warn = PerformanceWarning if box is not pd.DataFrame else None
- with tm.assert_produces_warning(warn):
+ with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
- with tm.assert_produces_warning(warn):
+ with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@@ -1349,14 +1346,11 @@ def test_td64arr_add_offset_array(self, box_with_array):
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
- # The DataFrame operation is transposed and so operates as separate
- # scalar operations, which do not issue a PerformanceWarning
- warn = PerformanceWarning if box is not pd.DataFrame else None
- with tm.assert_produces_warning(warn):
+ with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
- with tm.assert_produces_warning(warn):
+ with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@@ -1385,10 +1379,7 @@ def test_td64arr_sub_offset_index(self, names, box_with_array):
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
- # The DataFrame operation is transposed and so operates as separate
- # scalar operations, which do not issue a PerformanceWarning
- warn = PerformanceWarning if box is not pd.DataFrame else None
- with tm.assert_produces_warning(warn):
+ with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@@ -1404,10 +1395,7 @@ def test_td64arr_sub_offset_array(self, box_with_array):
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
- # The DataFrame operation is transposed and so operates as separate
- # scalar operations, which do not issue a PerformanceWarning
- warn = None if box_with_array is pd.DataFrame else PerformanceWarning
- with tm.assert_produces_warning(warn):
+ with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@@ -1478,28 +1466,31 @@ def test_td64arr_add_sub_object_array(self, box_with_array):
[pd.Timedelta(days=1), pd.offsets.Day(2), pd.Timestamp("2000-01-04")]
)
- warn = PerformanceWarning if box_with_array is not pd.DataFrame else None
- with tm.assert_produces_warning(warn):
+ with tm.assert_produces_warning(PerformanceWarning):
result = tdarr + other
expected = pd.Index(
[pd.Timedelta(days=2), pd.Timedelta(days=4), pd.Timestamp("2000-01-07")]
)
expected = tm.box_expected(expected, box_with_array)
+ if box_with_array is pd.DataFrame:
+ expected = expected.astype(object)
tm.assert_equal(result, expected)
msg = "unsupported operand type|cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
- with tm.assert_produces_warning(warn):
+ with tm.assert_produces_warning(PerformanceWarning):
tdarr - other
- with tm.assert_produces_warning(warn):
+ with tm.assert_produces_warning(PerformanceWarning):
result = other - tdarr
expected = pd.Index(
[pd.Timedelta(0), pd.Timedelta(0), pd.Timestamp("2000-01-01")]
)
expected = tm.box_expected(expected, box_with_array)
+ if box_with_array is pd.DataFrame:
+ expected = expected.astype(object)
tm.assert_equal(result, expected)
@@ -2039,6 +2030,7 @@ def test_td64arr_div_numeric_array(self, box_with_array, vector, any_real_dtype)
expected = [tdser.iloc[0, n] / vector[n] for n in range(len(vector))]
else:
expected = [tdser[n] / vector[n] for n in range(len(tdser))]
+ expected = pd.Index(expected) # do dtype inference
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index d929d3e030508..d75f1f14b6369 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -613,13 +613,6 @@ def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
- if opname in ["__rmod__", "__rfloordiv__"]:
- # exvals will have dtypes [f8, i8, i8] so expected will be
- # all-f8, but the DataFrame operation will return mixed dtypes
- # use exvals[-1].dtype instead of "i8" for compat with 32-bit
- # systems/pythons
- expected[False] = expected[False].astype(exvals[-1].dtype)
-
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
@@ -1042,7 +1035,7 @@ def test_combine_series(
# no upcast needed
added = mixed_float_frame + series
- _check_mixed_float(added)
+ assert np.all(added.dtypes == series.dtype)
# vs mix (upcast) as needed
added = mixed_float_frame + series.astype("float32")
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 1340f514e31ce..ba1b3e9d0ca8e 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -266,23 +266,24 @@ def test_scalar_na_logical_ops_corners(self):
result = s & list(s)
tm.assert_series_equal(result, expected)
+ def test_scalar_na_logical_ops_corners_aligns(self):
+ s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
+ s[::2] = np.nan
d = DataFrame({"A": s})
- # TODO: Fix this exception - needs to be fixed! (see GH5035)
- # (previously this was a TypeError because series returned
- # NotImplemented
- # this is an alignment issue; these are equivalent
- # https://github.com/pandas-dev/pandas/issues/5284
+ expected = DataFrame(False, index=range(9), columns=["A"] + list(range(9)))
- with pytest.raises(TypeError):
- d.__and__(s, axis="columns")
- with pytest.raises(TypeError):
- d.__and__(s, axis=1)
+ result = d.__and__(s, axis="columns")
+ tm.assert_frame_equal(result, expected)
- with pytest.raises(TypeError):
- s & d
- with pytest.raises(TypeError):
- d & s
+ result = d.__and__(s, axis=1)
+ tm.assert_frame_equal(result, expected)
+
+ result = s & d
+ tm.assert_frame_equal(result, expected)
+
+ result = d & s
+ tm.assert_frame_equal(result, expected)
expected = (s & s).to_frame("A")
result = d.__and__(s, axis="index")
| We have the same optimization in place for the flex ops.
ipython results for the asv this adds:
```
In [3]: arr = np.arange(10 ** 6).reshape(100, -1)
In [4]: df = pd.DataFrame(arr)
In [5]: df["C"] = 1.0
In [6]: row = df.iloc[0]
In [11]: %timeit df + row
1.92 s ± 71.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) # <-- master
1 ms ± 19.2 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) # <-- PR
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/33600 | 2020-04-17T00:39:34Z | 2020-04-25T21:40:30Z | 2020-04-25T21:40:30Z | 2020-04-25T21:46:55Z |
CLN: remove BlockManager._get_counts, get_dtype_counts | diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index a3aff45afa116..44f71b392c0eb 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -564,7 +564,7 @@ def setup(self):
def time_frame_get_dtype_counts(self):
with warnings.catch_warnings(record=True):
- self.df._data.get_dtype_counts()
+ self.df.dtypes.value_counts()
def time_info(self):
self.df.info()
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 12e553cbd7ca4..485180b031e03 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -74,7 +74,6 @@ class BlockManager(PandasObject):
set_axis(axis, new_labels)
copy(deep=True)
- get_dtype_counts
get_dtypes
apply(func, axes, block_filter_fn)
@@ -256,18 +255,6 @@ def _rebuild_blknos_and_blklocs(self) -> None:
def items(self) -> Index:
return self.axes[0]
- def _get_counts(self, f):
- """ return a dict of the counts of the function in BlockManager """
- self._consolidate_inplace()
- counts = dict()
- for b in self.blocks:
- v = f(b)
- counts[v] = counts.get(v, 0) + b.shape[0]
- return counts
-
- def get_dtype_counts(self):
- return self._get_counts(lambda b: b.dtype.name)
-
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_1d(dtypes, self.blknos, allow_fill=False)
@@ -1530,9 +1517,6 @@ def index(self) -> Index:
def dtype(self) -> DtypeObj:
return self._block.dtype
- def get_dtype_counts(self):
- return {self.dtype.name: 1}
-
def get_dtypes(self) -> np.ndarray:
return np.array([self._block.dtype])
| https://api.github.com/repos/pandas-dev/pandas/pulls/33599 | 2020-04-16T21:01:28Z | 2020-04-17T02:34:54Z | 2020-04-17T02:34:54Z | 2020-04-17T03:00:30Z | |
CLN: trim unreachable branches in _compare_or_regex_search | diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 12e553cbd7ca4..bb0e9a27b376b 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1739,7 +1739,7 @@ def form_blocks(arrays, names: Index, axes) -> List[Block]:
return blocks
-def _simple_blockify(tuples, dtype):
+def _simple_blockify(tuples, dtype) -> List[Block]:
"""
return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
@@ -1862,7 +1862,7 @@ def _merge_blocks(
def _compare_or_regex_search(
- a: Union[ArrayLike, Scalar], b: Union[ArrayLike, Scalar], regex: bool = False
+ a: ArrayLike, b: Scalar, regex: bool = False
) -> Union[ArrayLike, bool]:
"""
Compare two array_like inputs of the same shape or two scalar values
@@ -1872,8 +1872,8 @@ def _compare_or_regex_search(
Parameters
----------
- a : array_like or scalar
- b : array_like or scalar
+ a : array_like
+ b : scalar
regex : bool, default False
Returns
@@ -1882,29 +1882,21 @@ def _compare_or_regex_search(
"""
def _check_comparison_types(
- result: Union[ArrayLike, bool],
- a: Union[ArrayLike, Scalar],
- b: Union[ArrayLike, Scalar],
- ) -> Union[ArrayLike, bool]:
+ result: Union[ArrayLike, bool], a: ArrayLike, b: Scalar,
+ ):
"""
Raises an error if the two arrays (a,b) cannot be compared.
Otherwise, returns the comparison result as expected.
"""
- if is_scalar(result) and (
- isinstance(a, np.ndarray) or isinstance(b, np.ndarray)
- ):
+ if is_scalar(result) and isinstance(a, np.ndarray):
type_names = [type(a).__name__, type(b).__name__]
if isinstance(a, np.ndarray):
type_names[0] = f"ndarray(dtype={a.dtype})"
- if isinstance(b, np.ndarray):
- type_names[1] = f"ndarray(dtype={b.dtype})"
-
raise TypeError(
f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}"
)
- return result
if not regex:
op = lambda x: operator.eq(x, b)
@@ -1918,18 +1910,13 @@ def _check_comparison_types(
# GH#32621 use mask to avoid comparing to NAs
if isinstance(a, np.ndarray) and not isinstance(b, np.ndarray):
mask = np.reshape(~(isna(a)), a.shape)
- elif isinstance(b, np.ndarray) and not isinstance(a, np.ndarray):
- mask = np.reshape(~(isna(b)), b.shape)
- elif isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
- mask = ~(isna(a) | isna(b))
if isinstance(a, np.ndarray):
a = a[mask]
- if isinstance(b, np.ndarray):
- b = b[mask]
if is_datetimelike_v_numeric(a, b) or is_numeric_v_string_like(a, b):
# GH#29553 avoid deprecation warnings from numpy
- return _check_comparison_types(False, a, b)
+ _check_comparison_types(False, a, b)
+ return False
result = op(a)
@@ -1940,7 +1927,8 @@ def _check_comparison_types(
tmp[mask] = result
result = tmp
- return _check_comparison_types(result, a, b)
+ _check_comparison_types(result, a, b)
+ return result
def _fast_count_smallints(arr: np.ndarray) -> np.ndarray:
| https://api.github.com/repos/pandas-dev/pandas/pulls/33598 | 2020-04-16T20:57:40Z | 2020-04-17T02:36:03Z | 2020-04-17T02:36:03Z | 2020-04-17T02:58:54Z | |
PERF: always slice when indexing on columns | diff --git a/pandas/_libs/internals.pyi b/pandas/_libs/internals.pyi
index 1791cbb85c355..5da32409c317f 100644
--- a/pandas/_libs/internals.pyi
+++ b/pandas/_libs/internals.pyi
@@ -52,6 +52,7 @@ class BlockPlacement:
def delete(self, loc) -> BlockPlacement: ...
def append(self, others: list[BlockPlacement]) -> BlockPlacement: ...
def tile_for_unstack(self, factor: int) -> np.ndarray: ...
+ def to_slices(self) -> list[BlockPlacement]: ...
class SharedBlock:
_mgr_locs: BlockPlacement
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 559359bdf3353..dd72bf364cc68 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -264,6 +264,20 @@ cdef class BlockPlacement:
new_placement = np.concatenate(mapped)
return new_placement
+ def to_slices(self):
+ """
+ Decompose a BlockPlacement into a list of BlockPlacements, each of
+ which is slice-like.
+
+ Returns
+ -------
+ List[BlockPlacement]
+ """
+ if self.is_slice_like:
+ return [self]
+ slices = indexer_as_slices(self.indexer)
+ return [BlockPlacement(x) for x in slices]
+
cdef slice slice_canonize(slice s):
"""
@@ -303,7 +317,7 @@ cdef slice slice_canonize(slice s):
stop = start
if start < 0 or (stop < 0 and s.stop is not None and step > 0):
- raise ValueError("unbounded slice")
+ raise ValueError("unbounded slice", start, stop, step, s)
if stop < 0:
return slice(start, None, step)
@@ -386,6 +400,50 @@ cdef slice_getitem(slice slc, ind):
return cnp.PyArray_Arange(s_start, s_stop, s_step, NPY_INTP)[ind]
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef list indexer_as_slices(int64_t[:] vals):
+ """
+ Convert an indexer to a list of slices.
+ """
+ # TODO: there may be more efficient ways to decompose an indexer into slices
+ cdef:
+ Py_ssize_t i, n, start, stop
+ int64_t d
+
+ if vals is None:
+ raise TypeError("vals must be ndarray")
+
+ n = vals.shape[0]
+
+ if n == 0:
+ return []
+
+ if n == 1:
+ return [slice(vals[0], vals[0] + 1, 1)]
+
+ # n >= 2
+ d = vals[1] - vals[0]
+
+ if d == 0:
+ # i guess we have duplicate values (TODO: how?)
+ return [slice(vals[0], vals[0] + 1, 1)] + indexer_as_slices(vals[1:])
+
+ for i in range(2, n):
+ if vals[i] - vals[i - 1] != d:
+ # we need to start a new slice
+ start = vals[0]
+ stop = vals[i - 1] + d
+ return [slice(start, stop, d)] + indexer_as_slices(vals[i:])
+
+ start = vals[0]
+ stop = start + n * d
+ if stop < 0 and d < 0:
+ return [slice(start, None, d)]
+ else:
+ return [slice(start, stop, d)]
+
+
@cython.boundscheck(False)
@cython.wraparound(False)
cdef slice indexer_as_slice(intp_t[:] vals):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e02a88aafcf34..266b9c728ddd6 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4589,6 +4589,9 @@ def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy)
columns, method, copy, level, fill_value, limit, tolerance
)
+ # If we have made a copy, no need to make another one
+ copy = False
+
index = axes["index"]
if index is not None:
frame = frame._reindex_index(
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index b355ae2426884..ec84c378deb0e 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -661,6 +661,8 @@ def reindex_indexer(
new_blocks = self._slice_take_blocks_ax0(
indexer, fill_value=fill_value, only_slice=only_slice
)
+ if copy:
+ new_blocks = [x.copy() for x in new_blocks]
else:
new_blocks = [
blk.take_nd(
@@ -696,6 +698,7 @@ def _slice_take_blocks_ax0(
only_slice : bool, default False
If True, we always return views on existing arrays, never copies.
This is used when called from ops.blockwise.operate_blockwise.
+ Ignored; TODO: remove argument.
Returns
-------
@@ -721,7 +724,7 @@ def _slice_take_blocks_ax0(
if allow_fill and fill_value is None:
fill_value = blk.fill_value
- if not allow_fill and only_slice:
+ if not allow_fill:
# GH#33597 slice instead of take, so we get
# views instead of copies
blocks = [
@@ -758,8 +761,7 @@ def _slice_take_blocks_ax0(
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
blocks = []
- group = not only_slice
- for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group):
+ for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=False):
if blkno == -1:
# If we've got here, fill_value was not lib.no_default
@@ -785,24 +787,31 @@ def _slice_take_blocks_ax0(
# we may try to only slice
taker = blklocs[mgr_locs.indexer]
max_len = max(len(mgr_locs), taker.max() + 1)
- if only_slice:
- taker = lib.maybe_indices_to_slice(taker, max_len)
+ taker = lib.maybe_indices_to_slice(taker, max_len)
if isinstance(taker, slice):
+ bp = libinternals.BlockPlacement(taker)
+ bps = bp.to_slices()
+ assert len(bps) == 1 # just checking
nb = blk.getitem_block_columns(taker, new_mgr_locs=mgr_locs)
blocks.append(nb)
- elif only_slice:
+ else:
# GH#33597 slice instead of take, so we get
# views instead of copies
- for i, ml in zip(taker, mgr_locs):
- slc = slice(i, i + 1)
- bp = BlockPlacement(ml)
- nb = blk.getitem_block_columns(slc, new_mgr_locs=bp)
+ bp = libinternals.BlockPlacement(taker)
+ bps = bp.to_slices()
+ left, right = 0, 0
+
+ for sub in bps:
+ right += len(sub)
+ new_locs = mgr_locs[left:right]
+ # we have isinstance(sub.indexer, slice)
+ nb = blk.getitem_block_columns(
+ sub.indexer, new_mgr_locs=new_locs
+ )
# We have np.shares_memory(nb.values, blk.values)
blocks.append(nb)
- else:
- nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs)
- blocks.append(nb)
+ left += len(sub)
return blocks
diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py
index d2704876c31c5..8c7f05d40552d 100644
--- a/pandas/tests/frame/indexing/test_xs.py
+++ b/pandas/tests/frame/indexing/test_xs.py
@@ -374,12 +374,11 @@ def test_xs_droplevel_false_view(self, using_array_manager):
expected = DataFrame({"a": [1]})
tm.assert_frame_equal(result, expected)
- # with mixed dataframe, modifying the parent doesn't modify result
- # TODO the "split" path behaves differently here as with single block
+ # with mixed dataframe, we still get a view on the parent
df = DataFrame([[1, 2.5, "a"]], columns=Index(["a", "b", "c"]))
result = df.xs("a", axis=1, drop_level=False)
df.iloc[0, 0] = 2
- expected = DataFrame({"a": [1]})
+ expected = DataFrame({"a": [2]})
tm.assert_frame_equal(result, expected)
def test_xs_list_indexer_droplevel_false(self):
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 362252e1a6b72..ad5f21e3f7ee5 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -717,7 +717,8 @@ def test_reindex_items(self):
mgr = create_mgr("a: f8; b: i8; c: f8; d: i8; e: f8; f: bool; g: f8-2")
reindexed = mgr.reindex_axis(["g", "c", "a", "d"], axis=0)
- assert reindexed.nblocks == 2
+ assert reindexed.nblocks == 3
+ assert all(b.mgr_locs.is_slice_like for b in reindexed.blocks)
tm.assert_index_equal(reindexed.items, Index(["g", "c", "a", "d"]))
tm.assert_almost_equal(
mgr.iget(6).internal_values(), reindexed.iget(0).internal_values()
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Touched on in #32779, this makes all\* DataFrame indexing-on-columns do slicing instead of taking, so doesn't make copies.
\* Actually there is a different path in `_slice_take_blocks_ax0` that we go down if we `self._is_single_block`, can update that later if we decide this is something we want to do.
```
In [3]: dti = pd.date_range("2016-01-01", periods=10**5, freq="S")
In [4]: df = pd.DataFrame._from_arrays([dti]*10 + [dti - dti] * 10 + [dti.to_period("D")]*10, columns=range(30), index=range(len(dti)))
In [8]: arr = np.arange(30)
In [9]: np.random.shuffle(arr)
In [10]: %timeit df[arr]
8.35 ms ± 64.1 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) # <-- master
650 µs ± 52.5 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) # <-- PR
```
The tradeoff is that we end up with less-consolidated results. I'm OK with that, but there may be downsides I'm not aware of, will wait for others to weigh in. | https://api.github.com/repos/pandas-dev/pandas/pulls/33597 | 2020-04-16T20:35:08Z | 2021-11-28T21:05:35Z | null | 2021-12-06T00:53:36Z |
Update _core.py with missing parameters | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index e466a215091ea..efa69c9b7d854 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -606,8 +606,21 @@ class PlotAccessor(PandasObject):
- 'pie' : pie plot
- 'scatter' : scatter plot
- 'hexbin' : hexbin plot.
-
+ ax : matplotlib axes object, default None
+ An axes of the current figure.
+ subplots : bool, default False
+ Make separate subplots for each column.
+ sharex : bool, default True if ax is None else False
+ In case ``subplots=True``, share x axis and set some x axis labels
+ to invisible; defaults to True if ax is None otherwise False if
+ an ax is passed in; Be aware, that passing in both an ax and
+ ``sharex=True`` will alter all x axis labels for all axis in a figure.
+ sharey : bool, default False
+ In case ``subplots=True``, share y axis and set some y axis labels to invisible.
+ layout : tuple, optional
+ (rows, columns) for the layout of subplots.
figsize : a tuple (width, height) in inches
+ Size of a figure object.
use_index : bool, default True
Use index as ticks for x axis.
title : str or list
@@ -637,7 +650,9 @@ class PlotAccessor(PandasObject):
yticks : sequence
Values to use for the yticks.
xlim : 2-tuple/list
+ Set the x limits of the current axes.
ylim : 2-tuple/list
+ Set the y limits of the current axes.
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal
plots).
@@ -663,6 +678,13 @@ class PlotAccessor(PandasObject):
detail.
xerr : DataFrame, Series, array-like, dict and str
Equivalent to yerr.
+ stacked : bool, default False in line and bar plots, and True in area plot
+ If True, create stacked plot.
+ sort_columns : bool, default False
+ Sort column names to determine plot ordering.
+ secondary_y : bool or sequence, default False
+ Whether to plot on the secondary y-axis if a list/tuple, which
+ columns to plot on secondary y-axis.
mark_right : bool, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend.
| added missing parameters:
- ax
- subplots
- sharex
- sharey
- secondary_y
- sort_columns
- stacked
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33596 | 2020-04-16T20:15:12Z | 2020-05-06T21:22:15Z | 2020-05-06T21:22:15Z | 2020-05-07T11:19:22Z |
DEPR: Deprecate week, weekofyear in Series.dt,DatetimeIndex | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 898d18af56f0e..c33cd505d0948 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -590,6 +590,9 @@ Deprecations
- :func:`pandas.api.types.is_categorical` is deprecated and will be removed in a future version; use `:func:pandas.api.types.is_categorical_dtype` instead (:issue:`33385`)
- :meth:`Index.get_value` is deprecated and will be removed in a future version (:issue:`19728`)
+- :meth:`Series.dt.week` and `Series.dt.weekofyear` are deprecated and will be removed in a future version, use :meth:`Series.dt.isocalendar().week` instead (:issue:`33595`)
+- :meth:`DatetimeIndex.week` and `DatetimeIndex.weekofyear` are deprecated and will be removed in a future version, use :meth:`DatetimeIndex.isocalendar().week` instead (:issue:`33595`)
+- :meth:`DatetimeArray.week` and `DatetimeArray.weekofyear` are deprecated and will be removed in a future version, use :meth:`DatetimeArray.isocalendar().week` instead (:issue:`33595`)
- :meth:`DateOffset.__call__` is deprecated and will be removed in a future version, use ``offset + other`` instead (:issue:`34171`)
- Indexing an :class:`Index` object with a float key is deprecated, and will
raise an ``IndexError`` in the future. You can manually convert to an integer key
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 90088c370697e..50d792aeb12f4 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -1298,6 +1298,32 @@ def isocalendar(self):
iso_calendar_df.iloc[self._isnan] = None
return iso_calendar_df
+ @property
+ def weekofyear(self):
+ """
+ The week ordinal of the year.
+
+ .. deprecated:: 1.1.0
+
+ weekofyear and week have been deprecated.
+ Please use DatetimeIndex.isocalendar().week instead.
+ """
+ warnings.warn(
+ "weekofyear and week have been deprecated, please use "
+ "DatetimeIndex.isocalendar().week instead, which returns "
+ "a Series. To exactly reproduce the behavior of week and "
+ "weekofyear and return an Index, you may call "
+ "pd.Int64Index(idx.isocalendar().week)",
+ FutureWarning,
+ stacklevel=3,
+ )
+ week_series = self.isocalendar().week
+ if week_series.hasnans:
+ return week_series.to_numpy(dtype="float64", na_value=np.nan)
+ return week_series.to_numpy(dtype="int64")
+
+ week = weekofyear
+
year = _field_accessor(
"year",
"Y",
@@ -1482,14 +1508,6 @@ def isocalendar(self):
dtype: int64
""",
)
- weekofyear = _field_accessor(
- "weekofyear",
- "woy",
- """
- The week ordinal of the year.
- """,
- )
- week = weekofyear
_dayofweek_doc = """
The day of the week with Monday=0, Sunday=6.
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index 598d228723ac8..881d5ce1fbaab 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -2,6 +2,7 @@
datetimelike delegation
"""
from typing import TYPE_CHECKING
+import warnings
import numpy as np
@@ -250,6 +251,30 @@ def isocalendar(self):
"""
return self._get_values().isocalendar().set_index(self._parent.index)
+ @property
+ def weekofyear(self):
+ """
+ The week ordinal of the year.
+
+ .. deprecated:: 1.1.0
+
+ Series.dt.weekofyear and Series.dt.week have been deprecated.
+ Please use Series.dt.isocalendar().week instead.
+ """
+ warnings.warn(
+ "Series.dt.weekofyear and Series.dt.week have been deprecated. "
+ "Please use Series.dt.isocalendar().week instead.",
+ FutureWarning,
+ stacklevel=2,
+ )
+ week_series = self.isocalendar().week
+ week_series.name = self.name
+ if week_series.hasnans:
+ return week_series.astype("float64")
+ return week_series.astype("int64")
+
+ week = weekofyear
+
@delegate_names(
delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_ops, typ="property"
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index d0bf5bb41bb2c..1a61b379de943 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -546,6 +546,9 @@ def test_bool_properties(self, datetime_index, propname):
@pytest.mark.parametrize("propname", pd.DatetimeIndex._field_ops)
def test_int_properties(self, datetime_index, propname):
+ if propname in ["week", "weekofyear"]:
+ # GH#33595 Deprecate week and weekofyear
+ return
dti = datetime_index
arr = DatetimeArray(dti)
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index e1042bf35acc4..7997247ca0307 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -1022,7 +1022,7 @@ def test_groupby_transform_with_datetimes(func, values):
dates = pd.date_range("1/1/2011", periods=10, freq="D")
stocks = pd.DataFrame({"price": np.arange(10.0)}, index=dates)
- stocks["week_id"] = pd.to_datetime(stocks.index).week
+ stocks["week_id"] = dates.isocalendar().set_index(dates).week
result = stocks.groupby(stocks["week_id"])["price"].transform(func)
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index 42a72125ba411..b9373328eb87f 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -156,8 +156,8 @@ def test_datetimeindex_accessors(self):
assert dti.dayofyear[0] == 1
assert dti.dayofyear[120] == 121
- assert dti.weekofyear[0] == 1
- assert dti.weekofyear[120] == 18
+ assert dti.isocalendar().week[0] == 1
+ assert dti.isocalendar().week[120] == 18
assert dti.quarter[0] == 1
assert dti.quarter[120] == 2
@@ -192,7 +192,7 @@ def test_datetimeindex_accessors(self):
assert len(dti.microsecond) == 365
assert len(dti.dayofweek) == 365
assert len(dti.dayofyear) == 365
- assert len(dti.weekofyear) == 365
+ assert len(dti.isocalendar()) == 365
assert len(dti.quarter) == 365
assert len(dti.is_month_start) == 365
assert len(dti.is_month_end) == 365
@@ -205,6 +205,9 @@ def test_datetimeindex_accessors(self):
# non boolean accessors -> return Index
for accessor in DatetimeIndex._field_ops:
+ if accessor in ["week", "weekofyear"]:
+ # GH#33595 Deprecate week and weekofyear
+ continue
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, Index)
@@ -285,7 +288,7 @@ def test_datetimeindex_accessors(self):
dates = ["2013/12/29", "2013/12/30", "2013/12/31"]
dates = DatetimeIndex(dates, tz="Europe/Brussels")
expected = [52, 1, 1]
- assert dates.weekofyear.tolist() == expected
+ assert dates.isocalendar().week.tolist() == expected
assert [d.weekofyear for d in dates] == expected
# GH 12806
@@ -383,6 +386,15 @@ def test_iter_readonly():
list(dti)
+def test_week_and_weekofyear_are_deprecated():
+ # GH#33595 Deprecate week and weekofyear
+ idx = pd.date_range(start="2019-12-29", freq="D", periods=4)
+ with tm.assert_produces_warning(FutureWarning):
+ idx.week
+ with tm.assert_produces_warning(FutureWarning):
+ idx.weekofyear
+
+
def test_isocalendar_returns_correct_values_close_to_new_year_with_tz():
# GH 6538: Check that DatetimeIndex and its TimeStamp elements
# return the same weekofyear accessor close to new year w/ tz
diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py
index 21ee8649172da..e5d1277aed9cd 100644
--- a/pandas/tests/indexes/datetimes/test_scalar_compat.py
+++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py
@@ -40,8 +40,6 @@ def test_dti_date_out_of_range(self, data):
[
"dayofweek",
"dayofyear",
- "week",
- "weekofyear",
"quarter",
"days_in_month",
"is_month_start",
@@ -59,6 +57,12 @@ def test_dti_timestamp_fields(self, field):
result = getattr(Timestamp(idx[-1]), field)
assert result == expected
+ def test_dti_timestamp_isocalendar_fields(self):
+ idx = tm.makeDateIndex(100)
+ expected = tuple(idx.isocalendar().iloc[-1].to_list())
+ result = idx[-1].isocalendar()
+ assert result == expected
+
def test_dti_timestamp_freq_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index 5a92ee37342d5..e1e2ea1a5cec8 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -66,6 +66,9 @@ def test_nat_vector_field_access():
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
+ if field in ["week", "weekofyear"]:
+ # GH#33595 Deprecate week and weekofyear
+ continue
result = getattr(idx, field)
expected = Index([getattr(x, field) for x in idx])
@@ -78,6 +81,9 @@ def test_nat_vector_field_access():
# on NaT/Timestamp for compat with datetime
if field == "weekday":
continue
+ if field in ["week", "weekofyear"]:
+ # GH#33595 Deprecate week and weekofyear
+ continue
result = getattr(ser.dt, field)
expected = [getattr(x, field) for x in idx]
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index a6430b4525d4a..042841bb4e019 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -719,6 +719,9 @@ def test_dt_accessor_api_for_categorical(self):
tm.assert_equal(res, exp)
for attr in attr_names:
+ if attr in ["week", "weekofyear"]:
+ # GH#33595 Deprecate week and weekofyear
+ continue
res = getattr(c.dt, attr)
exp = getattr(s.dt, attr)
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index 8b1f58414e175..0fd51b8828bc5 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -89,7 +89,8 @@ def compare(s, name):
for s in cases:
for prop in ok_for_dt:
# we test freq below
- if prop != "freq":
+ # we ignore week and weekofyear because they are deprecated
+ if prop not in ["freq", "week", "weekofyear"]:
compare(s, prop)
for prop in ok_for_dt_methods:
@@ -122,7 +123,8 @@ def compare(s, name):
for prop in ok_for_dt:
# we test freq below
- if prop != "freq":
+ # we ignore week and weekofyear because they are deprecated
+ if prop not in ["freq", "week", "weekofyear"]:
compare(s, prop)
for prop in ok_for_dt_methods:
@@ -687,3 +689,12 @@ def test_isocalendar(self, input_series, expected_output):
expected_output, columns=["year", "week", "day"], dtype="UInt32"
)
tm.assert_frame_equal(result, expected_frame)
+
+
+def test_week_and_weekofyear_are_deprecated():
+ # GH#33595 Deprecate week and weekofyear
+ series = pd.to_datetime(pd.Series(["2020-01-01"]))
+ with tm.assert_produces_warning(FutureWarning):
+ series.dt.week
+ with tm.assert_produces_warning(FutureWarning):
+ series.dt.weekofyear
| Closes https://github.com/pandas-dev/pandas/issues/33503
This PR is a followup to #33220 and implements what was discussed there.
A few comments:
- `DatetimeIndex.isocalendar` does not set the index on the returned dataframe,
leading to some cumbersome syntax. This may be worth changing, please let me
know what you think.
- Should we also deprecate `Timestamp.week/weekofyear`?
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/33595 | 2020-04-16T20:02:13Z | 2020-05-26T03:22:04Z | 2020-05-26T03:22:03Z | 2020-05-26T06:36:11Z |
Added a check for standard documentation | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 45b7db74fa409..13e232610c929 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -237,6 +237,14 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
invgrep -RI --exclude=\*.{svg,c,cpp,html,js} --exclude-dir=env "\s$" *
RET=$(($RET + $?)) ; echo $MSG "DONE"
unset INVGREP_APPEND
+
+ MSG = 'Check files that in pandas/doc/source that use standardized documentation' ; echo $MSG
+ for entry in $(find ../doc/source/ -type f)
+ do
+ if grep -q Pandas "$entry" || grep -q *pandas* "$entry" ; then
+ echo "$entry"
+ fi
+ done
fi
### CODE ###
@@ -378,4 +386,4 @@ if [[ -z "$CHECK" || "$CHECK" == "typing" ]]; then
fi
-exit $RET
+exit $RET
\ No newline at end of file
| - [ ] closes #33213
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I created a check to see which files contain Pandas or *pandas* which will be of use for individuals aiming to resolve issue #32316. When the codecheck script is run, it will output the files that contain those strings. | https://api.github.com/repos/pandas-dev/pandas/pulls/33592 | 2020-04-16T15:41:44Z | 2020-07-17T11:16:19Z | null | 2023-05-11T01:19:36Z |
TST: added test for GH28597 | diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index da8327f64e26f..b2545e0e1b4d2 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -1392,3 +1392,48 @@ def test_read_only_category_no_sort():
expected = DataFrame(data={"a": [2, 6]}, index=CategoricalIndex([1, 2], name="b"))
result = df.groupby("b", sort=False).mean()
tm.assert_frame_equal(result, expected)
+
+
+def test_sorted_missing_category_values():
+ # GH 28597
+ df = pd.DataFrame(
+ {
+ "foo": [
+ "small",
+ "large",
+ "large",
+ "large",
+ "medium",
+ "large",
+ "large",
+ "medium",
+ ],
+ "bar": ["C", "A", "A", "C", "A", "C", "A", "C"],
+ }
+ )
+ df["foo"] = (
+ df["foo"]
+ .astype("category")
+ .cat.set_categories(["tiny", "small", "medium", "large"], ordered=True)
+ )
+
+ expected = pd.DataFrame(
+ {
+ "tiny": {"A": 0, "C": 0},
+ "small": {"A": 0, "C": 1},
+ "medium": {"A": 1, "C": 1},
+ "large": {"A": 3, "C": 2},
+ }
+ )
+ expected = expected.rename_axis("bar", axis="index")
+ expected.columns = pd.CategoricalIndex(
+ ["tiny", "small", "medium", "large"],
+ categories=["tiny", "small", "medium", "large"],
+ ordered=True,
+ name="foo",
+ dtype="category",
+ )
+
+ result = df.groupby(["bar", "foo"]).size().unstack()
+
+ tm.assert_frame_equal(result, expected)
| Added test to ensure that categories stay ordered when grouping
with missing values.
- [ ] closes #28597
- [X] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33588 | 2020-04-16T11:05:18Z | 2020-04-17T22:08:52Z | 2020-04-17T22:08:51Z | 2020-04-17T22:09:13Z |
BUG: Raise a TypeError when record_path doesn't point to an array | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 82c43811c0444..5762c60a0d10a 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -529,6 +529,7 @@ I/O
- Bug in :meth:`DataFrame.to_sql` where an ``AttributeError`` was raised when saving an out of bounds date (:issue:`26761`)
- Bug in :meth:`read_excel` did not correctly handle multiple embedded spaces in OpenDocument text cells. (:issue:`32207`)
- Bug in :meth:`read_json` was raising ``TypeError`` when reading a list of booleans into a Series. (:issue:`31464`)
+- Bug in :func:`pandas.io.json.json_normalize` where location specified by `record_path` doesn't point to an array. (:issue:`26284`)
Plotting
^^^^^^^^
diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py
index 69e9b111a6c20..e833fdc20d542 100644
--- a/pandas/io/json/_normalize.py
+++ b/pandas/io/json/_normalize.py
@@ -239,23 +239,23 @@ def _pull_field(
result = result[spec]
return result
- def _pull_records(js: Dict[str, Any], spec: Union[List, str]) -> Iterable:
+ def _pull_records(js: Dict[str, Any], spec: Union[List, str]) -> List:
"""
Interal function to pull field for records, and similar to
- _pull_field, but require to return Iterable. And will raise error
+ _pull_field, but require to return list. And will raise error
if has non iterable value.
"""
result = _pull_field(js, spec)
- # GH 31507 GH 30145, if result is not Iterable, raise TypeError if not
+ # GH 31507 GH 30145, GH 26284 if result is not list, raise TypeError if not
# null, otherwise return an empty list
- if not isinstance(result, Iterable):
+ if not isinstance(result, list):
if pd.isnull(result):
result = []
else:
raise TypeError(
- f"{js} has non iterable value {result} for path {spec}. "
- "Must be iterable or null."
+ f"{js} has non list value {result} for path {spec}. "
+ "Must be list or null."
)
return result
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py
index b7a9918ff46da..4a32f3809c82b 100644
--- a/pandas/tests/io/json/test_normalize.py
+++ b/pandas/tests/io/json/test_normalize.py
@@ -475,13 +475,15 @@ def test_nonetype_record_path(self, nulls_fixture):
expected = DataFrame({"i": 2}, index=[0])
tm.assert_equal(result, expected)
- def test_non_interable_record_path_errors(self):
- # see gh-30148
- test_input = {"state": "Texas", "info": 1}
+ @pytest.mark.parametrize("value", ["false", "true", "{}", "1", '"text"'])
+ def test_non_list_record_path_errors(self, value):
+ # see gh-30148, GH 26284
+ parsed_value = json.loads(value)
+ test_input = {"state": "Texas", "info": parsed_value}
test_path = "info"
msg = (
- f"{test_input} has non iterable value 1 for path {test_path}. "
- "Must be iterable or null."
+ f"{test_input} has non list value {parsed_value} for path {test_path}. "
+ "Must be list or null."
)
with pytest.raises(TypeError, match=msg):
json_normalize([test_input], record_path=[test_path])
| When `record_path` points to something that is Iterable but is not
a sequence in JSON world we will receive odd results.
```
>>> json_normalize([{'key': 'value'}], record_path='key')
0
0 v
1 a
2 l
3 u
4 e
```
Based on RFC 8259 (https://tools.ietf.org/html/rfc8259) a JSON value MUST be
object, array, number, or string, false, null, true. But only two of them
should be treated as Iterable.
```
An object is an unordered *collection* of zero or more name/value
pairs, where a name is a string and a value is a string, number,
boolean, null, object, or array.
An array is an ordered *sequence* of zero or more values.
--
https://tools.ietf.org/html/rfc8259#page-3
```
Based on that `[{'key': 'value'}]` and `{'key': 'value'}` should not be
treated in the same way. In `json_normalize` documentation `record_path`
is described as `Path in each object to list of records`.
So when we want to translate JSON to Python like an object we need to take
into consideration a list (sequence). Based on that `record_path` should
point out to `list`, not `Iterable`.
In specs I added all possibile values that are allowed in JSON and
should not be treated as a collection. There is a special case for null
value that is already implemented.
| type | value | Iterable | Should be treated as list |
|--------|---------|----------|---------------------------|
| object | {} | Yes | No (unordered list) |
| array | [] | Yes | Yes |
| number | 1 | No | No |
| string | "value" | Yes | No |
| false | False | No | No |
| null | Null | No | No (Check #30148) |
| true | True | No | No |
- [x] closes #26284
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33585 | 2020-04-16T08:19:35Z | 2020-04-16T23:38:21Z | 2020-04-16T23:38:21Z | 2020-04-17T05:04:34Z |
TST: add message check to pytest.raises (tests/arrays/boolean/test_arithmetic.py) | diff --git a/pandas/tests/arrays/boolean/test_arithmetic.py b/pandas/tests/arrays/boolean/test_arithmetic.py
index df4c218cbf9bf..9a8d03dc47529 100644
--- a/pandas/tests/arrays/boolean/test_arithmetic.py
+++ b/pandas/tests/arrays/boolean/test_arithmetic.py
@@ -23,20 +23,25 @@ def test_error(self, data, all_arithmetic_operators):
opa = getattr(data, op)
# invalid scalars
- with pytest.raises(TypeError):
+ msg="invalid scalars"
+ with pytest.raises(TypeError, match=msg):
ops("foo")
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
+
# invalid array-likes
- if op not in ("__mul__", "__rmul__"):
- # TODO(extension) numpy's mul with object array sees booleans as numbers
- with pytest.raises(TypeError):
- ops(pd.Series("foo", index=s.index))
+ if op not in ("__mul__", "__rmul__"):
+ # TODO(extension) numpy's mul with object array sees booleans as numbers
+ msg="invalid array-likes,numpy's mul with object array sees booleans as numbers"
+ with pytest.raises(TypeError, match=msg):
+ ops(pd.Series("foo", index=s.index))
+
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
- with pytest.raises(NotImplementedError):
+ msg="invalid array-likes"
+ with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
| - [ ] xref #30999
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33584 | 2020-04-16T06:35:01Z | 2020-04-16T14:08:59Z | null | 2020-04-16T14:08:59Z |
Issue 33428 fix | diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index 30c5ba0ed94b6..65af0b50178bb 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -359,8 +359,7 @@ def parallel_coordinates(
Examples
--------
>>> from matplotlib import pyplot as plt
- >>> df = pd.read_csv('https://raw.github.com/pandas-dev/pandas/master'
- '/pandas/tests/data/iris.csv')
+ >>> df = pd.read_csv('https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv')
>>> pd.plotting.parallel_coordinates(
df, 'Name',
color=('#556270', '#4ECDC4', '#C7F464'))
diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py
index fe3e1ff906919..9b4bb151c04ec 100644
--- a/pandas/tests/util/test_assert_frame_equal.py
+++ b/pandas/tests/util/test_assert_frame_equal.py
@@ -219,7 +219,7 @@ def test_frame_equal_unicode(df1, df2, msg, by_blocks_fixture, obj_fixture):
#
# Test ensures that `tm.assert_frame_equals` raises the right exception
# when comparing DataFrames containing differing unicode objects.
- msg = msg.format(obj=obj_fixture)
+ msg = f"{obj_fixture}"
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, by_blocks=by_blocks_fixture, obj=obj_fixture)
| - [ ] closes #33428
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33583 | 2020-04-16T06:34:31Z | 2020-04-22T04:00:45Z | null | 2020-04-22T04:17:57Z |
fixup some f-strings | diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py
index fe3e1ff906919..9b4bb151c04ec 100644
--- a/pandas/tests/util/test_assert_frame_equal.py
+++ b/pandas/tests/util/test_assert_frame_equal.py
@@ -219,7 +219,7 @@ def test_frame_equal_unicode(df1, df2, msg, by_blocks_fixture, obj_fixture):
#
# Test ensures that `tm.assert_frame_equals` raises the right exception
# when comparing DataFrames containing differing unicode objects.
- msg = msg.format(obj=obj_fixture)
+ msg = f"{obj_fixture}"
with pytest.raises(AssertionError, match=msg):
tm.assert_frame_equal(df1, df2, by_blocks=by_blocks_fixture, obj=obj_fixture)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
xref #29547 | https://api.github.com/repos/pandas-dev/pandas/pulls/33582 | 2020-04-16T06:34:21Z | 2020-04-18T19:44:17Z | null | 2020-04-19T00:12:02Z |
Issue 30999 fix | diff --git a/pandas/tests/arrays/boolean/test_arithmetic.py b/pandas/tests/arrays/boolean/test_arithmetic.py
index df4c218cbf9bf..9a8d03dc47529 100644
--- a/pandas/tests/arrays/boolean/test_arithmetic.py
+++ b/pandas/tests/arrays/boolean/test_arithmetic.py
@@ -23,20 +23,25 @@ def test_error(self, data, all_arithmetic_operators):
opa = getattr(data, op)
# invalid scalars
- with pytest.raises(TypeError):
+ msg="invalid scalars"
+ with pytest.raises(TypeError, match=msg):
ops("foo")
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
+
# invalid array-likes
- if op not in ("__mul__", "__rmul__"):
- # TODO(extension) numpy's mul with object array sees booleans as numbers
- with pytest.raises(TypeError):
- ops(pd.Series("foo", index=s.index))
+ if op not in ("__mul__", "__rmul__"):
+ # TODO(extension) numpy's mul with object array sees booleans as numbers
+ msg="invalid array-likes,numpy's mul with object array sees booleans as numbers"
+ with pytest.raises(TypeError, match=msg):
+ ops(pd.Series("foo", index=s.index))
+
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
- with pytest.raises(NotImplementedError):
+ msg="invalid array-likes"
+ with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33581 | 2020-04-16T06:34:05Z | 2020-05-22T10:39:09Z | null | 2020-05-22T10:39:09Z |
Update pytables version | diff --git a/ci/deps/azure-36-minimum_versions.yaml b/ci/deps/azure-36-minimum_versions.yaml
index e553330b962a2..ae8fffa59fd50 100644
--- a/ci/deps/azure-36-minimum_versions.yaml
+++ b/ci/deps/azure-36-minimum_versions.yaml
@@ -21,7 +21,7 @@ dependencies:
- numexpr=2.6.2
- numpy=1.13.3
- openpyxl=2.5.7
- - pytables=3.4.2
+ - pytables=3.4.3
- python-dateutil=2.7.3
- pytz=2017.2
- scipy=0.19.0
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index 7fa2233e79fc0..d392e151e3f97 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -262,7 +262,7 @@ BeautifulSoup4 4.6.0 HTML parser for read_html (see :ref
Jinja2 Conditional formatting with DataFrame.style
PyQt4 Clipboard I/O
PyQt5 Clipboard I/O
-PyTables 3.4.2 HDF5-based reading / writing
+PyTables 3.4.3 HDF5-based reading / writing
SQLAlchemy 1.1.4 SQL support for databases other than sqlite
SciPy 0.19.0 Miscellaneous statistical functions
XLsxWriter 0.9.8 Excel writing
@@ -279,7 +279,7 @@ psycopg2 PostgreSQL engine for sqlalchemy
pyarrow 0.12.0 Parquet, ORC (requires 0.13.0), and feather reading / writing
pymysql 0.7.11 MySQL engine for sqlalchemy
pyreadstat SPSS files (.sav) reading
-pytables 3.4.2 HDF5 reading / writing
+pytables 3.4.3 HDF5 reading / writing
pyxlsb 1.0.6 Reading for xlsb files
qtpy Clipboard I/O
s3fs 0.3.0 Amazon S3 access
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 82c43811c0444..fa5e4e00cb6fd 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -104,7 +104,7 @@ Other enhancements
Increased minimum versions for dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Some minimum supported versions of dependencies were updated (:issue:`29766`, :issue:`29723`).
+Some minimum supported versions of dependencies were updated (:issue:`29766`, :issue:`29723`, pytables >= 3.4.3).
If installed, we now require:
+-----------------+-----------------+----------+---------+
@@ -113,6 +113,49 @@ If installed, we now require:
| python-dateutil | 2.7.3 | X | |
+-----------------+-----------------+----------+---------+
+For `optional libraries <https://dev.pandas.io/docs/install.html#dependencies>`_ the general recommendation is to use the latest version.
+The following table lists the lowest version per library that is currently being tested throughout the development of pandas.
+Optional libraries below the lowest tested version may still work, but are not considered supported.
+
++-----------------+-----------------+---------+
+| Package | Minimum Version | Changed |
++=================+=================+=========+
+| beautifulsoup4 | 4.6.0 | |
++-----------------+-----------------+---------+
+| fastparquet | 0.3.2 | |
++-----------------+-----------------+---------+
+| gcsfs | 0.2.2 | |
++-----------------+-----------------+---------+
+| lxml | 3.8.0 | |
++-----------------+-----------------+---------+
+| matplotlib | 2.2.2 | |
++-----------------+-----------------+---------+
+| numba | 0.46.0 | |
++-----------------+-----------------+---------+
+| openpyxl | 2.5.7 | |
++-----------------+-----------------+---------+
+| pyarrow | 0.13.0 | |
++-----------------+-----------------+---------+
+| pymysql | 0.7.1 | |
++-----------------+-----------------+---------+
+| pytables | 3.4.3 | X |
++-----------------+-----------------+---------+
+| s3fs | 0.3.0 | |
++-----------------+-----------------+---------+
+| scipy | 0.19.0 | |
++-----------------+-----------------+---------+
+| sqlalchemy | 1.1.4 | |
++-----------------+-----------------+---------+
+| xarray | 0.8.2 | |
++-----------------+-----------------+---------+
+| xlrd | 1.1.0 | |
++-----------------+-----------------+---------+
+| xlsxwriter | 0.9.8 | |
++-----------------+-----------------+---------+
+| xlwt | 1.2.0 | |
++-----------------+-----------------+---------+
+
+See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for more.
Development Changes
^^^^^^^^^^^^^^^^^^^
diff --git a/environment.yml b/environment.yml
index 67b2df4dc5a0e..8893302b4c9b2 100644
--- a/environment.yml
+++ b/environment.yml
@@ -97,7 +97,7 @@ dependencies:
- python-snappy # required by pyarrow
- pyqt>=5.9.2 # pandas.read_clipboard
- - pytables>=3.4.2 # pandas.read_hdf, DataFrame.to_hdf
+ - pytables>=3.4.3 # pandas.read_hdf, DataFrame.to_hdf
- s3fs # pandas.read_csv... when using 's3://...' path
- sqlalchemy # pandas.read_sql, DataFrame.to_sql
- xarray # DataFrame.to_xarray
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index cd711bcace013..7e253a52a9c00 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -17,13 +17,13 @@
"openpyxl": "2.5.7",
"pandas_gbq": "0.8.0",
"pyarrow": "0.13.0",
- "pytables": "3.4.2",
+ "pytables": "3.4.3",
"pytest": "5.0.1",
"pyxlsb": "1.0.6",
"s3fs": "0.3.0",
"scipy": "0.19.0",
"sqlalchemy": "1.1.4",
- "tables": "3.4.2",
+ "tables": "3.4.3",
"tabulate": "0.8.3",
"xarray": "0.8.2",
"xlrd": "1.1.0",
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 5cef428d35452..8a954fabd2d8d 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -64,7 +64,7 @@ fastparquet>=0.3.2
pyarrow>=0.13.1
python-snappy
pyqt5>=5.9.2
-tables>=3.4.2
+tables>=3.4.3
s3fs
sqlalchemy
xarray
| Based on https://github.com/pandas-dev/pandas/pull/32700#pullrequestreview-391684059 I update pytables minimal required version to 3.4.3
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33580 | 2020-04-16T06:10:51Z | 2020-04-16T19:45:00Z | 2020-04-16T19:45:00Z | 2020-04-16T19:45:01Z |
solve bug #32580 | diff --git a/pandas/core/base.py b/pandas/core/base.py
index 5945d8a4b432d..dab2e8a217dd5 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -438,7 +438,19 @@ def is_any_frame() -> bool:
# we have a dict of DataFrames
# return a MI DataFrame
- return concat([result[k] for k in keys], keys=keys, axis=1), True
+ #return concat([result[k] for k in keys], keys=keys, axis=1), True
+
+ # #issue 32580: Grouped-by column loses name when empty list of aggregations is specified.
+ #Bug in the method `DataFrame.groupby` lost index, when one of the ``agg`` keys referenced an empty list (:issue:`32580`)
+ #return concat([result[k] for k in keys], keys=keys, axis=1), True
+ keys_to_use = [k for k in keys if not result[k].empty]
+ # check: if at least one DataFrame is not empty
+ if not keys_to_use:
+ keys_to_use=keys_to_use
+ else:
+ keys_to_use=keys_to_use
+ return(concat([result[k] for k in keys_to_use], keys=keys_to_use, axis=1), True)
+
elif isinstance(self, ABCSeries) and is_any_series():
| …ns is specified.
- [ ] closes #32580
- [ ] 0 tests added / 0 passed
- [ ] passes `black pandas`
| https://api.github.com/repos/pandas-dev/pandas/pulls/33579 | 2020-04-16T04:12:36Z | 2020-04-16T15:13:16Z | null | 2020-04-16T15:13:16Z |
DOC: Added check for standard | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 45b7db74fa409..838cf88300db5 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -377,5 +377,14 @@ if [[ -z "$CHECK" || "$CHECK" == "typing" ]]; then
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
+### Checking for standardized documentation ###
+echo "Checking which files in pandas/doc/source have standardized documentation"
+
+for entry in $(find ../doc/source/ -type f)
+do
+ if grep -q Pandas "$entry" || grep -q *pandas* "$entry" ; then
+ echo "$entry"
+ fi
+done
exit $RET
| - [ ] xref #32316
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This is my first time contributing, if I made any errors please tell me! I tried to continue what @joybhallaa was working on. I made a check to see which files are not using the standard documentation. | https://api.github.com/repos/pandas-dev/pandas/pulls/33578 | 2020-04-16T00:32:03Z | 2020-04-16T15:18:59Z | null | 2020-04-16T15:18:59Z |
TST: Added test case for DataFrame.at | diff --git a/pandas/tests/frame/indexing/test_at.py b/pandas/tests/frame/indexing/test_at.py
new file mode 100644
index 0000000000000..9c2d88f1589c2
--- /dev/null
+++ b/pandas/tests/frame/indexing/test_at.py
@@ -0,0 +1,14 @@
+from datetime import datetime, timezone
+
+import pandas as pd
+import pandas._testing as tm
+
+
+def test_at_timezone():
+ # https://github.com/pandas-dev/pandas/issues/33544
+ result = pd.DataFrame({"foo": [datetime(2000, 1, 1)]})
+ result.at[0, "foo"] = datetime(2000, 1, 2, tzinfo=timezone.utc)
+ expected = pd.DataFrame(
+ {"foo": [datetime(2000, 1, 2, tzinfo=timezone.utc)]}, dtype=object
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] closes #33544
- [x] tests added
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33577 | 2020-04-15T23:31:59Z | 2020-04-21T13:25:22Z | 2020-04-21T13:25:22Z | 2020-04-21T14:53:36Z |
ENH: Add index to output of assert_series_equal on category and datetime values | diff --git a/pandas/_testing.py b/pandas/_testing.py
index 1f6b645c821c8..d2b48b54e8ab0 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -999,7 +999,12 @@ def _raise(left, right, err_msg):
def assert_extension_array_equal(
- left, right, check_dtype=True, check_less_precise=False, check_exact=False
+ left,
+ right,
+ check_dtype=True,
+ check_less_precise=False,
+ check_exact=False,
+ index_values=None,
):
"""
Check that left and right ExtensionArrays are equal.
@@ -1016,6 +1021,8 @@ def assert_extension_array_equal(
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
+ index_values : numpy.ndarray, default None
+ optional index (shared by both left and right), used in output.
Notes
-----
@@ -1031,17 +1038,23 @@ def assert_extension_array_equal(
if hasattr(left, "asi8") and type(right) == type(left):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
- assert_numpy_array_equal(np.asarray(left.asi8), np.asarray(right.asi8))
+ assert_numpy_array_equal(
+ np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
+ )
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
- assert_numpy_array_equal(left_na, right_na, obj="ExtensionArray NA mask")
+ assert_numpy_array_equal(
+ left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
+ )
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
- assert_numpy_array_equal(left_valid, right_valid, obj="ExtensionArray")
+ assert_numpy_array_equal(
+ left_valid, right_valid, obj="ExtensionArray", index_values=index_values
+ )
else:
_testing.assert_almost_equal(
left_valid,
@@ -1049,6 +1062,7 @@ def assert_extension_array_equal(
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj="ExtensionArray",
+ index_values=index_values,
)
@@ -1181,12 +1195,17 @@ def assert_series_equal(
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj=str(obj),
+ index_values=np.asarray(left.index),
)
elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
- assert_extension_array_equal(left._values, right._values)
+ assert_extension_array_equal(
+ left._values, right._values, index_values=np.asarray(left.index)
+ )
elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
- assert_extension_array_equal(left._values, right._values)
+ assert_extension_array_equal(
+ left._values, right._values, index_values=np.asarray(left.index)
+ )
else:
_testing.assert_almost_equal(
left._values,
diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py
index 8bf3d82672695..337a06b91e443 100644
--- a/pandas/tests/util/test_assert_series_equal.py
+++ b/pandas/tests/util/test_assert_series_equal.py
@@ -165,7 +165,7 @@ def test_series_equal_length_mismatch(check_less_precise):
tm.assert_series_equal(s1, s2, check_less_precise=check_less_precise)
-def test_series_equal_values_mismatch(check_less_precise):
+def test_series_equal_numeric_values_mismatch(check_less_precise):
msg = """Series are different
Series values are different \\(33\\.33333 %\\)
@@ -180,6 +180,38 @@ def test_series_equal_values_mismatch(check_less_precise):
tm.assert_series_equal(s1, s2, check_less_precise=check_less_precise)
+def test_series_equal_categorical_values_mismatch(check_less_precise):
+ msg = """Series are different
+
+Series values are different \\(66\\.66667 %\\)
+\\[index\\]: \\[0, 1, 2\\]
+\\[left\\]: \\[a, b, c\\]
+Categories \\(3, object\\): \\[a, b, c\\]
+\\[right\\]: \\[a, c, b\\]
+Categories \\(3, object\\): \\[a, b, c\\]"""
+
+ s1 = Series(Categorical(["a", "b", "c"]))
+ s2 = Series(Categorical(["a", "c", "b"]))
+
+ with pytest.raises(AssertionError, match=msg):
+ tm.assert_series_equal(s1, s2, check_less_precise=check_less_precise)
+
+
+def test_series_equal_datetime_values_mismatch(check_less_precise):
+ msg = """numpy array are different
+
+numpy array values are different \\(100.0 %\\)
+\\[index\\]: \\[0, 1, 2\\]
+\\[left\\]: \\[1514764800000000000, 1514851200000000000, 1514937600000000000\\]
+\\[right\\]: \\[1549065600000000000, 1549152000000000000, 1549238400000000000\\]"""
+
+ s1 = Series(pd.date_range("2018-01-01", periods=3, freq="D"))
+ s2 = Series(pd.date_range("2019-02-02", periods=3, freq="D"))
+
+ with pytest.raises(AssertionError, match=msg):
+ tm.assert_series_equal(s1, s2, check_less_precise=check_less_precise)
+
+
def test_series_equal_categorical_mismatch(check_categorical):
msg = """Attributes of Series are different
| - [ ] closes #xxxx
- [X] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This is really an extension of #31435, which added index output to the messages produced when series (and DataFrames) are compared. I failed to notice that categorical values and datetime values were evaluated through a different code path and didn't add the index output for these data types. Since the original problem (index reordering) could just as easily happen for these types the functionality should be there as well. Also, it makes the output more consistent.
I added a couple of new tests to surface and check the additional output. | https://api.github.com/repos/pandas-dev/pandas/pulls/33575 | 2020-04-15T22:19:44Z | 2020-04-26T20:22:51Z | 2020-04-26T20:22:51Z | 2020-04-26T20:22:54Z |
BUG: DatetimeIndex.insert on empty can preserve freq | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 82c43811c0444..f892dcdd364b9 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -479,6 +479,7 @@ Indexing
- Bug in :meth:`DataFrame.copy` _item_cache not invalidated after copy causes post-copy value updates to not be reflected (:issue:`31784`)
- Bug in `Series.__getitem__` with an integer key and a :class:`MultiIndex` with leading integer level failing to raise ``KeyError`` if the key is not present in the first level (:issue:`33355`)
- Bug in :meth:`DataFrame.iloc` when slicing a single column-:class:`DataFrame`` with ``ExtensionDtype`` (e.g. ``df.iloc[:, :1]``) returning an invalid result (:issue:`32957`)
+- Bug in :meth:`DatetimeIndex.insert` and :meth:`TimedeltaIndex.insert` causing index ``freq`` to be lost when setting an element into an empty :class:`Series` (:issue:`33573`)
Missing
^^^^^^^
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index c15680a47d216..b83b64c144681 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -941,6 +941,10 @@ def insert(self, loc, item):
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
+ elif self.freq is not None:
+ # Adding a single item to an empty index may preserve freq
+ if self.freq.is_on_offset(item):
+ freq = self.freq
item = item.asm8
try:
diff --git a/pandas/tests/indexes/datetimes/test_insert.py b/pandas/tests/indexes/datetimes/test_insert.py
index 4abb4f0006444..034e1c6a4e1b0 100644
--- a/pandas/tests/indexes/datetimes/test_insert.py
+++ b/pandas/tests/indexes/datetimes/test_insert.py
@@ -24,6 +24,20 @@ def test_insert_invalid_na(self, tz):
with pytest.raises(TypeError, match="incompatible label"):
idx.insert(0, np.timedelta64("NaT"))
+ def test_insert_empty_preserves_freq(self, tz_naive_fixture):
+ # GH#33573
+ tz = tz_naive_fixture
+ dti = DatetimeIndex([], tz=tz, freq="D")
+ item = Timestamp("2017-04-05").tz_localize(tz)
+
+ result = dti.insert(0, item)
+ assert result.freq == dti.freq
+
+ # But not when we insert an item that doesnt conform to freq
+ dti = DatetimeIndex([], tz=tz, freq="W-THU")
+ result = dti.insert(0, item)
+ assert result.freq is None
+
def test_insert(self):
idx = DatetimeIndex(["2000-01-04", "2000-01-01", "2000-01-02"], name="idx")
diff --git a/pandas/tests/indexes/timedeltas/test_insert.py b/pandas/tests/indexes/timedeltas/test_insert.py
index b214e009db869..e65c871428bab 100644
--- a/pandas/tests/indexes/timedeltas/test_insert.py
+++ b/pandas/tests/indexes/timedeltas/test_insert.py
@@ -93,9 +93,15 @@ def test_insert_dont_cast_strings(self):
def test_insert_empty(self):
# Corner case inserting with length zero doesnt raise IndexError
+ # GH#33573 for freq preservation
idx = timedelta_range("1 Day", periods=3)
td = idx[0]
- idx[:0].insert(0, td)
- idx[:0].insert(1, td)
- idx[:0].insert(-1, td)
+ result = idx[:0].insert(0, td)
+ assert result.freq == "D"
+
+ result = idx[:0].insert(1, td)
+ assert result.freq == "D"
+
+ result = idx[:0].insert(-1, td)
+ assert result.freq == "D"
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 522ed4df96ad2..900374824eb25 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -284,6 +284,8 @@ def test_setitem(datetime_series, string_series):
expected = string_series.append(app)
tm.assert_series_equal(s, expected)
+
+def test_setitem_empty_series():
# Test for issue #10193
key = pd.Timestamp("2012-01-01")
series = pd.Series(dtype=object)
@@ -291,10 +293,12 @@ def test_setitem(datetime_series, string_series):
expected = pd.Series(47, [key])
tm.assert_series_equal(series, expected)
+ # GH#33573 our index should retain its freq
series = pd.Series([], pd.DatetimeIndex([], freq="D"), dtype=object)
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq="D"))
tm.assert_series_equal(series, expected)
+ assert series.index.freq == expected.index.freq
def test_setitem_dtypes():
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33573 | 2020-04-15T21:21:47Z | 2020-04-16T20:53:03Z | 2020-04-16T20:53:03Z | 2020-04-16T20:59:42Z |
BUG: set_levels set wrong order levels for MutiIndex | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 4e2d07ddf9225..a046d60e79ed0 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -741,7 +741,14 @@ def _set_levels(
self._tuples = None
self._reset_cache()
- def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
+ def set_levels(
+ self,
+ levels,
+ level=None,
+ inplace=False,
+ verify_integrity=True,
+ change_codes=False,
+ ):
"""
Set new levels on MultiIndex. Defaults to returning new index.
@@ -755,6 +762,8 @@ def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
If True, mutates in place.
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
+ change_codes : bool, default False
+ If True, resets the codes for the levels specified.
Returns
-------
@@ -845,6 +854,29 @@ def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
idx._set_levels(
levels, level=level, validate=True, verify_integrity=verify_integrity
)
+ # reset codes
+ if change_codes:
+ all_codes = []
+ for lev_nums, lev in zip(levels, level):
+ # for each lev, construct a code
+ codes = []
+ code = 0
+ for num in lev_nums:
+ if num not in codes:
+ codes.append(code)
+ code += 1
+ else:
+ codes.append(code)
+ new_codes = []
+ index = 0
+ for i in range(len(self._codes[lev])):
+ if self._codes[lev][i] != -1:
+ new_codes.append(codes[index % len(codes)])
+ else:
+ new_codes.append(-1)
+ index += 1
+ all_codes.append(new_codes)
+ idx._set_codes(all_codes, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py
index 8a3deca0236e4..7f9c3e58baad6 100644
--- a/pandas/tests/indexes/multi/test_get_set.py
+++ b/pandas/tests/indexes/multi/test_get_set.py
@@ -329,3 +329,25 @@ def test_set_levels_with_iterable():
[expected_sizes, colors], names=["size", "color"]
)
tm.assert_index_equal(result, expected)
+
+
+def test_set_levels_with_changed_multiindex():
+ # GH33420
+ np.random.seed(seed=0)
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "one", "one", "one", "one", "one", "one", "one"],
+ ["three", "four", "three", "four", "three", "four", "three", "four"],
+ ]
+ ran_array = np.random.rand(8, 4)
+ test_df = pd.DataFrame(ran_array, index=arrays)
+ test_df.index.set_levels([3, 4], level=2, inplace=True, change_codes=True)
+ correct_df = pd.DataFrame(ran_array, index=arrays)
+ correct_df.index = pd.MultiIndex.from_arrays(
+ [
+ test_df.index.get_level_values(0),
+ test_df.index.get_level_values(1),
+ np.tile([3, 4], 4),
+ ]
+ )
+ tm.assert_equal(test_df, correct_df)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Added parameter that allows user to reset codes | https://api.github.com/repos/pandas-dev/pandas/pulls/33572 | 2020-04-15T20:21:14Z | 2020-04-17T16:07:47Z | null | 2020-04-17T16:07:47Z |
BUG: Debug grouped quantile with NA values | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 2a641a37b46d8..65cc8df30cdce 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -598,6 +598,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrameGroupBy.agg` with dictionary input losing ``ExtensionArray`` dtypes (:issue:`32194`)
- Bug in :meth:`DataFrame.resample` where an ``AmbiguousTimeError`` would be raised when the resulting timezone aware :class:`DatetimeIndex` had a DST transition at midnight (:issue:`25758`)
- Bug in :meth:`DataFrame.groupby` where a ``ValueError`` would be raised when grouping by a categorical column with read-only categories and ``sort=False`` (:issue:`33410`)
+- Bug in :meth:`DataFrameGroupBy.quantile` where incorrect values would be returned when missing group keys were present (:issue:`33569`)
- Bug in :meth:`GroupBy.first` and :meth:`GroupBy.last` where None is not preserved in object dtype (:issue:`32800`)
Reshaping
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 53e66c4b8723d..e7ef13303646e 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -778,9 +778,14 @@ def group_quantile(ndarray[float64_t] out,
if not mask[i]:
non_na_counts[lab] += 1
- # Get an index of values sorted by labels and then values
- order = (values, labels)
- sort_arr = np.lexsort(order).astype(np.int64, copy=False)
+ # Get an index of values sorted by labels and then values,
+ # make sure missing labels sort to the back of the array
+ if labels.size:
+ labels_for_lexsort = np.where(labels == -1, labels.max() + 1, labels)
+ else:
+ labels_for_lexsort = labels
+
+ sort_arr = np.lexsort((values, labels_for_lexsort)).astype(np.int64, copy=False)
with nogil:
for i in range(ngroups):
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 346de55f551df..1bc236f459992 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1507,14 +1507,25 @@ def test_quantile_missing_group_values_no_segfaults():
grp.quantile()
-def test_quantile_missing_group_values_correct_results():
+@pytest.mark.parametrize(
+ "key",
+ [
+ ["a"] * 4 + ["b"] * 3 + [np.nan],
+ ["a"] * 3 + [np.nan] + ["b"] * 4,
+ ["a"] * 3 + [np.nan] + ["b"] * 3 + [np.nan],
+ ],
+)
+@pytest.mark.parametrize(
+ "quantile, expected_value", [(0.0, 1.0), (0.5, 2.0), (1.0, 3.0)]
+)
+def test_quantile_missing_group_values_correct_results(key, quantile, expected_value):
# GH 28662
- data = np.array([1.0, np.nan, 3.0, np.nan])
- df = pd.DataFrame(dict(key=data, val=range(4)))
-
- result = df.groupby("key").quantile()
+ # https://github.com/pandas-dev/pandas/issues/33569
+ value = np.array([1.0, 2.0, 3.0, np.nan] * 2)
+ df = pd.DataFrame({"key": key, "value": value})
+ result = df.groupby("key").quantile(quantile)
expected = pd.DataFrame(
- [1.0, 3.0], index=pd.Index([1.0, 3.0], name="key"), columns=["val"]
+ [expected_value] * 2, index=pd.Index(["a", "b"], name="key"), columns=["value"]
)
tm.assert_frame_equal(result, expected)
| - [x] closes #33569
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33571 | 2020-04-15T20:10:13Z | 2020-05-04T12:48:43Z | null | 2020-05-04T12:48:47Z |
Reverted cython pin | diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml
index 17c3d318ce54d..29ebfe2639e32 100644
--- a/ci/deps/azure-37-numpydev.yaml
+++ b/ci/deps/azure-37-numpydev.yaml
@@ -14,8 +14,7 @@ dependencies:
- pytz
- pip
- pip:
- - cython==0.29.16
- # GH#33507 cython 3.0a1 is causing TypeErrors 2020-04-13
+ - cython>=0.29.16
- "git+git://github.com/dateutil/dateutil.git"
- "-f https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com"
- "--pre"
| reverts #33534 and closes #33507 | https://api.github.com/repos/pandas-dev/pandas/pulls/33570 | 2020-04-15T19:37:17Z | 2020-04-22T04:17:18Z | null | 2020-06-11T15:40:31Z |
DOC: Fix heading capitalization in doc/source/whatsnew - part5 (#32550) | diff --git a/doc/source/whatsnew/v0.18.0.rst b/doc/source/whatsnew/v0.18.0.rst
index e371f1d9fe69a..fbe24675ddfe2 100644
--- a/doc/source/whatsnew/v0.18.0.rst
+++ b/doc/source/whatsnew/v0.18.0.rst
@@ -1,7 +1,7 @@
.. _whatsnew_0180:
-v0.18.0 (March 13, 2016)
-------------------------
+Version 0.18.0 (March 13, 2016)
+-------------------------------
{{ header }}
@@ -145,7 +145,7 @@ This continues to work as before for function or dict-like values.
.. _whatsnew_0180.enhancements.rangeindex:
-Range index
+Range Index
^^^^^^^^^^^
A ``RangeIndex`` has been added to the ``Int64Index`` sub-classes to support a memory saving alternative for common use cases. This has a similar implementation to the python ``range`` object (``xrange`` in python 2), in that it only stores the start, stop, and step values for the index. It will transparently interact with the user API, converting to ``Int64Index`` if needed.
@@ -456,8 +456,8 @@ New behavior:
.. _whatsnew_0180.enhancements.xarray:
-to_xarray
-^^^^^^^^^
+Method to_xarray
+^^^^^^^^^^^^^^^^
In a future version of pandas, we will be deprecating ``Panel`` and other > 2 ndim objects. In order to provide for continuity,
all ``NDFrame`` objects have gained the ``.to_xarray()`` method in order to convert to ``xarray`` objects, which has
diff --git a/doc/source/whatsnew/v0.18.1.rst b/doc/source/whatsnew/v0.18.1.rst
index 2c6e8f0e27154..13ed6bc38163b 100644
--- a/doc/source/whatsnew/v0.18.1.rst
+++ b/doc/source/whatsnew/v0.18.1.rst
@@ -1,7 +1,7 @@
.. _whatsnew_0181:
-v0.18.1 (May 3, 2016)
----------------------
+Version 0.18.1 (May 3, 2016)
+----------------------------
{{ header }}
@@ -61,8 +61,8 @@ Tuesday after MLK Day (Monday is skipped because it's a holiday)
.. _whatsnew_0181.deferred_ops:
-``.groupby(..)`` syntax with window and resample operations
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``.groupby(..)`` syntax with window and resample operations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``.groupby(...)`` has been enhanced to provide convenient syntax when working with ``.rolling(..)``, ``.expanding(..)`` and ``.resample(..)`` per group, see (:issue:`12486`, :issue:`12738`).
@@ -111,7 +111,7 @@ Now you can do:
.. _whatsnew_0181.enhancements.method_chain:
Method chaining improvements
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following methods / indexers now accept a ``callable``. It is intended to make
these more useful in method chains, see the :ref:`documentation <indexing.callable>`.
@@ -121,8 +121,8 @@ these more useful in method chains, see the :ref:`documentation <indexing.callab
- ``.loc[]``, ``iloc[]`` and ``.ix[]``
- ``[]`` indexing
-``.where()`` and ``.mask()``
-""""""""""""""""""""""""""""
+Methods ``.where()`` and ``.mask()``
+""""""""""""""""""""""""""""""""""""
These can accept a callable for the condition and ``other``
arguments.
@@ -134,8 +134,8 @@ arguments.
'C': [7, 8, 9]})
df.where(lambda x: x > 4, lambda x: x + 10)
-``.loc[]``, ``.iloc[]``, ``.ix[]``
-""""""""""""""""""""""""""""""""""
+Methods ``.loc[]``, ``.iloc[]``, ``.ix[]``
+""""""""""""""""""""""""""""""""""""""""""
These can accept a callable, and a tuple of callable as a slicer. The callable
can return a valid boolean indexer or anything which is valid for these indexer's input.
@@ -148,8 +148,8 @@ can return a valid boolean indexer or anything which is valid for these indexer'
# callable returns list of labels
df.loc[lambda x: [1, 2], lambda x: ['A', 'B']]
-``[]`` indexing
-"""""""""""""""
+Indexing with ``[]``
+""""""""""""""""""""
Finally, you can use a callable in ``[]`` indexing of Series, DataFrame and Panel.
The callable must return a valid input for ``[]`` indexing depending on its
@@ -171,7 +171,7 @@ without using temporary variable.
.. _whatsnew_0181.partial_string_indexing:
-Partial string indexing on ``DateTimeIndex`` when part of a ``MultiIndex``
+Partial string indexing on ``DatetimeIndex`` when part of a ``MultiIndex``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Partial string indexing now matches on ``DateTimeIndex`` when part of a ``MultiIndex`` (:issue:`10331`)
@@ -306,8 +306,8 @@ API changes
.. _whatsnew_0181.api.groubynth:
-``.groupby(..).nth()`` changes
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``.groupby(..).nth()`` changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The index in ``.groupby(..).nth()`` output is now more consistent when the ``as_index`` argument is passed (:issue:`11039`):
@@ -414,7 +414,7 @@ New behaviour:
.. _whatsnew_0181.apply_resample:
-Using ``.apply`` on groupby resampling
+Using ``.apply`` on GroupBy resampling
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using ``apply`` on resampling groupby operations (using a ``pd.TimeGrouper``) now has the same output types as similar ``apply`` calls on other groupby operations. (:issue:`11742`).
@@ -513,8 +513,8 @@ In addition to this error change, several others have been made as well:
.. _whatsnew_0181.api.to_datetime:
-``to_datetime`` error changes
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``to_datetime`` error changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Bugs in ``pd.to_datetime()`` when passing a ``unit`` with convertible entries and ``errors='coerce'`` or non-convertible with ``errors='ignore'``. Furthermore, an ``OutOfBoundsDateime`` exception will be raised when an out-of-range value is encountered for that unit when ``errors='raise'``. (:issue:`11758`, :issue:`13052`, :issue:`13059`)
diff --git a/doc/source/whatsnew/v0.19.0.rst b/doc/source/whatsnew/v0.19.0.rst
index 7390b80217b2c..2d3bb7056b75a 100644
--- a/doc/source/whatsnew/v0.19.0.rst
+++ b/doc/source/whatsnew/v0.19.0.rst
@@ -1,7 +1,7 @@
.. _whatsnew_0190:
-v0.19.0 (October 2, 2016)
--------------------------
+Version 0.19.0 (October 2, 2016)
+--------------------------------
{{ header }}
@@ -37,8 +37,8 @@ New features
.. _whatsnew_0190.enhancements.asof_merge:
-``merge_asof`` for asof-style time-series joining
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Function ``merge_asof`` for asof-style time-series joining
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A long-time requested feature has been added through the :func:`merge_asof` function, to
support asof style joining of time-series (:issue:`1870`, :issue:`13695`, :issue:`13709`, :issue:`13902`). Full documentation is
@@ -127,8 +127,8 @@ passed DataFrame (``trades`` in this case), with the fields of the ``quotes`` me
.. _whatsnew_0190.enhancements.rolling_ts:
-``.rolling()`` is now time-series aware
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``.rolling()`` is now time-series aware
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``.rolling()`` objects are now time-series aware and can accept a time-series offset (or convertible) for the ``window`` argument (:issue:`13327`, :issue:`12995`).
See the full documentation :ref:`here <stats.moments.ts>`.
@@ -186,8 +186,8 @@ default of the index) in a DataFrame.
.. _whatsnew_0190.enhancements.read_csv_dupe_col_names_support:
-``read_csv`` has improved support for duplicate column names
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``read_csv`` has improved support for duplicate column names
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. ipython:: python
:suppress:
@@ -225,8 +225,8 @@ contained the values ``[0, 3]``.
.. _whatsnew_0190.enhancements.read_csv_categorical:
-``read_csv`` supports parsing ``Categorical`` directly
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``read_csv`` supports parsing ``Categorical`` directly
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The :func:`read_csv` function now supports parsing a ``Categorical`` column when
specified as a dtype (:issue:`10153`). Depending on the structure of the data,
@@ -394,8 +394,8 @@ After upgrading pandas, you may see *new* ``RuntimeWarnings`` being issued from
.. _whatsnew_0190.get_dummies_dtypes:
-``get_dummies`` now returns integer dtypes
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``get_dummies`` now returns integer dtypes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``pd.get_dummies`` function now returns dummy-encoded columns as small integers, rather than floats (:issue:`8725`). This should provide an improved memory footprint.
@@ -731,8 +731,8 @@ A ``Series`` will now correctly promote its dtype for assignment with incompat v
.. _whatsnew_0190.api.to_datetime_coerce:
-``.to_datetime()`` changes
-^^^^^^^^^^^^^^^^^^^^^^^^^^
+Function ``.to_datetime()`` changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Previously if ``.to_datetime()`` encountered mixed integers/floats and strings, but no datetimes with ``errors='coerce'`` it would convert all to ``NaT``.
@@ -809,8 +809,8 @@ resulting dtype will be upcast, which is unchanged from previous.
.. _whatsnew_0190.api.describe:
-``.describe()`` changes
-^^^^^^^^^^^^^^^^^^^^^^^
+Method ``.describe()`` changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Percentile identifiers in the index of a ``.describe()`` output will now be rounded to the least precision that keeps them distinct (:issue:`13104`)
@@ -863,8 +863,8 @@ Furthermore:
``Period`` changes
^^^^^^^^^^^^^^^^^^
-``PeriodIndex`` now has ``period`` dtype
-""""""""""""""""""""""""""""""""""""""""
+The ``PeriodIndex`` now has ``period`` dtype
+""""""""""""""""""""""""""""""""""""""""""""
``PeriodIndex`` now has its own ``period`` dtype. The ``period`` dtype is a
pandas extension dtype like ``category`` or the :ref:`timezone aware dtype <timeseries.timezone_series>` (``datetime64[ns, tz]``) (:issue:`13941`).
@@ -1151,8 +1151,8 @@ As a consequence, ``groupby`` and ``set_index`` also preserve categorical dtypes
.. _whatsnew_0190.api.autogenerated_chunksize_index:
-``read_csv`` will progressively enumerate chunks
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Function ``read_csv`` will progressively enumerate chunks
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When :func:`read_csv` is called with ``chunksize=n`` and without specifying an index,
each chunk used to have an independently generated index from ``0`` to ``n-1``.
@@ -1190,8 +1190,8 @@ Sparse changes
These changes allow pandas to handle sparse data with more dtypes, and for work to make a smoother experience with data handling.
-``int64`` and ``bool`` support enhancements
-"""""""""""""""""""""""""""""""""""""""""""
+Types ``int64`` and ``bool`` support enhancements
+"""""""""""""""""""""""""""""""""""""""""""""""""
Sparse data structures now gained enhanced support of ``int64`` and ``bool`` ``dtype`` (:issue:`667`, :issue:`13849`).
diff --git a/doc/source/whatsnew/v0.19.1.rst b/doc/source/whatsnew/v0.19.1.rst
index a89d1461073bd..9e6b884e08587 100644
--- a/doc/source/whatsnew/v0.19.1.rst
+++ b/doc/source/whatsnew/v0.19.1.rst
@@ -1,7 +1,7 @@
.. _whatsnew_0191:
-v0.19.1 (November 3, 2016)
---------------------------
+Version 0.19.1 (November 3, 2016)
+---------------------------------
{{ header }}
diff --git a/doc/source/whatsnew/v0.19.2.rst b/doc/source/whatsnew/v0.19.2.rst
index 023bc78081ec9..924c95f21ceff 100644
--- a/doc/source/whatsnew/v0.19.2.rst
+++ b/doc/source/whatsnew/v0.19.2.rst
@@ -1,7 +1,7 @@
.. _whatsnew_0192:
-v0.19.2 (December 24, 2016)
----------------------------
+Version 0.19.2 (December 24, 2016)
+----------------------------------
{{ header }}
diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst
index 06bbd9679bb4d..09980b52b6b3a 100644
--- a/doc/source/whatsnew/v0.20.0.rst
+++ b/doc/source/whatsnew/v0.20.0.rst
@@ -1,7 +1,7 @@
.. _whatsnew_0200:
-v0.20.1 (May 5, 2017)
----------------------
+Version 0.20.1 (May 5, 2017)
+----------------------------
{{ header }}
@@ -47,8 +47,8 @@ New features
.. _whatsnew_0200.enhancements.agg:
-``agg`` API for DataFrame/Series
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``agg`` API for DataFrame/Series
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Series & DataFrame have been enhanced to support the aggregation API. This is a familiar API
from groupby, window operations, and resampling. This allows aggregation operations in a concise way
@@ -110,8 +110,8 @@ aggregations. This is similar to how groupby ``.agg()`` works. (:issue:`15015`)
.. _whatsnew_0200.enhancements.dataio_dtype:
-``dtype`` keyword for data IO
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Keyword argument ``dtype`` for data IO
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``'python'`` engine for :func:`read_csv`, as well as the :func:`read_fwf` function for parsing
fixed-width text files and :func:`read_excel` for parsing Excel files, now accept the ``dtype`` keyword argument for specifying the types of specific columns (:issue:`14295`). See the :ref:`io docs <io.dtypes>` for more information.
@@ -129,8 +129,8 @@ fixed-width text files and :func:`read_excel` for parsing Excel files, now accep
.. _whatsnew_0120.enhancements.datetime_origin:
-``.to_datetime()`` has gained an ``origin`` parameter
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``.to_datetime()`` has gained an ``origin`` parameter
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:func:`to_datetime` has gained a new parameter, ``origin``, to define a reference date
from where to compute the resulting timestamps when parsing numerical values with a specific ``unit`` specified. (:issue:`11276`, :issue:`11745`)
@@ -151,7 +151,7 @@ commonly called 'unix epoch' or POSIX time. This was the previous default, so th
.. _whatsnew_0200.enhancements.groupby_access:
-Groupby enhancements
+GroupBy enhancements
^^^^^^^^^^^^^^^^^^^^
Strings passed to ``DataFrame.groupby()`` as the ``by`` parameter may now reference either column names or index level names. Previously, only column names could be referenced. This allows to easily group by a column and index level at the same time. (:issue:`5677`)
@@ -197,8 +197,8 @@ support for bz2 compression in the python 2 C-engine improved (:issue:`14874`).
.. _whatsnew_0200.enhancements.pickle_compression:
-Pickle file I/O now supports compression
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Pickle file IO now supports compression
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:func:`read_pickle`, :meth:`DataFrame.to_pickle` and :meth:`Series.to_pickle`
can now read from and write to compressed pickle files. Compression methods
@@ -356,7 +356,7 @@ To convert a ``SparseDataFrame`` back to sparse SciPy matrix in COO format, you
.. _whatsnew_0200.enhancements.style_excel:
-Excel output for styled dataframes
+Excel output for styled DataFrames
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Experimental support has been added to export ``DataFrame.style`` formats to Excel using the ``openpyxl`` engine. (:issue:`15530`)
@@ -928,7 +928,7 @@ New behavior:
.. _whatsnew_0200.api_breaking.groupby_describe:
-Groupby describe formatting
+GroupBy describe formatting
^^^^^^^^^^^^^^^^^^^^^^^^^^^
The output formatting of ``groupby.describe()`` now labels the ``describe()`` metrics in the columns instead of the index.
@@ -1670,8 +1670,8 @@ Indexing
- Bug in in ``pd.concat()`` when combining objects with a ``CategoricalIndex`` (:issue:`16111`)
- Bug in indexing with a scalar and a ``CategoricalIndex`` (:issue:`16123`)
-I/O
-^^^
+IO
+^^
- Bug in ``pd.to_numeric()`` in which float and unsigned integer elements were being improperly casted (:issue:`14941`, :issue:`15005`)
- Bug in ``pd.read_fwf()`` where the skiprows parameter was not being respected during column width inference (:issue:`11256`)
@@ -1712,7 +1712,7 @@ Plotting
- Bug in the date and time converters pandas registers with matplotlib not handling multiple dimensions (:issue:`16026`)
- Bug in ``pd.scatter_matrix()`` could accept either ``color`` or ``c``, but not both (:issue:`14855`)
-Groupby/resample/rolling
+GroupBy/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in ``.groupby(..).resample()`` when passed the ``on=`` kwarg. (:issue:`15021`)
diff --git a/doc/source/whatsnew/v0.20.2.rst b/doc/source/whatsnew/v0.20.2.rst
index 232d1d283d9bd..7f84c6b3f17bd 100644
--- a/doc/source/whatsnew/v0.20.2.rst
+++ b/doc/source/whatsnew/v0.20.2.rst
@@ -1,7 +1,7 @@
.. _whatsnew_0202:
-v0.20.2 (June 4, 2017)
-----------------------
+Version 0.20.2 (June 4, 2017)
+-----------------------------
{{ header }}
@@ -74,8 +74,8 @@ Indexing
- Bug in partial string indexing with a monotonic, but not strictly-monotonic, index incorrectly reversing the slice bounds (:issue:`16515`)
- Bug in ``MultiIndex.remove_unused_levels()`` that would not return a ``MultiIndex`` equal to the original. (:issue:`16556`)
-I/O
-^^^
+IO
+^^
- Bug in :func:`read_csv` when ``comment`` is passed in a space delimited text file (:issue:`16472`)
- Bug in :func:`read_csv` not raising an exception with nonexistent columns in ``usecols`` when it had the correct length (:issue:`14671`)
@@ -97,7 +97,7 @@ Plotting
-Groupby/resample/rolling
+GroupBy/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in creating a time-based rolling window on an empty ``DataFrame`` (:issue:`15819`)
diff --git a/doc/source/whatsnew/v0.20.3.rst b/doc/source/whatsnew/v0.20.3.rst
index 72faabd95bf1f..888d0048ca9f3 100644
--- a/doc/source/whatsnew/v0.20.3.rst
+++ b/doc/source/whatsnew/v0.20.3.rst
@@ -1,7 +1,7 @@
.. _whatsnew_0203:
-v0.20.3 (July 7, 2017)
------------------------
+Version 0.20.3 (July 7, 2017)
+-----------------------------
{{ header }}
@@ -40,8 +40,8 @@ Indexing
- Bug in ``MultiIndex.isin`` causing an error when passing an empty iterable (:issue:`16777`)
- Fixed a bug in a slicing DataFrame/Series that have a ``TimedeltaIndex`` (:issue:`16637`)
-I/O
-^^^
+IO
+^^
- Bug in :func:`read_csv` in which files weren't opened as binary files by the C engine on Windows, causing EOF characters mid-field, which would fail (:issue:`16039`, :issue:`16559`, :issue:`16675`)
- Bug in :func:`read_hdf` in which reading a ``Series`` saved to an HDF file in 'fixed' format fails when an explicit ``mode='r'`` argument is supplied (:issue:`16583`)
diff --git a/doc/source/whatsnew/v0.21.0.rst b/doc/source/whatsnew/v0.21.0.rst
index 71969c4de6b02..926bcaa21ac3a 100644
--- a/doc/source/whatsnew/v0.21.0.rst
+++ b/doc/source/whatsnew/v0.21.0.rst
@@ -1,7 +1,7 @@
.. _whatsnew_0210:
-v0.21.0 (October 27, 2017)
---------------------------
+Version 0.21.0 (October 27, 2017)
+---------------------------------
{{ header }}
@@ -55,8 +55,8 @@ For more details, see see :ref:`the IO docs on Parquet <io.parquet>`.
.. _whatsnew_0210.enhancements.infer_objects:
-``infer_objects`` type conversion
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``infer_objects`` type conversion
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The :meth:`DataFrame.infer_objects` and :meth:`Series.infer_objects`
methods have been added to perform dtype inference on object columns, replacing
@@ -115,8 +115,8 @@ Setting a list-like data structure into a new attribute now raises a ``UserWarni
.. _whatsnew_0210.enhancements.drop_api:
-``drop`` now also accepts index/columns keywords
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``drop`` now also accepts index/columns keywords
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The :meth:`~DataFrame.drop` method has gained ``index``/``columns`` keywords as an
alternative to specifying the ``axis``. This is similar to the behavior of ``reindex``
@@ -135,8 +135,8 @@ For example:
.. _whatsnew_0210.enhancements.rename_reindex_axis:
-``rename``, ``reindex`` now also accept axis keyword
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Methods ``rename``, ``reindex`` now also accept axis keyword
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The :meth:`DataFrame.rename` and :meth:`DataFrame.reindex` methods have gained
the ``axis`` keyword to specify the axis to target with the operation
@@ -380,7 +380,7 @@ Additionally, support has been dropped for Python 3.4 (:issue:`15251`).
.. _whatsnew_0210.api_breaking.bottleneck:
-Sum/Prod of all-NaN or empty Series/DataFrames is now consistently NaN
+Sum/prod of all-NaN or empty Series/DataFrames is now consistently NaN
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note::
@@ -814,7 +814,7 @@ length 2+ levels, so a :class:`MultiIndex` is always returned from all of the
.. _whatsnew_0210.api.utc_localization_with_series:
-UTC Localization with Series
+UTC localization with Series
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Previously, :func:`to_datetime` did not localize datetime ``Series`` data when ``utc=True`` was passed. Now, :func:`to_datetime` will correctly localize ``Series`` with a ``datetime64[ns, UTC]`` dtype to be consistent with how list-like and ``Index`` data are handled. (:issue:`6415`).
@@ -1085,8 +1085,8 @@ Indexing
- Bug in :func:`Series.rename` when called with a callable, incorrectly alters the name of the ``Series``, rather than the name of the ``Index``. (:issue:`17407`)
- Bug in :func:`String.str_get` raises ``IndexError`` instead of inserting NaNs when using a negative index. (:issue:`17704`)
-I/O
-^^^
+IO
+^^
- Bug in :func:`read_hdf` when reading a timezone aware index from ``fixed`` format HDFStore (:issue:`17618`)
- Bug in :func:`read_csv` in which columns were not being thoroughly de-duplicated (:issue:`17060`)
@@ -1115,7 +1115,7 @@ Plotting
- Bug causing ``plotting.parallel_coordinates`` to reset the random seed when using random colors (:issue:`17525`)
-Groupby/resample/rolling
+GroupBy/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in ``DataFrame.resample(...).size()`` where an empty ``DataFrame`` did not return a ``Series`` (:issue:`14962`)
diff --git a/doc/source/whatsnew/v0.21.1.rst b/doc/source/whatsnew/v0.21.1.rst
index 64f3339834b38..f930dfac869cd 100644
--- a/doc/source/whatsnew/v0.21.1.rst
+++ b/doc/source/whatsnew/v0.21.1.rst
@@ -1,7 +1,7 @@
.. _whatsnew_0211:
-v0.21.1 (December 12, 2017)
----------------------------
+Version 0.21.1 (December 12, 2017)
+----------------------------------
{{ header }}
@@ -122,8 +122,8 @@ Indexing
- Bug in ``Index.putmask`` when an invalid mask passed (:issue:`18368`)
- Bug in masked assignment of a ``timedelta64[ns]`` dtype ``Series``, incorrectly coerced to float (:issue:`18493`)
-I/O
-^^^
+IO
+^^
- Bug in class:`~pandas.io.stata.StataReader` not converting date/time columns with display formatting addressed (:issue:`17990`). Previously columns with display formatting were normally left as ordinal numbers and not converted to datetime objects.
- Bug in :func:`read_csv` when reading a compressed UTF-16 encoded file (:issue:`18071`)
@@ -143,7 +143,7 @@ Plotting
- Bug in ``DataFrame.plot()`` and ``Series.plot()`` with :class:`DatetimeIndex` where a figure generated by them is not pickleable in Python 3 (:issue:`18439`)
-Groupby/resample/rolling
+GroupBy/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in ``DataFrame.resample(...).apply(...)`` when there is a callable that returns different columns (:issue:`15169`)
diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py
index 9cf4922fa2662..5de2a07381ae5 100755
--- a/scripts/validate_rst_title_capitalization.py
+++ b/scripts/validate_rst_title_capitalization.py
@@ -18,6 +18,7 @@
CAPITALIZATION_EXCEPTIONS = {
"pandas",
+ "pd",
"Python",
"IPython",
"PyTables",
@@ -34,6 +35,7 @@
"Series",
"Index",
"DataFrame",
+ "DataFrames",
"C",
"Git",
"GitHub",
@@ -48,15 +50,19 @@
"PeriodIndex",
"NA",
"NaN",
+ "NaT",
"ValueError",
+ "Boolean",
"BooleanArray",
"KeyError",
"API",
"FAQ",
"IO",
+ "Timedelta",
"TimedeltaIndex",
"DatetimeIndex",
"IntervalIndex",
+ "Categorical",
"CategoricalIndex",
"Categorical",
"GroupBy",
@@ -113,6 +119,7 @@
"November",
"December",
"Float64Index",
+ "FloatIndex",
"TZ",
"GIL",
"strftime",
@@ -121,6 +128,16 @@
"East",
"Asian",
"None",
+ "URLs",
+ "UInt64",
+ "SciPy",
+ "Matplotlib",
+ "PyPy",
+ "SparseDataFrame",
+ "Google",
+ "CategoricalDtype",
+ "UTC",
+ "Panel",
}
CAP_EXCEPTIONS_DICT = {word.lower(): word for word in CAPITALIZATION_EXCEPTIONS}
| -Quite a lot of complicated issues here, need reviewing. Such as 'Groupby' transformed to 'GroupBy', 'I/O' to 'IO', and some others.
- [ ] Modify files v0.18.0.rst, v0.18.1.rst, v0.19.0.rst, v0.19.1.rst, v0.19.2.rst, v0.20.0.rst, v0.20.2.rst, v0.20.3.rst, v0.21.0.rst, v0.21.1.rst
-File v0.20.0.rst has bad file name as it is actually talking about version v0.20.1.rst | https://api.github.com/repos/pandas-dev/pandas/pulls/33568 | 2020-04-15T15:53:50Z | 2020-05-06T21:14:50Z | 2020-05-06T21:14:49Z | 2020-05-06T21:14:57Z |
CI: Fix jedi deprecation warning for 0.17.0 on IPython | diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 4149485be181d..ec8613faaa663 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -529,7 +529,18 @@ async def test_tab_complete_warning(self, ip):
code = "import pandas as pd; df = pd.DataFrame()"
await ip.run_code(code)
- with tm.assert_produces_warning(None):
+
+ # TODO: remove it when Ipython updates
+ # GH 33567, jedi version raises Deprecation warning in Ipython
+ import jedi
+
+ if jedi.__version__ < "0.17.0":
+ warning = tm.assert_produces_warning(None)
+ else:
+ warning = tm.assert_produces_warning(
+ DeprecationWarning, check_stacklevel=False
+ )
+ with warning:
with provisionalcompleter("ignore"):
list(ip.Completer.completions("df.", 1))
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 03c1445e099a0..035698687cfc2 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -28,7 +28,15 @@ async def test_tab_complete_ipython6_warning(ip):
)
await ip.run_code(code)
- with tm.assert_produces_warning(None):
+ # TODO: remove it when Ipython updates
+ # GH 33567, jedi version raises Deprecation warning in Ipython
+ import jedi
+
+ if jedi.__version__ < "0.17.0":
+ warning = tm.assert_produces_warning(None)
+ else:
+ warning = tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False)
+ with warning:
with provisionalcompleter("ignore"):
list(ip.Completer.completions("rs.", 1))
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 302ca8d1aa43e..a6430b4525d4a 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -491,7 +491,18 @@ async def test_tab_complete_warning(self, ip):
code = "import pandas as pd; s = pd.Series()"
await ip.run_code(code)
- with tm.assert_produces_warning(None):
+
+ # TODO: remove it when Ipython updates
+ # GH 33567, jedi version raises Deprecation warning in Ipython
+ import jedi
+
+ if jedi.__version__ < "0.17.0":
+ warning = tm.assert_produces_warning(None)
+ else:
+ warning = tm.assert_produces_warning(
+ DeprecationWarning, check_stacklevel=False
+ )
+ with warning:
with provisionalcompleter("ignore"):
list(ip.Completer.completions("s.", 1))
| - [ ] closes #33567
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33566 | 2020-04-15T15:09:09Z | 2020-04-15T18:09:32Z | 2020-04-15T18:09:32Z | 2020-05-26T09:40:37Z |
REF: simplify broadcasting code | diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index e693341d10a55..04089e81db331 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -390,11 +390,7 @@ def apply(self: T, f, align_keys=None, **kwargs) -> T:
if f == "where":
align_copy = True
- aligned_args = {
- k: kwargs[k]
- for k in align_keys
- if isinstance(kwargs[k], (ABCSeries, ABCDataFrame))
- }
+ aligned_args = {k: kwargs[k] for k in align_keys}
for b in self.blocks:
@@ -402,8 +398,14 @@ def apply(self: T, f, align_keys=None, **kwargs) -> T:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
- axis = obj._info_axis_number
- kwargs[k] = obj.reindex(b_items, axis=axis, copy=align_copy)._values
+ if isinstance(obj, (ABCSeries, ABCDataFrame)):
+ axis = obj._info_axis_number
+ kwargs[k] = obj.reindex(
+ b_items, axis=axis, copy=align_copy
+ )._values
+ else:
+ # otherwise we have an ndarray
+ kwargs[k] = obj[b.mgr_locs.indexer]
if callable(f):
applied = b.apply(f, **kwargs)
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index c14c4a311d66c..9a7c9fdadf90d 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -510,11 +510,13 @@ def _combine_series_frame(left, right, func, axis: int, str_rep: str):
if axis == 0:
values = right._values
if isinstance(values, np.ndarray):
+ # TODO(EA2D): no need to special-case with 2D EAs
# We can operate block-wise
values = values.reshape(-1, 1)
+ values = np.broadcast_to(values, left.shape)
array_op = get_array_op(func, str_rep=str_rep)
- bm = left._mgr.apply(array_op, right=values.T)
+ bm = left._mgr.apply(array_op, right=values.T, align_keys=["right"])
return type(left)(bm)
new_data = dispatch_to_series(left, right, func)
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
index 5dd7af454cbd1..a1d853e38e757 100644
--- a/pandas/core/ops/array_ops.py
+++ b/pandas/core/ops/array_ops.py
@@ -75,14 +75,7 @@ def masked_arith_op(x: np.ndarray, y, op):
result = np.empty(x.size, dtype=dtype)
if len(x) != len(y):
- if not _can_broadcast(x, y):
- raise ValueError(x.shape, y.shape)
-
- # Call notna on pre-broadcasted y for performance
- ymask = notna(y)
- y = np.broadcast_to(y, x.shape)
- ymask = np.broadcast_to(ymask, x.shape)
-
+ raise ValueError(x.shape, y.shape)
else:
ymask = notna(y)
@@ -211,51 +204,6 @@ def arithmetic_op(left: ArrayLike, right: Any, op, str_rep: str):
return res_values
-def _broadcast_comparison_op(lvalues, rvalues, op) -> np.ndarray:
- """
- Broadcast a comparison operation between two 2D arrays.
-
- Parameters
- ----------
- lvalues : np.ndarray or ExtensionArray
- rvalues : np.ndarray or ExtensionArray
-
- Returns
- -------
- np.ndarray[bool]
- """
- if isinstance(rvalues, np.ndarray):
- rvalues = np.broadcast_to(rvalues, lvalues.shape)
- result = comparison_op(lvalues, rvalues, op)
- else:
- result = np.empty(lvalues.shape, dtype=bool)
- for i in range(len(lvalues)):
- result[i, :] = comparison_op(lvalues[i], rvalues[:, 0], op)
- return result
-
-
-def _can_broadcast(lvalues, rvalues) -> bool:
- """
- Check if we can broadcast rvalues to match the shape of lvalues.
-
- Parameters
- ----------
- lvalues : np.ndarray or ExtensionArray
- rvalues : np.ndarray or ExtensionArray
-
- Returns
- -------
- bool
- """
- # We assume that lengths dont match
- if lvalues.ndim == rvalues.ndim == 2:
- # See if we can broadcast unambiguously
- if lvalues.shape[1] == rvalues.shape[-1]:
- if rvalues.shape[0] == 1:
- return True
- return False
-
-
def comparison_op(
left: ArrayLike, right: Any, op, str_rep: Optional[str] = None,
) -> ArrayLike:
@@ -287,8 +235,6 @@ def comparison_op(
# We are not catching all listlikes here (e.g. frozenset, tuple)
# The ambiguous case is object-dtype. See GH#27803
if len(lvalues) != len(rvalues):
- if _can_broadcast(lvalues, rvalues):
- return _broadcast_comparison_op(lvalues, rvalues, op)
raise ValueError(
"Lengths must match to compare", lvalues.shape, rvalues.shape
)
| https://api.github.com/repos/pandas-dev/pandas/pulls/33565 | 2020-04-15T14:58:54Z | 2020-04-15T19:23:25Z | 2020-04-15T19:23:25Z | 2020-04-15T19:29:48Z | |
CI: Fix Deprecation warning from jedi | diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 4149485be181d..eeb2c409e00d3 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -529,7 +529,18 @@ async def test_tab_complete_warning(self, ip):
code = "import pandas as pd; df = pd.DataFrame()"
await ip.run_code(code)
- with tm.assert_produces_warning(None):
+
+ # TODO: remove it when Ipython updates
+ # jedi version raises Deprecation warning in Ipython
+ import jedi
+
+ if jedi.__version__ < "0.16.0":
+ warning = tm.assert_produces_warning(None)
+ else:
+ warning = tm.assert_produces_warning(
+ DeprecationWarning, check_stacklevel=False
+ )
+ with tm.assert_produces_warning(warning):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("df.", 1))
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 03c1445e099a0..4d64643bf10e7 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -28,7 +28,17 @@ async def test_tab_complete_ipython6_warning(ip):
)
await ip.run_code(code)
- with tm.assert_produces_warning(None):
+ # TODO: remove it when Ipython updates
+ # jedi version raises Deprecation warning in Ipython
+ import jedi
+
+ if jedi.__version__ < "0.16.0":
+ warning = tm.assert_produces_warning(None)
+ else:
+ warning = tm.assert_produces_warning(
+ DeprecationWarning, check_stacklevel=False
+ )
+ with tm.assert_produces_warning(warning):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("rs.", 1))
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 302ca8d1aa43e..1818634b14504 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -491,7 +491,18 @@ async def test_tab_complete_warning(self, ip):
code = "import pandas as pd; s = pd.Series()"
await ip.run_code(code)
- with tm.assert_produces_warning(None):
+
+ # TODO: remove it when Ipython updates
+ # jedi version raises Deprecation warning in Ipython
+ import jedi
+
+ if jedi.__version__ < "0.16.0":
+ warning = tm.assert_produces_warning(None)
+ else:
+ warning = tm.assert_produces_warning(
+ DeprecationWarning, check_stacklevel=False
+ )
+ with tm.assert_produces_warning(warning):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("s.", 1))
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33563 | 2020-04-15T11:58:31Z | 2020-04-15T12:24:55Z | null | 2020-04-15T12:24:59Z |
PERF: operate on arrays instead of Series in DataFrame/DataFrame ops | diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 856a1823353d5..15af453e7b5f8 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -262,15 +262,11 @@ def dispatch_to_series(left, right, func, axis=None):
-------
DataFrame
"""
- # Note: we use iloc to access columns for compat with cases
- # with non-unique columns.
- import pandas.core.computation.expressions as expressions
+ # Get the appropriate array-op to apply to each column/block's values.
+ array_op = get_array_op(func)
right = lib.item_from_zerodim(right)
if lib.is_scalar(right) or np.ndim(right) == 0:
-
- # Get the appropriate array-op to apply to each block's values.
- array_op = get_array_op(func)
bm = left._mgr.apply(array_op, right=right)
return type(left)(bm)
@@ -281,7 +277,6 @@ def dispatch_to_series(left, right, func, axis=None):
# fails in cases with empty columns reached via
# _frame_arith_method_with_reindex
- array_op = get_array_op(func)
bm = left._mgr.operate_blockwise(right._mgr, array_op)
return type(left)(bm)
@@ -295,27 +290,24 @@ def dispatch_to_series(left, right, func, axis=None):
# Note: we do not do this unconditionally as it may be lossy or
# expensive for EA dtypes.
right = np.asarray(right)
-
- def column_op(a, b):
- return {i: func(a.iloc[:, i], b[i]) for i in range(len(a.columns))}
-
else:
+ right = right._values
- def column_op(a, b):
- return {i: func(a.iloc[:, i], b.iloc[i]) for i in range(len(a.columns))}
+ arrays = [array_op(l, r) for l, r in zip(left._iter_column_arrays(), right)]
elif isinstance(right, ABCSeries):
assert right.index.equals(left.index) # Handle other cases later
+ right = right._values
- def column_op(a, b):
- return {i: func(a.iloc[:, i], b) for i in range(len(a.columns))}
+ arrays = [array_op(l, right) for l in left._iter_column_arrays()]
else:
# Remaining cases have less-obvious dispatch rules
raise NotImplementedError(right)
- new_data = expressions.evaluate(column_op, left, right)
- return new_data
+ return type(left)._from_arrays(
+ arrays, left.columns, left.index, verify_integrity=False
+ )
# -----------------------------------------------------------------------------
| xref https://github.com/pandas-dev/pandas/pull/32779 | https://api.github.com/repos/pandas-dev/pandas/pulls/33561 | 2020-04-15T08:53:22Z | 2020-05-25T14:57:13Z | 2020-05-25T14:57:12Z | 2020-05-25T14:57:16Z |
fstring updates. Changing from .format to fstring | diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 17815c437249b..b7bdbde5bac5e 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -237,15 +237,13 @@ def _format_argument_list(allow_args: Union[List[str], int]):
elif allow_args == 1:
return " except for the first argument"
elif isinstance(allow_args, int):
- return " except for the first {num_args} arguments".format(num_args=allow_args)
+ return f" except for the first {allow_args} arguments"
elif len(allow_args) == 1:
- return " except for the argument '{arg}'".format(arg=allow_args[0])
+ return f" except for the argument '{allow_args[0]}'"
else:
last = allow_args[-1]
args = ", ".join(["'" + x + "'" for x in allow_args[:-1]])
- return " except for the arguments {args} and '{last}'".format(
- args=args, last=last
- )
+ return f" except for the arguments {args} and '{last}'"
def deprecate_nonkeyword_arguments(
| Please accept my updates by converting .format to fstrings.
https://github.com/pandas-dev/pandas/issues/29547
| https://api.github.com/repos/pandas-dev/pandas/pulls/33557 | 2020-04-15T00:58:53Z | 2020-04-15T19:40:39Z | 2020-04-15T19:40:39Z | 2020-04-15T19:40:48Z |
fix | diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 17815c437249b..b7bdbde5bac5e 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -237,15 +237,13 @@ def _format_argument_list(allow_args: Union[List[str], int]):
elif allow_args == 1:
return " except for the first argument"
elif isinstance(allow_args, int):
- return " except for the first {num_args} arguments".format(num_args=allow_args)
+ return f" except for the first {allow_args} arguments"
elif len(allow_args) == 1:
- return " except for the argument '{arg}'".format(arg=allow_args[0])
+ return f" except for the argument '{allow_args[0]}'"
else:
last = allow_args[-1]
args = ", ".join(["'" + x + "'" for x in allow_args[:-1]])
- return " except for the arguments {args} and '{last}'".format(
- args=args, last=last
- )
+ return f" except for the arguments {args} and '{last}'"
def deprecate_nonkeyword_arguments(
| - [x] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33556 | 2020-04-15T00:36:34Z | 2020-04-15T14:38:49Z | null | 2020-04-15T14:38:49Z |
CLN: x-array test warnings | diff --git a/pandas/tests/generic/test_to_xarray.py b/pandas/tests/generic/test_to_xarray.py
index b6abdf09a7f62..2fde96a1c8f89 100644
--- a/pandas/tests/generic/test_to_xarray.py
+++ b/pandas/tests/generic/test_to_xarray.py
@@ -99,7 +99,7 @@ def test_to_xarray_index_types(self, indices):
from xarray import DataArray
- s = Series(range(len(indices)), index=indices)
+ s = Series(range(len(indices)), index=indices, dtype="int64")
s.index.name = "foo"
result = s.to_xarray()
repr(result)
@@ -123,7 +123,7 @@ def test_to_xarray(self):
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
assert isinstance(result, DataArray)
- s = Series(range(6))
+ s = Series(range(6), dtype="int64")
s.index.name = "foo"
s.index = pd.MultiIndex.from_product(
[["a", "b"], range(3)], names=["one", "two"]
| Remove these warnings:
```
pandas/tests/generic/test_to_xarray.py:102
/home/travis/build/pandas-dev/pandas/pandas/tests/generic/test_to_xarray.py:102: DeprecationWarning: The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning.
s = Series(range(len(indices)), index=indices)
```
https://travis-ci.org/github/pandas-dev/pandas/jobs/675056013 | https://api.github.com/repos/pandas-dev/pandas/pulls/33555 | 2020-04-14T23:49:57Z | 2020-04-15T00:33:14Z | 2020-04-15T00:33:14Z | 2020-04-15T00:33:23Z |
BUG: tz_localize needs to invalidate freq | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index d0e3e5c96dc3a..b2a87ea5fefe6 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -412,7 +412,7 @@ Datetimelike
- Bug where :meth:`PeriodIndex` raised when passed a :class:`Series` of strings (:issue:`26109`)
- Bug in :class:`Timestamp` arithmetic when adding or subtracting a ``np.ndarray`` with ``timedelta64`` dtype (:issue:`33296`)
- Bug in :meth:`DatetimeIndex.to_period` not infering the frequency when called with no arguments (:issue:`33358`)
-
+- Bug in :meth:`DatetimeIndex.tz_localize` incorrectly retaining ``freq`` in some cases where the original freq is no longer valid (:issue:`30511`)
Timedelta
^^^^^^^^^
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index f5cc0817e8bd7..2d58138d56ad9 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -886,7 +886,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"):
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
- dtype='datetime64[ns, US/Eastern]', freq='D')
+ dtype='datetime64[ns, US/Eastern]', freq=None)
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
@@ -894,7 +894,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
- dtype='datetime64[ns]', freq='D')
+ dtype='datetime64[ns]', freq=None)
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
@@ -973,7 +973,16 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"):
)
new_dates = new_dates.view(DT64NS_DTYPE)
dtype = tz_to_dtype(tz)
- return self._simple_new(new_dates, dtype=dtype, freq=self.freq)
+
+ freq = None
+ if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])):
+ # we can preserve freq
+ # TODO: Also for fixed-offsets
+ freq = self.freq
+ elif tz is None and self.tz is None:
+ # no-op
+ freq = self.freq
+ return self._simple_new(new_dates, dtype=dtype, freq=freq)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index fbddf765be79c..8628ce7ade212 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -1161,3 +1161,26 @@ def test_iteration_preserves_nanoseconds(self, tz):
)
for i, ts in enumerate(index):
assert ts == index[i]
+
+
+def test_tz_localize_invalidates_freq():
+ # we only preserve freq in unambiguous cases
+
+ # if localized to US/Eastern, this crosses a DST transition
+ dti = date_range("2014-03-08 23:00", "2014-03-09 09:00", freq="H")
+ assert dti.freq == "H"
+
+ result = dti.tz_localize(None) # no-op
+ assert result.freq == "H"
+
+ result = dti.tz_localize("UTC") # unambiguous freq preservation
+ assert result.freq == "H"
+
+ result = dti.tz_localize("US/Eastern", nonexistent="shift_forward")
+ assert result.freq is None
+ assert result.inferred_freq is None # i.e. we are not _too_ strict here
+
+ # Case where we _can_ keep freq because we're length==1
+ dti2 = dti[:1]
+ result = dti2.tz_localize("US/Eastern")
+ assert result.freq == "H"
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index a6385240537ca..16163ee76ba63 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -378,6 +378,7 @@ def test_ser_cmp_result_names(self, names, op):
# datetime64tz dtype
dti = dti.tz_localize("US/Central")
+ dti._set_freq("infer") # freq not preserved by tz_localize
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
| - [x] closes #30511
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Identified when trying to add `freq` check to `assert_index_equal` for DatetimeIndex and TimedeltaIndex. | https://api.github.com/repos/pandas-dev/pandas/pulls/33553 | 2020-04-14T22:39:20Z | 2020-04-16T21:27:52Z | 2020-04-16T21:27:52Z | 2020-04-16T21:45:51Z |
BUG: Setting DTI/TDI freq affecting other indexes viewing the same data | diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index cf653a6875a9c..a1967c939092f 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -1,7 +1,7 @@
"""
Base and utility classes for tseries type pandas objects.
"""
-from datetime import datetime
+from datetime import datetime, timedelta
from typing import Any, List, Optional, Union, cast
import numpy as np
@@ -17,14 +17,18 @@
ensure_int64,
ensure_platform_int,
is_bool_dtype,
+ is_datetime64_any_dtype,
is_dtype_equal,
is_integer,
is_list_like,
+ is_object_dtype,
is_period_dtype,
is_scalar,
+ is_timedelta64_dtype,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
+from pandas.core.dtypes.missing import isna
from pandas.core import algorithms
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
@@ -41,7 +45,8 @@
from pandas.core.ops import get_op_result_name
from pandas.core.tools.timedeltas import to_timedelta
-from pandas.tseries.frequencies import DateOffset
+from pandas.tseries.frequencies import DateOffset, to_offset
+from pandas.tseries.offsets import Tick
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
@@ -72,13 +77,33 @@ def wrapper(left, right):
return wrapper
+def _make_wrapped_arith_op_with_freq(opname: str):
+ """
+ Dispatch the operation to the underlying ExtensionArray, and infer
+ the appropriate frequency for the result.
+ """
+ meth = make_wrapped_arith_op(opname)
+
+ def wrapped(self, other):
+ result = meth(self, other)
+ if result is NotImplemented:
+ return NotImplemented
+
+ new_freq = self._get_addsub_freq(other)
+ result._freq = new_freq
+ return result
+
+ wrapped.__name__ = opname
+ return wrapped
+
+
@inherit_names(
["inferred_freq", "_isnan", "_resolution", "resolution"],
DatetimeLikeArrayMixin,
cache=True,
)
@inherit_names(
- ["mean", "freq", "freqstr", "asi8", "_box_func"], DatetimeLikeArrayMixin,
+ ["mean", "asi8", "_box_func"], DatetimeLikeArrayMixin,
)
class DatetimeIndexOpsMixin(ExtensionIndex):
"""
@@ -446,10 +471,45 @@ def get_indexer_non_unique(self, target):
return ensure_platform_int(indexer), missing
# --------------------------------------------------------------------
+ # Arithmetic Methods
+
+ def _get_addsub_freq(self, other) -> Optional[DateOffset]:
+ """
+ Find the freq we expect the result of an addition/subtraction operation
+ to have.
+ """
+ if is_period_dtype(self.dtype):
+ # Only used for ops that stay PeriodDtype
+ return self.freq
+ elif self.freq is None:
+ return None
+ elif lib.is_scalar(other) and isna(other):
+ return None
+
+ elif isinstance(other, (Tick, timedelta, np.timedelta64)):
+ new_freq = None
+ if isinstance(self.freq, Tick):
+ new_freq = self.freq
+ return new_freq
+
+ elif isinstance(other, DateOffset):
+ # otherwise just DatetimeArray
+ return None # TODO: Should we infer if it matches self.freq * n?
+ elif isinstance(other, (datetime, np.datetime64)):
+ return self.freq
+
+ elif is_timedelta64_dtype(other):
+ return None # TODO: shouldnt we be able to do self.freq + other.freq?
+ elif is_object_dtype(other):
+ return None # TODO: is this quite right? sometimes we unpack singletons
+ elif is_datetime64_any_dtype(other):
+ return None # TODO: shouldnt we be able to do self.freq + other.freq?
+ else:
+ raise NotImplementedError
- __add__ = make_wrapped_arith_op("__add__")
+ __add__ = _make_wrapped_arith_op_with_freq("__add__")
+ __sub__ = _make_wrapped_arith_op_with_freq("__sub__")
__radd__ = make_wrapped_arith_op("__radd__")
- __sub__ = make_wrapped_arith_op("__sub__")
__rsub__ = make_wrapped_arith_op("__rsub__")
__pow__ = make_wrapped_arith_op("__pow__")
__rpow__ = make_wrapped_arith_op("__rpow__")
@@ -558,7 +618,9 @@ def shift(self, periods=1, freq=None):
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
"""
- result = self._data._time_shift(periods, freq=freq)
+ arr = self._data.view()
+ arr._freq = self.freq
+ result = arr._time_shift(periods, freq=freq)
return type(self)(result, name=self.name)
# --------------------------------------------------------------------
@@ -610,21 +672,40 @@ class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, Int64Index):
_is_monotonic_increasing = Index.is_monotonic_increasing
_is_monotonic_decreasing = Index.is_monotonic_decreasing
_is_unique = Index.is_unique
+ _freq = lib.no_default
- def _set_freq(self, freq):
+ @property
+ def freq(self):
+ """
+ In limited circumstances, our freq may differ from that of our _data.
"""
- Set the _freq attribute on our underlying DatetimeArray.
+ if self._freq is not lib.no_default:
+ return self._freq
+ return self._data.freq
- Parameters
- ----------
- freq : DateOffset, None, or "infer"
+ @property
+ def freqstr(self):
+ """
+ Return the frequency object as a string if its set, otherwise None.
"""
- # GH#29843
- self._data._with_freq(freq)
+ if self.freq is None:
+ return None
+ return self.freq.freqstr
def _with_freq(self, freq):
index = self.copy(deep=False)
- index._set_freq(freq)
+ if freq is None:
+ # Even if we _can_ have a freq, we might want to set it to None
+ index._freq = None
+ elif len(self) == 0 and isinstance(freq, DateOffset):
+ # Always valid. In the TimedeltaArray case, we assume this
+ # is a Tick offset.
+ index._freq = freq
+ else:
+ assert freq == "infer", freq
+ freq = to_offset(self.inferred_freq)
+ index._freq = freq
+
return index
def _shallow_copy(self, values=None, name: Label = lib.no_default):
@@ -647,8 +728,7 @@ def _shallow_copy(self, values=None, name: Label = lib.no_default):
@Appender(Index.difference.__doc__)
def difference(self, other, sort=None):
- new_idx = super().difference(other, sort=sort)
- new_idx._set_freq(None)
+ new_idx = super().difference(other, sort=sort)._with_freq(None)
return new_idx
def intersection(self, other, sort=False):
@@ -693,7 +773,7 @@ def intersection(self, other, sort=False):
result = Index.intersection(self, other, sort=sort)
if isinstance(result, type(self)):
if result.freq is None:
- result._set_freq("infer")
+ result = result._with_freq("infer")
return result
elif (
@@ -704,14 +784,7 @@ def intersection(self, other, sort=False):
or (not self.is_monotonic or not other.is_monotonic)
):
result = Index.intersection(self, other, sort=sort)
-
- # Invalidate the freq of `result`, which may not be correct at
- # this point, depending on the values.
-
- result._set_freq(None)
- result = self._shallow_copy(result._data, name=result.name)
- if result.freq is None:
- result._set_freq("infer")
+ result = result._with_freq("infer")
return result
# to make our life easier, "sort" the two ranges
@@ -781,10 +854,9 @@ def _fast_union(self, other, sort=None):
left_start = left[0]
loc = right.searchsorted(left_start, side="left")
right_chunk = right._values[:loc]
- dates = concat_compat([left._values, right_chunk])
- result = self._shallow_copy(dates)
- result._set_freq("infer")
+ dates = concat_compat((left._values, right_chunk))
# TODO: can we infer that it has self.freq?
+ result = self._shallow_copy(dates)._with_freq("infer")
return result
else:
left, right = other, self
@@ -797,9 +869,8 @@ def _fast_union(self, other, sort=None):
loc = right.searchsorted(left_end, side="right")
right_chunk = right._values[loc:]
dates = concat_compat([left._values, right_chunk])
- result = self._shallow_copy(dates)
- result._set_freq("infer")
# TODO: can we infer that it has self.freq?
+ result = self._shallow_copy(dates)._with_freq("infer")
return result
else:
return left
@@ -816,7 +887,7 @@ def _union(self, other, sort):
if this._can_fast_union(other):
result = this._fast_union(other, sort=sort)
if result.freq is None:
- result._set_freq("infer")
+ result = result._with_freq("infer")
return result
else:
i8self = Int64Index._simple_new(self.asi8, name=self.name)
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 957c01c2dca96..d3c49c5ed0796 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -71,7 +71,7 @@ def _new_PeriodIndex(cls, **d):
PeriodArray,
wrap=True,
)
-@inherit_names(["is_leap_year", "freq", "_format_native_types"], PeriodArray)
+@inherit_names(["is_leap_year", "freq", "freqstr", "_format_native_types"], PeriodArray)
class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index bfef4f63e2e8a..06751d9c35fab 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1017,7 +1017,8 @@ def _downsample(self, how, **kwargs):
if not len(ax):
# reset to the new freq
obj = obj.copy()
- obj.index._set_freq(self.freq)
+ obj.index = obj.index._with_freq(self.freq)
+ assert obj.index.freq == self.freq, (obj.index.freq, self.freq)
return obj
# do we have a regular frequency
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index 79fcb5e9478c3..912ce2a953e0a 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -2052,7 +2052,7 @@ def test_dti_add_tdi(self, tz_naive_fixture):
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz)
- expected._set_freq(None)
+ expected = expected._with_freq(None)
# add with TimdeltaIndex
result = dti + tdi
@@ -2074,7 +2074,7 @@ def test_dti_iadd_tdi(self, tz_naive_fixture):
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz)
- expected._set_freq(None)
+ expected = expected._with_freq(None)
# iadd with TimdeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 3ffdc87ff84c8..9378a70044d83 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -544,7 +544,7 @@ def test_tda_add_sub_index(self):
def test_tda_add_dt64_object_array(self, box_df_fail, tz_naive_fixture):
# Result should be cast back to DatetimeArray
dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
- dti._set_freq(None)
+ dti = dti._with_freq(None)
tdi = dti - dti
obj = tm.box_expected(tdi, box_df_fail)
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 957ca138498d9..52b82b36d13be 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -267,7 +267,7 @@ def test_ensure_copied_data(self, indices):
if is_datetime64tz_dtype(indices.dtype):
result = result.tz_localize("UTC").tz_convert(indices.tz)
if isinstance(indices, (DatetimeIndex, TimedeltaIndex)):
- indices._set_freq(None)
+ indices = indices._with_freq(None)
tm.assert_index_equal(indices, result)
@@ -397,7 +397,7 @@ def test_where(self, klass):
i = self.create_index()
if isinstance(i, (pd.DatetimeIndex, pd.TimedeltaIndex)):
# where does not preserve freq
- i._set_freq(None)
+ i = i._with_freq(None)
cond = [True] * len(i)
result = i.where(klass(cond))
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index 944358b1540b0..dfefdc0f211b1 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -82,7 +82,7 @@ def test_map_dictlike(self, mapper):
# don't compare the freqs
if isinstance(expected, (pd.DatetimeIndex, pd.TimedeltaIndex)):
- expected._set_freq(None)
+ expected = expected._with_freq(None)
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index a8e08bbe9a2e9..691f542fc2084 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -131,7 +131,7 @@ def test_construction_with_alt(self, kwargs, tz_aware_fixture):
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range("20130101", periods=5, freq="H", tz=tz)
- i._set_freq(None)
+ i = i._with_freq(None)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if "tz" in kwargs:
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 08706dce7e1e0..81fa1a27ac911 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -463,6 +463,5 @@ def test_split_non_utc(self):
# GH 14042
indices = pd.date_range("2016-01-01 00:00:00+0200", freq="S", periods=10)
result = np.split(indices, indices_or_sections=[])[0]
- expected = indices.copy()
- expected._set_freq(None)
+ expected = indices._with_freq(None)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index f0fe5e9b293fc..603a0a452391c 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -134,7 +134,7 @@ def test_value_counts_unique(self, tz_naive_fixture):
exp_idx = pd.date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
- expected.index._set_freq(None)
+ expected.index = expected.index._with_freq(None)
for obj in [idx, Series(idx)]:
@@ -406,6 +406,20 @@ def test_freq_setter_errors(self):
with pytest.raises(ValueError, match="Invalid frequency"):
idx._data.freq = "foo"
+ def test_freq_view_safe(self):
+ # Setting the freq for one DatetimeIndex shouldn't alter the freq
+ # for another that views the same data
+
+ dti = pd.date_range("2016-01-01", periods=5)
+ dta = dti._data
+
+ dti2 = DatetimeIndex(dta)._with_freq(None)
+ assert dti2.freq is None
+
+ # Original was not altered
+ assert dti.freq == "D"
+ assert dta.freq == "D"
+
class TestBusinessDatetimeIndex:
def setup_method(self, method):
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index df6e2dac72f95..0473ecf9de24d 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -231,9 +231,7 @@ def test_intersection(self, tz, sort):
]:
result = base.intersection(rng)
tm.assert_index_equal(result, expected)
- assert result.name == expected.name
assert result.freq == expected.freq
- assert result.tz == expected.tz
# non-monotonic
base = DatetimeIndex(
@@ -255,6 +253,7 @@ def test_intersection(self, tz, sort):
# GH 7880
rng4 = date_range("7/1/2000", "7/31/2000", freq="D", tz=tz, name="idx")
expected4 = DatetimeIndex([], tz=tz, name="idx")
+ assert expected4.freq is None
for (rng, expected) in [
(rng2, expected2),
@@ -265,9 +264,7 @@ def test_intersection(self, tz, sort):
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
- assert result.name == expected.name
assert result.freq is None
- assert result.tz == expected.tz
# parametrize over both anchored and non-anchored freqs, as they
# have different code paths
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 0e5abe2f5ccd1..19cbd74b31172 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -290,3 +290,17 @@ def test_freq_setter_errors(self):
# setting with non-freq string
with pytest.raises(ValueError, match="Invalid frequency"):
idx._data.freq = "foo"
+
+ def test_freq_view_safe(self):
+ # Setting the freq for one TimedeltaIndex shouldn't alter the freq
+ # for another that views the same data
+
+ tdi = TimedeltaIndex(["0 days", "2 days", "4 days"], freq="2D")
+ tda = tdi._data
+
+ tdi2 = TimedeltaIndex(tda)._with_freq(None)
+ assert tdi2.freq is None
+
+ # Original was not altered
+ assert tdi.freq == "2D"
+ assert tda.freq == "2D"
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index f724badd51da8..637a2629dda8a 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -32,7 +32,9 @@ def indices(self):
def create_index(self) -> TimedeltaIndex:
index = pd.to_timedelta(range(5), unit="d")._with_freq("infer")
assert index.freq == "D"
- return index + pd.offsets.Hour(1)
+ ret = index + pd.offsets.Hour(1)
+ assert ret.freq == "D"
+ return ret
def test_numeric_compat(self):
# Dummy method to override super's version; this test is now done
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index e70a06cc5f582..280424c68297f 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -386,7 +386,7 @@ def test_write_index(self, engine):
for index in indexes:
df.index = index
if isinstance(index, pd.DatetimeIndex):
- index._set_freq(None) # freq doesnt round-trip
+ df.index = df.index._with_freq(None) # freq doesnt round-trip
check_round_trip(df, engine, check_names=check_names)
# index with meta-data
@@ -465,7 +465,7 @@ def test_basic(self, pa, df_full):
# additional supported types for pyarrow
dti = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
- dti._set_freq(None) # freq doesnt round-trip
+ dti = dti._with_freq(None) # freq doesnt round-trip
df["datetime_tz"] = dti
df["bool_with_none"] = [True, None, True]
@@ -634,7 +634,7 @@ def test_basic(self, fp, df_full):
df = df_full
dti = pd.date_range("20130101", periods=3, tz="US/Eastern")
- dti._set_freq(None) # freq doesnt round-trip
+ dti = dti._with_freq(None) # freq doesnt round-trip
df["datetime_tz"] = dti
df["timedelta"] = pd.timedelta_range("1 day", periods=3)
check_round_trip(df, fp)
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 16163ee76ba63..c7a04843b8296 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -378,7 +378,7 @@ def test_ser_cmp_result_names(self, names, op):
# datetime64tz dtype
dti = dti.tz_localize("US/Central")
- dti._set_freq("infer") # freq not preserved by tz_localize
+ dti = pd.DatetimeIndex(dti, freq="infer") # freq not preserved by tz_localize
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 43461d465b9e7..1ba73292dc0b4 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1505,7 +1505,7 @@ def test_set_index_datetime(self):
tz="US/Eastern",
)
idx3 = pd.date_range("2011-01-01 09:00", periods=6, tz="Asia/Tokyo")
- idx3._set_freq(None)
+ idx3 = idx3._with_freq(None)
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
The issue this addresses is related to #31218. | https://api.github.com/repos/pandas-dev/pandas/pulls/33552 | 2020-04-14T22:32:33Z | 2020-04-25T20:45:15Z | 2020-04-25T20:45:15Z | 2020-04-26T02:11:00Z |
BUG: Fix behavior of isocalendar with timezones | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index f5cc0817e8bd7..dd553011c6a6d 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -1277,7 +1277,11 @@ def isocalendar(self):
"""
from pandas import DataFrame
- sarray = fields.build_isocalendar_sarray(self.asi8)
+ if self.tz is not None and not timezones.is_utc(self.tz):
+ values = self._local_timestamps()
+ else:
+ values = self.asi8
+ sarray = fields.build_isocalendar_sarray(values)
iso_calendar_df = DataFrame(
sarray, columns=["year", "week", "day"], dtype="UInt32"
)
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index d0464698e3f24..bb228eadccc6c 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -373,3 +373,17 @@ def test_iter_readonly():
arr.setflags(write=False)
dti = pd.to_datetime(arr)
list(dti)
+
+
+def test_isocalendar_returns_correct_values_close_to_new_year_with_tz():
+ # GH 6538: Check that DatetimeIndex and its TimeStamp elements
+ # return the same weekofyear accessor close to new year w/ tz
+ dates = ["2013/12/29", "2013/12/30", "2013/12/31"]
+ dates = DatetimeIndex(dates, tz="Europe/Brussels")
+ result = dates.isocalendar()
+ expected_data_frame = pd.DataFrame(
+ [[2013, 52, 7], [2014, 1, 1], [2014, 1, 2]],
+ columns=["year", "week", "day"],
+ dtype="UInt32",
+ )
+ tm.assert_frame_equal(result, expected_data_frame)
| - If timezone is not UTC, then convert to UTC
- This bug was found while deprecating 'week' and 'weekofyear' | https://api.github.com/repos/pandas-dev/pandas/pulls/33551 | 2020-04-14T21:25:14Z | 2020-04-15T01:24:38Z | 2020-04-15T01:24:38Z | 2020-05-27T08:23:27Z |
DOC: Fix some typos | diff --git a/doc/source/getting_started/intro_tutorials/03_subset_data.rst b/doc/source/getting_started/intro_tutorials/03_subset_data.rst
index 4167166a3f34a..31f434758876f 100644
--- a/doc/source/getting_started/intro_tutorials/03_subset_data.rst
+++ b/doc/source/getting_started/intro_tutorials/03_subset_data.rst
@@ -23,7 +23,7 @@
<div class="card-body">
<p class="card-text">
-This tutorial uses the titanic data set, stored as CSV. The data
+This tutorial uses the Titanic data set, stored as CSV. The data
consists of the following data columns:
- PassengerId: Id of every passenger.
@@ -72,7 +72,7 @@ How do I select specific columns from a ``DataFrame``?
<ul class="task-bullet">
<li>
-I’m interested in the age of the titanic passengers.
+I’m interested in the age of the Titanic passengers.
.. ipython:: python
@@ -111,7 +111,7 @@ the number of rows is returned.
<ul class="task-bullet">
<li>
-I’m interested in the age and sex of the titanic passengers.
+I’m interested in the age and sex of the Titanic passengers.
.. ipython:: python
@@ -198,7 +198,7 @@ can be used to filter the ``DataFrame`` by putting it in between the
selection brackets ``[]``. Only rows for which the value is ``True``
will be selected.
-We now from before that the original titanic ``DataFrame`` consists of
+We know from before that the original Titanic ``DataFrame`` consists of
891 rows. Let’s have a look at the amount of rows which satisfy the
condition by checking the ``shape`` attribute of the resulting
``DataFrame`` ``above_35``:
@@ -212,7 +212,7 @@ condition by checking the ``shape`` attribute of the resulting
<ul class="task-bullet">
<li>
-I’m interested in the titanic passengers from cabin class 2 and 3.
+I’m interested in the Titanic passengers from cabin class 2 and 3.
.. ipython:: python
| Closes #33547, also Titanic should be capitalized | https://api.github.com/repos/pandas-dev/pandas/pulls/33550 | 2020-04-14T21:03:16Z | 2020-04-14T22:05:53Z | 2020-04-14T22:05:53Z | 2020-04-14T22:14:07Z |
[WIP] Add remote file io using fsspec. | diff --git a/pandas/io/common.py b/pandas/io/common.py
index ff527de79c387..98f584c60a964 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -158,6 +158,23 @@ def urlopen(*args, **kwargs):
return urllib.request.urlopen(*args, **kwargs)
+def is_fsspec_url(url) -> bool:
+ """
+ Returns true if fsspec is installed and the URL references a known
+ fsspec filesystem.
+ """
+
+ if not isinstance(url, str):
+ return False
+
+ try:
+ from fsspec.registry import known_implementations
+ scheme = parse_url(url).scheme
+ return scheme != "file" and scheme in known_implementations
+ except ImportError:
+ return False
+
+
def get_filepath_or_buffer(
filepath_or_buffer: FilePathOrBuffer,
encoding: Optional[str] = None,
@@ -194,19 +211,26 @@ def get_filepath_or_buffer(
req.close()
return reader, encoding, compression, True
- if is_s3_url(filepath_or_buffer):
- from pandas.io import s3
+ if is_fsspec_url(filepath_or_buffer):
+ import fsspec
+ scheme = parse_url(filepath_or_buffer).scheme
+ filesystem = fsspec.filesystem(scheme)
+ file_obj = filesystem.open(filepath_or_buffer, mode=mode or "rb")
+ return file_obj, encoding, compression, True
- return s3.get_filepath_or_buffer(
- filepath_or_buffer, encoding=encoding, compression=compression, mode=mode
- )
+ # if is_s3_url(filepath_or_buffer):
+ # from pandas.io import s3
- if is_gcs_url(filepath_or_buffer):
- from pandas.io import gcs
+ # return s3.get_filepath_or_buffer(
+ # filepath_or_buffer, encoding=encoding, compression=compression, mode=mode
+ # )
- return gcs.get_filepath_or_buffer(
- filepath_or_buffer, encoding=encoding, compression=compression, mode=mode
- )
+ # if is_gcs_url(filepath_or_buffer):
+ # from pandas.io import gcs
+
+ # return gcs.get_filepath_or_buffer(
+ # filepath_or_buffer, encoding=encoding, compression=compression, mode=mode
+ # )
if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):
return _expand_user(filepath_or_buffer), None, compression, False
diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py
index 557a9d5c13987..f9282487e559a 100644
--- a/pandas/tests/io/test_gcs.py
+++ b/pandas/tests/io/test_gcs.py
@@ -29,7 +29,7 @@ def test_read_csv_gcs(monkeypatch):
)
class MockGCSFileSystem:
- def open(*args):
+ def open(self, path, mode, *args):
return StringIO(df1.to_csv(index=False))
monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
@@ -51,7 +51,7 @@ def test_to_csv_gcs(monkeypatch):
s = StringIO()
class MockGCSFileSystem:
- def open(*args):
+ def open(self, path, mode, *args):
return s
monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
| - [x] closes #33452
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33549 | 2020-04-14T20:42:38Z | 2020-05-19T20:10:05Z | null | 2020-05-19T20:10:05Z |
TST: rename checknull_old -> test_checknull_old | diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index cad46d0a23967..f9a854c5778a2 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -564,7 +564,7 @@ def test_checknull(self):
for value in never_na_vals:
assert not libmissing.checknull(value)
- def checknull_old(self):
+ def test_checknull_old(self):
for value in na_vals:
assert libmissing.checknull_old(value)
| https://api.github.com/repos/pandas-dev/pandas/pulls/33546 | 2020-04-14T16:41:32Z | 2020-04-14T17:21:42Z | 2020-04-14T17:21:42Z | 2020-04-14T19:32:42Z | |
Preserving boolean dtype in Series.any/all function with level keyword #33449 | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2adfd2bb9a7b3..db880368fffed 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9974,11 +9974,15 @@ def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
- return getattr(grouped, name)(**kwargs)
- axis = self._get_axis_number(axis)
- method = getattr(type(self), name)
- applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
- return grouped.aggregate(applyf)
+ result = getattr(grouped, name)(**kwargs)
+ else:
+ axis = self._get_axis_number(axis)
+ method = getattr(type(self), name)
+ applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
+ result = grouped.aggregate(applyf)
+ if isinstance(self, ABCSeries) and self.dtype.name == "boolean":
+ return result.astype("boolean")
+ return result
@classmethod
def _add_numeric_operations(cls):
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index fa62d5d8c4983..ead8183bb7d8f 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -912,8 +912,13 @@ def test_all_any_boolean(self):
index=[0, 0, 1, 1, 2, 2],
dtype="boolean",
)
- tm.assert_series_equal(s.all(level=0), Series([False, True, False]))
- tm.assert_series_equal(s.any(level=0), Series([False, True, True]))
+ result = s.all(level=0)
+ expected = Series([False, True, False], dtype="boolean")
+ tm.assert_series_equal(result, expected)
+
+ result = s.any(level=0)
+ expected = Series([False, True, True], dtype="boolean")
+ tm.assert_series_equal(result, expected)
def test_timedelta64_analytics(self):
| https://api.github.com/repos/pandas-dev/pandas/pulls/33543 | 2020-04-14T08:53:45Z | 2021-04-20T05:06:19Z | null | 2021-04-20T05:06:20Z | |
PERF: Fix performance regression #33365 | diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 635bf32639075..ba1a9a4e08fa0 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -25,6 +25,7 @@
from pandas.core.algorithms import take_1d
from pandas.core.arrays.categorical import Categorical, contains, recode_for_categories
import pandas.core.common as com
+from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs, maybe_extract_name
from pandas.core.indexes.extension import ExtensionIndex, inherit_names
@@ -198,8 +199,13 @@ def __new__(
data = []
assert isinstance(dtype, CategoricalDtype), dtype
- if not isinstance(data, Categorical) or data.dtype != dtype:
+ data = extract_array(data, extract_numpy=True)
+
+ if not isinstance(data, Categorical):
data = Categorical(data, dtype=dtype)
+ elif isinstance(dtype, CategoricalDtype) and dtype != data.dtype:
+ # we want to silently ignore dtype='category'
+ data = data._set_dtype(dtype)
data = data.copy() if copy else data
| Fix performance regression in Series.is_monotonic_increasing for categorical
by avoiding Categorical construction for categorical series
- [x] closes #33365
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry: perf regression was introduced after previous version
| https://api.github.com/repos/pandas-dev/pandas/pulls/33540 | 2020-04-14T03:28:12Z | 2020-04-15T17:41:38Z | 2020-04-15T17:41:38Z | 2020-04-16T05:28:55Z |
BUG: Series[listlike_of_ints] incorrect on MultiIndex | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 07849702c646d..9a1f8d00225aa 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -323,6 +323,36 @@ key and type of :class:`Index`. These now consistently raise ``KeyError`` (:iss
...
KeyError: Timestamp('1970-01-01 00:00:00')
+.. _whatsnew_110.api_breaking.indexing_int_multiindex_raises_key_errors:
+
+Failed Integer Lookups on MultiIndex Raise KeyError
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Indexing with integers with a :class:`MultiIndex` that has a integer-dtype
+first level incorrectly failed to raise ``KeyError`` when one or more of
+those integer keys is not present in the first level of the index (:issue:`33539`)
+
+.. ipython:: python
+
+ idx = pd.Index(range(4))
+ dti = pd.date_range("2000-01-03", periods=3)
+ mi = pd.MultiIndex.from_product([idx, dti])
+ ser = pd.Series(range(len(mi)), index=mi)
+
+*Previous behavior*:
+
+.. code-block:: ipython
+
+ In [5]: ser[[5]]
+ Out[5]: Series([], dtype: int64)
+
+*New behavior*:
+
+.. code-block:: ipython
+
+ In [5]: ser[[5]]
+ ...
+ KeyError: '[5] not in index'
+
:meth:`DataFrame.merge` preserves right frame's row order
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:meth:`DataFrame.merge` now preserves right frame's row order when executing a right merge (:issue:`27453`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 303365f50c546..5752f00ca5a18 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1082,37 +1082,6 @@ def _getitem_axis(self, key, axis: int):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
- # convert various list-like indexers
- # to a list of keys
- # we will use the *values* of the object
- # and NOT the index if its a PandasObject
- if isinstance(labels, ABCMultiIndex):
-
- if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:
- # Series, or 0,1 ndim ndarray
- # GH 14730
- key = list(key)
- elif isinstance(key, ABCDataFrame):
- # GH 15438
- raise NotImplementedError(
- "Indexing a MultiIndex with a "
- "DataFrame key is not "
- "implemented"
- )
- elif hasattr(key, "ndim") and key.ndim > 1:
- raise NotImplementedError(
- "Indexing a MultiIndex with a "
- "multidimensional key is not "
- "implemented"
- )
-
- if (
- not isinstance(key, tuple)
- and len(key)
- and not isinstance(key[0], tuple)
- ):
- key = tuple([key])
-
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, ABCMultiIndex)):
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index b7802d9b8fe0c..f0cbdbe8d0564 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -134,16 +134,15 @@ def test_loc_multiindex_missing_label_raises(self):
@pytest.mark.parametrize("key, pos", [([2, 4], [0, 1]), ([2], []), ([2, 3], [])])
def test_loc_multiindex_list_missing_label(self, key, pos):
- # GH 27148 - lists with missing labels do not raise:
+ # GH 27148 - lists with missing labels _do_ raise
df = DataFrame(
np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]],
)
- expected = df.iloc[pos]
- result = df.loc[key]
- tm.assert_frame_equal(result, expected)
+ with pytest.raises(KeyError, match="not in index"):
+ df.loc[key]
def test_loc_multiindex_too_many_dims_raises(self):
# GH 14885
@@ -295,8 +294,8 @@ def convert_nested_indexer(indexer_type, keys):
[
([], []), # empty ok
(["A"], slice(3)),
- (["A", "D"], slice(3)),
- (["D", "E"], []), # no values found - fine
+ (["A", "D"], []), # "D" isnt present -> raise
+ (["D", "E"], []), # no values found -> raise
(["D"], []), # same, with single item list: GH 27148
(pd.IndexSlice[:, ["foo"]], slice(2, None, 3)),
(pd.IndexSlice[:, ["foo", "bah"]], slice(2, None, 3)),
@@ -310,8 +309,13 @@ def test_loc_getitem_duplicates_multiindex_missing_indexers(indexer, pos):
)
s = Series(np.arange(9, dtype="int64"), index=idx).sort_index()
expected = s.iloc[pos]
- result = s.loc[indexer]
- tm.assert_series_equal(result, expected)
+
+ if expected.size == 0 and indexer != []:
+ with pytest.raises(KeyError, match=str(indexer)):
+ s.loc[indexer]
+ else:
+ result = s.loc[indexer]
+ tm.assert_series_equal(result, expected)
def test_series_loc_getitem_fancy(multiindex_year_month_day_dataframe_random_data):
diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py
index f367a92d0b006..532bb4f2e6dac 100644
--- a/pandas/tests/indexing/multiindex/test_slice.py
+++ b/pandas/tests/indexing/multiindex/test_slice.py
@@ -118,11 +118,11 @@ def test_per_axis_per_level_getitem(self):
with pytest.raises(ValueError, match=msg):
df.loc[(slice(None), np.array([True, False])), :]
- # ambiguous notation
- # this is interpreted as slicing on both axes (GH #16396)
- result = df.loc[slice(None), [1]]
- expected = df.iloc[:, []]
- tm.assert_frame_equal(result, expected)
+ with pytest.raises(KeyError, match=r"\[1\] not in index"):
+ # slice(None) is on the index, [1] is on the columns, but 1 is
+ # not in the columns, so we raise
+ # This used to treat [1] as positional GH#16396
+ df.loc[slice(None), [1]]
result = df.loc[(slice(None), [1]), :]
expected = df.iloc[[0, 3]]
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 9ce31f5f6decf..164c63483f71f 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -114,6 +114,19 @@ def test_getitem_intlist_intervalindex_non_int(self, box):
result = ser[key]
tm.assert_series_equal(result, expected)
+ @pytest.mark.parametrize("box", [list, np.array, pd.Index])
+ @pytest.mark.parametrize("dtype", [np.int64, np.float64, np.uint64])
+ def test_getitem_intlist_multiindex_numeric_level(self, dtype, box):
+ # GH#33404 do _not_ fall back to positional since ints are ambiguous
+ idx = pd.Index(range(4)).astype(dtype)
+ dti = date_range("2000-01-03", periods=3)
+ mi = pd.MultiIndex.from_product([idx, dti])
+ ser = Series(range(len(mi))[::-1], index=mi)
+
+ key = box([5])
+ with pytest.raises(KeyError, match="5"):
+ ser[key]
+
def test_getitem_generator(string_series):
gen = (x > 0 for x in string_series)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This is one of the cases that @jorisvandenbossche identified in #33355. | https://api.github.com/repos/pandas-dev/pandas/pulls/33539 | 2020-04-14T02:51:16Z | 2020-04-25T21:02:28Z | 2020-04-25T21:02:28Z | 2020-04-25T21:22:00Z |
ENH: Implement IntegerArray.sum | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index cd1cb0b64f74a..726c9b8bde91d 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -660,6 +660,7 @@ Other
- Bug in :meth:`Series.map` not raising on invalid ``na_action`` (:issue:`32815`)
- Bug in :meth:`DataFrame.__dir__` caused a segfault when using unicode surrogates in a column name (:issue:`25509`)
- Bug in :meth:`DataFrame.plot.scatter` caused an error when plotting variable marker sizes (:issue:`32904`)
+- :class:`IntegerArray` now implements the ``sum`` operation (:issue:`33172`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py
index 260cc69187d38..30b4a365ad912 100644
--- a/pandas/compat/numpy/function.py
+++ b/pandas/compat/numpy/function.py
@@ -252,6 +252,7 @@ def validate_cum_func_with_skipna(skipna, args, kwargs, name):
STAT_FUNC_DEFAULTS["out"] = None
PROD_DEFAULTS = SUM_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
+SUM_DEFAULTS["axis"] = None
SUM_DEFAULTS["keepdims"] = False
SUM_DEFAULTS["initial"] = None
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 5605b3fbc5dfa..9d41071755e6f 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -7,6 +7,7 @@
from pandas._libs import lib, missing as libmissing
from pandas._typing import ArrayLike
from pandas.compat import set_function_name
+from pandas.compat.numpy import function as nv
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.base import ExtensionDtype
@@ -573,6 +574,13 @@ def _reduce(self, name: str, skipna: bool = True, **kwargs):
return result
+ def sum(self, skipna=True, min_count=0, **kwargs):
+ nv.validate_sum((), kwargs)
+ result = masked_reductions.sum(
+ values=self._data, mask=self._mask, skipna=skipna, min_count=min_count
+ )
+ return result
+
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
Parameters
diff --git a/pandas/tests/arrays/integer/test_function.py b/pandas/tests/arrays/integer/test_function.py
index bdf902d1aca62..44c3077228e80 100644
--- a/pandas/tests/arrays/integer/test_function.py
+++ b/pandas/tests/arrays/integer/test_function.py
@@ -113,6 +113,26 @@ def test_value_counts_empty():
tm.assert_series_equal(result, expected)
+@pytest.mark.parametrize("skipna", [True, False])
+@pytest.mark.parametrize("min_count", [0, 4])
+def test_integer_array_sum(skipna, min_count):
+ arr = pd.array([1, 2, 3, None], dtype="Int64")
+ result = arr.sum(skipna=skipna, min_count=min_count)
+ if skipna and min_count == 0:
+ assert result == 6
+ else:
+ assert result is pd.NA
+
+
+@pytest.mark.parametrize(
+ "values, expected", [([1, 2, 3], 6), ([1, 2, 3, None], 6), ([None], 0)]
+)
+def test_integer_array_numpy_sum(values, expected):
+ arr = pd.array(values, dtype="Int64")
+ result = np.sum(arr)
+ assert result == expected
+
+
# TODO(jreback) - these need testing / are broken
# shift
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I think this is mostly interesting in that it allows normalize=True for value_counts on an IntegerArray backed Series, which currently doesn't work:
```python
[ins] In [1]: s = pd.Series([1, 2, 3], dtype="Int64")
[ins] In [2]: s.value_counts(normalize=True)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-2-2bf1a78353e5> in <module>
----> 1 s.value_counts(normalize=True)
~/pandas/pandas/core/base.py in value_counts(self, normalize, sort, ascending, bins, dropna)
1252 normalize=normalize,
1253 bins=bins,
-> 1254 dropna=dropna,
1255 )
1256 return result
~/pandas/pandas/core/algorithms.py in value_counts(values, sort, ascending, normalize, bins, dropna)
725
726 if normalize:
--> 727 result = result / float(counts.sum())
728
729 return result
AttributeError: 'IntegerArray' object has no attribute 'sum'
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/33538 | 2020-04-14T02:46:14Z | 2020-04-25T08:05:18Z | 2020-04-25T08:05:18Z | 2020-04-25T13:38:58Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.