title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
CLN: Remove redundant tests for .duplicated and .drop_duplicates in tests/base | diff --git a/pandas/conftest.py b/pandas/conftest.py
index dcfc523315c8b..d8f96021cdb15 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -425,6 +425,15 @@ def nselect_method(request):
return request.param
+@pytest.fixture(params=["first", "last", False])
+def keep(request):
+ """
+ Valid values for the 'keep' parameter used in
+ .duplicated or .drop_duplicates
+ """
+ return request.param
+
+
@pytest.fixture(params=["left", "right", "both", "neither"])
def closed(request):
"""
diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py
index 8f48d0a3e8378..dc7f85de15de3 100644
--- a/pandas/tests/base/test_ops.py
+++ b/pandas/tests/base/test_ops.py
@@ -594,108 +594,6 @@ def test_factorize_repeated(self):
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
- def test_duplicated_drop_duplicates_index(self):
- # GH 4060
- for original in self.objs:
- if isinstance(original, Index):
-
- # special case
- if original.is_boolean():
- result = original.drop_duplicates()
- expected = Index([False, True], name="a")
- tm.assert_index_equal(result, expected)
- continue
-
- # original doesn't have duplicates
- expected = np.array([False] * len(original), dtype=bool)
- duplicated = original.duplicated()
- tm.assert_numpy_array_equal(duplicated, expected)
- assert duplicated.dtype == bool
- result = original.drop_duplicates()
- tm.assert_index_equal(result, original)
- assert result is not original
-
- # has_duplicates
- assert not original.has_duplicates
-
- # create repeated values, 3rd and 5th values are duplicated
- idx = original[list(range(len(original))) + [5, 3]]
- expected = np.array([False] * len(original) + [True, True], dtype=bool)
- duplicated = idx.duplicated()
- tm.assert_numpy_array_equal(duplicated, expected)
- assert duplicated.dtype == bool
- tm.assert_index_equal(idx.drop_duplicates(), original)
-
- base = [False] * len(idx)
- base[3] = True
- base[5] = True
- expected = np.array(base)
-
- duplicated = idx.duplicated(keep="last")
- tm.assert_numpy_array_equal(duplicated, expected)
- assert duplicated.dtype == bool
- result = idx.drop_duplicates(keep="last")
- tm.assert_index_equal(result, idx[~expected])
-
- base = [False] * len(original) + [True, True]
- base[3] = True
- base[5] = True
- expected = np.array(base)
-
- duplicated = idx.duplicated(keep=False)
- tm.assert_numpy_array_equal(duplicated, expected)
- assert duplicated.dtype == bool
- result = idx.drop_duplicates(keep=False)
- tm.assert_index_equal(result, idx[~expected])
-
- with pytest.raises(
- TypeError,
- match=r"drop_duplicates\(\) got an unexpected keyword argument",
- ):
- idx.drop_duplicates(inplace=True)
-
- else:
- expected = Series(
- [False] * len(original), index=original.index, name="a"
- )
- tm.assert_series_equal(original.duplicated(), expected)
- result = original.drop_duplicates()
- tm.assert_series_equal(result, original)
- assert result is not original
-
- idx = original.index[list(range(len(original))) + [5, 3]]
- values = original._values[list(range(len(original))) + [5, 3]]
- s = Series(values, index=idx, name="a")
-
- expected = Series(
- [False] * len(original) + [True, True], index=idx, name="a"
- )
- tm.assert_series_equal(s.duplicated(), expected)
- tm.assert_series_equal(s.drop_duplicates(), original)
-
- base = [False] * len(idx)
- base[3] = True
- base[5] = True
- expected = Series(base, index=idx, name="a")
-
- tm.assert_series_equal(s.duplicated(keep="last"), expected)
- tm.assert_series_equal(
- s.drop_duplicates(keep="last"), s[~np.array(base)]
- )
-
- base = [False] * len(original) + [True, True]
- base[3] = True
- base[5] = True
- expected = Series(base, index=idx, name="a")
-
- tm.assert_series_equal(s.duplicated(keep=False), expected)
- tm.assert_series_equal(
- s.drop_duplicates(keep=False), s[~np.array(base)]
- )
-
- s.drop_duplicates(inplace=True)
- tm.assert_series_equal(s, original)
-
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame(
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index c6ba5c9d61e9e..6f0920c11a6e6 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -302,32 +302,65 @@ def test_pickle(self, indices):
assert indices.equals(unpickled)
indices.name = original_name
- @pytest.mark.parametrize("keep", ["first", "last", False])
- def test_duplicated(self, indices, keep):
- if not len(indices) or isinstance(indices, (MultiIndex, RangeIndex)):
- # MultiIndex tested separately in:
- # tests/indexes/multi/test_unique_and_duplicates
- pytest.skip("Skip check for empty Index, MultiIndex, RangeIndex")
-
+ def test_drop_duplicates(self, indices, keep):
+ if isinstance(indices, MultiIndex):
+ pytest.skip("MultiIndex is tested separately")
+ if isinstance(indices, RangeIndex):
+ pytest.skip(
+ "RangeIndex is tested in test_drop_duplicates_no_duplicates"
+ " as it cannot hold duplicates"
+ )
+ if len(indices) == 0:
+ pytest.skip(
+ "empty index is tested in test_drop_duplicates_no_duplicates"
+ " as it cannot hold duplicates"
+ )
+
+ # make unique index
holder = type(indices)
+ unique_values = list(set(indices))
+ unique_idx = holder(unique_values)
+
+ # make duplicated index
+ n = len(unique_idx)
+ duplicated_selection = np.random.choice(n, int(n * 1.5))
+ idx = holder(unique_idx.values[duplicated_selection])
+
+ # Series.duplicated is tested separately
+ expected_duplicated = (
+ pd.Series(duplicated_selection).duplicated(keep=keep).values
+ )
+ tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated)
+
+ # Series.drop_duplicates is tested separately
+ expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep))
+ tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped)
+
+ def test_drop_duplicates_no_duplicates(self, indices):
+ if isinstance(indices, MultiIndex):
+ pytest.skip("MultiIndex is tested separately")
- idx = holder(indices)
- if idx.has_duplicates:
- # We are testing the duplicated-method here, so we need to know
- # exactly which indices are duplicate and how (for the result).
- # This is not possible if "idx" has duplicates already, which we
- # therefore remove. This is seemingly circular, as drop_duplicates
- # invokes duplicated, but in the end, it all works out because we
- # cross-check with Series.duplicated, which is tested separately.
- idx = idx.drop_duplicates()
-
- n, k = len(idx), 10
- duplicated_selection = np.random.choice(n, k * n)
- expected = pd.Series(duplicated_selection).duplicated(keep=keep).values
- idx = holder(idx.values[duplicated_selection])
-
- result = idx.duplicated(keep=keep)
- tm.assert_numpy_array_equal(result, expected)
+ # make unique index
+ if isinstance(indices, RangeIndex):
+ # RangeIndex cannot have duplicates
+ unique_idx = indices
+ else:
+ holder = type(indices)
+ unique_values = list(set(indices))
+ unique_idx = holder(unique_values)
+
+ # check on unique index
+ expected_duplicated = np.array([False] * len(unique_idx), dtype="bool")
+ tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated)
+ result_dropped = unique_idx.drop_duplicates()
+ tm.assert_index_equal(result_dropped, unique_idx)
+ # validate shallow copy
+ assert result_dropped is not unique_idx
+
+ def test_drop_duplicates_inplace(self, indices):
+ msg = r"drop_duplicates\(\) got an unexpected keyword argument"
+ with pytest.raises(TypeError, match=msg):
+ indices.drop_duplicates(inplace=True)
def test_has_duplicates(self, indices):
holder = type(indices)
diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py
index 2d052505d5ecc..54f32f979232d 100644
--- a/pandas/tests/series/methods/test_drop_duplicates.py
+++ b/pandas/tests/series/methods/test_drop_duplicates.py
@@ -44,6 +44,26 @@ def test_drop_duplicates_bool(keep, expected):
tm.assert_series_equal(sc, tc[~expected])
+@pytest.mark.parametrize("values", [[], list(range(5))])
+def test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values):
+ tc = Series(values, dtype=np.dtype(any_numpy_dtype))
+ expected = Series([False] * len(tc), dtype="bool")
+
+ if tc.dtype == "bool":
+ # 0 -> False and 1-> True
+ # any other value would be duplicated
+ tc = tc[:2]
+ expected = expected[:2]
+
+ tm.assert_series_equal(tc.duplicated(keep=keep), expected)
+
+ result_dropped = tc.drop_duplicates(keep=keep)
+ tm.assert_series_equal(result_dropped, tc)
+
+ # validate shallow copy
+ assert result_dropped is not tc
+
+
class TestSeriesDropDuplicates:
@pytest.mark.parametrize(
"dtype",
| part of #23877
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
duplicated and drop_duplicates are already thoroughly tested in `tests/indexes` and `tests/series`. I added comments to highlight the redundancy and extended the existing test cases where needed. | https://api.github.com/repos/pandas-dev/pandas/pulls/32487 | 2020-03-06T10:20:17Z | 2020-03-15T00:51:19Z | 2020-03-15T00:51:19Z | 2020-03-15T00:51:49Z |
CLN: Replace isinstance(foo, Class) with isinstance(foo, ABCClass) | diff --git a/pandas/__init__.py b/pandas/__init__.py
index 2b9a461e0e95d..da016e4d966bf 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -116,6 +116,7 @@
)
from pandas.core.arrays.sparse import SparseDtype
+from pandas.core.dtypes.generic import ABCSparseArray
from pandas.tseries.api import infer_freq
from pandas.tseries import offsets
@@ -341,7 +342,7 @@ class __SparseArray(type):
SparseArray = sa
def __instancecheck__(cls, other):
- return isinstance(other, cls.SparseArray)
+ return isinstance(other, ABCSparseArray)
class __SparseArraySub(metaclass=__SparseArray):
def emit_warning(dummy=0):
| - [x] ref #27353
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32485 | 2020-03-06T09:59:31Z | 2020-03-11T01:56:35Z | null | 2020-03-14T14:08:14Z |
CLN: Remove redundant index test from tests/base/test_ops.py | diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py
index c368db13b9017..b99af438d05fa 100644
--- a/pandas/tests/base/test_ops.py
+++ b/pandas/tests/base/test_ops.py
@@ -826,23 +826,6 @@ def test_access_by_position(self, indices):
with pytest.raises(IndexError, match=msg):
series.iloc[size]
- @pytest.mark.parametrize("indexer_klass", [list, pd.Index])
- @pytest.mark.parametrize(
- "indexer",
- [
- [True] * 10,
- [False] * 10,
- [True, False, True, True, False, False, True, True, False, True],
- ],
- )
- def test_bool_indexing(self, indexer_klass, indexer):
- # GH 22533
- for idx in self.indexes:
- exp_idx = [i for i in range(len(indexer)) if indexer[i]]
- tm.assert_index_equal(idx[indexer_klass(indexer)], idx[exp_idx])
- s = pd.Series(idx)
- tm.assert_series_equal(s[indexer_klass(indexer)], s.iloc[exp_idx])
-
def test_get_indexer_non_unique_dtype_mismatch(self):
# GH 25459
indexes, missing = pd.Index(["A", "B"]).get_indexer_non_unique(pd.Index([0]))
diff --git a/pandas/tests/indexing/test_na_indexing.py b/pandas/tests/indexing/test_na_indexing.py
index 345ca30ec77eb..9e8ef6e6e1c22 100644
--- a/pandas/tests/indexing/test_na_indexing.py
+++ b/pandas/tests/indexing/test_na_indexing.py
@@ -7,6 +7,7 @@
@pytest.mark.parametrize(
"values, dtype",
[
+ ([], "object"),
([1, 2, 3], "int64"),
([1.0, 2.0, 3.0], "float64"),
(["a", "b", "c"], "object"),
@@ -22,42 +23,43 @@
@pytest.mark.parametrize(
"mask", [[True, False, False], [True, True, True], [False, False, False]]
)
-@pytest.mark.parametrize("box_mask", [True, False])
+@pytest.mark.parametrize("indexer_class", [list, pd.array, pd.Index, pd.Series])
@pytest.mark.parametrize("frame", [True, False])
-def test_series_mask_boolean(values, dtype, mask, box_mask, frame):
- ser = pd.Series(values, dtype=dtype, index=["a", "b", "c"])
- if frame:
- ser = ser.to_frame()
- mask = pd.array(mask, dtype="boolean")
- if box_mask:
- mask = pd.Series(mask, index=ser.index)
-
- expected = ser[mask.astype("bool")]
+def test_series_mask_boolean(values, dtype, mask, indexer_class, frame):
+ # In case len(values) < 3
+ index = ["a", "b", "c"][: len(values)]
+ mask = mask[: len(values)]
- result = ser[mask]
- tm.assert_equal(result, expected)
-
- if not box_mask:
- # Series.iloc[Series[bool]] isn't allowed
- result = ser.iloc[mask]
- tm.assert_equal(result, expected)
+ obj = pd.Series(values, dtype=dtype, index=index)
+ if frame:
+ if len(values) == 0:
+ # Otherwise obj is an empty DataFrame with shape (0, 1)
+ obj = pd.DataFrame(dtype=dtype)
+ else:
+ obj = obj.to_frame()
+
+ if indexer_class is pd.array:
+ mask = pd.array(mask, dtype="boolean")
+ elif indexer_class is pd.Series:
+ mask = pd.Series(mask, index=obj.index, dtype="boolean")
+ else:
+ mask = indexer_class(mask)
- result = ser.loc[mask]
- tm.assert_equal(result, expected)
+ expected = obj[mask]
- # empty
- mask = mask[:0]
- ser = ser.iloc[:0]
- expected = ser[mask.astype("bool")]
- result = ser[mask]
+ result = obj[mask]
tm.assert_equal(result, expected)
- if not box_mask:
- # Series.iloc[Series[bool]] isn't allowed
- result = ser.iloc[mask]
+ if indexer_class is pd.Series:
+ msg = "iLocation based boolean indexing cannot use an indexable as a mask"
+ with pytest.raises(ValueError, match=msg):
+ result = obj.iloc[mask]
+ tm.assert_equal(result, expected)
+ else:
+ result = obj.iloc[mask]
tm.assert_equal(result, expected)
- result = ser.loc[mask]
+ result = obj.loc[mask]
tm.assert_equal(result, expected)
| part of #23877
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Boolean indexing is already thoroughly tested in `test_series_mask_boolean` in `tests/indexes/test_na_indexing.py` - [see here](https://github.com/pandas-dev/pandas/blob/master/pandas/tests/indexing/test_na_indexing.py#L7-L61) | https://api.github.com/repos/pandas-dev/pandas/pulls/32484 | 2020-03-06T09:48:04Z | 2020-03-14T20:12:54Z | 2020-03-14T20:12:54Z | 2020-03-14T20:12:58Z |
CLN: Split and fixturized test_fillna in tests/base/test_ops.py | diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py
index c368db13b9017..5fb5072e5c9d9 100644
--- a/pandas/tests/base/test_ops.py
+++ b/pandas/tests/base/test_ops.py
@@ -18,6 +18,7 @@
is_object_dtype,
needs_i8_conversion,
)
+from pandas.core.dtypes.generic import ABCMultiIndex
import pandas as pd
from pandas import (
@@ -694,58 +695,65 @@ def test_drop_duplicates_series_vs_dataframe(self):
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
- def test_fillna(self):
+ def test_fillna(self, index_or_series_obj):
# # GH 11343
# though Index.fillna and Series.fillna has separate impl,
# test here to confirm these works as the same
- for orig in self.objs:
-
- o = orig.copy()
- values = o.values
-
- # values will not be changed
- result = o.fillna(o.astype(object).values[0])
- if isinstance(o, Index):
- tm.assert_index_equal(o, result)
- else:
- tm.assert_series_equal(o, result)
- # check shallow_copied
- assert o is not result
-
- for null_obj in [np.nan, None]:
- for orig in self.objs:
- o = orig.copy()
- klass = type(o)
+ obj = index_or_series_obj
+ if isinstance(obj, ABCMultiIndex):
+ pytest.skip("MultiIndex doesn't support isna")
+
+ # values will not be changed
+ fill_value = obj.values[0] if len(obj) > 0 else 0
+ result = obj.fillna(fill_value)
+ if isinstance(obj, Index):
+ tm.assert_index_equal(obj, result)
+ else:
+ tm.assert_series_equal(obj, result)
- if not allow_na_ops(o):
- continue
+ # check shallow_copied
+ if isinstance(obj, Series) and len(obj) == 0:
+ # TODO: GH-32543
+ pytest.xfail("Shallow copy for empty Series is bugged")
+ assert obj is not result
- if needs_i8_conversion(o):
+ @pytest.mark.parametrize("null_obj", [np.nan, None])
+ def test_fillna_null(self, null_obj, index_or_series_obj):
+ # # GH 11343
+ # though Index.fillna and Series.fillna has separate impl,
+ # test here to confirm these works as the same
+ obj = index_or_series_obj
+ klass = type(obj)
- values = o.astype(object).values
- fill_value = values[0]
- values[0:2] = pd.NaT
- else:
- values = o.values.copy()
- fill_value = o.values[0]
- values[0:2] = null_obj
+ if not allow_na_ops(obj):
+ pytest.skip(f"{klass} doesn't allow for NA operations")
+ elif len(obj) < 1:
+ pytest.skip("Test doesn't make sense on empty data")
+ elif isinstance(obj, ABCMultiIndex):
+ pytest.skip(f"MultiIndex can't hold '{null_obj}'")
- expected = [fill_value] * 2 + list(values[2:])
+ values = obj.values
+ fill_value = values[0]
+ expected = values.copy()
+ if needs_i8_conversion(obj):
+ values[0:2] = iNaT
+ expected[0:2] = fill_value
+ else:
+ values[0:2] = null_obj
+ expected[0:2] = fill_value
- expected = klass(expected, dtype=orig.dtype)
- o = klass(values)
+ expected = klass(expected)
+ obj = klass(values)
- # check values has the same dtype as the original
- assert o.dtype == orig.dtype
+ result = obj.fillna(fill_value)
+ if isinstance(obj, Index):
+ tm.assert_index_equal(result, expected)
+ else:
+ tm.assert_series_equal(result, expected)
- result = o.fillna(fill_value)
- if isinstance(o, Index):
- tm.assert_index_equal(result, expected)
- else:
- tm.assert_series_equal(result, expected)
- # check shallow_copied
- assert o is not result
+ # check shallow_copied
+ assert obj is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self, index_or_series_obj):
| part of #23877
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/32483 | 2020-03-06T09:23:46Z | 2020-03-11T03:53:52Z | 2020-03-11T03:53:52Z | 2020-03-11T03:54:08Z |
CLN: imports in pandas/io/excel/_base.py | diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index d2f9dd285582f..f98d9501f1f73 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -1,5 +1,5 @@
import abc
-from datetime import date, datetime, timedelta
+import datetime
from io import BytesIO
import os
from textwrap import fill
@@ -28,7 +28,6 @@
_pop_header_name,
get_writer,
)
-from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
_read_excel_doc = (
@@ -742,11 +741,11 @@ def _value_with_fmt(self, val):
val = float(val)
elif is_bool(val):
val = bool(val)
- elif isinstance(val, datetime):
+ elif isinstance(val, datetime.datetime):
fmt = self.datetime_format
- elif isinstance(val, date):
+ elif isinstance(val, datetime.date):
fmt = self.date_format
- elif isinstance(val, timedelta):
+ elif isinstance(val, datetime.timedelta):
val = val.total_seconds() / float(86400)
fmt = "0"
else:
@@ -763,9 +762,7 @@ def check_extension(cls, ext):
if ext.startswith("."):
ext = ext[1:]
if not any(ext in extension for extension in cls.supported_extensions):
- msg = "Invalid extension for engine"
- f"'{pprint_thing(cls.engine)}': '{pprint_thing(ext)}'"
- raise ValueError(msg)
+ raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'")
else:
return True
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32481 | 2020-03-06T08:12:48Z | 2020-03-06T19:01:12Z | 2020-03-06T19:01:12Z | 2020-03-07T11:00:43Z |
BUG: Fix issue with datetime[ns, tz] input in Block.setitem GH32395 | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index a3499f857d158..96cb64e1ffb0e 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -726,6 +726,7 @@ Indexing
- Bug in :meth:`DataFrame.iloc.__setitem__` creating a new array instead of overwriting ``Categorical`` values in-place (:issue:`32831`)
- Bug in :class:`Interval` where a :class:`Timedelta` could not be added or subtracted from a :class:`Timestamp` interval (:issue:`32023`)
- Bug in :meth:`DataFrame.copy` _item_cache not invalidated after copy causes post-copy value updates to not be reflected (:issue:`31784`)
+- Fixed regression in :meth:`DataFrame.loc` and :meth:`Series.loc` throwing an error when a ``datetime64[ns, tz]`` value is provided (:issue:`32395`)
- Bug in `Series.__getitem__` with an integer key and a :class:`MultiIndex` with leading integer level failing to raise ``KeyError`` if the key is not present in the first level (:issue:`33355`)
- Bug in :meth:`DataFrame.iloc` when slicing a single column-:class:`DataFrame`` with ``ExtensionDtype`` (e.g. ``df.iloc[:, :1]``) returning an invalid result (:issue:`32957`)
- Bug in :meth:`DatetimeIndex.insert` and :meth:`TimedeltaIndex.insert` causing index ``freq`` to be lost when setting an element into an empty :class:`Series` (:issue:33573`)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index c052c6c9d7d1d..d880bd81bd947 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -824,8 +824,10 @@ def setitem(self, indexer, value):
if is_extension_array_dtype(getattr(value, "dtype", None)):
# We need to be careful not to allow through strings that
# can be parsed to EADtypes
+ is_ea_value = True
arr_value = value
else:
+ is_ea_value = False
arr_value = np.array(value)
if transpose:
@@ -853,6 +855,11 @@ def setitem(self, indexer, value):
values[indexer] = value
return self.make_block(Categorical(self.values, dtype=arr_value.dtype))
+ elif exact_match and is_ea_value:
+ # GH#32395 if we're going to replace the values entirely, just
+ # substitute in the new array
+ return self.make_block(arr_value)
+
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif exact_match:
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index dece8098c8542..eed9a584cc030 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -301,3 +301,31 @@ def test_setitem_preserves_views(self, data):
data[0] = data[1]
assert view1[0] == data[1]
assert view2[0] == data[1]
+
+ def test_setitem_dataframe_column_with_index(self, data):
+ # https://github.com/pandas-dev/pandas/issues/32395
+ df = expected = pd.DataFrame({"data": pd.Series(data)})
+ result = pd.DataFrame(index=df.index)
+ result.loc[df.index, "data"] = df["data"]
+ self.assert_frame_equal(result, expected)
+
+ def test_setitem_dataframe_column_without_index(self, data):
+ # https://github.com/pandas-dev/pandas/issues/32395
+ df = expected = pd.DataFrame({"data": pd.Series(data)})
+ result = pd.DataFrame(index=df.index)
+ result.loc[:, "data"] = df["data"]
+ self.assert_frame_equal(result, expected)
+
+ def test_setitem_series_with_index(self, data):
+ # https://github.com/pandas-dev/pandas/issues/32395
+ ser = expected = pd.Series(data, name="data")
+ result = pd.Series(index=ser.index, dtype=np.object, name="data")
+ result.loc[ser.index] = ser
+ self.assert_series_equal(result, expected)
+
+ def test_setitem_series_without_index(self, data):
+ # https://github.com/pandas-dev/pandas/issues/32395
+ ser = expected = pd.Series(data, name="data")
+ result = pd.Series(index=ser.index, dtype=np.object, name="data")
+ result.loc[:] = ser
+ self.assert_series_equal(result, expected)
| - [x] closes #32395
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32479 | 2020-03-06T03:08:40Z | 2020-05-25T17:39:42Z | 2020-05-25T17:39:42Z | 2020-05-25T19:29:50Z |
Made apt changes to pandas.Series.str.replace() | diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 3be9c5fcdfb26..c4bdbaff1649c 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -572,7 +572,7 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):
r"""
Replace occurrences of pattern/regex in the Series/Index with
some other string. Equivalent to :meth:`str.replace` or
- :func:`re.sub`.
+ :func:`re.sub`, depending on the regex value.
Parameters
----------
| - [x] closes #31225
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/32478 | 2020-03-06T02:14:16Z | 2020-03-06T15:25:33Z | 2020-03-06T15:25:33Z | 2020-03-20T03:20:23Z |
BUG: iloc.__setitem__ with duplicate columns | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index e745bf3f5feed..6c0ad313350ed 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -262,7 +262,7 @@ Indexing
- Bug in :meth:`Series.xs` incorrectly returning ``Timestamp`` instead of ``datetime64`` in some object-dtype cases (:issue:`31630`)
- Bug in :meth:`DataFrame.iat` incorrectly returning ``Timestamp`` instead of ``datetime`` in some object-dtype cases (:issue:`32809`)
- Bug in :meth:`Series.loc` and :meth:`DataFrame.loc` when indexing with an integer key on a object-dtype :class:`Index` that is not all-integers (:issue:`31905`)
--
+- Bug in :meth:`DataFrame.iloc.__setitem__` on a :class:`DataFrame` with duplicate columns incorrectly setting values for all matching columns (:issue:`15686`, :issue:`22036`)
Missing
^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index cd5d81bc70dd9..83d44e546356c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2706,6 +2706,20 @@ def _setitem_frame(self, key, value):
self._check_setitem_copy()
self._where(-key, value, inplace=True)
+ def _iset_item(self, loc: int, value):
+ self._ensure_valid_index(value)
+
+ # technically _sanitize_column expects a label, not a position,
+ # but the behavior is the same as long as we pass broadcast=False
+ value = self._sanitize_column(loc, value, broadcast=False)
+ NDFrame._iset_item(self, loc, value)
+
+ # check if we are modifying a copy
+ # try to set first as we want an invalid
+ # value exception to occur first
+ if len(self):
+ self._check_setitem_copy()
+
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e6c5ac9dbf733..1555de92c6ab6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3579,6 +3579,10 @@ def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:
result._set_is_copy(self, copy=is_copy)
return result
+ def _iset_item(self, loc: int, value) -> None:
+ self._data.iset(loc, value)
+ self._clear_item_cache()
+
def _set_item(self, key, value) -> None:
self._data.set(key, value)
self._clear_item_cache()
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 9a671c7fc170a..c9362a0527c06 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1615,6 +1615,12 @@ def _setitem_with_indexer(self, indexer, value):
info_idx = [info_idx]
labels = item_labels[info_idx]
+ # Ensure we have something we can iterate over
+ ilocs = info_idx
+ if isinstance(info_idx, slice):
+ ri = Index(range(len(self.obj.columns)))
+ ilocs = ri[info_idx]
+
plane_indexer = indexer[:1]
lplane_indexer = length_of_indexer(plane_indexer[0], self.obj.index)
# lplane_indexer gives the expected length of obj[indexer[0]]
@@ -1632,9 +1638,11 @@ def _setitem_with_indexer(self, indexer, value):
"length than the value"
)
- def setter(item, v):
- ser = self.obj[item]
- pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
+ pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
+
+ def isetter(loc, v):
+ # positional setting on column loc
+ ser = self.obj._ixs(loc, axis=1)
# perform the equivalent of a setitem on the info axis
# as we have a null slice or a slice with full bounds
@@ -1654,7 +1662,7 @@ def setter(item, v):
ser._maybe_update_cacher(clear=True)
# reset the sliced object if unique
- self.obj[item] = ser
+ self.obj._iset_item(loc, ser)
# we need an iterable, with a ndim of at least 1
# eg. don't pass through np.array(0)
@@ -1664,8 +1672,10 @@ def setter(item, v):
if isinstance(value, ABCDataFrame):
sub_indexer = list(indexer)
multiindex_indexer = isinstance(labels, ABCMultiIndex)
+ # TODO: we are implicitly assuming value.columns is unique
- for item in labels:
+ for loc in ilocs:
+ item = item_labels[loc]
if item in value:
sub_indexer[info_axis] = item
v = self._align_series(
@@ -1674,7 +1684,7 @@ def setter(item, v):
else:
v = np.nan
- setter(item, v)
+ isetter(loc, v)
# we have an equal len ndarray/convertible to our labels
# hasattr first, to avoid coercing to ndarray without reason.
@@ -1685,16 +1695,15 @@ def setter(item, v):
# note that this coerces the dtype if we are mixed
# GH 7551
value = np.array(value, dtype=object)
- if len(labels) != value.shape[1]:
+ if len(ilocs) != value.shape[1]:
raise ValueError(
"Must have equal len keys and value "
"when setting with an ndarray"
)
- for i, item in enumerate(labels):
-
+ for i, loc in enumerate(ilocs):
# setting with a list, re-coerces
- setter(item, value[:, i].tolist())
+ isetter(loc, value[:, i].tolist())
elif (
len(labels) == 1
@@ -1702,7 +1711,8 @@ def setter(item, v):
and not is_scalar(plane_indexer[0])
):
# we have an equal len list/ndarray
- setter(labels[0], value)
+ # We only get here with len(labels) == len(ilocs) == 1
+ isetter(ilocs[0], value)
elif lplane_indexer == 0 and len(value) == len(self.obj.index):
# We get here in one case via .loc with a all-False mask
@@ -1710,19 +1720,19 @@ def setter(item, v):
else:
# per-label values
- if len(labels) != len(value):
+ if len(ilocs) != len(value):
raise ValueError(
"Must have equal len keys and value "
"when setting with an iterable"
)
- for item, v in zip(labels, value):
- setter(item, v)
+ for loc, v in zip(ilocs, value):
+ isetter(loc, v)
else:
- # scalar
- for item in labels:
- setter(item, value)
+ # scalar value
+ for loc in ilocs:
+ isetter(loc, value)
else:
if isinstance(indexer, tuple):
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 98afc5ac3a0e3..d0a9e5cdcc6ee 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1086,7 +1086,10 @@ def value_getitem(placement):
"Shape of new values must be compatible with manager shape"
)
- if isinstance(loc, int):
+ if lib.is_integer(loc):
+ # We have 6 tests where loc is _not_ an int.
+ # In this case, get_blkno_placements will yield only one tuple,
+ # containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1)))
loc = [loc]
# Accessing public blknos ensures the public versions are initialized
@@ -1138,7 +1141,7 @@ def value_getitem(placement):
# one item.
new_blocks.extend(
make_block(
- values=value.copy(),
+ values=value,
ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1),
)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 683d4f2605712..f6b9e9a44ba14 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -349,7 +349,6 @@ def test_iloc_setitem_dups(self):
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
- expected["A"] = expected["A"].astype("float64")
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
@@ -694,3 +693,32 @@ def test_series_indexing_zerodim_np_array(self):
s = Series([1, 2])
result = s.iloc[np.array(0)]
assert result == 1
+
+
+class TestILocSetItemDuplicateColumns:
+ def test_iloc_setitem_scalar_duplicate_columns(self):
+ # GH#15686, duplicate columns and mixed dtype
+ df1 = pd.DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}])
+ df2 = pd.DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}])
+ df = pd.concat([df1, df2], axis=1)
+ df.iloc[0, 0] = -1
+
+ assert df.iloc[0, 0] == -1
+ assert df.iloc[0, 2] == 3
+ assert df.dtypes.iloc[2] == np.int64
+
+ def test_iloc_setitem_list_duplicate_columns(self):
+ # GH#22036 setting with same-sized list
+ df = pd.DataFrame([[0, "str", "str2"]], columns=["a", "b", "b"])
+
+ df.iloc[:, 2] = ["str3"]
+
+ expected = pd.DataFrame([[0, "str", "str3"]], columns=["a", "b", "b"])
+ tm.assert_frame_equal(df, expected)
+
+ def test_iloc_setitem_series_duplicate_columns(self):
+ df = pd.DataFrame(
+ np.arange(8, dtype=np.int64).reshape(2, 4), columns=["A", "B", "A", "B"]
+ )
+ df.iloc[:, 0] = df.iloc[:, 0].astype(np.float64)
+ assert df.dtypes.iloc[2] == np.int64
| - [x] closes #15686
- [x] closes #22036
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32477 | 2020-03-06T00:33:41Z | 2020-03-11T02:35:22Z | 2020-03-11T02:35:21Z | 2020-03-11T02:38:32Z |
ENH: implement ExtensionIndex.insert | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index cef8a39d75a4c..76bb8db8de531 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5197,9 +5197,11 @@ def insert(self, loc: int, item):
-------
new_index : Index
"""
- _self = np.asarray(self)
- item = self._coerce_scalar_to_index(item)._ndarray_values
- idx = np.concatenate((_self[:loc], item, _self[loc:]))
+ # Note: this method is overriden by all ExtensionIndex subclasses,
+ # so self is never backed by an EA.
+ arr = np.asarray(self)
+ item = self._coerce_scalar_to_index(item)._values
+ idx = np.concatenate((arr[:loc], item, arr[loc:]))
return self._shallow_copy_with_infer(idx)
def drop(self, labels, errors: str_t = "raise"):
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index 4984fc27516ff..6d5f0dbb830f9 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -6,6 +6,7 @@
import numpy as np
from pandas.compat.numpy import function as nv
+from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.common import (
@@ -248,6 +249,10 @@ def repeat(self, repeats, axis=None):
result = self._data.repeat(repeats, axis=axis)
return self._shallow_copy(result)
+ def insert(self, loc: int, item):
+ # ExtensionIndex subclasses must override Index.insert
+ raise AbstractMethodError(self)
+
def _concat_same_dtype(self, to_concat, name):
arr = type(self._data)._concat_same_type(to_concat)
return type(self)._simple_new(arr, name=name)
| Sort of. | https://api.github.com/repos/pandas-dev/pandas/pulls/32476 | 2020-03-06T00:03:26Z | 2020-03-14T16:34:48Z | 2020-03-14T16:34:48Z | 2020-03-14T16:46:23Z |
CLN: assorted cleanups, annotations | diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 7bd02b734beeb..457f3eb0749c2 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1167,7 +1167,7 @@ class Timedelta(_Timedelta):
Possible values:
- * 'Y', 'M', 'W', 'D', 'T', 'S', 'L', 'U', or 'N'
+ * 'W', 'D', 'T', 'S', 'L', 'U', or 'N'
* 'days' or 'day'
* 'hours', 'hour', 'hr', or 'h'
* 'minutes', 'minute', 'min', or 'm'
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 56939cda6d21c..9f19c7ba0be6e 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -988,7 +988,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"):
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
- def to_pydatetime(self):
+ def to_pydatetime(self) -> np.ndarray:
"""
Return Datetime Array/Index as object ndarray of datetime.datetime
objects.
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 749489a0a04fb..dbc0b0b3ccbbf 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -825,7 +825,7 @@ def total_seconds(self):
"""
return self._maybe_mask_results(1e-9 * self.asi8, fill_value=None)
- def to_pytimedelta(self):
+ def to_pytimedelta(self) -> np.ndarray:
"""
Return Timedelta Array/Index as object ndarray of datetime.timedelta
objects.
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index db774a03c02f8..71ae92df1970b 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -1,6 +1,8 @@
"""
datetimelike delegation
"""
+from typing import TYPE_CHECKING
+
import numpy as np
from pandas.core.dtypes.common import (
@@ -21,9 +23,12 @@
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
+if TYPE_CHECKING:
+ from pandas import Series # noqa:F401
+
class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin):
- def __init__(self, data, orig):
+ def __init__(self, data: "Series", orig):
if not isinstance(data, ABCSeries):
raise TypeError(
f"cannot convert an object of type {type(data)} to a datetimelike index"
@@ -137,7 +142,7 @@ class DatetimeProperties(Properties):
Raises TypeError if the Series does not contain datetimelike values.
"""
- def to_pydatetime(self):
+ def to_pydatetime(self) -> np.ndarray:
"""
Return the data as an array of native Python datetime objects.
@@ -209,7 +214,7 @@ class TimedeltaProperties(Properties):
Raises TypeError if the Series does not contain datetimelike values.
"""
- def to_pytimedelta(self):
+ def to_pytimedelta(self) -> np.ndarray:
"""
Return an array of native `datetime.timedelta` objects.
@@ -271,7 +276,7 @@ def components(self):
2 0 0 0 2 0 0 0
3 0 0 0 3 0 0 0
4 0 0 0 4 0 0 0
- """ # noqa: E501
+ """
return self._get_values().components.set_index(self._parent.index)
@property
@@ -303,7 +308,7 @@ class PeriodProperties(Properties):
class CombinedDatetimelikeProperties(
DatetimeProperties, TimedeltaProperties, PeriodProperties
):
- def __new__(cls, data):
+ def __new__(cls, data: "Series"):
# CombinedDatetimelikeProperties isn't really instantiated. Instead
# we need to choose which parent (datetime or timedelta) is
# appropriate. Since we're checking the dtypes anyway, we'll just
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index f621a3c153adf..c21d8df2476b3 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -168,7 +168,7 @@ def _data(self):
return self._cached_data
@cache_readonly
- def _int64index(self):
+ def _int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
def _get_data_as_items(self):
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index f6e79a0f2045d..98afc5ac3a0e3 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -24,7 +24,6 @@
is_list_like,
is_numeric_v_string_like,
is_scalar,
- is_sparse,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import ExtensionDtype
@@ -32,6 +31,7 @@
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algos
+from pandas.core.arrays.sparse import SparseDtype
from pandas.core.base import PandasObject
from pandas.core.indexers import maybe_convert_indices
from pandas.core.indexes.api import Index, MultiIndex, ensure_index
@@ -843,8 +843,8 @@ def _interleave(self) -> np.ndarray:
# TODO: https://github.com/pandas-dev/pandas/issues/22791
# Give EAs some input on what happens here. Sparse needs this.
- if is_sparse(dtype):
- dtype = dtype.subtype # type: ignore
+ if isinstance(dtype, SparseDtype):
+ dtype = dtype.subtype
elif is_extension_array_dtype(dtype):
dtype = "object"
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 4398a1569ac56..725d1adc6d161 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -981,7 +981,7 @@ def nanskew(
Examples
--------
>>> import pandas.core.nanops as nanops
- >>> s = pd.Series([1,np.nan, 1, 2])
+ >>> s = pd.Series([1, np.nan, 1, 2])
>>> nanops.nanskew(s)
1.7320508075688787
"""
@@ -1065,7 +1065,7 @@ def nankurt(
Examples
--------
>>> import pandas.core.nanops as nanops
- >>> s = pd.Series([1,np.nan, 1, 3, 2])
+ >>> s = pd.Series([1, np.nan, 1, 3, 2])
>>> nanops.nankurt(s)
-1.2892561983471076
"""
diff --git a/pandas/core/series.py b/pandas/core/series.py
index bb4e222193608..cfdd5635b60a9 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -47,7 +47,6 @@
ABCMultiIndex,
ABCPeriodIndex,
ABCSeries,
- ABCSparseArray,
)
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import (
@@ -289,9 +288,6 @@ def __init__(
pass
elif isinstance(data, (set, frozenset)):
raise TypeError(f"'{type(data).__name__}' type is unordered")
- elif isinstance(data, ABCSparseArray):
- # handle sparse passed here (and force conversion)
- data = data.to_dense()
else:
data = com.maybe_iterable_to_list(data)
diff --git a/pandas/tests/frame/indexing/test_datetime.py b/pandas/tests/frame/indexing/test_datetime.py
index 6bfcac3793584..0fd60c151b9c4 100644
--- a/pandas/tests/frame/indexing/test_datetime.py
+++ b/pandas/tests/frame/indexing/test_datetime.py
@@ -40,7 +40,7 @@ def test_set_reset(self):
# set/reset
df = DataFrame({"A": [0, 1, 2]}, index=idx)
result = df.reset_index()
- assert result["foo"].dtype, "M8[ns, US/Eastern"
+ assert result["foo"].dtype == "datetime64[ns, US/Eastern]"
df = result.set_index("foo")
tm.assert_index_equal(df.index, idx)
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 1a1b7e8e1bd08..0426351310e36 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -4,7 +4,6 @@
import warnings
import numpy as np
-from pytz import AmbiguousTimeError
from pandas._libs.algos import unique_deltas
from pandas._libs.tslibs import Timedelta, Timestamp
@@ -288,10 +287,7 @@ def infer_freq(index, warn: bool = True) -> Optional[str]:
index = index.values
if not isinstance(index, pd.DatetimeIndex):
- try:
- index = pd.DatetimeIndex(index)
- except AmbiguousTimeError:
- index = pd.DatetimeIndex(index.asi8)
+ index = pd.DatetimeIndex(index)
inferer = _FrequencyInferer(index, warn=warn)
return inferer.get_freq()
@@ -490,6 +486,7 @@ def _is_business_daily(self) -> bool:
)
def _get_wom_rule(self) -> Optional[str]:
+ # FIXME: dont leave commented-out
# wdiffs = unique(np.diff(self.index.week))
# We also need -47, -49, -48 to catch index spanning year boundary
# if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all():
diff --git a/setup.cfg b/setup.cfg
index bbd8489622005..42c507a2b6b01 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -98,6 +98,7 @@ exclude_lines =
# Don't complain if non-runnable code isn't run:
if 0:
if __name__ == .__main__.:
+ if TYPE_CHECKING:
[coverage:html]
directory = coverage_html_report
| https://api.github.com/repos/pandas-dev/pandas/pulls/32475 | 2020-03-05T23:59:41Z | 2020-03-07T10:31:16Z | 2020-03-07T10:31:16Z | 2020-03-09T17:05:56Z | |
CLN: remove unreachable _internal_get_values in blocks | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 70fd3ecdc2098..c088b7020927b 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -3144,14 +3144,15 @@ def _safe_reshape(arr, new_shape):
return arr
-def _putmask_smart(v, mask, n):
+def _putmask_smart(v: np.ndarray, mask: np.ndarray, n) -> np.ndarray:
"""
Return a new ndarray, try to preserve dtype if possible.
Parameters
----------
- v : `values`, updated in-place (array like)
- mask : np.ndarray
+ v : np.ndarray
+ `values`, updated in-place.
+ mask : np.ndarray[bool]
Applies to both sides (array like).
n : `new values` either scalar or an array like aligned with `values`
@@ -3218,9 +3219,6 @@ def _putmask_preserve(nv, n):
# change the dtype if needed
dtype, _ = maybe_promote(n.dtype)
- if is_extension_array_dtype(v.dtype) and is_object_dtype(dtype):
- v = v._internal_get_values(dtype)
- else:
- v = v.astype(dtype)
+ v = v.astype(dtype)
return _putmask_preserve(v, n)
| _putmask_smart is only called from Block.putmask, which ExtensionBlock overrides. | https://api.github.com/repos/pandas-dev/pandas/pulls/32472 | 2020-03-05T21:16:25Z | 2020-03-07T11:02:48Z | 2020-03-07T11:02:48Z | 2020-03-07T15:14:48Z |
CLN: use _values_for_argsort for join_non_unique, join_monotonic | diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 40a169d03f39c..71a3ccfb64024 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1494,7 +1494,7 @@ def check_for_ordered(self, op):
)
def _values_for_argsort(self):
- return self._codes.copy()
+ return self._codes
def argsort(self, ascending=True, kind="quicksort", **kwargs):
"""
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 6f44b5abf5b04..1cd772a72058d 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3385,6 +3385,7 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False)
-------
join_index, (left_indexer, right_indexer)
"""
+ other = ensure_index(other)
self_is_mi = isinstance(self, ABCMultiIndex)
other_is_mi = isinstance(other, ABCMultiIndex)
@@ -3404,8 +3405,6 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False)
other, level, how=how, return_indexers=return_indexers
)
- other = ensure_index(other)
-
if len(other) == 0 and how in ("left", "outer"):
join_index = self._shallow_copy()
if return_indexers:
@@ -3567,16 +3566,26 @@ def _join_multi(self, other, how, return_indexers=True):
def _join_non_unique(self, other, how="left", return_indexers=False):
from pandas.core.reshape.merge import _get_join_indexers
+ # We only get here if dtypes match
+ assert self.dtype == other.dtype
+
+ if is_extension_array_dtype(self.dtype):
+ lvalues = self._data._values_for_argsort()
+ rvalues = other._data._values_for_argsort()
+ else:
+ lvalues = self._values
+ rvalues = other._values
+
left_idx, right_idx = _get_join_indexers(
- [self._ndarray_values], [other._ndarray_values], how=how, sort=True
+ [lvalues], [rvalues], how=how, sort=True
)
left_idx = ensure_platform_int(left_idx)
right_idx = ensure_platform_int(right_idx)
- join_index = np.asarray(self._ndarray_values.take(left_idx))
+ join_index = np.asarray(lvalues.take(left_idx))
mask = left_idx == -1
- np.putmask(join_index, mask, other._ndarray_values.take(right_idx))
+ np.putmask(join_index, mask, rvalues.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
@@ -3727,6 +3736,9 @@ def _get_leaf_sorter(labels):
return join_index
def _join_monotonic(self, other, how="left", return_indexers=False):
+ # We only get here with matching dtypes
+ assert other.dtype == self.dtype
+
if self.equals(other):
ret_index = other if how == "right" else self
if return_indexers:
@@ -3734,8 +3746,12 @@ def _join_monotonic(self, other, how="left", return_indexers=False):
else:
return ret_index
- sv = self._ndarray_values
- ov = other._ndarray_values
+ if is_extension_array_dtype(self.dtype):
+ sv = self._data._values_for_argsort()
+ ov = other._data._values_for_argsort()
+ else:
+ sv = self._values
+ ov = other._values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
| With the `.copy()` removed from `Categorical._values_for_argsort`, `ea_backed_index._data._values_for_argsort()` matches `ea_backed_index._ndarray_values` in all extant cases.
cc @jorisvandenbossche @TomAugspurger need to confirm
a) this is an intended-adjacent use of _values_for_argsort, and not just a coincidence that it matches extant behavior
b) the `.copy()` this removes from `Categorical._values_for_argsort` is not important for some un-tested reason
xref #32452, #32426 | https://api.github.com/repos/pandas-dev/pandas/pulls/32467 | 2020-03-05T18:29:29Z | 2020-03-11T02:13:24Z | 2020-03-11T02:13:24Z | 2020-03-13T15:53:19Z |
TST: Fixed xfail for tests in pandas/tests/tseries/offsets/test_offsets_properties.py | diff --git a/pandas/tests/tseries/offsets/test_offsets_properties.py b/pandas/tests/tseries/offsets/test_offsets_properties.py
index 716d3ff3faf1c..6e01c83c725bf 100644
--- a/pandas/tests/tseries/offsets/test_offsets_properties.py
+++ b/pandas/tests/tseries/offsets/test_offsets_properties.py
@@ -85,8 +85,6 @@
# Offset-specific behaviour tests
-# Based on CI runs: Always passes on OSX, fails on Linux, sometimes on Windows
-@pytest.mark.xfail(strict=False, reason="inconsistent between OSs, Pythons")
@given(gen_random_datetime, gen_yqm_offset)
def test_on_offset_implementations(dt, offset):
assume(not offset.normalize)
@@ -97,40 +95,38 @@ def test_on_offset_implementations(dt, offset):
assert offset.is_on_offset(dt) == (compare == dt)
-@pytest.mark.xfail(
- reason="res_v2 below is incorrect, needs to use the "
- "commented-out version with tz_localize. "
- "But with that fix in place, hypothesis then "
- "has errors in timezone generation."
-)
@given(gen_yqm_offset, gen_date_range)
def test_apply_index_implementations(offset, rng):
# offset.apply_index(dti)[i] should match dti[i] + offset
- assume(offset.n != 0) # TODO: test for that case separately
- # rng = pd.date_range(start='1/1/2000', periods=100000, freq='T')
+ # TODO: test for that case separately
+ assume(offset.n != 0)
+
ser = pd.Series(rng)
res = rng + offset
- res_v2 = offset.apply_index(rng)
- # res_v2 = offset.apply_index(rng.tz_localize(None)).tz_localize(rng.tz)
+ res_v2 = offset.apply_index(rng.tz_localize(None)).tz_localize(rng.tz)
assert (res == res_v2).all()
+ # apply_index is only for indexes, not series, so no res2_v2
assert res[0] == rng[0] + offset
assert res[-1] == rng[-1] + offset
res2 = ser + offset
- # apply_index is only for indexes, not series, so no res2_v2
+
+ # TODO: Check randomly assorted entries, not just first/last
assert res2.iloc[0] == ser.iloc[0] + offset
assert res2.iloc[-1] == ser.iloc[-1] + offset
- # TODO: Check randomly assorted entries, not just first/last
@pytest.mark.xfail # TODO: reason?
@given(gen_yqm_offset)
def test_shift_across_dst(offset):
- # GH#18319 check that 1) timezone is correctly normalized and
+ # GH#18319
+ # check that:
+ # 1) timezone is correctly normalized and
# 2) that hour is not incorrectly changed by this normalization
# Note that dti includes a transition across DST boundary
+
dti = pd.date_range(
start="2017-10-30 12:00:00", end="2017-11-06", freq="D", tz="US/Eastern"
)
| - [x] ref https://github.com/pandas-dev/pandas/pull/32459#issuecomment-595277125
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32465 | 2020-03-05T16:08:18Z | 2020-06-14T17:27:09Z | null | 2021-05-03T12:29:04Z |
Disallow .__call__() as workaround for non-named functions | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 84ad478226175..58aab96aa95eb 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -784,6 +784,7 @@ Reshaping
- Bug in :func:`concat` was not allowing for concatenation of ``DataFrame`` and ``Series`` with duplicate keys (:issue:`33654`)
- Bug in :func:`cut` raised an error when non-unique labels (:issue:`33141`)
- Bug in :meth:`DataFrame.replace` casts columns to ``object`` dtype if items in ``to_replace`` not in values (:issue:`32988`)
+- Ensure only named functions can be used in :func:`eval()` (:issue:`32460`)
Sparse
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index 6cd9a15b70d39..fcccc24ed7615 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -641,7 +641,7 @@ def visit_Attribute(self, node, **kwargs):
def visit_Call(self, node, side=None, **kwargs):
- if isinstance(node.func, ast.Attribute):
+ if isinstance(node.func, ast.Attribute) and node.func.attr != "__call__":
res = self.visit_Attribute(node.func)
elif not isinstance(node.func, ast.Name):
raise TypeError("Only named functions are supported")
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 1a07780462ea3..8612c05e6a996 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -1181,3 +1181,24 @@ def test_failing_character_outside_range(self, df):
def test_failing_hashtag(self, df):
with pytest.raises(SyntaxError):
df.query("`foo#bar` > 4")
+
+ def test_call_non_named_expression(self, df):
+ """
+ Only attributes and variables ('named functions') can be called.
+ .__call__() is not an allowed attribute because that would allow
+ calling anything.
+ https://github.com/pandas-dev/pandas/pull/32460
+ """
+
+ def func(*_):
+ return 1
+
+ funcs = [func] # noqa
+
+ df.eval("@func()")
+
+ with pytest.raises(TypeError, match="Only named functions are supported"):
+ df.eval("@funcs[0]()")
+
+ with pytest.raises(TypeError, match="Only named functions are supported"):
+ df.eval("@funcs[0].__call__()")
| Currently this script:
```python
import pandas as pd
funcs = [lambda: 1]
pd.eval("funcs[0]()")
```
Fails with:
TypeError: Only named functions are supported
however this can easily be worked around by adding `.__call__`:
```python
pd.eval("funcs[0].__call__()")
```
I'm assuming we don't want to allow this workaround. This PR ensures that it will fail with the same error. | https://api.github.com/repos/pandas-dev/pandas/pulls/32460 | 2020-03-05T12:07:04Z | 2020-05-13T15:53:45Z | 2020-05-13T15:53:44Z | 2020-05-13T15:53:48Z |
CLN: Using clearer imports | diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index dfe050c7bbff7..a48c3365947dc 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -4,7 +4,7 @@ import time
import locale
import calendar
import re
-from datetime import date as datetime_date
+import datetime
from _thread import allocate_lock as _thread_allocate_lock
@@ -288,20 +288,20 @@ def array_strptime(object[:] values, object fmt, bint exact=True, errors='raise'
elif iso_year != -1 and iso_week != -1:
year, julian = _calc_julian_from_V(iso_year, iso_week,
weekday + 1)
- # Cannot pre-calculate datetime_date() since can change in Julian
+ # Cannot pre-calculate datetime.date() since can change in Julian
# calculation and thus could have different value for the day of the wk
# calculation.
try:
if julian == -1:
# Need to add 1 to result since first day of the year is 1, not
# 0.
- ordinal = datetime_date(year, month, day).toordinal()
- julian = ordinal - datetime_date(year, 1, 1).toordinal() + 1
+ ordinal = datetime.date(year, month, day).toordinal()
+ julian = ordinal - datetime.date(year, 1, 1).toordinal() + 1
else:
# Assume that if they bothered to include Julian day it will
# be accurate.
- datetime_result = datetime_date.fromordinal(
- (julian - 1) + datetime_date(year, 1, 1).toordinal())
+ datetime_result = datetime.date.fromordinal(
+ (julian - 1) + datetime.date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
@@ -311,7 +311,7 @@ def array_strptime(object[:] values, object fmt, bint exact=True, errors='raise'
continue
raise
if weekday == -1:
- weekday = datetime_date(year, month, day).weekday()
+ weekday = datetime.date(year, month, day).weekday()
dts.year = year
dts.month = month
@@ -649,7 +649,7 @@ cdef int _calc_julian_from_U_or_W(int year, int week_of_year,
cdef:
int first_weekday, week_0_length, days_to_week
- first_weekday = datetime_date(year, 1, 1).weekday()
+ first_weekday = datetime.date(year, 1, 1).weekday()
# If we are dealing with the %U directive (week starts on Sunday), it's
# easier to just shift the view to Sunday being the first day of the
# week.
@@ -692,14 +692,14 @@ cdef (int, int) _calc_julian_from_V(int iso_year, int iso_week, int iso_weekday)
cdef:
int correction, ordinal
- correction = datetime_date(iso_year, 1, 4).isoweekday() + 3
+ correction = datetime.date(iso_year, 1, 4).isoweekday() + 3
ordinal = (iso_week * 7) + iso_weekday - correction
# ordinal may be negative or 0 now, which means the date is in the previous
# calendar year
if ordinal < 1:
- ordinal += datetime_date(iso_year, 1, 1).toordinal()
+ ordinal += datetime.date(iso_year, 1, 1).toordinal()
iso_year -= 1
- ordinal -= datetime_date(iso_year, 1, 1).toordinal()
+ ordinal -= datetime.date(iso_year, 1, 1).toordinal()
return iso_year, ordinal
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32459 | 2020-03-05T11:39:44Z | 2020-03-19T00:09:46Z | 2020-03-19T00:09:46Z | 2020-03-24T13:58:11Z |
CLN: Removed unused variable | diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 437406cbbd819..8374934ece2fe 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -367,11 +367,9 @@ def get_blkno_indexers(int64_t[:] blknos, bint group=True):
cdef:
int64_t cur_blkno
Py_ssize_t i, start, stop, n, diff
-
object blkno
list group_order
dict group_dict
- int64_t[:] res_view
n = blknos.shape[0]
@@ -418,12 +416,10 @@ def get_blkno_indexers(int64_t[:] blknos, bint group=True):
else:
tot_len = sum(stop - start for start, stop in slices)
result = np.empty(tot_len, dtype=np.int64)
- res_view = result
i = 0
for start, stop in slices:
for diff in range(start, stop):
- res_view[i] = diff
i += 1
yield blkno, result
| I can't see the reason for ```res_view``` to exist, the CI should tell me if I am wrong.
Also this is removing one warning when compiling ```pandas/_libs/internals.pyx```
This is the warning that is removing:
```
pandas/_libs/internals.c:8952:36: warning: ‘__pyx_t_23’ may be used uninitialized in this function [-Wmaybe-uninitialized]
8952 | __pyx_cur_scope->__pyx_t_9 = __pyx_t_23;
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/32458 | 2020-03-05T11:20:26Z | 2020-03-06T16:54:37Z | null | 2020-03-14T14:07:03Z |
Backport PR #32444 on branch 1.0.x (CI: ax.rowNum and ax.colNum attributes deprecated in Matplotlib 3.2) | diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index dd4034a97f58e..aa5516894c7e3 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -9,6 +9,8 @@
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
+from pandas.plotting._matplotlib import compat
+
def format_date_labels(ax, rot):
# mini version of autofmt_xdate
@@ -288,6 +290,12 @@ def _remove_labels_from_axis(axis):
def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
if nplots > 1:
+ if compat._mpl_ge_3_2_0():
+ row_num = lambda x: x.get_subplotspec().rowspan.start
+ col_num = lambda x: x.get_subplotspec().colspan.start
+ else:
+ row_num = lambda x: x.rowNum
+ col_num = lambda x: x.colNum
if nrows > 1:
try:
@@ -295,13 +303,13 @@ def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
# so that we can correctly handle 'gaps"
layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool)
for ax in axarr:
- layout[ax.rowNum, ax.colNum] = ax.get_visible()
+ layout[row_num(ax), col_num(ax)] = ax.get_visible()
for ax in axarr:
# only the last row of subplots should get x labels -> all
# other off layout handles the case that the subplot is
# the last in the column, because below is no subplot/gap.
- if not layout[ax.rowNum + 1, ax.colNum]:
+ if not layout[row_num(ax) + 1, col_num(ax)]:
continue
if sharex or len(ax.get_shared_x_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
| Backport PR #32444: CI: ax.rowNum and ax.colNum attributes deprecated in Matplotlib 3.2 | https://api.github.com/repos/pandas-dev/pandas/pulls/32456 | 2020-03-05T10:11:04Z | 2020-03-05T10:45:00Z | 2020-03-05T10:45:00Z | 2020-03-05T10:45:00Z |
VIZ: Fixing Matplotlib depecation warnings for ax.rowNum and ax.colNum | diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index 5743288982da4..1951f7dea2de8 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -295,13 +295,19 @@ def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
# so that we can correctly handle 'gaps"
layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool)
for ax in axarr:
- layout[ax.rowNum, ax.colNum] = ax.get_visible()
+ layout[
+ ax.get_subplotspec().rowspan.start,
+ ax.get_subplotspec().colspan.start,
+ ] = ax.get_visible()
for ax in axarr:
# only the last row of subplots should get x labels -> all
# other off layout handles the case that the subplot is
# the last in the column, because below is no subplot/gap.
- if not layout[ax.rowNum + 1, ax.colNum]:
+ if not layout[
+ ax.get_subplotspec().rowspan.start + 1,
+ ax.get_subplotspec().colspan.start,
+ ]:
continue
if sharex or len(ax.get_shared_x_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
| - [X] xref #32444
Fixing the deprecation warning in the docs (testing if we really need to be compatible with the old syntax) | https://api.github.com/repos/pandas-dev/pandas/pulls/32455 | 2020-03-05T08:20:17Z | 2020-03-05T08:37:01Z | null | 2020-03-05T08:37:01Z |
DEPS: Removing snappy from local/docs dependencies | diff --git a/environment.yml b/environment.yml
index cbdaf8e6c4217..ed381fc9d3529 100644
--- a/environment.yml
+++ b/environment.yml
@@ -94,7 +94,6 @@ dependencies:
- fastparquet>=0.3.2 # pandas.read_parquet, DataFrame.to_parquet
- pyarrow>=0.13.1 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather
- - python-snappy # required by pyarrow
- pyqt>=5.9.2 # pandas.read_clipboard
- pytables>=3.4.2 # pandas.read_hdf, DataFrame.to_hdf
diff --git a/requirements-dev.txt b/requirements-dev.txt
index a469cbdd93ceb..3678b6c3d49cd 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -62,7 +62,6 @@ xlwt
odfpy
fastparquet>=0.3.2
pyarrow>=0.13.1
-python-snappy
pyqt5>=5.9.2
tables>=3.4.2
s3fs
| - [X] xref #32417
The docs build should fail if snappy is required to build the docs, and should point out where it's being used. If it's not failed, I think this can be merged and we can remove snappy from the local and docs dependencies.
| https://api.github.com/repos/pandas-dev/pandas/pulls/32454 | 2020-03-05T07:54:10Z | 2020-03-09T20:47:22Z | null | 2020-03-09T20:47:23Z |
CLN: avoid _ndarray_values, values in MultiIndex | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index c1efa512f326a..c22b289bb4017 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -18,7 +18,7 @@
from pandas._libs import algos as libalgos, index as libindex, lib
from pandas._libs.hashtable import duplicated_int64
-from pandas._typing import AnyArrayLike, ArrayLike, Scalar
+from pandas._typing import AnyArrayLike, Scalar
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.util._decorators import Appender, cache_readonly
@@ -52,6 +52,7 @@
ensure_index,
)
from pandas.core.indexes.frozen import FrozenList
+from pandas.core.indexes.numeric import Int64Index
import pandas.core.missing as missing
from pandas.core.sorting import (
get_group_index,
@@ -1180,7 +1181,7 @@ def _format_native_types(self, na_rep="nan", **kwargs):
sortorder=self.sortorder,
verify_integrity=False,
)
- return mi.values
+ return mi._values
def format(
self,
@@ -1419,7 +1420,7 @@ def is_monotonic_increasing(self) -> bool:
except TypeError:
# we have mixed types and np.lexsort is not happy
- return Index(self.values).is_monotonic
+ return Index(self._values).is_monotonic
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
@@ -1612,7 +1613,7 @@ def to_flat_index(self):
('bar', 'baz'), ('bar', 'qux')],
dtype='object')
"""
- return Index(self.values, tupleize_cols=False)
+ return Index(self._values, tupleize_cols=False)
@property
def is_all_dates(self) -> bool:
@@ -1914,7 +1915,7 @@ def append(self, other):
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
- to_concat = (self.values,) + tuple(k._values for k in other)
+ to_concat = (self._values,) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
@@ -1924,7 +1925,7 @@ def append(self, other):
return Index(new_tuples)
def argsort(self, *args, **kwargs) -> np.ndarray:
- return self.values.argsort(*args, **kwargs)
+ return self._values.argsort(*args, **kwargs)
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
def repeat(self, repeats, axis=None):
@@ -2368,7 +2369,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
# let's instead try with a straight Index
if method is None:
- return Index(self.values).get_indexer(
+ return Index(self._values).get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
@@ -2831,7 +2832,8 @@ def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
mapper = Series(indexer)
indexer = codes.take(ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
- m = result.map(mapper)._ndarray_values
+ m = result.map(mapper)
+ m = np.asarray(m)
else:
m = np.zeros(len(codes), dtype=bool)
@@ -2949,7 +2951,7 @@ def get_locs(self, seq):
n = len(self)
indexer = None
- def _convert_to_indexer(r):
+ def _convert_to_indexer(r) -> Int64Index:
# return an indexer
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
@@ -3026,13 +3028,16 @@ def _update_indexer(idxr, indexer=indexer):
if indexer is None:
return np.array([], dtype=np.int64)
+ assert isinstance(indexer, Int64Index), type(indexer)
indexer = self._reorder_indexer(seq, indexer)
- return indexer._ndarray_values
+ return indexer._values
def _reorder_indexer(
- self, seq: Tuple[Union[Scalar, Iterable, AnyArrayLike], ...], indexer: ArrayLike
- ) -> ArrayLike:
+ self,
+ seq: Tuple[Union[Scalar, Iterable, AnyArrayLike], ...],
+ indexer: Int64Index,
+ ) -> Int64Index:
"""
Reorder an indexer of a MultiIndex (self) so that the label are in the
same order as given in seq
@@ -3139,8 +3144,8 @@ def equals(self, other) -> bool:
if self.nlevels != other.nlevels:
return False
- other_vals = com.values_from_object(ensure_index(other))
- return array_equivalent(self._ndarray_values, other_vals)
+ other_vals = com.values_from_object(other)
+ return array_equivalent(self._values, other_vals)
if self.nlevels != other.nlevels:
return False
@@ -3232,7 +3237,7 @@ def union(self, other, sort=None):
# TODO: Index.union returns other when `len(self)` is 0.
uniq_tuples = lib.fast_unique_multiple(
- [self._ndarray_values, other._ndarray_values], sort=sort
+ [self._values, other._ndarray_values], sort=sort
)
return MultiIndex.from_arrays(
@@ -3267,7 +3272,7 @@ def intersection(self, other, sort=False):
if self.equals(other):
return self
- lvals = self._ndarray_values
+ lvals = self._values
rvals = other._ndarray_values
uniq_tuples = None # flag whether _inner_indexer was succesful
@@ -3342,7 +3347,7 @@ def difference(self, other, sort=None):
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)
- difference = this.values.take(label_diff)
+ difference = this._values.take(label_diff)
if sort is None:
difference = sorted(difference)
@@ -3359,7 +3364,8 @@ def difference(self, other, sort=None):
def _convert_can_do_setop(self, other):
result_names = self.names
- if not hasattr(other, "names"):
+ if not isinstance(other, Index):
+
if len(other) == 0:
other = MultiIndex(
levels=[[]] * self.nlevels,
@@ -3456,8 +3462,8 @@ def _wrap_joined_index(self, joined, other):
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
- values = MultiIndex.from_tuples(values, names=self.names).values
- return algos.isin(self.values, values)
+ values = MultiIndex.from_tuples(values, names=self.names)._values
+ return algos.isin(self._values, values)
else:
num = self._get_level_number(level)
levs = self.get_level_values(num)
| There are a couple of places in MultiIndex left after this that are a little trickier, will do in a separate pass. | https://api.github.com/repos/pandas-dev/pandas/pulls/32452 | 2020-03-05T00:36:55Z | 2020-03-08T15:40:35Z | 2020-03-08T15:40:35Z | 2020-04-05T17:45:35Z |
TST/VIZ: add test for legend colors for DataFrame with duplicate column labels #11136 | diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index ffbd135466709..32673b9a0a5cf 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -3316,6 +3316,16 @@ def test_missing_markers_legend_using_style(self):
self._check_legend_labels(ax, labels=["A", "B", "C"])
self._check_legend_marker(ax, expected_markers=[".", ".", "."])
+ def test_colors_of_columns_with_same_name(self):
+ # ISSUE 11136 -> https://github.com/pandas-dev/pandas/issues/11136
+ # Creating a DataFrame with duplicate column labels and testing colors of them.
+ df = pd.DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]})
+ df1 = pd.DataFrame({"a": [2, 4, 6]})
+ df_concat = pd.concat([df, df1], axis=1)
+ result = df_concat.plot()
+ for legend, line in zip(result.get_legend().legendHandles, result.lines):
+ assert legend.get_color() == line.get_color()
+
def _generate_4_axes_via_gridspec():
import matplotlib.pyplot as plt
| - [X] closes #11136
- [X] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32451 | 2020-03-04T23:16:33Z | 2020-03-12T04:52:30Z | 2020-03-12T04:52:29Z | 2020-03-12T04:52:36Z |
CI: Fix flaky test_value_counts_null | diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py
index 8f48d0a3e8378..f1cc98a1b773d 100644
--- a/pandas/tests/base/test_ops.py
+++ b/pandas/tests/base/test_ops.py
@@ -311,9 +311,10 @@ def test_value_counts(self, index_or_series_obj):
if isinstance(obj, pd.MultiIndex):
expected.index = pd.Index(expected.index)
- # sort_index to avoid switched order when values share the same count
- result = result.sort_index()
- expected = expected.sort_index()
+ # TODO: Order of entries with the same count is inconsistent on CI (gh-32449)
+ if obj.duplicated().any():
+ result = result.sort_index()
+ expected = expected.sort_index()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
@@ -344,13 +345,26 @@ def test_value_counts_null(self, null_obj, index_or_series_obj):
expected = pd.Series(dict(counter.most_common()), dtype=np.int64)
expected.index = expected.index.astype(obj.dtype)
- tm.assert_series_equal(obj.value_counts(), expected)
+ result = obj.value_counts()
+ if obj.duplicated().any():
+ # TODO:
+ # Order of entries with the same count is inconsistent on CI (gh-32449)
+ expected = expected.sort_index()
+ result = result.sort_index()
+ tm.assert_series_equal(result, expected)
# can't use expected[null_obj] = 3 as
# IntervalIndex doesn't allow assignment
new_entry = pd.Series({np.nan: 3}, dtype=np.int64)
expected = expected.append(new_entry)
- tm.assert_series_equal(obj.value_counts(dropna=False), expected)
+
+ result = obj.value_counts(dropna=False)
+ if obj.duplicated().any():
+ # TODO:
+ # Order of entries with the same count is inconsistent on CI (gh-32449)
+ expected = expected.sort_index()
+ result = result.sort_index()
+ tm.assert_series_equal(result, expected)
def test_value_counts_inferred(self, index_or_series):
klass = index_or_series
| As mentioned by @simonjayhawkins in #32438
Trace of the failing test:
https://dev.azure.com/pandas-dev/pandas/_build/results?buildId=29965&view=logs&j=077026cf-93c0-54aa-45e0-9996ba75f6f7&t=e95cf409-86ae-5b4d-6c5f-79395ef75e8f | https://api.github.com/repos/pandas-dev/pandas/pulls/32449 | 2020-03-04T21:21:24Z | 2020-03-07T09:40:10Z | 2020-03-07T09:40:10Z | 2020-03-09T19:32:12Z |
Backport PR #32442 on branch 1.0.x (CI: fix test_matplotlib_scatter_datetime64) | diff --git a/pandas/plotting/_matplotlib/compat.py b/pandas/plotting/_matplotlib/compat.py
index e7855068334f7..f2c5032112bc9 100644
--- a/pandas/plotting/_matplotlib/compat.py
+++ b/pandas/plotting/_matplotlib/compat.py
@@ -20,3 +20,4 @@ def inner():
_mpl_ge_2_2_3 = _mpl_version("2.2.3", operator.ge)
_mpl_ge_3_0_0 = _mpl_version("3.0.0", operator.ge)
_mpl_ge_3_1_0 = _mpl_version("3.1.0", operator.ge)
+_mpl_ge_3_2_0 = _mpl_version("3.2.0", operator.ge)
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 9f43027836eb4..45452526b905a 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -34,6 +34,7 @@ def setup_method(self, method):
self.mpl_ge_2_2_3 = compat._mpl_ge_2_2_3()
self.mpl_ge_3_0_0 = compat._mpl_ge_3_0_0()
self.mpl_ge_3_1_0 = compat._mpl_ge_3_1_0()
+ self.mpl_ge_3_2_0 = compat._mpl_ge_3_2_0()
self.bp_n_objects = 7
self.polycollection_factor = 2
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 10d7efd22971b..bd5781cb08816 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1463,7 +1463,9 @@ def test_matplotlib_scatter_datetime64(self):
ax.scatter(x="time", y="y", data=df)
self.plt.draw()
label = ax.get_xticklabels()[0]
- if self.mpl_ge_3_0_0:
+ if self.mpl_ge_3_2_0:
+ expected = "2018-01-01"
+ elif self.mpl_ge_3_0_0:
expected = "2017-12-08"
else:
expected = "2017-12-12"
| Backport PR #32442: CI: fix test_matplotlib_scatter_datetime64 | https://api.github.com/repos/pandas-dev/pandas/pulls/32445 | 2020-03-04T19:12:19Z | 2020-03-04T20:03:13Z | 2020-03-04T20:03:13Z | 2020-03-04T20:03:13Z |
CI: ax.rowNum and ax.colNum attributes deprecated in Matplotlib 3.2 | diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index 5743288982da4..08d945f679810 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -9,6 +9,8 @@
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
+from pandas.plotting._matplotlib import compat
+
def format_date_labels(ax, rot):
# mini version of autofmt_xdate
@@ -288,6 +290,12 @@ def _remove_labels_from_axis(axis):
def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
if nplots > 1:
+ if compat._mpl_ge_3_2_0():
+ row_num = lambda x: x.get_subplotspec().rowspan.start
+ col_num = lambda x: x.get_subplotspec().colspan.start
+ else:
+ row_num = lambda x: x.rowNum
+ col_num = lambda x: x.colNum
if nrows > 1:
try:
@@ -295,13 +303,13 @@ def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
# so that we can correctly handle 'gaps"
layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool)
for ax in axarr:
- layout[ax.rowNum, ax.colNum] = ax.get_visible()
+ layout[row_num(ax), col_num(ax)] = ax.get_visible()
for ax in axarr:
# only the last row of subplots should get x labels -> all
# other off layout handles the case that the subplot is
# the last in the column, because below is no subplot/gap.
- if not layout[ax.rowNum + 1, ax.colNum]:
+ if not layout[row_num(ax) + 1, col_num(ax)]:
continue
if sharex or len(ax.get_shared_x_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
| https://github.com/pandas-dev/pandas/pull/32442/checks?check_run_id=485569958 | https://api.github.com/repos/pandas-dev/pandas/pulls/32444 | 2020-03-04T18:21:01Z | 2020-03-05T10:10:35Z | 2020-03-05T10:10:35Z | 2020-03-05T10:12:00Z |
CI: fix test_matplotlib_scatter_datetime64 | diff --git a/pandas/plotting/_matplotlib/compat.py b/pandas/plotting/_matplotlib/compat.py
index e7855068334f7..f2c5032112bc9 100644
--- a/pandas/plotting/_matplotlib/compat.py
+++ b/pandas/plotting/_matplotlib/compat.py
@@ -20,3 +20,4 @@ def inner():
_mpl_ge_2_2_3 = _mpl_version("2.2.3", operator.ge)
_mpl_ge_3_0_0 = _mpl_version("3.0.0", operator.ge)
_mpl_ge_3_1_0 = _mpl_version("3.1.0", operator.ge)
+_mpl_ge_3_2_0 = _mpl_version("3.2.0", operator.ge)
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index ea0ec8ad98ffe..75b825687209c 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -34,6 +34,7 @@ def setup_method(self, method):
self.mpl_ge_2_2_3 = compat._mpl_ge_2_2_3()
self.mpl_ge_3_0_0 = compat._mpl_ge_3_0_0()
self.mpl_ge_3_1_0 = compat._mpl_ge_3_1_0()
+ self.mpl_ge_3_2_0 = compat._mpl_ge_3_2_0()
self.bp_n_objects = 7
self.polycollection_factor = 2
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 979b89a87d843..b85a2affc4e4b 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1468,7 +1468,9 @@ def test_matplotlib_scatter_datetime64(self):
ax.scatter(x="time", y="y", data=df)
self.plt.draw()
label = ax.get_xticklabels()[0]
- if self.mpl_ge_3_0_0:
+ if self.mpl_ge_3_2_0:
+ expected = "2018-01-01"
+ elif self.mpl_ge_3_0_0:
expected = "2017-12-08"
else:
expected = "2017-12-12"
| - [ ] closes #32440
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32442 | 2020-03-04T17:29:27Z | 2020-03-04T19:11:38Z | 2020-03-04T19:11:38Z | 2020-03-04T19:14:42Z |
DOC: Fix capitalization of the word pandas in the docs | diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst
index 1b223cf5f026b..fa7532a68a06d 100644
--- a/doc/source/development/code_style.rst
+++ b/doc/source/development/code_style.rst
@@ -21,9 +21,9 @@ Patterns
foo.__class__
-------------
-*pandas* uses 'type(foo)' instead 'foo.__class__' as it makes the code more
-readable.
+pandas uses 'type(foo)' instead 'foo.__class__' as it is making the code more
+readable.
For example:
**Good:**
@@ -50,7 +50,7 @@ Concatenated strings
f-strings
~~~~~~~~~
-*pandas* uses f-strings formatting instead of '%' and '.format()' string formatters.
+pandas uses f-strings formatting instead of '%' and '.format()' string formatters.
The convention of using f-strings on a string that is concatenated over several lines,
is to prefix only the lines containing values which need to be interpreted.
@@ -114,7 +114,7 @@ For example:
Representation function (aka 'repr()')
--------------------------------------
-*pandas* uses 'repr()' instead of '%r' and '!r'.
+pandas uses 'repr()' instead of '%r' and '!r'.
The use of 'repr()' will only happen when the value is not an obvious string.
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index f904781178656..bd903936b2ab9 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -53,7 +53,7 @@ Feel free to ask questions on the `mailing list
Bug reports and enhancement requests
====================================
-Bug reports are an important part of making *pandas* more stable. Having a complete bug report
+Bug reports are an important part of making pandas more stable. Having a complete bug report
will allow others to reproduce the bug and provide insight into fixing. See
`this stackoverflow article <https://stackoverflow.com/help/mcve>`_ and
`this blogpost <https://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports>`_
@@ -75,14 +75,14 @@ Bug reports must:
...
```
-#. Include the full version string of *pandas* and its dependencies. You can use the built-in function::
+#. Include the full version string of pandas and its dependencies. You can use the built-in function::
>>> import pandas as pd
>>> pd.show_versions()
#. Explain why the current behavior is wrong/not desired and what you expect instead.
-The issue will then show up to the *pandas* community and be open to comments/ideas from others.
+The issue will then show up to the pandas community and be open to comments/ideas from others.
.. _contributing.github:
@@ -90,14 +90,14 @@ Working with the code
=====================
Now that you have an issue you want to fix, enhancement to add, or documentation to improve,
-you need to learn how to work with GitHub and the *pandas* code base.
+you need to learn how to work with GitHub and the pandas code base.
.. _contributing.version_control:
Version control, Git, and GitHub
--------------------------------
-To the new user, working with Git is one of the more daunting aspects of contributing to *pandas*.
+To the new user, working with Git is one of the more daunting aspects of contributing to pandas.
It can very quickly become overwhelming, but sticking to the guidelines below will help keep the process
straightforward and mostly trouble free. As always, if you are having difficulties please
feel free to ask for help.
@@ -221,7 +221,7 @@ environment:
<https://conda.io/miniconda.html>`_
* Make sure your conda is up to date (``conda update conda``)
* Make sure that you have :ref:`cloned the repository <contributing.forking>`
-* ``cd`` to the *pandas* source directory
+* ``cd`` to the pandas source directory
We'll now kick off a three-step process:
@@ -330,7 +330,7 @@ The above can be simplified to::
This changes your working directory to the shiny-new-feature branch. Keep any
changes in this branch specific to one bug or feature so it is clear
-what the branch brings to *pandas*. You can have many shiny-new-features
+what the branch brings to pandas. You can have many shiny-new-features
and switch in between them using the git checkout command.
When creating this branch, make sure your master branch is up to date with
@@ -349,9 +349,9 @@ you created the branch, check the section on
Contributing to the documentation
=================================
-Contributing to the documentation benefits everyone who uses *pandas*.
+Contributing to the documentation benefits everyone who uses pandas.
We encourage you to help us improve the documentation, and
-you don't have to be an expert on *pandas* to do so! In fact,
+you don't have to be an expert on pandas to do so! In fact,
there are sections of the docs that are worse off after being written by
experts. If something in the docs doesn't make sense to you, updating the
relevant section after you figure it out is a great way to ensure it will help
@@ -361,7 +361,7 @@ the next person.
:local:
-About the *pandas* documentation
+About the pandas documentation
--------------------------------
The documentation is written in **reStructuredText**, which is almost like writing
@@ -372,7 +372,7 @@ complex changes to the documentation as well.
Some other important things to know about the docs:
-* The *pandas* documentation consists of two parts: the docstrings in the code
+* The pandas documentation consists of two parts: the docstrings in the code
itself and the docs in this folder ``doc/``.
The docstrings provide a clear explanation of the usage of the individual
@@ -452,7 +452,7 @@ This will identify methods documented in ``doc/source/reference`` that are not a
class methods, and existing methods that are not documented in ``doc/source/reference``.
-Updating a *pandas* docstring
+Updating a pandas docstring
-----------------------------
When improving a single function or method's docstring, it is not necessarily
@@ -477,7 +477,7 @@ When doing a PR with a docstring update, it is good to post the
output of the validation script in a comment on github.
-How to build the *pandas* documentation
+How to build the pandas documentation
---------------------------------------
Requirements
@@ -543,7 +543,7 @@ And you'll have the satisfaction of seeing your new and improved documentation!
Building master branch documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-When pull requests are merged into the *pandas* ``master`` branch, the main parts of
+When pull requests are merged into the pandas ``master`` branch, the main parts of
the documentation are also built by Travis-CI. These docs are then hosted `here
<https://dev.pandas.io>`__, see also
the :ref:`Continuous Integration <contributing.ci>` section.
@@ -563,7 +563,7 @@ Writing good code is not just about what you write. It is also about *how* you
write it. During :ref:`Continuous Integration <contributing.ci>` testing, several
tools will be run to check your code for stylistic errors.
Generating any warnings will cause the test to fail.
-Thus, good style is a requirement for submitting code to *pandas*.
+Thus, good style is a requirement for submitting code to pandas.
There is a tool in pandas to help contributors verify their changes before
contributing them to the project::
@@ -601,7 +601,7 @@ set in the ``pandas.compat._optional.VERSIONS`` dict.
C (cpplint)
~~~~~~~~~~~
-*pandas* uses the `Google <https://google.github.io/styleguide/cppguide.html>`_
+pandas uses the `Google <https://google.github.io/styleguide/cppguide.html>`_
standard. Google provides an open source style checker called ``cpplint``, but we
use a fork of it that can be found `here <https://github.com/cpplint/cpplint>`__.
Here are *some* of the more common ``cpplint`` issues:
@@ -652,7 +652,7 @@ fixes manually.
Python (PEP8 / black)
~~~~~~~~~~~~~~~~~~~~~
-*pandas* follows the `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_ standard
+pandas follows the `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_ standard
and uses `Black <https://black.readthedocs.io/en/stable/>`_ and
`Flake8 <http://flake8.pycqa.org/en/latest/>`_ to ensure a consistent code
format throughout the project.
@@ -703,7 +703,7 @@ Note that these commands can be run analogously with ``black``.
Import formatting
~~~~~~~~~~~~~~~~~
-*pandas* uses `isort <https://pypi.org/project/isort/>`__ to standardise import
+pandas uses `isort <https://pypi.org/project/isort/>`__ to standardise import
formatting across the codebase.
A guide to import layout as per pep8 can be found `here <https://www.python.org/dev/peps/pep-0008/#imports/>`__.
@@ -778,7 +778,7 @@ Note that if needed, you can skip these checks with ``git commit --no-verify``.
Backwards compatibility
~~~~~~~~~~~~~~~~~~~~~~~
-Please try to maintain backward compatibility. *pandas* has lots of users with lots of
+Please try to maintain backward compatibility. pandas has lots of users with lots of
existing code, so don't break it if at all possible. If you think breakage is required,
clearly state why as part of the pull request. Also, be careful when changing method
signatures and add deprecation warnings where needed. Also, add the deprecated sphinx
@@ -825,7 +825,7 @@ See :ref:`contributing.warnings` for more.
Type Hints
----------
-*pandas* strongly encourages the use of :pep:`484` style type hints. New development should contain type hints and pull requests to annotate existing code are accepted as well!
+pandas strongly encourages the use of :pep:`484` style type hints. New development should contain type hints and pull requests to annotate existing code are accepted as well!
Style Guidelines
~~~~~~~~~~~~~~~~
@@ -906,9 +906,9 @@ With custom types and inference this is not always possible so exceptions are ma
Pandas-specific Types
~~~~~~~~~~~~~~~~~~~~~
-Commonly used types specific to *pandas* will appear in `pandas._typing <https://github.com/pandas-dev/pandas/blob/master/pandas/_typing.py>`_ and you should use these where applicable. This module is private for now but ultimately this should be exposed to third party libraries who want to implement type checking against pandas.
+Commonly used types specific to pandas will appear in `pandas._typing <https://github.com/pandas-dev/pandas/blob/master/pandas/_typing.py>`_ and you should use these where applicable. This module is private for now but ultimately this should be exposed to third party libraries who want to implement type checking against pandas.
-For example, quite a few functions in *pandas* accept a ``dtype`` argument. This can be expressed as a string like ``"object"``, a ``numpy.dtype`` like ``np.int64`` or even a pandas ``ExtensionDtype`` like ``pd.CategoricalDtype``. Rather than burden the user with having to constantly annotate all of those options, this can simply be imported and reused from the pandas._typing module
+For example, quite a few functions in pandas accept a ``dtype`` argument. This can be expressed as a string like ``"object"``, a ``numpy.dtype`` like ``np.int64`` or even a pandas ``ExtensionDtype`` like ``pd.CategoricalDtype``. Rather than burden the user with having to constantly annotate all of those options, this can simply be imported and reused from the pandas._typing module
.. code-block:: python
@@ -922,7 +922,7 @@ This module will ultimately house types for repeatedly used concepts like "path-
Validating Type Hints
~~~~~~~~~~~~~~~~~~~~~
-*pandas* uses `mypy <http://mypy-lang.org>`_ to statically analyze the code base and type hints. After making any change you can ensure your type hints are correct by running
+pandas uses `mypy <http://mypy-lang.org>`_ to statically analyze the code base and type hints. After making any change you can ensure your type hints are correct by running
.. code-block:: shell
@@ -933,7 +933,7 @@ Validating Type Hints
Testing with continuous integration
-----------------------------------
-The *pandas* test suite will run automatically on `Travis-CI <https://travis-ci.org/>`__ and
+The pandas test suite will run automatically on `Travis-CI <https://travis-ci.org/>`__ and
`Azure Pipelines <https://azure.microsoft.com/en-us/services/devops/pipelines/>`__
continuous integration services, once your pull request is submitted.
However, if you wish to run the test suite on a branch prior to submitting the pull request,
@@ -959,7 +959,7 @@ This is an example of a green build.
Test-driven development/code writing
------------------------------------
-*pandas* is serious about testing and strongly encourages contributors to embrace
+pandas is serious about testing and strongly encourages contributors to embrace
`test-driven development (TDD) <https://en.wikipedia.org/wiki/Test-driven_development>`_.
This development process "relies on the repetition of a very short development cycle:
first the developer writes an (initially failing) automated test case that defines a desired
@@ -968,10 +968,10 @@ So, before actually writing any code, you should write your tests. Often the te
taken from the original GitHub issue. However, it is always worth considering additional
use cases and writing corresponding tests.
-Adding tests is one of the most common requests after code is pushed to *pandas*. Therefore,
+Adding tests is one of the most common requests after code is pushed to pandas. Therefore,
it is worth getting in the habit of writing tests ahead of time so this is never an issue.
-Like many packages, *pandas* uses `pytest
+Like many packages, pandas uses `pytest
<https://docs.pytest.org/en/latest/>`_ and the convenient
extensions in `numpy.testing
<https://docs.scipy.org/doc/numpy/reference/routines.testing.html>`_.
@@ -1018,7 +1018,7 @@ E.g. "# brief comment, see GH#28907"
Transitioning to ``pytest``
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-*pandas* existing test structure is *mostly* class-based, meaning that you will typically find tests wrapped in a class.
+pandas existing test structure is *mostly* class-based, meaning that you will typically find tests wrapped in a class.
.. code-block:: python
@@ -1220,7 +1220,7 @@ Running the test suite
----------------------
The tests can then be run directly inside your Git clone (without having to
-install *pandas*) by typing::
+install pandas) by typing::
pytest pandas
@@ -1272,9 +1272,9 @@ Running the performance test suite
----------------------------------
Performance matters and it is worth considering whether your code has introduced
-performance regressions. *pandas* is in the process of migrating to
+performance regressions. pandas is in the process of migrating to
`asv benchmarks <https://github.com/spacetelescope/asv>`__
-to enable easy monitoring of the performance of critical *pandas* operations.
+to enable easy monitoring of the performance of critical pandas operations.
These benchmarks are all found in the ``pandas/asv_bench`` directory. asv
supports both python2 and python3.
@@ -1361,7 +1361,7 @@ directive. This should also be put in the docstring when adding a new function
or method (`example <https://github.com/pandas-dev/pandas/blob/v0.20.2/pandas/core/frame.py#L1495>`__)
or a new keyword argument (`example <https://github.com/pandas-dev/pandas/blob/v0.20.2/pandas/core/generic.py#L568>`__).
-Contributing your changes to *pandas*
+Contributing your changes to pandas
=====================================
.. _contributing.commit-code:
@@ -1386,7 +1386,7 @@ Doing 'git status' again should give something like::
# modified: /relative/path/to/file-you-added.py
#
-Finally, commit your changes to your local repository with an explanatory message. *Pandas*
+Finally, commit your changes to your local repository with an explanatory message. pandas
uses a convention for commit message prefixes and layout. Here are
some common prefixes along with general guidelines for when to use them:
@@ -1434,7 +1434,7 @@ like::
upstream git://github.com/pandas-dev/pandas.git (fetch)
upstream git://github.com/pandas-dev/pandas.git (push)
-Now your code is on GitHub, but it is not yet a part of the *pandas* project. For that to
+Now your code is on GitHub, but it is not yet a part of the pandas project. For that to
happen, a pull request needs to be submitted on GitHub.
Review your code
diff --git a/doc/source/development/contributing_docstring.rst b/doc/source/development/contributing_docstring.rst
index 1c99b341f6c5a..4cc21bbac1b10 100644
--- a/doc/source/development/contributing_docstring.rst
+++ b/doc/source/development/contributing_docstring.rst
@@ -80,7 +80,7 @@ about reStructuredText can be found in:
* `Quick reStructuredText reference <https://docutils.sourceforge.io/docs/user/rst/quickref.html>`_
* `Full reStructuredText specification <https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html>`_
-Pandas has some helpers for sharing docstrings between related classes, see
+pandas has some helpers for sharing docstrings between related classes, see
:ref:`docstring.sharing`.
The rest of this document will summarize all the above guides, and will
@@ -932,7 +932,7 @@ plot will be generated automatically when building the documentation.
Sharing docstrings
------------------
-Pandas has a system for sharing docstrings, with slight variations, between
+pandas has a system for sharing docstrings, with slight variations, between
classes. This helps us keep docstrings consistent, while keeping things clear
for the user reading. It comes at the cost of some complexity when writing.
diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst
index 270f20e8118bc..98e3ffcf74ad1 100644
--- a/doc/source/development/extending.rst
+++ b/doc/source/development/extending.rst
@@ -7,7 +7,7 @@ Extending pandas
****************
While pandas provides a rich set of methods, containers, and data types, your
-needs may not be fully satisfied. Pandas offers a few options for extending
+needs may not be fully satisfied. pandas offers a few options for extending
pandas.
.. _extending.register-accessors:
@@ -80,8 +80,8 @@ Extension types
The :class:`pandas.api.extensions.ExtensionDtype` and :class:`pandas.api.extensions.ExtensionArray` APIs are new and
experimental. They may change between versions without warning.
-Pandas defines an interface for implementing data types and arrays that *extend*
-NumPy's type system. Pandas itself uses the extension system for some types
+pandas defines an interface for implementing data types and arrays that *extend*
+NumPy's type system. pandas itself uses the extension system for some types
that aren't built into NumPy (categorical, period, interval, datetime with
timezone).
@@ -122,7 +122,7 @@ This class provides all the array-like functionality. ExtensionArrays are
limited to 1 dimension. An ExtensionArray is linked to an ExtensionDtype via the
``dtype`` attribute.
-Pandas makes no restrictions on how an extension array is created via its
+pandas makes no restrictions on how an extension array is created via its
``__new__`` or ``__init__``, and puts no restrictions on how you store your
data. We do require that your array be convertible to a NumPy array, even if
this is relatively expensive (as it is for ``Categorical``).
@@ -224,7 +224,7 @@ for an example.
As part of your implementation, we require that you defer to pandas when a pandas
container (:class:`Series`, :class:`DataFrame`, :class:`Index`) is detected in ``inputs``.
-If any of those is present, you should return ``NotImplemented``. Pandas will take care of
+If any of those is present, you should return ``NotImplemented``. pandas will take care of
unboxing the array from the container and re-calling the ufunc with the unwrapped input.
.. _extending.extension.testing:
diff --git a/doc/source/development/internals.rst b/doc/source/development/internals.rst
index 748caae295460..4ad045a91b5fe 100644
--- a/doc/source/development/internals.rst
+++ b/doc/source/development/internals.rst
@@ -85,7 +85,7 @@ if you compute the levels and codes yourself, please be careful.
Values
~~~~~~
-Pandas extends NumPy's type system with custom types, like ``Categorical`` or
+pandas extends NumPy's type system with custom types, like ``Categorical`` or
datetimes with a timezone, so we have multiple notions of "values". For 1-D
containers (``Index`` classes and ``Series``) we have the following convention:
diff --git a/doc/source/development/maintaining.rst b/doc/source/development/maintaining.rst
index e65b66fc243c5..9ae9d47b89341 100644
--- a/doc/source/development/maintaining.rst
+++ b/doc/source/development/maintaining.rst
@@ -13,7 +13,7 @@ The main contributing guide is available at :ref:`contributing`.
Roles
-----
-Pandas uses two levels of permissions: **triage** and **core** team members.
+pandas uses two levels of permissions: **triage** and **core** team members.
Triage members can label and close issues and pull requests.
@@ -25,7 +25,7 @@ GitHub publishes the full `list of permissions`_.
Tasks
-----
-Pandas is largely a volunteer project, so these tasks shouldn't be read as
+pandas is largely a volunteer project, so these tasks shouldn't be read as
"expectations" of triage and maintainers. Rather, they're general descriptions
of what it means to be a maintainer.
diff --git a/doc/source/development/policies.rst b/doc/source/development/policies.rst
index 224948738341e..b7cc3db3ad260 100644
--- a/doc/source/development/policies.rst
+++ b/doc/source/development/policies.rst
@@ -11,7 +11,7 @@ Version Policy
.. versionchanged:: 1.0.0
-Pandas uses a loose variant of semantic versioning (`SemVer`_) to govern
+pandas uses a loose variant of semantic versioning (`SemVer`_) to govern
deprecations, API compatibility, and version numbering.
A pandas release number is made up of ``MAJOR.MINOR.PATCH``.
@@ -23,7 +23,7 @@ and how to migrate existing code to the new behavior.
Whenever possible, a deprecation path will be provided rather than an outright
breaking change.
-Pandas will introduce deprecations in **minor** releases. These deprecations
+pandas will introduce deprecations in **minor** releases. These deprecations
will preserve the existing behavior while emitting a warning that provide
guidance on:
@@ -39,19 +39,19 @@ deprecation removed in the next next major release (2.0.0).
.. note::
- Pandas will sometimes make *behavior changing* bug fixes, as part of
+ pandas will sometimes make *behavior changing* bug fixes, as part of
minor or patch releases. Whether or not a change is a bug fix or an
API-breaking change is a judgement call. We'll do our best, and we
invite you to participate in development discussion on the issue
tracker or mailing list.
These policies do not apply to features marked as **experimental** in the documentation.
-Pandas may change the behavior of experimental features at any time.
+pandas may change the behavior of experimental features at any time.
Python Support
~~~~~~~~~~~~~~
-Pandas will only drop support for specific Python versions (e.g. 3.6.x, 3.7.x) in
+pandas will only drop support for specific Python versions (e.g. 3.6.x, 3.7.x) in
pandas **major** releases.
.. _SemVer: https://semver.org
diff --git a/doc/source/development/roadmap.rst b/doc/source/development/roadmap.rst
index fafe63d80249c..e57ff82add278 100644
--- a/doc/source/development/roadmap.rst
+++ b/doc/source/development/roadmap.rst
@@ -22,8 +22,8 @@ See :ref:`roadmap.evolution` for proposing changes to this document.
Extensibility
-------------
-Pandas :ref:`extending.extension-types` allow for extending NumPy types with custom
-data types and array storage. Pandas uses extension types internally, and provides
+pandas :ref:`extending.extension-types` allow for extending NumPy types with custom
+data types and array storage. pandas uses extension types internally, and provides
an interface for 3rd-party libraries to define their own custom data types.
Many parts of pandas still unintentionally convert data to a NumPy array.
@@ -71,7 +71,7 @@ Block manager rewrite
We'd like to replace pandas current internal data structures (a collection of
1 or 2-D arrays) with a simpler collection of 1-D arrays.
-Pandas internal data model is quite complex. A DataFrame is made up of
+pandas internal data model is quite complex. A DataFrame is made up of
one or more 2-dimensional "blocks", with one or more blocks per dtype. This
collection of 2-D arrays is managed by the BlockManager.
@@ -132,7 +132,7 @@ Some specific goals include
Performance monitoring
----------------------
-Pandas uses `airspeed velocity <https://asv.readthedocs.io/en/stable/>`__ to
+pandas uses `airspeed velocity <https://asv.readthedocs.io/en/stable/>`__ to
monitor for performance regressions. ASV itself is a fabulous tool, but requires
some additional work to be integrated into an open source project's workflow.
@@ -155,7 +155,7 @@ We'd like to fund improvements and maintenance of these tools to
Roadmap Evolution
-----------------
-Pandas continues to evolve. The direction is primarily determined by community
+pandas continues to evolve. The direction is primarily determined by community
interest. Everyone is welcome to review existing items on the roadmap and
to propose a new item.
| - [x] closes #32316
- [x] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
As stated in the referred issue, _pandas_ reference in the docs should be standardised to pandas, not *pandas* or Pandas.
I've only changed the references in `doc/source/development/*.rst` for now, to see if the changes are okay or not | https://api.github.com/repos/pandas-dev/pandas/pulls/32439 | 2020-03-04T17:09:20Z | 2020-03-12T04:49:24Z | 2020-03-12T04:49:24Z | 2020-03-12T04:49:31Z |
CI: mypy fixup for #32261 | diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 7320a6407b248..f6e79a0f2045d 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -122,6 +122,9 @@ class BlockManager(PandasObject):
"_blklocs",
]
+ _blknos: np.ndarray
+ _blklocs: np.ndarray
+
def __init__(
self,
blocks: Sequence[Block],
| https://github.com/pandas-dev/pandas/runs/485121000 | https://api.github.com/repos/pandas-dev/pandas/pulls/32438 | 2020-03-04T17:07:29Z | 2020-03-04T22:24:39Z | 2020-03-04T22:24:39Z | 2020-03-05T10:13:15Z |
DOC: correct issue number for PR #32424 | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 5310419bfc100..44deab25db695 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -234,7 +234,7 @@ Numeric
Conversion
^^^^^^^^^^
- Bug in :class:`Series` construction from NumPy array with big-endian ``datetime64`` dtype (:issue:`29684`)
-- Bug in :class:`Timedelta` construction with large nanoseconds keyword value (:issue:`34202`)
+- Bug in :class:`Timedelta` construction with large nanoseconds keyword value (:issue:`32402`)
-
Strings
| xref #32424
| https://api.github.com/repos/pandas-dev/pandas/pulls/32436 | 2020-03-04T16:37:55Z | 2020-03-05T11:23:34Z | 2020-03-05T11:23:34Z | 2020-03-05T11:26:32Z |
DOC: Fix EX02 in pandas.Index.get_loc | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 173a83103b8ad..70a0e29b0494c 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2844,7 +2844,7 @@ def get_loc(self, key, method=None, tolerance=None):
>>> non_monotonic_index = pd.Index(list('abcb'))
>>> non_monotonic_index.get_loc('b')
- array([False, True, False, True], dtype=bool)
+ array([False, True, False, True])
"""
if method is None:
if tolerance is not None:
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Related to #27977.
```
################################################################################
################################## Validation ##################################
################################################################################
5 Errors found:
No extended summary found
Parameter "key" has no description
The first line of the Returns section should contain only the type, unless multiple values are being returned
Return value has no description
See Also section not found
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/32429 | 2020-03-04T09:39:27Z | 2020-03-04T16:22:05Z | 2020-03-04T16:22:05Z | 2020-03-04T16:22:17Z |
WIP: ENH: Add engine keyword argument to groupby.apply to leverage Numba | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index fb935c9065b83..c90da5c4001f2 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -223,8 +223,10 @@ def _selection_name(self):
input="series", examples=_apply_docs["series_examples"]
)
)
- def apply(self, func, *args, **kwargs):
- return super().apply(func, *args, **kwargs)
+ def apply(self, func, engine="cython", engine_kwargs=None, *args, **kwargs):
+ return super().apply(
+ func, engine=engine, engine_kwargs=engine_kwargs, *args, **kwargs
+ )
@Substitution(
see_also=_agg_see_also_doc,
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 6362f11a3e032..f17bb03f4cffa 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -58,7 +58,7 @@ class providing the base-class of operations.
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
-from pandas.core.groupby import base, ops
+from pandas.core.groupby import base, ops, numba_
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
@@ -703,36 +703,50 @@ def __iter__(self):
input="dataframe", examples=_apply_docs["dataframe_examples"]
)
)
- def apply(self, func, *args, **kwargs):
+ def apply(self, func, engine="cython", engine_kwargs=None, *args, **kwargs):
func = self._is_builtin_func(func)
- # this is needed so we don't try and wrap strings. If we could
- # resolve functions to their callable functions prior, this
- # wouldn't be needed
- if args or kwargs:
- if callable(func):
-
- @wraps(func)
- def f(g):
- with np.errstate(all="ignore"):
- return func(g, *args, **kwargs)
+ if engine == "cython":
+ # this is needed so we don't try and wrap strings. If we could
+ # resolve functions to their callable functions prior, this
+ # wouldn't be needed
+ if args or kwargs:
+ if callable(func):
+
+ @wraps(func)
+ def f(g):
+ with np.errstate(all="ignore"):
+ return func(g, *args, **kwargs)
+
+ elif hasattr(nanops, "nan" + func):
+ # TODO: should we wrap this in to e.g. _is_builtin_func?
+ f = getattr(nanops, "nan" + func)
+
+ else:
+ raise ValueError(
+ "func must be a callable if args or kwargs are supplied"
+ )
+ else:
+ f = func
+ elif engine == "numba":
- elif hasattr(nanops, "nan" + func):
- # TODO: should we wrap this in to e.g. _is_builtin_func?
- f = getattr(nanops, "nan" + func)
+ numba_.validate_apply_function_signature(func)
+ if func in self.grouper._numba_apply_cache:
+ # Return an already compiled version of the function if available
+ # TODO: this cache needs to be populated
+ f = self.grouper._numba_apply_cache[func]
else:
- raise ValueError(
- "func must be a callable if args or kwargs are supplied"
- )
+ # TODO: support args
+ f = numba_.generate_numba_apply_func(args, kwargs, func, engine_kwargs)
else:
- f = func
+ raise ValueError("engine must be either 'numba' or 'cython'")
# ignore SettingWithCopy here in case the user mutates
with option_context("mode.chained_assignment", None):
try:
- result = self._python_apply_general(f)
+ result = self._python_apply_general(f, engine)
except TypeError:
# gh-20949
# try again, with .apply acting as a filtering
@@ -743,12 +757,14 @@ def f(g):
# on a string grouper column
with _group_selection_context(self):
- return self._python_apply_general(f)
+ return self._python_apply_general(f, engine)
return result
- def _python_apply_general(self, f):
- keys, values, mutated = self.grouper.apply(f, self._selected_obj, self.axis)
+ def _python_apply_general(self, f, engine="cython"):
+ keys, values, mutated = self.grouper.apply(
+ f, self._selected_obj, self.axis, engine=engine
+ )
return self._wrap_applied_output(
keys, values, not_indexed_same=mutated or self.mutated
diff --git a/pandas/core/groupby/numba_.py b/pandas/core/groupby/numba_.py
new file mode 100644
index 0000000000000..2f71a46775c94
--- /dev/null
+++ b/pandas/core/groupby/numba_.py
@@ -0,0 +1,138 @@
+import inspect
+import types
+
+import numpy as np
+
+from pandas.compat._optional import import_optional_dependency
+
+
+class InvalidApply(Exception):
+ pass
+
+
+def execute_groupby_function(splitter, f):
+ """Mimics apply_frame_axis0 which is the Cython equivalent of this function."""
+ results = []
+ for _, group in splitter:
+ # TODO: what about series names/dataframe columns
+ index = group.index
+ values_as_array = group.to_numpy()
+ index_as_array = index.to_numpy()
+ try:
+ # TODO: support *args, **kwargs here
+ group_result = f(values_as_array, index_as_array)
+ except Exception:
+ # We can't be more specific without knowing something about `f`
+ # Like we do in Cython
+ raise InvalidApply("Let this error raise above us")
+ # Reconstruct the pandas object (expected downstream)
+ # This construction will fail is there is mutation,
+ # but we're banning it with numba?
+ group_result = group._constructor(group_result, index=index)
+ results.append(group_result)
+
+ return results
+
+
+def validate_apply_function_signature(func):
+ """
+ Validate that the apply function's first 2 arguments are 'values' and 'index'.
+
+ func : function
+ function to be applied to each group and will be JITed
+ """
+ apply_function_signature = list(inspect.signature(func).parameters.keys())[:2]
+ if apply_function_signature != ["values", "index"]:
+ raise ValueError(
+ "The apply function's first 2 arguments must be 'values' and 'index'"
+ )
+
+
+def make_groupby_apply(
+ func, args, nogil, parallel, nopython,
+):
+ """
+ Creates a JITted groupby apply function with a JITted version of
+ the user's function.
+
+ Parameters
+ ----------
+ func : function
+ function to be applied to each group and will be JITed
+ args : tuple
+ *args to be passed into the function
+ nogil : bool
+ nogil parameter from engine_kwargs for numba.jit
+ parallel : bool
+ parallel parameter from engine_kwargs for numba.jit
+ nopython : bool
+ nopython parameter from engine_kwargs for numba.jit
+
+ Returns
+ -------
+ Numba function
+ """
+ numba = import_optional_dependency("numba")
+
+ if isinstance(func, numba.targets.registry.CPUDispatcher):
+ # Don't jit a user passed jitted function
+ numba_func = func
+ else:
+
+ @numba.generated_jit(nopython=nopython, nogil=nogil, parallel=parallel)
+ def numba_func(group, *_args):
+ if getattr(np, func.__name__, False) is func or isinstance(
+ func, types.BuiltinFunctionType
+ ):
+ jf = func
+ else:
+ jf = numba.jit(func, nopython=nopython, nogil=nogil)
+
+ def impl(group, *_args):
+ return jf(group, *_args)
+
+ return impl
+
+ return numba_func
+
+
+def generate_numba_apply_func(
+ args, kwargs, func, engine_kwargs,
+):
+ """
+ Generate a numba jitted apply function specified by values from engine_kwargs.
+
+ 1. jit the user's function
+
+ Configurations specified in engine_kwargs apply to both the user's
+ function _AND_ the rolling apply function.
+
+ Parameters
+ ----------
+ args : tuple
+ *args to be passed into the function
+ kwargs : dict
+ **kwargs to be passed into the function
+ func : function
+ function to be applied to each group and will be JITed
+ engine_kwargs : dict
+ dictionary of arguments to be passed into numba.jit
+
+ Returns
+ -------
+ Numba function
+ """
+ if engine_kwargs is None:
+ engine_kwargs = {}
+
+ nopython = engine_kwargs.get("nopython", True)
+ nogil = engine_kwargs.get("nogil", False)
+ parallel = engine_kwargs.get("parallel", False)
+
+ if kwargs and nopython:
+ raise ValueError(
+ "numba does not support kwargs with nopython=True: "
+ "https://github.com/numba/numba/issues/2916"
+ )
+
+ return make_groupby_apply(func, args, nogil, parallel, nopython)
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 7259268ac3f2b..2d5755c4470d8 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -43,7 +43,7 @@
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
-from pandas.core.groupby import base, grouper
+from pandas.core.groupby import base, grouper, numba_
from pandas.core.indexes.api import Index, MultiIndex, ensure_index
from pandas.core.series import Series
from pandas.core.sorting import (
@@ -96,6 +96,7 @@ def __init__(
self.group_keys = group_keys
self.mutated = mutated
self.indexer = indexer
+ self._numba_apply_cache = dict()
@property
def groupings(self) -> List["grouper.Grouping"]:
@@ -148,13 +149,23 @@ def _get_group_keys(self):
# provide "flattened" iterator for multi-group setting
return get_flattened_iterator(comp_ids, ngroups, self.levels, self.codes)
- def apply(self, f, data: FrameOrSeries, axis: int = 0):
+ def apply(self, f, data: FrameOrSeries, axis: int = 0, engine="cython"):
mutated = self.mutated
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
result_values = None
sdata: FrameOrSeries = splitter._get_sorted_data()
+
+ if engine == "numba":
+ result_values = numba_.execute_groupby_function(splitter, f)
+
+ # mutation is determined based on index alignment
+ # numba functions always return numpy arrays w/o indexes
+ # therefore, mutated=False?
+ # or just ban mutation so mutated=False always
+ return group_keys, result_values, False
+
if sdata.ndim == 2 and np.any(sdata.dtypes.apply(is_extension_array_dtype)):
# calling splitter.fast_apply will raise TypeError via apply_frame_axis0
# if we pass EA instead of ndarray
| - [x] closes #31845
- [ ] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32428 | 2020-03-04T08:39:03Z | 2020-03-14T04:05:03Z | null | 2020-04-21T16:30:48Z |
CLN: avoid _internal_get_values in groupby.generic | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index fb935c9065b83..ac522fc7863b2 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -589,7 +589,7 @@ def nunique(self, dropna: bool = True) -> Series:
"""
ids, _, _ = self.grouper.group_info
- val = self.obj._internal_get_values()
+ val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
@@ -657,7 +657,7 @@ def value_counts(
)
ids, _, _ = self.grouper.group_info
- val = self.obj._internal_get_values()
+ val = self.obj._values
# groupby removes null keys from groupings
mask = ids != -1
@@ -774,7 +774,7 @@ def count(self) -> Series:
Count of values within each group.
"""
ids, _, ngroups = self.grouper.group_info
- val = self.obj._internal_get_values()
+ val = self.obj._values
mask = (ids != -1) & ~isna(val)
ids = ensure_platform_int(ids)
| xref #27165, #27167 | https://api.github.com/repos/pandas-dev/pandas/pulls/32427 | 2020-03-04T04:19:38Z | 2020-03-05T10:49:42Z | 2020-03-05T10:49:42Z | 2020-03-05T15:00:17Z |
CLN: avoid values_from_object in Series | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index cd5d81bc70dd9..4fcefd5c32b6b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -77,6 +77,7 @@
ensure_platform_int,
infer_dtype_from_object,
is_bool_dtype,
+ is_datetime64_any_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
@@ -88,6 +89,7 @@
is_list_like,
is_named_tuple,
is_object_dtype,
+ is_period_dtype,
is_scalar,
is_sequence,
needs_i8_conversion,
@@ -7789,11 +7791,13 @@ def _reduce(
self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds
):
- dtype_is_dt = self.dtypes.apply(lambda x: x.kind == "M")
+ dtype_is_dt = self.dtypes.apply(
+ lambda x: is_datetime64_any_dtype(x) or is_period_dtype(x)
+ )
if numeric_only is None and name in ["mean", "median"] and dtype_is_dt.any():
warnings.warn(
"DataFrame.mean and DataFrame.median with numeric_only=None "
- "will include datetime64 and datetime64tz columns in a "
+ "will include datetime64, datetime64tz, and PeriodDtype columns in a "
"future version.",
FutureWarning,
stacklevel=3,
@@ -7854,6 +7858,10 @@ def blk_func(values):
assert len(res) == max(list(res.keys())) + 1, res.keys()
out = df._constructor_sliced(res, index=range(len(res)), dtype=out_dtype)
out.index = df.columns
+ if axis == 0 and df.dtypes.apply(needs_i8_conversion).any():
+ # FIXME: needs_i8_conversion check is kludge, not sure
+ # why it is necessary in this case and this case alone
+ out[:] = coerce_to_dtypes(out.values, df.dtypes)
return out
if numeric_only is None:
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 78313f5c3bbbf..269843abb15ee 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -7,7 +7,7 @@
from pandas._config import get_option
-from pandas._libs import NaT, Timedelta, Timestamp, iNaT, lib
+from pandas._libs import NaT, Period, Timedelta, Timestamp, iNaT, lib
from pandas._typing import Dtype, Scalar
from pandas.compat._optional import import_optional_dependency
@@ -17,9 +17,7 @@
is_any_int_dtype,
is_bool_dtype,
is_complex,
- is_datetime64_dtype,
- is_datetime64tz_dtype,
- is_datetime_or_timedelta_dtype,
+ is_datetime64_any_dtype,
is_float,
is_float_dtype,
is_integer,
@@ -28,8 +26,10 @@
is_object_dtype,
is_scalar,
is_timedelta64_dtype,
+ needs_i8_conversion,
pandas_dtype,
)
+from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
from pandas.core.construction import extract_array
@@ -134,10 +134,8 @@ def f(
def _bn_ok_dtype(dtype: Dtype, name: str) -> bool:
- # Bottleneck chokes on datetime64
- if not is_object_dtype(dtype) and not (
- is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype)
- ):
+ # Bottleneck chokes on datetime64, PeriodDtype (or and EA)
+ if not is_object_dtype(dtype) and not needs_i8_conversion(dtype):
# GH 15507
# bottleneck does not properly upcast during the sum
@@ -283,17 +281,16 @@ def _get_values(
# with scalar fill_value. This guarantee is important for the
# maybe_upcast_putmask call below
assert is_scalar(fill_value)
+ values = extract_array(values, extract_numpy=True)
mask = _maybe_get_mask(values, skipna, mask)
- values = extract_array(values, extract_numpy=True)
dtype = values.dtype
- if is_datetime_or_timedelta_dtype(values) or is_datetime64tz_dtype(values):
+ if needs_i8_conversion(values):
# changing timedelta64/datetime64 to int64 needs to happen after
# finding `mask` above
- values = getattr(values, "asi8", values)
- values = values.view(np.int64)
+ values = np.asarray(values.view("i8"))
dtype_ok = _na_ok_dtype(dtype)
@@ -307,7 +304,8 @@ def _get_values(
if skipna and copy:
values = values.copy()
- if dtype_ok:
+ assert mask is not None # for mypy
+ if dtype_ok and mask.any():
np.putmask(values, mask, fill_value)
# promote if needed
@@ -325,13 +323,14 @@ def _get_values(
def _na_ok_dtype(dtype) -> bool:
- # TODO: what about datetime64tz? PeriodDtype?
- return not issubclass(dtype.type, (np.integer, np.timedelta64, np.datetime64))
+ if needs_i8_conversion(dtype):
+ return False
+ return not issubclass(dtype.type, np.integer)
def _wrap_results(result, dtype: Dtype, fill_value=None):
""" wrap our results if needed """
- if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
+ if is_datetime64_any_dtype(dtype):
if fill_value is None:
# GH#24293
fill_value = iNaT
@@ -342,7 +341,8 @@ def _wrap_results(result, dtype: Dtype, fill_value=None):
result = np.nan
result = Timestamp(result, tz=tz)
else:
- result = result.view(dtype)
+ # If we have float dtype, taking a view will give the wrong result
+ result = result.astype(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
if result == fill_value:
@@ -356,6 +356,14 @@ def _wrap_results(result, dtype: Dtype, fill_value=None):
else:
result = result.astype("m8[ns]").view(dtype)
+ elif isinstance(dtype, PeriodDtype):
+ if is_float(result) and result.is_integer():
+ result = int(result)
+ if is_integer(result):
+ result = Period._from_ordinal(result, freq=dtype.freq)
+ else:
+ raise NotImplementedError(type(result), result)
+
return result
@@ -542,12 +550,7 @@ def nanmean(values, axis=None, skipna=True, mask=None):
)
dtype_sum = dtype_max
dtype_count = np.float64
- if (
- is_integer_dtype(dtype)
- or is_timedelta64_dtype(dtype)
- or is_datetime64_dtype(dtype)
- or is_datetime64tz_dtype(dtype)
- ):
+ if is_integer_dtype(dtype) or needs_i8_conversion(dtype):
dtype_sum = np.float64
elif is_float_dtype(dtype):
dtype_sum = dtype
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 568e99622dd29..bf66f9224148f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1984,7 +1984,7 @@ def idxmin(self, axis=0, skipna=True, *args, **kwargs):
nan
"""
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
- i = nanops.nanargmin(com.values_from_object(self), skipna=skipna)
+ i = nanops.nanargmin(self._values, skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
@@ -2055,7 +2055,7 @@ def idxmax(self, axis=0, skipna=True, *args, **kwargs):
nan
"""
skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
- i = nanops.nanargmax(com.values_from_object(self), skipna=skipna)
+ i = nanops.nanargmax(self._values, skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
@@ -2093,7 +2093,7 @@ def round(self, decimals=0, *args, **kwargs) -> "Series":
dtype: float64
"""
nv.validate_round(args, kwargs)
- result = com.values_from_object(self).round(decimals)
+ result = self._values.round(decimals)
result = self._constructor(result, index=self.index).__finalize__(self)
return result
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 07e30d41c216d..d7cd3bc3b1c49 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -875,11 +875,6 @@ def test_mean_datetimelike(self):
expected = pd.Series({"A": 1.0, "C": df.loc[1, "C"]})
tm.assert_series_equal(result, expected)
- @pytest.mark.xfail(
- reason="casts to object-dtype and then tries to add timestamps",
- raises=TypeError,
- strict=True,
- )
def test_mean_datetimelike_numeric_only_false(self):
df = pd.DataFrame(
{
| xref #32422, #32419 | https://api.github.com/repos/pandas-dev/pandas/pulls/32426 | 2020-03-04T00:59:24Z | 2020-03-11T02:29:35Z | 2020-03-11T02:29:35Z | 2020-04-24T14:59:13Z |
BUG: Fix DataFrame.apply(..., raw=True) not calling with raw array | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 57c53f73962dc..618b244dede4d 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -328,6 +328,7 @@ Reshaping
- :meth:`DataFrame.pivot` can now take lists for ``index`` and ``columns`` arguments (:issue:`21425`)
- Bug in :func:`concat` where the resulting indices are not copied when ``copy=True`` (:issue:`29879`)
- :meth:`DataFrame.replace` and :meth:`Series.replace` will raise a ``TypeError`` if ``to_replace`` is not an expected type. Previously the ``replace`` would fail silently (:issue:`18634`)
+- Bug in :meth:`DataFrame.apply` where callback was called with :class:`Series` parameter even though ``raw=True`` requested. (:issue:`32423`)
Sparse
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 70e0a129c055f..ceb45bc71326e 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -179,7 +179,7 @@ def get_result(self):
return self.apply_empty_result()
# raw
- elif self.raw and not self.obj._is_mixed_type:
+ elif self.raw:
return self.apply_raw()
return self.apply_standard()
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index fe6abef97acc4..c117f7ce7ac60 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -235,7 +235,14 @@ def test_apply_broadcast_error(self, int_frame_const_col):
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]), axis=1, result_type="broadcast")
- def test_apply_raw(self, float_frame):
+ def test_apply_raw(self, float_frame, mixed_type_frame):
+ def _assert_raw(x):
+ assert isinstance(x, np.ndarray)
+ assert x.ndim == 1
+
+ float_frame.apply(_assert_raw, raw=True)
+ float_frame.apply(_assert_raw, axis=1, raw=True)
+
result0 = float_frame.apply(np.mean, raw=True)
result1 = float_frame.apply(np.mean, axis=1, raw=True)
@@ -250,6 +257,10 @@ def test_apply_raw(self, float_frame):
expected = float_frame * 2
tm.assert_frame_equal(result, expected)
+ # Mixed dtype (GH-32423)
+ mixed_type_frame.apply(_assert_raw, raw=True)
+ mixed_type_frame.apply(_assert_raw, axis=1, raw=True)
+
def test_apply_axis1(self, float_frame):
d = float_frame.index[0]
tapplied = float_frame.apply(np.mean, axis=1)
| - [x] closes https://github.com/pandas-dev/pandas/issues/32423
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32425 | 2020-03-03T23:37:50Z | 2020-03-14T20:21:12Z | 2020-03-14T20:21:12Z | 2020-03-16T09:25:35Z |
Fix BUG: overflow on pd.Timedelta(nanoseconds=) constructor | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 57c53f73962dc..2e805e725c7b2 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -234,7 +234,7 @@ Numeric
Conversion
^^^^^^^^^^
- Bug in :class:`Series` construction from NumPy array with big-endian ``datetime64`` dtype (:issue:`29684`)
--
+- Bug in :class:`Timedelta` construction with large nanoseconds keyword value (:issue:`34202`)
-
Strings
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 298028227e18b..7bd02b734beeb 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1198,7 +1198,7 @@ class Timedelta(_Timedelta):
kwargs = {key: _to_py_int_float(kwargs[key]) for key in kwargs}
- nano = np.timedelta64(kwargs.pop('nanoseconds', 0), 'ns')
+ nano = convert_to_timedelta64(kwargs.pop('nanoseconds', 0), 'ns')
try:
value = nano + convert_to_timedelta64(timedelta(**kwargs),
'ns')
diff --git a/pandas/tests/tslibs/test_timedeltas.py b/pandas/tests/tslibs/test_timedeltas.py
index 86d5cc749b5e1..c87752ccf151e 100644
--- a/pandas/tests/tslibs/test_timedeltas.py
+++ b/pandas/tests/tslibs/test_timedeltas.py
@@ -28,3 +28,9 @@ def test_delta_to_nanoseconds_error():
with pytest.raises(TypeError, match="<class 'numpy.ndarray'>"):
delta_to_nanoseconds(obj)
+
+
+def test_huge_nanoseconds_overflow():
+ # GH 32402
+ assert delta_to_nanoseconds(Timedelta(1e10)) == 1e10
+ assert delta_to_nanoseconds(Timedelta(nanoseconds=1e10)) == 1e10
| Add regression test
- [x] closes #32402
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32424 | 2020-03-03T22:24:14Z | 2020-03-04T14:22:36Z | 2020-03-04T14:22:36Z | 2020-03-04T15:43:52Z |
CLN: avoid values_from_object in NDFrame | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index f7eb79a4f1c78..93b79627ee2e4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1327,7 +1327,7 @@ def equals(self, other):
# Unary Methods
def __neg__(self):
- values = com.values_from_object(self)
+ values = self._values
if is_bool_dtype(values):
arr = operator.inv(values)
elif (
@@ -1341,7 +1341,7 @@ def __neg__(self):
return self.__array_wrap__(arr)
def __pos__(self):
- values = com.values_from_object(self)
+ values = self._values
if is_bool_dtype(values) or is_period_arraylike(values):
arr = values
elif (
@@ -1796,7 +1796,7 @@ def empty(self) -> bool_t:
__array_priority__ = 1000
def __array__(self, dtype=None) -> np.ndarray:
- return com.values_from_object(self)
+ return np.asarray(self._values, dtype=dtype)
def __array_wrap__(self, result, context=None):
result = lib.item_from_zerodim(result)
@@ -8521,7 +8521,7 @@ def _where(
# try to not change dtype at first (if try_quick)
if try_quick:
- new_other = com.values_from_object(self)
+ new_other = np.asarray(self)
new_other = new_other.copy()
new_other[icond] = other
other = new_other
| xref #32419 | https://api.github.com/repos/pandas-dev/pandas/pulls/32422 | 2020-03-03T21:20:54Z | 2020-03-04T14:07:57Z | 2020-03-04T14:07:57Z | 2020-04-05T17:45:35Z |
TYP: enforce annotation on SingleBlockManager.__init__ | diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 98afc5ac3a0e3..c59499e1bee5c 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -3,12 +3,12 @@
import itertools
import operator
import re
-from typing import Dict, List, Optional, Sequence, Tuple, Union
+from typing import Dict, List, Optional, Sequence, Tuple, TypeVar, Union
import numpy as np
from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib
-from pandas._typing import DtypeObj, Label
+from pandas._typing import ArrayLike, DtypeObj, Label
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
@@ -58,6 +58,8 @@
# TODO: flexible with index=None and/or items=None
+T = TypeVar("T", bound="BlockManager")
+
class BlockManager(PandasObject):
"""
@@ -149,6 +151,13 @@ def __init__(
self._blknos = None
self._blklocs = None
+ @classmethod
+ def from_blocks(cls, blocks: List[Block], axes: List[Index]):
+ """
+ Constructor for BlockManager and SingleBlockManager with same signature.
+ """
+ return cls(blocks, axes, do_integrity_check=False)
+
@property
def blknos(self):
"""
@@ -176,7 +185,7 @@ def blklocs(self):
return self._blklocs
- def make_empty(self, axes=None) -> "BlockManager":
+ def make_empty(self: T, axes=None) -> T:
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [Index([])] + self.axes[1:]
@@ -184,10 +193,11 @@ def make_empty(self, axes=None) -> "BlockManager":
# preserve dtype if possible
if self.ndim == 1:
assert isinstance(self, SingleBlockManager) # for mypy
- blocks = np.array([], dtype=self.array_dtype)
+ arr = np.array([], dtype=self.array_dtype)
+ blocks = [make_block(arr, placement=slice(0, 0), ndim=1)]
else:
blocks = []
- return type(self)(blocks, axes)
+ return type(self).from_blocks(blocks, axes)
def __nonzero__(self) -> bool:
return True
@@ -380,7 +390,7 @@ def reduce(self, func, *args, **kwargs):
return res
- def apply(self, f, filter=None, **kwargs) -> "BlockManager":
+ def apply(self: T, f, filter=None, **kwargs) -> T:
"""
Iterate over the blocks, collect and create a new BlockManager.
@@ -458,8 +468,8 @@ def apply(self, f, filter=None, **kwargs) -> "BlockManager":
if len(result_blocks) == 0:
return self.make_empty(self.axes)
- bm = type(self)(result_blocks, self.axes, do_integrity_check=False)
- return bm
+
+ return type(self).from_blocks(result_blocks, self.axes)
def quantile(
self,
@@ -658,7 +668,7 @@ def comp(s, regex=False):
rb = new_rb
result_blocks.extend(rb)
- bm = type(self)(result_blocks, self.axes)
+ bm = type(self).from_blocks(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
@@ -747,7 +757,7 @@ def combine(self, blocks: List[Block], copy: bool = True) -> "BlockManager":
axes = list(self.axes)
axes[0] = self.items.take(indexer)
- return type(self)(new_blocks, axes, do_integrity_check=False)
+ return type(self).from_blocks(new_blocks, axes)
def get_slice(self, slobj: slice, axis: int = 0) -> "BlockManager":
@@ -774,7 +784,7 @@ def __contains__(self, item) -> bool:
def nblocks(self) -> int:
return len(self.blocks)
- def copy(self, deep=True) -> "BlockManager":
+ def copy(self: T, deep=True) -> T:
"""
Make deep or shallow copy of BlockManager
@@ -1244,14 +1254,14 @@ def reindex_axis(
)
def reindex_indexer(
- self,
+ self: T,
new_axis,
indexer,
axis: int,
fill_value=None,
- allow_dups=False,
+ allow_dups: bool = False,
copy: bool = True,
- ):
+ ) -> T:
"""
Parameters
----------
@@ -1299,7 +1309,8 @@ def reindex_indexer(
new_axes = list(self.axes)
new_axes[axis] = new_axis
- return type(self)(new_blocks, new_axes)
+
+ return type(self).from_blocks(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
@@ -1500,6 +1511,8 @@ def __init__(
do_integrity_check: bool = False,
fastpath: bool = False,
):
+ assert isinstance(block, Block), type(block)
+
if isinstance(axis, list):
if len(axis) != 1:
raise ValueError(
@@ -1510,38 +1523,29 @@ def __init__(
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
- if isinstance(block, list):
-
- # empty block
- if len(block) == 0:
- block = [np.array([])]
- elif len(block) != 1:
- raise ValueError(
- "Cannot create SingleBlockManager with more than 1 block"
- )
- block = block[0]
else:
self.axes = [ensure_index(axis)]
- # create the block here
- if isinstance(block, list):
-
- # provide consolidation to the interleaved_dtype
- if len(block) > 1:
- dtype = _interleaved_dtype(block)
- block = [b.astype(dtype) for b in block]
- block = _consolidate(block)
-
- if len(block) != 1:
- raise ValueError(
- "Cannot create SingleBlockManager with more than 1 block"
- )
- block = block[0]
+ self.blocks = tuple([block])
- if not isinstance(block, Block):
- block = make_block(block, placement=slice(0, len(axis)), ndim=1)
+ @classmethod
+ def from_blocks(
+ cls, blocks: List[Block], axes: List[Index]
+ ) -> "SingleBlockManager":
+ """
+ Constructor for BlockManager and SingleBlockManager with same signature.
+ """
+ assert len(blocks) == 1
+ assert len(axes) == 1
+ return cls(blocks[0], axes[0], do_integrity_check=False, fastpath=True)
- self.blocks = tuple([block])
+ @classmethod
+ def from_array(cls, array: ArrayLike, index: Index) -> "SingleBlockManager":
+ """
+ Constructor for if we have an array that is not yet a Block.
+ """
+ block = make_block(array, placement=slice(0, len(index)), ndim=1)
+ return cls(block, index, fastpath=True)
def _post_setstate(self):
pass
@@ -1568,7 +1572,10 @@ def get_slice(self, slobj: slice, axis: int = 0) -> "SingleBlockManager":
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
- return type(self)(self._block._slice(slobj), self.index[slobj], fastpath=True)
+ blk = self._block
+ array = blk._slice(slobj)
+ block = blk.make_block_same_class(array, placement=range(len(array)))
+ return type(self)(block, self.index[slobj], fastpath=True)
@property
def index(self) -> Index:
@@ -1630,7 +1637,7 @@ def fast_xs(self, loc):
"""
raise NotImplementedError("Use series._values[loc] instead")
- def concat(self, to_concat, new_axis) -> "SingleBlockManager":
+ def concat(self, to_concat, new_axis: Index) -> "SingleBlockManager":
"""
Concatenate a list of SingleBlockManagers into a single
SingleBlockManager.
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 568e99622dd29..c44c72077aa29 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -205,7 +205,7 @@ def __init__(
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
- data = SingleBlockManager(data, index, fastpath=True)
+ data = SingleBlockManager.from_array(data, index)
if copy:
data = data.copy()
if index is None:
@@ -317,7 +317,7 @@ def __init__(
else:
data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
- data = SingleBlockManager(data, index, fastpath=True)
+ data = SingleBlockManager.from_array(data, index)
generic.NDFrame.__init__(self, data)
self.name = name
diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py
index 6311070cfe2bb..8a8dac54cf96a 100644
--- a/pandas/tests/extension/test_external_block.py
+++ b/pandas/tests/extension/test_external_block.py
@@ -2,7 +2,7 @@
import pytest
import pandas as pd
-from pandas.core.internals import BlockManager
+from pandas.core.internals import BlockManager, SingleBlockManager
from pandas.core.internals.blocks import Block, NonConsolidatableMixIn
@@ -36,7 +36,8 @@ def test_concat_series():
# GH17728
values = np.arange(3, dtype="int64")
block = CustomBlock(values, placement=slice(0, 3))
- s = pd.Series(block, pd.RangeIndex(3), fastpath=True)
+ mgr = SingleBlockManager(block, pd.RangeIndex(3))
+ s = pd.Series(mgr, pd.RangeIndex(3), fastpath=True)
res = pd.concat([s, s])
assert isinstance(res._data.blocks[0], CustomBlock)
| cc @simonjayhawkins my impression from yesterday's threads was that mypy should be catching the wrong-type being passed in `SingleBlockManager.__init__`, did I misunderstand something?
Also (and I know ive asked before) is there a nice way to annotate the return type as "same type as self"? I tried `pandas._typing.T` but that didnt do it.
Parts of this are repeated in several BlockManager methods, should probably be made into a helper function, pending discussion on above. | https://api.github.com/repos/pandas-dev/pandas/pulls/32421 | 2020-03-03T20:18:58Z | 2020-03-11T02:32:11Z | 2020-03-11T02:32:11Z | 2020-03-11T14:07:31Z |
ENH: Categorical.fillna allow Categorical/ndarray | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index f9059054ba59f..1a51101bc8db8 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -11,6 +11,7 @@
from pandas._libs import Timestamp, algos, hashtable as htable, lib
from pandas._libs.tslib import iNaT
+from pandas._typing import AnyArrayLike
from pandas.util._decorators import doc
from pandas.core.dtypes.cast import (
@@ -45,10 +46,14 @@
is_unsigned_integer_dtype,
needs_i8_conversion,
)
-from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
+from pandas.core.dtypes.generic import (
+ ABCExtensionArray,
+ ABCIndex,
+ ABCIndexClass,
+ ABCSeries,
+)
from pandas.core.dtypes.missing import isna, na_value_for_dtype
-import pandas.core.common as com
from pandas.core.construction import array, extract_array
from pandas.core.indexers import validate_indices
@@ -384,7 +389,7 @@ def unique(values):
unique1d = unique
-def isin(comps, values) -> np.ndarray:
+def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
"""
Compute the isin boolean array.
@@ -409,15 +414,14 @@ def isin(comps, values) -> np.ndarray:
f"to isin(), you passed a [{type(values).__name__}]"
)
- if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
+ if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
values = construct_1d_object_array_from_listlike(list(values))
+ comps = extract_array(comps, extract_numpy=True)
if is_categorical_dtype(comps):
# TODO(extension)
# handle categoricals
- return comps._values.isin(values)
-
- comps = com.values_from_object(comps)
+ return comps.isin(values) # type: ignore
comps, dtype = _ensure_data(comps)
values, _ = _ensure_data(values, dtype=dtype)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 40a169d03f39c..c6e6acfd47e9a 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1738,12 +1738,17 @@ def fillna(self, value=None, method=None, limit=None):
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
- if isinstance(value, ABCSeries):
- if not value[~value.isin(self.categories)].isna().all():
+ if isinstance(value, (np.ndarray, Categorical, ABCSeries)):
+ # We get ndarray or Categorical if called via Series.fillna,
+ # where it will unwrap another aligned Series before getting here
+
+ mask = ~algorithms.isin(value, self.categories)
+ if not isna(value[mask]).all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(codes == -1)
+ codes = codes.copy()
codes[indexer] = values_codes[indexer]
# If value is not a dict or Series it should be a scalar
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index f0147859cae97..d34b7175c5a86 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5988,6 +5988,8 @@ def fillna(
value = create_series_with_explicit_dtype(
value, dtype_if_empty=object
)
+ value = value.reindex(self.index, copy=False)
+ value = value._values
elif not is_list_like(value):
pass
else:
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index f6e79a0f2045d..842e55d9abaa8 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -396,6 +396,7 @@ def apply(self, f, filter=None, **kwargs) -> "BlockManager":
BlockManager
"""
result_blocks = []
+ # fillna: Series/DataFrame is responsible for making sure value is aligned
# filter kwarg is used in replace-* family of methods
if filter is not None:
@@ -420,11 +421,6 @@ def apply(self, f, filter=None, **kwargs) -> "BlockManager":
align_keys = ["new", "mask"]
else:
align_keys = ["mask"]
- elif f == "fillna":
- # fillna internally does putmask, maybe it's better to do this
- # at mgr, not block level?
- align_copy = False
- align_keys = ["value"]
else:
align_keys = []
diff --git a/pandas/tests/arrays/categorical/test_missing.py b/pandas/tests/arrays/categorical/test_missing.py
index 8889f45a84237..9eb3c8b3a8c48 100644
--- a/pandas/tests/arrays/categorical/test_missing.py
+++ b/pandas/tests/arrays/categorical/test_missing.py
@@ -82,3 +82,18 @@ def test_fillna_iterable_category(self, named):
expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)])
tm.assert_categorical_equal(result, expected)
+
+ def test_fillna_array(self):
+ # accept Categorical or ndarray value if it holds appropriate values
+ cat = Categorical(["A", "B", "C", None, None])
+
+ other = cat.fillna("C")
+ result = cat.fillna(other)
+ tm.assert_categorical_equal(result, other)
+ assert isna(cat[-1]) # didnt modify original inplace
+
+ other = np.array(["A", "B", "C", "B", "A"])
+ result = cat.fillna(other)
+ expected = Categorical(["A", "B", "C", "B", "A"], dtype=cat.dtype)
+ tm.assert_categorical_equal(result, expected)
+ assert isna(cat[-1]) # didnt modify original inplace
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index a1de9c435c9ba..ad7028702ec8c 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -760,6 +760,16 @@ def test_categorical_from_codes(self):
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
+ def test_categorical_isin(self):
+ vals = np.array([0, 1, 2, 0])
+ cats = ["a", "b", "c"]
+ cat = Categorical(1).from_codes(vals, cats)
+ other = Categorical(1).from_codes(np.array([0, 1]), cats)
+
+ expected = np.array([True, True, False, True])
+ result = algos.isin(cat, other)
+ tm.assert_numpy_array_equal(expected, result)
+
def test_same_nan_is_in(self):
# GH 22160
# nan is special, because from " a is b" doesn't follow "a == b"
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
xref #32414.
cc @TomAugspurger ATM the new test implemented in tests.arrays.categorical.test_missing is failing on the ndarray case for reasons that I dont fully grok. Are you familiar with this? | https://api.github.com/repos/pandas-dev/pandas/pulls/32420 | 2020-03-03T20:03:02Z | 2020-03-14T16:47:45Z | 2020-03-14T16:47:45Z | 2020-03-14T16:52:48Z |
CLN: move away from .values, _ndarray_values | diff --git a/pandas/_testing.py b/pandas/_testing.py
index b0f18cb6fdd39..33ec4e4886aa6 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -706,11 +706,11 @@ def _get_ilevel_values(index, level):
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
- assert_interval_array_equal(left.values, right.values)
+ assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
- assert_categorical_equal(left.values, right.values, obj=f"{obj} category")
+ assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
@@ -883,7 +883,7 @@ def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray")
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
- assert_numpy_array_equal(left._data, right._data, obj=f"{obj}.values")
+ assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
@@ -1170,10 +1170,10 @@ def assert_series_equal(
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
- if not Index(left.values).equals(Index(right.values)):
+ if not Index(left._values).equals(Index(right._values)):
msg = (
- f"[datetimelike_compat=True] {left.values} "
- f"is not equal to {right.values}."
+ f"[datetimelike_compat=True] {left._values} "
+ f"is not equal to {right._values}."
)
raise AssertionError(msg)
else:
@@ -1212,8 +1212,8 @@ def assert_series_equal(
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(
- left.values,
- right.values,
+ left._values,
+ right._values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index c06bd8a1d6e36..7dac36b53fce5 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1181,9 +1181,11 @@ def try_timedelta(v):
from pandas import to_timedelta
try:
- return to_timedelta(v)._ndarray_values.reshape(shape)
+ td_values = to_timedelta(v)
except ValueError:
return v.reshape(shape)
+ else:
+ return np.asarray(td_values).reshape(shape)
inferred_type = lib.infer_datetimelike_array(ensure_object(v))
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 106128004f549..f74ce22ce071e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4582,7 +4582,7 @@ def drop_duplicates(
duplicated = self.duplicated(subset, keep=keep)
if inplace:
- (inds,) = (-duplicated)._ndarray_values.nonzero()
+ (inds,) = np.asarray(-duplicated).nonzero()
new_data = self._data.take(inds)
if ignore_index:
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index f70975e19b9a4..c1efa512f326a 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2998,7 +2998,7 @@ def _update_indexer(idxr, indexer=indexer):
indexer = _update_indexer(indexers, indexer=indexer)
else:
# no matches we are done
- return Int64Index([])._ndarray_values
+ return np.array([], dtype=np.int64)
elif com.is_null_slice(k):
# empty slice
@@ -3024,7 +3024,7 @@ def _update_indexer(idxr, indexer=indexer):
# empty indexer
if indexer is None:
- return Int64Index([])._ndarray_values
+ return np.array([], dtype=np.int64)
indexer = self._reorder_indexer(seq, indexer)
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index cb6f68ae0376d..6c250ccd09a51 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -425,7 +425,7 @@ def equals(self, other) -> bool:
other = self._constructor(other)
if not is_dtype_equal(self.dtype, other.dtype) or self.shape != other.shape:
return False
- left, right = self._ndarray_values, other._ndarray_values
+ left, right = self._values, other._values
return ((left == right) | (self._isnan & other._isnan)).all()
except (TypeError, ValueError):
return False
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 0397dfa923afb..6e79f5890f76d 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1672,7 +1672,7 @@ def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFra
continue
if convert_missing: # Replacement follows Stata notation
- missing_loc = np.nonzero(missing._ndarray_values)[0]
+ missing_loc = np.nonzero(np.asarray(missing))[0]
umissing, umissing_loc = np.unique(series[missing], return_inverse=True)
replacement = Series(series, dtype=np.object)
for j, um in enumerate(umissing):
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index c399e5b9b7017..8260684c02ea6 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -218,13 +218,13 @@ def _convert_1d(values, units, axis):
if isinstance(values, valid_types) or is_integer(values) or is_float(values):
return get_datevalue(values, axis.freq)
elif isinstance(values, PeriodIndex):
- return values.asfreq(axis.freq)._ndarray_values
+ return values.asfreq(axis.freq).asi8
elif isinstance(values, Index):
return values.map(lambda x: get_datevalue(x, axis.freq))
elif lib.infer_dtype(values, skipna=False) == "period":
# https://github.com/pandas-dev/pandas/issues/24304
# convert ndarray[period] -> PeriodIndex
- return PeriodIndex(values, freq=axis.freq)._ndarray_values
+ return PeriodIndex(values, freq=axis.freq).asi8
elif isinstance(values, (list, tuple, np.ndarray, Index)):
return [get_datevalue(x, axis.freq) for x in values]
return values
@@ -607,7 +607,7 @@ def _daily_finder(vmin, vmax, freq):
info = np.zeros(
span, dtype=[("val", np.int64), ("maj", bool), ("min", bool), ("fmt", "|S20")]
)
- info["val"][:] = dates_._ndarray_values
+ info["val"][:] = dates_.asi8
info["fmt"][:] = ""
info["maj"][[0, -1]] = True
# .. and set some shortcuts
| Also _values_from_object and _internal_get_values, though none of those made it into this PR. | https://api.github.com/repos/pandas-dev/pandas/pulls/32419 | 2020-03-03T19:51:04Z | 2020-03-04T14:14:20Z | 2020-03-04T14:14:20Z | 2020-03-04T15:08:21Z |
`_ensure_type` should use `issubclass` | diff --git a/pandas/core/base.py b/pandas/core/base.py
index f55d9f905945d..40ca70ec7973a 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -93,7 +93,7 @@ def _ensure_type(self: T, obj) -> T:
Used by type checkers.
"""
- assert isinstance(obj, type(self)), type(obj)
+ assert issubclass(type(obj), type(self)), type(obj)
return obj
diff --git a/pandas/tests/base/test_base.py b/pandas/tests/base/test_base.py
new file mode 100644
index 0000000000000..22eb4f03653f6
--- /dev/null
+++ b/pandas/tests/base/test_base.py
@@ -0,0 +1,38 @@
+import pytest
+
+from pandas.core.base import PandasObject
+
+pandas_object = PandasObject()
+
+
+class SubclassPandasObject(PandasObject):
+ pass
+
+
+subclass_pandas_object = SubclassPandasObject()
+
+
+@pytest.mark.parametrize("other_object", [pandas_object, subclass_pandas_object])
+def test_pandas_object_ensure_type(other_object):
+ pandas_object = PandasObject()
+ assert pandas_object._ensure_type(other_object)
+
+
+def test_pandas_object_ensure_type_for_same_object():
+ pandas_object_a = PandasObject()
+ pandas_object_b = pandas_object_a
+ assert pandas_object_a._ensure_type(pandas_object_b)
+
+
+class OtherClass:
+ pass
+
+
+other_class = OtherClass()
+
+
+@pytest.mark.parametrize("other_object", [other_class])
+def test_pandas_object_ensure_type_for_false(other_object):
+ pandas_object = PandasObject()
+ with pytest.raises(AssertionError):
+ assert pandas_object._ensure_type(other_object)
| Commit pandas-dev/pandas@6fd326d5a249967f9b6be60fc3c5f7366d914684 in pull request pandas-dev/pandas#30613 added `_ensure_type`, which utilizes `isinstance`. However, it is reasonable to assume that someone may want to create a DataFrame subclass. Therefore, `_ensure_type` should use `issubclass`.
- [x] closes #31925
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
whatsnew entry isn't necessary?
| https://api.github.com/repos/pandas-dev/pandas/pulls/32416 | 2020-03-03T18:12:19Z | 2020-03-13T17:37:26Z | null | 2020-03-13T17:37:26Z |
BUG: fixes plotting with nullable integers (#32073) | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 57c53f73962dc..75d0512547ad5 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -306,8 +306,8 @@ Plotting
^^^^^^^^
- :func:`.plot` for line/bar now accepts color by dictonary (:issue:`8193`).
--
- Bug in :meth:`DataFrame.boxplot` and :meth:`DataFrame.plot.boxplot` lost color attributes of ``medianprops``, ``whiskerprops``, ``capprops`` and ``medianprops`` (:issue:`30346`)
+- Updated :meth:`DataFrame.plot` to handle nullable integers (:issue:`32073`).
Groupby/resample/rolling
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 63d0b8abe59d9..50e00e2e1db86 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -8,8 +8,10 @@
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
+ is_extension_array_dtype,
is_hashable,
is_integer,
+ is_integer_dtype,
is_iterator,
is_list_like,
is_number,
@@ -409,13 +411,19 @@ def _compute_plot_data(self):
if is_empty:
raise TypeError("no numeric data to plot")
- # GH25587: cast ExtensionArray of pandas (IntegerArray, etc.) to
- # np.ndarray before plot.
- numeric_data = numeric_data.copy()
- for col in numeric_data:
- numeric_data[col] = np.asarray(numeric_data[col])
+ def convert_to_ndarray(data):
+ # GH32073: cast to float if values contain nulled integers
+ if is_integer_dtype(data.dtype) and is_extension_array_dtype(data.dtype):
+ return data.to_numpy(dtype="float", na_value=np.nan)
- self.data = numeric_data
+ # GH25587: cast ExtensionArray of pandas (IntegerArray, etc.) to
+ # np.ndarray before plot.
+ if len(data) > 0:
+ return np.asarray(data)
+
+ return data
+
+ self.data = numeric_data.apply(convert_to_ndarray)
def _make_plot(self):
raise AbstractMethodError(self)
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index ffbd135466709..a90075ddf4d38 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -3316,6 +3316,21 @@ def test_missing_markers_legend_using_style(self):
self._check_legend_labels(ax, labels=["A", "B", "C"])
self._check_legend_marker(ax, expected_markers=[".", ".", "."])
+ def test_nullable_int_plot(self):
+ # GH32073
+ dates = ["2008", "2009", None, "2011", "2012"]
+ df = pd.DataFrame(
+ {
+ "A": [1, 2, 3, 4, 5],
+ "B": [7, 5, np.nan, 3, 2],
+ "C": pd.to_datetime(dates, format="%Y"),
+ }
+ )
+
+ _check_plot_works(df.plot, x="A", y="B")
+ _check_plot_works(df[["A", "B"]].astype("Int64").plot, x="A", y="B")
+ _check_plot_works(df[["A", "C"]].plot, x="A", y="C")
+
def _generate_4_axes_via_gridspec():
import matplotlib.pyplot as plt
| - [x] closes #32073
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32410 | 2020-03-03T13:33:42Z | 2020-05-08T16:07:38Z | null | 2020-05-08T16:07:39Z |
Properly handle missing attributes in query/eval strings | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 97a7f22df3985..15dec55bb86f8 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -500,6 +500,7 @@ Other
- Bug in :meth:`DataFrame.to_records` incorrectly losing timezone information in timezone-aware ``datetime64`` columns (:issue:`32535`)
- Fixed :func:`pandas.testing.assert_series_equal` to correctly raise if left object is a different subclass with ``check_series_type=True`` (:issue:`32670`).
- :meth:`IntegerArray.astype` now supports ``datetime64`` dtype (:issue:32538`)
+- Getting a missing attribute in a query/eval string raises the correct ``AttributeError`` (:issue:`32408`)
- Fixed bug in :func:`pandas.testing.assert_series_equal` where dtypes were checked for ``Interval`` and ``ExtensionArray`` operands when ``check_dtype`` was ``False`` (:issue:`32747`)
- Bug in :meth:`Series.map` not raising on invalid ``na_action`` (:issue:`32815`)
- Bug in :meth:`DataFrame.__dir__` caused a segfault when using unicode surrogates in a column name (:issue:`25509`)
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index c59952bea8dc0..6cd9a15b70d39 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -635,8 +635,9 @@ def visit_Attribute(self, node, **kwargs):
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
+ raise
- raise ValueError(f"Invalid Attribute context {ctx.__name__}")
+ raise ValueError(f"Invalid Attribute context {type(ctx).__name__}")
def visit_Call(self, node, side=None, **kwargs):
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index bf9eeb532b43b..1a07780462ea3 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -1165,6 +1165,11 @@ def test_lots_of_operators_string(self, df):
expect = df[df[" &^ :!€$?(} > <++*'' "] > 4]
tm.assert_frame_equal(res, expect)
+ def test_missing_attribute(self, df):
+ message = "module 'pandas' has no attribute 'thing'"
+ with pytest.raises(AttributeError, match=message):
+ df.eval("@pd.thing")
+
def test_failing_quote(self, df):
with pytest.raises(SyntaxError):
df.query("`it's` > `that's`")
| Consider this script:
```python
import pandas as pd
pd.eval("pd.thing")
```
Currently it raises an error like this:
```
File "/home/alex/work/pandas/pandas/core/computation/expr.py", line 640, in visit_Attribute
raise ValueError(f"Invalid Attribute context {ctx.__name__}")
AttributeError: 'Load' object has no attribute '__name__'
```
Adding `__class__` to that line changes the error to the more sensible:
ValueError: Invalid Attribute context Load
Re-raising the original error gives what's really needed:
```
File "/home/alex/work/pandas/pandas/core/computation/expr.py", line 631, in visit_Attribute
v = getattr(resolved, attr)
File "/home/alex/work/pandas/pandas/__init__.py", line 260, in __getattr__
raise AttributeError(f"module 'pandas' has no attribute '{name}'")
AttributeError: module 'pandas' has no attribute 'thing'
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/32408 | 2020-03-03T11:33:08Z | 2020-04-10T19:30:56Z | 2020-04-10T19:30:55Z | 2020-04-10T19:31:01Z |
CLN: remove is_period_arraylike, is_datetime_arraylike | diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index df5bac1071985..1afe7edf2641b 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -7,7 +7,7 @@
import numpy as np
-from pandas._libs import algos, lib
+from pandas._libs import algos
from pandas._libs.tslibs import conversion
from pandas._typing import ArrayLike, DtypeObj
@@ -19,14 +19,7 @@
PeriodDtype,
registry,
)
-from pandas.core.dtypes.generic import (
- ABCCategorical,
- ABCDatetimeIndex,
- ABCIndexClass,
- ABCPeriodArray,
- ABCPeriodIndex,
- ABCSeries,
-)
+from pandas.core.dtypes.generic import ABCCategorical, ABCIndexClass
from pandas.core.dtypes.inference import ( # noqa:F401
is_array_like,
is_bool,
@@ -606,71 +599,6 @@ def is_excluded_dtype(dtype) -> bool:
return _is_dtype(arr_or_dtype, condition)
-def is_period_arraylike(arr) -> bool:
- """
- Check whether an array-like is a periodical array-like or PeriodIndex.
-
- Parameters
- ----------
- arr : array-like
- The array-like to check.
-
- Returns
- -------
- boolean
- Whether or not the array-like is a periodical array-like or
- PeriodIndex instance.
-
- Examples
- --------
- >>> is_period_arraylike([1, 2, 3])
- False
- >>> is_period_arraylike(pd.Index([1, 2, 3]))
- False
- >>> is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
- True
- """
- if isinstance(arr, (ABCPeriodIndex, ABCPeriodArray)):
- return True
- elif isinstance(arr, (np.ndarray, ABCSeries)):
- return is_period_dtype(arr.dtype)
- return getattr(arr, "inferred_type", None) == "period"
-
-
-def is_datetime_arraylike(arr) -> bool:
- """
- Check whether an array-like is a datetime array-like or DatetimeIndex.
-
- Parameters
- ----------
- arr : array-like
- The array-like to check.
-
- Returns
- -------
- boolean
- Whether or not the array-like is a datetime array-like or
- DatetimeIndex.
-
- Examples
- --------
- >>> is_datetime_arraylike([1, 2, 3])
- False
- >>> is_datetime_arraylike(pd.Index([1, 2, 3]))
- False
- >>> is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3]))
- True
- """
- if isinstance(arr, ABCDatetimeIndex):
- return True
- elif isinstance(arr, (np.ndarray, ABCSeries)):
- return (
- is_object_dtype(arr.dtype)
- and lib.infer_dtype(arr, skipna=False) == "datetime"
- )
- return getattr(arr, "inferred_type", None) == "datetime"
-
-
def is_dtype_equal(source, target) -> bool:
"""
Check if two dtypes are equal.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index f0147859cae97..ed26bfa5369c7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -72,7 +72,6 @@
is_number,
is_numeric_dtype,
is_object_dtype,
- is_period_arraylike,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
@@ -1342,7 +1341,7 @@ def __neg__(self):
def __pos__(self):
values = self._values
- if is_bool_dtype(values) or is_period_arraylike(values):
+ if is_bool_dtype(values):
arr = values
elif (
is_numeric_dtype(values)
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index db774a03c02f8..e13e575291324 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -7,10 +7,9 @@
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
- is_datetime_arraylike,
is_integer_dtype,
is_list_like,
- is_period_arraylike,
+ is_period_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
@@ -45,12 +44,8 @@ def _get_values(self):
elif is_timedelta64_dtype(data.dtype):
return TimedeltaIndex(data, copy=False, name=self.name)
- else:
- if is_period_arraylike(data):
- # TODO: use to_period_array
- return PeriodArray(data, copy=False)
- if is_datetime_arraylike(data):
- return DatetimeIndex(data, copy=False, name=self.name)
+ elif is_period_dtype(data):
+ return PeriodArray(data, copy=False)
raise TypeError(
f"cannot convert an object of type {type(data)} to a datetimelike index"
@@ -330,9 +325,7 @@ def __new__(cls, data):
return DatetimeProperties(data, orig)
elif is_timedelta64_dtype(data.dtype):
return TimedeltaProperties(data, orig)
- elif is_period_arraylike(data):
+ elif is_period_dtype(data):
return PeriodProperties(data, orig)
- elif is_datetime_arraylike(data):
- return DatetimeProperties(data, orig)
raise AttributeError("Can only use .dt accessor with datetimelike values")
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 8da2797835080..66bf696cbe912 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -281,18 +281,6 @@ def test_is_string_dtype():
assert com.is_string_dtype(pd.array(["a", "b"], dtype="string"))
-def test_is_period_arraylike():
- assert not com.is_period_arraylike([1, 2, 3])
- assert not com.is_period_arraylike(pd.Index([1, 2, 3]))
- assert com.is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
-
-
-def test_is_datetime_arraylike():
- assert not com.is_datetime_arraylike([1, 2, 3])
- assert not com.is_datetime_arraylike(pd.Index([1, 2, 3]))
- assert com.is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3]))
-
-
integer_dtypes: List = []
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 1a1b7e8e1bd08..287d602015009 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -20,7 +20,7 @@
from pandas.core.dtypes.common import (
is_datetime64_dtype,
- is_period_arraylike,
+ is_period_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
@@ -270,7 +270,7 @@ def infer_freq(index, warn: bool = True) -> Optional[str]:
index = values
inferer: _FrequencyInferer
- if is_period_arraylike(index):
+ if is_period_dtype(index):
raise TypeError(
"PeriodIndex given. Check the `freq` attribute "
"instead of using infer_freq."
| There are better alternatives | https://api.github.com/repos/pandas-dev/pandas/pulls/32406 | 2020-03-03T04:04:22Z | 2020-03-07T10:33:24Z | 2020-03-07T10:33:24Z | 2020-03-07T15:11:18Z |
CLN: remove unreachable branch in Index._union | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index ceb3f26a0526a..00e662fae4991 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -32,7 +32,6 @@
is_categorical,
is_categorical_dtype,
is_datetime64_any_dtype,
- is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
@@ -2525,14 +2524,8 @@ def _union(self, other, sort):
return other._get_reconciled_name_object(self)
# TODO(EA): setops-refactor, clean all this up
- if is_datetime64tz_dtype(self):
- lvals = self._ndarray_values
- else:
- lvals = self._values
- if is_datetime64tz_dtype(other):
- rvals = other._ndarray_values
- else:
- rvals = other._values
+ lvals = self._values
+ rvals = other._values
if sort is None and self.is_monotonic and other.is_monotonic:
try:
| https://api.github.com/repos/pandas-dev/pandas/pulls/32405 | 2020-03-03T03:20:40Z | 2020-03-03T16:11:54Z | 2020-03-03T16:11:54Z | 2020-03-03T16:12:34Z | |
Add missing newline | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index ceb3f26a0526a..af6cbd0e95ec1 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4817,6 +4817,7 @@ def isin(self, values, level=None):
Int64Index([1, 2, 3], dtype='int64')
Check whether each index value in a list of values.
+
>>> idx.isin([1, 4])
array([ True, False, False])
| The line ">>> idx.isin([1, 4]) array([ True, False, False])" is was not rendered properly, because it was missing a newline above it.
https://pandas.pydata.org/docs/reference/api/pandas.Index.isin.html
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32404 | 2020-03-03T02:19:04Z | 2020-03-03T03:49:54Z | 2020-03-03T03:49:54Z | 2020-03-03T03:51:41Z |
TYP: internals | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 34fa4c0e6544e..7592fbdc93af4 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -11,6 +11,7 @@
import pandas._libs.internals as libinternals
from pandas._libs.tslibs import Timedelta, conversion
from pandas._libs.tslibs.timezones import tz_compare
+from pandas._typing import DtypeObj
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
@@ -170,20 +171,20 @@ def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
- def _is_single_block(self):
+ def _is_single_block(self) -> bool:
return self.ndim == 1
@property
- def is_view(self):
+ def is_view(self) -> bool:
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
- def is_datelike(self):
+ def is_datelike(self) -> bool:
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
- def is_categorical_astype(self, dtype):
+ def is_categorical_astype(self, dtype) -> bool:
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
@@ -255,7 +256,7 @@ def mgr_locs(self, new_mgr_locs):
self._mgr_locs = new_mgr_locs
@property
- def array_dtype(self):
+ def array_dtype(self) -> DtypeObj:
"""
the dtype to return if I want to construct this block as an
array
@@ -333,7 +334,7 @@ def dtype(self):
return self.values.dtype
@property
- def ftype(self):
+ def ftype(self) -> str:
if getattr(self.values, "_pandas_ftype", False):
dtype = self.dtype.subtype
else:
@@ -367,7 +368,7 @@ def set(self, locs, values):
"""
self.values[locs] = values
- def delete(self, loc):
+ def delete(self, loc) -> None:
"""
Delete given loc(-s) from block in-place.
"""
@@ -401,7 +402,7 @@ def _split_op_result(self, result) -> List["Block"]:
return [result]
- def fillna(self, value, limit=None, inplace=False, downcast=None):
+ def fillna(self, value, limit=None, inplace: bool = False, downcast=None):
"""
fillna on the block with the value. If we fail, then convert to
ObjectBlock and try again
@@ -687,7 +688,7 @@ def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs):
return values
# block actions #
- def copy(self, deep=True):
+ def copy(self, deep: bool = True):
""" copy constructor """
values = self.values
if deep:
@@ -695,7 +696,13 @@ def copy(self, deep=True):
return self.make_block_same_class(values, ndim=self.ndim)
def replace(
- self, to_replace, value, inplace=False, filter=None, regex=False, convert=True
+ self,
+ to_replace,
+ value,
+ inplace: bool = False,
+ filter=None,
+ regex: bool = False,
+ convert: bool = True,
):
"""
replace the to_replace value with value, possible to create new
@@ -917,7 +924,15 @@ def setitem(self, indexer, value):
block = self.make_block(values)
return block
- def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False):
+ def putmask(
+ self,
+ mask,
+ new,
+ align: bool = True,
+ inplace: bool = False,
+ axis: int = 0,
+ transpose: bool = False,
+ ):
"""
putmask the data to the block; it is possible that we may create a
new dtype of block
@@ -1264,7 +1279,7 @@ def func(x):
blocks = [self.make_block_same_class(interp_values)]
return self._maybe_downcast(blocks, downcast)
- def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
+ def take_nd(self, indexer, axis: int, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.bb
@@ -1305,7 +1320,7 @@ def diff(self, n: int, axis: int = 1) -> List["Block"]:
new_values = _block_shape(new_values, ndim=self.ndim)
return [self.make_block(values=new_values)]
- def shift(self, periods, axis=0, fill_value=None):
+ def shift(self, periods, axis: int = 0, fill_value=None):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
@@ -1337,7 +1352,7 @@ def where(
self,
other,
cond,
- align=True,
+ align: bool = True,
errors="raise",
try_cast: bool = False,
axis: int = 0,
@@ -1349,11 +1364,12 @@ def where(
----------
other : a ndarray/object
cond : the condition to respect
- align : boolean, perform alignment on other/cond
+ align : bool, default True
+ Perform alignment on other/cond.
errors : str, {'raise', 'ignore'}, default 'raise'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
- axis : int
+ axis : int, default 0
Returns
-------
@@ -1485,7 +1501,7 @@ def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
blocks = [make_block(new_values, placement=new_placement)]
return blocks, mask
- def quantile(self, qs, interpolation="linear", axis=0):
+ def quantile(self, qs, interpolation="linear", axis: int = 0):
"""
compute the quantiles of the
@@ -1542,7 +1558,13 @@ def quantile(self, qs, interpolation="linear", axis=0):
return make_block(result, placement=np.arange(len(result)), ndim=ndim)
def _replace_coerce(
- self, to_replace, value, inplace=True, regex=False, convert=False, mask=None
+ self,
+ to_replace,
+ value,
+ inplace: bool = True,
+ regex: bool = False,
+ convert: bool = False,
+ mask=None,
):
"""
Replace value corresponding to the given boolean array with another
@@ -1554,7 +1576,7 @@ def _replace_coerce(
Scalar to replace or regular expression to match.
value : object
Replacement object.
- inplace : bool, default False
+ inplace : bool, default True
Perform inplace modification.
regex : bool, default False
If true, perform regular expression substitution.
@@ -1641,7 +1663,9 @@ def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
- def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False):
+ def putmask(
+ self, mask, new, align=True, inplace=False, axis=0, transpose=False,
+ ):
"""
putmask the data to the block; we must be a single block and not
generate other blocks
@@ -1757,7 +1781,7 @@ def _can_hold_na(self):
return self._holder._can_hold_na
@property
- def is_view(self):
+ def is_view(self) -> bool:
"""Extension arrays are never treated as views."""
return False
@@ -1822,7 +1846,7 @@ def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs):
# we are expected to return a 2-d ndarray
return values.reshape(1, len(values))
- def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
+ def take_nd(self, indexer, axis: int = 0, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.
"""
@@ -2083,7 +2107,7 @@ def to_native_types(
)
return formatter.get_result_as_array()
- def should_store(self, value):
+ def should_store(self, value) -> bool:
# when inserting a column should not coerce integers to floats
# unnecessarily
return issubclass(value.dtype.type, np.floating) and value.dtype == self.dtype
@@ -2101,7 +2125,7 @@ def _can_hold_element(self, element: Any) -> bool:
element, (float, int, complex, np.float_, np.int_)
) and not isinstance(element, (bool, np.bool_))
- def should_store(self, value):
+ def should_store(self, value) -> bool:
return issubclass(value.dtype.type, np.complexfloating)
@@ -2120,7 +2144,7 @@ def _can_hold_element(self, element: Any) -> bool:
)
return is_integer(element)
- def should_store(self, value):
+ def should_store(self, value) -> bool:
return is_integer_dtype(value) and value.dtype == self.dtype
@@ -2258,7 +2282,7 @@ def to_native_types(
).reshape(i8values.shape)
return np.atleast_2d(result)
- def should_store(self, value):
+ def should_store(self, value) -> bool:
return (
issubclass(value.dtype.type, np.datetime64)
and not is_datetime64tz_dtype(value)
@@ -2323,7 +2347,7 @@ def _maybe_coerce_values(self, values):
return values
@property
- def is_view(self):
+ def is_view(self) -> bool:
""" return a boolean if I am possibly a view """
# check the ndarray values of the DatetimeIndex values
return self.values._data.base is not None
@@ -2510,7 +2534,7 @@ def fillna(self, value, **kwargs):
)
return super().fillna(value, **kwargs)
- def should_store(self, value):
+ def should_store(self, value) -> bool:
return issubclass(
value.dtype.type, np.timedelta64
) and not is_extension_array_dtype(value)
@@ -2556,7 +2580,7 @@ def _can_hold_element(self, element: Any) -> bool:
return issubclass(tipo.type, np.bool_)
return isinstance(element, (bool, np.bool_))
- def should_store(self, value):
+ def should_store(self, value) -> bool:
return issubclass(value.dtype.type, np.bool_) and not is_extension_array_dtype(
value
)
@@ -2648,7 +2672,7 @@ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]
def _can_hold_element(self, element: Any) -> bool:
return True
- def should_store(self, value):
+ def should_store(self, value) -> bool:
return not (
issubclass(
value.dtype.type,
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 9c90b20fc0f16..64896e2cac311 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -145,19 +145,20 @@ def __init__(
self._rebuild_blknos_and_blklocs()
- def make_empty(self, axes=None):
+ def make_empty(self, axes=None) -> "BlockManager":
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
- axes = [ensure_index([])] + [ensure_index(a) for a in self.axes[1:]]
+ axes = [Index([])] + self.axes[1:]
# preserve dtype if possible
if self.ndim == 1:
+ assert isinstance(self, SingleBlockManager) # for mypy
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return type(self)(blocks, axes)
- def __nonzero__(self):
+ def __nonzero__(self) -> bool:
return True
# Python3 compat
@@ -171,7 +172,7 @@ def shape(self) -> Tuple[int, ...]:
def ndim(self) -> int:
return len(self.axes)
- def set_axis(self, axis: int, new_labels: Index):
+ def set_axis(self, axis: int, new_labels: Index) -> None:
# Caller is responsible for ensuring we have an Index object.
old_len = len(self.axes[axis])
new_len = len(new_labels)
@@ -214,7 +215,7 @@ def _is_single_block(self) -> bool:
0, len(self), 1
)
- def _rebuild_blknos_and_blklocs(self):
+ def _rebuild_blknos_and_blklocs(self) -> None:
"""
Update mgr._blknos / mgr._blklocs.
"""
@@ -288,7 +289,7 @@ def unpickle_block(values, mgr_locs):
self._post_setstate()
- def _post_setstate(self):
+ def _post_setstate(self) -> None:
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
@@ -308,7 +309,7 @@ def __repr__(self) -> str:
output += f"\n{pprint_thing(block)}"
return output
- def _verify_integrity(self):
+ def _verify_integrity(self) -> None:
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
@@ -347,7 +348,7 @@ def reduce(self, func, *args, **kwargs):
return res
- def apply(self, f, filter=None, **kwargs):
+ def apply(self, f, filter=None, **kwargs) -> "BlockManager":
"""
Iterate over the blocks, collect and create a new BlockManager.
@@ -430,13 +431,13 @@ def apply(self, f, filter=None, **kwargs):
def quantile(
self,
- axis=0,
- consolidate=True,
- transposed=False,
+ axis: int = 0,
+ consolidate: bool = True,
+ transposed: bool = False,
interpolation="linear",
qs=None,
numeric_only=None,
- ):
+ ) -> "BlockManager":
"""
Iterate over blocks applying quantile reduction.
This routine is intended for reduction type operations and
@@ -455,7 +456,7 @@ def quantile(
Returns
-------
- Block Manager (new object)
+ BlockManager
"""
# Series dispatches to DataFrame for quantile, which allows us to
# simplify some of the code here and in the blocks
@@ -533,44 +534,48 @@ def get_axe(block, qs, axes):
fastpath=True,
)
- def isna(self, func):
+ def isna(self, func) -> "BlockManager":
return self.apply("apply", func=func)
- def where(self, **kwargs):
+ def where(self, **kwargs) -> "BlockManager":
return self.apply("where", **kwargs)
- def setitem(self, **kwargs):
+ def setitem(self, **kwargs) -> "BlockManager":
return self.apply("setitem", **kwargs)
def putmask(self, **kwargs):
return self.apply("putmask", **kwargs)
- def diff(self, **kwargs):
+ def diff(self, **kwargs) -> "BlockManager":
return self.apply("diff", **kwargs)
- def interpolate(self, **kwargs):
+ def interpolate(self, **kwargs) -> "BlockManager":
return self.apply("interpolate", **kwargs)
- def shift(self, **kwargs):
+ def shift(self, **kwargs) -> "BlockManager":
return self.apply("shift", **kwargs)
- def fillna(self, **kwargs):
+ def fillna(self, **kwargs) -> "BlockManager":
return self.apply("fillna", **kwargs)
- def downcast(self, **kwargs):
+ def downcast(self, **kwargs) -> "BlockManager":
return self.apply("downcast", **kwargs)
- def astype(self, dtype, copy: bool = False, errors: str = "raise"):
+ def astype(
+ self, dtype, copy: bool = False, errors: str = "raise"
+ ) -> "BlockManager":
return self.apply("astype", dtype=dtype, copy=copy, errors=errors)
- def convert(self, **kwargs):
+ def convert(self, **kwargs) -> "BlockManager":
return self.apply("convert", **kwargs)
- def replace(self, value, **kwargs):
+ def replace(self, value, **kwargs) -> "BlockManager":
assert np.ndim(value) == 0, value
return self.apply("replace", value=value, **kwargs)
- def replace_list(self, src_list, dest_list, inplace=False, regex=False):
+ def replace_list(
+ self, src_list, dest_list, inplace: bool = False, regex: bool = False
+ ) -> "BlockManager":
""" do a list replace """
inplace = validate_bool_kwarg(inplace, "inplace")
@@ -602,7 +607,7 @@ def comp(s, regex=False):
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
# TODO: assert/validate that `d` is always a scalar?
- new_rb = []
+ new_rb: List[Block] = []
for b in rb:
m = masks[i][b.mgr_locs.indexer]
convert = i == src_len
@@ -633,7 +638,7 @@ def is_consolidated(self) -> bool:
self._consolidate_check()
return self._is_consolidated
- def _consolidate_check(self):
+ def _consolidate_check(self) -> None:
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@@ -670,7 +675,7 @@ def is_view(self) -> bool:
return False
- def get_bool_data(self, copy: bool = False):
+ def get_bool_data(self, copy: bool = False) -> "BlockManager":
"""
Parameters
----------
@@ -680,7 +685,7 @@ def get_bool_data(self, copy: bool = False):
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
- def get_numeric_data(self, copy: bool = False):
+ def get_numeric_data(self, copy: bool = False) -> "BlockManager":
"""
Parameters
----------
@@ -712,7 +717,7 @@ def combine(self, blocks: List[Block], copy: bool = True) -> "BlockManager":
return type(self)(new_blocks, axes, do_integrity_check=False)
- def get_slice(self, slobj: slice, axis: int = 0):
+ def get_slice(self, slobj: slice, axis: int = 0) -> "BlockManager":
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
@@ -737,7 +742,7 @@ def __contains__(self, item) -> bool:
def nblocks(self) -> int:
return len(self.blocks)
- def copy(self, deep=True):
+ def copy(self, deep=True) -> "BlockManager":
"""
Make deep or shallow copy of BlockManager
@@ -797,7 +802,7 @@ def as_array(self, transpose: bool = False) -> np.ndarray:
return arr.transpose() if transpose else arr
- def _interleave(self):
+ def _interleave(self) -> np.ndarray:
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
@@ -807,7 +812,7 @@ def _interleave(self):
# TODO: https://github.com/pandas-dev/pandas/issues/22791
# Give EAs some input on what happens here. Sparse needs this.
if is_sparse(dtype):
- dtype = dtype.subtype
+ dtype = dtype.subtype # type: ignore
elif is_extension_array_dtype(dtype):
dtype = "object"
@@ -906,7 +911,7 @@ def consolidate(self) -> "BlockManager":
bm._consolidate_inplace()
return bm
- def _consolidate_inplace(self):
+ def _consolidate_inplace(self) -> None:
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
@@ -1168,7 +1173,13 @@ def insert(self, loc: int, item, value, allow_duplicates: bool = False):
self._consolidate_inplace()
def reindex_axis(
- self, new_index, axis, method=None, limit=None, fill_value=None, copy=True
+ self,
+ new_index,
+ axis: int,
+ method=None,
+ limit=None,
+ fill_value=None,
+ copy: bool = True,
):
"""
Conform block manager to new index.
@@ -1183,7 +1194,13 @@ def reindex_axis(
)
def reindex_indexer(
- self, new_axis, indexer, axis, fill_value=None, allow_dups=False, copy=True
+ self,
+ new_axis,
+ indexer,
+ axis: int,
+ fill_value=None,
+ allow_dups=False,
+ copy: bool = True,
):
"""
Parameters
@@ -1191,8 +1208,10 @@ def reindex_indexer(
new_axis : Index
indexer : ndarray of int64 or None
axis : int
- fill_value : object
- allow_dups : bool
+ fill_value : object, default None
+ allow_dups : bool, default False
+ copy : bool, default True
+
pandas-indexer with -1's only.
"""
@@ -1329,7 +1348,7 @@ def _make_na_block(self, placement, fill_value=None):
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
- def take(self, indexer, axis=1, verify=True, convert=True):
+ def take(self, indexer, axis: int = 1, verify: bool = True, convert: bool = True):
"""
Take items along any axis.
"""
@@ -1506,11 +1525,11 @@ def index(self) -> Index:
return self.axes[0]
@property
- def dtype(self):
+ def dtype(self) -> DtypeObj:
return self._block.dtype
@property
- def array_dtype(self):
+ def array_dtype(self) -> DtypeObj:
return self._block.array_dtype
def get_dtype_counts(self):
| some of these are a real bear, in particular `SingleBlockManager.__init__` | https://api.github.com/repos/pandas-dev/pandas/pulls/32403 | 2020-03-02T23:43:42Z | 2020-03-03T13:32:41Z | 2020-03-03T13:32:41Z | 2020-03-03T15:43:27Z |
HDFStore: Fix empty result of keys() method on non-pandas hdf5 file | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 21e59805fa143..c00c5ac867f38 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -340,6 +340,7 @@ I/O
- Bug in :class:`HDFStore` that caused it to set to ``int64`` the dtype of a ``datetime64`` column when reading a DataFrame in Python 3 from fixed format written in Python 2 (:issue:`31750`)
- Bug in :meth:`read_excel` where a UTF-8 string with a high surrogate would cause a segmentation violation (:issue:`23809`)
- Bug in :meth:`read_csv` was causing a file descriptor leak on an empty file (:issue:`31488`)
+- :meth:`HDFStore.keys` now tries to get the list of native pandas tables first, and if there are none, it gets the native HDF5 table names (:issue:`29916`)
Plotting
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 7aeed5c316d7f..916b3149f091e 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -583,13 +583,20 @@ def __exit__(self, exc_type, exc_value, traceback):
def keys(self) -> List[str]:
"""
Return a list of keys corresponding to objects stored in HDFStore.
+ If the store contains pandas native tables, it will return their names.
+ Otherwise the list of names of HDF5 Table objects will be returned.
Returns
-------
list
List of ABSOLUTE path-names (e.g. have the leading '/').
"""
- return [n._v_pathname for n in self.groups()]
+ objects = [n._v_pathname for n in self.groups()]
+ if objects:
+ return objects
+
+ assert self._handle is not None # mypy
+ return [n._v_pathname for n in self._handle.walk_nodes("/", classname="Table")]
def __iter__(self):
return iter(self.keys())
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 61ca2e7f5f19d..e55c9157cac1f 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -296,6 +296,29 @@ def test_keys(self, setup_path):
assert set(store.keys()) == expected
assert set(store) == expected
+ def test_non_pandas_keys(self, setup_path):
+ # GH 29916
+ class Table1(tables.IsDescription):
+ value1 = tables.Float32Col()
+
+ class Table2(tables.IsDescription):
+ value2 = tables.Float32Col()
+
+ class Table3(tables.IsDescription):
+ value3 = tables.Float32Col()
+
+ with ensure_clean_path(setup_path) as path:
+ with tables.open_file(path, mode="w") as h5file:
+ group = h5file.create_group("/", "group")
+ h5file.create_table(group, "table1", Table1, "Table 1")
+ h5file.create_table(group, "table2", Table2, "Table 2")
+ h5file.create_table(group, "table3", Table3, "Table 3")
+ with HDFStore(path) as store:
+ assert len(store.keys()) == 3
+ expected = {"/group/table1", "/group/table2", "/group/table3"}
+ assert set(store.keys()) == expected
+ assert set(store) == expected
+
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
| First try to get pandas style tables, if there are none, return the list of non-pandas (hdf5 native) tables in the file (if any)
- [x] closes #29916
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32401 | 2020-03-02T23:26:12Z | 2020-06-14T15:32:10Z | null | 2020-06-14T15:32:11Z |
CLN: remove unused values from interpolate call | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 890c0540d9851..a82708e5e7602 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6799,7 +6799,6 @@ def interpolate(
method=method,
axis=ax,
index=index,
- values=_maybe_transposed_self,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 34fa4c0e6544e..67b734b8890f4 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1107,7 +1107,6 @@ def interpolate(
method="pad",
axis=0,
index=None,
- values=None,
inplace=False,
limit=None,
limit_direction="forward",
@@ -1157,7 +1156,6 @@ def check_int_bool(self, inplace):
return self._interpolate(
method=m,
index=index,
- values=values,
axis=axis,
limit=limit,
limit_direction=limit_direction,
@@ -1211,7 +1209,6 @@ def _interpolate(
self,
method=None,
index=None,
- values=None,
fill_value=None,
axis=0,
limit=None,
| https://api.github.com/repos/pandas-dev/pandas/pulls/32400 | 2020-03-02T23:21:24Z | 2020-03-03T02:58:53Z | 2020-03-03T02:58:52Z | 2020-03-03T03:01:35Z | |
Backport PR #32104 on branch 1.0.x (BUG: Pickle NA objects) | diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index 1b6098e6b6ac1..808e6ae709ce9 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -74,6 +74,7 @@ Bug fixes
**I/O**
- Using ``pd.NA`` with :meth:`DataFrame.to_json` now correctly outputs a null value instead of an empty object (:issue:`31615`)
+- Fixed pickling of ``pandas.NA``. Previously a new object was returned, which broke computations relying on ``NA`` being a singleton (:issue:`31847`)
- Fixed bug in parquet roundtrip with nullable unsigned integer dtypes (:issue:`31896`).
**Experimental dtypes**
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index 4d17a6f883c1c..c54cb652d7b21 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -364,6 +364,9 @@ class NAType(C_NAType):
exponent = 31 if is_32bit else 61
return 2 ** exponent - 1
+ def __reduce__(self):
+ return "NA"
+
# Binary arithmetic and comparison ops -> propagate
__add__ = _create_binary_propagating_op("__add__")
diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py
index dcb9d66708724..07656de2e9062 100644
--- a/pandas/tests/scalar/test_na_scalar.py
+++ b/pandas/tests/scalar/test_na_scalar.py
@@ -1,3 +1,5 @@
+import pickle
+
import numpy as np
import pytest
@@ -267,3 +269,26 @@ def test_integer_hash_collision_set():
assert len(result) == 2
assert NA in result
assert hash(NA) in result
+
+
+def test_pickle_roundtrip():
+ # https://github.com/pandas-dev/pandas/issues/31847
+ result = pickle.loads(pickle.dumps(pd.NA))
+ assert result is pd.NA
+
+
+def test_pickle_roundtrip_pandas():
+ result = tm.round_trip_pickle(pd.NA)
+ assert result is pd.NA
+
+
+@pytest.mark.parametrize(
+ "values, dtype", [([1, 2, pd.NA], "Int64"), (["A", "B", pd.NA], "string")]
+)
+@pytest.mark.parametrize("as_frame", [True, False])
+def test_pickle_roundtrip_containers(as_frame, values, dtype):
+ s = pd.Series(pd.array(values, dtype=dtype))
+ if as_frame:
+ s = s.to_frame(name="A")
+ result = tm.round_trip_pickle(s)
+ tm.assert_equal(result, s)
| Backport PR #32104: BUG: Pickle NA objects | https://api.github.com/repos/pandas-dev/pandas/pulls/32399 | 2020-03-02T23:21:19Z | 2020-03-03T01:27:02Z | 2020-03-03T01:27:02Z | 2020-03-03T01:27:02Z |
CLN: Avoid unnecessary values_from_object | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 7a18429f21a18..61d6a660a0357 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -100,11 +100,9 @@ def values_from_object(obj: object):
"""
func: object
- if getattr(obj, '_typ', '') == 'dataframe':
- return obj.values
-
func = getattr(obj, '_internal_get_values', None)
if func is not None:
+ # Includes DataFrame, for which we get frame.values
obj = func()
return obj
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 705c618fc49dc..6230ee34bcd50 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -122,7 +122,7 @@ def is_bool_indexer(key: Any) -> bool:
is_array_like(key) and is_extension_array_dtype(key.dtype)
):
if key.dtype == np.object_:
- key = np.asarray(values_from_object(key))
+ key = np.asarray(key)
if not lib.is_bool_array(key):
na_msg = "Cannot mask with non-boolean array containing NA / NaN values"
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index ee74b02af9516..682a0722de3b7 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -581,7 +581,7 @@ def remove_na_arraylike(arr):
if is_extension_array_dtype(arr):
return arr[notna(arr)]
else:
- return arr[notna(lib.values_from_object(arr))]
+ return arr[notna(np.asarray(arr))]
def is_valid_nat_for_dtype(obj, dtype: DtypeObj) -> bool:
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index faac472b3fc31..d4d9c26686891 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1923,9 +1923,6 @@ def _factorize_keys(lk, rk, sort=True):
def _sort_labels(uniques: np.ndarray, left, right):
- if not isinstance(uniques, np.ndarray):
- # tuplesafe
- uniques = Index(uniques).values
llength = len(left)
labels = np.concatenate([left, right])
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index b0c5d6a48d99a..3be9c5fcdfb26 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -36,7 +36,6 @@
from pandas.core.algorithms import take_1d
from pandas.core.base import NoNewAttributesMixin
-import pandas.core.common as com
from pandas.core.construction import extract_array
if TYPE_CHECKING:
@@ -782,7 +781,7 @@ def rep(x, r):
return str.__mul__(x, r)
repeats = np.asarray(repeats, dtype=object)
- result = libops.vec_binop(com.values_from_object(arr), repeats, rep)
+ result = libops.vec_binop(np.asarray(arr), repeats, rep)
return result
| xref #27165. | https://api.github.com/repos/pandas-dev/pandas/pulls/32398 | 2020-03-02T19:57:40Z | 2020-03-03T01:36:19Z | 2020-03-03T01:36:19Z | 2020-03-03T01:51:08Z |
TST: add message check to pytest.raises (tests/arrays/test_boolean.py) | diff --git a/pandas/tests/arrays/test_boolean.py b/pandas/tests/arrays/test_boolean.py
index d14d6f3ff0c41..f4b466f4804c7 100644
--- a/pandas/tests/arrays/test_boolean.py
+++ b/pandas/tests/arrays/test_boolean.py
@@ -248,7 +248,11 @@ def test_coerce_to_numpy_array():
tm.assert_numpy_array_equal(result, expected)
# with missing values will raise error
arr = pd.array([True, False, None], dtype="boolean")
- with pytest.raises(ValueError):
+ msg = (
+ "cannot convert to 'bool'-dtype NumPy array with missing values. "
+ "Specify an appropriate 'na_value' for this dtype."
+ )
+ with pytest.raises(ValueError, match=msg):
np.array(arr, dtype="bool")
| - [x] ref #30999
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] Added in an error message
| https://api.github.com/repos/pandas-dev/pandas/pulls/32397 | 2020-03-02T17:13:06Z | 2020-03-03T12:31:31Z | 2020-03-03T12:31:31Z | 2020-03-03T12:58:21Z |
Added figsize for autocorrelation_plot and lag_plot (#31650) | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 21e59805fa143..5794a2a6a1cd9 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -346,9 +346,8 @@ Plotting
^^^^^^^^
- :func:`.plot` for line/bar now accepts color by dictonary (:issue:`8193`).
--
- Bug in :meth:`DataFrame.boxplot` and :meth:`DataFrame.plot.boxplot` lost color attributes of ``medianprops``, ``whiskerprops``, ``capprops`` and ``medianprops`` (:issue:`30346`)
-
+- :func:`.autocorrelation_plot` and :func:`.lag_plot` now accept ``figsize`` attribute (:issue:`31650`)
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py
index 0720f544203f7..78ff799dde628 100644
--- a/pandas/plotting/_matplotlib/misc.py
+++ b/pandas/plotting/_matplotlib/misc.py
@@ -383,7 +383,7 @@ def parallel_coordinates(
return ax
-def lag_plot(series, lag=1, ax=None, **kwds):
+def lag_plot(series, lag=1, ax=None, figsize=None, **kwds):
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
import matplotlib.pyplot as plt
@@ -392,21 +392,27 @@ def lag_plot(series, lag=1, ax=None, **kwds):
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
+
if ax is None:
- ax = plt.gca()
+ fig, ax = plt.subplots(1, 1, figsize=figsize)
+
ax.set_xlabel("y(t)")
ax.set_ylabel(f"y(t + {lag})")
ax.scatter(y1, y2, **kwds)
return ax
-def autocorrelation_plot(series, ax=None, **kwds):
+def autocorrelation_plot(series, ax=None, figsize=None, **kwds):
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
+
if ax is None:
- ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
+ fig, ax = plt.subplots(1, 1, figsize=figsize)
+ ax.set_xlim(1, n)
+ ax.set_ylim(-1.0, 1.0)
+
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index 47a4fd8ff0e95..e44c09827de08 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -382,15 +382,20 @@ def parallel_coordinates(
)
-def lag_plot(series, lag=1, ax=None, **kwds):
+def lag_plot(series, lag=1, ax=None, figsize=None, **kwds):
"""
Lag plot for time series.
Parameters
----------
- series : Time series
- lag : lag of the scatter plot, default 1
- ax : Matplotlib axis object, optional
+ series : Series
+ time series
+ lag : int
+ lag of the scatter plot, default 1
+ ax : matplotlib.axis, optional
+ Matplotlib axis object.
+ figsize : (float,float), optional
+ A tuple (width, height) in inches.
**kwds
Matplotlib scatter method keyword arguments.
@@ -399,17 +404,21 @@ def lag_plot(series, lag=1, ax=None, **kwds):
class:`matplotlib.axis.Axes`
"""
plot_backend = _get_plot_backend("matplotlib")
- return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds)
+ return plot_backend.lag_plot(series=series, lag=lag, ax=ax, figsize=figsize, **kwds)
-def autocorrelation_plot(series, ax=None, **kwargs):
+def autocorrelation_plot(series, ax=None, figsize=None, **kwargs):
"""
Autocorrelation plot for time series.
Parameters
----------
- series : Time series
- ax : Matplotlib axis object, optional
+ series : Series
+ time series
+ ax : matplotlib.axis, optional
+ Matplotlib axis object.
+ figsize : (float,float), optional
+ A tuple (width, height) in inches.
**kwargs
Options to pass to matplotlib plotting method.
@@ -418,7 +427,9 @@ def autocorrelation_plot(series, ax=None, **kwargs):
class:`matplotlib.axis.Axes`
"""
plot_backend = _get_plot_backend("matplotlib")
- return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs)
+ return plot_backend.autocorrelation_plot(
+ series=series, ax=ax, figsize=figsize, **kwargs
+ )
class _Options(dict):
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 168e8c7de0b83..b4ed92940b963 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -76,16 +76,24 @@ def test_autocorrelation_plot(self):
_check_plot_works(autocorrelation_plot, series=self.ts)
_check_plot_works(autocorrelation_plot, series=self.ts.values)
+ _check_plot_works(autocorrelation_plot, series=self.ts.values, figsize=(15, 8))
ax = autocorrelation_plot(self.ts, label="Test")
self._check_legend_labels(ax, labels=["Test"])
+ ax = autocorrelation_plot(self.ts, figsize=(15, 8))
+ self._check_axes_shape(ax, figsize=(15, 8))
+
@pytest.mark.slow
def test_lag_plot(self):
from pandas.plotting import lag_plot
_check_plot_works(lag_plot, series=self.ts)
_check_plot_works(lag_plot, series=self.ts, lag=5)
+ _check_plot_works(lag_plot, series=self.ts, figsize=(15, 8))
+
+ ax = lag_plot(self.ts, figsize=(15, 8))
+ self._check_axes_shape(ax, figsize=(15, 8))
@pytest.mark.slow
def test_bootstrap_plot(self):
| - closes #31650
- Passed pandas.tests.plotting.test_misc.py
- Now creating new axes with "fig, ax = plt.subplots(1, 1, figsize=figsize)"
- Also changed plotting.lag_plot regarding the same issue
- Figure size is only changed if no axis parameter is given
| https://api.github.com/repos/pandas-dev/pandas/pulls/32396 | 2020-03-02T17:03:21Z | 2020-07-17T11:24:05Z | null | 2023-05-11T01:19:33Z |
DOC: change link to contributing guide in README.md | diff --git a/README.md b/README.md
index 5342eda4390eb..d66a5bc4a7ef1 100644
--- a/README.md
+++ b/README.md
@@ -158,7 +158,7 @@ Most development discussion is taking place on github in this repo. Further, the
All contributions, bug reports, bug fixes, documentation improvements, enhancements and ideas are welcome.
-A detailed overview on how to contribute can be found in the **[contributing guide](https://dev.pandas.io/docs/contributing.html)**. There is also an [overview](.github/CONTRIBUTING.md) on GitHub.
+A detailed overview on how to contribute can be found in the **[contributing guide](https://pandas.pydata.org/docs/dev/development/contributing.html)**. There is also an [overview](.github/CONTRIBUTING.md) on GitHub.
If you are simply looking to start working with the pandas codebase, navigate to the [GitHub "issues" tab](https://github.com/pandas-dev/pandas/issues) and start looking through interesting issues. There are a number of issues listed under [Docs](https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open) and [good first issue](https://github.com/pandas-dev/pandas/issues?labels=good+first+issue&sort=updated&state=open) where you could start out.
| xref https://github.com/pandas-dev/pandas/pull/32068#issuecomment-593362224 | https://api.github.com/repos/pandas-dev/pandas/pulls/32393 | 2020-03-02T11:47:08Z | 2020-04-08T10:32:14Z | 2020-04-08T10:32:14Z | 2020-04-13T14:39:34Z |
DOC: Reorganize Getting Started documentation pages | diff --git a/doc/redirects.csv b/doc/redirects.csv
index ef93955c14fe6..a6db4dd975a35 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -7,13 +7,10 @@ release,whatsnew/index
# getting started
install,getting_started/install
-10min,getting_started/10min
-basics,getting_started/basics
comparison_with_r,getting_started/comparison/comparison_with_r
comparison_with_sql,getting_started/comparison/comparison_with_sql
comparison_with_sas,getting_started/comparison/comparison_with_sas
comparison_with_stata,getting_started/comparison/comparison_with_stata
-dsintro,getting_started/dsintro
overview,getting_started/overview
tutorials,getting_started/tutorials
@@ -38,6 +35,9 @@ text,user_guide/text
timedeltas,user_guide/timedeltas
timeseries,user_guide/timeseries
visualization,user_guide/visualization
+10min,user_guide/10min
+basics,user_guide/basics
+dsintro,user_guide/dsintro
# development
contributing,development/contributing
diff --git a/doc/source/getting_started/index.rst b/doc/source/getting_started/index.rst
index a2f8f79f22ae4..9ac8c58e1d8f2 100644
--- a/doc/source/getting_started/index.rst
+++ b/doc/source/getting_started/index.rst
@@ -9,8 +9,6 @@ Getting started
Installation
------------
-Before you can use pandas, you’ll need to get it installed.
-
.. raw:: html
<div class="container">
@@ -23,7 +21,7 @@ Before you can use pandas, you’ll need to get it installed.
<div class="card-body">
<p class="card-text">
-Pandas is part of the `Anaconda <http://docs.continuum.io/anaconda/>`__ distribution and can be
+pandas is part of the `Anaconda <http://docs.continuum.io/anaconda/>`__ distribution and can be
installed with Anaconda or Miniconda:
.. raw:: html
@@ -49,7 +47,7 @@ installed with Anaconda or Miniconda:
<div class="card-body">
<p class="card-text">
-Pandas can be installed via pip from `PyPI <https://pypi.org/project/pandas>`__.
+pandas can be installed via pip from `PyPI <https://pypi.org/project/pandas>`__.
.. raw:: html
@@ -103,7 +101,7 @@ Intro to pandas
<div class="d-flex flex-row tutorial-card-header-1">
<div class="d-flex flex-row tutorial-card-header-2">
<button class="btn btn-dark btn-sm"></button>
- What kind of data does Pandas handle?
+ What kind of data does pandas handle?
</div>
<span class="badge gs-badge-link">
@@ -117,8 +115,8 @@ Intro to pandas
<div id="collapseOne" class="collapse" data-parent="#accordion">
<div class="card-body">
-When working with tabular data, such as data stored in spreadsheets or databases, Pandas is the right tool for you. Pandas will help you
-to explore, clean and process your data. In Pandas, a data table is called a :class:`DataFrame`.
+When working with tabular data, such as data stored in spreadsheets or databases, pandas is the right tool for you. pandas will help you
+to explore, clean and process your data. In pandas, a data table is called a :class:`DataFrame`.
.. image:: ../_static/schemas/01_table_dataframe.svg
:align: center
@@ -164,7 +162,7 @@ to explore, clean and process your data. In Pandas, a data table is called a :cl
<div id="collapseTwo" class="collapse" data-parent="#accordion">
<div class="card-body">
-Pandas supports the integration with many file formats or data sources out of the box (csv, excel, sql, json, parquet,…). Importing data from each of these
+pandas supports the integration with many file formats or data sources out of the box (csv, excel, sql, json, parquet,…). Importing data from each of these
data sources is provided by function with the prefix ``read_*``. Similarly, the ``to_*`` methods are used to store data.
.. image:: ../_static/schemas/02_io_readwrite.svg
@@ -212,7 +210,7 @@ data sources is provided by function with the prefix ``read_*``. Similarly, the
<div class="card-body">
Selecting or filtering specific rows and/or columns? Filtering the data on a condition? Methods for slicing, selecting, and extracting the
-data you need are available in Pandas.
+data you need are available in pandas.
.. image:: ../_static/schemas/03_subset_columns_rows.svg
:align: center
@@ -258,7 +256,7 @@ data you need are available in Pandas.
<div id="collapseFour" class="collapse" data-parent="#accordion">
<div class="card-body">
-Pandas provides plotting your data out of the box, using the power of Matplotlib. You can pick the plot type (scatter, bar, boxplot,...)
+pandas provides plotting your data out of the box, using the power of Matplotlib. You can pick the plot type (scatter, bar, boxplot,...)
corresponding to your data.
.. image:: ../_static/schemas/04_plot_overview.svg
@@ -492,7 +490,7 @@ Multiple tables can be concatenated both column wise as row wise and database-li
<div id="collapseNine" class="collapse" data-parent="#accordion">
<div class="card-body">
-Pandas has great support for time series and has an extensive set of tools for working with dates, times, and time-indexed data.
+pandas has great support for time series and has an extensive set of tools for working with dates, times, and time-indexed data.
.. raw:: html
@@ -535,7 +533,7 @@ Pandas has great support for time series and has an extensive set of tools for w
<div id="collapseTen" class="collapse" data-parent="#accordion">
<div class="card-body">
-Data sets do not only contain numerical data. Pandas provides a wide range of functions to cleaning textual data and extract useful information from it.
+Data sets do not only contain numerical data. pandas provides a wide range of functions to cleaning textual data and extract useful information from it.
.. raw:: html
@@ -568,9 +566,8 @@ Data sets do not only contain numerical data. Pandas provides a wide range of fu
Coming from...
--------------
-Currently working with other software for data manipulation in a tabular format? You're probably familiar to typical
-data operations and know *what* to do with your tabular data, but lacking the syntax to execute these operations. Get to know
-the pandas syntax by looking for equivalents from the software you already know:
+Are you familiar with other software for manipulating tablular data? Learn
+the pandas-equivalent operations compared to software you already know:
.. raw:: html
@@ -580,7 +577,7 @@ the pandas syntax by looking for equivalents from the software you already know:
<div class="card text-center intro-card shadow">
<img src="../_static/logo_r.svg" class="card-img-top" alt="R project logo" height="72">
<div class="card-body flex-fill">
- <p class="card-text">The <a href="https://www.r-project.org/">R programming language</a> provides the <code>data.frame</code> data structure and multiple packages,
+ <p class="card-text">The <a href="https://www.r-project.org/">R programming language</a> provides the <code>dataframe</code> data structure and multiple packages,
such as <a href="https://www.tidyverse.org/">tidyverse</a> use and extend <code>data.frame</code>s for convenient data handling
functionalities similar to pandas.</p>
@@ -597,7 +594,7 @@ the pandas syntax by looking for equivalents from the software you already know:
<div class="card text-center intro-card shadow">
<img src="../_static/logo_sql.svg" class="card-img-top" alt="SQL logo" height="72">
<div class="card-body flex-fill">
- <p class="card-text">Already familiar to <code>SELECT</code>, <code>GROUP BY</code>, <code>JOIN</code>,...?
+ <p class="card-text">Already familiar to <code>SELECT</code>, <code>GROUP BY</code>, <code>JOIN</code>, etc.?
Most of these SQL manipulations do have equivalents in pandas.</p>
.. container:: custom-button
@@ -615,7 +612,7 @@ the pandas syntax by looking for equivalents from the software you already know:
<div class="card-body flex-fill">
<p class="card-text">The <code>data set</code> included in the
<a href="https://en.wikipedia.org/wiki/Stata">STATA</a> statistical software suite corresponds
- to the pandas <code>data.frame</code>. Many of the operations known from STATA have an equivalent
+ to the pandas <code>dataframe</code>. Many of the operations known from STATA have an equivalent
in pandas.</p>
.. container:: custom-button
@@ -632,8 +629,8 @@ the pandas syntax by looking for equivalents from the software you already know:
<img src="../_static/logo_sas.svg" class="card-img-top" alt="SAS logo" height="52">
<div class="card-body flex-fill">
<p class="card-text">The <a href="https://en.wikipedia.org/wiki/SAS_(software)">SAS</a> statistical software suite
- also provides the <code>data set</code> corresponding to the pandas <code>data.frame</code>.
- Also vectorized operations, filtering, string processing operations,... from SAS have similar
+ also provides the <code>data set</code> corresponding to the pandas <code>dataframe</code>.
+ Also SAS vectorized operations, filtering, string processing operations, and more have similar
functions in pandas.</p>
.. container:: custom-button
@@ -648,11 +645,16 @@ the pandas syntax by looking for equivalents from the software you already know:
</div>
</div>
-Community tutorials
--------------------
+Tutorials
+---------
+
+For a quick overview of pandas functionality, see :ref:`10 Minutes to pandas<10min>`.
+
+You can also reference the pandas `cheat sheet <https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf>`_
+for a succinct guide for manipulating data with pandas.
The community produces a wide variety of tutorials available online. Some of the
-material is enlisted in the community contributed :ref:`tutorials`.
+material is enlisted in the community contributed :ref:`communitytutorials`.
.. If you update this toctree, also update the manual toctree in the
@@ -664,9 +666,6 @@ material is enlisted in the community contributed :ref:`tutorials`.
install
overview
- 10min
intro_tutorials/index
- basics
- dsintro
comparison/index
tutorials
diff --git a/doc/source/getting_started/tutorials.rst b/doc/source/getting_started/tutorials.rst
index 434d791474807..fce4aa4beba60 100644
--- a/doc/source/getting_started/tutorials.rst
+++ b/doc/source/getting_started/tutorials.rst
@@ -1,24 +1,12 @@
-.. _tutorials:
+.. _communitytutorials:
{{ header }}
-*********
-Tutorials
-*********
+*******************
+Community Tutorials
+*******************
-This is a guide to many pandas tutorials, geared mainly for new users.
-
-Internal guides
-===============
-
-pandas' own :ref:`10 Minutes to pandas<10min>`.
-
-More complex recipes are in the :ref:`Cookbook<cookbook>`.
-
-A handy pandas `cheat sheet <https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf>`_.
-
-Community guides
-================
+This is a guide to many pandas tutorials by the community, geared mainly for new users.
pandas Cookbook by Julia Evans
------------------------------
diff --git a/doc/source/getting_started/10min.rst b/doc/source/user_guide/10min.rst
similarity index 100%
rename from doc/source/getting_started/10min.rst
rename to doc/source/user_guide/10min.rst
diff --git a/doc/source/getting_started/basics.rst b/doc/source/user_guide/basics.rst
similarity index 100%
rename from doc/source/getting_started/basics.rst
rename to doc/source/user_guide/basics.rst
diff --git a/doc/source/getting_started/dsintro.rst b/doc/source/user_guide/dsintro.rst
similarity index 100%
rename from doc/source/getting_started/dsintro.rst
rename to doc/source/user_guide/dsintro.rst
diff --git a/doc/source/user_guide/index.rst b/doc/source/user_guide/index.rst
index 30b1c0b4eac0d..8226e72779588 100644
--- a/doc/source/user_guide/index.rst
+++ b/doc/source/user_guide/index.rst
@@ -12,6 +12,8 @@ pandas approaches the problem, with many examples throughout.
Users brand-new to pandas should start with :ref:`10min`.
+For a high level summary of the pandas fundamentals, see :ref:`dsintro` and :ref:`basics`.
+
Further information on any specific method can be obtained in the
:ref:`api`.
@@ -21,6 +23,9 @@ Further information on any specific method can be obtained in the
.. toctree::
:maxdepth: 2
+ 10min
+ dsintro
+ basics
io
indexing
advanced
| - [x] closes #32308
Issue Related:
- `getting_started/index.html` removes `10 minutes to pandas`, `Essential basic functionality` and `Intro to data structures` from the toctree and moves them as links in `user_guide/index.html`
- `getting_started/tutorials.html` now only references community tutorials
- the pandas cheatsheet is now referenced at the bottom `Tutorial` section of `getting_started/index.html`
Misc:
- Lowercases `Pandas` to `pandas`
- Some rephrasing
| https://api.github.com/repos/pandas-dev/pandas/pulls/32389 | 2020-03-02T07:24:43Z | 2020-03-11T22:15:22Z | 2020-03-11T22:15:21Z | 2020-03-11T22:22:19Z |
Create CITATION.md | diff --git a/CITATION.md b/CITATION.md
new file mode 100644
index 0000000000000..46a901b392da2
--- /dev/null
+++ b/CITATION.md
@@ -0,0 +1,49 @@
+# Citing and logo
+
+## Citing pandas
+
+If you use *pandas* for a scientific publication, we would appreciate citations to the published software and one of the two given papers:
+
+- *pandas* version 1.0.1 published on Zenodo (please find us on Zenodo and replace with the citation for the version you are using)
+```
+@software{reback2020pandas,
+ author = {The pandas development team},
+ title = {pandas-dev/pandas: Pandas 1.0.1},
+ month = feb,
+ year = 2020,
+ publisher = {Zenodo},
+ version = {v1.0.1},
+ doi = {10.5281/zenodo.3644238},
+ url = {https://doi.org/10.5281/zenodo.3644238}
+}
+```
+
+- Data structures for statistical computing in python, McKinney, Proceedings of the 9th Python in Science Conference, Volume 445, 2010.
+```
+@inproceedings{mckinney2010data,
+ title={Data structures for statistical computing in python},
+ author={Wes McKinney},
+ booktitle={Proceedings of the 9th Python in Science Conference},
+ volume={445},
+ pages={51--56},
+ year={2010},
+ organization={Austin, TX}
+}
+```
+
+- pandas: a foundational Python library for data analysis and statistics, McKinney, Python for High Performance and Scientific Computing, Volume 14, 2011.
+```
+@article{mckinney2011pandas,
+ title={pandas: a foundational Python library for data analysis and statistics},
+ author={Wes McKinney},
+ journal={Python for High Performance and Scientific Computing},
+ volume={14},
+ year={2011}
+}
+```
+
+## Brand and logo
+
+When using the project name *pandas*, please use it in lower case, even at the beginning of a sentence.
+
+You can find the official *pandas* logos [here](https://pandas.io/about/citing.html).
| Addresses #24036 by adding a `CITATION.md` file to the repository.
I copied the citation instructions from here: https://pandas.io/about/citing.html
and added a section for the published software on Zenodo. Following [this recommendation](https://github.com/sherpa/sherpa/pull/634#issuecomment-553668211), I did this for the latest released version (v1.0.1) with a note for the user to go fetch the citation from Zenodo for the version they are actually using.
Tagging @TomAugspurger and @jreback since you were active in the linked issue.
I still recommend to update the citation request on the pandas website directly (https://pandas.io/about/citing.html), as well as on the Scipy website (https://www.scipy.org/citing.html#pandas), on https://pandas.pydata.org/ and maybe provide a `<package>.__citation__` variable as suggested [here](https://github.com/pandas-dev/pandas/issues/24036#issuecomment-585672595).
| https://api.github.com/repos/pandas-dev/pandas/pulls/32388 | 2020-03-01T19:29:10Z | 2020-03-31T17:39:44Z | null | 2020-03-31T17:39:44Z |
Plotting Int64 columns with nulled integers (NAType) fails #32073 | diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index 1b6098e6b6ac1..808e6ae709ce9 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -74,6 +74,7 @@ Bug fixes
**I/O**
- Using ``pd.NA`` with :meth:`DataFrame.to_json` now correctly outputs a null value instead of an empty object (:issue:`31615`)
+- Fixed pickling of ``pandas.NA``. Previously a new object was returned, which broke computations relying on ``NA`` being a singleton (:issue:`31847`)
- Fixed bug in parquet roundtrip with nullable unsigned integer dtypes (:issue:`31896`).
**Experimental dtypes**
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 0f18a1fd81815..57c53f73962dc 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -66,6 +66,7 @@ Other enhancements
- :class:`Styler` may now render CSS more efficiently where multiple cells have the same styling (:issue:`30876`)
- When writing directly to a sqlite connection :func:`to_sql` now supports the ``multi`` method (:issue:`29921`)
- `OptionError` is now exposed in `pandas.errors` (:issue:`27553`)
+- :func:`timedelta_range` will now infer a frequency when passed ``start``, ``stop``, and ``periods`` (:issue:`32377`)
-
.. ---------------------------------------------------------------------------
@@ -198,6 +199,7 @@ Categorical
- Bug where :func:`merge` was unable to join on non-unique categorical indices (:issue:`28189`)
- Bug when passing categorical data to :class:`Index` constructor along with ``dtype=object`` incorrectly returning a :class:`CategoricalIndex` instead of object-dtype :class:`Index` (:issue:`32167`)
+- Bug where :class:`Categorical` comparison operator ``__ne__`` would incorrectly evaluate to ``False`` when either element was missing (:issue:`32276`)
-
Datetimelike
@@ -213,6 +215,7 @@ Timedelta
^^^^^^^^^
- Bug in constructing a :class:`Timedelta` with a high precision integer that would round the :class:`Timedelta` components (:issue:`31354`)
+- Bug in dividing ``np.nan`` or ``None`` by :class:`Timedelta`` incorrectly returning ``NaT`` (:issue:`31869`)
-
Timezones
@@ -295,8 +298,10 @@ I/O
- Bug in :meth:`DataFrame.to_parquet` overwriting pyarrow's default for
``coerce_timestamps``; following pyarrow's default allows writing nanosecond
timestamps with ``version="2.0"`` (:issue:`31652`).
+- Bug in :meth:`read_csv` was raising `TypeError` when `sep=None` was used in combination with `comment` keyword (:issue:`31396`)
- Bug in :class:`HDFStore` that caused it to set to ``int64`` the dtype of a ``datetime64`` column when reading a DataFrame in Python 3 from fixed format written in Python 2 (:issue:`31750`)
+
Plotting
^^^^^^^^
@@ -322,6 +327,7 @@ Reshaping
- Bug in :func:`crosstab` when inputs are two Series and have tuple names, the output will keep dummy MultiIndex as columns. (:issue:`18321`)
- :meth:`DataFrame.pivot` can now take lists for ``index`` and ``columns`` arguments (:issue:`21425`)
- Bug in :func:`concat` where the resulting indices are not copied when ``copy=True`` (:issue:`29879`)
+- :meth:`DataFrame.replace` and :meth:`Series.replace` will raise a ``TypeError`` if ``to_replace`` is not an expected type. Previously the ``replace`` would fail silently (:issue:`18634`)
Sparse
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 7a18429f21a18..61d6a660a0357 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -100,11 +100,9 @@ def values_from_object(obj: object):
"""
func: object
- if getattr(obj, '_typ', '') == 'dataframe':
- return obj.values
-
func = getattr(obj, '_internal_get_values', None)
if func is not None:
+ # Includes DataFrame, for which we get frame.values
obj = func()
return obj
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index 4d17a6f883c1c..dacf454824190 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -10,10 +10,13 @@ cnp.import_array()
cimport pandas._libs.util as util
-from pandas._libs.tslibs.np_datetime cimport (
- get_timedelta64_value, get_datetime64_value)
+
+from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value
from pandas._libs.tslibs.nattype cimport (
- checknull_with_nat, c_NaT as NaT, is_null_datetimelike)
+ c_NaT as NaT,
+ checknull_with_nat,
+ is_null_datetimelike,
+)
from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
from pandas.compat import is_platform_32bit
@@ -44,7 +47,7 @@ cpdef bint checknull(object val):
Returns
-------
- result : bool
+ bool
Notes
-----
@@ -223,7 +226,7 @@ def isnaobj2d_old(arr: ndarray) -> ndarray:
Returns
-------
- result : ndarray (dtype=np.bool_)
+ ndarray (dtype=np.bool_)
Notes
-----
@@ -248,17 +251,11 @@ def isnaobj2d_old(arr: ndarray) -> ndarray:
def isposinf_scalar(val: object) -> bool:
- if util.is_float_object(val) and val == INF:
- return True
- else:
- return False
+ return util.is_float_object(val) and val == INF
def isneginf_scalar(val: object) -> bool:
- if util.is_float_object(val) and val == NEGINF:
- return True
- else:
- return False
+ return util.is_float_object(val) and val == NEGINF
cdef inline bint is_null_datetime64(v):
@@ -364,6 +361,9 @@ class NAType(C_NAType):
exponent = 31 if is_32bit else 61
return 2 ** exponent - 1
+ def __reduce__(self):
+ return "NA"
+
# Binary arithmetic and comparison ops -> propagate
__add__ = _create_binary_propagating_op("__add__")
@@ -423,7 +423,6 @@ class NAType(C_NAType):
return NA
elif isinstance(other, np.ndarray):
return np.where(other == 1, other, NA)
-
return NotImplemented
# Logical ops using Kleene logic
@@ -433,8 +432,7 @@ class NAType(C_NAType):
return False
elif other is True or other is C_NA:
return NA
- else:
- return NotImplemented
+ return NotImplemented
__rand__ = __and__
@@ -443,8 +441,7 @@ class NAType(C_NAType):
return True
elif other is False or other is C_NA:
return NA
- else:
- return NotImplemented
+ return NotImplemented
__ror__ = __or__
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 66660c5f641fd..298028227e18b 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1407,7 +1407,14 @@ class Timedelta(_Timedelta):
# convert to Timedelta below
pass
+ elif util.is_nan(other):
+ # i.e. np.nan or np.float64("NaN")
+ raise TypeError("Cannot divide float by Timedelta")
+
elif hasattr(other, 'dtype'):
+ if other.dtype.kind == "O":
+ # GH#31869
+ return np.array([x / self for x in other])
return other / self.to_timedelta64()
elif not _validate_ops_compat(other):
@@ -1415,7 +1422,8 @@ class Timedelta(_Timedelta):
other = Timedelta(other)
if other is NaT:
- return NaT
+ # In this context we treat NaT as timedelta-like
+ return np.nan
return float(other.value) / self.value
def __floordiv__(self, other):
diff --git a/pandas/_testing.py b/pandas/_testing.py
index fce06e216dfd7..b0f18cb6fdd39 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -742,9 +742,9 @@ def repr_class(x):
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
-def assert_attr_equal(attr, left, right, obj="Attributes"):
+def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
- checks attributes are equal. Both objects must have attribute.
+ Check attributes are equal. Both objects must have attribute.
Parameters
----------
diff --git a/pandas/_typing.py b/pandas/_typing.py
index e2858441605f7..3b7392f781525 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -64,7 +64,7 @@
Label = Optional[Hashable]
Level = Union[Label, int]
Ordered = Optional[bool]
-JSONSerializable = Union[PythonScalar, List, Dict]
+JSONSerializable = Optional[Union[PythonScalar, List, Dict]]
Axes = Collection
# For functions like rename that convert one label to another
diff --git a/pandas/conftest.py b/pandas/conftest.py
index be44e6c2b36da..dcfc523315c8b 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1047,6 +1047,16 @@ def series_with_simple_index(indices):
for dtype in _narrow_dtypes
}
+
+@pytest.fixture(params=_narrow_series.keys())
+def narrow_series(request):
+ """
+ Fixture for Series with low precision data types
+ """
+ # copy to avoid mutation, e.g. setting .name
+ return _narrow_series[request.param].copy()
+
+
_index_or_series_objs = {**indices_dict, **_series, **_narrow_series}
@@ -1057,3 +1067,17 @@ def index_or_series_obj(request):
copy to avoid mutation, e.g. setting .name
"""
return _index_or_series_objs[request.param].copy(deep=True)
+
+
+@pytest.fixture
+def multiindex_year_month_day_dataframe_random_data():
+ """
+ DataFrame with 3 level MultiIndex (year, month, day) covering
+ first 100 business days from 2000-01-01 with random data
+ """
+ tdf = tm.makeTimeDataFrame(100)
+ ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
+ # use Int64Index, to make sure things work
+ ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels], inplace=True)
+ ymd.index.set_names(["year", "month", "day"], inplace=True)
+ return ymd
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 4167c75eb5782..40a169d03f39c 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -103,7 +103,10 @@ def func(self, other):
mask = (self._codes == -1) | (other_codes == -1)
if mask.any():
# In other series, the leads to False, so do that here too
- ret[mask] = False
+ if opname == "__ne__":
+ ret[(self._codes == -1) & (other_codes == -1)] = True
+ else:
+ ret[mask] = False
return ret
if is_scalar(other):
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index f637e16caa4c6..8c870c6255200 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -42,6 +42,7 @@
from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import pandas.core.common as com
+from pandas.core.construction import array, extract_array
from pandas.core.indexers import check_array_indexer
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.ops.invalid import invalid_comparison, make_invalid_op
@@ -623,7 +624,7 @@ def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
- return self._box_values(self.asi8)
+ return self._box_values(self.asi8.ravel()).reshape(self.shape)
elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
return self._format_native_types()
elif is_integer_dtype(dtype):
@@ -1256,19 +1257,13 @@ def _addsub_object_array(self, other: np.ndarray, op):
PerformanceWarning,
)
- # For EA self.astype('O') returns a numpy array, not an Index
- left = self.astype("O")
+ # Caller is responsible for broadcasting if necessary
+ assert self.shape == other.shape, (self.shape, other.shape)
- res_values = op(left, np.array(other))
- kwargs = {}
- if not is_period_dtype(self):
- kwargs["freq"] = "infer"
- try:
- res = type(self)._from_sequence(res_values, **kwargs)
- except ValueError:
- # e.g. we've passed a Timestamp to TimedeltaArray
- res = res_values
- return res
+ res_values = op(self.astype("O"), np.array(other))
+ result = array(res_values.ravel())
+ result = extract_array(result, extract_numpy=True).reshape(self.shape)
+ return result
def _time_shift(self, periods, freq=None):
"""
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 81fc934748d3e..749489a0a04fb 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -39,6 +39,7 @@
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.arrays import datetimelike as dtl
import pandas.core.common as com
+from pandas.core.construction import extract_array
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import Tick
@@ -141,8 +142,7 @@ def dtype(self):
# Constructors
def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False):
- if isinstance(values, (ABCSeries, ABCIndexClass)):
- values = values._values
+ values = extract_array(values)
inferred_freq = getattr(values, "_freq", None)
@@ -258,6 +258,10 @@ def _generate_range(cls, start, end, periods, freq, closed=None):
index = _generate_regular_range(start, end, periods, freq)
else:
index = np.linspace(start.value, end.value, periods).astype("i8")
+ if len(index) >= 2:
+ # Infer a frequency
+ td = Timedelta(index[1] - index[0])
+ freq = to_offset(td)
if not left_closed:
index = index[1:]
@@ -614,6 +618,10 @@ def __floordiv__(self, other):
if self.freq is not None:
# Note: freq gets division, not floor-division
freq = self.freq / other
+ if freq.nanos == 0 and self.freq.nanos != 0:
+ # e.g. if self.freq is Nano(1) then dividing by 2
+ # rounds down to zero
+ freq = None
return type(self)(result.view("m8[ns]"), freq=freq)
if not hasattr(other, "dtype"):
diff --git a/pandas/core/base.py b/pandas/core/base.py
index ff251324d4896..f55d9f905945d 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1035,7 +1035,8 @@ def argmin(self, axis=None, skipna=True, *args, **kwargs):
See Also
--------
- numpy.ndarray.argmin
+ numpy.ndarray.argmin : Return indices of the minimum values along
+ the given axis.
"""
nv.validate_minmax_axis(axis)
nv.validate_argmax_with_skipna(skipna, args, kwargs)
@@ -1055,7 +1056,8 @@ def tolist(self):
See Also
--------
- numpy.ndarray.tolist
+ numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
+ nested list of Python scalars.
"""
if not isinstance(self._values, np.ndarray):
# check for ndarray instead of dtype to catch DTA/TDA
@@ -1402,7 +1404,8 @@ def memory_usage(self, deep=False):
See Also
--------
- numpy.ndarray.nbytes
+ numpy.ndarray.nbytes : Total bytes consumed by the elements of the
+ array.
Notes
-----
@@ -1473,8 +1476,8 @@ def factorize(self, sort=False, na_sentinel=-1):
See Also
--------
- sort_values
- numpy.searchsorted
+ sort_values : Sort by the values along either axis.
+ numpy.searchsorted : Similar method from NumPy.
Notes
-----
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 705c618fc49dc..6230ee34bcd50 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -122,7 +122,7 @@ def is_bool_indexer(key: Any) -> bool:
is_array_like(key) and is_extension_array_dtype(key.dtype)
):
if key.dtype == np.object_:
- key = np.asarray(values_from_object(key))
+ key = np.asarray(key)
if not lib.is_bool_array(key):
na_msg = "Cannot mask with non-boolean array containing NA / NaN values"
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index fe3d3f49f16a7..a488aac08e060 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -266,8 +266,10 @@ def eval(
See Also
--------
- DataFrame.query
- DataFrame.eval
+ DataFrame.query : Evaluates a boolean expression to query the columns
+ of a frame.
+ DataFrame.eval : Evaluate a string describing operations on
+ DataFrame columns.
Notes
-----
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 33daf6627721f..181f0c8906853 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -175,6 +175,8 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
----------
categories : sequence, optional
Must be unique, and must not contain any nulls.
+ The categories are stored in an Index,
+ and if an index is provided the dtype of that index will be used.
ordered : bool or None, default False
Whether or not this categorical is treated as a ordered categorical.
None can be used to maintain the ordered value of existing categoricals when
@@ -210,6 +212,12 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
3 NaN
dtype: category
Categories (2, object): [b < a]
+
+ An empty CategoricalDtype with a specific dtype can be created
+ by providing an empty index. As follows,
+
+ >>> pd.CategoricalDtype(pd.DatetimeIndex([])).categories.dtype
+ dtype('<M8[ns]')
"""
# TODO: Document public vs. private API
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index ee74b02af9516..682a0722de3b7 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -581,7 +581,7 @@ def remove_na_arraylike(arr):
if is_extension_array_dtype(arr):
return arr[notna(arr)]
else:
- return arr[notna(lib.values_from_object(arr))]
+ return arr[notna(np.asarray(arr))]
def is_valid_nat_for_dtype(obj, dtype: DtypeObj) -> bool:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f515b57e24cfa..bfb83b59497e6 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2741,7 +2741,7 @@ def _set_value(self, index, col, value, takeable: bool = False):
"""
try:
if takeable is True:
- series = self._iget_item_cache(col)
+ series = self._ixs(col, axis=1)
series._set_value(index, value, takeable=True)
return
@@ -3599,7 +3599,7 @@ def align(
see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
- def set_axis(self, labels, axis=0, inplace=False):
+ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 25770c2c6470c..f7eb79a4f1c78 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -520,7 +520,7 @@ def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
- def set_axis(self, labels, axis=0, inplace=False):
+ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
"""
Assign desired index to given axis.
@@ -561,7 +561,8 @@ def set_axis(self, labels, axis=0, inplace=False):
obj.set_axis(labels, axis=axis, inplace=True)
return obj
- def _set_axis(self, axis, labels) -> None:
+ def _set_axis(self, axis: int, labels: Index) -> None:
+ labels = ensure_index(labels)
self._data.set_axis(axis, labels)
self._clear_item_cache()
@@ -1752,8 +1753,9 @@ def empty(self) -> bool_t:
See Also
--------
- Series.dropna
- DataFrame.dropna
+ Series.dropna : Return series without null values.
+ DataFrame.dropna : Return DataFrame with labels on given axis omitted
+ where (all or any) data are missing.
Notes
-----
@@ -2172,7 +2174,7 @@ def to_json(
See Also
--------
- read_json
+ read_json : Convert a JSON string to pandas object.
Notes
-----
@@ -3461,15 +3463,6 @@ def _get_item_cache(self, item):
res._is_copy = self._is_copy
return res
- def _iget_item_cache(self, item: int):
- """Return the cached item, item represents a positional indexer."""
- ax = self._info_axis
- if ax.is_unique:
- lower = self._get_item_cache(ax[item])
- else:
- return self._ixs(item, axis=1)
- return lower
-
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
@@ -4451,7 +4444,8 @@ def filter(
See Also
--------
- DataFrame.loc
+ DataFrame.loc : Access a group of rows and columns
+ by label(s) or a boolean array.
Notes
-----
@@ -4883,9 +4877,10 @@ def sample(
See Also
--------
- DataFrame.apply
- DataFrame.applymap
- Series.map
+ DataFrame.apply : Apply a function along input axis of DataFrame.
+ DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
+ Series.map : Apply a mapping correspondence on a
+ :class:`~pandas.Series`.
Notes
-----
@@ -6174,7 +6169,9 @@ def bfill(
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
+
TypeError
+ * If `to_replace` is not a scalar, array-like, ``dict``, or ``None``
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
@@ -6183,6 +6180,7 @@ def bfill(
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` does not match the type of the
value being replaced
+
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
@@ -6378,6 +6376,18 @@ def replace(
regex=False,
method="pad",
):
+ if not (
+ is_scalar(to_replace)
+ or isinstance(to_replace, pd.Series)
+ or is_re_compilable(to_replace)
+ or is_list_like(to_replace)
+ ):
+ raise TypeError(
+ "Expecting 'to_replace' to be either a scalar, array-like, "
+ "dict or None, got invalid type "
+ f"{repr(type(to_replace).__name__)}"
+ )
+
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")
@@ -6798,7 +6808,6 @@ def interpolate(
method=method,
axis=ax,
index=index,
- values=_maybe_transposed_self,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 48c00140461b5..6362f11a3e032 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -2312,11 +2312,12 @@ def _get_cythonized_result(
return self._wrap_transformed_output(output)
@Substitution(name="groupby")
- @Appender(_common_see_also)
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
"""
Shift each group by periods observations.
+ If freq is passed, the index will be increased using the periods and the freq.
+
Parameters
----------
periods : int, default 1
@@ -2324,7 +2325,9 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None):
freq : str, optional
Frequency string.
axis : axis to shift, default 0
+ Shift direction.
fill_value : optional
+ The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
@@ -2332,6 +2335,12 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None):
-------
Series or DataFrame
Object shifted within each group.
+
+ See Also
+ --------
+ Index.shift : Shift values of Index.
+ tshift : Shift the time index, using the index's frequency
+ if available.
"""
if freq is not None or axis != 0 or not isna(fill_value):
return self.apply(lambda x: x.shift(periods, freq, axis, fill_value))
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index ae2387f0fd7b4..af6cbd0e95ec1 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4236,6 +4236,9 @@ def putmask(self, mask, value):
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
+ if is_period_dtype(self.dtype):
+ # .values cast to object, so we need to cast back
+ values = type(self)(values)._data
return self._shallow_copy(values)
except (ValueError, TypeError) as err:
if is_object_dtype(self):
@@ -4814,6 +4817,7 @@ def isin(self, values, level=None):
Int64Index([1, 2, 3], dtype='int64')
Check whether each index value in a list of values.
+
>>> idx.isin([1, 4])
array([ True, False, False])
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 654d423b37c81..d1e21a2fe7657 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -520,7 +520,8 @@ def where(self, cond, other=None):
other = other.view("i8")
result = np.where(cond, values, other).astype("i8")
- return self._shallow_copy(result)
+ arr = type(self._data)._simple_new(result, dtype=self.dtype)
+ return type(self)._simple_new(arr, name=self.name)
def _summary(self, name=None) -> str:
"""
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 9eeb41f735015..ebf69c49c029a 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -250,22 +250,11 @@ def _has_complex_internals(self):
return True
def _shallow_copy(self, values=None, name: Label = no_default):
- # TODO: simplify, figure out type of values
name = name if name is not no_default else self.name
if values is None:
values = self._data
- if isinstance(values, type(self)):
- values = values._data
-
- if not isinstance(values, PeriodArray):
- if isinstance(values, np.ndarray) and values.dtype == "i8":
- values = PeriodArray(values, freq=self.freq)
- else:
- # GH#30713 this should never be reached
- raise TypeError(type(values), getattr(values, "dtype", None))
-
return self._simple_new(values, name=name)
def _maybe_convert_timedelta(self, other):
@@ -618,10 +607,11 @@ def insert(self, loc, item):
if not isinstance(item, Period) or self.freq != item.freq:
return self.astype(object).insert(loc, item)
- idx = np.concatenate(
+ i8result = np.concatenate(
(self[:loc].asi8, np.array([item.ordinal]), self[loc:].asi8)
)
- return self._shallow_copy(idx)
+ arr = type(self._data)._simple_new(i8result, dtype=self.dtype)
+ return type(self)._simple_new(arr, name=self.name)
def join(self, other, how="left", level=None, return_indexers=False, sort=False):
"""
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 3ab180bafd156..35e61ab6a59c9 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1633,14 +1633,12 @@ def _setitem_with_indexer(self, indexer, value):
info_idx = [info_idx]
labels = item_labels[info_idx]
+ plane_indexer = indexer[:1]
+ lplane_indexer = length_of_indexer(plane_indexer[0], self.obj.index)
+ # lplane_indexer gives the expected length of obj[indexer[0]]
+
if len(labels) == 1:
# We can operate on a single column
- item = labels[0]
- idx = indexer[0]
-
- plane_indexer = tuple([idx])
- lplane_indexer = length_of_indexer(plane_indexer[0], self.obj.index)
- # lplane_indexer gives the expected length of obj[idx]
# require that we are setting the right number of values that
# we are indexing
@@ -1652,11 +1650,6 @@ def _setitem_with_indexer(self, indexer, value):
"length than the value"
)
- # non-mi
- else:
- plane_indexer = indexer[:1]
- lplane_indexer = length_of_indexer(plane_indexer[0], self.obj.index)
-
def setter(item, v):
ser = self.obj[item]
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
@@ -1718,18 +1711,23 @@ def setter(item, v):
for i, item in enumerate(labels):
- # setting with a list, recoerces
+ # setting with a list, re-coerces
setter(item, value[:, i].tolist())
- # we have an equal len list/ndarray
- elif _can_do_equal_len(
- labels, value, plane_indexer, lplane_indexer, self.obj
+ elif (
+ len(labels) == 1
+ and lplane_indexer == len(value)
+ and not is_scalar(plane_indexer[0])
):
+ # we have an equal len list/ndarray
setter(labels[0], value)
- # per label values
- else:
+ elif lplane_indexer == 0 and len(value) == len(self.obj.index):
+ # We get here in one case via .loc with a all-False mask
+ pass
+ else:
+ # per-label values
if len(labels) != len(value):
raise ValueError(
"Must have equal len keys and value "
@@ -1746,7 +1744,6 @@ def setter(item, v):
else:
if isinstance(indexer, tuple):
- indexer = maybe_convert_ix(*indexer)
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
@@ -1764,6 +1761,8 @@ def setter(item, v):
self.obj[item_labels[indexer[info_axis]]] = value
return
+ indexer = maybe_convert_ix(*indexer)
+
if isinstance(value, (ABCSeries, dict)):
# TODO(EA): ExtensionBlock.setitem this causes issues with
# setting for extensionarrays that store dicts. Need to decide
@@ -2277,26 +2276,3 @@ def _maybe_numeric_slice(df, slice_, include_bool=False):
dtypes.append(bool)
slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
return slice_
-
-
-def _can_do_equal_len(labels, value, plane_indexer, lplane_indexer, obj) -> bool:
- """
- Returns
- -------
- bool
- True if we have an equal len settable.
- """
- if not len(labels) == 1 or not np.iterable(value) or is_scalar(plane_indexer[0]):
- return False
-
- item = labels[0]
- index = obj[item].index
-
- values_len = len(value)
- # equal len list/ndarray
- if len(index) == values_len:
- return True
- elif lplane_indexer == values_len:
- return True
-
- return False
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 34fa4c0e6544e..67b734b8890f4 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1107,7 +1107,6 @@ def interpolate(
method="pad",
axis=0,
index=None,
- values=None,
inplace=False,
limit=None,
limit_direction="forward",
@@ -1157,7 +1156,6 @@ def check_int_bool(self, inplace):
return self._interpolate(
method=m,
index=index,
- values=values,
axis=axis,
limit=limit,
limit_direction=limit_direction,
@@ -1211,7 +1209,6 @@ def _interpolate(
self,
method=None,
index=None,
- values=None,
fill_value=None,
axis=0,
limit=None,
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index e397167e4881f..9c90b20fc0f16 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -164,15 +164,15 @@ def __nonzero__(self):
__bool__ = __nonzero__
@property
- def shape(self):
+ def shape(self) -> Tuple[int, ...]:
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self) -> int:
return len(self.axes)
- def set_axis(self, axis, new_labels):
- new_labels = ensure_index(new_labels)
+ def set_axis(self, axis: int, new_labels: Index):
+ # Caller is responsible for ensuring we have an Index object.
old_len = len(self.axes[axis])
new_len = len(new_labels)
@@ -184,7 +184,9 @@ def set_axis(self, axis, new_labels):
self.axes[axis] = new_labels
- def rename_axis(self, mapper, axis, copy: bool = True, level=None):
+ def rename_axis(
+ self, mapper, axis: int, copy: bool = True, level=None
+ ) -> "BlockManager":
"""
Rename one of axes.
@@ -193,7 +195,7 @@ def rename_axis(self, mapper, axis, copy: bool = True, level=None):
mapper : unary callable
axis : int
copy : bool, default True
- level : int, default None
+ level : int or None, default None
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
@@ -233,7 +235,7 @@ def _rebuild_blknos_and_blklocs(self):
self._blklocs = new_blklocs
@property
- def items(self):
+ def items(self) -> Index:
return self.axes[0]
def _get_counts(self, f):
@@ -623,7 +625,7 @@ def comp(s, regex=False):
bm._consolidate_inplace()
return bm
- def is_consolidated(self):
+ def is_consolidated(self) -> bool:
"""
Return True if more than one block with the same dtype
"""
@@ -688,7 +690,7 @@ def get_numeric_data(self, copy: bool = False):
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
- def combine(self, blocks, copy=True):
+ def combine(self, blocks: List[Block], copy: bool = True) -> "BlockManager":
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
@@ -992,7 +994,6 @@ def delete(self, item):
self.blocks = tuple(
b for blkno, b in enumerate(self.blocks) if not is_blk_deleted[blkno]
)
- self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value):
@@ -1160,7 +1161,6 @@ def insert(self, loc: int, item, value, allow_duplicates: bool = False):
self.axes[0] = new_axis
self.blocks += (block,)
- self._shape = None
self._known_consolidated = False
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index faac472b3fc31..c301d6e7c7155 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1312,7 +1312,12 @@ def _get_join_indexers(
kwargs = copy.copy(kwargs)
if how == "left":
kwargs["sort"] = sort
- join_func = _join_functions[how]
+ join_func = {
+ "inner": libjoin.inner_join,
+ "left": libjoin.left_outer_join,
+ "right": _right_outer_join,
+ "outer": libjoin.full_outer_join,
+ }[how]
return join_func(lkey, rkey, count, **kwargs)
@@ -1842,14 +1847,6 @@ def _right_outer_join(x, y, max_groups):
return left_indexer, right_indexer
-_join_functions = {
- "inner": libjoin.inner_join,
- "left": libjoin.left_outer_join,
- "right": _right_outer_join,
- "outer": libjoin.full_outer_join,
-}
-
-
def _factorize_keys(lk, rk, sort=True):
# Some pre-processing for non-ndarray lk / rk
if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
@@ -1923,9 +1920,6 @@ def _factorize_keys(lk, rk, sort=True):
def _sort_labels(uniques: np.ndarray, left, right):
- if not isinstance(uniques, np.ndarray):
- # tuplesafe
- uniques = Index(uniques).values
llength = len(left)
labels = np.concatenate([left, right])
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d984225f8fd89..d565cbbdd5344 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -9,7 +9,6 @@
TYPE_CHECKING,
Any,
Callable,
- Hashable,
Iterable,
List,
Optional,
@@ -23,7 +22,7 @@
from pandas._config import get_option
from pandas._libs import lib, properties, reshape, tslibs
-from pandas._typing import Label
+from pandas._typing import Axis, DtypeObj, Label
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, doc
from pandas.util._validators import validate_bool_kwarg, validate_percentile
@@ -177,7 +176,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_typ = "series"
- _name: Optional[Hashable]
+ _name: Label
_metadata: List[str] = ["name"]
_internal_names_set = {"index"} | generic.NDFrame._internal_names_set
_accessors = {"dt", "cat", "str", "sparse"}
@@ -391,9 +390,12 @@ def _can_hold_na(self):
_index = None
- def _set_axis(self, axis, labels, fastpath: bool = False) -> None:
+ def _set_axis(self, axis: int, labels, fastpath: bool = False) -> None:
"""
Override generic, we want to set the _typ here.
+
+ This is called from the cython code when we set the `index` attribute
+ directly, e.g. `series.index = [1, 2, 3]`.
"""
if not fastpath:
labels = ensure_index(labels)
@@ -413,6 +415,7 @@ def _set_axis(self, axis, labels, fastpath: bool = False) -> None:
object.__setattr__(self, "_index", labels)
if not fastpath:
+ # The ensure_index call above ensures we have an Index object
self._data.set_axis(axis, labels)
def _update_inplace(self, result, **kwargs):
@@ -421,25 +424,25 @@ def _update_inplace(self, result, **kwargs):
# ndarray compatibility
@property
- def dtype(self):
+ def dtype(self) -> DtypeObj:
"""
Return the dtype object of the underlying data.
"""
return self._data.dtype
@property
- def dtypes(self):
+ def dtypes(self) -> DtypeObj:
"""
Return the dtype object of the underlying data.
"""
return self._data.dtype
@property
- def name(self) -> Optional[Hashable]:
+ def name(self) -> Label:
return self._name
@name.setter
- def name(self, value: Optional[Hashable]) -> None:
+ def name(self, value: Label) -> None:
if not is_hashable(value):
raise TypeError("Series.name must be a hashable type")
object.__setattr__(self, "_name", value)
@@ -689,7 +692,7 @@ def __array_ufunc__(
inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
result = getattr(ufunc, method)(*inputs, **kwargs)
- name: Optional[Hashable]
+ name: Label
if len(set(names)) == 1:
name = names[0]
else:
@@ -3983,7 +3986,7 @@ def rename(
see_also_sub="",
)
@Appender(generic.NDFrame.set_axis.__doc__)
- def set_axis(self, labels, axis=0, inplace=False):
+ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index b0c5d6a48d99a..3be9c5fcdfb26 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -36,7 +36,6 @@
from pandas.core.algorithms import take_1d
from pandas.core.base import NoNewAttributesMixin
-import pandas.core.common as com
from pandas.core.construction import extract_array
if TYPE_CHECKING:
@@ -782,7 +781,7 @@ def rep(x, r):
return str.__mul__(x, r)
repeats = np.asarray(repeats, dtype=object)
- result = libops.vec_binop(com.values_from_object(arr), repeats, rep)
+ result = libops.vec_binop(np.asarray(arr), repeats, rep)
return result
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 9d1687e20a949..bc2fb9f0f41bc 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2379,19 +2379,21 @@ class MyDialect(csv.Dialect):
dia = MyDialect
- sniff_sep = True
-
if sep is not None:
- sniff_sep = False
dia.delimiter = sep
- # attempt to sniff the delimiter
- if sniff_sep:
+ else:
+ # attempt to sniff the delimiter from the first valid line,
+ # i.e. no comment line and not in skiprows
line = f.readline()
- while self.skipfunc(self.pos):
+ lines = self._check_comments([[line]])[0]
+ while self.skipfunc(self.pos) or not lines:
self.pos += 1
line = f.readline()
+ lines = self._check_comments([[line]])[0]
- line = self._check_comments([line])[0]
+ # since `line` was a string, lines will be a list containing
+ # only a single string
+ line = lines[0]
self.pos += 1
self.line_pos += 1
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 63d0b8abe59d9..c50755e2ffb96 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -413,7 +413,13 @@ def _compute_plot_data(self):
# np.ndarray before plot.
numeric_data = numeric_data.copy()
for col in numeric_data:
- numeric_data[col] = np.asarray(numeric_data[col])
+
+ # GH32073: cast to float if values contain nulled integers
+ values = numeric_data[col]
+ if values.isna().any().all():
+ values = values.astype(float)
+
+ numeric_data[col] = np.asarray(values)
self.data = numeric_data
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index d3f9ac4f3f8b2..f7211ab5f9fd4 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -27,6 +27,7 @@
date_range,
)
import pandas._testing as tm
+from pandas.core.arrays import DatetimeArray, TimedeltaArray
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
@@ -956,6 +957,18 @@ def test_dt64arr_sub_NaT(self, box_with_array):
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
+ def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
+ dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
+ expected = dti - dti
+
+ obj = tm.box_expected(dti, box_with_array)
+ expected = tm.box_expected(expected, box_with_array)
+
+ warn = PerformanceWarning if box_with_array is not pd.DataFrame else None
+ with tm.assert_produces_warning(warn):
+ result = obj - obj.astype(object)
+ tm.assert_equal(result, expected)
+
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
@@ -2395,3 +2408,31 @@ def test_shift_months(years, months):
raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti]
expected = DatetimeIndex(raw)
tm.assert_index_equal(actual, expected)
+
+
+def test_dt64arr_addsub_object_dtype_2d():
+ # block-wise DataFrame operations will require operating on 2D
+ # DatetimeArray/TimedeltaArray, so check that specifically.
+ dti = pd.date_range("1994-02-13", freq="2W", periods=4)
+ dta = dti._data.reshape((4, 1))
+
+ other = np.array([[pd.offsets.Day(n)] for n in range(4)])
+ assert other.shape == dta.shape
+
+ with tm.assert_produces_warning(PerformanceWarning):
+ result = dta + other
+ with tm.assert_produces_warning(PerformanceWarning):
+ expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)
+
+ assert isinstance(result, DatetimeArray)
+ assert result.freq is None
+ tm.assert_numpy_array_equal(result._data, expected._data)
+
+ with tm.assert_produces_warning(PerformanceWarning):
+ # Case where we expect to get a TimedeltaArray back
+ result2 = dta - dta.astype(object)
+
+ assert isinstance(result2, TimedeltaArray)
+ assert result2.shape == (4, 1)
+ assert result2.freq is None
+ assert (result2.asi8 == 0).all()
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 300e468c34e65..b11fcfd20b8c4 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -532,6 +532,20 @@ def test_tda_add_sub_index(self):
expected = tdi - tdi
tm.assert_index_equal(result, expected)
+ def test_tda_add_dt64_object_array(self, box_df_fail, tz_naive_fixture):
+ # Result should be cast back to DatetimeArray
+ dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
+ dti._set_freq(None)
+ tdi = dti - dti
+
+ obj = tm.box_expected(tdi, box_df_fail)
+ other = tm.box_expected(dti, box_df_fail)
+
+ warn = PerformanceWarning if box_df_fail is not pd.DataFrame else None
+ with tm.assert_produces_warning(warn):
+ result = obj + other.astype(object)
+ tm.assert_equal(result, other)
+
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py
index 06ba6cc34ad92..39dca1a9742df 100644
--- a/pandas/tests/base/test_ops.py
+++ b/pandas/tests/base/test_ops.py
@@ -11,9 +11,11 @@
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
+ is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
+ is_period_dtype,
needs_i8_conversion,
)
@@ -295,6 +297,10 @@ def test_value_counts_unique_nunique_null(self, null_obj, index_or_series_obj):
obj[0:2] = pd.NaT
values = obj._values
+ elif is_period_dtype(obj):
+ values[0:2] = iNaT
+ parr = type(obj._data)(values, dtype=obj.dtype)
+ values = obj._shallow_copy(parr)
elif needs_i8_conversion(obj):
values[0:2] = iNaT
values = obj._shallow_copy(values)
@@ -797,62 +803,83 @@ def test_fillna(self):
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
- def test_memory_usage(self):
- for o in self.objs:
- res = o.memory_usage()
- res_deep = o.memory_usage(deep=True)
-
- if is_object_dtype(o) or (
- isinstance(o, Series) and is_object_dtype(o.index)
- ):
- # if there are objects, only deep will pick them up
- assert res_deep > res
- else:
- assert res == res_deep
-
- if isinstance(o, Series):
- assert (
- o.memory_usage(index=False) + o.index.memory_usage()
- ) == o.memory_usage(index=True)
+ def test_memory_usage(self, index_or_series_obj):
+ obj = index_or_series_obj
+ res = obj.memory_usage()
+ res_deep = obj.memory_usage(deep=True)
- # sys.getsizeof will call the .memory_usage with
- # deep=True, and add on some GC overhead
- diff = res_deep - sys.getsizeof(o)
- assert abs(diff) < 100
+ is_object = is_object_dtype(obj) or (
+ isinstance(obj, Series) and is_object_dtype(obj.index)
+ )
+ is_categorical = is_categorical_dtype(obj) or (
+ isinstance(obj, Series) and is_categorical_dtype(obj.index)
+ )
- def test_searchsorted(self):
- # See gh-12238
- for o in self.objs:
- index = np.searchsorted(o, max(o))
- assert 0 <= index <= len(o)
+ if len(obj) == 0:
+ assert res_deep == res == 0
+ elif is_object or is_categorical:
+ # only deep will pick them up
+ assert res_deep > res
+ else:
+ assert res == res_deep
- index = np.searchsorted(o, max(o), sorter=range(len(o)))
- assert 0 <= index <= len(o)
+ # sys.getsizeof will call the .memory_usage with
+ # deep=True, and add on some GC overhead
+ diff = res_deep - sys.getsizeof(obj)
+ assert abs(diff) < 100
- def test_validate_bool_args(self):
- invalid_values = [1, "True", [1, 2, 3], 5.0]
+ def test_memory_usage_components_series(self, series_with_simple_index):
+ series = series_with_simple_index
+ total_usage = series.memory_usage(index=True)
+ non_index_usage = series.memory_usage(index=False)
+ index_usage = series.index.memory_usage()
+ assert total_usage == non_index_usage + index_usage
+
+ def test_memory_usage_components_narrow_series(self, narrow_series):
+ series = narrow_series
+ total_usage = series.memory_usage(index=True)
+ non_index_usage = series.memory_usage(index=False)
+ index_usage = series.index.memory_usage()
+ assert total_usage == non_index_usage + index_usage
+
+ def test_searchsorted(self, index_or_series_obj):
+ # numpy.searchsorted calls obj.searchsorted under the hood.
+ # See gh-12238
+ obj = index_or_series_obj
- for value in invalid_values:
- msg = "expected type bool"
- with pytest.raises(ValueError, match=msg):
- self.int_series.drop_duplicates(inplace=value)
+ if isinstance(obj, pd.MultiIndex):
+ # See gh-14833
+ pytest.skip("np.searchsorted doesn't work on pd.MultiIndex")
- def test_getitem(self):
- for i in self.indexes:
- s = pd.Series(i)
+ max_obj = max(obj, default=0)
+ index = np.searchsorted(obj, max_obj)
+ assert 0 <= index <= len(obj)
- assert i[0] == s.iloc[0]
- assert i[5] == s.iloc[5]
- assert i[-1] == s.iloc[-1]
+ index = np.searchsorted(obj, max_obj, sorter=range(len(obj)))
+ assert 0 <= index <= len(obj)
- assert i[-1] == i[9]
+ def test_access_by_position(self, indices):
+ index = indices
- msg = "index 20 is out of bounds for axis 0 with size 10"
- with pytest.raises(IndexError, match=msg):
- i[20]
- msg = "single positional indexer is out-of-bounds"
- with pytest.raises(IndexError, match=msg):
- s.iloc[20]
+ if len(index) == 0:
+ pytest.skip("Test doesn't make sense on empty data")
+ elif isinstance(index, pd.MultiIndex):
+ pytest.skip("Can't instantiate Series from MultiIndex")
+
+ series = pd.Series(index)
+ assert index[0] == series.iloc[0]
+ assert index[5] == series.iloc[5]
+ assert index[-1] == series.iloc[-1]
+
+ size = len(index)
+ assert index[-1] == index[size - 1]
+
+ msg = f"index {size} is out of bounds for axis 0 with size {size}"
+ with pytest.raises(IndexError, match=msg):
+ index[size]
+ msg = "single positional indexer is out-of-bounds"
+ with pytest.raises(IndexError, match=msg):
+ series.iloc[size]
@pytest.mark.parametrize("indexer_klass", [list, pd.Index])
@pytest.mark.parametrize(
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 9eb5fda87d2d2..55b1ac819049d 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -26,7 +26,14 @@
)
import pandas as pd
-from pandas import Categorical, CategoricalIndex, IntervalIndex, Series, date_range
+from pandas import (
+ Categorical,
+ CategoricalIndex,
+ DatetimeIndex,
+ IntervalIndex,
+ Series,
+ date_range,
+)
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray, SparseDtype
@@ -177,6 +184,11 @@ def test_is_boolean(self, categories, expected):
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
+ def test_dtype_specific_categorical_dtype(self):
+ expected = "datetime64[ns]"
+ result = str(Categorical(DatetimeIndex([])).categories.dtype)
+ assert result == expected
+
class TestDatetimeTZDtype(Base):
@pytest.fixture
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index 69a97f5c9fe02..059d3453995bd 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -282,6 +282,19 @@ def _compare_other(self, s, data, op_name, other):
with pytest.raises(TypeError, match=msg):
op(data, other)
+ @pytest.mark.parametrize(
+ "categories",
+ [["a", "b"], [0, 1], [pd.Timestamp("2019"), pd.Timestamp("2020")]],
+ )
+ def test_not_equal_with_na(self, categories):
+ # https://github.com/pandas-dev/pandas/issues/32276
+ c1 = Categorical.from_codes([-1, 0], categories=categories)
+ c2 = Categorical.from_codes([0, 1], categories=categories)
+
+ result = c1 != c2
+
+ assert result.all()
+
class TestParsing(base.BaseParsingTests):
pass
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index 92b74c4409d7d..ee89562261b19 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1363,3 +1363,14 @@ def test_replace_after_convert_dtypes(self):
result = df.replace(1, 10)
expected = pd.DataFrame({"grp": [10, 2, 3, 4, 5]}, dtype="Int64")
tm.assert_frame_equal(result, expected)
+
+ def test_replace_invalid_to_replace(self):
+ # GH 18634
+ # API: replace() should raise an exception if invalid argument is given
+ df = pd.DataFrame({"one": ["a", "b ", "c"], "two": ["d ", "e ", "f "]})
+ msg = (
+ r"Expecting 'to_replace' to be either a scalar, array-like, "
+ r"dict or None, got invalid type.*"
+ )
+ with pytest.raises(TypeError, match=msg):
+ df.replace(lambda x: x.strip())
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index 34df8bb57dd91..e37170d4155f8 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -689,44 +689,6 @@ def test_rename_axis_mapper(self):
with pytest.raises(TypeError, match="bogus"):
df.rename_axis(bogus=None)
- def test_reorder_levels(self):
- index = MultiIndex(
- levels=[["bar"], ["one", "two", "three"], [0, 1]],
- codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
- names=["L0", "L1", "L2"],
- )
- df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
-
- # no change, position
- result = df.reorder_levels([0, 1, 2])
- tm.assert_frame_equal(df, result)
-
- # no change, labels
- result = df.reorder_levels(["L0", "L1", "L2"])
- tm.assert_frame_equal(df, result)
-
- # rotate, position
- result = df.reorder_levels([1, 2, 0])
- e_idx = MultiIndex(
- levels=[["one", "two", "three"], [0, 1], ["bar"]],
- codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
- names=["L1", "L2", "L0"],
- )
- expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
- tm.assert_frame_equal(result, expected)
-
- result = df.reorder_levels([0, 0, 0])
- e_idx = MultiIndex(
- levels=[["bar"], ["bar"], ["bar"]],
- codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
- names=["L0", "L0", "L0"],
- )
- expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
- tm.assert_frame_equal(result, expected)
-
- result = df.reorder_levels(["L0", "L0", "L0"])
- tm.assert_frame_equal(result, expected)
-
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = "name"
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index 321eb5fe94daf..7eba2b873c4f4 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -8,7 +8,7 @@
import pandas._testing as tm
-class TestDataFrameConcatCommon:
+class TestDataFrameConcat:
def test_concat_multiple_frames_dtypes(self):
# GH 2759
@@ -107,77 +107,6 @@ def test_concat_tuple_keys(self):
)
tm.assert_frame_equal(results, expected)
- def test_join_str_datetime(self):
- str_dates = ["20120209", "20120222"]
- dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
-
- A = DataFrame(str_dates, index=range(2), columns=["aa"])
- C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)
-
- tst = A.join(C, on="aa")
-
- assert len(tst.columns) == 3
-
- def test_join_multiindex_leftright(self):
- # GH 10741
- df1 = pd.DataFrame(
- [
- ["a", "x", 0.471780],
- ["a", "y", 0.774908],
- ["a", "z", 0.563634],
- ["b", "x", -0.353756],
- ["b", "y", 0.368062],
- ["b", "z", -1.721840],
- ["c", "x", 1],
- ["c", "y", 2],
- ["c", "z", 3],
- ],
- columns=["first", "second", "value1"],
- ).set_index(["first", "second"])
-
- df2 = pd.DataFrame(
- [["a", 10], ["b", 20]], columns=["first", "value2"]
- ).set_index(["first"])
-
- exp = pd.DataFrame(
- [
- [0.471780, 10],
- [0.774908, 10],
- [0.563634, 10],
- [-0.353756, 20],
- [0.368062, 20],
- [-1.721840, 20],
- [1.000000, np.nan],
- [2.000000, np.nan],
- [3.000000, np.nan],
- ],
- index=df1.index,
- columns=["value1", "value2"],
- )
-
- # these must be the same results (but columns are flipped)
- tm.assert_frame_equal(df1.join(df2, how="left"), exp)
- tm.assert_frame_equal(df2.join(df1, how="right"), exp[["value2", "value1"]])
-
- exp_idx = pd.MultiIndex.from_product(
- [["a", "b"], ["x", "y", "z"]], names=["first", "second"]
- )
- exp = pd.DataFrame(
- [
- [0.471780, 10],
- [0.774908, 10],
- [0.563634, 10],
- [-0.353756, 20],
- [0.368062, 20],
- [-1.721840, 20],
- ],
- index=exp_idx,
- columns=["value1", "value2"],
- )
-
- tm.assert_frame_equal(df1.join(df2, how="right"), exp)
- tm.assert_frame_equal(df2.join(df1, how="left"), exp[["value2", "value1"]])
-
def test_concat_named_keys(self):
# GH 14252
df = pd.DataFrame({"foo": [1, 2], "bar": [0.1, 0.2]})
diff --git a/pandas/tests/frame/test_join.py b/pandas/tests/frame/test_join.py
index 8c388a887158f..4d6e675c6765f 100644
--- a/pandas/tests/frame/test_join.py
+++ b/pandas/tests/frame/test_join.py
@@ -1,6 +1,9 @@
+from datetime import datetime
+
import numpy as np
import pytest
+import pandas as pd
from pandas import DataFrame, Index, period_range
import pandas._testing as tm
@@ -216,3 +219,76 @@ def test_suppress_future_warning_with_sort_kw(sort_kw):
with tm.assert_produces_warning(None, check_stacklevel=False):
result = a.join([b, c], how="outer", sort=sort_kw)
tm.assert_frame_equal(result, expected)
+
+
+class TestDataFrameJoin:
+ def test_join_str_datetime(self):
+ str_dates = ["20120209", "20120222"]
+ dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
+
+ A = DataFrame(str_dates, index=range(2), columns=["aa"])
+ C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)
+
+ tst = A.join(C, on="aa")
+
+ assert len(tst.columns) == 3
+
+ def test_join_multiindex_leftright(self):
+ # GH 10741
+ df1 = pd.DataFrame(
+ [
+ ["a", "x", 0.471780],
+ ["a", "y", 0.774908],
+ ["a", "z", 0.563634],
+ ["b", "x", -0.353756],
+ ["b", "y", 0.368062],
+ ["b", "z", -1.721840],
+ ["c", "x", 1],
+ ["c", "y", 2],
+ ["c", "z", 3],
+ ],
+ columns=["first", "second", "value1"],
+ ).set_index(["first", "second"])
+
+ df2 = pd.DataFrame(
+ [["a", 10], ["b", 20]], columns=["first", "value2"]
+ ).set_index(["first"])
+
+ exp = pd.DataFrame(
+ [
+ [0.471780, 10],
+ [0.774908, 10],
+ [0.563634, 10],
+ [-0.353756, 20],
+ [0.368062, 20],
+ [-1.721840, 20],
+ [1.000000, np.nan],
+ [2.000000, np.nan],
+ [3.000000, np.nan],
+ ],
+ index=df1.index,
+ columns=["value1", "value2"],
+ )
+
+ # these must be the same results (but columns are flipped)
+ tm.assert_frame_equal(df1.join(df2, how="left"), exp)
+ tm.assert_frame_equal(df2.join(df1, how="right"), exp[["value2", "value1"]])
+
+ exp_idx = pd.MultiIndex.from_product(
+ [["a", "b"], ["x", "y", "z"]], names=["first", "second"]
+ )
+ exp = pd.DataFrame(
+ [
+ [0.471780, 10],
+ [0.774908, 10],
+ [0.563634, 10],
+ [-0.353756, 20],
+ [0.368062, 20],
+ [-1.721840, 20],
+ ],
+ index=exp_idx,
+ columns=["value1", "value2"],
+ )
+
+ tm.assert_frame_equal(df1.join(df2, how="right"), exp)
+ tm.assert_frame_equal(df2.join(df1, how="left"), exp[["value2", "value1"]])
diff --git a/pandas/tests/generic/methods/test_reorder_levels.py b/pandas/tests/generic/methods/test_reorder_levels.py
new file mode 100644
index 0000000000000..8bb6417e56659
--- /dev/null
+++ b/pandas/tests/generic/methods/test_reorder_levels.py
@@ -0,0 +1,73 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame, MultiIndex, Series
+import pandas._testing as tm
+
+
+class TestReorderLevels:
+ @pytest.mark.parametrize("klass", [Series, DataFrame])
+ def test_reorder_levels(self, klass):
+ index = MultiIndex(
+ levels=[["bar"], ["one", "two", "three"], [0, 1]],
+ codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
+ names=["L0", "L1", "L2"],
+ )
+ df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
+ obj = df if klass is DataFrame else df["A"]
+
+ # no change, position
+ result = obj.reorder_levels([0, 1, 2])
+ tm.assert_equal(obj, result)
+
+ # no change, labels
+ result = obj.reorder_levels(["L0", "L1", "L2"])
+ tm.assert_equal(obj, result)
+
+ # rotate, position
+ result = obj.reorder_levels([1, 2, 0])
+ e_idx = MultiIndex(
+ levels=[["one", "two", "three"], [0, 1], ["bar"]],
+ codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
+ names=["L1", "L2", "L0"],
+ )
+ expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
+ expected = expected if klass is DataFrame else expected["A"]
+ tm.assert_equal(result, expected)
+
+ result = obj.reorder_levels([0, 0, 0])
+ e_idx = MultiIndex(
+ levels=[["bar"], ["bar"], ["bar"]],
+ codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
+ names=["L0", "L0", "L0"],
+ )
+ expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
+ expected = expected if klass is DataFrame else expected["A"]
+ tm.assert_equal(result, expected)
+
+ result = obj.reorder_levels(["L0", "L0", "L0"])
+ tm.assert_equal(result, expected)
+
+ def test_reorder_levels_swaplevel_equivalence(
+ self, multiindex_year_month_day_dataframe_random_data
+ ):
+
+ ymd = multiindex_year_month_day_dataframe_random_data
+
+ result = ymd.reorder_levels(["month", "day", "year"])
+ expected = ymd.swaplevel(0, 1).swaplevel(1, 2)
+ tm.assert_frame_equal(result, expected)
+
+ result = ymd["A"].reorder_levels(["month", "day", "year"])
+ expected = ymd["A"].swaplevel(0, 1).swaplevel(1, 2)
+ tm.assert_series_equal(result, expected)
+
+ result = ymd.T.reorder_levels(["month", "day", "year"], axis=1)
+ expected = ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
+ tm.assert_frame_equal(result, expected)
+
+ with pytest.raises(TypeError, match="hierarchical axis"):
+ ymd.reorder_levels([1, 2], axis=1)
+
+ with pytest.raises(IndexError, match="Too many levels"):
+ ymd.index.reorder_levels([1, 2, 3])
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index ceab670fb5041..554ae76979ba8 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -121,6 +121,14 @@ def test_dti_custom_getitem_matplotlib_hackaround(self):
class TestWhere:
+ def test_where_doesnt_retain_freq(self):
+ dti = date_range("20130101", periods=3, freq="D", name="idx")
+ cond = [True, True, False]
+ expected = DatetimeIndex([dti[0], dti[1], dti[0]], freq=None, name="idx")
+
+ result = dti.where(cond, dti[::-1])
+ tm.assert_index_equal(result, expected)
+
def test_where_other(self):
# other is ndarray or Index
i = pd.date_range("20130101", periods=3, tz="US/Eastern")
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 40c7ffba46450..ab3e967f12360 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -117,7 +117,6 @@ def test_make_time_series(self):
assert isinstance(series, Series)
def test_shallow_copy_empty(self):
-
# GH13067
idx = PeriodIndex([], freq="M")
result = idx._shallow_copy()
@@ -125,11 +124,16 @@ def test_shallow_copy_empty(self):
tm.assert_index_equal(result, expected)
- def test_shallow_copy_i8(self):
+ def test_shallow_copy_disallow_i8(self):
# GH-24391
pi = period_range("2018-01-01", periods=3, freq="2D")
- result = pi._shallow_copy(pi.asi8)
- tm.assert_index_equal(result, pi)
+ with pytest.raises(AssertionError, match="ndarray"):
+ pi._shallow_copy(pi.asi8)
+
+ def test_shallow_copy_requires_disallow_period_index(self):
+ pi = period_range("2018-01-01", periods=3, freq="2D")
+ with pytest.raises(AssertionError, match="PeriodIndex"):
+ pi._shallow_copy(pi)
def test_view_asi8(self):
idx = PeriodIndex([], freq="M")
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index b46e6514b4536..c6ba5c9d61e9e 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -10,7 +10,7 @@
from pandas._libs.tslibs import iNaT
-from pandas.core.dtypes.common import needs_i8_conversion
+from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion
import pandas as pd
from pandas import CategoricalIndex, MultiIndex, RangeIndex
@@ -219,7 +219,10 @@ def test_get_unique_index(self, indices):
if not indices._can_hold_na:
pytest.skip("Skip na-check if index cannot hold na")
- if needs_i8_conversion(indices):
+ if is_period_dtype(indices):
+ vals = indices[[0] * 5]._data
+ vals[0] = pd.NaT
+ elif needs_i8_conversion(indices):
vals = indices.asi8[[0] * 5]
vals[0] = iNaT
else:
diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py
index 14fff6f9c85b5..5dec799832291 100644
--- a/pandas/tests/indexes/timedeltas/test_indexing.py
+++ b/pandas/tests/indexes/timedeltas/test_indexing.py
@@ -66,6 +66,14 @@ def test_timestamp_invalid_key(self, key):
class TestWhere:
+ def test_where_doesnt_retain_freq(self):
+ tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
+ cond = [True, True, False]
+ expected = TimedeltaIndex([tdi[0], tdi[1], tdi[0]], freq=None, name="idx")
+
+ result = tdi.where(cond, tdi[::-1])
+ tm.assert_index_equal(result, expected)
+
def test_where_invalid_dtypes(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
index 9f12af9a96104..c07a6471c732f 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
@@ -38,6 +38,7 @@ def test_linspace_behavior(self, periods, freq):
result = timedelta_range(start="0 days", end="4 days", periods=periods)
expected = timedelta_range(start="0 days", end="4 days", freq=freq)
tm.assert_index_equal(result, expected)
+ assert result.freq == freq
def test_errors(self):
# not enough params
diff --git a/pandas/tests/indexing/multiindex/conftest.py b/pandas/tests/indexing/multiindex/conftest.py
index 0256f5e35e1db..c69d6f86a6ce6 100644
--- a/pandas/tests/indexing/multiindex/conftest.py
+++ b/pandas/tests/indexing/multiindex/conftest.py
@@ -2,7 +2,6 @@
import pytest
from pandas import DataFrame, Index, MultiIndex
-import pandas._testing as tm
@pytest.fixture
@@ -16,17 +15,3 @@ def multiindex_dataframe_random_data():
return DataFrame(
np.random.randn(10, 3), index=index, columns=Index(["A", "B", "C"], name="exp")
)
-
-
-@pytest.fixture
-def multiindex_year_month_day_dataframe_random_data():
- """
- DataFrame with 3 level MultiIndex (year, month, day) covering
- first 100 business days from 2000-01-01 with random data
- """
- tdf = tm.makeTimeDataFrame(100)
- ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
- # use Int64Index, to make sure things work
- ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels], inplace=True)
- ymd.index.set_names(["year", "month", "day"], inplace=True)
- return ymd
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index fff5ca03e80f4..18b9898e7d800 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -40,20 +40,7 @@ def check(self, result, original, indexer, getitem):
tm.assert_almost_equal(result, expected)
- @pytest.mark.parametrize(
- "index_func",
- [
- tm.makeStringIndex,
- tm.makeUnicodeIndex,
- tm.makeCategoricalIndex,
- tm.makeDateIndex,
- tm.makeTimedeltaIndex,
- tm.makePeriodIndex,
- tm.makeIntIndex,
- tm.makeRangeIndex,
- ],
- )
- def test_scalar_error(self, index_func):
+ def test_scalar_error(self, series_with_simple_index):
# GH 4892
# float_indexers should raise exceptions
@@ -62,11 +49,9 @@ def test_scalar_error(self, index_func):
# but is specifically testing for the error
# message
- i = index_func(5)
+ s = series_with_simple_index
- s = Series(np.arange(len(i)), index=i)
-
- msg = "Cannot index by location index"
+ msg = "Cannot index by location index with a non-integer key"
with pytest.raises(TypeError, match=msg):
s.iloc[3.0]
diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py
index 7367b19b40dc3..4d933fa02d36f 100644
--- a/pandas/tests/io/parser/test_python_parser_only.py
+++ b/pandas/tests/io/parser/test_python_parser_only.py
@@ -66,6 +66,24 @@ def test_sniff_delimiter(python_parser_only, kwargs):
tm.assert_frame_equal(result, expected)
+def test_sniff_delimiter_comment(python_parser_only):
+ data = """# comment line
+index|A|B|C
+# comment line
+foo|1|2|3 # ignore | this
+bar|4|5|6
+baz|7|8|9
+"""
+ parser = python_parser_only
+ result = parser.read_csv(StringIO(data), index_col=0, sep=None, comment="#")
+ expected = DataFrame(
+ [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ columns=["A", "B", "C"],
+ index=Index(["foo", "bar", "baz"], name="index"),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
@pytest.mark.parametrize("encoding", [None, "utf-8"])
def test_sniff_delimiter_encoding(python_parser_only, encoding):
parser = python_parser_only
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index ffbd135466709..7ae8854ffa4bd 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -167,6 +167,17 @@ def test_integer_array_plot(self):
_check_plot_works(df.plot.scatter, x="x", y="y")
_check_plot_works(df.plot.hexbin, x="x", y="y")
+ def test_nullable_int_plot(self):
+ # GH32073
+ dates = ["2008", "2009", None, "2011", "2012"]
+ df = pd.DataFrame({"A": [1, 2, 3, 4, 5],
+ "B": [7, 5, np.nan, 3, 2],
+ "C": pd.to_datetime(dates, format="%Y")})
+
+ _check_plot_works(df.plot, x="A", y="B")
+ _check_plot_works(df[["A", "B"]].astype("Int64").plot, x="A", y="B")
+ _check_plot_works(df[["A", "C"]].plot, x="A", y="C")
+
def test_mpl2_color_cycle_str(self):
# GH 15516
colors = ["C" + str(x) for x in range(10)]
diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py
index dcb9d66708724..07656de2e9062 100644
--- a/pandas/tests/scalar/test_na_scalar.py
+++ b/pandas/tests/scalar/test_na_scalar.py
@@ -1,3 +1,5 @@
+import pickle
+
import numpy as np
import pytest
@@ -267,3 +269,26 @@ def test_integer_hash_collision_set():
assert len(result) == 2
assert NA in result
assert hash(NA) in result
+
+
+def test_pickle_roundtrip():
+ # https://github.com/pandas-dev/pandas/issues/31847
+ result = pickle.loads(pickle.dumps(pd.NA))
+ assert result is pd.NA
+
+
+def test_pickle_roundtrip_pandas():
+ result = tm.round_trip_pickle(pd.NA)
+ assert result is pd.NA
+
+
+@pytest.mark.parametrize(
+ "values, dtype", [([1, 2, pd.NA], "Int64"), (["A", "B", pd.NA], "string")]
+)
+@pytest.mark.parametrize("as_frame", [True, False])
+def test_pickle_roundtrip_containers(as_frame, values, dtype):
+ s = pd.Series(pd.array(values, dtype=dtype))
+ if as_frame:
+ s = s.to_frame(name="A")
+ result = tm.round_trip_pickle(s)
+ tm.assert_equal(result, s)
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index 230a14aeec60a..ea02a76275443 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -412,6 +412,46 @@ def test_td_rdiv_timedeltalike_scalar(self):
assert np.timedelta64(60, "h") / td == 0.25
+ def test_td_rdiv_na_scalar(self):
+ # GH#31869 None gets cast to NaT
+ td = Timedelta(10, unit="d")
+
+ result = NaT / td
+ assert np.isnan(result)
+
+ result = None / td
+ assert np.isnan(result)
+
+ result = np.timedelta64("NaT") / td
+ assert np.isnan(result)
+
+ with pytest.raises(TypeError, match="cannot use operands with types dtype"):
+ np.datetime64("NaT") / td
+
+ with pytest.raises(TypeError, match="Cannot divide float by Timedelta"):
+ np.nan / td
+
+ def test_td_rdiv_ndarray(self):
+ td = Timedelta(10, unit="d")
+
+ arr = np.array([td], dtype=object)
+ result = arr / td
+ expected = np.array([1], dtype=np.float64)
+ tm.assert_numpy_array_equal(result, expected)
+
+ arr = np.array([None])
+ result = arr / td
+ expected = np.array([np.nan])
+ tm.assert_numpy_array_equal(result, expected)
+
+ arr = np.array([np.nan], dtype=object)
+ with pytest.raises(TypeError, match="Cannot divide float by Timedelta"):
+ arr / td
+
+ arr = np.array([np.nan], dtype=np.float64)
+ with pytest.raises(TypeError, match="cannot use operands with types dtype"):
+ arr / td
+
# ---------------------------------------------------------------
# Timedelta.__floordiv__
diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py
index 770ad38b0215e..26eaf53616282 100644
--- a/pandas/tests/series/methods/test_replace.py
+++ b/pandas/tests/series/methods/test_replace.py
@@ -362,3 +362,14 @@ def test_replace_no_cast(self, ser, exp):
expected = pd.Series(exp)
tm.assert_series_equal(result, expected)
+
+ def test_replace_invalid_to_replace(self):
+ # GH 18634
+ # API: replace() should raise an exception if invalid argument is given
+ series = pd.Series(["a", "b", "c "])
+ msg = (
+ r"Expecting 'to_replace' to be either a scalar, array-like, "
+ r"dict or None, got invalid type.*"
+ )
+ with pytest.raises(TypeError, match=msg):
+ series.replace(lambda x: x.strip())
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index f6ca93b0c2882..769d1ed877a69 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -54,32 +54,6 @@ def test_set_index_makes_timeseries(self):
s.index = idx
assert s.index.is_all_dates
- def test_reorder_levels(self):
- index = MultiIndex(
- levels=[["bar"], ["one", "two", "three"], [0, 1]],
- codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
- names=["L0", "L1", "L2"],
- )
- s = Series(np.arange(6), index=index)
-
- # no change, position
- result = s.reorder_levels([0, 1, 2])
- tm.assert_series_equal(s, result)
-
- # no change, labels
- result = s.reorder_levels(["L0", "L1", "L2"])
- tm.assert_series_equal(s, result)
-
- # rotate, position
- result = s.reorder_levels([1, 2, 0])
- e_idx = MultiIndex(
- levels=[["one", "two", "three"], [0, 1], ["bar"]],
- codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
- names=["L1", "L2", "L0"],
- )
- expected = Series(np.arange(6), index=e_idx)
- tm.assert_series_equal(result, expected)
-
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index adb79f69c2d81..0766bfc37d7ca 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -5,7 +5,7 @@
from pandas import Series
-class TestSeriesCombine:
+class TestSeriesConcat:
@pytest.mark.parametrize(
"dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
)
diff --git a/pandas/tests/series/test_validate.py b/pandas/tests/series/test_validate.py
index 511d24ca7fa29..e2f050650b298 100644
--- a/pandas/tests/series/test_validate.py
+++ b/pandas/tests/series/test_validate.py
@@ -3,7 +3,15 @@
@pytest.mark.parametrize(
"func",
- ["reset_index", "_set_name", "sort_values", "sort_index", "rename", "dropna"],
+ [
+ "reset_index",
+ "_set_name",
+ "sort_values",
+ "sort_index",
+ "rename",
+ "dropna",
+ "drop_duplicates",
+ ],
)
@pytest.mark.parametrize("inplace", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_args(string_series, func, inplace):
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index e3cf46b466ae4..84279d874bae1 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -939,25 +939,6 @@ def test_swaplevel(self):
with pytest.raises(TypeError, match=msg):
DataFrame(range(3)).swaplevel()
- def test_reorder_levels(self):
- result = self.ymd.reorder_levels(["month", "day", "year"])
- expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
- tm.assert_frame_equal(result, expected)
-
- result = self.ymd["A"].reorder_levels(["month", "day", "year"])
- expected = self.ymd["A"].swaplevel(0, 1).swaplevel(1, 2)
- tm.assert_series_equal(result, expected)
-
- result = self.ymd.T.reorder_levels(["month", "day", "year"], axis=1)
- expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
- tm.assert_frame_equal(result, expected)
-
- with pytest.raises(TypeError, match="hierarchical axis"):
- self.ymd.reorder_levels([1, 2], axis=1)
-
- with pytest.raises(IndexError, match="Too many levels"):
- self.ymd.index.reorder_levels([1, 2, 3])
-
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index f9502cc22b0c6..7fc85a04e7d84 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -5,8 +5,9 @@
import platform
import struct
import sys
-from typing import List, Optional, Tuple, Union
+from typing import Dict, Optional, Union
+from pandas._typing import JSONSerializable
from pandas.compat._optional import VERSIONS, _get_version, import_optional_dependency
@@ -21,43 +22,32 @@ def _get_commit_hash() -> Optional[str]:
return versions["full-revisionid"]
-def get_sys_info() -> List[Tuple[str, Optional[Union[str, int]]]]:
+def _get_sys_info() -> Dict[str, JSONSerializable]:
"""
- Returns system information as a list
+ Returns system information as a JSON serializable dictionary.
+ """
+ uname_result = platform.uname()
+ language_code, encoding = locale.getlocale()
+ return {
+ "commit": _get_commit_hash(),
+ "python": ".".join(str(i) for i in sys.version_info),
+ "python-bits": struct.calcsize("P") * 8,
+ "OS": uname_result.system,
+ "OS-release": uname_result.release,
+ "Version": uname_result.version,
+ "machine": uname_result.machine,
+ "processor": uname_result.processor,
+ "byteorder": sys.byteorder,
+ "LC_ALL": os.environ.get("LC_ALL"),
+ "LANG": os.environ.get("LANG"),
+ "LOCALE": {"language-code": language_code, "encoding": encoding},
+ }
+
+
+def _get_dependency_info() -> Dict[str, JSONSerializable]:
+ """
+ Returns dependency information as a JSON serializable dictionary.
"""
- blob: List[Tuple[str, Optional[Union[str, int]]]] = []
-
- # get full commit hash
- commit = _get_commit_hash()
-
- blob.append(("commit", commit))
-
- try:
- (sysname, nodename, release, version, machine, processor) = platform.uname()
- blob.extend(
- [
- ("python", ".".join(map(str, sys.version_info))),
- ("python-bits", struct.calcsize("P") * 8),
- ("OS", f"{sysname}"),
- ("OS-release", f"{release}"),
- # FIXME: dont leave commented-out
- # ("Version", f"{version}"),
- ("machine", f"{machine}"),
- ("processor", f"{processor}"),
- ("byteorder", f"{sys.byteorder}"),
- ("LC_ALL", f"{os.environ.get('LC_ALL', 'None')}"),
- ("LANG", f"{os.environ.get('LANG', 'None')}"),
- ("LOCALE", ".".join(map(str, locale.getlocale()))),
- ]
- )
- except (KeyError, ValueError):
- pass
-
- return blob
-
-
-def show_versions(as_json=False):
- sys_info = get_sys_info()
deps = [
"pandas",
# required
@@ -86,39 +76,45 @@ def show_versions(as_json=False):
"IPython",
"pandas_datareader",
]
-
deps.extend(list(VERSIONS))
- deps_blob = []
+ result: Dict[str, JSONSerializable] = {}
for modname in deps:
mod = import_optional_dependency(
modname, raise_on_missing=False, on_version="ignore"
)
- ver: Optional[str]
- if mod:
- ver = _get_version(mod)
- else:
- ver = None
- deps_blob.append((modname, ver))
+ result[modname] = _get_version(mod) if mod else None
+ return result
+
+
+def show_versions(as_json: Union[str, bool] = False) -> None:
+ sys_info = _get_sys_info()
+ deps = _get_dependency_info()
if as_json:
- j = dict(system=dict(sys_info), dependencies=dict(deps_blob))
+ j = dict(system=sys_info, dependencies=deps)
if as_json is True:
print(j)
else:
+ assert isinstance(as_json, str) # needed for mypy
with codecs.open(as_json, "wb", encoding="utf8") as f:
json.dump(j, f, indent=2)
else:
+ assert isinstance(sys_info["LOCALE"], dict) # needed for mypy
+ language_code = sys_info["LOCALE"]["language-code"]
+ encoding = sys_info["LOCALE"]["encoding"]
+ sys_info["LOCALE"] = f"{language_code}.{encoding}"
+
maxlen = max(len(x) for x in deps)
print("\nINSTALLED VERSIONS")
print("------------------")
- for k, stat in sys_info:
- print(f"{k:<{maxlen}}: {stat}")
+ for k, v in sys_info.items():
+ print(f"{k:<{maxlen}}: {v}")
print("")
- for k, stat in deps_blob:
- print(f"{k:<{maxlen}}: {stat}")
+ for k, v in deps.items():
+ print(f"{k:<{maxlen}}: {v}")
def main() -> int:
| - [x] closes #32073
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32387 | 2020-03-01T18:08:35Z | 2020-03-03T10:48:21Z | null | 2020-03-03T10:48:21Z |
BUG: Fix rolling functions with variable windows on decreasing index | diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index 1b6098e6b6ac1..fae41953a6371 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -83,6 +83,10 @@ Bug fixes
- Fixed bug where :meth:`GroupBy.first` and :meth:`GroupBy.last` would raise a ``TypeError`` when groups contained ``pd.NA`` in a column of object dtype (:issue:`32123`)
- Fix bug in :meth:`Series.convert_dtypes` for series with mix of integers and strings (:issue:`32117`)
+**Rolling**
+
+- Fixed rolling operations with variable window (defined by time duration) on decreasing time index (:issue:`32385`).
+
.. ---------------------------------------------------------------------------
.. _whatsnew_102.contributors:
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 80b9144042041..a90d2f77e44d1 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -1013,7 +1013,7 @@ def roll_max_variable(ndarray[float64_t] values, ndarray[int64_t] start,
def roll_min_fixed(ndarray[float64_t] values, ndarray[int64_t] start,
ndarray[int64_t] end, int64_t minp, int64_t win):
"""
- Moving max of 1d array of any numeric type along axis=0 ignoring NaNs.
+ Moving min of 1d array of any numeric type along axis=0 ignoring NaNs.
Parameters
----------
@@ -1030,7 +1030,7 @@ def roll_min_fixed(ndarray[float64_t] values, ndarray[int64_t] start,
def roll_min_variable(ndarray[float64_t] values, ndarray[int64_t] start,
ndarray[int64_t] end, int64_t minp):
"""
- Moving max of 1d array of any numeric type along axis=0 ignoring NaNs.
+ Moving min of 1d array of any numeric type along axis=0 ignoring NaNs.
Parameters
----------
diff --git a/pandas/_libs/window/indexers.pyx b/pandas/_libs/window/indexers.pyx
index 2d01d1964c043..8a1e7feb57ace 100644
--- a/pandas/_libs/window/indexers.pyx
+++ b/pandas/_libs/window/indexers.pyx
@@ -44,6 +44,7 @@ def calculate_variable_window_bounds(
cdef:
bint left_closed = False
bint right_closed = False
+ int index_growth_sign = 1
ndarray[int64_t, ndim=1] start, end
int64_t start_bound, end_bound
Py_ssize_t i, j
@@ -58,6 +59,9 @@ def calculate_variable_window_bounds(
if closed in ['left', 'both']:
left_closed = True
+ if index[num_values - 1] < index[0]:
+ index_growth_sign = -1
+
start = np.empty(num_values, dtype='int64')
start.fill(-1)
end = np.empty(num_values, dtype='int64')
@@ -78,7 +82,7 @@ def calculate_variable_window_bounds(
# end is end of slice interval (not including)
for i in range(1, num_values):
end_bound = index[i]
- start_bound = index[i] - window_size
+ start_bound = index[i] - index_growth_sign * window_size
# left endpoint is closed
if left_closed:
@@ -88,13 +92,13 @@ def calculate_variable_window_bounds(
# within the constraint
start[i] = i
for j in range(start[i - 1], i):
- if index[j] > start_bound:
+ if (index[j] - start_bound) * index_growth_sign > 0:
start[i] = j
break
# end bound is previous end
# or current index
- if index[end[i - 1]] <= end_bound:
+ if (index[end[i - 1]] - end_bound) * index_growth_sign <= 0:
end[i] = i + 1
else:
end[i] = end[i - 1]
diff --git a/pandas/tests/window/test_timeseries_window.py b/pandas/tests/window/test_timeseries_window.py
index 5f5e10b5dd497..0c5289cd78fed 100644
--- a/pandas/tests/window/test_timeseries_window.py
+++ b/pandas/tests/window/test_timeseries_window.py
@@ -709,20 +709,25 @@ def test_rolling_cov_offset(self):
tm.assert_series_equal(result, expected2)
def test_rolling_on_decreasing_index(self):
- # GH-19248
+ # GH-19248, GH-32385
index = [
- Timestamp("20190101 09:00:00"),
- Timestamp("20190101 09:00:02"),
- Timestamp("20190101 09:00:03"),
- Timestamp("20190101 09:00:05"),
- Timestamp("20190101 09:00:06"),
+ Timestamp("20190101 09:00:30"),
+ Timestamp("20190101 09:00:27"),
+ Timestamp("20190101 09:00:20"),
+ Timestamp("20190101 09:00:18"),
+ Timestamp("20190101 09:00:10"),
]
- df = DataFrame({"column": [3, 4, 4, 2, 1]}, index=reversed(index))
- result = df.rolling("2s").min()
- expected = DataFrame(
- {"column": [3.0, 3.0, 3.0, 2.0, 1.0]}, index=reversed(index)
- )
+ df = DataFrame({"column": [3, 4, 4, 5, 6]}, index=index)
+ result = df.rolling("5s").min()
+ expected = DataFrame({"column": [3.0, 3.0, 4.0, 4.0, 6.0]}, index=index)
+ tm.assert_frame_equal(result, expected)
+
+ def test_rolling_on_empty(self):
+ # GH-32385
+ df = DataFrame({"column": []}, index=[])
+ result = df.rolling("5s").min()
+ expected = DataFrame({"column": []}, index=[])
tm.assert_frame_equal(result, expected)
def test_rolling_on_multi_index_level(self):
| - [x] Closes https://github.com/pandas-dev/pandas/issues/32385
- [x] tests updated & passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32386 | 2020-03-01T15:52:32Z | 2020-03-11T01:51:04Z | 2020-03-11T01:51:03Z | 2020-03-11T01:54:20Z |
DOC: Minor typo fixes for code style guide | diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst
index 17f8783f71bfb..1b223cf5f026b 100644
--- a/doc/source/development/code_style.rst
+++ b/doc/source/development/code_style.rst
@@ -21,7 +21,7 @@ Patterns
foo.__class__
-------------
-*pandas* uses 'type(foo)' instead 'foo.__class__' as it is making the code more
+*pandas* uses 'type(foo)' instead 'foo.__class__' as it makes the code more
readable.
For example:
@@ -52,8 +52,8 @@ f-strings
*pandas* uses f-strings formatting instead of '%' and '.format()' string formatters.
-The convention of using f-strings on a string that is concatenated over serveral lines,
-is to prefix only the lines containing the value needs to be interpeted.
+The convention of using f-strings on a string that is concatenated over several lines,
+is to prefix only the lines containing values which need to be interpreted.
For example:
@@ -86,8 +86,8 @@ For example:
White spaces
~~~~~~~~~~~~
-Putting the white space only at the end of the previous line, so
-there is no whitespace at the beggining of the concatenated string.
+Only put white space at the end of the previous line, so
+there is no whitespace at the beginning of the concatenated string.
For example:
@@ -116,7 +116,7 @@ Representation function (aka 'repr()')
*pandas* uses 'repr()' instead of '%r' and '!r'.
-The use of 'repr()' will only happend when the value is not an obvious string.
+The use of 'repr()' will only happen when the value is not an obvious string.
For example:
@@ -138,7 +138,7 @@ For example:
Imports (aim for absolute)
==========================
-In Python 3, absolute imports are recommended. In absolute import doing something
+In Python 3, absolute imports are recommended. Using absolute imports, doing something
like ``import string`` will import the string module rather than ``string.py``
in the same directory. As much as possible, you should try to write out
absolute imports that show the whole import chain from top-level pandas.
| https://api.github.com/repos/pandas-dev/pandas/pulls/32379 | 2020-02-29T21:37:38Z | 2020-03-01T02:24:44Z | 2020-03-01T02:24:44Z | 2020-03-01T02:24:51Z | |
DOC: Minor typo fixes for code style guide | diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst
index 17f8783f71bfb..1b223cf5f026b 100644
--- a/doc/source/development/code_style.rst
+++ b/doc/source/development/code_style.rst
@@ -21,7 +21,7 @@ Patterns
foo.__class__
-------------
-*pandas* uses 'type(foo)' instead 'foo.__class__' as it is making the code more
+*pandas* uses 'type(foo)' instead 'foo.__class__' as it makes the code more
readable.
For example:
@@ -52,8 +52,8 @@ f-strings
*pandas* uses f-strings formatting instead of '%' and '.format()' string formatters.
-The convention of using f-strings on a string that is concatenated over serveral lines,
-is to prefix only the lines containing the value needs to be interpeted.
+The convention of using f-strings on a string that is concatenated over several lines,
+is to prefix only the lines containing values which need to be interpreted.
For example:
@@ -86,8 +86,8 @@ For example:
White spaces
~~~~~~~~~~~~
-Putting the white space only at the end of the previous line, so
-there is no whitespace at the beggining of the concatenated string.
+Only put white space at the end of the previous line, so
+there is no whitespace at the beginning of the concatenated string.
For example:
@@ -116,7 +116,7 @@ Representation function (aka 'repr()')
*pandas* uses 'repr()' instead of '%r' and '!r'.
-The use of 'repr()' will only happend when the value is not an obvious string.
+The use of 'repr()' will only happen when the value is not an obvious string.
For example:
@@ -138,7 +138,7 @@ For example:
Imports (aim for absolute)
==========================
-In Python 3, absolute imports are recommended. In absolute import doing something
+In Python 3, absolute imports are recommended. Using absolute imports, doing something
like ``import string`` will import the string module rather than ``string.py``
in the same directory. As much as possible, you should try to write out
absolute imports that show the whole import chain from top-level pandas.
| https://api.github.com/repos/pandas-dev/pandas/pulls/32378 | 2020-02-29T21:27:25Z | 2020-02-29T21:34:29Z | null | 2020-02-29T21:34:29Z | |
ENH: infer freq in timedelta_range | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 0f18a1fd81815..999dee83cf7e0 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -66,6 +66,7 @@ Other enhancements
- :class:`Styler` may now render CSS more efficiently where multiple cells have the same styling (:issue:`30876`)
- When writing directly to a sqlite connection :func:`to_sql` now supports the ``multi`` method (:issue:`29921`)
- `OptionError` is now exposed in `pandas.errors` (:issue:`27553`)
+- :func:`timedelta_range` will now infer a frequency when passed ``start``, ``stop``, and ``periods`` (:issue:`32377`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/_testing.py b/pandas/_testing.py
index fce06e216dfd7..b0f18cb6fdd39 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -742,9 +742,9 @@ def repr_class(x):
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
-def assert_attr_equal(attr, left, right, obj="Attributes"):
+def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
- checks attributes are equal. Both objects must have attribute.
+ Check attributes are equal. Both objects must have attribute.
Parameters
----------
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 81fc934748d3e..749489a0a04fb 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -39,6 +39,7 @@
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.arrays import datetimelike as dtl
import pandas.core.common as com
+from pandas.core.construction import extract_array
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import Tick
@@ -141,8 +142,7 @@ def dtype(self):
# Constructors
def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False):
- if isinstance(values, (ABCSeries, ABCIndexClass)):
- values = values._values
+ values = extract_array(values)
inferred_freq = getattr(values, "_freq", None)
@@ -258,6 +258,10 @@ def _generate_range(cls, start, end, periods, freq, closed=None):
index = _generate_regular_range(start, end, periods, freq)
else:
index = np.linspace(start.value, end.value, periods).astype("i8")
+ if len(index) >= 2:
+ # Infer a frequency
+ td = Timedelta(index[1] - index[0])
+ freq = to_offset(td)
if not left_closed:
index = index[1:]
@@ -614,6 +618,10 @@ def __floordiv__(self, other):
if self.freq is not None:
# Note: freq gets division, not floor-division
freq = self.freq / other
+ if freq.nanos == 0 and self.freq.nanos != 0:
+ # e.g. if self.freq is Nano(1) then dividing by 2
+ # rounds down to zero
+ freq = None
return type(self)(result.view("m8[ns]"), freq=freq)
if not hasattr(other, "dtype"):
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
index 9f12af9a96104..c07a6471c732f 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
@@ -38,6 +38,7 @@ def test_linspace_behavior(self, periods, freq):
result = timedelta_range(start="0 days", end="4 days", periods=periods)
expected = timedelta_range(start="0 days", end="4 days", freq=freq)
tm.assert_index_equal(result, expected)
+ assert result.freq == freq
def test_errors(self):
# not enough params
| Step in the direction of #31195. | https://api.github.com/repos/pandas-dev/pandas/pulls/32377 | 2020-02-29T21:24:08Z | 2020-03-03T01:44:14Z | 2020-03-03T01:44:14Z | 2020-03-03T12:57:33Z |
TYP: annotations for internals, set_axis | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f515b57e24cfa..61715397e8e0b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3599,7 +3599,7 @@ def align(
see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
- def set_axis(self, labels, axis=0, inplace=False):
+ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 25770c2c6470c..890c0540d9851 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -520,7 +520,7 @@ def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
- def set_axis(self, labels, axis=0, inplace=False):
+ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
"""
Assign desired index to given axis.
@@ -561,7 +561,8 @@ def set_axis(self, labels, axis=0, inplace=False):
obj.set_axis(labels, axis=axis, inplace=True)
return obj
- def _set_axis(self, axis, labels) -> None:
+ def _set_axis(self, axis: int, labels: Index) -> None:
+ labels = ensure_index(labels)
self._data.set_axis(axis, labels)
self._clear_item_cache()
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index e397167e4881f..9c90b20fc0f16 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -164,15 +164,15 @@ def __nonzero__(self):
__bool__ = __nonzero__
@property
- def shape(self):
+ def shape(self) -> Tuple[int, ...]:
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self) -> int:
return len(self.axes)
- def set_axis(self, axis, new_labels):
- new_labels = ensure_index(new_labels)
+ def set_axis(self, axis: int, new_labels: Index):
+ # Caller is responsible for ensuring we have an Index object.
old_len = len(self.axes[axis])
new_len = len(new_labels)
@@ -184,7 +184,9 @@ def set_axis(self, axis, new_labels):
self.axes[axis] = new_labels
- def rename_axis(self, mapper, axis, copy: bool = True, level=None):
+ def rename_axis(
+ self, mapper, axis: int, copy: bool = True, level=None
+ ) -> "BlockManager":
"""
Rename one of axes.
@@ -193,7 +195,7 @@ def rename_axis(self, mapper, axis, copy: bool = True, level=None):
mapper : unary callable
axis : int
copy : bool, default True
- level : int, default None
+ level : int or None, default None
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
@@ -233,7 +235,7 @@ def _rebuild_blknos_and_blklocs(self):
self._blklocs = new_blklocs
@property
- def items(self):
+ def items(self) -> Index:
return self.axes[0]
def _get_counts(self, f):
@@ -623,7 +625,7 @@ def comp(s, regex=False):
bm._consolidate_inplace()
return bm
- def is_consolidated(self):
+ def is_consolidated(self) -> bool:
"""
Return True if more than one block with the same dtype
"""
@@ -688,7 +690,7 @@ def get_numeric_data(self, copy: bool = False):
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
- def combine(self, blocks, copy=True):
+ def combine(self, blocks: List[Block], copy: bool = True) -> "BlockManager":
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
@@ -992,7 +994,6 @@ def delete(self, item):
self.blocks = tuple(
b for blkno, b in enumerate(self.blocks) if not is_blk_deleted[blkno]
)
- self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value):
@@ -1160,7 +1161,6 @@ def insert(self, loc: int, item, value, allow_duplicates: bool = False):
self.axes[0] = new_axis
self.blocks += (block,)
- self._shape = None
self._known_consolidated = False
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d984225f8fd89..d565cbbdd5344 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -9,7 +9,6 @@
TYPE_CHECKING,
Any,
Callable,
- Hashable,
Iterable,
List,
Optional,
@@ -23,7 +22,7 @@
from pandas._config import get_option
from pandas._libs import lib, properties, reshape, tslibs
-from pandas._typing import Label
+from pandas._typing import Axis, DtypeObj, Label
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, doc
from pandas.util._validators import validate_bool_kwarg, validate_percentile
@@ -177,7 +176,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_typ = "series"
- _name: Optional[Hashable]
+ _name: Label
_metadata: List[str] = ["name"]
_internal_names_set = {"index"} | generic.NDFrame._internal_names_set
_accessors = {"dt", "cat", "str", "sparse"}
@@ -391,9 +390,12 @@ def _can_hold_na(self):
_index = None
- def _set_axis(self, axis, labels, fastpath: bool = False) -> None:
+ def _set_axis(self, axis: int, labels, fastpath: bool = False) -> None:
"""
Override generic, we want to set the _typ here.
+
+ This is called from the cython code when we set the `index` attribute
+ directly, e.g. `series.index = [1, 2, 3]`.
"""
if not fastpath:
labels = ensure_index(labels)
@@ -413,6 +415,7 @@ def _set_axis(self, axis, labels, fastpath: bool = False) -> None:
object.__setattr__(self, "_index", labels)
if not fastpath:
+ # The ensure_index call aabove ensures we have an Index object
self._data.set_axis(axis, labels)
def _update_inplace(self, result, **kwargs):
@@ -421,25 +424,25 @@ def _update_inplace(self, result, **kwargs):
# ndarray compatibility
@property
- def dtype(self):
+ def dtype(self) -> DtypeObj:
"""
Return the dtype object of the underlying data.
"""
return self._data.dtype
@property
- def dtypes(self):
+ def dtypes(self) -> DtypeObj:
"""
Return the dtype object of the underlying data.
"""
return self._data.dtype
@property
- def name(self) -> Optional[Hashable]:
+ def name(self) -> Label:
return self._name
@name.setter
- def name(self, value: Optional[Hashable]) -> None:
+ def name(self, value: Label) -> None:
if not is_hashable(value):
raise TypeError("Series.name must be a hashable type")
object.__setattr__(self, "_name", value)
@@ -689,7 +692,7 @@ def __array_ufunc__(
inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
result = getattr(ufunc, method)(*inputs, **kwargs)
- name: Optional[Hashable]
+ name: Label
if len(set(names)) == 1:
name = names[0]
else:
@@ -3983,7 +3986,7 @@ def rename(
see_also_sub="",
)
@Appender(generic.NDFrame.set_axis.__doc__)
- def set_axis(self, labels, axis=0, inplace=False):
+ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
| https://api.github.com/repos/pandas-dev/pandas/pulls/32376 | 2020-02-29T20:40:04Z | 2020-03-02T17:27:46Z | 2020-03-02T17:27:46Z | 2020-03-02T17:32:28Z | |
REF/TST: misplaced DataFrame.join tests | diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index 321eb5fe94daf..7eba2b873c4f4 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -8,7 +8,7 @@
import pandas._testing as tm
-class TestDataFrameConcatCommon:
+class TestDataFrameConcat:
def test_concat_multiple_frames_dtypes(self):
# GH 2759
@@ -107,77 +107,6 @@ def test_concat_tuple_keys(self):
)
tm.assert_frame_equal(results, expected)
- def test_join_str_datetime(self):
- str_dates = ["20120209", "20120222"]
- dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
-
- A = DataFrame(str_dates, index=range(2), columns=["aa"])
- C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)
-
- tst = A.join(C, on="aa")
-
- assert len(tst.columns) == 3
-
- def test_join_multiindex_leftright(self):
- # GH 10741
- df1 = pd.DataFrame(
- [
- ["a", "x", 0.471780],
- ["a", "y", 0.774908],
- ["a", "z", 0.563634],
- ["b", "x", -0.353756],
- ["b", "y", 0.368062],
- ["b", "z", -1.721840],
- ["c", "x", 1],
- ["c", "y", 2],
- ["c", "z", 3],
- ],
- columns=["first", "second", "value1"],
- ).set_index(["first", "second"])
-
- df2 = pd.DataFrame(
- [["a", 10], ["b", 20]], columns=["first", "value2"]
- ).set_index(["first"])
-
- exp = pd.DataFrame(
- [
- [0.471780, 10],
- [0.774908, 10],
- [0.563634, 10],
- [-0.353756, 20],
- [0.368062, 20],
- [-1.721840, 20],
- [1.000000, np.nan],
- [2.000000, np.nan],
- [3.000000, np.nan],
- ],
- index=df1.index,
- columns=["value1", "value2"],
- )
-
- # these must be the same results (but columns are flipped)
- tm.assert_frame_equal(df1.join(df2, how="left"), exp)
- tm.assert_frame_equal(df2.join(df1, how="right"), exp[["value2", "value1"]])
-
- exp_idx = pd.MultiIndex.from_product(
- [["a", "b"], ["x", "y", "z"]], names=["first", "second"]
- )
- exp = pd.DataFrame(
- [
- [0.471780, 10],
- [0.774908, 10],
- [0.563634, 10],
- [-0.353756, 20],
- [0.368062, 20],
- [-1.721840, 20],
- ],
- index=exp_idx,
- columns=["value1", "value2"],
- )
-
- tm.assert_frame_equal(df1.join(df2, how="right"), exp)
- tm.assert_frame_equal(df2.join(df1, how="left"), exp[["value2", "value1"]])
-
def test_concat_named_keys(self):
# GH 14252
df = pd.DataFrame({"foo": [1, 2], "bar": [0.1, 0.2]})
diff --git a/pandas/tests/frame/test_join.py b/pandas/tests/frame/test_join.py
index 8c388a887158f..4d6e675c6765f 100644
--- a/pandas/tests/frame/test_join.py
+++ b/pandas/tests/frame/test_join.py
@@ -1,6 +1,9 @@
+from datetime import datetime
+
import numpy as np
import pytest
+import pandas as pd
from pandas import DataFrame, Index, period_range
import pandas._testing as tm
@@ -216,3 +219,76 @@ def test_suppress_future_warning_with_sort_kw(sort_kw):
with tm.assert_produces_warning(None, check_stacklevel=False):
result = a.join([b, c], how="outer", sort=sort_kw)
tm.assert_frame_equal(result, expected)
+
+
+class TestDataFrameJoin:
+ def test_join_str_datetime(self):
+ str_dates = ["20120209", "20120222"]
+ dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
+
+ A = DataFrame(str_dates, index=range(2), columns=["aa"])
+ C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)
+
+ tst = A.join(C, on="aa")
+
+ assert len(tst.columns) == 3
+
+ def test_join_multiindex_leftright(self):
+ # GH 10741
+ df1 = pd.DataFrame(
+ [
+ ["a", "x", 0.471780],
+ ["a", "y", 0.774908],
+ ["a", "z", 0.563634],
+ ["b", "x", -0.353756],
+ ["b", "y", 0.368062],
+ ["b", "z", -1.721840],
+ ["c", "x", 1],
+ ["c", "y", 2],
+ ["c", "z", 3],
+ ],
+ columns=["first", "second", "value1"],
+ ).set_index(["first", "second"])
+
+ df2 = pd.DataFrame(
+ [["a", 10], ["b", 20]], columns=["first", "value2"]
+ ).set_index(["first"])
+
+ exp = pd.DataFrame(
+ [
+ [0.471780, 10],
+ [0.774908, 10],
+ [0.563634, 10],
+ [-0.353756, 20],
+ [0.368062, 20],
+ [-1.721840, 20],
+ [1.000000, np.nan],
+ [2.000000, np.nan],
+ [3.000000, np.nan],
+ ],
+ index=df1.index,
+ columns=["value1", "value2"],
+ )
+
+ # these must be the same results (but columns are flipped)
+ tm.assert_frame_equal(df1.join(df2, how="left"), exp)
+ tm.assert_frame_equal(df2.join(df1, how="right"), exp[["value2", "value1"]])
+
+ exp_idx = pd.MultiIndex.from_product(
+ [["a", "b"], ["x", "y", "z"]], names=["first", "second"]
+ )
+ exp = pd.DataFrame(
+ [
+ [0.471780, 10],
+ [0.774908, 10],
+ [0.563634, 10],
+ [-0.353756, 20],
+ [0.368062, 20],
+ [-1.721840, 20],
+ ],
+ index=exp_idx,
+ columns=["value1", "value2"],
+ )
+
+ tm.assert_frame_equal(df1.join(df2, how="right"), exp)
+ tm.assert_frame_equal(df2.join(df1, how="left"), exp[["value2", "value1"]])
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index adb79f69c2d81..0766bfc37d7ca 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -5,7 +5,7 @@
from pandas import Series
-class TestSeriesCombine:
+class TestSeriesConcat:
@pytest.mark.parametrize(
"dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
)
| https://api.github.com/repos/pandas-dev/pandas/pulls/32375 | 2020-02-29T20:20:16Z | 2020-03-02T17:47:09Z | 2020-03-02T17:47:09Z | 2020-03-02T17:52:13Z | |
REF: collect+parametrize reorder_levels tests | diff --git a/pandas/conftest.py b/pandas/conftest.py
index be44e6c2b36da..d6b9576d7729f 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1057,3 +1057,17 @@ def index_or_series_obj(request):
copy to avoid mutation, e.g. setting .name
"""
return _index_or_series_objs[request.param].copy(deep=True)
+
+
+@pytest.fixture
+def multiindex_year_month_day_dataframe_random_data():
+ """
+ DataFrame with 3 level MultiIndex (year, month, day) covering
+ first 100 business days from 2000-01-01 with random data
+ """
+ tdf = tm.makeTimeDataFrame(100)
+ ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
+ # use Int64Index, to make sure things work
+ ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels], inplace=True)
+ ymd.index.set_names(["year", "month", "day"], inplace=True)
+ return ymd
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index 34df8bb57dd91..e37170d4155f8 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -689,44 +689,6 @@ def test_rename_axis_mapper(self):
with pytest.raises(TypeError, match="bogus"):
df.rename_axis(bogus=None)
- def test_reorder_levels(self):
- index = MultiIndex(
- levels=[["bar"], ["one", "two", "three"], [0, 1]],
- codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
- names=["L0", "L1", "L2"],
- )
- df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
-
- # no change, position
- result = df.reorder_levels([0, 1, 2])
- tm.assert_frame_equal(df, result)
-
- # no change, labels
- result = df.reorder_levels(["L0", "L1", "L2"])
- tm.assert_frame_equal(df, result)
-
- # rotate, position
- result = df.reorder_levels([1, 2, 0])
- e_idx = MultiIndex(
- levels=[["one", "two", "three"], [0, 1], ["bar"]],
- codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
- names=["L1", "L2", "L0"],
- )
- expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
- tm.assert_frame_equal(result, expected)
-
- result = df.reorder_levels([0, 0, 0])
- e_idx = MultiIndex(
- levels=[["bar"], ["bar"], ["bar"]],
- codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
- names=["L0", "L0", "L0"],
- )
- expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
- tm.assert_frame_equal(result, expected)
-
- result = df.reorder_levels(["L0", "L0", "L0"])
- tm.assert_frame_equal(result, expected)
-
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = "name"
diff --git a/pandas/tests/generic/methods/test_reorder_levels.py b/pandas/tests/generic/methods/test_reorder_levels.py
new file mode 100644
index 0000000000000..8bb6417e56659
--- /dev/null
+++ b/pandas/tests/generic/methods/test_reorder_levels.py
@@ -0,0 +1,73 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame, MultiIndex, Series
+import pandas._testing as tm
+
+
+class TestReorderLevels:
+ @pytest.mark.parametrize("klass", [Series, DataFrame])
+ def test_reorder_levels(self, klass):
+ index = MultiIndex(
+ levels=[["bar"], ["one", "two", "three"], [0, 1]],
+ codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
+ names=["L0", "L1", "L2"],
+ )
+ df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
+ obj = df if klass is DataFrame else df["A"]
+
+ # no change, position
+ result = obj.reorder_levels([0, 1, 2])
+ tm.assert_equal(obj, result)
+
+ # no change, labels
+ result = obj.reorder_levels(["L0", "L1", "L2"])
+ tm.assert_equal(obj, result)
+
+ # rotate, position
+ result = obj.reorder_levels([1, 2, 0])
+ e_idx = MultiIndex(
+ levels=[["one", "two", "three"], [0, 1], ["bar"]],
+ codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
+ names=["L1", "L2", "L0"],
+ )
+ expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
+ expected = expected if klass is DataFrame else expected["A"]
+ tm.assert_equal(result, expected)
+
+ result = obj.reorder_levels([0, 0, 0])
+ e_idx = MultiIndex(
+ levels=[["bar"], ["bar"], ["bar"]],
+ codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
+ names=["L0", "L0", "L0"],
+ )
+ expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
+ expected = expected if klass is DataFrame else expected["A"]
+ tm.assert_equal(result, expected)
+
+ result = obj.reorder_levels(["L0", "L0", "L0"])
+ tm.assert_equal(result, expected)
+
+ def test_reorder_levels_swaplevel_equivalence(
+ self, multiindex_year_month_day_dataframe_random_data
+ ):
+
+ ymd = multiindex_year_month_day_dataframe_random_data
+
+ result = ymd.reorder_levels(["month", "day", "year"])
+ expected = ymd.swaplevel(0, 1).swaplevel(1, 2)
+ tm.assert_frame_equal(result, expected)
+
+ result = ymd["A"].reorder_levels(["month", "day", "year"])
+ expected = ymd["A"].swaplevel(0, 1).swaplevel(1, 2)
+ tm.assert_series_equal(result, expected)
+
+ result = ymd.T.reorder_levels(["month", "day", "year"], axis=1)
+ expected = ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
+ tm.assert_frame_equal(result, expected)
+
+ with pytest.raises(TypeError, match="hierarchical axis"):
+ ymd.reorder_levels([1, 2], axis=1)
+
+ with pytest.raises(IndexError, match="Too many levels"):
+ ymd.index.reorder_levels([1, 2, 3])
diff --git a/pandas/tests/indexing/multiindex/conftest.py b/pandas/tests/indexing/multiindex/conftest.py
index 0256f5e35e1db..c69d6f86a6ce6 100644
--- a/pandas/tests/indexing/multiindex/conftest.py
+++ b/pandas/tests/indexing/multiindex/conftest.py
@@ -2,7 +2,6 @@
import pytest
from pandas import DataFrame, Index, MultiIndex
-import pandas._testing as tm
@pytest.fixture
@@ -16,17 +15,3 @@ def multiindex_dataframe_random_data():
return DataFrame(
np.random.randn(10, 3), index=index, columns=Index(["A", "B", "C"], name="exp")
)
-
-
-@pytest.fixture
-def multiindex_year_month_day_dataframe_random_data():
- """
- DataFrame with 3 level MultiIndex (year, month, day) covering
- first 100 business days from 2000-01-01 with random data
- """
- tdf = tm.makeTimeDataFrame(100)
- ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
- # use Int64Index, to make sure things work
- ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels], inplace=True)
- ymd.index.set_names(["year", "month", "day"], inplace=True)
- return ymd
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index f6ca93b0c2882..769d1ed877a69 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -54,32 +54,6 @@ def test_set_index_makes_timeseries(self):
s.index = idx
assert s.index.is_all_dates
- def test_reorder_levels(self):
- index = MultiIndex(
- levels=[["bar"], ["one", "two", "three"], [0, 1]],
- codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
- names=["L0", "L1", "L2"],
- )
- s = Series(np.arange(6), index=index)
-
- # no change, position
- result = s.reorder_levels([0, 1, 2])
- tm.assert_series_equal(s, result)
-
- # no change, labels
- result = s.reorder_levels(["L0", "L1", "L2"])
- tm.assert_series_equal(s, result)
-
- # rotate, position
- result = s.reorder_levels([1, 2, 0])
- e_idx = MultiIndex(
- levels=[["one", "two", "three"], [0, 1], ["bar"]],
- codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
- names=["L1", "L2", "L0"],
- )
- expected = Series(np.arange(6), index=e_idx)
- tm.assert_series_equal(result, expected)
-
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index e3cf46b466ae4..84279d874bae1 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -939,25 +939,6 @@ def test_swaplevel(self):
with pytest.raises(TypeError, match=msg):
DataFrame(range(3)).swaplevel()
- def test_reorder_levels(self):
- result = self.ymd.reorder_levels(["month", "day", "year"])
- expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
- tm.assert_frame_equal(result, expected)
-
- result = self.ymd["A"].reorder_levels(["month", "day", "year"])
- expected = self.ymd["A"].swaplevel(0, 1).swaplevel(1, 2)
- tm.assert_series_equal(result, expected)
-
- result = self.ymd.T.reorder_levels(["month", "day", "year"], axis=1)
- expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
- tm.assert_frame_equal(result, expected)
-
- with pytest.raises(TypeError, match="hierarchical axis"):
- self.ymd.reorder_levels([1, 2], axis=1)
-
- with pytest.raises(IndexError, match="Too many levels"):
- self.ymd.index.reorder_levels([1, 2, 3])
-
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
| https://api.github.com/repos/pandas-dev/pandas/pulls/32373 | 2020-02-29T18:58:41Z | 2020-03-02T11:18:37Z | 2020-03-02T11:18:36Z | 2020-03-02T15:30:15Z | |
TYP/CLN: Optional[Hashable] -> pandas._typing.Label | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 61641bfb24293..60d89b2076e92 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -890,7 +890,7 @@ def style(self) -> "Styler":
"""
@Appender(_shared_docs["items"])
- def items(self) -> Iterable[Tuple[Optional[Hashable], Series]]:
+ def items(self) -> Iterable[Tuple[Label, Series]]:
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
@@ -899,10 +899,10 @@ def items(self) -> Iterable[Tuple[Optional[Hashable], Series]]:
yield k, self._ixs(i, axis=1)
@Appender(_shared_docs["items"])
- def iteritems(self) -> Iterable[Tuple[Optional[Hashable], Series]]:
+ def iteritems(self) -> Iterable[Tuple[Label, Series]]:
yield from self.items()
- def iterrows(self) -> Iterable[Tuple[Optional[Hashable], Series]]:
+ def iterrows(self) -> Iterable[Tuple[Label, Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
@@ -4043,7 +4043,7 @@ def set_index(
"one-dimensional arrays."
)
- missing: List[Optional[Hashable]] = []
+ missing: List[Label] = []
for col in keys:
if isinstance(
col, (ABCIndexClass, ABCSeries, np.ndarray, list, abc.Iterator)
@@ -4082,7 +4082,7 @@ def set_index(
else:
arrays.append(self.index)
- to_remove: List[Optional[Hashable]] = []
+ to_remove: List[Label] = []
for col in keys:
if isinstance(col, ABCMultiIndex):
for n in range(col.nlevels):
@@ -4137,7 +4137,7 @@ def reset_index(
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
- col_fill: Optional[Hashable] = "",
+ col_fill: Label = "",
) -> Optional["DataFrame"]:
"""
Reset the index, or a level of it.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index f0147859cae97..866b58c1ffe3d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9729,7 +9729,7 @@ def describe_1d(data):
ldesc = [describe_1d(s) for _, s in data.items()]
# set a convenient order for rows
- names: List[Optional[Hashable]] = []
+ names: List[Label] = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 6f44b5abf5b04..7e391e7a03fbb 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1,7 +1,7 @@
from datetime import datetime
import operator
from textwrap import dedent
-from typing import TYPE_CHECKING, Any, FrozenSet, Hashable, Optional, Union
+from typing import TYPE_CHECKING, Any, FrozenSet, Hashable, Union
import warnings
import numpy as np
@@ -5546,7 +5546,7 @@ def default_index(n):
return RangeIndex(0, n, name=None)
-def maybe_extract_name(name, obj, cls) -> Optional[Hashable]:
+def maybe_extract_name(name, obj, cls) -> Label:
"""
If no name is passed, then extract it from data, validating hashability.
"""
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 06a180d4a096e..091129707228f 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -2,11 +2,11 @@
Concat routines.
"""
-from typing import Hashable, Iterable, List, Mapping, Optional, Union, overload
+from typing import Iterable, List, Mapping, Union, overload
import numpy as np
-from pandas._typing import FrameOrSeriesUnion
+from pandas._typing import FrameOrSeriesUnion, Label
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
@@ -32,7 +32,7 @@
@overload
def concat(
- objs: Union[Iterable["DataFrame"], Mapping[Optional[Hashable], "DataFrame"]],
+ objs: Union[Iterable["DataFrame"], Mapping[Label, "DataFrame"]],
axis=0,
join: str = "outer",
ignore_index: bool = False,
@@ -48,9 +48,7 @@ def concat(
@overload
def concat(
- objs: Union[
- Iterable[FrameOrSeriesUnion], Mapping[Optional[Hashable], FrameOrSeriesUnion]
- ],
+ objs: Union[Iterable[FrameOrSeriesUnion], Mapping[Label, FrameOrSeriesUnion]],
axis=0,
join: str = "outer",
ignore_index: bool = False,
@@ -65,9 +63,7 @@ def concat(
def concat(
- objs: Union[
- Iterable[FrameOrSeriesUnion], Mapping[Optional[Hashable], FrameOrSeriesUnion]
- ],
+ objs: Union[Iterable[FrameOrSeriesUnion], Mapping[Label, FrameOrSeriesUnion]],
axis=0,
join="outer",
ignore_index: bool = False,
@@ -536,7 +532,7 @@ def _get_concat_axis(self) -> Index:
idx = ibase.default_index(len(self.objs))
return idx
elif self.keys is None:
- names: List[Optional[Hashable]] = [None] * len(self.objs)
+ names: List[Label] = [None] * len(self.objs)
num = 0
has_names = False
for i, x in enumerate(self.objs):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 12164a4b8ff6b..bb4e222193608 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -692,11 +692,7 @@ def __array_ufunc__(
inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
result = getattr(ufunc, method)(*inputs, **kwargs)
- name: Label
- if len(set(names)) == 1:
- name = names[0]
- else:
- name = None
+ name = names[0] if len(set(names)) == 1 else None
def construct_return(result):
if lib.is_scalar(result):
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 168666ea21f45..7aeed5c316d7f 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -8,17 +8,7 @@
import itertools
import os
import re
-from typing import (
- TYPE_CHECKING,
- Any,
- Dict,
- Hashable,
- List,
- Optional,
- Tuple,
- Type,
- Union,
-)
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union
import warnings
import numpy as np
@@ -27,7 +17,7 @@
from pandas._libs import lib, writers as libwriters
from pandas._libs.tslibs import timezones
-from pandas._typing import ArrayLike, FrameOrSeries
+from pandas._typing import ArrayLike, FrameOrSeries, Label
from pandas.compat._optional import import_optional_dependency
from pandas.errors import PerformanceWarning
from pandas.util._decorators import cache_readonly
@@ -2811,7 +2801,7 @@ def read_multi_index(
levels = []
codes = []
- names: List[Optional[Hashable]] = []
+ names: List[Label] = []
for i in range(nlevels):
level_key = f"{key}_level{i}"
node = getattr(self.group, level_key)
@@ -2976,7 +2966,7 @@ class SeriesFixed(GenericFixed):
pandas_kind = "series"
attributes = ["name"]
- name: Optional[Hashable]
+ name: Label
@property
def shape(self):
| https://api.github.com/repos/pandas-dev/pandas/pulls/32371 | 2020-02-29T17:16:02Z | 2020-03-06T07:12:48Z | 2020-03-06T07:12:48Z | 2020-03-06T07:13:20Z | |
PR #32068 follow up - Print merged df result in doc | diff --git a/doc/source/user_guide/merging.rst b/doc/source/user_guide/merging.rst
index 8302b5c5dea60..49f4bbb6beb19 100644
--- a/doc/source/user_guide/merging.rst
+++ b/doc/source/user_guide/merging.rst
@@ -742,7 +742,7 @@ as shown in the following example.
)
ser
- result = pd.merge(df, ser.reset_index(), on=['Let', 'Num'])
+ pd.merge(df, ser.reset_index(), on=['Let', 'Num'])
Here is another example with duplicate join keys in DataFrames:
| - [x] follow up to PR #32068 which closed #12550 - small fix - see https://github.com/pandas-dev/pandas/pull/32068#discussion_r386029773
| https://api.github.com/repos/pandas-dev/pandas/pulls/32370 | 2020-02-29T14:47:52Z | 2020-02-29T17:17:48Z | 2020-02-29T17:17:48Z | 2020-02-29T19:10:05Z |
CLN: Some code cleanups in pandas/_libs/parsers.pyx | diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 3a42a64046abd..4f7d75e0aaad6 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -1596,8 +1596,6 @@ cdef _categorical_convert(parser_t *parser, int64_t col,
cdef _to_fw_string(parser_t *parser, int64_t col, int64_t line_start,
int64_t line_end, int64_t width):
cdef:
- Py_ssize_t i
- coliter_t it
const char *word = NULL
char *data
ndarray result
@@ -1642,15 +1640,11 @@ cdef _try_double(parser_t *parser, int64_t col,
bint na_filter, kh_str_starts_t *na_hashset, object na_flist):
cdef:
int error, na_count = 0
- Py_ssize_t i, lines
- coliter_t it
- const char *word = NULL
- char *p_end
+ Py_ssize_t lines
float64_t *data
float64_t NA = na_values[np.float64]
kh_float64_t *na_fset
ndarray result
- khiter_t k
bint use_na_flist = len(na_flist) > 0
lines = line_end - line_start
@@ -1685,7 +1679,7 @@ cdef inline int _try_double_nogil(parser_t *parser,
coliter_t it
const char *word = NULL
char *p_end
- khiter_t k, k64
+ khiter_t k64
na_count[0] = 0
coliter_setup(&it, parser, col, line_start)
@@ -1748,11 +1742,10 @@ cdef _try_uint64(parser_t *parser, int64_t col,
bint na_filter, kh_str_starts_t *na_hashset):
cdef:
int error
- Py_ssize_t i, lines
+ Py_ssize_t lines
coliter_t it
uint64_t *data
ndarray result
- khiter_t k
uint_state state
lines = line_end - line_start
@@ -1822,13 +1815,11 @@ cdef _try_int64(parser_t *parser, int64_t col,
bint na_filter, kh_str_starts_t *na_hashset):
cdef:
int error, na_count = 0
- Py_ssize_t i, lines
+ Py_ssize_t lines
coliter_t it
int64_t *data
ndarray result
-
int64_t NA = na_values[np.int64]
- khiter_t k
lines = line_end - line_start
result = np.empty(lines, dtype=np.int64)
@@ -1856,7 +1847,6 @@ cdef inline int _try_int64_nogil(parser_t *parser, int64_t col,
Py_ssize_t i, lines = line_end - line_start
coliter_t it
const char *word = NULL
- khiter_t k
na_count[0] = 0
coliter_setup(&it, parser, col, line_start)
@@ -1892,9 +1882,7 @@ cdef _try_bool_flex(parser_t *parser, int64_t col,
const kh_str_starts_t *false_hashset):
cdef:
int error, na_count = 0
- Py_ssize_t i, lines
- coliter_t it
- const char *word = NULL
+ Py_ssize_t lines
uint8_t *data
ndarray result
@@ -1926,7 +1914,6 @@ cdef inline int _try_bool_flex_nogil(parser_t *parser, int64_t col,
Py_ssize_t i, lines = line_end - line_start
coliter_t it
const char *word = NULL
- khiter_t k
na_count[0] = 0
coliter_setup(&it, parser, col, line_start)
@@ -1981,10 +1968,8 @@ cdef kh_str_starts_t* kset_from_list(list values) except NULL:
# caller takes responsibility for freeing the hash table
cdef:
Py_ssize_t i
- khiter_t k
kh_str_starts_t *table
int ret = 0
-
object val
table = kh_init_str_starts()
@@ -2012,7 +1997,6 @@ cdef kh_str_starts_t* kset_from_list(list values) except NULL:
cdef kh_float64_t* kset_float64_from_list(values) except NULL:
# caller takes responsibility for freeing the hash table
cdef:
- Py_ssize_t i
khiter_t k
kh_float64_t *table
int ret = 0
@@ -2150,7 +2134,6 @@ cdef _apply_converter(object f, parser_t *parser, int64_t col,
int64_t line_start, int64_t line_end,
char* c_encoding):
cdef:
- int error
Py_ssize_t i, lines
coliter_t it
const char *word = NULL
| There are __a lot__ of cdef unused variables in ```pandas/_libs/parsers.pyx``` this PR is covering *some* of the unused variables.
| https://api.github.com/repos/pandas-dev/pandas/pulls/32369 | 2020-02-29T14:20:09Z | 2020-03-16T23:55:58Z | 2020-03-16T23:55:58Z | 2020-03-20T00:06:43Z |
Silence warnings when compiling pandas/_libs/parsers.pyx | diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 3077f73a8d1a4..2fd227694800c 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -701,7 +701,7 @@ cdef class TextReader:
char *word
object name, old_name
int status
- uint64_t hr, data_line
+ uint64_t hr, data_line = 0
char *errors = "strict"
StringPath path = _string_path(self.c_encoding)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This is getting rid of this warning:
```
pandas/_libs/parsers.c: In function ‘__pyx_f_6pandas_5_libs_7parsers_10TextReader__get_header’:
pandas/_libs/parsers.c:9313:27: warning: ‘__pyx_v_data_line’ may be used uninitialized in this function [-Wmaybe-uninitialized]
9313 | __pyx_t_5numpy_uint64_t __pyx_v_data_line;
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/32368 | 2020-02-29T13:59:09Z | 2020-02-29T18:04:07Z | 2020-02-29T18:04:07Z | 2020-03-05T09:42:13Z |
CLN: some code cleanups to pandas/_libs/missing.pyx | diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index 4d17a6f883c1c..b0a24a9b2ebfe 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -10,10 +10,13 @@ cnp.import_array()
cimport pandas._libs.util as util
-from pandas._libs.tslibs.np_datetime cimport (
- get_timedelta64_value, get_datetime64_value)
+
+from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value
from pandas._libs.tslibs.nattype cimport (
- checknull_with_nat, c_NaT as NaT, is_null_datetimelike)
+ c_NaT as NaT,
+ checknull_with_nat,
+ is_null_datetimelike,
+)
from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
from pandas.compat import is_platform_32bit
@@ -44,7 +47,7 @@ cpdef bint checknull(object val):
Returns
-------
- result : bool
+ bool
Notes
-----
@@ -223,7 +226,7 @@ def isnaobj2d_old(arr: ndarray) -> ndarray:
Returns
-------
- result : ndarray (dtype=np.bool_)
+ ndarray (dtype=np.bool_)
Notes
-----
@@ -248,17 +251,11 @@ def isnaobj2d_old(arr: ndarray) -> ndarray:
def isposinf_scalar(val: object) -> bool:
- if util.is_float_object(val) and val == INF:
- return True
- else:
- return False
+ return util.is_float_object(val) and val == INF
def isneginf_scalar(val: object) -> bool:
- if util.is_float_object(val) and val == NEGINF:
- return True
- else:
- return False
+ return util.is_float_object(val) and val == NEGINF
cdef inline bint is_null_datetime64(v):
@@ -423,7 +420,6 @@ class NAType(C_NAType):
return NA
elif isinstance(other, np.ndarray):
return np.where(other == 1, other, NA)
-
return NotImplemented
# Logical ops using Kleene logic
@@ -433,8 +429,7 @@ class NAType(C_NAType):
return False
elif other is True or other is C_NA:
return NA
- else:
- return NotImplemented
+ return NotImplemented
__rand__ = __and__
@@ -443,8 +438,7 @@ class NAType(C_NAType):
return True
elif other is False or other is C_NA:
return NA
- else:
- return NotImplemented
+ return NotImplemented
__ror__ = __or__
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32367 | 2020-02-29T13:43:43Z | 2020-03-03T03:00:27Z | 2020-03-03T03:00:27Z | 2020-03-05T10:12:19Z |
Backport PR #32284 on branch 1.0.x (CI: nested DataFrames in npdev) | diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index ea1e339f44d93..61af02090f7db 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -9,6 +9,7 @@
import pytest
from pandas.compat import is_platform_little_endian
+from pandas.compat.numpy import _is_numpy_dev
from pandas.core.dtypes.common import is_integer_dtype
@@ -144,6 +145,7 @@ def test_constructor_dtype_list_data(self):
assert df.loc[1, 0] is None
assert df.loc[0, 1] == "2"
+ @pytest.mark.xfail(_is_numpy_dev, reason="Interprets list of frame as 3D")
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame()])
@@ -496,6 +498,7 @@ def test_constructor_error_msgs(self):
with pytest.raises(ValueError, match=msg):
DataFrame({"a": False, "b": True})
+ @pytest.mark.xfail(_is_numpy_dev, reason="Interprets embedded frame as 3D")
def test_constructor_with_embedded_frames(self):
# embedded data frames
| Backport PR #32284: CI: nested DataFrames in npdev | https://api.github.com/repos/pandas-dev/pandas/pulls/32366 | 2020-02-29T13:38:41Z | 2020-02-29T14:42:25Z | 2020-02-29T14:42:25Z | 2020-02-29T14:42:26Z |
CLN: Replaced "bool_t" with "builtins.bool" | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 866b58c1ffe3d..1b9cddab0df29 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1,3 +1,4 @@
+import builtins
import collections
from datetime import timedelta
import functools
@@ -154,9 +155,6 @@ def _single_replace(self, to_replace, method, inplace, limit):
return result
-bool_t = bool # Need alias because NDFrame has def bool:
-
-
class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
"""
N-dimensional analogue of DataFrame. Store multi-dimensional in a
@@ -1439,7 +1437,7 @@ def _is_level_reference(self, key, axis=0):
and not self._is_label_reference(key, axis=axis)
)
- def _is_label_reference(self, key, axis=0) -> bool_t:
+ def _is_label_reference(self, key, axis=0) -> builtins.bool:
"""
Test whether a key is a label reference for a given axis.
@@ -1468,7 +1466,7 @@ def _is_label_reference(self, key, axis=0) -> bool_t:
and any(key in self.axes[ax] for ax in other_axes)
)
- def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
+ def _is_label_or_level_reference(self, key: str, axis: int = 0) -> builtins.bool:
"""
Test whether a key is a label or level reference for a given axis.
@@ -1734,12 +1732,12 @@ def __len__(self) -> int:
"""Returns length of info axis"""
return len(self._info_axis)
- def __contains__(self, key) -> bool_t:
+ def __contains__(self, key) -> builtins.bool:
"""True if the key is in the info axis"""
return key in self._info_axis
@property
- def empty(self) -> bool_t:
+ def empty(self) -> builtins.bool:
"""
Indicator whether DataFrame is empty.
@@ -2075,12 +2073,12 @@ def to_json(
orient: Optional[str] = None,
date_format: Optional[str] = None,
double_precision: int = 10,
- force_ascii: bool_t = True,
+ force_ascii: builtins.bool = True,
date_unit: str = "ms",
default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
- lines: bool_t = False,
+ lines: builtins.bool = False,
compression: Optional[str] = "infer",
- index: bool_t = True,
+ index: builtins.bool = True,
indent: Optional[int] = None,
) -> Optional[str]:
"""
@@ -2257,12 +2255,12 @@ def to_hdf(
mode: str = "a",
complevel: Optional[int] = None,
complib: Optional[str] = None,
- append: bool_t = False,
+ append: builtins.bool = False,
format: Optional[str] = None,
- index: bool_t = True,
+ index: builtins.bool = True,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
- dropna: Optional[bool_t] = None,
+ dropna: Optional[builtins.bool] = None,
data_columns: Optional[List[str]] = None,
errors: str = "strict",
encoding: str = "UTF-8",
@@ -2397,7 +2395,7 @@ def to_sql(
con,
schema=None,
if_exists: str = "fail",
- index: bool_t = True,
+ index: builtins.bool = True,
index_label=None,
chunksize=None,
dtype=None,
@@ -2610,7 +2608,7 @@ def to_pickle(
to_pickle(self, path, compression=compression, protocol=protocol)
def to_clipboard(
- self, excel: bool_t = True, sep: Optional[str] = None, **kwargs
+ self, excel: builtins.bool = True, sep: Optional[str] = None, **kwargs
) -> None:
r"""
Copy object to the system clipboard.
@@ -2931,9 +2929,9 @@ def to_csv(
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
- header: Union[bool_t, List[str]] = True,
- index: bool_t = True,
- index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,
+ header: Union[builtins.bool, List[str]] = True,
+ index: builtins.bool = True,
+ index_label: Optional[Union[builtins.bool, str, Sequence[Label]]] = None,
mode: str = "w",
encoding: Optional[str] = None,
compression: Optional[Union[str, Mapping[str, str]]] = "infer",
@@ -2942,7 +2940,7 @@ def to_csv(
line_terminator: Optional[str] = None,
chunksize: Optional[int] = None,
date_format: Optional[str] = None,
- doublequote: bool_t = True,
+ doublequote: builtins.bool = True,
escapechar: Optional[str] = None,
decimal: Optional[str] = ".",
) -> Optional[str]:
@@ -3116,7 +3114,7 @@ def _maybe_cache_changed(self, item, value) -> None:
self._data.set(item, value)
@property
- def _is_cached(self) -> bool_t:
+ def _is_cached(self) -> builtins.bool:
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
@@ -3128,7 +3126,7 @@ def _get_cacher(self):
return cacher
def _maybe_update_cacher(
- self, clear: bool_t = False, verify_is_copy: bool_t = True
+ self, clear: builtins.bool = False, verify_is_copy: builtins.bool = True
) -> None:
"""
See if we need to update our parent cacher if clear, then clear our
@@ -3172,7 +3170,11 @@ def _clear_item_cache(self) -> None:
# Indexing Methods
def take(
- self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs
+ self: FrameOrSeries,
+ indices,
+ axis=0,
+ is_copy: Optional[builtins.bool] = None,
+ **kwargs,
) -> FrameOrSeries:
"""
Return the elements in the given *positional* indices along an axis.
@@ -3284,7 +3286,7 @@ def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
result._set_is_copy(self)
return result
- def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
+ def xs(self, key, axis=0, level=None, drop_level: builtins.bool = True):
"""
Return cross-section from the Series/DataFrame.
@@ -3487,14 +3489,14 @@ def _set_item(self, key, value) -> None:
self._data.set(key, value)
self._clear_item_cache()
- def _set_is_copy(self, ref, copy: bool_t = True) -> None:
+ def _set_is_copy(self, ref, copy: builtins.bool = True) -> None:
if not copy:
self._is_copy = None
else:
assert ref is not None
self._is_copy = weakref.ref(ref)
- def _check_is_chained_assignment_possible(self) -> bool_t:
+ def _check_is_chained_assignment_possible(self) -> builtins.bool:
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
@@ -3647,7 +3649,7 @@ def get(self, key, default=None):
return default
@property
- def _is_view(self) -> bool_t:
+ def _is_view(self) -> builtins.bool:
"""Return boolean indicating if self is view of another array """
return self._data.is_view
@@ -3655,7 +3657,7 @@ def reindex_like(
self: FrameOrSeries,
other,
method: Optional[str] = None,
- copy: bool_t = True,
+ copy: builtins.bool = True,
limit=None,
tolerance=None,
) -> FrameOrSeries:
@@ -3771,7 +3773,7 @@ def drop(
index=None,
columns=None,
level=None,
- inplace: bool_t = False,
+ inplace: builtins.bool = False,
errors: str = "raise",
):
@@ -3855,7 +3857,7 @@ def _drop_axis(
return result
- def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
+ def _update_inplace(self, result, verify_is_copy: builtins.bool = True) -> None:
"""
Replace self internals with result.
@@ -3994,10 +3996,10 @@ def sort_values(
self,
axis=0,
ascending=True,
- inplace: bool_t = False,
+ inplace: builtins.bool = False,
kind: str = "quicksort",
na_position: str = "last",
- ignore_index: bool_t = False,
+ ignore_index: builtins.bool = False,
):
"""
Sort by the values along either axis.
@@ -4364,7 +4366,7 @@ def _reindex_axes(
return obj
- def _needs_reindex_multi(self, axes, method, level) -> bool_t:
+ def _needs_reindex_multi(self, axes, method, level) -> builtins.bool:
"""Check if we do need a multi reindex."""
return (
(com.count_not_none(*axes.values()) == self._AXIS_LEN)
@@ -4380,8 +4382,8 @@ def _reindex_with_indexers(
self: FrameOrSeries,
reindexers,
fill_value=None,
- copy: bool_t = False,
- allow_dups: bool_t = False,
+ copy: builtins.bool = False,
+ allow_dups: builtins.bool = False,
) -> FrameOrSeries:
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
@@ -5142,7 +5144,7 @@ def f():
self._protect_consolidate(f)
- def _consolidate(self, inplace: bool_t = False):
+ def _consolidate(self, inplace: builtins.bool = False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
@@ -5165,16 +5167,16 @@ def _consolidate(self, inplace: bool_t = False):
return self._constructor(cons_data).__finalize__(self)
@property
- def _is_mixed_type(self) -> bool_t:
+ def _is_mixed_type(self) -> builtins.bool:
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
- def _is_numeric_mixed_type(self) -> bool_t:
+ def _is_numeric_mixed_type(self) -> builtins.bool:
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
- def _check_inplace_setting(self, value) -> bool_t:
+ def _check_inplace_setting(self, value) -> builtins.bool:
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
@@ -5333,7 +5335,7 @@ def dtypes(self):
return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
- def _to_dict_of_blocks(self, copy: bool_t = True):
+ def _to_dict_of_blocks(self, copy: builtins.bool = True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
@@ -5346,7 +5348,7 @@ def _to_dict_of_blocks(self, copy: bool_t = True):
}
def astype(
- self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise"
+ self: FrameOrSeries, dtype, copy: builtins.bool = True, errors: str = "raise"
) -> FrameOrSeries:
"""
Cast a pandas object to a specified dtype ``dtype``.
@@ -5488,7 +5490,7 @@ def astype(
result.columns = self.columns
return result
- def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
+ def copy(self: FrameOrSeries, deep: builtins.bool = True) -> FrameOrSeries:
"""
Make a copy of this object's indices and data.
@@ -5596,7 +5598,7 @@ def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
- def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
+ def __copy__(self: FrameOrSeries, deep: builtins.bool = True) -> FrameOrSeries:
return self.copy(deep=deep)
def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
@@ -5610,11 +5612,11 @@ def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
def _convert(
self: FrameOrSeries,
- datetime: bool_t = False,
- numeric: bool_t = False,
- timedelta: bool_t = False,
- coerce: bool_t = False,
- copy: bool_t = True,
+ datetime: builtins.bool = False,
+ numeric: builtins.bool = False,
+ timedelta: builtins.bool = False,
+ coerce: builtins.bool = False,
+ copy: builtins.bool = True,
) -> FrameOrSeries:
"""
Attempt to infer better dtype for object columns
@@ -5706,10 +5708,10 @@ def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
def convert_dtypes(
self: FrameOrSeries,
- infer_objects: bool_t = True,
- convert_string: bool_t = True,
- convert_integer: bool_t = True,
- convert_boolean: bool_t = True,
+ infer_objects: builtins.bool = True,
+ convert_string: builtins.bool = True,
+ convert_integer: builtins.bool = True,
+ convert_boolean: builtins.bool = True,
) -> FrameOrSeries:
"""
Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.
@@ -5848,7 +5850,7 @@ def fillna(
value=None,
method=None,
axis=None,
- inplace: bool_t = False,
+ inplace: builtins.bool = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
@@ -6035,7 +6037,7 @@ def fillna(
def ffill(
self: FrameOrSeries,
axis=None,
- inplace: bool_t = False,
+ inplace: builtins.bool = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
@@ -6054,7 +6056,7 @@ def ffill(
def bfill(
self: FrameOrSeries,
axis=None,
- inplace: bool_t = False,
+ inplace: builtins.bool = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
@@ -7138,7 +7140,7 @@ def notna(self: FrameOrSeries) -> FrameOrSeries:
def notnull(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
- def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
+ def _clip_with_scalar(self, lower, upper, inplace: builtins.bool = False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
@@ -7191,7 +7193,7 @@ def clip(
lower=None,
upper=None,
axis=None,
- inplace: bool_t = False,
+ inplace: builtins.bool = False,
*args,
**kwargs,
) -> FrameOrSeries:
@@ -7375,7 +7377,7 @@ def asfreq(
freq,
method=None,
how: Optional[str] = None,
- normalize: bool_t = False,
+ normalize: builtins.bool = False,
fill_value=None,
) -> FrameOrSeries:
"""
@@ -7480,7 +7482,7 @@ def asfreq(
)
def at_time(
- self: FrameOrSeries, time, asof: bool_t = False, axis=None
+ self: FrameOrSeries, time, asof: builtins.bool = False, axis=None
) -> FrameOrSeries:
"""
Select values at particular time of day (e.g., 9:30AM).
@@ -7541,8 +7543,8 @@ def between_time(
self: FrameOrSeries,
start_time,
end_time,
- include_start: bool_t = True,
- include_end: bool_t = True,
+ include_start: builtins.bool = True,
+ include_end: builtins.bool = True,
axis=None,
) -> FrameOrSeries:
"""
@@ -8040,10 +8042,10 @@ def rank(
self: FrameOrSeries,
axis=0,
method: str = "average",
- numeric_only: Optional[bool_t] = None,
+ numeric_only: Optional[builtins.bool] = None,
na_option: str = "keep",
- ascending: bool_t = True,
- pct: bool_t = False,
+ ascending: builtins.bool = True,
+ pct: builtins.bool = False,
) -> FrameOrSeries:
"""
Compute numerical data ranks (1 through n) along axis.
@@ -8295,7 +8297,7 @@ def _align_frame(
join="outer",
axis=None,
level=None,
- copy: bool_t = True,
+ copy: builtins.bool = True,
fill_value=None,
method=None,
limit=None,
@@ -8357,7 +8359,7 @@ def _align_series(
join="outer",
axis=None,
level=None,
- copy: bool_t = True,
+ copy: builtins.bool = True,
fill_value=None,
method=None,
limit=None,
@@ -8947,7 +8949,11 @@ def tshift(
return self._constructor(new_data).__finalize__(self)
def truncate(
- self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
+ self: FrameOrSeries,
+ before=None,
+ after=None,
+ axis=None,
+ copy: builtins.bool = True,
) -> FrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
@@ -9101,7 +9107,7 @@ def truncate(
return result
def tz_convert(
- self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
+ self: FrameOrSeries, tz, axis=0, level=None, copy: builtins.bool = True
) -> FrameOrSeries:
"""
Convert tz-aware axis to target time zone.
@@ -9162,7 +9168,7 @@ def tz_localize(
tz,
axis=0,
level=None,
- copy: bool_t = True,
+ copy: builtins.bool = True,
ambiguous="raise",
nonexistent: str = "raise",
) -> FrameOrSeries:
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32365 | 2020-02-29T12:58:10Z | 2020-03-11T15:43:50Z | null | 2020-03-14T14:08:43Z |
TST: Removed import of itertools | diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index c402ca194648f..83080aa98648f 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1,7 +1,6 @@
import builtins
import datetime as dt
from io import StringIO
-from itertools import product
from string import ascii_lowercase
import numpy as np
@@ -1296,36 +1295,32 @@ def __eq__(self, other):
# --------------------------------
-def test_size(df):
- grouped = df.groupby(["A", "B"])
+@pytest.mark.parametrize("by", ["A", "B", ["A", "B"]])
+def test_size(df, by):
+ grouped = df.groupby(by=by)
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
- grouped = df.groupby("A")
- result = grouped.size()
- for key, group in grouped:
- assert result[key] == len(group)
- grouped = df.groupby("B")
- result = grouped.size()
- for key, group in grouped:
- assert result[key] == len(group)
+@pytest.mark.parametrize("by", ["A", "B", ["A", "B"]])
+@pytest.mark.parametrize("sort", [True, False])
+def test_size_sort(df, sort, by):
+ df = DataFrame(np.random.choice(20, (1000, 3)), columns=list("ABC"))
+ left = df.groupby(by=by, sort=sort).size()
+ right = df.groupby(by=by, sort=sort)["C"].apply(lambda a: a.shape[0])
+ tm.assert_series_equal(left, right, check_names=False)
- df = DataFrame(np.random.choice(20, (1000, 3)), columns=list("abc"))
- for sort, key in product((False, True), ("a", "b", ["a", "b"])):
- left = df.groupby(key, sort=sort).size()
- right = df.groupby(key, sort=sort)["c"].apply(lambda a: a.shape[0])
- tm.assert_series_equal(left, right, check_names=False)
- # GH11699
+def test_size_series_dataframe():
+ # https://github.com/pandas-dev/pandas/issues/11699
df = DataFrame(columns=["A", "B"])
out = Series(dtype="int64", index=Index([], name="A"))
tm.assert_series_equal(df.groupby("A").size(), out)
def test_size_groupby_all_null():
- # GH23050
+ # https://github.com/pandas-dev/pandas/issues/23050
# Assert no 'Value Error : Length of passed values is 2, index implies 0'
df = DataFrame({"A": [None, None]}) # all-null groups
result = df.groupby("A").size()
@@ -1335,6 +1330,8 @@ def test_size_groupby_all_null():
# quantile
# --------------------------------
+
+
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32364 | 2020-02-29T12:27:05Z | 2020-03-07T12:01:24Z | 2020-03-07T12:01:24Z | 2020-03-07T12:09:19Z |
TYP/cln: generic._make_*_function | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index f7eb79a4f1c78..e36eace1e42e6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9899,37 +9899,37 @@ def _add_numeric_operations(cls):
"""
Add the operations to the cls; evaluate the doc strings again
"""
- axis_descr, name, name2 = _doc_parms(cls)
+ axis_descr, name1, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls,
"any",
- name,
- name2,
- axis_descr,
- _any_desc,
- nanops.nanany,
- _any_see_also,
- _any_examples,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc=_any_desc,
+ func=nanops.nanany,
+ see_also=_any_see_also,
+ examples=_any_examples,
empty_value=False,
)
cls.all = _make_logical_function(
cls,
"all",
- name,
- name2,
- axis_descr,
- _all_desc,
- nanops.nanall,
- _all_see_also,
- _all_examples,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc=_all_desc,
+ func=nanops.nanall,
+ see_also=_all_see_also,
+ examples=_all_examples,
empty_value=True,
)
@Substitution(
desc="Return the mean absolute deviation of the values "
"for the requested axis.",
- name1=name,
+ name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
@@ -9957,177 +9957,177 @@ def mad(self, axis=None, skipna=None, level=None):
cls.sem = _make_stat_function_ddof(
cls,
"sem",
- name,
- name2,
- axis_descr,
- "Return unbiased standard error of the mean over requested "
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
- nanops.nansem,
+ func=nanops.nansem,
)
cls.var = _make_stat_function_ddof(
cls,
"var",
- name,
- name2,
- axis_descr,
- "Return unbiased variance over requested axis.\n\nNormalized by "
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
- nanops.nanvar,
+ func=nanops.nanvar,
)
cls.std = _make_stat_function_ddof(
cls,
"std",
- name,
- name2,
- axis_descr,
- "Return sample standard deviation over requested axis."
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
- nanops.nanstd,
+ func=nanops.nanstd,
)
cls.cummin = _make_cum_function(
cls,
"cummin",
- name,
- name2,
- axis_descr,
- "minimum",
- np.minimum.accumulate,
- "min",
- np.inf,
- np.nan,
- _cummin_examples,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="minimum",
+ accum_func=np.minimum.accumulate,
+ accum_func_name="min",
+ mask_a=np.inf,
+ mask_b=np.nan,
+ examples=_cummin_examples,
)
cls.cumsum = _make_cum_function(
cls,
"cumsum",
- name,
- name2,
- axis_descr,
- "sum",
- np.cumsum,
- "sum",
- 0.0,
- np.nan,
- _cumsum_examples,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="sum",
+ accum_func=np.cumsum,
+ accum_func_name="sum",
+ mask_a=0.0,
+ mask_b=np.nan,
+ examples=_cumsum_examples,
)
cls.cumprod = _make_cum_function(
cls,
"cumprod",
- name,
- name2,
- axis_descr,
- "product",
- np.cumprod,
- "prod",
- 1.0,
- np.nan,
- _cumprod_examples,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="product",
+ accum_func=np.cumprod,
+ accum_func_name="prod",
+ mask_a=1.0,
+ mask_b=np.nan,
+ examples=_cumprod_examples,
)
cls.cummax = _make_cum_function(
cls,
"cummax",
- name,
- name2,
- axis_descr,
- "maximum",
- np.maximum.accumulate,
- "max",
- -np.inf,
- np.nan,
- _cummax_examples,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="maximum",
+ accum_func=np.maximum.accumulate,
+ accum_func_name="max",
+ mask_a=-np.inf,
+ mask_b=np.nan,
+ examples=_cummax_examples,
)
cls.sum = _make_min_count_stat_function(
cls,
"sum",
- name,
- name2,
- axis_descr,
- """Return the sum of the values for the requested axis.\n
- This is equivalent to the method ``numpy.sum``.""",
- nanops.nansum,
- _stat_func_see_also,
- _sum_examples,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="Return the sum of the values for the requested axis.\n\n"
+ "This is equivalent to the method ``numpy.sum``.",
+ func=nanops.nansum,
+ see_also=_stat_func_see_also,
+ examples=_sum_examples,
)
cls.mean = _make_stat_function(
cls,
"mean",
- name,
- name2,
- axis_descr,
- "Return the mean of the values for the requested axis.",
- nanops.nanmean,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="Return the mean of the values for the requested axis.",
+ func=nanops.nanmean,
)
cls.skew = _make_stat_function(
cls,
"skew",
- name,
- name2,
- axis_descr,
- "Return unbiased skew over requested axis.\n\nNormalized by N-1.",
- nanops.nanskew,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.",
+ func=nanops.nanskew,
)
cls.kurt = _make_stat_function(
cls,
"kurt",
- name,
- name2,
- axis_descr,
- "Return unbiased kurtosis over requested axis.\n\n"
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="Return unbiased kurtosis over requested axis.\n\n"
"Kurtosis obtained using Fisher's definition of\n"
"kurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
- nanops.nankurt,
+ func=nanops.nankurt,
)
cls.kurtosis = cls.kurt
cls.prod = _make_min_count_stat_function(
cls,
"prod",
- name,
- name2,
- axis_descr,
- "Return the product of the values for the requested axis.",
- nanops.nanprod,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="Return the product of the values for the requested axis.",
+ func=nanops.nanprod,
examples=_prod_examples,
)
cls.product = cls.prod
cls.median = _make_stat_function(
cls,
"median",
- name,
- name2,
- axis_descr,
- "Return the median of the values for the requested axis.",
- nanops.nanmedian,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="Return the median of the values for the requested axis.",
+ func=nanops.nanmedian,
)
cls.max = _make_stat_function(
cls,
"max",
- name,
- name2,
- axis_descr,
- """Return the maximum of the values for the requested axis.\n
- If you want the *index* of the maximum, use ``idxmax``. This is
- the equivalent of the ``numpy.ndarray`` method ``argmax``.""",
- nanops.nanmax,
- _stat_func_see_also,
- _max_examples,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="Return the maximum of the values for the requested axis.\n\n"
+ "If you want the *index* of the maximum, use ``idxmax``. This is"
+ "the equivalent of the ``numpy.ndarray`` method ``argmax``.",
+ func=nanops.nanmax,
+ see_also=_stat_func_see_also,
+ examples=_max_examples,
)
cls.min = _make_stat_function(
cls,
"min",
- name,
- name2,
- axis_descr,
- """Return the minimum of the values for the requested axis.\n
- If you want the *index* of the minimum, use ``idxmin``. This is
- the equivalent of the ``numpy.ndarray`` method ``argmin``.""",
- nanops.nanmin,
- _stat_func_see_also,
- _min_examples,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ desc="Return the minimum of the values for the requested axis.\n\n"
+ "If you want the *index* of the minimum, use ``idxmin``. This is"
+ "the equivalent of the ``numpy.ndarray`` method ``argmin``.",
+ func=nanops.nanmin,
+ see_also=_stat_func_see_also,
+ examples=_min_examples,
)
@classmethod
@@ -10947,8 +10947,16 @@ def _doc_parms(cls):
def _make_min_count_stat_function(
- cls, name, name1, name2, axis_descr, desc, f, see_also: str = "", examples: str = ""
-):
+ cls,
+ name: str,
+ name1: str,
+ name2: str,
+ axis_descr: str,
+ desc: str,
+ func: Callable,
+ see_also: str = "",
+ examples: str = "",
+) -> Callable:
@Substitution(
desc=desc,
name1=name1,
@@ -10983,8 +10991,8 @@ def stat_func(
name, axis=axis, level=level, skipna=skipna, min_count=min_count
)
return self._reduce(
- f,
- name,
+ func,
+ name=name,
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
@@ -10995,8 +11003,16 @@ def stat_func(
def _make_stat_function(
- cls, name, name1, name2, axis_descr, desc, f, see_also: str = "", examples: str = ""
-):
+ cls,
+ name: str,
+ name1: str,
+ name2: str,
+ axis_descr: str,
+ desc: str,
+ func: Callable,
+ see_also: str = "",
+ examples: str = "",
+) -> Callable:
@Substitution(
desc=desc,
name1=name1,
@@ -11021,13 +11037,15 @@ def stat_func(
if level is not None:
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
- f, name, axis=axis, skipna=skipna, numeric_only=numeric_only
+ func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only
)
return set_function_name(stat_func, name, cls)
-def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):
+def _make_stat_function_ddof(
+ cls, name: str, name1: str, name2: str, axis_descr: str, desc: str, func: Callable
+) -> Callable:
@Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(
@@ -11043,7 +11061,7 @@ def stat_func(
name, axis=axis, level=level, skipna=skipna, ddof=ddof
)
return self._reduce(
- f, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
+ func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
)
return set_function_name(stat_func, name, cls)
@@ -11051,17 +11069,17 @@ def stat_func(
def _make_cum_function(
cls,
- name,
- name1,
- name2,
- axis_descr,
- desc,
- accum_func,
- accum_func_name,
- mask_a,
- mask_b,
- examples,
-):
+ name: str,
+ name1: str,
+ name2: str,
+ axis_descr: str,
+ desc: str,
+ accum_func: Callable,
+ accum_func_name: str,
+ mask_a: float,
+ mask_b: float,
+ examples: str,
+) -> Callable:
@Substitution(
desc=desc,
name1=name1,
@@ -11145,8 +11163,17 @@ def na_accum_func(blk_values):
def _make_logical_function(
- cls, name, name1, name2, axis_descr, desc, f, see_also, examples, empty_value
-):
+ cls,
+ name: str,
+ name1: str,
+ name2: str,
+ axis_descr: str,
+ desc: str,
+ func: Callable,
+ see_also: str,
+ examples: str,
+ empty_value: bool,
+) -> Callable:
@Substitution(
desc=desc,
name1=name1,
@@ -11166,8 +11193,8 @@ def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs
)
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
- f,
- name,
+ func,
+ name=name,
axis=axis,
skipna=skipna,
numeric_only=bool_only,
| Give calls to these funcs named parameters for better clarity + type up the make_stats functions.
| https://api.github.com/repos/pandas-dev/pandas/pulls/32363 | 2020-02-29T08:57:49Z | 2020-03-04T08:19:31Z | 2020-03-04T08:19:31Z | 2020-03-04T08:53:31Z |
DOC: Fixed errors in pandas.DataFrame.asfreq PR07, RT02, RT03, SA04 | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e6c5ac9dbf733..1ec5391a2d00f 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7485,6 +7485,7 @@ def asfreq(
Parameters
----------
freq : DateOffset or str
+ Frequency DateOffset or string.
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
@@ -7502,11 +7503,12 @@ def asfreq(
Returns
-------
- converted : same type as caller
+ Same type as caller
+ Object converted to the specified frequency.
See Also
--------
- reindex
+ reindex : Conform DataFrame to new index with optional filling logic.
Notes
-----
| - [X] closes https://github.com/pandanistas/pandanistas_sprint_ui2020/issues/19
- [ ] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Output of `python scripts/validate_docstrings.py pandas.DataFrame.asfreq`:
```
################################################################################
##################### Docstring (pandas.DataFrame.asfreq) #####################
################################################################################
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset or str
Frequency DateOffset or string.
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill.
how : {'start', 'end'}, default end
For PeriodIndex only (see PeriodIndex.asfreq).
normalize : bool, default False
Whether to reset output index to midnight.
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
converted
Same type as caller.
See Also
--------
reindex : Conform DataFrame to new index with optional filling logic.
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
################################################################################
################################## Validation ##################################
################################################################################
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/32362 | 2020-02-29T07:00:18Z | 2020-03-11T02:00:36Z | 2020-03-11T02:00:36Z | 2020-03-11T04:03:23Z |
Changed kind parameter from integer to int, Added example | diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 542cfd334b810..549606795f528 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -255,6 +255,16 @@ class SparseArray(PandasObject, ExtensionArray, ExtensionOpsMixin):
Methods
-------
None
+
+ Examples
+ --------
+ >>> from pandas.arrays import SparseArray
+ >>> arr = SparseArray([0, 0, 1, 2])
+ >>> arr
+ [0, 0, 1, 2]
+ Fill: 0
+ IntIndex
+ Indices: array([2, 3], dtype=int32)
"""
_pandas_ftype = "sparse"
| - [x] closes [Fix PR06 error in pandas.arrays.SparseArray](https://github.com/pandanistas/pandanistas_sprint_ui2020/issues/1)
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
```
################################################################################
################################## Validation ##################################
################################################################################
3 Errors found:
Parameter "sparse_index" has no description
Parameter "index" has no description
See Also section not found
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/32361 | 2020-02-29T06:52:17Z | 2020-02-29T17:43:46Z | 2020-02-29T17:43:46Z | 2020-02-29T17:43:54Z |
DOC: Update the pandas.DatetimeIndex docstring | diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index c9fefd46e55c7..46824e0be6c28 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -78,21 +78,26 @@ def _new_DatetimeIndex(cls, d):
)
class DatetimeIndex(DatetimeTimedeltaMixin):
"""
- Immutable ndarray of datetime64 data, represented internally as int64, and
- which can be boxed to Timestamp objects that are subclasses of datetime and
- carry metadata such as frequency information.
+ Immutable ndarray-like of datetime64 data.
+
+ Represented internally as int64, and which can be boxed to Timestamp objects
+ that are subclasses of datetime and carry metadata.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with.
- copy : bool
- Make a copy of input ndarray.
freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation.
- tz : pytz.timezone or dateutil.tz.tzfile
+ tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str
+ Set the Timezone of the data.
+ normalize : bool, default False
+ Normalize start/end dates to midnight before generating date range.
+ closed : {'left', 'right'}, optional
+ Set whether to include `start` and `end` that are on the
+ boundary. The default includes boundary points on either end.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from 03:00
@@ -107,12 +112,16 @@ class DatetimeIndex(DatetimeTimedeltaMixin):
times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times.
- name : object
- Name to be stored in the index.
dayfirst : bool, default False
If True, parse dates in `data` with the day first order.
yearfirst : bool, default False
If True parse dates in `data` with the year first order.
+ dtype : numpy.dtype or DatetimeTZDtype or str, default None
+ Note that the only NumPy dtype allowed is ‘datetime64[ns]’.
+ copy : bool, default False
+ Make a copy of input ndarray.
+ name : label, default None
+ Name to be stored in the index.
Attributes
----------
| - [x] closes https://github.com/pandanistas/pandanistas_sprint_ui2020/issues/7
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Output of ```python scripts/validate_docstrings.py pandas.DatetimeIndex```
```
################################################################################
################################## Validation ##################################
################################################################################
1 Errors found:
No examples section found
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/32360 | 2020-02-29T06:43:41Z | 2020-03-07T21:01:14Z | 2020-03-07T21:01:14Z | 2020-03-09T09:22:50Z |
Fixing RT02 pandas.Index.dropna and PR08 pandas.Index.fillna | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 7e391e7a03fbb..5555b99a0ef88 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2163,7 +2163,7 @@ def dropna(self, how="any"):
Returns
-------
- valid : Index
+ Index
"""
if how not in ("any", "all"):
raise ValueError(f"invalid how option: {how}")
| - [x] closes https://github.com/pandanistas/pandanistas_sprint_ui2020/issues/17
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32359 | 2020-02-29T06:19:05Z | 2020-03-06T17:00:10Z | 2020-03-06T17:00:10Z | 2020-03-06T17:00:16Z |
DOC: Fixed PR09 error in pandas.testing.assert_series_equal | diff --git a/pandas/_testing.py b/pandas/_testing.py
index a70f75d6cfaf4..fce06e216dfd7 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -1106,7 +1106,7 @@ def assert_series_equal(
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
- Whether to compare category order of internal Categoricals
+ Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
obj : str, default 'Series'
| - [X] closes https://github.com/pandanistas/pandanistas_sprint_ui2020/issues/15
- [ ] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Output of `python scripts/validate_docstrings.py pandas.HDFStore.put`:
```
################################################################################
####################### Docstring (pandas.HDFStore.put) #######################
################################################################################
Store object in HDFStore.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable.
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data.
append : bool, default False
This will force Table format, append the input data to the
existing.
data_columns : list, default None
List of columns to create as data columns, or True to
use all columns. See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
encoding : str, default None
Provide an encoding for strings.
dropna : bool, default False, do not write an ALL nan row to
The store settable by the option 'io.hdf.dropna_table'.
################################################################################
################################## Validation ##################################
################################################################################
8 Errors found:
No extended summary found
Parameters {'min_itemsize', 'index', 'complevel', 'errors', 'complib', 'append', 'nan_rep'} not documented
Unknown parameters {'dropna ', 'append '}
Parameter "key" has no description
Parameter "value" has no description
Parameter "format" description should start with a capital letter
See Also section not found
No examples section found
(pandas-dev) D:\Keluarga\Tolhas\Kuliah\Semester 4\DSC Pandas\pandas-tolhassianipar>python scripts/validate_docstrings.py pandas.HDFStore.put
################################################################################
####################### Docstring (pandas.HDFStore.put) #######################
################################################################################
Store object in HDFStore.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable.
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data.
append : bool, default False
This will force Table format, append the input data to the
existing.
data_columns : list, default None
List of columns to create as data columns, or True to
use all columns. See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
encoding : str, default None
Provide an encoding for strings.
dropna : bool, default False, do not write an ALL nan row to
The store settable by the option 'io.hdf.dropna_table'.
################################################################################
################################## Validation ##################################
################################################################################
8 Errors found:
No extended summary found
Parameters {'errors', 'index', 'nan_rep', 'append', 'min_itemsize', 'complevel', 'complib'} not documented
Unknown parameters {'append ', 'dropna '}
Parameter "key" has no description
Parameter "value" has no description
Parameter "format" description should start with a capital letter
See Also section not found
No examples section found
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/32358 | 2020-02-29T05:54:48Z | 2020-02-29T17:55:18Z | 2020-02-29T17:55:18Z | 2020-03-04T08:55:38Z |
DOC: Update the pandas.Index.is_ docstring | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index ae2387f0fd7b4..47c759ac9614b 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -545,11 +545,17 @@ def is_(self, other) -> bool:
Parameters
----------
other : object
- other object to compare against.
+ Other object to compare against.
Returns
-------
- True if both have same underlying data, False otherwise : bool
+ bool
+ Boolean indicating if both object have same underlying data,
+ False otherwise.
+
+ See Also
+ --------
+ Index.identical : Works like Index.is_ but also check metadata.
"""
# use something other than None to be clearer
return self._id is getattr(other, "_id", Ellipsis) and self._id is not None
| - [x] closes https://github.com/pandanistas/pandanistas_sprint_ui2020/issues/6
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Output of `python scripts/validate_docstrings.py pandas.Index.is_`:
```
################################################################################
################################## Validation ##################################
################################################################################
1 Errors found:
No examples section found
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/32357 | 2020-02-29T05:51:37Z | 2020-03-10T14:36:28Z | null | 2020-06-20T08:24:55Z |
DOC: Fixed ES01, PR07, SA04 error in pandas.core.groupby.DataFrameGroupBy.shift | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 48c00140461b5..6362f11a3e032 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -2312,11 +2312,12 @@ def _get_cythonized_result(
return self._wrap_transformed_output(output)
@Substitution(name="groupby")
- @Appender(_common_see_also)
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
"""
Shift each group by periods observations.
+ If freq is passed, the index will be increased using the periods and the freq.
+
Parameters
----------
periods : int, default 1
@@ -2324,7 +2325,9 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None):
freq : str, optional
Frequency string.
axis : axis to shift, default 0
+ Shift direction.
fill_value : optional
+ The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
@@ -2332,6 +2335,12 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None):
-------
Series or DataFrame
Object shifted within each group.
+
+ See Also
+ --------
+ Index.shift : Shift values of Index.
+ tshift : Shift the time index, using the index’s frequency
+ if available.
"""
if freq is not None or axis != 0 or not isna(fill_value):
return self.apply(lambda x: x.shift(periods, freq, axis, fill_value))
| - [x] closes: https://github.com/pandanistas/pandanistas_sprint_ui2020/issues/14
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Output of python scripts/validate_docstrings.py pandas.core.groupby.DataFrameGroupBy.shift:
```
################################################################################
################################## Validation ##################################
################################################################################
1 Errors found:
No examples section found
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/32356 | 2020-02-29T05:48:55Z | 2020-03-02T22:52:15Z | 2020-03-02T22:52:15Z | 2020-03-03T14:26:56Z |
Fix PR08, RT02, RT03, and SA01 on pandas.Index.fillna | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index aa22527d8c2d7..d2887c6652635 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2126,13 +2126,18 @@ def fillna(self, value=None, downcast=None):
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-likes.
downcast : dict, default is None
- a dict of item->dtype of what to downcast if possible,
+ A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
- filled : Index
+ Index
+
+ See Also
+ --------
+ DataFrame.fillna : Fill NaN values of a DataFrame.
+ Series.fillna : Fill NaN Values of a Series.
"""
self._assert_can_do_op(value)
if self.hasnans:
| - [x] closes #https://github.com/pandanistas/pandanistas_sprint_ui2020/issues/5
- [ ] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32355 | 2020-02-29T05:15:36Z | 2020-03-04T15:41:32Z | 2020-03-04T15:41:32Z | 2020-03-04T15:41:38Z |
DOC : fix errors docstrings pandas.to_numeric | diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index 4939cbfc9cc96..8075c69a614d5 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -35,6 +35,7 @@ def to_numeric(arg, errors="raise", downcast=None):
Parameters
----------
arg : scalar, list, tuple, 1-d array, or Series
+ Argument to be converted.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaN.
@@ -61,7 +62,8 @@ def to_numeric(arg, errors="raise", downcast=None):
Returns
-------
- ret : numeric if parsing succeeded.
+ ret
+ Numeric if parsing succeeded.
Return type depends on input. Series if Series, otherwise ndarray.
See Also
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
added docstrings information about arg descriptions, downcast type 'int', and Return type
| https://api.github.com/repos/pandas-dev/pandas/pulls/32354 | 2020-02-29T05:14:05Z | 2020-03-06T20:48:15Z | 2020-03-06T20:48:15Z | 2020-03-08T10:55:55Z |
docs: fix SS06, PR08, RT02, RT03, SA04 in pandas.DatetimeIndex.indexe… | diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index c9fefd46e55c7..e675d925e7ed2 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -702,23 +702,27 @@ def inferred_type(self) -> str:
def indexer_at_time(self, time, asof=False):
"""
- Return index locations of index values at particular time of day
- (e.g. 9:30AM).
+ Return the index location at certain times of the day (eg 9:30 am).
Parameters
----------
time : datetime.time or str
- datetime.time or string in appropriate format ("%H:%M", "%H%M",
+ A datetime.time or string in appropriate format ("%H:%M", "%H%M",
"%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p").
+ asof : bool
Returns
-------
- values_at_time : array of integers
+ array of integers
+ An array is a container object that holds a fixed number of
+ values of a single type.
See Also
--------
- indexer_between_time, DataFrame.at_time
+ indexer_between_time : Return index locations of values between
+ particular times of day (e.g., 9:00-9:30AM).
+ DataFrame.at_time : Select values at particular time of day (e.g. 9:30AM).
"""
if asof:
raise NotImplementedError("'asof' argument is not supported")
| …r_at_time
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Fix : https://github.com/pandanistas/pandanistas_sprint_ui2020/issues/8
| https://api.github.com/repos/pandas-dev/pandas/pulls/32353 | 2020-02-29T05:09:41Z | 2020-03-10T14:37:20Z | null | 2020-03-10T14:37:20Z |
DOC: Update pandas.core.groupby.GroupBy.pipe docstring | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 48c00140461b5..ad6e236244830 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -190,14 +190,13 @@ class providing the base-class of operations.
)
_pipe_template = """
-Apply a function `func` with arguments to this %(klass)s object and return
-the function's result.
+Apply a function `func` with arguments to this %(klass)s object and return the result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining together
-functions that expect Series, DataFrames, GroupBy or Resampler objects.
-Instead of writing
+functions expect Series, DataFrames, GroupBy or Resampler objects.
+Instead of writing.
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c)
@@ -212,19 +211,20 @@ class providing the base-class of operations.
Parameters
----------
-func : callable or tuple of (callable, string)
- Function to apply to this %(klass)s object or, alternatively,
+func : callable or tuple of (callable, str)
+ Function to apply to this %(klass)s object or
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
-args : iterable, optional
- Positional arguments passed into `func`.
-kwargs : dict, optional
- A dictionary of keyword arguments passed into `func`.
+*args : iterable, optional
+ Positional arguments passed into `func`.
+**kwargs : dict, optional
+ A dictionary of keyword arguments passed into `func`.
Returns
-------
-object : the return type of `func`.
+object
+ The return type of `func`.
See Also
--------
| - [x] closes https://github.com/pandanistas/pandanistas_sprint_ui2020/issues/2
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32352 | 2020-02-29T05:09:34Z | 2020-03-07T21:43:46Z | null | 2020-03-07T21:47:52Z |
DOC: Fix SS06 formatting errors in merge_asof docstrings | diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 49ac1b6cfa52b..faac472b3fc31 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -320,10 +320,10 @@ def merge_asof(
direction: str = "backward",
) -> "DataFrame":
"""
- Perform an asof merge. This is similar to a left-join except that we
- match on nearest key rather than equal keys.
+ Perform an asof merge.
- Both DataFrames must be sorted by the key.
+ This is similar to a left-join except that we match on nearest
+ key rather than equal keys. Both DataFrames must be sorted by the key.
For each row in the left DataFrame:
| Errors fixed (from the list in #29254 ):
pandas.merge_asof
Validated the fixes with python scripts/validate_docstrings.py
Refrencing issue: #29254
Fix : pandanistas/pandanistas_sprint_ui2020#11 | https://api.github.com/repos/pandas-dev/pandas/pulls/32351 | 2020-02-29T04:58:31Z | 2020-02-29T17:27:50Z | 2020-02-29T17:27:50Z | 2020-03-04T04:54:52Z |
Implement BlockManager.iset | diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 9c90b20fc0f16..bb3254446bd3b 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -8,7 +8,7 @@
import numpy as np
from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib
-from pandas._typing import DtypeObj
+from pandas._typing import DtypeObj, Label
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
@@ -996,7 +996,25 @@ def delete(self, item):
)
self._rebuild_blknos_and_blklocs()
- def set(self, item, value):
+ def set(self, item: Label, value):
+ """
+ Set new item in-place.
+
+ Notes
+ -----
+ Does not consolidate.
+ Adds new Block if not contained in the current items Index.
+ """
+ try:
+ loc = self.items.get_loc(item)
+ except KeyError:
+ # This item wasn't present, just insert at end
+ self.insert(len(self.items), item, value)
+ return
+
+ self.iset(loc, value)
+
+ def iset(self, loc: Union[int, slice, np.ndarray], value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
@@ -1029,13 +1047,6 @@ def value_getitem(placement):
"Shape of new values must be compatible with manager shape"
)
- try:
- loc = self.items.get_loc(item)
- except KeyError:
- # This item wasn't present, just insert at end
- self.insert(len(self.items), item, value)
- return
-
if isinstance(loc, int):
loc = [loc]
@@ -1081,7 +1092,7 @@ def value_getitem(placement):
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
- new_blocks = []
+ new_blocks: List[Block] = []
if value_is_extension_type:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
@@ -1140,6 +1151,9 @@ def insert(self, loc: int, item, value, allow_duplicates: bool = False):
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
+ if value.ndim == self.ndim - 1 and not is_extension_array_dtype(value):
+ value = _safe_reshape(value, (1,) + value.shape)
+
block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
| Preliminary to the PR that fixes `setitem_with_indexer` (#22036, #15686) | https://api.github.com/repos/pandas-dev/pandas/pulls/32350 | 2020-02-29T04:04:58Z | 2020-03-03T15:29:48Z | 2020-03-03T15:29:48Z | 2020-03-03T15:55:54Z |
REF: Remove BlockManager.rename_axis | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e6c5ac9dbf733..5a220f7de9895 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -967,7 +967,6 @@ def rename(
continue
ax = self._get_axis(axis_no)
- baxis = self._get_block_manager_axis(axis_no)
f = com.get_rename_function(replacements)
if level is not None:
@@ -984,9 +983,8 @@ def rename(
]
raise KeyError(f"{missing_labels} not found in axis")
- result._data = result._data.rename_axis(
- f, axis=baxis, copy=copy, level=level
- )
+ new_index = ax._transform_index(f, level)
+ result.set_axis(new_index, axis=axis_no, inplace=True)
result._clear_item_cache()
if inplace:
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 3eab757311ccb..5c06b1c17f0ab 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4738,6 +4738,27 @@ def map(self, mapper, na_action=None):
return Index(new_values, **attributes)
+ # TODO: De-duplicate with map, xref GH#32349
+ def _transform_index(self, func, level=None) -> "Index":
+ """
+ Apply function to all values found in index.
+
+ This includes transforming multiindex entries separately.
+ Only apply function to one level of the MultiIndex if level is specified.
+ """
+ if isinstance(self, ABCMultiIndex):
+ if level is not None:
+ items = [
+ tuple(func(y) if i == level else y for i, y in enumerate(x))
+ for x in self
+ ]
+ else:
+ items = [tuple(func(y) for y in x) for x in self]
+ return type(self).from_tuples(items, names=self.names)
+ else:
+ items = [func(x) for x in self]
+ return Index(items, name=self.name, tupleize_cols=False)
+
def isin(self, values, level=None):
"""
Return a boolean array where the index values are in `values`.
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py
index 37a3405554745..e70652b81c42f 100644
--- a/pandas/core/internals/__init__.py
+++ b/pandas/core/internals/__init__.py
@@ -17,7 +17,6 @@
from pandas.core.internals.managers import (
BlockManager,
SingleBlockManager,
- _transform_index,
concatenate_block_managers,
create_block_manager_from_arrays,
create_block_manager_from_blocks,
@@ -40,7 +39,6 @@
"_block_shape",
"BlockManager",
"SingleBlockManager",
- "_transform_index",
"concatenate_block_managers",
"create_block_manager_from_arrays",
"create_block_manager_from_blocks",
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 98afc5ac3a0e3..14841e354af4d 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -34,7 +34,7 @@
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.base import PandasObject
from pandas.core.indexers import maybe_convert_indices
-from pandas.core.indexes.api import Index, MultiIndex, ensure_index
+from pandas.core.indexes.api import Index, ensure_index
from pandas.core.internals.blocks import (
Block,
CategoricalBlock,
@@ -216,23 +216,6 @@ def set_axis(self, axis: int, new_labels: Index) -> None:
self.axes[axis] = new_labels
- def rename_axis(
- self, mapper, axis: int, copy: bool = True, level=None
- ) -> "BlockManager":
- """
- Rename one of axes.
-
- Parameters
- ----------
- mapper : unary callable
- axis : int
- copy : bool, default True
- level : int or None, default None
- """
- obj = self.copy(deep=copy)
- obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
- return obj
-
@property
def _is_single_block(self) -> bool:
if self.ndim == 1:
@@ -1966,28 +1949,6 @@ def _compare_or_regex_search(a, b, regex=False):
return result
-def _transform_index(index, func, level=None):
- """
- Apply function to all values found in index.
-
- This includes transforming multiindex entries separately.
- Only apply function to one level of the MultiIndex if level is specified.
-
- """
- if isinstance(index, MultiIndex):
- if level is not None:
- items = [
- tuple(func(y) if i == level else y for i, y in enumerate(x))
- for x in index
- ]
- else:
- items = [tuple(func(y) for y in x) for x in index]
- return MultiIndex.from_tuples(items, names=index.names)
- else:
- items = [func(x) for x in index]
- return Index(items, name=index.name, tupleize_cols=False)
-
-
def _fast_count_smallints(arr):
"""Faster version of set(arr) for sequences of small numbers."""
counts = np.bincount(arr.astype(np.int_))
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index c301d6e7c7155..daaa5138f7654 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -46,7 +46,7 @@
from pandas.core.arrays.categorical import _recode_for_categories
import pandas.core.common as com
from pandas.core.frame import _merge_doc
-from pandas.core.internals import _transform_index, concatenate_block_managers
+from pandas.core.internals import concatenate_block_managers
from pandas.core.sorting import is_int64_overflow_possible
if TYPE_CHECKING:
@@ -2022,4 +2022,4 @@ def renamer(x, suffix):
lrenamer = partial(renamer, suffix=lsuffix)
rrenamer = partial(renamer, suffix=rsuffix)
- return (_transform_index(left, lrenamer), _transform_index(right, rrenamer))
+ return (left._transform_index(lrenamer), right._transform_index(rrenamer))
| Better to do it using NDFrame methods
cc @toobaz I know you're on board for getting index/axis stuff out of BlockManager. | https://api.github.com/repos/pandas-dev/pandas/pulls/32349 | 2020-02-29T03:33:03Z | 2020-03-11T02:40:14Z | 2020-03-11T02:40:14Z | 2020-03-11T02:41:50Z |
REF: avoid using internals methods for to_timestamp, to_period | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 61715397e8e0b..8fe3a32fe3d39 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -94,10 +94,8 @@
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
- ABCDatetimeIndex,
ABCIndexClass,
ABCMultiIndex,
- ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
@@ -8245,7 +8243,9 @@ def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
return result
- def to_timestamp(self, freq=None, how="start", axis=0, copy=True) -> "DataFrame":
+ def to_timestamp(
+ self, freq=None, how: str = "start", axis: Axis = 0, copy: bool = True
+ ) -> "DataFrame":
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
@@ -8265,23 +8265,16 @@ def to_timestamp(self, freq=None, how="start", axis=0, copy=True) -> "DataFrame"
-------
DataFrame with DatetimeIndex
"""
- new_data = self._data
- if copy:
- new_data = new_data.copy()
+ new_obj = self.copy(deep=copy)
- axis = self._get_axis_number(axis)
- if axis == 0:
- assert isinstance(self.index, (ABCDatetimeIndex, ABCPeriodIndex))
- new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
- elif axis == 1:
- assert isinstance(self.columns, (ABCDatetimeIndex, ABCPeriodIndex))
- new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
- else: # pragma: no cover
- raise AssertionError(f"Axis must be 0 or 1. Got {axis}")
+ axis_name = self._get_axis_name(axis)
+ old_ax = getattr(self, axis_name)
+ new_ax = old_ax.to_timestamp(freq=freq, how=how)
- return self._constructor(new_data)
+ setattr(new_obj, axis_name, new_ax)
+ return new_obj
- def to_period(self, freq=None, axis=0, copy=True) -> "DataFrame":
+ def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> "DataFrame":
"""
Convert DataFrame from DatetimeIndex to PeriodIndex.
@@ -8299,23 +8292,16 @@ def to_period(self, freq=None, axis=0, copy=True) -> "DataFrame":
Returns
-------
- TimeSeries with PeriodIndex
+ DataFrame with PeriodIndex
"""
- new_data = self._data
- if copy:
- new_data = new_data.copy()
+ new_obj = self.copy(deep=copy)
- axis = self._get_axis_number(axis)
- if axis == 0:
- assert isinstance(self.index, ABCDatetimeIndex)
- new_data.set_axis(1, self.index.to_period(freq=freq))
- elif axis == 1:
- assert isinstance(self.columns, ABCDatetimeIndex)
- new_data.set_axis(0, self.columns.to_period(freq=freq))
- else: # pragma: no cover
- raise AssertionError(f"Axis must be 0 or 1. Got {axis}")
+ axis_name = self._get_axis_name(axis)
+ old_ax = getattr(self, axis_name)
+ new_ax = old_ax.to_period(freq=freq)
- return self._constructor(new_data)
+ setattr(new_obj, axis_name, new_ax)
+ return new_obj
def isin(self, values) -> "DataFrame":
"""
| https://api.github.com/repos/pandas-dev/pandas/pulls/32347 | 2020-02-29T03:26:26Z | 2020-03-04T01:02:48Z | 2020-03-04T01:02:48Z | 2020-03-04T01:04:39Z | |
TYP: Update type naming in formatter | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f304fadbab871..f515b57e24cfa 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -758,8 +758,8 @@ def to_string(
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
- formatters: Optional[fmt.formatters_type] = None,
- float_format: Optional[fmt.float_format_type] = None,
+ formatters: Optional[fmt.FormattersType] = None,
+ float_format: Optional[fmt.FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index b5ddd15c1312a..2a528781f8c93 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -81,10 +81,10 @@
if TYPE_CHECKING:
from pandas import Series, DataFrame, Categorical
-formatters_type = Union[
+FormattersType = Union[
List[Callable], Tuple[Callable, ...], Mapping[Union[str, int], Callable]
]
-float_format_type = Union[str, Callable, "EngFormatter"]
+FloatFormatType = Union[str, Callable, "EngFormatter"]
common_docstring = """
Parameters
@@ -455,7 +455,7 @@ class TableFormatter:
show_dimensions: Union[bool, str]
is_truncated: bool
- formatters: formatters_type
+ formatters: FormattersType
columns: Index
@property
@@ -548,9 +548,9 @@ def __init__(
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
- formatters: Optional[formatters_type] = None,
+ formatters: Optional[FormattersType] = None,
justify: Optional[str] = None,
- float_format: Optional[float_format_type] = None,
+ float_format: Optional[FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
line_width: Optional[int] = None,
@@ -1089,7 +1089,7 @@ def _get_column_name_list(self) -> List[str]:
def format_array(
values: Any,
formatter: Optional[Callable],
- float_format: Optional[float_format_type] = None,
+ float_format: Optional[FloatFormatType] = None,
na_rep: str = "NaN",
digits: Optional[int] = None,
space: Optional[Union[str, int]] = None,
@@ -1171,7 +1171,7 @@ def __init__(
formatter: Optional[Callable] = None,
na_rep: str = "NaN",
space: Union[str, int] = 12,
- float_format: Optional[float_format_type] = None,
+ float_format: Optional[FloatFormatType] = None,
justify: str = "right",
decimal: str = ".",
quoting: Optional[int] = None,
@@ -1278,7 +1278,7 @@ def __init__(self, *args, **kwargs):
def _value_formatter(
self,
- float_format: Optional[float_format_type] = None,
+ float_format: Optional[FloatFormatType] = None,
threshold: Optional[Union[float, int]] = None,
) -> Callable:
"""Returns a function to be applied on each value to format it"""
@@ -1372,7 +1372,7 @@ def format_values_with(float_format):
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
- float_format: Optional[float_format_type]
+ float_format: Optional[FloatFormatType]
if self.float_format is None:
if self.fixed_width:
float_format = partial(
| - [x] part of #26792, #28480
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32345 | 2020-02-29T02:33:58Z | 2020-02-29T16:16:32Z | 2020-02-29T16:16:32Z | 2020-02-29T16:16:44Z |
C Functionality for Accessing DataFrame Values in Column Order | diff --git a/pandas/_libs/ndframe_iter.c b/pandas/_libs/ndframe_iter.c
new file mode 100644
index 0000000000000..f920814782880
--- /dev/null
+++ b/pandas/_libs/ndframe_iter.c
@@ -0,0 +1,222 @@
+#include "assert.h"
+#include "ndframe_iter.h"
+
+// returns -1 on error
+// performs no bounds checking, so will likely segfault if you pass
+// and inappropriate dim for the object
+static Py_ssize_t getDimLength(PyObject *df, Py_ssize_t dim) {
+ PyObject *shape;
+ Py_ssize_t ncols;
+
+ shape = PyObject_GetAttrString(df, "shape");
+ if (shape == NULL) {
+ return -1;
+ }
+
+ if (!PyTuple_Check(shape)) {
+ Py_DECREF(shape);
+ return -1;
+ }
+
+ ncols = PyLong_AsLongLong(PyTuple_GET_ITEM(shape, dim));
+ Py_DECREF(shape);
+
+ return ncols;
+}
+
+// Return the blocks object associated with a dataframe
+// Checks that the return value is a Tuple
+static PyObject *getBlocksTuple(PyObject *df) {
+ PyObject *blockManager, *blocks;
+
+ blockManager = PyObject_GetAttrString(df, "_data");
+ if (blockManager == NULL) {
+ return NULL;
+ }
+
+ blocks = PyObject_GetAttrString(blockManager, "blocks");
+ Py_DECREF(blockManager);
+ if (blocks == NULL) {
+ return NULL;
+ }
+
+ if (!PyTuple_Check(blocks)) {
+ // TODO: Set error message here
+ Py_DECREF(blocks);
+ return NULL;
+ }
+
+ return blocks;
+}
+
+// Return the integer placements of the blocks columns in its owning frame
+// Checks that the return value is a List
+static PyObject *getManagerLocationsAsList(PyObject *block) {
+ PyObject *managerLocs, *ndarray, *list;
+
+ managerLocs = PyObject_GetAttrString(block, "mgr_locs");
+ if (managerLocs == NULL) {
+ return NULL;
+ }
+
+ // TODO: we could probably just supply managerLocs to the list()
+ // built-in instead of going from mgr_locs->as_array->tolist
+ ndarray = PyObject_GetAttrString(managerLocs, "as_array");
+ Py_DECREF(managerLocs);
+ if (ndarray == NULL) {
+ return NULL;
+ }
+
+ list = PyObject_CallMethod(ndarray, "tolist", NULL);
+ Py_DECREF(ndarray);
+ if (list == NULL) {
+ return NULL;
+ } else if (!PyList_Check(list)) {
+ Py_DECREF(list);
+ return NULL;
+ }
+
+ return list;
+}
+
+
+// Given a DataFrame, this will create a struct containing both
+// the length of ndarrays the frame uses along with an array of
+// PyArrayObjects, appearing in C-order along the requested axis.
+//
+// For example, if we had a DataFrame that looks as follows
+// a b c
+// 0 1 2.0 3
+// 1 4 5.0 6
+//
+// This would deconstruct the blocks and provide a struct
+// with len 3; accessing
+// ndarrays[0] will yield ndarray([1, 4])
+// ndarrays[1] will yield ndarray([2., 5.])
+// ndarrays[2] will yield ndarray([3, 6])
+//
+// The goal of this is to provide a performant way in C to
+// maintain axis order and dtype information of the
+// supplied DataFrame.
+PdOrderedArrays *PdOrderedArrays_New(PyObject *df, int axis) {
+ PyObject *blocks, *block, *managerLocs, *blockValues, *ndarr, *key;
+ PyObject **ndarrays;
+ Py_ssize_t i, j, loc, ncols;
+ PdOrderedArrays *result;
+
+ //
+ assert(axis == 0);
+
+ ncols = getDimLength(df, 1);
+ blocks = getBlocksTuple(df);
+ if (blocks == NULL) {
+ return NULL;
+ }
+
+ ndarrays = PyObject_Malloc(sizeof(PyObject *) * ncols);
+ if (ndarrays == NULL) {
+ Py_DECREF(blocks);
+ return NULL;
+ }
+
+
+ // Iterate over all of the blocks, getting a slice from
+ // each block sequentially and storing it in the appropriate order
+ // in `arrays`
+ for (i = 0; i < PyTuple_GET_SIZE(blocks); i++) {
+ block = PyTuple_GET_ITEM(blocks, i);
+
+ blockValues = PyObject_CallMethod(block, "get_block_values", NULL);
+ if (blockValues == NULL) {
+ Py_DECREF(blocks);
+ PyObject_Free(ndarrays);
+ return NULL;
+ }
+
+ managerLocs = getManagerLocationsAsList(block);
+ if (managerLocs == NULL) {
+ Py_DECREF(blockValues);
+ Py_DECREF(blocks);
+ PyObject_Free(ndarrays);
+ return NULL;
+ }
+
+ // managerLocs tells use where each column of the block
+ // exists in the maintaining dataframe, so iterate
+ // managerLocs sequentially and assign, get the column of
+ // data and assign that reference to the appropriate position
+ // in `ndarrays`
+ for (j = 0; j < PyList_GET_SIZE(managerLocs); j++) {
+ loc = PyLong_AsLongLong(PyList_GET_ITEM(managerLocs, j));
+ key = PyLong_FromLongLong(j);
+ if (key == NULL) {
+ goto LOOP_ERROR;
+ }
+
+ // TODO: We should actually be grabbing an ndarray reference here
+ // and storing that in ndarrays; I haven't quite figured out
+ // with the Numpy C-API how to do that though
+ //
+ // For illustration, let's assume we have a DataFrame that looks like:
+ // colA colB colC
+ // 0 0 X 1
+ // 1 2 Y 3
+ //
+ // If we are currently iterating over the int block, it would look
+ // something like this:
+ //
+ // [[0, 1],
+ // [2, 3]]
+ //
+ // and managerLocs would be [0, 2] (note position of int dtypes in dataframe)
+ //
+ // So in this loop we would ideally create a ndarray that references [0, 2]
+ // (i.e. the first column of the above block) and store a pointer to that
+ // at arrays[0]. We would then get an array that references [1, 3] and store
+ // that at arrays[2].
+ //
+ // Iteration over the string block should populate an ndarray at arrays[1]
+ // that holds a reference to ["X", "Y"]
+ ndarr = PyObject_GetItem(blockValues, key); // TODO: no GetItem; construct ndarray somehow
+ Py_DECREF(key);
+ if (ndarr == NULL) {
+ goto LOOP_ERROR;
+ }
+
+ ndarrays[loc] = ndarr;
+ continue;
+ LOOP_ERROR:
+ Py_DECREF(managerLocs);
+ Py_DECREF(blockValues);
+ Py_DECREF(blocks);
+ PyObject_Free(ndarrays);
+ return NULL;
+ }
+
+ Py_DECREF(managerLocs);
+ Py_DECREF(blockValues);
+ }
+
+ Py_DECREF(blocks);
+
+ result = PyObject_Malloc(sizeof(PdOrderedArrays));
+ if (result == NULL) {
+ return NULL;
+ }
+
+ result->len = ncols;
+ result->ndarrays = ndarrays;
+
+ return result;
+}
+
+void PdOrderedArrays_Destroy(PdOrderedArrays *orderedArrays) {
+ Py_ssize_t i;
+
+ for (i = 0; i < orderedArrays->len; i++) {
+ Py_DECREF(orderedArrays->ndarrays++);
+ }
+
+ PyObject_Free(orderedArrays->ndarrays);
+ PyObject_Free(orderedArrays);
+}
diff --git a/pandas/_libs/ndframe_iter.h b/pandas/_libs/ndframe_iter.h
new file mode 100644
index 0000000000000..cec8d8a863215
--- /dev/null
+++ b/pandas/_libs/ndframe_iter.h
@@ -0,0 +1,26 @@
+// TODO: could use better naming than "PdBlockIter"
+// showing that this is really "deconstructed" block data
+// in native form
+
+#ifndef PANDAS__NDFRAME_ITER
+#define PANDAS__NDFRAME_ITER
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
+
+// Struct containing a pointer to len # of NDArrays
+// The order of each item in data should match the
+// order specified by the BlockManager
+typedef struct {
+ Py_ssize_t len;
+ PyObject **ndarrays; // TODO: need some kind of destructor
+} PdOrderedArrays;
+
+// Provided a DataFrame and axis deconstructs the block
+// data to match the order represented by the BlockManager
+// Returns NULL on error
+PdOrderedArrays *PdOrderedArrays_New(PyObject *df, int axis);
+void PdOrderedArrays_Destroy(PdOrderedArrays *orderedArrays);
+
+#endif
diff --git a/pandas/tests/frame/test_native.py b/pandas/tests/frame/test_native.py
new file mode 100644
index 0000000000000..f4b8d635f35b9
--- /dev/null
+++ b/pandas/tests/frame/test_native.py
@@ -0,0 +1,13 @@
+import ctypes
+
+import pandas as pd
+
+
+def test_func():
+ # TODO: this is hard coded to only work on Mac
+ libc = ctypes.PyDLL('pandas/_libs/ndframe_data.cpython-37m-darwin.so')
+ df = pd.DataFrame([[1, "2", 3., 4., 5], [6, "7", 8., 9, 10]])
+ obj = libc.PdOrderedArrays_New(ctypes.py_object(df), ctypes.c_int(0))
+ # TODO: Improve test by accessing numpy array elements sequentially
+ # and testing for appropriate values
+ #libc.PdOrderedArrays_Destroy(obj)
diff --git a/setup.py b/setup.py
index 2d49d7e1e85f2..f80ebfca9a32c 100755
--- a/setup.py
+++ b/setup.py
@@ -234,6 +234,7 @@ def initialize_options(self):
pjoin(ujson_lib, "ultrajsonenc.c"),
pjoin(ujson_lib, "ultrajsondec.c"),
pjoin(util, "move.c"),
+ pjoin("pandas", "_libs", "ndframe_iter.c"),
]
for root, dirs, files in os.walk("pandas"):
@@ -730,8 +731,18 @@ def srcpath(name=None, suffix=".pyx", subdir="src"):
extensions.append(ujson_ext)
+# ----------------------------------------------------------------------w
+# native frame access functions
+native_frame_ext = Extension(
+ "pandas._libs.ndframe_data",
+ sources=["pandas/_libs/ndframe_iter.c"],
+ include_dirs=["pandas/_libs"],
+ define_macros=macros,
+ depends=["ndframe_iter.h"],
+)
-# ----------------------------------------------------------------------
+extensions.append(native_frame_ext)
+# ----------------------------------------------------------------------w
def setup_package():
| This is very rough and was a little hesitant to even push here, but figured worth opening up to feedback.
The main motivation for this PR is to get block management out of the JSON serializer, but it could potentially be extended beyond that for external uses
The gist of this is that we can hold a struct `PdOrderedArrays` which gives the length of the DataFrame and sequential access to Numpy arrays that mirror what the user sees when dealing with a DataFrame, all by deconstructing the blocks that already exist down in some C functions | https://api.github.com/repos/pandas-dev/pandas/pulls/32343 | 2020-02-29T00:28:10Z | 2020-03-04T01:06:46Z | null | 2023-04-12T20:17:13Z |
CLN: setitem_with_indexer cleanups | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 3ab180bafd156..35e61ab6a59c9 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1633,14 +1633,12 @@ def _setitem_with_indexer(self, indexer, value):
info_idx = [info_idx]
labels = item_labels[info_idx]
+ plane_indexer = indexer[:1]
+ lplane_indexer = length_of_indexer(plane_indexer[0], self.obj.index)
+ # lplane_indexer gives the expected length of obj[indexer[0]]
+
if len(labels) == 1:
# We can operate on a single column
- item = labels[0]
- idx = indexer[0]
-
- plane_indexer = tuple([idx])
- lplane_indexer = length_of_indexer(plane_indexer[0], self.obj.index)
- # lplane_indexer gives the expected length of obj[idx]
# require that we are setting the right number of values that
# we are indexing
@@ -1652,11 +1650,6 @@ def _setitem_with_indexer(self, indexer, value):
"length than the value"
)
- # non-mi
- else:
- plane_indexer = indexer[:1]
- lplane_indexer = length_of_indexer(plane_indexer[0], self.obj.index)
-
def setter(item, v):
ser = self.obj[item]
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
@@ -1718,18 +1711,23 @@ def setter(item, v):
for i, item in enumerate(labels):
- # setting with a list, recoerces
+ # setting with a list, re-coerces
setter(item, value[:, i].tolist())
- # we have an equal len list/ndarray
- elif _can_do_equal_len(
- labels, value, plane_indexer, lplane_indexer, self.obj
+ elif (
+ len(labels) == 1
+ and lplane_indexer == len(value)
+ and not is_scalar(plane_indexer[0])
):
+ # we have an equal len list/ndarray
setter(labels[0], value)
- # per label values
- else:
+ elif lplane_indexer == 0 and len(value) == len(self.obj.index):
+ # We get here in one case via .loc with a all-False mask
+ pass
+ else:
+ # per-label values
if len(labels) != len(value):
raise ValueError(
"Must have equal len keys and value "
@@ -1746,7 +1744,6 @@ def setter(item, v):
else:
if isinstance(indexer, tuple):
- indexer = maybe_convert_ix(*indexer)
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
@@ -1764,6 +1761,8 @@ def setter(item, v):
self.obj[item_labels[indexer[info_axis]]] = value
return
+ indexer = maybe_convert_ix(*indexer)
+
if isinstance(value, (ABCSeries, dict)):
# TODO(EA): ExtensionBlock.setitem this causes issues with
# setting for extensionarrays that store dicts. Need to decide
@@ -2277,26 +2276,3 @@ def _maybe_numeric_slice(df, slice_, include_bool=False):
dtypes.append(bool)
slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
return slice_
-
-
-def _can_do_equal_len(labels, value, plane_indexer, lplane_indexer, obj) -> bool:
- """
- Returns
- -------
- bool
- True if we have an equal len settable.
- """
- if not len(labels) == 1 or not np.iterable(value) or is_scalar(plane_indexer[0]):
- return False
-
- item = labels[0]
- index = obj[item].index
-
- values_len = len(value)
- # equal len list/ndarray
- if len(index) == values_len:
- return True
- elif lplane_indexer == values_len:
- return True
-
- return False
| working on fixing some significant bugs in setitem_with_indexer, this breaks off some easier cleanups | https://api.github.com/repos/pandas-dev/pandas/pulls/32341 | 2020-02-28T23:37:52Z | 2020-03-03T02:02:44Z | 2020-03-03T02:02:44Z | 2020-03-03T02:07:33Z |
BUG: None / Timedelta incorrectly returning NaT | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 0f18a1fd81815..18123efe76b1d 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -213,6 +213,7 @@ Timedelta
^^^^^^^^^
- Bug in constructing a :class:`Timedelta` with a high precision integer that would round the :class:`Timedelta` components (:issue:`31354`)
+- Bug in dividing ``np.nan`` or ``None`` by :class:`Timedelta`` incorrectly returning ``NaT`` (:issue:`31869`)
-
Timezones
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 66660c5f641fd..298028227e18b 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1407,7 +1407,14 @@ class Timedelta(_Timedelta):
# convert to Timedelta below
pass
+ elif util.is_nan(other):
+ # i.e. np.nan or np.float64("NaN")
+ raise TypeError("Cannot divide float by Timedelta")
+
elif hasattr(other, 'dtype'):
+ if other.dtype.kind == "O":
+ # GH#31869
+ return np.array([x / self for x in other])
return other / self.to_timedelta64()
elif not _validate_ops_compat(other):
@@ -1415,7 +1422,8 @@ class Timedelta(_Timedelta):
other = Timedelta(other)
if other is NaT:
- return NaT
+ # In this context we treat NaT as timedelta-like
+ return np.nan
return float(other.value) / self.value
def __floordiv__(self, other):
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index 230a14aeec60a..ea02a76275443 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -412,6 +412,46 @@ def test_td_rdiv_timedeltalike_scalar(self):
assert np.timedelta64(60, "h") / td == 0.25
+ def test_td_rdiv_na_scalar(self):
+ # GH#31869 None gets cast to NaT
+ td = Timedelta(10, unit="d")
+
+ result = NaT / td
+ assert np.isnan(result)
+
+ result = None / td
+ assert np.isnan(result)
+
+ result = np.timedelta64("NaT") / td
+ assert np.isnan(result)
+
+ with pytest.raises(TypeError, match="cannot use operands with types dtype"):
+ np.datetime64("NaT") / td
+
+ with pytest.raises(TypeError, match="Cannot divide float by Timedelta"):
+ np.nan / td
+
+ def test_td_rdiv_ndarray(self):
+ td = Timedelta(10, unit="d")
+
+ arr = np.array([td], dtype=object)
+ result = arr / td
+ expected = np.array([1], dtype=np.float64)
+ tm.assert_numpy_array_equal(result, expected)
+
+ arr = np.array([None])
+ result = arr / td
+ expected = np.array([np.nan])
+ tm.assert_numpy_array_equal(result, expected)
+
+ arr = np.array([np.nan], dtype=object)
+ with pytest.raises(TypeError, match="Cannot divide float by Timedelta"):
+ arr / td
+
+ arr = np.array([np.nan], dtype=np.float64)
+ with pytest.raises(TypeError, match="cannot use operands with types dtype"):
+ arr / td
+
# ---------------------------------------------------------------
# Timedelta.__floordiv__
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
xref #31869, arguable whether or not this closes that. | https://api.github.com/repos/pandas-dev/pandas/pulls/32340 | 2020-02-28T22:22:06Z | 2020-03-03T02:05:29Z | 2020-03-03T02:05:29Z | 2021-11-20T23:21:50Z |
TST/REF: move tools test files | diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/tools/test_to_datetime.py
similarity index 100%
rename from pandas/tests/indexes/datetimes/test_tools.py
rename to pandas/tests/tools/test_to_datetime.py
diff --git a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_to_numeric.py
similarity index 100%
rename from pandas/tests/tools/test_numeric.py
rename to pandas/tests/tools/test_to_numeric.py
diff --git a/pandas/tests/indexes/timedeltas/test_tools.py b/pandas/tests/tools/test_to_timedelta.py
similarity index 100%
rename from pandas/tests/indexes/timedeltas/test_tools.py
rename to pandas/tests/tools/test_to_timedelta.py
diff --git a/setup.cfg b/setup.cfg
index 61d5b1030a500..bbd8489622005 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -135,7 +135,7 @@ ignore_errors=True
[mypy-pandas.tests.arithmetic.test_datetime64]
ignore_errors=True
-[mypy-pandas.tests.indexes.datetimes.test_tools]
+[mypy-pandas.tests.tools.test_to_datetime]
ignore_errors=True
[mypy-pandas.tests.scalar.period.test_period]
| These files are in the index tests, but they're not really testing the index objects. | https://api.github.com/repos/pandas-dev/pandas/pulls/32338 | 2020-02-28T17:51:35Z | 2020-02-29T19:36:31Z | 2020-02-29T19:36:31Z | 2020-02-29T20:13:52Z |
BUG: fixes unhandled NAType when plotting (#32073) | diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst
index 17f8783f71bfb..1b223cf5f026b 100644
--- a/doc/source/development/code_style.rst
+++ b/doc/source/development/code_style.rst
@@ -21,7 +21,7 @@ Patterns
foo.__class__
-------------
-*pandas* uses 'type(foo)' instead 'foo.__class__' as it is making the code more
+*pandas* uses 'type(foo)' instead 'foo.__class__' as it makes the code more
readable.
For example:
@@ -52,8 +52,8 @@ f-strings
*pandas* uses f-strings formatting instead of '%' and '.format()' string formatters.
-The convention of using f-strings on a string that is concatenated over serveral lines,
-is to prefix only the lines containing the value needs to be interpeted.
+The convention of using f-strings on a string that is concatenated over several lines,
+is to prefix only the lines containing values which need to be interpreted.
For example:
@@ -86,8 +86,8 @@ For example:
White spaces
~~~~~~~~~~~~
-Putting the white space only at the end of the previous line, so
-there is no whitespace at the beggining of the concatenated string.
+Only put white space at the end of the previous line, so
+there is no whitespace at the beginning of the concatenated string.
For example:
@@ -116,7 +116,7 @@ Representation function (aka 'repr()')
*pandas* uses 'repr()' instead of '%r' and '!r'.
-The use of 'repr()' will only happend when the value is not an obvious string.
+The use of 'repr()' will only happen when the value is not an obvious string.
For example:
@@ -138,7 +138,7 @@ For example:
Imports (aim for absolute)
==========================
-In Python 3, absolute imports are recommended. In absolute import doing something
+In Python 3, absolute imports are recommended. Using absolute imports, doing something
like ``import string`` will import the string module rather than ``string.py``
in the same directory. As much as possible, you should try to write out
absolute imports that show the whole import chain from top-level pandas.
diff --git a/doc/source/user_guide/merging.rst b/doc/source/user_guide/merging.rst
index 8302b5c5dea60..49f4bbb6beb19 100644
--- a/doc/source/user_guide/merging.rst
+++ b/doc/source/user_guide/merging.rst
@@ -742,7 +742,7 @@ as shown in the following example.
)
ser
- result = pd.merge(df, ser.reset_index(), on=['Let', 'Num'])
+ pd.merge(df, ser.reset_index(), on=['Let', 'Num'])
Here is another example with duplicate join keys in DataFrames:
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 3077f73a8d1a4..2fd227694800c 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -701,7 +701,7 @@ cdef class TextReader:
char *word
object name, old_name
int status
- uint64_t hr, data_line
+ uint64_t hr, data_line = 0
char *errors = "strict"
StringPath path = _string_path(self.c_encoding)
diff --git a/pandas/_testing.py b/pandas/_testing.py
index a70f75d6cfaf4..fce06e216dfd7 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -1106,7 +1106,7 @@ def assert_series_equal(
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
- Whether to compare category order of internal Categoricals
+ Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
obj : str, default 'Series'
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 542cfd334b810..549606795f528 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -255,6 +255,16 @@ class SparseArray(PandasObject, ExtensionArray, ExtensionOpsMixin):
Methods
-------
None
+
+ Examples
+ --------
+ >>> from pandas.arrays import SparseArray
+ >>> arr = SparseArray([0, 0, 1, 2])
+ >>> arr
+ [0, 0, 1, 2]
+ Fill: 0
+ IntIndex
+ Indices: array([2, 3], dtype=int32)
"""
_pandas_ftype = "sparse"
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f304fadbab871..f515b57e24cfa 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -758,8 +758,8 @@ def to_string(
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
- formatters: Optional[fmt.formatters_type] = None,
- float_format: Optional[fmt.float_format_type] = None,
+ formatters: Optional[fmt.FormattersType] = None,
+ float_format: Optional[fmt.FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 49ac1b6cfa52b..faac472b3fc31 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -320,10 +320,10 @@ def merge_asof(
direction: str = "backward",
) -> "DataFrame":
"""
- Perform an asof merge. This is similar to a left-join except that we
- match on nearest key rather than equal keys.
+ Perform an asof merge.
- Both DataFrames must be sorted by the key.
+ This is similar to a left-join except that we match on nearest
+ key rather than equal keys. Both DataFrames must be sorted by the key.
For each row in the left DataFrame:
diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index 4939cbfc9cc96..40f376724bd39 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -70,7 +70,7 @@ def to_numeric(arg, errors="raise", downcast=None):
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
- convert_dtypes : Convert dtypes.
+ DataFrame.convert_dtypes : Convert dtypes.
Examples
--------
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index b5ddd15c1312a..2a528781f8c93 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -81,10 +81,10 @@
if TYPE_CHECKING:
from pandas import Series, DataFrame, Categorical
-formatters_type = Union[
+FormattersType = Union[
List[Callable], Tuple[Callable, ...], Mapping[Union[str, int], Callable]
]
-float_format_type = Union[str, Callable, "EngFormatter"]
+FloatFormatType = Union[str, Callable, "EngFormatter"]
common_docstring = """
Parameters
@@ -455,7 +455,7 @@ class TableFormatter:
show_dimensions: Union[bool, str]
is_truncated: bool
- formatters: formatters_type
+ formatters: FormattersType
columns: Index
@property
@@ -548,9 +548,9 @@ def __init__(
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
- formatters: Optional[formatters_type] = None,
+ formatters: Optional[FormattersType] = None,
justify: Optional[str] = None,
- float_format: Optional[float_format_type] = None,
+ float_format: Optional[FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
line_width: Optional[int] = None,
@@ -1089,7 +1089,7 @@ def _get_column_name_list(self) -> List[str]:
def format_array(
values: Any,
formatter: Optional[Callable],
- float_format: Optional[float_format_type] = None,
+ float_format: Optional[FloatFormatType] = None,
na_rep: str = "NaN",
digits: Optional[int] = None,
space: Optional[Union[str, int]] = None,
@@ -1171,7 +1171,7 @@ def __init__(
formatter: Optional[Callable] = None,
na_rep: str = "NaN",
space: Union[str, int] = 12,
- float_format: Optional[float_format_type] = None,
+ float_format: Optional[FloatFormatType] = None,
justify: str = "right",
decimal: str = ".",
quoting: Optional[int] = None,
@@ -1278,7 +1278,7 @@ def __init__(self, *args, **kwargs):
def _value_formatter(
self,
- float_format: Optional[float_format_type] = None,
+ float_format: Optional[FloatFormatType] = None,
threshold: Optional[Union[float, int]] = None,
) -> Callable:
"""Returns a function to be applied on each value to format it"""
@@ -1372,7 +1372,7 @@ def format_values_with(float_format):
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
- float_format: Optional[float_format_type]
+ float_format: Optional[FloatFormatType]
if self.float_format is None:
if self.fixed_width:
float_format = partial(
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 63d0b8abe59d9..3c826317f11e6 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -413,7 +413,13 @@ def _compute_plot_data(self):
# np.ndarray before plot.
numeric_data = numeric_data.copy()
for col in numeric_data:
- numeric_data[col] = np.asarray(numeric_data[col])
+
+ # GH32073: cast to float if values contain nulled integers
+ values = numeric_data[col]
+ if (values.isna().any().all()):
+ values = values.astype(float)
+
+ numeric_data[col] = np.asarray(values)
self.data = numeric_data
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index c966962a7c87d..fff5ca03e80f4 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -95,43 +95,14 @@ def test_scalar_non_numeric(self, index_func, klass):
s = gen_obj(klass, i)
# getting
- for idxr, getitem in [(lambda x: x.iloc, False), (lambda x: x, True)]:
+ with pytest.raises(KeyError, match="^3.0$"):
+ s[3.0]
- if getitem:
- error = KeyError
- msg = r"^3\.0?$"
- else:
- error = TypeError
- msg = (
- r"cannot do (label|positional) indexing "
- fr"on {type(i).__name__} with these indexers \[3\.0\] of "
- r"type float|"
- "Cannot index by location index with a "
- "non-integer key"
- )
- with pytest.raises(error, match=msg):
- idxr(s)[3.0]
-
- # label based can be a TypeError or KeyError
- if s.index.inferred_type in {
- "categorical",
- "string",
- "unicode",
- "mixed",
- "period",
- "timedelta64",
- "datetime64",
- }:
- error = KeyError
- msg = r"^3\.0$"
- else:
- error = TypeError
- msg = (
- r"cannot do (label|positional) indexing "
- fr"on {type(i).__name__} with these indexers \[3\.0\] of "
- "type float"
- )
- with pytest.raises(error, match=msg):
+ msg = "Cannot index by location index with a non-integer key"
+ with pytest.raises(TypeError, match=msg):
+ s.iloc[3.0]
+
+ with pytest.raises(KeyError, match="^3.0$"):
s.loc[3.0]
# contains
@@ -190,16 +161,12 @@ def test_scalar_with_mixed(self):
s2 = Series([1, 2, 3], index=["a", "b", "c"])
s3 = Series([1, 2, 3], index=["a", "b", 1.5])
- # lookup in a pure stringstr
- # with an invalid indexer
- msg = (
- r"cannot do label indexing "
- r"on Index with these indexers \[1\.0\] of "
- r"type float|"
- "Cannot index by location index with a non-integer key"
- )
+ # lookup in a pure string index with an invalid indexer
+
with pytest.raises(KeyError, match="^1.0$"):
s2[1.0]
+
+ msg = "Cannot index by location index with a non-integer key"
with pytest.raises(TypeError, match=msg):
s2.iloc[1.0]
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/tools/test_to_datetime.py
similarity index 100%
rename from pandas/tests/indexes/datetimes/test_tools.py
rename to pandas/tests/tools/test_to_datetime.py
diff --git a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_to_numeric.py
similarity index 100%
rename from pandas/tests/tools/test_numeric.py
rename to pandas/tests/tools/test_to_numeric.py
diff --git a/pandas/tests/indexes/timedeltas/test_tools.py b/pandas/tests/tools/test_to_timedelta.py
similarity index 100%
rename from pandas/tests/indexes/timedeltas/test_tools.py
rename to pandas/tests/tools/test_to_timedelta.py
diff --git a/setup.cfg b/setup.cfg
index 61d5b1030a500..bbd8489622005 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -135,7 +135,7 @@ ignore_errors=True
[mypy-pandas.tests.arithmetic.test_datetime64]
ignore_errors=True
-[mypy-pandas.tests.indexes.datetimes.test_tools]
+[mypy-pandas.tests.tools.test_to_datetime]
ignore_errors=True
[mypy-pandas.tests.scalar.period.test_period]
| - [x] closes #32073
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32337 | 2020-02-28T17:28:14Z | 2020-03-01T17:58:51Z | null | 2020-03-01T17:58:51Z |
CLN: Don't create _join_functions | diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 49ac1b6cfa52b..bb40adb69e42d 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1312,7 +1312,12 @@ def _get_join_indexers(
kwargs = copy.copy(kwargs)
if how == "left":
kwargs["sort"] = sort
- join_func = _join_functions[how]
+ join_func = {
+ "inner": libjoin.inner_join,
+ "left": libjoin.left_outer_join,
+ "right": _right_outer_join,
+ "outer": libjoin.full_outer_join,
+ }[how]
return join_func(lkey, rkey, count, **kwargs)
@@ -1842,14 +1847,6 @@ def _right_outer_join(x, y, max_groups):
return left_indexer, right_indexer
-_join_functions = {
- "inner": libjoin.inner_join,
- "left": libjoin.left_outer_join,
- "right": _right_outer_join,
- "outer": libjoin.full_outer_join,
-}
-
-
def _factorize_keys(lk, rk, sort=True):
# Some pre-processing for non-ndarray lk / rk
if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
| This seems more clear than defining `_join_functions` far away from where it's actually used | https://api.github.com/repos/pandas-dev/pandas/pulls/32336 | 2020-02-28T16:04:25Z | 2020-03-03T03:09:21Z | 2020-03-03T03:09:20Z | 2020-04-09T02:37:43Z |
ENH: implement fill_value for df.add(other=Series) #13488 | diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index 1b6098e6b6ac1..0c6c3caf11b09 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -10,6 +10,16 @@ including other versions of pandas.
.. ---------------------------------------------------------------------------
+.. _whatsnew_102.enhancements:
+
+Enhancements
+~~~~~~~~~~~~
+
+- :meth:`DataFrame.add` now accepts a ``fill_value`` not equal to ``None`` when ``other`` parameter equals :class:`Series`.
+ Same enhancement also available with other binary operators: :meth:`~DataFrame.sub`, :meth:`~DataFrame.mul`, :meth:`~DataFrame.div`, :meth:`~DataFrame.truediv`, :meth:`~DataFrame.floordiv`, :meth:`~DataFrame.mod`, :meth:`~DataFrame.pow`. (:issue:`13488`)
+
+.. ---------------------------------------------------------------------------
+
.. _whatsnew_102.regressions:
Fixed regressions
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index d0adf2da04db3..03d793d386acd 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -14,7 +14,12 @@
from pandas._typing import ArrayLike, Level
from pandas.util._decorators import Appender
-from pandas.core.dtypes.common import is_list_like, is_timedelta64_dtype
+from pandas.core.dtypes.common import (
+ is_bool,
+ is_list_like,
+ is_number,
+ is_timedelta64_dtype,
+)
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
@@ -341,7 +346,11 @@ def fill_binop(left, right, fill_value):
left = left.copy()
left[left_mask & mask] = fill_value
- if right_mask.any():
+ if is_bool(right_mask):
+ if right_mask:
+ right = left._constructor(right, index=left.index)
+ right[right_mask & mask] = fill_value
+ elif right_mask.any():
# Avoid making a copy if we can
right = right.copy()
right[right_mask & mask] = fill_value
@@ -585,7 +594,7 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
# DataFrame
-def _combine_series_frame(left, right, func, axis: int):
+def _combine_series_frame(left, right, func, axis: int, fill_value=None):
"""
Apply binary operator `func` to self, other using alignment and fill
conventions determined by the axis argument.
@@ -596,16 +605,29 @@ def _combine_series_frame(left, right, func, axis: int):
right : Series
func : binary operator
axis : {0, 1}
+ fill_value : numeric, optional
Returns
-------
result : DataFrame
"""
+ if fill_value is None:
+ _arith_op = func
+
+ else:
+
+ def _arith_op(left, right):
+ left, right = fill_binop(left, right, fill_value)
+ return func(left, right)
+
# We assume that self.align(other, ...) has already been called
if axis == 0:
- new_data = left._combine_match_index(right, func)
+ if fill_value is not None:
+ new_data = dispatch_to_series(left, right, _arith_op, axis=0)
+ else:
+ new_data = left._combine_match_index(right, _arith_op)
else:
- new_data = dispatch_to_series(left, right, func, axis="columns")
+ new_data = dispatch_to_series(left, right, _arith_op, axis="columns")
return left._construct_result(new_data)
@@ -771,6 +793,12 @@ def f(self, other, axis=default_axis, level=None, fill_value=None):
if _should_reindex_frame_op(self, other, axis, default_axis, fill_value, level):
return _frame_arith_method_with_reindex(self, other, op)
+ if not is_number(fill_value) and fill_value is not None:
+ raise TypeError(
+ "fill_value must be numeric or None. "
+ f"Got {type(fill_value).__name__}"
+ )
+
self, other = _align_method_FRAME(self, other, axis, flex=True, level=level)
if isinstance(other, ABCDataFrame):
@@ -787,11 +815,8 @@ def f(self, other, axis=default_axis, level=None, fill_value=None):
pass_op = op if axis in [0, "columns", None] else na_op
pass_op = pass_op if not is_logical else op
- if fill_value is not None:
- raise NotImplementedError(f"fill_value {fill_value} not supported.")
-
axis = self._get_axis_number(axis) if axis is not None else 1
- return _combine_series_frame(self, other, pass_op, axis=axis)
+ return _combine_series_frame(self, other, pass_op, axis, fill_value)
else:
# in this case we always have `np.ndim(other) == 0`
if fill_value is not None:
diff --git a/pandas/core/ops/docstrings.py b/pandas/core/ops/docstrings.py
index 203ea3946d1b2..7685373ca19e5 100644
--- a/pandas/core/ops/docstrings.py
+++ b/pandas/core/ops/docstrings.py
@@ -478,6 +478,36 @@ def _make_flex_doc(op_name, typ):
triangle 4 181
rectangle 5 361
+Add Series by axis when values are missing
+
+>>> a = pd.Series([2, 4], index=['circle', 'triangle'])
+
+>>> df.add(a, axis=0)
+ angles degrees
+circle 2.0 362.0
+rectangle NaN NaN
+triangle 7.0 184.0
+
+>>> df.add(a, axis=0, fill_value=1)
+ angles degrees
+circle 2.0 362.0
+rectangle 5.0 361.0
+triangle 7.0 184.0
+
+>>> b = pd.Series([3, 6, 9], index=["angles", "degrees", "scale"])
+
+>>> df.add(b)
+ angles degrees scale
+circle 3 366 NaN
+triangle 6 186 NaN
+rectangle 7 366 NaN
+
+>>> df.add(b, fill_value=1)
+ angles degrees scale
+circle 3 366 10.0
+triangle 6 186 10.0
+rectangle 7 366 10.0
+
Divide by constant with reverse version.
>>> df.div(10)
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index e4be8a979a70f..9b46e6a509919 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -453,12 +453,6 @@ def test_arith_flex_frame_corner(self, float_frame):
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
- with pytest.raises(NotImplementedError, match="fill_value"):
- float_frame.add(float_frame.iloc[0], fill_value=3)
-
- with pytest.raises(NotImplementedError, match="fill_value"):
- float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
-
def test_arith_flex_series(self, simple_frame):
df = simple_frame
@@ -490,19 +484,6 @@ def test_arith_flex_series(self, simple_frame):
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
- def test_arith_flex_zero_len_raises(self):
- # GH 19522 passing fill_value to frame flex arith methods should
- # raise even in the zero-length special cases
- ser_len0 = pd.Series([], dtype=object)
- df_len0 = pd.DataFrame(columns=["A", "B"])
- df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
-
- with pytest.raises(NotImplementedError, match="fill_value"):
- df.add(ser_len0, fill_value="E")
-
- with pytest.raises(NotImplementedError, match="fill_value"):
- df_len0.sub(df["A"], axis=None, fill_value=3)
-
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
@@ -774,6 +755,63 @@ def test_frame_single_columns_object_sum_axis_1():
tm.assert_series_equal(result, expected)
+@pytest.fixture
+def simple_frame_with_na():
+ df = pd.DataFrame(
+ [[np.nan, 2.0, 3.0], [4.0, np.nan, 6.0], [7.0, 8.0, 9.0]],
+ index=["a", "b", "c"],
+ columns=np.arange(3),
+ )
+ return df
+
+
+@pytest.mark.parametrize(
+ "axis, series, expected",
+ [
+ (
+ 0,
+ pd.Series([1.0, np.nan, 3.0, 4.0], index=["a", "b", "c", "d"]),
+ pd.DataFrame(
+ [
+ [2.0, 3.0, 4.0],
+ [5.0, np.nan, 7.0],
+ [10.0, 11.0, 12.0],
+ [5.0, 5.0, 5.0],
+ ],
+ columns=np.arange(3),
+ index=["a", "b", "c", "d"],
+ ),
+ ),
+ (
+ "columns",
+ pd.Series([np.nan, 2.0, np.nan, 4.0], index=np.arange(4)),
+ pd.DataFrame(
+ [[np.nan, 4.0, 4.0, 5.0], [5.0, 3.0, 7.0, 5.0], [8.0, 10.0, 10.0, 5.0]],
+ index=["a", "b", "c"],
+ columns=np.arange(4),
+ ),
+ ),
+ ],
+)
+def test_add_series_to_frame_with_fill(simple_frame_with_na, axis, series, expected):
+ # Check missing values correctly populated with fill-value when
+ # adding series to frame, GH#13488.
+ df = simple_frame_with_na
+ result = df.add(other=series, axis=axis, fill_value=1)
+ expected = expected
+ tm.assert_frame_equal(result, expected)
+
+
+def test_df_add_with_non_numeric_fill(simple_frame):
+ # Check non-numeric fill-value raises when adding series to frame, GH#13488.
+ # Test replaces non-numeric check in removed test_arith_flex_zero_len_raises.
+ df = simple_frame
+ ser = pd.Series([1.0, np.nan, 3.0], index=["a", "b", "c"])
+
+ with pytest.raises(TypeError, match="fill_value"):
+ df.add(ser, fill_value="E")
+
+
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
| - [ ] closes #13488
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32335 | 2020-02-28T15:53:18Z | 2020-03-11T02:17:58Z | null | 2020-03-11T02:17:59Z |
DOC: Fixed reference to `convert_dtypes` in `to_numeric` (#32295) | diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index 4939cbfc9cc96..40f376724bd39 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -70,7 +70,7 @@ def to_numeric(arg, errors="raise", downcast=None):
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
- convert_dtypes : Convert dtypes.
+ DataFrame.convert_dtypes : Convert dtypes.
Examples
--------
| - closes #32295
- just changed the reference from convert_dtypes to DataFrame.convert_dtypes
| https://api.github.com/repos/pandas-dev/pandas/pulls/32333 | 2020-02-28T14:37:28Z | 2020-02-29T03:07:24Z | 2020-02-29T03:07:24Z | 2020-03-02T10:20:40Z |
BUG: Rolling groupby should not maintain the by column in the resulting DataFrame | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 720ce7af47a18..60b1805abbdb4 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -167,6 +167,35 @@ key and type of :class:`Index`. These now consistently raise ``KeyError`` (:iss
...
KeyError: Timestamp('1970-01-01 00:00:00')
+GroupBy.rolling no longer returns grouped-by column in values
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Suppose we start with
+
+.. ipython:: python
+
+ df = pd.DataFrame({"A": [1, 1, 2, 3], "B": [0, 1, 2, 3]})
+ df
+
+*Previous behavior*:
+
+.. code-block:: ipython
+
+ In [1]: df.groupby("A").rolling(2).sum()
+ Out[1]:
+ A B
+ A
+ 1 0 NaN NaN
+ 1 2.0 1.0
+ 2 2 NaN NaN
+ 3 3 NaN NaN
+
+*New behavior*:
+
+.. ipython:: python
+
+ df.groupby("A").rolling(2).sum()
+
.. ---------------------------------------------------------------------------
.. _whatsnew_110.api_breaking.assignment_to_multiple_columns:
diff --git a/pandas/core/base.py b/pandas/core/base.py
index e1c6bef66239d..a7aa6b39ea96d 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -209,11 +209,18 @@ def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj, ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
- if len(self.exclusions) > 0:
- return self.obj.drop(self.exclusions, axis=1)
- else:
+ elif not self.exclusions or not isinstance(self.obj, ABCDataFrame):
return self.obj
+ # there may be elements in self.exclusions that are no longer
+ # in self.obj, see GH 32468
+ nlevels = self.obj.columns.nlevels
+ unique_column_names = {
+ j for i in range(nlevels) for j in self.obj.columns.get_level_values(i)
+ }
+ exclusions = self.exclusions.intersection(unique_column_names)
+ return self.obj.drop(exclusions, axis=1)
+
def __getitem__(self, key):
if self._selection is not None:
raise IndexError(f"Column(s) {self._selection} already selected")
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 19e51d05feb92..7cf6e9106ec0d 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1577,6 +1577,7 @@ def rolling(self, *args, **kwargs):
"""
from pandas.core.window import RollingGroupby
+ kwargs["exclusions"] = self.exclusions
return RollingGroupby(self, *args, **kwargs)
@Substitution(name="groupby")
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index ed0b816f64800..bbaa345c85970 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -35,6 +35,8 @@ def _dispatch(name: str, *args, **kwargs):
def outer(self, *args, **kwargs):
def f(x):
x = self._shallow_copy(x, groupby=self._groupby)
+ # patch for GH 32332
+ x.obj = x._obj_with_exclusions
return getattr(x, name)(*args, **kwargs)
return self._groupby.apply(f)
@@ -82,6 +84,8 @@ def _apply(
# TODO: can we de-duplicate with _dispatch?
def f(x, name=name, *args):
x = self._shallow_copy(x)
+ # patch for GH 32332
+ x.obj = x._obj_with_exclusions
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index 0ec876583dcde..2da3f4008a7f5 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -142,6 +142,7 @@ def __init__(
adjust=True,
ignore_na=False,
axis=0,
+ **kwargs,
):
self.obj = obj
self.com = _get_center_of_mass(com, span, halflife, alpha)
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 3784989de10ab..de20e61c304e3 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -93,6 +93,10 @@ def __init__(
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
self._numba_func_cache: Dict[Optional[str], Callable] = dict()
+ self.exclusions = kwargs.get("exclusions", set())
+
+ def _shallow_copy(self, obj: FrameOrSeries, **kwargs) -> ShallowMixin:
+ return super()._shallow_copy(obj, exclusions=self.exclusions, **kwargs)
@property
def _constructor(self):
@@ -1187,8 +1191,7 @@ def count(self):
closed=self.closed,
).sum()
results.append(result)
-
- return self._wrap_results(results, blocks, obj)
+ return self._wrap_results(results, blocks, obj, exclude=self.exclusions)
_shared_docs["apply"] = dedent(
r"""
@@ -1632,6 +1635,8 @@ def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
+ # patch for GH 32332
+ other.obj = other._obj_with_exclusions
# GH 16058: offset window
if self.is_freq_type:
@@ -1775,6 +1780,8 @@ def corr(self, other=None, pairwise=None, **kwargs):
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
+ # patch for GH 32332
+ other.obj = other._obj_with_exclusions
window = self._get_window(other) if not self.is_freq_type else self.win_freq
def _get_corr(a, b):
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index 5b2687271f9d6..f979099e21cb6 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -62,11 +62,15 @@ def test_rolling(self):
for f in ["sum", "mean", "min", "max", "count", "kurt", "skew"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.rolling(4), f)())
+ # groupby.apply doesn't drop the grouped-by column
+ expected = expected.drop("A", axis=1)
tm.assert_frame_equal(result, expected)
for f in ["std", "var"]:
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1))
+ # groupby.apply doesn't drop the grouped-by column
+ expected = expected.drop("A", axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
@@ -79,6 +83,8 @@ def test_rolling_quantile(self, interpolation):
expected = g.apply(
lambda x: x.rolling(4).quantile(0.4, interpolation=interpolation)
)
+ # groupby.apply doesn't drop the grouped-by column
+ expected = expected.drop("A", axis=1)
tm.assert_frame_equal(result, expected)
def test_rolling_corr_cov(self):
@@ -92,6 +98,8 @@ def func(x):
return getattr(x.rolling(4), f)(self.frame)
expected = g.apply(func)
+ # groupby.apply doesn't drop the grouped-by column
+ expected = expected.drop("A", axis=1)
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
@@ -109,6 +117,8 @@ def test_rolling_apply(self, raw):
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw))
+ # groupby.apply doesn't drop the grouped-by column
+ expected = expected.drop("A", axis=1)
tm.assert_frame_equal(result, expected)
def test_rolling_apply_mutability(self):
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index ab2c7fcb7a0dc..2e8d5a6772e08 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -465,3 +465,18 @@ def test_rolling_count_default_min_periods_with_null_values(constructor):
result = constructor(values).rolling(3).count()
expected = constructor(expected_counts)
tm.assert_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "columns", [pd.MultiIndex.from_tuples([("A", ""), ("B", "C")]), ["A", "B"]]
+)
+def test_by_column_not_in_values(columns):
+ # GH 32262
+ df = pd.DataFrame([[1, 0]] * 20 + [[2, 0]] * 12 + [[3, 0]] * 8, columns=columns)
+
+ g = df.groupby("A")
+ original_obj = g.obj.copy(deep=True)
+ r = g.rolling(4)
+ result = r.sum()
+ assert "A" not in result.columns
+ tm.assert_frame_equal(g.obj, original_obj) # check for side-effects
| - [x] closes #32262
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Does this seem like the correct idea? If so, I'll expand on tests / write whatsnew / think about a cleaner patch
EDIT
----
lots of unwanted changes here, just seeing what tests fail
EDIT
----
need to check the types make sense and see what's the proper way to write
```
kwargs["exclusions"] = self.exclusions
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/32332 | 2020-02-28T13:55:34Z | 2020-03-22T21:08:34Z | null | 2020-10-10T14:14:53Z |
CLN: _libs.interval looping with cdef index | diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 1166768472449..50ac055dbffc9 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -502,14 +502,14 @@ def intervals_to_interval_bounds(ndarray intervals,
"""
cdef:
object closed = None, interval
- int64_t n = len(intervals)
+ Py_ssize_t i, n = len(intervals)
ndarray left, right
bint seen_closed = False
left = np.empty(n, dtype=intervals.dtype)
right = np.empty(n, dtype=intervals.dtype)
- for i in range(len(intervals)):
+ for i in range(n):
interval = intervals[i]
if interval is None or util.is_nan(interval):
left[i] = np.nan
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32329 | 2020-02-28T12:28:47Z | 2020-02-28T16:41:26Z | 2020-02-28T16:41:26Z | 2020-02-29T10:25:01Z |
CLN: Removed unused variables defenition | diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 6141e2b78e9f4..0ba5cb7e9bc40 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -115,8 +115,6 @@ cdef class IndexEngine:
cdef _maybe_get_bool_indexer(self, object val):
cdef:
ndarray[uint8_t, ndim=1, cast=True] indexer
- ndarray[intp_t, ndim=1] found
- int count
indexer = self._get_index_values() == val
return self._unpack_bool_indexer(indexer, val)
| I don't see ```found``` and ```count``` being used anywhere in this function. | https://api.github.com/repos/pandas-dev/pandas/pulls/32328 | 2020-02-28T11:24:56Z | 2020-02-28T15:40:29Z | 2020-02-28T15:40:29Z | 2020-02-29T10:24:23Z |
TST/CLN: Follow-up to #31867 | diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index c966962a7c87d..fff5ca03e80f4 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -95,43 +95,14 @@ def test_scalar_non_numeric(self, index_func, klass):
s = gen_obj(klass, i)
# getting
- for idxr, getitem in [(lambda x: x.iloc, False), (lambda x: x, True)]:
+ with pytest.raises(KeyError, match="^3.0$"):
+ s[3.0]
- if getitem:
- error = KeyError
- msg = r"^3\.0?$"
- else:
- error = TypeError
- msg = (
- r"cannot do (label|positional) indexing "
- fr"on {type(i).__name__} with these indexers \[3\.0\] of "
- r"type float|"
- "Cannot index by location index with a "
- "non-integer key"
- )
- with pytest.raises(error, match=msg):
- idxr(s)[3.0]
-
- # label based can be a TypeError or KeyError
- if s.index.inferred_type in {
- "categorical",
- "string",
- "unicode",
- "mixed",
- "period",
- "timedelta64",
- "datetime64",
- }:
- error = KeyError
- msg = r"^3\.0$"
- else:
- error = TypeError
- msg = (
- r"cannot do (label|positional) indexing "
- fr"on {type(i).__name__} with these indexers \[3\.0\] of "
- "type float"
- )
- with pytest.raises(error, match=msg):
+ msg = "Cannot index by location index with a non-integer key"
+ with pytest.raises(TypeError, match=msg):
+ s.iloc[3.0]
+
+ with pytest.raises(KeyError, match="^3.0$"):
s.loc[3.0]
# contains
@@ -190,16 +161,12 @@ def test_scalar_with_mixed(self):
s2 = Series([1, 2, 3], index=["a", "b", "c"])
s3 = Series([1, 2, 3], index=["a", "b", 1.5])
- # lookup in a pure stringstr
- # with an invalid indexer
- msg = (
- r"cannot do label indexing "
- r"on Index with these indexers \[1\.0\] of "
- r"type float|"
- "Cannot index by location index with a non-integer key"
- )
+ # lookup in a pure string index with an invalid indexer
+
with pytest.raises(KeyError, match="^1.0$"):
s2[1.0]
+
+ msg = "Cannot index by location index with a non-integer key"
with pytest.raises(TypeError, match=msg):
s2.iloc[1.0]
| xref #31867
| https://api.github.com/repos/pandas-dev/pandas/pulls/32324 | 2020-02-28T09:50:32Z | 2020-02-29T00:56:20Z | 2020-02-29T00:56:20Z | 2020-02-29T13:31:31Z |
STY: spaces in wrong place | diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py
index f85d823cb2fac..06ba6cc34ad92 100644
--- a/pandas/tests/base/test_ops.py
+++ b/pandas/tests/base/test_ops.py
@@ -213,9 +213,9 @@ def test_value_counts_unique_nunique(self, index_or_series_obj):
if orig.duplicated().any():
pytest.xfail(
- "The test implementation isn't flexible enough to deal"
- " with duplicated values. This isn't a bug in the"
- " application code, but in the test code."
+ "The test implementation isn't flexible enough to deal "
+ "with duplicated values. This isn't a bug in the "
+ "application code, but in the test code."
)
# create repeated values, 'n'th element is repeated by n+1 times
@@ -279,9 +279,9 @@ def test_value_counts_unique_nunique_null(self, null_obj, index_or_series_obj):
pytest.skip("MultiIndex doesn't support isna")
elif orig.duplicated().any():
pytest.xfail(
- "The test implementation isn't flexible enough to deal"
- " with duplicated values. This isn't a bug in the"
- " application code, but in the test code."
+ "The test implementation isn't flexible enough to deal "
+ "with duplicated values. This isn't a bug in the "
+ "application code, but in the test code."
)
# special assign to the numpy array
| - [x] ref #30755
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/32323 | 2020-02-28T09:33:22Z | 2020-02-28T10:15:59Z | 2020-02-28T10:15:59Z | 2020-02-28T10:28:34Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.