title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
BUG/DEPR: deprecate Categorical take default behaviour + fix Series[categorical].take | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index c128058858c17..2440a8d586c96 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -891,6 +891,7 @@ Deprecations
removed in a future version (:issue:`20419`).
- ``DatetimeIndex.offset`` is deprecated. Use ``DatetimeIndex.freq`` instead (:issue:`20716`)
- ``Index.get_duplicates()`` is deprecated and will be removed in a future version (:issue:`20239`)
+- The previous default behavior of negative indices in ``Categorical.take`` is deprecated. In a future version it will change from meaning missing values to meaning positional indices from the right. The future behavior is consistent with :meth:`Series.take` (:issue:`20664`).
.. _whatsnew_0230.prior_deprecations:
@@ -1024,6 +1025,7 @@ Categorical
- Bug in ``Categorical.__iter__`` not converting to Python types (:issue:`19909`)
- Bug in :func:`pandas.factorize` returning the unique codes for the ``uniques``. This now returns a ``Categorical`` with the same dtype as the input (:issue:`19721`)
- Bug in :func:`pandas.factorize` including an item for missing values in the ``uniques`` return value (:issue:`19721`)
+- Bug in :meth:`Series.take` with categorical data interpreting ``-1`` in `indices` as missing value markers, rather than the last element of the Series (:issue:`20664`)
Datetimelike
^^^^^^^^^^^^
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 7f0d54de9def8..517c21cc1bc3a 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2,6 +2,7 @@
import numpy as np
from warnings import warn
+import textwrap
import types
from pandas import compat
@@ -29,7 +30,7 @@
is_scalar,
is_dict_like)
-from pandas.core.algorithms import factorize, take_1d, unique1d
+from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
@@ -48,6 +49,17 @@
from .base import ExtensionArray
+_take_msg = textwrap.dedent("""\
+ Interpreting negative values in 'indexer' as missing values.
+ In the future, this will change to meaning positional indicies
+ from the right.
+
+ Use 'allow_fill=True' to retain the previous behavior and silence this
+ warning.
+
+ Use 'allow_fill=False' to accept the new behavior.""")
+
+
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
@@ -1732,17 +1744,49 @@ def fillna(self, value=None, method=None, limit=None):
return self._constructor(values, categories=self.categories,
ordered=self.ordered, fastpath=True)
- def take_nd(self, indexer, allow_fill=True, fill_value=None):
- """ Take the codes by the indexer, fill with the fill_value.
-
- For internal compatibility with numpy arrays.
+ def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
+ Take elements from the Categorical.
- # filling must always be None/nan here
- # but is passed thru internally
- assert isna(fill_value)
+ Parameters
+ ----------
+ indexer : sequence of integers
+ allow_fill : bool, default None.
+ How to handle negative values in `indexer`.
- codes = take_1d(self._codes, indexer, allow_fill=True, fill_value=-1)
+ * False: negative values in `indices` indicate positional indices
+ from the right. This is similar to
+ :func:`numpy.take`.
+
+ * True: negative values in `indices` indicate missing values
+ (the default). These values are set to `fill_value`. Any other
+ other negative values raise a ``ValueError``.
+
+ .. versionchanged:: 0.23.0
+
+ Deprecated the default value of `allow_fill`. The deprecated
+ default is ``True``. In the future, this will change to
+ ``False``.
+
+ Returns
+ -------
+ Categorical
+ This Categorical will have the same categories and ordered as
+ `self`.
+ """
+ indexer = np.asarray(indexer, dtype=np.intp)
+ if allow_fill is None:
+ if (indexer < 0).any():
+ warn(_take_msg, FutureWarning, stacklevel=2)
+ allow_fill = True
+
+ if isna(fill_value):
+ # For categorical, any NA value is considered a user-facing
+ # NA value. Our storage NA value is -1.
+ fill_value = -1
+
+ codes = take(self._codes, indexer, allow_fill=allow_fill,
+ fill_value=fill_value)
result = self._constructor(codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
return result
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 7abd95c68ea2b..a14f3299e11e9 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3499,7 +3499,14 @@ def _take(self, indices, axis=0, convert=True, is_copy=False):
indices = _ensure_platform_int(indices)
new_index = self.index.take(indices)
- new_values = self._values.take(indices)
+
+ if is_categorical_dtype(self):
+ # https://github.com/pandas-dev/pandas/issues/20664
+ # TODO: remove when the default Categorical.take behavior changes
+ kwargs = {'allow_fill': False}
+ else:
+ kwargs = {}
+ new_values = self._values.take(indices, **kwargs)
result = (self._constructor(new_values, index=new_index,
fastpath=True).__finalize__(self))
diff --git a/pandas/tests/categorical/conftest.py b/pandas/tests/categorical/conftest.py
new file mode 100644
index 0000000000000..274389d484995
--- /dev/null
+++ b/pandas/tests/categorical/conftest.py
@@ -0,0 +1,13 @@
+import pytest
+
+
+@pytest.fixture(params=[True, False])
+def allow_fill(request):
+ """Boolean 'allow_fill' parameter for Categorical.take"""
+ return request.param
+
+
+@pytest.fixture(params=[True, False])
+def ordered(request):
+ """Boolean 'ordered' parameter for Categorical."""
+ return request.param
diff --git a/pandas/tests/categorical/test_algos.py b/pandas/tests/categorical/test_algos.py
index 1c68377786dd4..dcf2081ae32fe 100644
--- a/pandas/tests/categorical/test_algos.py
+++ b/pandas/tests/categorical/test_algos.py
@@ -69,3 +69,45 @@ def test_isin_empty(empty):
result = s.isin(empty)
tm.assert_numpy_array_equal(expected, result)
+
+
+class TestTake(object):
+ # https://github.com/pandas-dev/pandas/issues/20664
+
+ def test_take_warns(self):
+ cat = pd.Categorical(['a', 'b'])
+ with tm.assert_produces_warning(FutureWarning):
+ cat.take([0, -1])
+
+ def test_take_positive_no_warning(self):
+ cat = pd.Categorical(['a', 'b'])
+ with tm.assert_produces_warning(None):
+ cat.take([0, 0])
+
+ def test_take_bounds(self, allow_fill):
+ # https://github.com/pandas-dev/pandas/issues/20664
+ cat = pd.Categorical(['a', 'b', 'a'])
+ with pytest.raises(IndexError):
+ cat.take([4, 5], allow_fill=allow_fill)
+
+ def test_take_empty(self, allow_fill):
+ # https://github.com/pandas-dev/pandas/issues/20664
+ cat = pd.Categorical([], categories=['a', 'b'])
+ with pytest.raises(IndexError):
+ cat.take([0], allow_fill=allow_fill)
+
+ def test_positional_take(self, ordered):
+ cat = pd.Categorical(['a', 'a', 'b', 'b'], categories=['b', 'a'],
+ ordered=ordered)
+ result = cat.take([0, 1, 2], allow_fill=False)
+ expected = pd.Categorical(['a', 'a', 'b'], categories=cat.categories,
+ ordered=ordered)
+ tm.assert_categorical_equal(result, expected)
+
+ def test_positional_take_unobserved(self, ordered):
+ cat = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'],
+ ordered=ordered)
+ result = cat.take([1, 0], allow_fill=False)
+ expected = pd.Categorical(['b', 'a'], categories=cat.categories,
+ ordered=ordered)
+ tm.assert_categorical_equal(result, expected)
diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py
index c34339c99322d..8685819f369b9 100644
--- a/pandas/tests/extension/category/test_categorical.py
+++ b/pandas/tests/extension/category/test_categorical.py
@@ -111,7 +111,7 @@ def test_take_non_na_fill_value(self):
def test_take_out_of_bounds_raises(self):
pass
- @skip_take
+ @pytest.mark.skip(reason="GH-20747. Unobserved categories.")
def test_take_series(self):
pass
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 1fdb7298eefc4..b7ac8033f3f6d 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -32,7 +32,7 @@ def data():
while len(data[0]) == len(data[1]):
data = make_data()
- return JSONArray(make_data())
+ return JSONArray(data)
@pytest.fixture
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 5cc1a8ff1c451..8571fbc10e9bb 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -753,6 +753,16 @@ def test_take():
s.take([-1, 3, 4], convert=False)
+def test_take_categorical():
+ # https://github.com/pandas-dev/pandas/issues/20664
+ s = Series(pd.Categorical(['a', 'b', 'c']))
+ result = s.take([-2, -2, 0])
+ expected = Series(pd.Categorical(['b', 'b', 'a'],
+ categories=['a', 'b', 'c']),
+ index=[1, 1, 0])
+ assert_series_equal(result, expected)
+
+
def test_head_tail(test_data):
assert_series_equal(test_data.series.head(), test_data.series[:5])
assert_series_equal(test_data.series.head(0), test_data.series[0:0])
| closes #20664
I might be unavailable for the next few hours, so I'm just putting this up here, even though it includes changes from https://github.com/pandas-dev/pandas/pull/20814 in the first commit. Once that is merged, this can be merged on top of master and it'll just have the categorical changes. | https://api.github.com/repos/pandas-dev/pandas/pulls/20841 | 2018-04-27T11:17:58Z | 2018-04-30T11:55:36Z | 2018-04-30T11:55:35Z | 2018-04-30T12:04:25Z |
BUG: concat of Series of EA and other dtype fails | diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 377ef7ad7e4f8..4aa74cdbbc2c0 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -175,7 +175,7 @@ def is_nonempty(x):
return _concat_sparse(to_concat, axis=axis, typs=typs)
extensions = [is_extension_array_dtype(x) for x in to_concat]
- if any(extensions):
+ if any(extensions) and axis == 1:
to_concat = [np.atleast_2d(x.astype('object')) for x in to_concat]
if not nonempty:
diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py
index cc78321dea7af..fe920a47ab740 100644
--- a/pandas/tests/extension/base/reshaping.py
+++ b/pandas/tests/extension/base/reshaping.py
@@ -64,6 +64,31 @@ def test_concat_mixed_dtypes(self, data):
expected = pd.concat([df1.astype('object'), df2.astype('object')])
self.assert_frame_equal(result, expected)
+ result = pd.concat([df1['A'], df2['A']])
+ expected = pd.concat([df1['A'].astype('object'),
+ df2['A'].astype('object')])
+ self.assert_series_equal(result, expected)
+
+ def test_concat_columns(self, data, na_value):
+ df1 = pd.DataFrame({'A': data[:3]})
+ df2 = pd.DataFrame({'B': [1, 2, 3]})
+
+ expected = pd.DataFrame({'A': data[:3], 'B': [1, 2, 3]})
+ result = pd.concat([df1, df2], axis=1)
+ self.assert_frame_equal(result, expected)
+ result = pd.concat([df1['A'], df2['B']], axis=1)
+ self.assert_frame_equal(result, expected)
+
+ # non-aligned
+ df2 = pd.DataFrame({'B': [1, 2, 3]}, index=[1, 2, 3])
+ expected = pd.DataFrame({
+ 'A': data._from_sequence(list(data[:3]) + [na_value]),
+ 'B': [np.nan, 1, 2, 3]})
+ result = pd.concat([df1, df2], axis=1)
+ self.assert_frame_equal(result, expected)
+ result = pd.concat([df1['A'], df2['B']], axis=1)
+ self.assert_frame_equal(result, expected)
+
def test_align(self, data, na_value):
a = data[:3]
b = data[2:5]
diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py
index 6ebe700f13be0..579dad78579a0 100644
--- a/pandas/tests/extension/category/test_categorical.py
+++ b/pandas/tests/extension/category/test_categorical.py
@@ -67,6 +67,10 @@ class TestConstructors(base.BaseConstructorsTests):
class TestReshaping(base.BaseReshapingTests):
+ @pytest.mark.skip(reason="Unobserved categories preseved in concat.")
+ def test_concat_columns(self, data, na_value):
+ pass
+
@pytest.mark.skip(reason="Unobserved categories preseved in concat.")
def test_align(self, data, na_value):
pass
| Closes #20832 | https://api.github.com/repos/pandas-dev/pandas/pulls/20840 | 2018-04-27T09:43:18Z | 2018-04-29T21:06:25Z | 2018-04-29T21:06:24Z | 2018-04-30T11:46:55Z |
Handle duplicate column names in select_dtypes and get_dummies | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 5a0b4bb20f774..eb6c212731822 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -1359,6 +1359,7 @@ Reshaping
- Bug in :meth:`DataFrame.astype` where column metadata is lost when converting to categorical or a dictionary of dtypes (:issue:`19920`)
- Bug in :func:`cut` and :func:`qcut` where timezone information was dropped (:issue:`19872`)
- Bug in :class:`Series` constructor with a ``dtype=str``, previously raised in some cases (:issue:`19853`)
+- Bug in :func:`get_dummies`, and :func:`select_dtypes`, where duplicate column names caused incorrect behavior (:issue:`20848`)
Other
^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ffb124af4f5fc..ffb2ad046158f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3076,15 +3076,15 @@ def select_dtypes(self, include=None, exclude=None):
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
- def is_dtype_instance_mapper(column, dtype):
- return column, functools.partial(issubclass, dtype.type)
+ def is_dtype_instance_mapper(idx, dtype):
+ return idx, functools.partial(issubclass, dtype.type)
- for column, f in itertools.starmap(is_dtype_instance_mapper,
- self.dtypes.iteritems()):
+ for idx, f in itertools.starmap(is_dtype_instance_mapper,
+ enumerate(self.dtypes)):
if include: # checks for the case of empty include or exclude
- include_these[column] = any(map(f, include))
+ include_these.iloc[idx] = any(map(f, include))
if exclude:
- exclude_these[column] = not any(map(f, exclude))
+ exclude_these.iloc[idx] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[com._get_info_slice(self, dtype_indexer)]
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 389f1af48434a..0829aa8f5a509 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -821,14 +821,15 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
from pandas.core.reshape.concat import concat
from itertools import cycle
+ dtypes_to_encode = ['object', 'category']
+
if isinstance(data, DataFrame):
# determine columns being encoded
-
if columns is None:
- columns_to_encode = data.select_dtypes(
- include=['object', 'category']).columns
+ data_to_encode = data.select_dtypes(
+ include=dtypes_to_encode)
else:
- columns_to_encode = columns
+ data_to_encode = data[columns]
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
@@ -836,35 +837,45 @@ def check_len(item, name):
"length of the columns being encoded ({len_enc}).")
if is_list_like(item):
- if not len(item) == len(columns_to_encode):
- len_msg = len_msg.format(name=name, len_item=len(item),
- len_enc=len(columns_to_encode))
+ if not len(item) == data_to_encode.shape[1]:
+ len_msg = \
+ len_msg.format(name=name, len_item=len(item),
+ len_enc=data_to_encode.shape[1])
raise ValueError(len_msg)
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
+
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
- prefix = [prefix[col] for col in columns_to_encode]
+ prefix = [prefix[col] for col in data_to_encode.columns]
if prefix is None:
- prefix = columns_to_encode
+ prefix = data_to_encode.columns
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
- prefix_sep = [prefix_sep[col] for col in columns_to_encode]
+ prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
- if set(columns_to_encode) == set(data.columns):
+ if data_to_encode.shape == data.shape:
+ # Encoding the entire df, do not prepend any dropped columns
with_dummies = []
+ elif columns is not None:
+ # Encoding only cols specified in columns. Get all cols not in
+ # columns to prepend to result.
+ with_dummies = [data.drop(columns, axis=1)]
else:
- with_dummies = [data.drop(columns_to_encode, axis=1)]
-
- for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
-
- dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
+ # Encoding only object and category dtype columns. Get remaining
+ # columns to prepend to result.
+ with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
+
+ for (col, pre, sep) in zip(data_to_encode.iteritems(), prefix,
+ prefix_sep):
+ # col is (column_name, column), use just column data here
+ dummy = _get_dummies_1d(col[1], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
drop_first=drop_first, dtype=dtype)
with_dummies.append(dummy)
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index 152159965036d..4c9f8c2ea0980 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -287,6 +287,23 @@ def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
ei = df[['b', 'c', 'f', 'k']]
assert_frame_equal(ri, ei)
+ def test_select_dtypes_duplicate_columns(self):
+ # GH20839
+ odict = compat.OrderedDict
+ df = DataFrame(odict([('a', list('abc')),
+ ('b', list(range(1, 4))),
+ ('c', np.arange(3, 6).astype('u1')),
+ ('d', np.arange(4.0, 7.0, dtype='float64')),
+ ('e', [True, False, True]),
+ ('f', pd.date_range('now', periods=3).values)]))
+ df.columns = ['a', 'a', 'b', 'b', 'b', 'c']
+
+ expected = DataFrame({'a': list(range(1, 4)),
+ 'b': np.arange(3, 6).astype('u1')})
+
+ result = df.select_dtypes(include=[np.number], exclude=['floating'])
+ assert_frame_equal(result, expected)
+
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py
index c4d925b83585b..295801f3e8def 100644
--- a/pandas/tests/reshape/test_reshape.py
+++ b/pandas/tests/reshape/test_reshape.py
@@ -465,6 +465,21 @@ def test_get_dummies_dont_sparsify_all_columns(self, sparse):
tm.assert_frame_equal(df[['GDP']], df2)
+ def test_get_dummies_duplicate_columns(self, df):
+ # GH20839
+ df.columns = ["A", "A", "A"]
+ result = get_dummies(df).sort_index(axis=1)
+
+ expected = DataFrame([[1, 1, 0, 1, 0],
+ [2, 0, 1, 1, 0],
+ [3, 1, 0, 0, 1]],
+ columns=['A', 'A_a', 'A_b', 'A_b', 'A_c'],
+ dtype=np.uint8).sort_index(axis=1)
+
+ expected = expected.astype({"A": np.int64})
+
+ tm.assert_frame_equal(result, expected)
+
class TestCategoricalReshape(object):
| Functions `select_dtypes` and `get_dummies` previously failed on DataFrames with duplicate column names and had strange behavior as shown below. This PR fixes that strange behavior.
Previous behavior:
```python
In [6]: df
Out[6]:
col1 col1
0 1 a
1 2 b
In [7]: df.select_dtypes(include=['int'])
Out[7]:
Empty DataFrame
Columns: []
Index: [0, 1]
In [8]: pd.get_dummies(df)
Out[8]:
col1_('c', 'o', 'l', '1') col1_('c', 'o', 'l', '1')
0 1 1
1 1 1
```
New behavior:
```python
In [6]: df
Out[6]:
col1 col1
0 1 a
1 2 b
In [7]: df.select_dtypes(include=['int'])
Out[7]:
col1
0 1
1 2
In [8]: pd.get_dummies(df)
Out[8]:
col1 col1_a col1_b
0 1 1 0
1 2 0 1
```
- [x] closes #20848
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/20839 | 2018-04-27T07:48:11Z | 2018-05-05T12:59:06Z | 2018-05-05T12:59:06Z | 2018-05-06T06:12:05Z |
PERF: Faster Series.__getattribute__ | diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index 478aba278029c..3f6522c3403d9 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -121,3 +121,16 @@ def setup(self):
def time_dir_strings(self):
dir(self.s)
+
+
+class SeriesGetattr(object):
+ # https://github.com/pandas-dev/pandas/issues/19764
+ goal_time = 0.2
+
+ def setup(self):
+ self.s = Series(1,
+ index=date_range("2012-01-01", freq='s',
+ periods=int(1e6)))
+
+ def time_series_datetimeindex_repr(self):
+ getattr(self.s, 'a', None)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index ffa4f1068f84d..c2fdd9bac46e6 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -958,6 +958,7 @@ Performance Improvements
- Improved performance of :func:`pandas.core.groupby.GroupBy.any` and :func:`pandas.core.groupby.GroupBy.all` (:issue:`15435`)
- Improved performance of :func:`pandas.core.groupby.GroupBy.pct_change` (:issue:`19165`)
- Improved performance of :func:`Series.isin` in the case of categorical dtypes (:issue:`20003`)
+- Improved performance of ``getattr(Series, attr)`` when the Series has certain index types. This manifiested in slow printing of large Series with a ``DatetimeIndex`` (:issue:`19764`)
- Fixed a performance regression for :func:`GroupBy.nth` and :func:`GroupBy.last` with some object columns (:issue:`19283`)
.. _whatsnew_0230.docs:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 86342b6996abf..75216f12e5739 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4375,7 +4375,7 @@ def __getattr__(self, name):
name in self._accessors):
return object.__getattribute__(self, name)
else:
- if name in self._info_axis:
+ if self._info_axis._can_hold_identifiers_and_holds_name(name):
return self[name]
return object.__getattribute__(self, name)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2ceec1592d49b..f208687a0cf1b 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2083,6 +2083,19 @@ def __getitem__(self, key):
else:
return result
+ def _can_hold_identifiers_and_holds_name(self, name):
+ """
+ Faster check for ``name in self`` when we know `name` is a Python
+ identifier (e.g. in NDFrame.__getattr__, which hits this to support
+ . key lookup). For indexes that can't hold identifiers (everything
+ but object & categorical) we just return False.
+
+ https://github.com/pandas-dev/pandas/issues/19764
+ """
+ if self.is_object() or self.is_categorical():
+ return name in self
+ return False
+
def append(self, other):
"""
Append a collection of Index options together
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index 7d01a2a70145d..e32e18ea0ec4a 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -8,6 +8,11 @@
class DatetimeLike(Base):
+ def test_can_hold_identifiers(self):
+ idx = self.create_index()
+ key = idx[0]
+ assert idx._can_hold_identifiers_and_holds_name(key) is False
+
def test_shift_identity(self):
idx = self.create_index()
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 7cd880b51661d..95ddf1f6cd8bd 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -66,6 +66,11 @@ def generate_index_types(self, skip_index_keys=[]):
if key not in skip_index_keys:
yield key, idx
+ def test_can_hold_identifiers(self):
+ idx = self.create_index()
+ key = idx[0]
+ assert idx._can_hold_identifiers_and_holds_name(key) is True
+
def test_new_axis(self):
new_index = self.dateIndex[None, :]
assert new_index.ndim == 2
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index e9fddfde90348..6a1a1a5bdba4f 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -33,6 +33,11 @@ def create_index(self, categories=None, ordered=False):
return CategoricalIndex(
list('aabbca'), categories=categories, ordered=ordered)
+ def test_can_hold_identifiers(self):
+ idx = self.create_index(categories=list('abcd'))
+ key = idx[0]
+ assert idx._can_hold_identifiers_and_holds_name(key) is True
+
def test_construction(self):
ci = self.create_index(categories=list('abcd'))
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index da11ac645ae07..37f70090c179f 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -48,6 +48,11 @@ def setup_method(self, method):
def create_index(self):
return self.index
+ def test_can_hold_identifiers(self):
+ idx = self.create_index()
+ key = idx[0]
+ assert idx._can_hold_identifiers_and_holds_name(key) is True
+
def test_boolean_context_compat2(self):
# boolean context compat
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index bafb6ae2e45f4..49322d9b7abd6 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -64,6 +64,11 @@ def test_index_rdiv_timedelta(self, scalar_td, index):
class Numeric(Base):
+ def test_can_hold_identifiers(self):
+ idx = self.create_index()
+ key = idx[0]
+ assert idx._can_hold_identifiers_and_holds_name(key) is False
+
def test_numeric_compat(self):
pass # override Base method
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 8990834ebe91a..38f4b341116b8 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -44,6 +44,11 @@ def check_binop(self, ops, scalars, idxs):
expected = op(Int64Index(idx), scalar)
tm.assert_index_equal(result, expected)
+ def test_can_hold_identifiers(self):
+ idx = self.create_index()
+ key = idx[0]
+ assert idx._can_hold_identifiers_and_holds_name(key) is False
+
def test_binops(self):
ops = [operator.add, operator.sub, operator.mul, operator.floordiv,
operator.truediv]
| Closes #19764 | https://api.github.com/repos/pandas-dev/pandas/pulls/20834 | 2018-04-26T19:51:13Z | 2018-05-01T06:45:20Z | 2018-05-01T06:45:19Z | 2019-05-06T19:37:36Z |
TST: Fixed failures in JSON asserts | diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 95f868e89ac39..2e75bb3b8c326 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -105,6 +105,12 @@ def take(self, indexer, allow_fill=True, fill_value=None):
def copy(self, deep=False):
return type(self)(self.data[:])
+ def astype(self, dtype, copy=True):
+ # NumPy has issues when all the dicts are the same length.
+ # np.array([UserDict(...), UserDict(...)]) fails,
+ # but np.array([{...}, {...}]) works, so cast.
+ return np.array([dict(x) for x in self], dtype=dtype, copy=copy)
+
def unique(self):
# Parent method doesn't work since np.array will try to infer
# a 2-dim object.
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index dcf08440738e7..0ef34c3b0f679 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -1,8 +1,10 @@
import operator
+import collections
import pytest
-
+import pandas as pd
+import pandas.util.testing as tm
from pandas.compat import PY2, PY36
from pandas.tests.extension import base
@@ -59,27 +61,76 @@ def data_for_grouping():
])
-class TestDtype(base.BaseDtypeTests):
+class BaseJSON(object):
+ # NumPy doesn't handle an array of equal-length UserDicts.
+ # The default assert_series_equal eventually does a
+ # Series.values, which raises. We work around it by
+ # converting the UserDicts to dicts.
+ def assert_series_equal(self, left, right, **kwargs):
+ if left.dtype.name == 'json':
+ assert left.dtype == right.dtype
+ left = pd.Series(JSONArray(left.values.astype(object)),
+ index=left.index, name=left.name)
+ right = pd.Series(JSONArray(right.values.astype(object)),
+ index=right.index, name=right.name)
+ tm.assert_series_equal(left, right, **kwargs)
+
+ def assert_frame_equal(self, left, right, *args, **kwargs):
+ tm.assert_index_equal(
+ left.columns, right.columns,
+ exact=kwargs.get('check_column_type', 'equiv'),
+ check_names=kwargs.get('check_names', True),
+ check_exact=kwargs.get('check_exact', False),
+ check_categorical=kwargs.get('check_categorical', True),
+ obj='{obj}.columns'.format(obj=kwargs.get('obj', 'DataFrame')))
+
+ jsons = (left.dtypes == 'json').index
+
+ for col in jsons:
+ self.assert_series_equal(left[col], right[col],
+ *args, **kwargs)
+
+ left = left.drop(columns=jsons)
+ right = right.drop(columns=jsons)
+ tm.assert_frame_equal(left, right, *args, **kwargs)
+
+
+class TestDtype(BaseJSON, base.BaseDtypeTests):
pass
-class TestInterface(base.BaseInterfaceTests):
- pass
+class TestInterface(BaseJSON, base.BaseInterfaceTests):
+ def test_custom_asserts(self):
+ # This would always trigger the KeyError from trying to put
+ # an array of equal-length UserDicts inside an ndarray.
+ data = JSONArray([collections.UserDict({'a': 1}),
+ collections.UserDict({'b': 2}),
+ collections.UserDict({'c': 3})])
+ a = pd.Series(data)
+ self.assert_series_equal(a, a)
+ self.assert_frame_equal(a.to_frame(), a.to_frame())
+
+ b = pd.Series(data.take([0, 0, 1]))
+ with pytest.raises(AssertionError):
+ self.assert_series_equal(a, b)
+
+ with pytest.raises(AssertionError):
+ self.assert_frame_equal(a.to_frame(), b.to_frame())
-class TestConstructors(base.BaseConstructorsTests):
+class TestConstructors(BaseJSON, base.BaseConstructorsTests):
pass
-class TestReshaping(base.BaseReshapingTests):
+class TestReshaping(BaseJSON, base.BaseReshapingTests):
pass
-class TestGetitem(base.BaseGetitemTests):
+class TestGetitem(BaseJSON, base.BaseGetitemTests):
pass
-class TestMissing(base.BaseMissingTests):
+class TestMissing(BaseJSON, base.BaseMissingTests):
@pytest.mark.xfail(reason="Setting a dict as a scalar")
def test_fillna_series(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
@@ -94,7 +145,7 @@ def test_fillna_frame(self):
reason="Dictionary order unstable")
-class TestMethods(base.BaseMethodsTests):
+class TestMethods(BaseJSON, base.BaseMethodsTests):
@unhashable
def test_value_counts(self, all_data, dropna):
pass
@@ -126,7 +177,7 @@ def test_sort_values_missing(self, data_missing_for_sorting, ascending):
data_missing_for_sorting, ascending)
-class TestCasting(base.BaseCastingTests):
+class TestCasting(BaseJSON, base.BaseCastingTests):
@pytest.mark.xfail
def test_astype_str(self):
"""This currently fails in NumPy on np.array(self, dtype=str) with
@@ -139,7 +190,7 @@ def test_astype_str(self):
# internals has trouble setting sequences of values into scalar positions.
-class TestGroupby(base.BaseGroupbyTests):
+class TestGroupby(BaseJSON, base.BaseGroupbyTests):
@unhashable
def test_groupby_extension_transform(self):
| Fixes an occasional failure in the json tests. They'd fail when the
Series held objects of equal length.
```pytb
pandas/tests/extension/json/test_json.py:114:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
pandas/util/testing.py:1224: in assert_series_equal
_testing.assert_almost_equal(left.get_values(), right.get_values(),
pandas/core/series.py:466: in get_values
return self._data.get_values()
pandas/core/internals.py:4742: in get_values
return np.array(self._block.to_dense(), copy=False)
pandas/core/internals.py:1940: in to_dense
return np.asarray(self.values)
../../Envs/pandas-dev/lib/python3.6/site-packages/numpy/numpy/core/numeric.py:500: in asarray
return array(a, dtype, copy=False, order=order)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = {'a': 1}, key = 0
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
> raise KeyError(key)
E KeyError: 0
```
Work around that by converting to dicts before comparing. | https://api.github.com/repos/pandas-dev/pandas/pulls/20827 | 2018-04-26T03:15:50Z | 2018-04-26T12:18:15Z | 2018-04-26T12:18:15Z | 2018-04-26T12:18:18Z |
Fixed read_csv with CategoricalDtype with boolean categories (20498) | diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 4ff3cc728f7f7..9b32034defda0 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1369,6 +1369,7 @@ Current Behavior:
Notice how we now instead output ``np.nan`` itself instead of a stringified form of it.
+- Bug in :func:`read_csv` in which a column specified with ``CategoricalDtype`` of boolean categories was not being correctly coerced from string values to booleans (:issue:`20498`)
- Bug in :meth:`to_sql` when writing timezone aware data (``datetime64[ns, tz]`` dtype) would raise a ``TypeError`` (:issue:`9086`)
- Bug in :meth:`to_sql` where a naive DatetimeIndex would be written as ``TIMESTAMP WITH TIMEZONE`` type in supported databases, e.g. PostgreSQL (:issue:`23510`)
- Bug in :meth:`read_excel()` when ``parse_cols`` is specified with an empty dataset (:issue:`9208`)
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index f74de79542628..1dc71264c94dd 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -1202,7 +1202,20 @@ cdef class TextReader:
bint user_dtype,
kh_str_t *na_hashset,
object na_flist):
- if is_integer_dtype(dtype):
+ if is_categorical_dtype(dtype):
+ # TODO: I suspect that _categorical_convert could be
+ # optimized when dtype is an instance of CategoricalDtype
+ codes, cats, na_count = _categorical_convert(
+ self.parser, i, start, end, na_filter,
+ na_hashset, self.c_encoding)
+
+ # Method accepts list of strings, not encoded ones.
+ true_values = [x.decode() for x in self.true_values]
+ cat = Categorical._from_inferred_categories(
+ cats, codes, dtype, true_values=true_values)
+ return cat, na_count
+
+ elif is_integer_dtype(dtype):
try:
result, na_count = _try_int64(self.parser, i, start,
end, na_filter, na_hashset)
@@ -1233,6 +1246,7 @@ cdef class TextReader:
na_filter, na_hashset,
self.true_set, self.false_set)
return result, na_count
+
elif dtype.kind == 'S':
# TODO: na handling
width = dtype.itemsize
@@ -1252,15 +1266,6 @@ cdef class TextReader:
# unicode variable width
return self._string_convert(i, start, end, na_filter,
na_hashset)
- elif is_categorical_dtype(dtype):
- # TODO: I suspect that _categorical_convert could be
- # optimized when dtype is an instance of CategoricalDtype
- codes, cats, na_count = _categorical_convert(
- self.parser, i, start, end, na_filter,
- na_hashset, self.c_encoding)
- cat = Categorical._from_inferred_categories(cats, codes, dtype)
- return cat, na_count
-
elif is_object_dtype(dtype):
return self._string_convert(i, start, end, na_filter,
na_hashset)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 6dc3a960dc817..59ad386f797dc 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -546,8 +546,9 @@ def base(self):
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
- dtype):
- """Construct a Categorical from inferred values
+ dtype, true_values=None):
+ """
+ Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
@@ -555,10 +556,12 @@ def _from_inferred_categories(cls, inferred_categories, inferred_codes,
Parameters
----------
-
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
+ true_values : list, optional
+ If none are provided, the default ones are
+ "True", "TRUE", and "true."
Returns
-------
@@ -567,27 +570,32 @@ def _from_inferred_categories(cls, inferred_categories, inferred_codes,
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
-
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
- # Convert to a specialzed type with `dtype` if specified
+ # Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
- cats = to_numeric(inferred_categories, errors='coerce')
+ cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
- cats = to_datetime(inferred_categories, errors='coerce')
+ cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
- cats = to_timedelta(inferred_categories, errors='coerce')
+ cats = to_timedelta(inferred_categories, errors="coerce")
+ elif dtype.categories.is_boolean():
+ if true_values is None:
+ true_values = ["True", "TRUE", "true"]
+
+ cats = cats.isin(true_values)
if known_categories:
- # recode from observation order to dtype.categories order
+ # Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
- # sort categories and recode for unknown categories
+ # Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
+
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index acb9bca2545c0..990709e20f1e0 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1745,8 +1745,8 @@ def _cast_types(self, values, cast_type, column):
cats = Index(values).unique().dropna()
values = Categorical._from_inferred_categories(
- cats, cats.get_indexer(values), cast_type
- )
+ cats, cats.get_indexer(values), cast_type,
+ true_values=self.true_values)
else:
try:
diff --git a/pandas/tests/io/parser/test_dtypes.py b/pandas/tests/io/parser/test_dtypes.py
index 17cd0ab16ea61..caa03fc3685f6 100644
--- a/pandas/tests/io/parser/test_dtypes.py
+++ b/pandas/tests/io/parser/test_dtypes.py
@@ -324,6 +324,22 @@ def test_categorical_coerces_timedelta(all_parsers):
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize("data", [
+ "b\nTrue\nFalse\nNA\nFalse",
+ "b\ntrue\nfalse\nNA\nfalse",
+ "b\nTRUE\nFALSE\nNA\nFALSE",
+ "b\nTrue\nFalse\nNA\nFALSE",
+])
+def test_categorical_dtype_coerces_boolean(all_parsers, data):
+ # see gh-20498
+ parser = all_parsers
+ dtype = {"b": CategoricalDtype([False, True])}
+ expected = DataFrame({"b": Categorical([True, False, None, False])})
+
+ result = parser.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+
+
def test_categorical_unexpected_categories(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype(["a", "b", "d", "e"])}
| - [x] closes #20498
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/20826 | 2018-04-25T22:01:21Z | 2018-11-27T16:17:27Z | 2018-11-27T16:17:26Z | 2018-11-27T16:17:32Z |
DOC: Removed duplicate returns section | diff --git a/pandas/core/window.py b/pandas/core/window.py
index 96630258c3e50..d7f9f7c85fbbc 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -1271,12 +1271,12 @@ def kurt(self, **kwargs):
check_minp=_require_min_periods(4), **kwargs)
_shared_docs['quantile'] = dedent("""
- %(name)s quantile
+ %(name)s quantile.
Parameters
----------
quantile : float
- 0 <= quantile <= 1
+ Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.23.0
@@ -1289,6 +1289,9 @@ def kurt(self, **kwargs):
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
+ **kwargs:
+ For compatibility with other %(name)s methods. Has no effect on
+ the result.
Returns
-------
@@ -1298,7 +1301,7 @@ def kurt(self, **kwargs):
Examples
--------
- >>> s = Series([1, 2, 3, 4])
+ >>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).quantile(.4, interpolation='lower')
0 NaN
1 1.0
@@ -1319,7 +1322,6 @@ def kurt(self, **kwargs):
in Series.
pandas.DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
-
""")
def quantile(self, quantile, interpolation='linear', **kwargs):
@@ -1656,7 +1658,6 @@ def kurt(self, **kwargs):
return super(Rolling, self).kurt(**kwargs)
@Substitution(name='rolling')
- @Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, interpolation='linear', **kwargs):
return super(Rolling, self).quantile(quantile=quantile,
@@ -1917,7 +1918,6 @@ def kurt(self, **kwargs):
return super(Expanding, self).kurt(**kwargs)
@Substitution(name='expanding')
- @Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, interpolation='linear', **kwargs):
return super(Expanding, self).quantile(quantile=quantile,
| Closes https://github.com/pandas-dev/pandas/issues/20822 | https://api.github.com/repos/pandas-dev/pandas/pulls/20823 | 2018-04-25T20:45:16Z | 2018-04-26T01:07:06Z | 2018-04-26T01:07:06Z | 2018-04-26T01:07:06Z |
Change _can_hold_na to a class attribute and document that it shouldn't be changed | diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 9958be47267ee..f1a81b5eefddd 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -38,10 +38,9 @@ class ExtensionArray(object):
* copy
* _concat_same_type
- Some additional methods are available to satisfy pandas' internal, private
- block API:
+ An additional method is available to satisfy pandas' internal,
+ private block API.
- * _can_hold_na
* _formatting_values
Some methods require casting the ExtensionArray to an ndarray of Python
@@ -399,7 +398,8 @@ def _values_for_factorize(self):
Returns
-------
values : ndarray
- An array suitable for factoraization. This should maintain order
+
+ An array suitable for factorization. This should maintain order
and be a supported dtype (Float64, Int64, UInt64, String, Object).
By default, the extension array is cast to object dtype.
na_value : object
@@ -422,7 +422,7 @@ def factorize(self, na_sentinel=-1):
Returns
-------
labels : ndarray
- An interger NumPy array that's an indexer into the original
+ An integer NumPy array that's an indexer into the original
ExtensionArray.
uniques : ExtensionArray
An ExtensionArray containing the unique values of `self`.
@@ -566,16 +566,12 @@ def _concat_same_type(cls, to_concat):
"""
raise AbstractMethodError(cls)
- @property
- def _can_hold_na(self):
- # type: () -> bool
- """Whether your array can hold missing values. True by default.
-
- Notes
- -----
- Setting this to false will optimize some operations like fillna.
- """
- return True
+ # The _can_hold_na attribute is set to True so that pandas internals
+ # will use the ExtensionDtype.na_value as the NA value in operations
+ # such as take(), reindex(), shift(), etc. In addition, those results
+ # will then be of the ExtensionArray subclass rather than an array
+ # of objects
+ _can_hold_na = True
@property
def _ndarray_values(self):
diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py
index 9b60652fbace3..8ef8debbdc666 100644
--- a/pandas/tests/extension/base/interface.py
+++ b/pandas/tests/extension/base/interface.py
@@ -21,7 +21,8 @@ def test_ndim(self, data):
assert data.ndim == 1
def test_can_hold_na_valid(self, data):
- assert data._can_hold_na in {True, False}
+ # GH-20761
+ assert data._can_hold_na is True
def test_memory_usage(self, data):
s = pd.Series(data)
diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py
index f6cee9af0b722..32cf29818e069 100644
--- a/pandas/tests/extension/base/missing.py
+++ b/pandas/tests/extension/base/missing.py
@@ -9,10 +9,7 @@
class BaseMissingTests(BaseExtensionTests):
def test_isna(self, data_missing):
- if data_missing._can_hold_na:
- expected = np.array([True, False])
- else:
- expected = np.array([False, False])
+ expected = np.array([True, False])
result = pd.isna(data_missing)
tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/extension/conftest.py b/pandas/tests/extension/conftest.py
index 4cb4ea21d9be3..bbd31c4071b91 100644
--- a/pandas/tests/extension/conftest.py
+++ b/pandas/tests/extension/conftest.py
@@ -57,7 +57,7 @@ def na_cmp():
Should return a function of two arguments that returns
True if both arguments are (scalar) NA for your type.
- By default, uses ``operator.or``
+ By default, uses ``operator.is_``
"""
return operator.is_
| - [x] closes #20761
- [x] tests added / passed
- removed tests dependent on `_can_hold_na`
- [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
- not needed because it is part of `ExtensionArray` support new for v0.23
Replaces pull request #20815 (see discussion there for reasoning)
| https://api.github.com/repos/pandas-dev/pandas/pulls/20819 | 2018-04-25T16:11:04Z | 2018-04-26T15:34:46Z | 2018-04-26T15:34:45Z | 2018-06-29T14:00:47Z |
ExtensionArray.take default implementation | diff --git a/pandas/api/extensions/__init__.py b/pandas/api/extensions/__init__.py
index cc09204d992d8..3e6e192a3502c 100644
--- a/pandas/api/extensions/__init__.py
+++ b/pandas/api/extensions/__init__.py
@@ -2,5 +2,6 @@
from pandas.core.accessor import (register_dataframe_accessor, # noqa
register_index_accessor,
register_series_accessor)
+from pandas.core.algorithms import take # noqa
from pandas.core.arrays.base import ExtensionArray # noqa
from pandas.core.dtypes.dtypes import ExtensionDtype # noqa
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index cbc412d74d51d..a03d892432b51 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1448,6 +1448,94 @@ def func(arr, indexer, out, fill_value=np.nan):
return func
+def take(arr, indices, allow_fill=False, fill_value=None):
+ """
+ Take elements from an array.
+
+ .. versionadded:: 0.23.0
+
+ Parameters
+ ----------
+ arr : sequence
+ Non array-likes (sequences without a dtype) are coereced
+ to an ndarray.
+ indices : sequence of integers
+ Indices to be taken.
+ allow_fill : bool, default False
+ How to handle negative values in `indices`.
+
+ * False: negative values in `indices` indicate positional indices
+ from the right (the default). This is similar to :func:`numpy.take`.
+
+ * True: negative values in `indices` indicate
+ missing values. These values are set to `fill_value`. Any other
+ other negative values raise a ``ValueError``.
+
+ fill_value : any, optional
+ Fill value to use for NA-indices when `allow_fill` is True.
+ This may be ``None``, in which case the default NA value for
+ the type (``self.dtype.na_value``) is used.
+
+ Returns
+ -------
+ ndarray or ExtensionArray
+ Same type as the input.
+
+ Raises
+ ------
+ IndexError
+ When `indices` is out of bounds for the array.
+ ValueError
+ When the indexer contains negative values other than ``-1``
+ and `allow_fill` is True.
+
+ Notes
+ -----
+ When `allow_fill` is False, `indices` may be whatever dimensionality
+ is accepted by NumPy for `arr`.
+
+ When `allow_fill` is True, `indices` should be 1-D.
+
+ See Also
+ --------
+ numpy.take
+
+ Examples
+ --------
+ >>> from pandas.api.extensions import take
+
+ With the default ``allow_fill=False``, negative numbers indicate
+ positional indices from the right.
+
+ >>> take(np.array([10, 20, 30]), [0, 0, -1])
+ array([10, 10, 30])
+
+ Setting ``allow_fill=True`` will place `fill_value` in those positions.
+
+ >>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
+ array([10., 10., nan])
+
+ >>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
+ ... fill_value=-10)
+ array([ 10, 10, -10])
+ """
+ from pandas.core.indexing import validate_indices
+
+ if not is_array_like(arr):
+ arr = np.asarray(arr)
+
+ indices = np.asarray(indices, dtype=np.intp)
+
+ if allow_fill:
+ # Pandas style, -1 means NA
+ validate_indices(indices, len(arr))
+ result = take_1d(arr, indices, allow_fill=True, fill_value=fill_value)
+ else:
+ # NumPy style
+ result = arr.take(indices)
+ return result
+
+
def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,
allow_fill=True):
"""
@@ -1462,7 +1550,7 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,
Input array.
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
- indicies are filed with fill_value
+ indices are filed with fill_value
axis : int, default 0
Axis to take from
out : ndarray or None, default None
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index f1a81b5eefddd..1922801c30719 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -462,22 +462,36 @@ def factorize(self, na_sentinel=-1):
# ------------------------------------------------------------------------
# Indexing methods
# ------------------------------------------------------------------------
- def take(self, indexer, allow_fill=True, fill_value=None):
+
+ def take(self, indices, allow_fill=False, fill_value=None):
# type: (Sequence[int], bool, Optional[Any]) -> ExtensionArray
"""Take elements from an array.
Parameters
----------
- indexer : sequence of integers
- indices to be taken. -1 is used to indicate values
- that are missing.
- allow_fill : bool, default True
- If False, indexer is assumed to contain no -1 values so no filling
- will be done. This short-circuits computation of a mask. Result is
- undefined if allow_fill == False and -1 is present in indexer.
- fill_value : any, default None
- Fill value to replace -1 values with. If applicable, this should
- use the sentinel missing value for this type.
+ indices : sequence of integers
+ Indices to be taken.
+ allow_fill : bool, default False
+ How to handle negative values in `indices`.
+
+ * False: negative values in `indices` indicate positional indices
+ from the right (the default). This is similar to
+ :func:`numpy.take`.
+
+ * True: negative values in `indices` indicate
+ missing values. These values are set to `fill_value`. Any other
+ other negative values raise a ``ValueError``.
+
+ fill_value : any, optional
+ Fill value to use for NA-indices when `allow_fill` is True.
+ This may be ``None``, in which case the default NA value for
+ the type, ``self.dtype.na_value``, is used.
+
+ For many ExtensionArrays, there will be two representations of
+ `fill_value`: a user-facing "boxed" scalar, and a low-level
+ physical NA value. `fill_value` should be the user-facing version,
+ and the implementation should handle translating that to the
+ physical version for processing the take if nescessary.
Returns
-------
@@ -486,44 +500,56 @@ def take(self, indexer, allow_fill=True, fill_value=None):
Raises
------
IndexError
- When the indexer is out of bounds for the array.
+ When the indices are out of bounds for the array.
+ ValueError
+ When `indices` contains negative values other than ``-1``
+ and `allow_fill` is True.
Notes
-----
- This should follow pandas' semantics where -1 indicates missing values.
- Positions where indexer is ``-1`` should be filled with the missing
- value for this type.
- This gives rise to the special case of a take on an empty
- ExtensionArray that does not raises an IndexError straight away
- when the `indexer` is all ``-1``.
+ ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
+ ``iloc``, when `indices` is a sequence of values. Additionally,
+ it's called by :meth:`Series.reindex`, or any other method
+ that causes realignemnt, with a `fill_value`.
- This is called by ``Series.__getitem__``, ``.loc``, ``iloc``, when the
- indexer is a sequence of values.
+ See Also
+ --------
+ numpy.take
+ pandas.api.extensions.take
Examples
--------
- Suppose the extension array is backed by a NumPy array stored as
- ``self.data``. Then ``take`` may be written as
+ Here's an example implementation, which relies on casting the
+ extension array to object dtype. This uses the helper method
+ :func:`pandas.api.extensions.take`.
.. code-block:: python
- def take(self, indexer, allow_fill=True, fill_value=None):
- indexer = np.asarray(indexer)
- mask = indexer == -1
+ def take(self, indices, allow_fill=False, fill_value=None):
+ from pandas.core.algorithms import take
- # take on empty array not handled as desired by numpy
- # in case of -1 (all missing take)
- if not len(self) and mask.all():
- return type(self)([np.nan] * len(indexer))
+ # If the ExtensionArray is backed by an ndarray, then
+ # just pass that here instead of coercing to object.
+ data = self.astype(object)
- result = self.data.take(indexer)
- result[mask] = np.nan # NA for this type
- return type(self)(result)
+ if allow_fill and fill_value is None:
+ fill_value = self.dtype.na_value
- See Also
- --------
- numpy.take
+ # fill value should always be translated from the scalar
+ # type for the array, to the physical storage type for
+ # the data, before passing to take.
+
+ result = take(data, indices, fill_value=fill_value,
+ allow_fill=allow_fill)
+ return self._from_sequence(result)
"""
+ # Implementer note: The `fill_value` parameter should be a user-facing
+ # value, an instance of self.dtype.type. When passed `fill_value=None`,
+ # the default of `self.dtype.na_value` should be used.
+ # This may differ from the physical storage type your ExtensionArray
+ # uses. In this case, your implementation is responsible for casting
+ # the user-facing type to the storage type, before using
+ # pandas.api.extensions.take
raise AbstractMethodError(self)
def copy(self, deep=False):
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 6dbed5f138d5d..49e98c16c716e 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -16,6 +16,12 @@ class _DtypeOpsMixin(object):
# classes will inherit from this Mixin. Once everything is compatible, this
# class's methods can be moved to ExtensionDtype and removed.
+ # na_value is the default NA value to use for this type. This is used in
+ # e.g. ExtensionArray.take. This should be the user-facing "boxed" version
+ # of the NA value, not the physical NA vaalue for storage.
+ # e.g. for JSONArray, this is an empty dictionary.
+ na_value = np.nan
+
def __eq__(self, other):
"""Check whether 'other' is equal to self.
@@ -92,6 +98,8 @@ def is_dtype(cls, dtype):
class ExtensionDtype(_DtypeOpsMixin):
"""A custom data type, to be paired with an ExtensionArray.
+ .. versionadded:: 0.23.0
+
Notes
-----
The interface includes the following abstract methods that must
@@ -101,6 +109,9 @@ class ExtensionDtype(_DtypeOpsMixin):
* name
* construct_from_string
+ The `na_value` class attribute can be used to set the default NA value
+ for this type. :attr:`numpy.nan` is used by default.
+
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 75434fcc2b40d..e4ed6d544d42e 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -255,7 +255,6 @@ def changeit():
def maybe_promote(dtype, fill_value=np.nan):
-
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
@@ -294,6 +293,8 @@ def maybe_promote(dtype, fill_value=np.nan):
elif is_datetimetz(dtype):
if isna(fill_value):
fill_value = iNaT
+ elif is_extension_array_dtype(dtype) and isna(fill_value):
+ fill_value = dtype.na_value
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 2c98cedd7d715..3b2336bf19547 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -502,6 +502,8 @@ def na_value_for_dtype(dtype, compat=True):
"""
dtype = pandas_dtype(dtype)
+ if is_extension_array_dtype(dtype):
+ return dtype.na_value
if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or
is_timedelta64_dtype(dtype) or is_period_dtype(dtype)):
return NaT
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index de6985ef3b4ea..82d5a0286b117 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3476,7 +3476,7 @@ def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan,
allow_dups=False)
def _reindex_columns(self, new_columns, method, copy, level,
- fill_value=np.nan, limit=None, tolerance=None):
+ fill_value=None, limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, method=method,
level=level, limit=limit,
tolerance=tolerance)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 86342b6996abf..6d55f92167d3b 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3660,7 +3660,7 @@ def reindex(self, *args, **kwargs):
copy = kwargs.pop('copy', True)
limit = kwargs.pop('limit', None)
tolerance = kwargs.pop('tolerance', None)
- fill_value = kwargs.pop('fill_value', np.nan)
+ fill_value = kwargs.pop('fill_value', None)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
@@ -3776,7 +3776,7 @@ def _reindex_multi(self, axes, copy, fill_value):
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
- limit=None, fill_value=np.nan):
+ limit=None, fill_value=None):
msg = ("'.reindex_axis' is deprecated and will be removed in a future "
"version. Use '.reindex' instead.")
self._consolidate_inplace()
@@ -3790,7 +3790,7 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
return self._reindex_with_indexers({axis: [new_index, indexer]},
fill_value=fill_value, copy=copy)
- def _reindex_with_indexers(self, reindexers, fill_value=np.nan, copy=False,
+ def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False,
allow_dups=False):
"""allow_dups indicates an internal call here """
@@ -7252,7 +7252,7 @@ def align(self, other, join='outer', axis=None, level=None, copy=True,
raise TypeError('unsupported type: %s' % type(other))
def _align_frame(self, other, join='outer', axis=None, level=None,
- copy=True, fill_value=np.nan, method=None, limit=None,
+ copy=True, fill_value=None, method=None, limit=None,
fill_axis=0):
# defaults
join_index, join_columns = None, None
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 2eb52ecc6bcc7..fe6d6775c4e0b 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -2417,12 +2417,53 @@ def maybe_convert_indices(indices, n):
mask = indices < 0
if mask.any():
indices[mask] += n
+
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
+def validate_indices(indices, n):
+ """Perform bounds-checking for an indexer.
+
+ -1 is allowed for indicating missing values.
+
+ Parameters
+ ----------
+ indices : ndarray
+ n : int
+ length of the array being indexed
+
+ Raises
+ ------
+ ValueError
+
+ Examples
+ --------
+ >>> validate_indices([1, 2], 3)
+ # OK
+ >>> validate_indices([1, -2], 3)
+ ValueError
+ >>> validate_indices([1, 2, 3], 3)
+ IndexError
+ >>> validate_indices([-1, -1], 0)
+ # OK
+ >>> validate_indices([0, 1], 0)
+ IndexError
+ """
+ if len(indices):
+ min_idx = indices.min()
+ if min_idx < -1:
+ msg = ("'indices' contains values less than allowed ({} < {})"
+ .format(min_idx, -1))
+ raise ValueError(msg)
+
+ max_idx = indices.max()
+ if max_idx >= n:
+ raise IndexError("indices are out-of-bounds")
+
+
def maybe_convert_ix(*args):
"""
We likely want to take the cross-product
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index a266ea620bd9f..474894aba65df 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1888,6 +1888,11 @@ def _holder(self):
# For extension blocks, the holder is values-dependent.
return type(self.values)
+ @property
+ def fill_value(self):
+ # Used in reindex_indexer
+ return self.values.dtype.na_value
+
@property
def _can_hold_na(self):
# The default ExtensionArray._can_hold_na is True
@@ -1951,7 +1956,8 @@ def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
# axis doesn't matter; we are really a single-dim object
# but are passed the axis depending on the calling routing
# if its REALLY axis 0, then this will be a reindex and not a take
- new_values = self.values.take(indexer, fill_value=fill_value)
+ new_values = self.values.take(indexer, fill_value=fill_value,
+ allow_fill=True)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
@@ -5440,6 +5446,14 @@ def is_uniform_join_units(join_units):
len(join_units) > 1)
+def is_uniform_reindex(join_units):
+ return (
+ # TODO: should this be ju.block._can_hold_na?
+ all(ju.block and ju.block.is_extension for ju in join_units) and
+ len(set(ju.block.dtype.name for ju in join_units)) == 1
+ )
+
+
def get_empty_dtype_and_na(join_units):
"""
Return dtype and N/A values to use when concatenating specified units.
@@ -5457,6 +5471,12 @@ def get_empty_dtype_and_na(join_units):
if blk is None:
return np.float64, np.nan
+ if is_uniform_reindex(join_units):
+ # XXX: integrate property
+ empty_dtype = join_units[0].block.dtype
+ upcasted_na = join_units[0].block.fill_value
+ return empty_dtype, upcasted_na
+
has_none_blocks = False
dtypes = [None] * len(join_units)
for i, unit in enumerate(join_units):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f2ee225f50514..7abd95c68ea2b 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3216,7 +3216,8 @@ def _reindex_indexer(self, new_index, indexer, copy):
return self.copy()
return self
- new_values = algorithms.take_1d(self._values, indexer)
+ new_values = algorithms.take_1d(self._values, indexer,
+ allow_fill=True, fill_value=None)
return self._constructor(new_values, index=new_index)
def _needs_reindex_multi(self, axes, method, level):
diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py
index ac156900671a6..5c9ede1079079 100644
--- a/pandas/tests/extension/base/getitem.py
+++ b/pandas/tests/extension/base/getitem.py
@@ -127,20 +127,53 @@ def test_take(self, data, na_value, na_cmp):
result = data.take([0, -1])
assert result.dtype == data.dtype
assert result[0] == data[0]
- na_cmp(result[1], na_value)
+ assert result[1] == data[-1]
+
+ result = data.take([0, -1], allow_fill=True, fill_value=na_value)
+ assert result[0] == data[0]
+ assert na_cmp(result[1], na_value)
with tm.assert_raises_regex(IndexError, "out of bounds"):
data.take([len(data) + 1])
def test_take_empty(self, data, na_value, na_cmp):
empty = data[:0]
- result = empty.take([-1])
- na_cmp(result[0], na_value)
+
+ result = empty.take([-1], allow_fill=True)
+ assert na_cmp(result[0], na_value)
+
+ with pytest.raises(IndexError):
+ empty.take([-1])
with tm.assert_raises_regex(IndexError, "cannot do a non-empty take"):
empty.take([0, 1])
- @pytest.mark.xfail(reason="Series.take with extension array buggy for -1")
+ def test_take_negative(self, data):
+ # https://github.com/pandas-dev/pandas/issues/20640
+ n = len(data)
+ result = data.take([0, -n, n - 1, -1])
+ expected = data.take([0, 0, n - 1, n - 1])
+ self.assert_extension_array_equal(result, expected)
+
+ def test_take_non_na_fill_value(self, data_missing):
+ fill_value = data_missing[1] # valid
+ na = data_missing[0]
+
+ array = data_missing._from_sequence([na, fill_value, na])
+ result = array.take([-1, 1], fill_value=fill_value, allow_fill=True)
+ expected = array.take([1, 1])
+ self.assert_extension_array_equal(result, expected)
+
+ def test_take_pandas_style_negative_raises(self, data, na_value):
+ with pytest.raises(ValueError):
+ data.take([0, -2], fill_value=na_value, allow_fill=True)
+
+ @pytest.mark.parametrize('allow_fill', [True, False])
+ def test_take_out_of_bounds_raises(self, data, allow_fill):
+ arr = data[:3]
+ with pytest.raises(IndexError):
+ arr.take(np.asarray([0, 3]), allow_fill=allow_fill)
+
def test_take_series(self, data):
s = pd.Series(data)
result = s.take([0, -1])
@@ -166,3 +199,14 @@ def test_reindex(self, data, na_value):
expected = pd.Series(data._from_sequence([na_value, na_value]),
index=[n, n + 1])
self.assert_series_equal(result, expected)
+
+ def test_reindex_non_na_fill_value(self, data_missing):
+ valid = data_missing[1]
+ na = data_missing[0]
+
+ array = data_missing._from_sequence([na, valid])
+ ser = pd.Series(array)
+ result = ser.reindex([0, 1, 2], fill_value=valid)
+ expected = pd.Series(data_missing._from_sequence([na, valid, valid]))
+
+ self.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py
index 6ebe700f13be0..c34339c99322d 100644
--- a/pandas/tests/extension/category/test_categorical.py
+++ b/pandas/tests/extension/category/test_categorical.py
@@ -81,6 +81,8 @@ def test_merge(self, data, na_value):
class TestGetitem(base.BaseGetitemTests):
+ skip_take = pytest.mark.skip(reason="GH-20664.")
+
@pytest.mark.skip(reason="Backwards compatibility")
def test_getitem_scalar(self):
# CategoricalDtype.type isn't "correct" since it should
@@ -88,11 +90,35 @@ def test_getitem_scalar(self):
# to break things by changing.
pass
- @pytest.mark.xfail(reason="Categorical.take buggy")
+ @skip_take
def test_take(self):
# TODO remove this once Categorical.take is fixed
pass
+ @skip_take
+ def test_take_negative(self):
+ pass
+
+ @skip_take
+ def test_take_pandas_style_negative_raises(self):
+ pass
+
+ @skip_take
+ def test_take_non_na_fill_value(self):
+ pass
+
+ @skip_take
+ def test_take_out_of_bounds_raises(self):
+ pass
+
+ @skip_take
+ def test_take_series(self):
+ pass
+
+ @skip_take
+ def test_reindex_non_na_fill_value(self):
+ pass
+
@pytest.mark.xfail(reason="Categorical.take buggy")
def test_take_empty(self):
pass
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 5d749126e0cec..e9431bd0c233c 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -8,12 +8,12 @@
import pandas as pd
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.base import ExtensionDtype
-from pandas.core.dtypes.common import _ensure_platform_int
class DecimalDtype(ExtensionDtype):
type = decimal.Decimal
name = 'decimal'
+ na_value = decimal.Decimal('NaN')
@classmethod
def construct_from_string(cls, string):
@@ -28,6 +28,7 @@ class DecimalArray(ExtensionArray):
dtype = DecimalDtype()
def __init__(self, values):
+ assert all(isinstance(v, decimal.Decimal) for v in values)
values = np.asarray(values, dtype=object)
self._data = values
@@ -52,6 +53,17 @@ def __getitem__(self, item):
else:
return type(self)(self._data[item])
+ def take(self, indexer, allow_fill=False, fill_value=None):
+ from pandas.api.extensions import take
+
+ data = self._data
+ if allow_fill and fill_value is None:
+ fill_value = self.dtype.na_value
+
+ result = take(data, indexer, fill_value=fill_value,
+ allow_fill=allow_fill)
+ return self._from_sequence(result)
+
def copy(self, deep=False):
if deep:
return type(self)(self._data.copy())
@@ -80,20 +92,6 @@ def nbytes(self):
def isna(self):
return np.array([x.is_nan() for x in self._data])
- def take(self, indexer, allow_fill=True, fill_value=None):
- indexer = np.asarray(indexer)
- mask = indexer == -1
-
- # take on empty array not handled as desired by numpy in case of -1
- if not len(self) and mask.all():
- return type(self)([self._na_value] * len(indexer))
-
- indexer = _ensure_platform_int(indexer)
- out = self._data.take(indexer)
- out[mask] = self._na_value
-
- return type(self)(out)
-
@property
def _na_value(self):
return decimal.Decimal('NaN')
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 53d74cd6d38cb..1f8cf0264f62f 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -108,7 +108,15 @@ class TestReshaping(BaseDecimal, base.BaseReshapingTests):
class TestGetitem(BaseDecimal, base.BaseGetitemTests):
- pass
+
+ def test_take_na_value_other_decimal(self):
+ arr = DecimalArray([decimal.Decimal('1.0'),
+ decimal.Decimal('2.0')])
+ result = arr.take([0, -1], allow_fill=True,
+ fill_value=decimal.Decimal('-1.0'))
+ expected = DecimalArray([decimal.Decimal('1.0'),
+ decimal.Decimal('-1.0')])
+ self.assert_extension_array_equal(result, expected)
class TestMissing(BaseDecimal, base.BaseMissingTests):
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 2e75bb3b8c326..88bb66f38b35c 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -1,3 +1,15 @@
+"""Test extension array for storing nested data in a pandas container.
+
+The JSONArray stores lists of dictionaries. The storage mechanism is a list,
+not an ndarray.
+
+Note:
+
+We currently store lists of UserDicts (Py3 only). Pandas has a few places
+internally that specifically check for dicts, and does non-scalar things
+in that case. We *want* the dictionaries to be treated as scalars, so we
+hack around pandas by using UserDicts.
+"""
import collections
import itertools
import numbers
@@ -14,6 +26,11 @@
class JSONDtype(ExtensionDtype):
type = collections.Mapping
name = 'json'
+ try:
+ na_value = collections.UserDict()
+ except AttributeError:
+ # source compatibility with Py2.
+ na_value = {}
@classmethod
def construct_from_string(cls, string):
@@ -91,15 +108,33 @@ def nbytes(self):
return sys.getsizeof(self.data)
def isna(self):
- return np.array([x == self._na_value for x in self.data])
-
- def take(self, indexer, allow_fill=True, fill_value=None):
- try:
- output = [self.data[loc] if loc != -1 else self._na_value
- for loc in indexer]
- except IndexError:
- raise IndexError("Index is out of bounds or cannot do a "
- "non-empty take from an empty array.")
+ return np.array([x == self.dtype.na_value for x in self.data])
+
+ def take(self, indexer, allow_fill=False, fill_value=None):
+ # re-implement here, since NumPy has trouble setting
+ # sized objects like UserDicts into scalar slots of
+ # an ndarary.
+ indexer = np.asarray(indexer)
+ msg = ("Index is out of bounds or cannot do a "
+ "non-empty take from an empty array.")
+
+ if allow_fill:
+ if fill_value is None:
+ fill_value = self.dtype.na_value
+ # bounds check
+ if (indexer < -1).any():
+ raise ValueError
+ try:
+ output = [self.data[loc] if loc != -1 else fill_value
+ for loc in indexer]
+ except IndexError:
+ raise IndexError(msg)
+ else:
+ try:
+ output = [self.data[loc] for loc in indexer]
+ except IndexError:
+ raise IndexError(msg)
+
return self._from_sequence(output)
def copy(self, deep=False):
@@ -118,10 +153,6 @@ def unique(self):
dict(x) for x in list(set(tuple(d.items()) for d in self.data))
])
- @property
- def _na_value(self):
- return {}
-
@classmethod
def _concat_same_type(cls, to_concat):
data = list(itertools.chain.from_iterable([x.data for x in to_concat]))
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 0ef34c3b0f679..1fdb7298eefc4 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -21,6 +21,17 @@ def dtype():
@pytest.fixture
def data():
"""Length-100 PeriodArray for semantics test."""
+ data = make_data()
+
+ # Why the while loop? NumPy is unable to construct an ndarray from
+ # equal-length ndarrays. Many of our operations involve coercing the
+ # EA to an ndarray of objects. To avoid random test failures, we ensure
+ # that our data is coercable to an ndarray. Several tests deal with only
+ # the first two elements, so that's what we'll check.
+
+ while len(data[0]) == len(data[1]):
+ data = make_data()
+
return JSONArray(make_data())
@@ -41,8 +52,8 @@ def data_missing_for_sorting():
@pytest.fixture
-def na_value():
- return {}
+def na_value(dtype):
+ return dtype.na_value
@pytest.fixture
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index c66310d10ebdc..04225454f61f9 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -16,7 +16,8 @@
import numpy as np
import pandas as pd
-from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
+from pandas.core.indexing import (_non_reducing_slice, _maybe_numeric_slice,
+ validate_indices)
from pandas import NaT, DataFrame, Index, Series, MultiIndex
import pandas.util.testing as tm
@@ -994,3 +995,27 @@ def test_none_coercion_mixed_dtypes(self):
datetime(2000, 1, 3)],
'd': [None, 'b', 'c']})
tm.assert_frame_equal(start_dataframe, exp)
+
+
+def test_validate_indices_ok():
+ indices = np.asarray([0, 1])
+ validate_indices(indices, 2)
+ validate_indices(indices[:0], 0)
+ validate_indices(np.array([-1, -1]), 0)
+
+
+def test_validate_indices_low():
+ indices = np.asarray([0, -2])
+ with tm.assert_raises_regex(ValueError, "'indices' contains"):
+ validate_indices(indices, 2)
+
+
+def test_validate_indices_high():
+ indices = np.asarray([0, 1, 2])
+ with tm.assert_raises_regex(IndexError, "indices are out"):
+ validate_indices(indices, 2)
+
+
+def test_validate_indices_empty():
+ with tm.assert_raises_regex(IndexError, "indices are out"):
+ validate_indices(np.array([0, 1]), 0)
diff --git a/pandas/tests/test_take.py b/pandas/tests/test_take.py
index 7b97b0e975df3..2b78c91f9dac5 100644
--- a/pandas/tests/test_take.py
+++ b/pandas/tests/test_take.py
@@ -3,6 +3,7 @@
from datetime import datetime
import numpy as np
+import pytest
from pandas.compat import long
import pandas.core.algorithms as algos
import pandas.util.testing as tm
@@ -445,3 +446,47 @@ def test_2d_datetime64(self):
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = datetime(2007, 1, 1)
tm.assert_almost_equal(result, expected)
+
+
+class TestExtensionTake(object):
+ # The take method found in pd.api.extensions
+
+ def test_bounds_check_large(self):
+ arr = np.array([1, 2])
+ with pytest.raises(IndexError):
+ algos.take(arr, [2, 3], allow_fill=True)
+
+ with pytest.raises(IndexError):
+ algos.take(arr, [2, 3], allow_fill=False)
+
+ def test_bounds_check_small(self):
+ arr = np.array([1, 2, 3], dtype=np.int64)
+ indexer = [0, -1, -2]
+ with pytest.raises(ValueError):
+ algos.take(arr, indexer, allow_fill=True)
+
+ result = algos.take(arr, indexer)
+ expected = np.array([1, 3, 2], dtype=np.int64)
+ tm.assert_numpy_array_equal(result, expected)
+
+ @pytest.mark.parametrize('allow_fill', [True, False])
+ def test_take_empty(self, allow_fill):
+ arr = np.array([], dtype=np.int64)
+ # empty take is ok
+ result = algos.take(arr, [], allow_fill=allow_fill)
+ tm.assert_numpy_array_equal(arr, result)
+
+ with pytest.raises(IndexError):
+ algos.take(arr, [0], allow_fill=allow_fill)
+
+ def test_take_na_empty(self):
+ result = algos.take(np.array([]), [-1, -1], allow_fill=True,
+ fill_value=0.0)
+ expected = np.array([0., 0.])
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_take_coerces_list(self):
+ arr = [1, 2, 3]
+ result = algos.take(arr, [0, 0])
+ expected = np.array([1, 1])
+ tm.assert_numpy_array_equal(result, expected)
| Implements a take interface that's compatible with NumPy and optionally pandas'
NA semantics.
```python
In [1]: import pandas as pd
In [2]: from pandas.tests.extension.decimal.array import *
In [3]: arr = DecimalArray(['1.1', '1.2', '1.3'])
In [4]: arr.take([0, 1, -1])
Out[4]: DecimalArray(array(['1.1', '1.2', '1.3'], dtype=object))
In [5]: arr.take([0, 1, -1], fill_value=float('nan'))
Out[5]: DecimalArray(array(['1.1', '1.2', Decimal('NaN')], dtype=object))
```
Closes https://github.com/pandas-dev/pandas/issues/20640
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/20814 | 2018-04-24T19:03:37Z | 2018-04-27T13:06:07Z | 2018-04-27T13:06:06Z | 2022-11-30T02:56:54Z |
CLN: Continued indexes/test_base.py cleanup | diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 7cd880b51661d..afb6653b1e694 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -62,9 +62,9 @@ def generate_index_types(self, skip_index_keys=[]):
Return a generator of the various index types, leaving
out the ones with a key in skip_index_keys
"""
- for key, idx in self.indices.items():
+ for key, index in self.indices.items():
if key not in skip_index_keys:
- yield key, idx
+ yield key, index
def test_new_axis(self):
new_index = self.dateIndex[None, :]
@@ -80,8 +80,8 @@ def test_copy_and_deepcopy(self, indices):
@pytest.mark.parametrize("attr", ['strIndex', 'dateIndex'])
def test_constructor_regular(self, attr):
# regular instance creation
- idx = getattr(self, attr)
- tm.assert_contains_all(idx, idx)
+ index = getattr(self, attr)
+ tm.assert_contains_all(index, index)
def test_constructor_casting(self):
# casting
@@ -108,14 +108,14 @@ def test_constructor_corner(self):
# corner case
pytest.raises(TypeError, Index, 0)
- @pytest.mark.parametrize("idx_vals", [
+ @pytest.mark.parametrize("index_vals", [
[('A', 1), 'B'], ['B', ('A', 1)]])
- def test_construction_list_mixed_tuples(self, idx_vals):
+ def test_construction_list_mixed_tuples(self, index_vals):
# see gh-10697: if we are constructing from a mixed list of tuples,
# make sure that we are independent of the sorting order.
- idx = Index(idx_vals)
- assert isinstance(idx, Index)
- assert not isinstance(idx, MultiIndex)
+ index = Index(index_vals)
+ assert isinstance(index, Index)
+ assert not isinstance(index, MultiIndex)
@pytest.mark.parametrize('na_value', [None, np.nan])
@pytest.mark.parametrize('vtype', [list, tuple, iter])
@@ -127,36 +127,36 @@ def test_construction_list_tuples_nan(self, na_value, vtype):
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cast_as_obj", [True, False])
- @pytest.mark.parametrize("idx", [
+ @pytest.mark.parametrize("index", [
pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern'), # DTI with tz
pd.date_range('2015-01-01 10:00', freq='D', periods=3), # DTI no tz
pd.timedelta_range('1 days', freq='D', periods=3), # td
pd.period_range('2015-01-01', freq='D', periods=3) # period
])
- def test_constructor_from_index_dtlike(self, cast_as_obj, idx):
+ def test_constructor_from_index_dtlike(self, cast_as_obj, index):
if cast_as_obj:
- result = pd.Index(idx.astype(object))
+ result = pd.Index(index.astype(object))
else:
- result = pd.Index(idx)
+ result = pd.Index(index)
- tm.assert_index_equal(result, idx)
+ tm.assert_index_equal(result, index)
- if isinstance(idx, pd.DatetimeIndex) and hasattr(idx, 'tz'):
- assert result.tz == idx.tz
+ if isinstance(index, pd.DatetimeIndex) and hasattr(index, 'tz'):
+ assert result.tz == index.tz
- @pytest.mark.parametrize("idx,has_tz", [
+ @pytest.mark.parametrize("index,has_tz", [
(pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern'), True), # datetimetz
(pd.timedelta_range('1 days', freq='D', periods=3), False), # td
(pd.period_range('2015-01-01', freq='D', periods=3), False) # period
])
- def test_constructor_from_series_dtlike(self, idx, has_tz):
- result = pd.Index(pd.Series(idx))
- tm.assert_index_equal(result, idx)
+ def test_constructor_from_series_dtlike(self, index, has_tz):
+ result = pd.Index(pd.Series(index))
+ tm.assert_index_equal(result, index)
if has_tz:
- assert result.tz == idx.tz
+ assert result.tz == index.tz
@pytest.mark.parametrize("klass", [Index, DatetimeIndex])
def test_constructor_from_series(self, klass):
@@ -187,19 +187,19 @@ def test_constructor_from_frame_series_freq(self):
df = pd.DataFrame(np.random.rand(5, 3))
df['date'] = dts
result = DatetimeIndex(df['date'], freq='MS')
- assert df['date'].dtype == object
+ assert df['date'].dtype == object
expected.name = 'date'
- exp = pd.Series(dts, name='date')
- tm.assert_series_equal(df['date'], exp)
+ tm.assert_index_equal(result, expected)
+
+ expected = pd.Series(dts, name='date')
+ tm.assert_series_equal(df['date'], expected)
# GH 6274
# infer freq of same
freq = pd.infer_freq(df['date'])
assert freq == 'MS'
- tm.assert_index_equal(result, expected)
-
@pytest.mark.parametrize("array", [
np.arange(5), np.array(['a', 'b', 'c']), date_range(
'2000-01-01', periods=3).values
@@ -255,14 +255,14 @@ def test_constructor_int_dtype_nan_raises(self, dtype):
def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val):
# GH 13467
na_list = [na_val, na_val]
- exp = klass(na_list)
- assert exp.dtype == dtype
+ expected = klass(na_list)
+ assert expected.dtype == dtype
result = Index(na_list)
- tm.assert_index_equal(result, exp)
+ tm.assert_index_equal(result, expected)
result = Index(np.array(na_list))
- tm.assert_index_equal(result, exp)
+ tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("pos", [0, 1])
@pytest.mark.parametrize("klass,dtype,ctor", [
@@ -271,16 +271,16 @@ def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val):
])
def test_index_ctor_infer_nat_dt_like(self, pos, klass, dtype, ctor,
nulls_fixture):
- exp = klass([pd.NaT, pd.NaT])
- assert exp.dtype == dtype
+ expected = klass([pd.NaT, pd.NaT])
+ assert expected.dtype == dtype
data = [ctor]
data.insert(pos, nulls_fixture)
result = Index(data)
- tm.assert_index_equal(result, exp)
+ tm.assert_index_equal(result, expected)
result = Index(np.array(data, dtype=object))
- tm.assert_index_equal(result, exp)
+ tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("swap_objs", [True, False])
def test_index_ctor_nat_result(self, swap_objs):
@@ -289,9 +289,9 @@ def test_index_ctor_nat_result(self, swap_objs):
if swap_objs:
data = data[::-1]
- exp = pd.Index(data, dtype=object)
- tm.assert_index_equal(Index(data), exp)
- tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
+ expected = pd.Index(data, dtype=object)
+ tm.assert_index_equal(Index(data), expected)
+ tm.assert_index_equal(Index(np.array(data, dtype=object)), expected)
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
@@ -304,9 +304,9 @@ def test_index_ctor_infer_periodindex(self):
(['A', 'B', 'C', np.nan], 'obj')
])
def test_constructor_simple_new(self, vals, dtype):
- idx = Index(vals, name=dtype)
- result = idx._simple_new(idx, dtype)
- tm.assert_index_equal(result, idx)
+ index = Index(vals, name=dtype)
+ result = index._simple_new(index, dtype)
+ tm.assert_index_equal(result, index)
@pytest.mark.parametrize("vals", [
[1, 2, 3], np.array([1, 2, 3]), np.array([1, 2, 3], dtype=int),
@@ -314,29 +314,29 @@ def test_constructor_simple_new(self, vals, dtype):
[1., 2., 3.], np.array([1., 2., 3.], dtype=float)
])
def test_constructor_dtypes_to_int64(self, vals):
- idx = Index(vals, dtype=int)
- assert isinstance(idx, Int64Index)
+ index = Index(vals, dtype=int)
+ assert isinstance(index, Int64Index)
@pytest.mark.parametrize("vals", [
[1, 2, 3], [1., 2., 3.], np.array([1., 2., 3.]),
np.array([1, 2, 3], dtype=int), np.array([1., 2., 3.], dtype=float)
])
def test_constructor_dtypes_to_float64(self, vals):
- idx = Index(vals, dtype=float)
- assert isinstance(idx, Float64Index)
+ index = Index(vals, dtype=float)
+ assert isinstance(index, Float64Index)
- @pytest.mark.parametrize("cast_idx", [True, False])
+ @pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
[True, False, True], np.array([True, False, True], dtype=bool)
])
- def test_constructor_dtypes_to_object(self, cast_idx, vals):
- if cast_idx:
- idx = Index(vals, dtype=bool)
+ def test_constructor_dtypes_to_object(self, cast_index, vals):
+ if cast_index:
+ index = Index(vals, dtype=bool)
else:
- idx = Index(vals)
+ index = Index(vals)
- assert isinstance(idx, Index)
- assert idx.dtype == object
+ assert isinstance(index, Index)
+ assert index.dtype == object
@pytest.mark.parametrize("vals", [
[1, 2, 3], np.array([1, 2, 3], dtype=int),
@@ -345,38 +345,38 @@ def test_constructor_dtypes_to_object(self, cast_idx, vals):
[datetime(2011, 1, 1), datetime(2011, 1, 2)]
])
def test_constructor_dtypes_to_categorical(self, vals):
- idx = Index(vals, dtype='category')
- assert isinstance(idx, CategoricalIndex)
+ index = Index(vals, dtype='category')
+ assert isinstance(index, CategoricalIndex)
- @pytest.mark.parametrize("cast_idx", [True, False])
+ @pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')])),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])
])
- def test_constructor_dtypes_to_datetime(self, cast_idx, vals):
- if cast_idx:
- idx = Index(vals, dtype=object)
- assert isinstance(idx, Index)
- assert idx.dtype == object
+ def test_constructor_dtypes_to_datetime(self, cast_index, vals):
+ if cast_index:
+ index = Index(vals, dtype=object)
+ assert isinstance(index, Index)
+ assert index.dtype == object
else:
- idx = Index(vals)
- assert isinstance(idx, DatetimeIndex)
+ index = Index(vals)
+ assert isinstance(index, DatetimeIndex)
- @pytest.mark.parametrize("cast_idx", [True, False])
+ @pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
np.array([np.timedelta64(1, 'D'), np.timedelta64(1, 'D')]),
[timedelta(1), timedelta(1)]
])
- def test_constructor_dtypes_to_timedelta(self, cast_idx, vals):
- if cast_idx:
- idx = Index(vals, dtype=object)
- assert isinstance(idx, Index)
- assert idx.dtype == object
+ def test_constructor_dtypes_to_timedelta(self, cast_index, vals):
+ if cast_index:
+ index = Index(vals, dtype=object)
+ assert isinstance(index, Index)
+ assert index.dtype == object
else:
- idx = Index(vals)
- assert isinstance(idx, TimedeltaIndex)
+ index = Index(vals)
+ assert isinstance(index, TimedeltaIndex)
@pytest.mark.parametrize("values", [
# pass values without timezone, as DatetimeIndex localizes it
@@ -385,41 +385,41 @@ def test_constructor_dtypes_to_timedelta(self, cast_idx, vals):
@pytest.mark.parametrize("klass", [pd.Index, pd.DatetimeIndex])
def test_constructor_dtypes_datetime(self, tz_naive_fixture, values,
klass):
- idx = pd.date_range('2011-01-01', periods=5, tz=tz_naive_fixture)
- dtype = idx.dtype
+ index = pd.date_range('2011-01-01', periods=5, tz=tz_naive_fixture)
+ dtype = index.dtype
- res = klass(values, tz=tz_naive_fixture)
- tm.assert_index_equal(res, idx)
+ result = klass(values, tz=tz_naive_fixture)
+ tm.assert_index_equal(result, index)
- res = klass(values, dtype=dtype)
- tm.assert_index_equal(res, idx)
+ result = klass(values, dtype=dtype)
+ tm.assert_index_equal(result, index)
- res = klass(list(values), tz=tz_naive_fixture)
- tm.assert_index_equal(res, idx)
+ result = klass(list(values), tz=tz_naive_fixture)
+ tm.assert_index_equal(result, index)
- res = klass(list(values), dtype=dtype)
- tm.assert_index_equal(res, idx)
+ result = klass(list(values), dtype=dtype)
+ tm.assert_index_equal(result, index)
@pytest.mark.parametrize("attr", ['values', 'asi8'])
@pytest.mark.parametrize("klass", [pd.Index, pd.TimedeltaIndex])
def test_constructor_dtypes_timedelta(self, attr, klass):
- idx = pd.timedelta_range('1 days', periods=5)
- dtype = idx.dtype
+ index = pd.timedelta_range('1 days', periods=5)
+ dtype = index.dtype
- values = getattr(idx, attr)
+ values = getattr(index, attr)
- res = klass(values, dtype=dtype)
- tm.assert_index_equal(res, idx)
+ result = klass(values, dtype=dtype)
+ tm.assert_index_equal(result, index)
- res = klass(list(values), dtype=dtype)
- tm.assert_index_equal(res, idx)
+ result = klass(list(values), dtype=dtype)
+ tm.assert_index_equal(result, index)
def test_constructor_empty_gen(self):
skip_index_keys = ["repeats", "periodIndex", "rangeIndex",
"tuples"]
- for key, idx in self.generate_index_types(skip_index_keys):
- empty = idx.__class__([])
- assert isinstance(empty, idx.__class__)
+ for key, index in self.generate_index_types(skip_index_keys):
+ empty = index.__class__([])
+ assert isinstance(empty, index.__class__)
assert not len(empty)
@pytest.mark.parametrize("empty,klass", [
@@ -515,21 +515,21 @@ def test_insert_missing(self, nulls_fixture):
result = Index(list('abc')).insert(1, nulls_fixture)
tm.assert_index_equal(result, expected)
- @pytest.mark.parametrize("pos,exp", [
- (0, Index(['b', 'c', 'd'], name='idx')),
- (-1, Index(['a', 'b', 'c'], name='idx'))
+ @pytest.mark.parametrize("pos,expected", [
+ (0, Index(['b', 'c', 'd'], name='index')),
+ (-1, Index(['a', 'b', 'c'], name='index'))
])
- def test_delete(self, pos, exp):
- idx = Index(['a', 'b', 'c', 'd'], name='idx')
- result = idx.delete(pos)
- tm.assert_index_equal(result, exp)
- assert result.name == exp.name
+ def test_delete(self, pos, expected):
+ index = Index(['a', 'b', 'c', 'd'], name='index')
+ result = index.delete(pos)
+ tm.assert_index_equal(result, expected)
+ assert result.name == expected.name
def test_delete_raises(self):
- idx = Index(['a', 'b', 'c', 'd'], name='idx')
+ index = Index(['a', 'b', 'c', 'd'], name='index')
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
- idx.delete(5)
+ index.delete(5)
def test_identical(self):
@@ -585,9 +585,9 @@ def test_asof(self):
assert isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
- idx = pd.date_range('2010-01-01', periods=2, freq='m')
+ index = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-02-28')
- result = idx.asof('2010-02')
+ result = index.asof('2010-02')
assert result == expected
assert not isinstance(result, Index)
@@ -601,9 +601,9 @@ def test_nanosecond_index_access(self):
# this does not yet work, as parsing strings is done via dateutil
# assert first_value == x['2013-01-01 00:00:00.000000050+0000']
- exp_ts = np_datetime64_compat('2013-01-01 00:00:00.000000050+0000',
- 'ns')
- assert first_value == x[Timestamp(exp_ts)]
+ expected_ts = np_datetime64_compat('2013-01-01 00:00:00.000000050+'
+ '0000', 'ns')
+ assert first_value == x[Timestamp(expected_ts)]
@pytest.mark.parametrize("op", [
operator.eq, operator.ne, operator.gt, operator.lt,
@@ -622,15 +622,15 @@ def test_comparators(self, op):
tm.assert_numpy_array_equal(arr_result, index_result)
def test_booleanindex(self):
- boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
- boolIdx[5:30:2] = False
+ boolIndex = np.repeat(True, len(self.strIndex)).astype(bool)
+ boolIndex[5:30:2] = False
- subIndex = self.strIndex[boolIdx]
+ subIndex = self.strIndex[boolIndex]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
- subIndex = self.strIndex[list(boolIdx)]
+ subIndex = self.strIndex[list(boolIndex)]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
@@ -644,11 +644,11 @@ def test_fancy(self):
@pytest.mark.parametrize("dtype", [np.int_, np.bool_])
def test_empty_fancy(self, attr, dtype):
empty_arr = np.array([], dtype=dtype)
- idx = getattr(self, attr)
- empty_idx = idx.__class__([])
+ index = getattr(self, attr)
+ empty_index = index.__class__([])
- assert idx[[]].identical(empty_idx)
- assert idx[empty_arr].identical(empty_idx)
+ assert index[[]].identical(empty_index)
+ assert index[empty_arr].identical(empty_index)
@pytest.mark.parametrize("attr", [
'strIndex', 'intIndex', 'floatIndex'])
@@ -656,12 +656,12 @@ def test_empty_fancy_raises(self, attr):
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
empty_farr = np.array([], dtype=np.float_)
- idx = getattr(self, attr)
- empty_idx = idx.__class__([])
+ index = getattr(self, attr)
+ empty_index = index.__class__([])
- assert idx[[]].identical(empty_idx)
+ assert index[[]].identical(empty_index)
# np.ndarray only accepts ndarray of int & bool dtypes, so should Index
- pytest.raises(IndexError, idx.__getitem__, empty_farr)
+ pytest.raises(IndexError, index.__getitem__, empty_farr)
@pytest.mark.parametrize("itm", [101, 'no_int'])
def test_getitem_error(self, indices, itm):
@@ -678,85 +678,90 @@ def test_intersection(self):
inter = first.intersection(first)
assert inter is first
- idx1 = Index([1, 2, 3, 4, 5], name='idx')
- # if target has the same name, it is preserved
- idx2 = Index([3, 4, 5, 6, 7], name='idx')
- expected2 = Index([3, 4, 5], name='idx')
- result2 = idx1.intersection(idx2)
- tm.assert_index_equal(result2, expected2)
- assert result2.name == expected2.name
-
- # if target name is different, it will be reset
- idx3 = Index([3, 4, 5, 6, 7], name='other')
- expected3 = Index([3, 4, 5], name=None)
- result3 = idx1.intersection(idx3)
- tm.assert_index_equal(result3, expected3)
- assert result3.name == expected3.name
-
- # non monotonic
- idx1 = Index([5, 3, 2, 4, 1], name='idx')
- idx2 = Index([4, 7, 6, 5, 3], name='idx')
- expected = Index([5, 3, 4], name='idx')
- result = idx1.intersection(idx2)
- tm.assert_index_equal(result, expected)
+ @pytest.mark.parametrize("index2,keeps_name", [
+ (Index([3, 4, 5, 6, 7], name="index"), True), # preserve same name
+ (Index([3, 4, 5, 6, 7], name="other"), False), # drop diff names
+ (Index([3, 4, 5, 6, 7]), False)])
+ def test_intersection_name_preservation(self, index2, keeps_name):
+ index1 = Index([1, 2, 3, 4, 5], name='index')
+ expected = Index([3, 4, 5])
+ result = index1.intersection(index2)
- idx2 = Index([4, 7, 6, 5, 3], name='other')
- expected = Index([5, 3, 4], name=None)
- result = idx1.intersection(idx2)
- tm.assert_index_equal(result, expected)
+ if keeps_name:
+ expected.name = 'index'
- # non-monotonic non-unique
- idx1 = Index(['A', 'B', 'A', 'C'])
- idx2 = Index(['B', 'D'])
- expected = Index(['B'], dtype='object')
- result = idx1.intersection(idx2)
+ assert result.name == expected.name
tm.assert_index_equal(result, expected)
- idx2 = Index(['B', 'D', 'A'])
- expected = Index(['A', 'B', 'A'], dtype='object')
- result = idx1.intersection(idx2)
- tm.assert_index_equal(result, expected)
-
- # preserve names
+ @pytest.mark.parametrize("first_name,second_name,expected_name", [
+ ('A', 'A', 'A'), ('A', 'B', None), (None, 'B', None)])
+ def test_intersection_name_preservation2(self, first_name, second_name,
+ expected_name):
first = self.strIndex[5:20]
second = self.strIndex[:10]
- first.name = 'A'
- second.name = 'A'
+ first.name = first_name
+ second.name = second_name
intersect = first.intersection(second)
- assert intersect.name == 'A'
+ assert intersect.name == expected_name
- second.name = 'B'
- intersect = first.intersection(second)
- assert intersect.name is None
+ @pytest.mark.parametrize("index2,keeps_name", [
+ (Index([4, 7, 6, 5, 3], name='index'), True),
+ (Index([4, 7, 6, 5, 3], name='other'), False)])
+ def test_intersection_monotonic(self, index2, keeps_name):
+ index1 = Index([5, 3, 2, 4, 1], name='index')
+ expected = Index([5, 3, 4])
- first.name = None
- second.name = 'B'
- intersect = first.intersection(second)
- assert intersect.name is None
+ if keeps_name:
+ expected.name = "index"
+
+ result = index1.intersection(index2)
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize("index2,expected_arr", [
+ (Index(['B', 'D']), ['B']),
+ (Index(['B', 'D', 'A']), ['A', 'B', 'A'])])
+ def test_intersection_non_monotonic_non_unique(self, index2, expected_arr):
+ # non-monotonic non-unique
+ index1 = Index(['A', 'B', 'A', 'C'])
+ expected = Index(expected_arr, dtype='object')
+ result = index1.intersection(index2)
+ tm.assert_index_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
- res = i2.intersection(i1)
+ result = i2.intersection(i1)
- assert len(res) == 0
+ assert len(result) == 0
def test_union(self):
+ # TODO: Replace with fixturesult
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
+
union = first.union(second)
assert tm.equalContents(union, everything)
+ @pytest.mark.parametrize("klass", [
+ np.array, Series, list])
+ def test_union_from_iterables(self, klass):
# GH 10149
- cases = [klass(second.values) for klass in [np.array, Series, list]]
- for case in cases:
- result = first.union(case)
- assert tm.equalContents(result, everything)
+ # TODO: Replace with fixturesult
+ first = self.strIndex[5:20]
+ second = self.strIndex[:10]
+ everything = self.strIndex[:20]
+
+ case = klass(second.values)
+ result = first.union(case)
+ assert tm.equalContents(result, everything)
+
+ def test_union_identity(self):
+ # TODO: replace with fixturesult
+ first = self.strIndex[5:20]
- # Corner cases
union = first.union(first)
assert union is first
@@ -766,61 +771,22 @@ def test_union(self):
union = Index([]).union(first)
assert union is first
- # preserve names
- first = Index(list('ab'), name='A')
- second = Index(list('ab'), name='B')
- union = first.union(second)
- expected = Index(list('ab'), name=None)
- tm.assert_index_equal(union, expected)
-
- first = Index(list('ab'), name='A')
- second = Index([], name='B')
- union = first.union(second)
- expected = Index(list('ab'), name=None)
- tm.assert_index_equal(union, expected)
-
- first = Index([], name='A')
- second = Index(list('ab'), name='B')
- union = first.union(second)
- expected = Index(list('ab'), name=None)
- tm.assert_index_equal(union, expected)
-
- first = Index(list('ab'))
- second = Index(list('ab'), name='B')
- union = first.union(second)
- expected = Index(list('ab'), name='B')
- tm.assert_index_equal(union, expected)
-
- first = Index([])
- second = Index(list('ab'), name='B')
- union = first.union(second)
- expected = Index(list('ab'), name='B')
- tm.assert_index_equal(union, expected)
-
- first = Index(list('ab'))
- second = Index([], name='B')
+ @pytest.mark.parametrize("first_list", [list('ab'), list()])
+ @pytest.mark.parametrize("second_list", [list('ab'), list()])
+ @pytest.mark.parametrize("first_name, second_name, expected_name", [
+ ('A', 'B', None), (None, 'B', 'B'), ('A', None, 'A')])
+ def test_union_name_preservation(self, first_list, second_list, first_name,
+ second_name, expected_name):
+ first = Index(first_list, name=first_name)
+ second = Index(second_list, name=second_name)
union = first.union(second)
- expected = Index(list('ab'), name='B')
- tm.assert_index_equal(union, expected)
- first = Index(list('ab'), name='A')
- second = Index(list('ab'))
- union = first.union(second)
- expected = Index(list('ab'), name='A')
- tm.assert_index_equal(union, expected)
-
- first = Index(list('ab'), name='A')
- second = Index([])
- union = first.union(second)
- expected = Index(list('ab'), name='A')
- tm.assert_index_equal(union, expected)
-
- first = Index([], name='A')
- second = Index(list('ab'))
- union = first.union(second)
- expected = Index(list('ab'), name='A')
+ vals = sorted(set(first_list).union(second_list))
+ expected = Index(vals, name=expected_name)
tm.assert_index_equal(union, expected)
+ def test_union_dt_as_obj(self):
+ # TODO: Replace with fixturesult
with tm.assert_produces_warning(RuntimeWarning):
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
@@ -837,28 +803,29 @@ def test_union(self):
tm.assert_contains_all(self.dateIndex, firstCat)
def test_add(self):
- idx = self.strIndex
+ index = self.strIndex
expected = Index(self.strIndex.values * 2)
- tm.assert_index_equal(idx + idx, expected)
- tm.assert_index_equal(idx + idx.tolist(), expected)
- tm.assert_index_equal(idx.tolist() + idx, expected)
+ tm.assert_index_equal(index + index, expected)
+ tm.assert_index_equal(index + index.tolist(), expected)
+ tm.assert_index_equal(index.tolist() + index, expected)
# test add and radd
- idx = Index(list('abc'))
+ index = Index(list('abc'))
expected = Index(['a1', 'b1', 'c1'])
- tm.assert_index_equal(idx + '1', expected)
+ tm.assert_index_equal(index + '1', expected)
expected = Index(['1a', '1b', '1c'])
- tm.assert_index_equal('1' + idx, expected)
+ tm.assert_index_equal('1' + index, expected)
def test_sub(self):
- idx = self.strIndex
- pytest.raises(TypeError, lambda: idx - 'a')
- pytest.raises(TypeError, lambda: idx - idx)
- pytest.raises(TypeError, lambda: idx - idx.tolist())
- pytest.raises(TypeError, lambda: idx.tolist() - idx)
+ index = self.strIndex
+ pytest.raises(TypeError, lambda: index - 'a')
+ pytest.raises(TypeError, lambda: index - index)
+ pytest.raises(TypeError, lambda: index - index.tolist())
+ pytest.raises(TypeError, lambda: index.tolist() - index)
def test_map_identity_mapping(self):
# GH 12766
+ # TODO: replace with fixture
for name, cur_index in self.indices.items():
tm.assert_index_equal(cur_index, cur_index.map(lambda x: x))
@@ -867,17 +834,18 @@ def test_map_with_tuples(self):
# Test that returning a single tuple from an Index
# returns an Index.
- idx = tm.makeIntIndex(3)
+ index = tm.makeIntIndex(3)
result = tm.makeIntIndex(3).map(lambda x: (x,))
- expected = Index([(i,) for i in idx])
+ expected = Index([(i,) for i in index])
tm.assert_index_equal(result, expected)
# Test that returning a tuple from a map of a single index
# returns a MultiIndex object.
- result = idx.map(lambda x: (x, x == 1))
- expected = MultiIndex.from_tuples([(i, i == 1) for i in idx])
+ result = index.map(lambda x: (x, x == 1))
+ expected = MultiIndex.from_tuples([(i, i == 1) for i in index])
tm.assert_index_equal(result, expected)
+ def test_map_with_tuples_mi(self):
# Test that returning a single object from a MultiIndex
# returns an Index.
first_level = ['foo', 'bar', 'baz']
@@ -885,20 +853,18 @@ def test_map_with_tuples(self):
reduced_index = multi_index.map(lambda x: x[0])
tm.assert_index_equal(reduced_index, Index(first_level))
- def test_map_tseries_indices_return_index(self):
- date_index = tm.makeDateIndex(10)
- exp = Index([1] * 10)
- tm.assert_index_equal(exp, date_index.map(lambda x: 1))
-
- period_index = tm.makePeriodIndex(10)
- tm.assert_index_equal(exp, period_index.map(lambda x: 1))
-
- tdelta_index = tm.makeTimedeltaIndex(10)
- tm.assert_index_equal(exp, tdelta_index.map(lambda x: 1))
-
+ @pytest.mark.parametrize("attr", [
+ 'makeDateIndex', 'makePeriodIndex', 'makeTimedeltaIndex'])
+ def test_map_tseries_indices_return_index(self, attr):
+ index = getattr(tm, attr)(10)
+ expected = Index([1] * 10)
+ result = index.map(lambda x: 1)
+ tm.assert_index_equal(expected, result)
+
+ def test_map_tseries_indices_accsr_return_index(self):
date_index = tm.makeDateIndex(24, freq='h', name='hourly')
- exp = Index(range(24), name='hourly')
- tm.assert_index_equal(exp, date_index.map(lambda x: x.hour))
+ expected = Index(range(24), name='hourly')
+ tm.assert_index_equal(expected, date_index.map(lambda x: x.hour))
@pytest.mark.parametrize(
"mapper",
@@ -908,10 +874,11 @@ def test_map_tseries_indices_return_index(self):
def test_map_dictlike(self, mapper):
# GH 12756
expected = Index(['foo', 'bar', 'baz'])
- idx = tm.makeIntIndex(3)
- result = idx.map(mapper(expected.values, idx))
+ index = tm.makeIntIndex(3)
+ result = index.map(mapper(expected.values, index))
tm.assert_index_equal(result, expected)
+ # TODO: replace with fixture
for name in self.indices.keys():
if name == 'catIndex':
# Tested in test_categorical
@@ -930,29 +897,28 @@ def test_map_dictlike(self, mapper):
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
- def test_map_with_non_function_missing_values(self):
+ @pytest.mark.parametrize("mapper", [
+ Series(['foo', 2., 'baz'], index=[0, 2, -1]),
+ {0: 'foo', 2: 2.0, -1: 'baz'}])
+ def test_map_with_non_function_missing_values(self, mapper):
# GH 12756
expected = Index([2., np.nan, 'foo'])
- input = Index([2, 1, 0])
-
- mapper = Series(['foo', 2., 'baz'], index=[0, 2, -1])
- tm.assert_index_equal(expected, input.map(mapper))
+ result = Index([2, 1, 0]).map(mapper)
- mapper = {0: 'foo', 2: 2.0, -1: 'baz'}
- tm.assert_index_equal(expected, input.map(mapper))
+ tm.assert_index_equal(expected, result)
def test_map_na_exclusion(self):
- idx = Index([1.5, np.nan, 3, np.nan, 5])
+ index = Index([1.5, np.nan, 3, np.nan, 5])
- result = idx.map(lambda x: x * 2, na_action='ignore')
- exp = idx * 2
- tm.assert_index_equal(result, exp)
+ result = index.map(lambda x: x * 2, na_action='ignore')
+ expected = index * 2
+ tm.assert_index_equal(result, expected)
def test_map_defaultdict(self):
- idx = Index([1, 2, 3])
+ index = Index([1, 2, 3])
default_dict = defaultdict(lambda: 'blank')
default_dict[1] = 'stuff'
- result = idx.map(default_dict)
+ result = index.map(default_dict)
expected = Index(['stuff', 'blank', 'blank'])
tm.assert_index_equal(result, expected)
@@ -967,18 +933,14 @@ def test_append_multiple(self):
result = index.append([])
tm.assert_index_equal(result, index)
- def test_append_empty_preserve_name(self):
- left = Index([], name='foo')
- right = Index([1, 2, 3], name='foo')
-
- result = left.append(right)
- assert result.name == 'foo'
-
+ @pytest.mark.parametrize("name,expected", [
+ ('foo', 'foo'), ('bar', None)])
+ def test_append_empty_preserve_name(self, name, expected):
left = Index([], name='foo')
- right = Index([1, 2, 3], name='bar')
+ right = Index([1, 2, 3], name=name)
result = left.append(right)
- assert result.name is None
+ assert result.name == expected
def test_add_string(self):
# from bug report
@@ -996,78 +958,82 @@ def test_iadd_string(self):
index += '_x'
assert 'a_x' in index
- def test_difference(self):
-
+ @pytest.mark.parametrize("second_name,expected", [
+ (None, None), ('name', 'name')])
+ def test_difference_name_preservation(self, second_name, expected):
+ # TODO: replace with fixturesult
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
+
first.name = 'name'
- # different names
+ second.name = second_name
result = first.difference(second)
assert tm.equalContents(result, answer)
- assert result.name is None
- # same names
- second.name = 'name'
- result = first.difference(second)
- assert result.name == 'name'
+ if expected is None:
+ assert result.name is None
+ else:
+ assert result.name == expected
- # with empty
+ def test_difference_empty_arg(self):
+ first = self.strIndex[5:20]
+ first.name == 'name'
result = first.difference([])
+
assert tm.equalContents(result, first)
assert result.name == first.name
- # with everything
+ def test_difference_identity(self):
+ first = self.strIndex[5:20]
+ first.name == 'name'
result = first.difference(first)
+
assert len(result) == 0
assert result.name == first.name
def test_symmetric_difference(self):
# smoke
- idx1 = Index([1, 2, 3, 4], name='idx1')
- idx2 = Index([2, 3, 4, 5])
- result = idx1.symmetric_difference(idx2)
+ index1 = Index([1, 2, 3, 4], name='index1')
+ index2 = Index([2, 3, 4, 5])
+ result = index1.symmetric_difference(index2)
expected = Index([1, 5])
assert tm.equalContents(result, expected)
assert result.name is None
# __xor__ syntax
- expected = idx1 ^ idx2
+ expected = index1 ^ index2
assert tm.equalContents(result, expected)
assert result.name is None
- # multiIndex
- idx1 = MultiIndex.from_tuples(self.tuples)
- idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
- result = idx1.symmetric_difference(idx2)
+ def test_symmetric_difference_mi(self):
+ index1 = MultiIndex.from_tuples(self.tuples)
+ index2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
+ result = index1.symmetric_difference(index2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
assert tm.equalContents(result, expected)
- # nans:
+ @pytest.mark.parametrize("index2,expected", [
+ (Index([0, 1, np.nan]), Index([0.0, 2.0, 3.0])),
+ (Index([0, 1]), Index([0.0, 2.0, 3.0, np.nan]))])
+ def test_symmetric_difference_missing(self, index2, expected):
# GH 13514 change: {nan} - {nan} == {}
# (GH 6444, sorting of nans, is no longer an issue)
- idx1 = Index([1, np.nan, 2, 3])
- idx2 = Index([0, 1, np.nan])
- idx3 = Index([0, 1])
+ index1 = Index([1, np.nan, 2, 3])
- result = idx1.symmetric_difference(idx2)
- expected = Index([0.0, 2.0, 3.0])
+ result = index1.symmetric_difference(index2)
tm.assert_index_equal(result, expected)
- result = idx1.symmetric_difference(idx3)
- expected = Index([0.0, 2.0, 3.0, np.nan])
- tm.assert_index_equal(result, expected)
-
- # other not an Index:
- idx1 = Index([1, 2, 3, 4], name='idx1')
- idx2 = np.array([2, 3, 4, 5])
+ def test_symmetric_difference_non_index(self):
+ index1 = Index([1, 2, 3, 4], name='index1')
+ index2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
- result = idx1.symmetric_difference(idx2)
+ result = index1.symmetric_difference(index2)
assert tm.equalContents(result, expected)
- assert result.name == 'idx1'
+ assert result.name == 'index1'
- result = idx1.symmetric_difference(idx2, result_name='new_name')
+ result = index1.symmetric_difference(index2, result_name='new_name')
assert tm.equalContents(result, expected)
assert result.name == 'new_name'
@@ -1076,9 +1042,9 @@ def test_difference_type(self):
# If taking difference of a set and itself, it
# needs to preserve the type of the index
skip_index_keys = ['repeats']
- for key, idx in self.generate_index_types(skip_index_keys):
- result = idx.difference(idx)
- expected = idx.drop(idx)
+ for key, index in self.generate_index_types(skip_index_keys):
+ result = index.difference(index)
+ expected = index.drop(index)
tm.assert_index_equal(result, expected)
def test_intersection_difference(self):
@@ -1087,30 +1053,28 @@ def test_intersection_difference(self):
# empty index produces the same index as the difference
# of an index with itself. Test for all types
skip_index_keys = ['repeats']
- for key, idx in self.generate_index_types(skip_index_keys):
- inter = idx.intersection(idx.drop(idx))
- diff = idx.difference(idx)
+ for key, index in self.generate_index_types(skip_index_keys):
+ inter = index.intersection(index.drop(index))
+ diff = index.difference(index)
tm.assert_index_equal(inter, diff)
- def test_is_numeric(self):
- assert not self.dateIndex.is_numeric()
- assert not self.strIndex.is_numeric()
- assert self.intIndex.is_numeric()
- assert self.floatIndex.is_numeric()
- assert not self.catIndex.is_numeric()
-
- def test_is_object(self):
- assert self.strIndex.is_object()
- assert self.boolIndex.is_object()
- assert not self.catIndex.is_object()
- assert not self.intIndex.is_object()
- assert not self.dateIndex.is_object()
- assert not self.floatIndex.is_object()
-
- def test_is_all_dates(self):
- assert self.dateIndex.is_all_dates
- assert not self.strIndex.is_all_dates
- assert not self.intIndex.is_all_dates
+ @pytest.mark.parametrize("attr,expected", [
+ ('strIndex', False), ('boolIndex', False), ('catIndex', False),
+ ('intIndex', True), ('dateIndex', False), ('floatIndex', True)])
+ def test_is_numeric(self, attr, expected):
+ assert getattr(self, attr).is_numeric() == expected
+
+ @pytest.mark.parametrize("attr,expected", [
+ ('strIndex', True), ('boolIndex', True), ('catIndex', False),
+ ('intIndex', False), ('dateIndex', False), ('floatIndex', False)])
+ def test_is_object(self, attr, expected):
+ assert getattr(self, attr).is_object() == expected
+
+ @pytest.mark.parametrize("attr,expected", [
+ ('strIndex', False), ('boolIndex', False), ('catIndex', False),
+ ('intIndex', False), ('dateIndex', True), ('floatIndex', False)])
+ def test_is_all_dates(self, attr, expected):
+ assert getattr(self, attr).is_all_dates == expected
def test_summary(self):
self._check_method_works(Index._summary)
@@ -1142,19 +1106,21 @@ def test_format(self):
expected = [str(index[0])]
assert formatted == expected
+ self.strIndex[:0].format()
+
+ @pytest.mark.parametrize("vals", [
+ [1, 2.0 + 3.0j, 4.], ['a', 'b', 'c']])
+ def test_format_missing(self, vals, nulls_fixture):
# 2845
- index = Index([1, 2.0 + 3.0j, np.nan])
- formatted = index.format()
- expected = [str(index[0]), str(index[1]), u('NaN')]
- assert formatted == expected
+ vals = list(vals) # Copy for each iteration
+ vals.append(nulls_fixture)
+ index = Index(vals)
- # is this really allowed?
- index = Index([1, 2.0 + 3.0j, None])
formatted = index.format()
- expected = [str(index[0]), str(index[1]), u('NaN')]
- assert formatted == expected
+ expected = [str(index[0]), str(index[1]), str(index[2]), u('NaN')]
- self.strIndex[:0].format()
+ assert formatted == expected
+ assert index[3] is nulls_fixture
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
@@ -1172,19 +1138,13 @@ def test_format_datetime_with_time(self):
assert len(result) == 2
assert result == expected
- def test_format_none(self):
- values = ['a', 'b', 'c', None]
-
- idx = Index(values)
- idx.format()
- assert idx[3] is None
-
- def test_logical_compat(self):
- idx = self.create_index()
- assert idx.all() == idx.values.all()
- assert idx.any() == idx.values.any()
+ @pytest.mark.parametrize("op", ['any', 'all'])
+ def test_logical_compat(self, op):
+ index = self.create_index()
+ assert getattr(index, op)() == getattr(index.values, op)()
def _check_method_works(self, method):
+ # TODO: make this a dedicated test with parametrized methods
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
@@ -1194,41 +1154,39 @@ def _check_method_works(self, method):
method(self.catIndex)
def test_get_indexer(self):
- idx1 = Index([1, 2, 3, 4, 5])
- idx2 = Index([2, 4, 6])
-
- r1 = idx1.get_indexer(idx2)
- assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
+ index1 = Index([1, 2, 3, 4, 5])
+ index2 = Index([2, 4, 6])
- r1 = idx2.get_indexer(idx1, method='pad')
- e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
+ r1 = index1.get_indexer(index2)
+ e1 = np.array([1, 3, -1], dtype=np.intp)
assert_almost_equal(r1, e1)
- r2 = idx2.get_indexer(idx1[::-1], method='pad')
- assert_almost_equal(r2, e1[::-1])
+ @pytest.mark.parametrize("reverse", [True, False])
+ @pytest.mark.parametrize("expected,method", [
+ (np.array([-1, 0, 0, 1, 1], dtype=np.intp), 'pad'),
+ (np.array([-1, 0, 0, 1, 1], dtype=np.intp), 'ffill'),
+ (np.array([0, 0, 1, 1, 2], dtype=np.intp), 'backfill'),
+ (np.array([0, 0, 1, 1, 2], dtype=np.intp), 'bfill')])
+ def test_get_indexer_methods(self, reverse, expected, method):
+ index1 = Index([1, 2, 3, 4, 5])
+ index2 = Index([2, 4, 6])
- rffill1 = idx2.get_indexer(idx1, method='ffill')
- assert_almost_equal(r1, rffill1)
-
- r1 = idx2.get_indexer(idx1, method='backfill')
- e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
- assert_almost_equal(r1, e1)
+ if reverse:
+ index1 = index1[::-1]
+ expected = expected[::-1]
- rbfill1 = idx2.get_indexer(idx1, method='bfill')
- assert_almost_equal(r1, rbfill1)
-
- r2 = idx2.get_indexer(idx1[::-1], method='backfill')
- assert_almost_equal(r2, e1[::-1])
+ result = index2.get_indexer(index1, method=method)
+ assert_almost_equal(result, expected)
def test_get_indexer_invalid(self):
# GH10411
- idx = Index(np.arange(10))
+ index = Index(np.arange(10))
with tm.assert_raises_regex(ValueError, 'tolerance argument'):
- idx.get_indexer([1, 0], tolerance=1)
+ index.get_indexer([1, 0], tolerance=1)
with tm.assert_raises_regex(ValueError, 'limit argument'):
- idx.get_indexer([1, 0], limit=1)
+ index.get_indexer([1, 0], limit=1)
@pytest.mark.parametrize(
'method, tolerance, indexer, expected',
@@ -1251,9 +1209,9 @@ def test_get_indexer_invalid(self):
('backfill', 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]),
('nearest', 0.2, [0.2, 1.8, 8.5], [0, 2, -1])])
def test_get_indexer_nearest(self, method, tolerance, indexer, expected):
- idx = Index(np.arange(10))
+ index = Index(np.arange(10))
- actual = idx.get_indexer(indexer, method=method, tolerance=tolerance)
+ actual = index.get_indexer(indexer, method=method, tolerance=tolerance)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
@@ -1266,57 +1224,54 @@ def test_get_indexer_nearest(self, method, tolerance, indexer, expected):
[-1, 2, 9]])))
def test_get_indexer_nearest_listlike_tolerance(self, tolerance,
expected, listtype):
- idx = Index(np.arange(10))
+ index = Index(np.arange(10))
- actual = idx.get_indexer([0.2, 1.8, 8.5], method='nearest',
- tolerance=listtype(tolerance))
+ actual = index.get_indexer([0.2, 1.8, 8.5], method='nearest',
+ tolerance=listtype(tolerance))
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
def test_get_indexer_nearest_error(self):
- idx = Index(np.arange(10))
+ index = Index(np.arange(10))
with tm.assert_raises_regex(ValueError, 'limit argument'):
- idx.get_indexer([1, 0], method='nearest', limit=1)
+ index.get_indexer([1, 0], method='nearest', limit=1)
with pytest.raises(ValueError, match='tolerance size must match'):
- idx.get_indexer([1, 0], method='nearest',
- tolerance=[1, 2, 3])
+ index.get_indexer([1, 0], method='nearest',
+ tolerance=[1, 2, 3])
- def test_get_indexer_nearest_decreasing(self):
- idx = Index(np.arange(10))[::-1]
+ @pytest.mark.parametrize("method,expected", [
+ ('pad', [8, 7, 0]), ('backfill', [9, 8, 1]), ('nearest', [9, 7, 0])])
+ def test_get_indexer_nearest_decreasing(self, method, expected):
+ index = Index(np.arange(10))[::-1]
- all_methods = ['pad', 'backfill', 'nearest']
- for method in all_methods:
- actual = idx.get_indexer([0, 5, 9], method=method)
- tm.assert_numpy_array_equal(actual, np.array([9, 4, 0],
- dtype=np.intp))
+ actual = index.get_indexer([0, 5, 9], method=method)
+ tm.assert_numpy_array_equal(actual, np.array([9, 4, 0], dtype=np.intp))
- for method, expected in zip(all_methods, [[8, 7, 0], [9, 8, 1],
- [9, 7, 0]]):
- actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
- tm.assert_numpy_array_equal(actual, np.array(expected,
- dtype=np.intp))
+ actual = index.get_indexer([0.2, 1.8, 8.5], method=method)
+ tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
- def test_get_indexer_strings(self):
- idx = pd.Index(['b', 'c'])
+ @pytest.mark.parametrize("method,expected", [
+ ('pad', np.array([-1, 0, 1, 1], dtype=np.intp)),
+ ('backfill', np.array([0, 0, 1, -1], dtype=np.intp))])
+ def test_get_indexer_strings(self, method, expected):
+ index = pd.Index(['b', 'c'])
+ actual = index.get_indexer(['a', 'b', 'c', 'd'], method=method)
- actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='pad')
- expected = np.array([-1, 0, 1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(actual, expected)
- actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='backfill')
- expected = np.array([0, 0, 1, -1], dtype=np.intp)
- tm.assert_numpy_array_equal(actual, expected)
+ def test_get_indexer_strings_raises(self):
+ index = pd.Index(['b', 'c'])
with pytest.raises(TypeError):
- idx.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
+ index.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
with pytest.raises(TypeError):
- idx.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
+ index.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
with pytest.raises(TypeError):
- idx.get_indexer(['a', 'b', 'c', 'd'], method='pad',
- tolerance=[2, 2, 2, 2])
+ index.get_indexer(['a', 'b', 'c', 'd'], method='pad',
+ tolerance=[2, 2, 2, 2])
def test_get_indexer_numeric_index_boolean_target(self):
# GH 16877
| progress towards #20812 | https://api.github.com/repos/pandas-dev/pandas/pulls/20813 | 2018-04-24T18:21:32Z | 2018-04-27T10:56:05Z | 2018-04-27T10:56:05Z | 2018-05-14T21:11:05Z |
Fixed typo | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 750b260c7f228..66d183d910000 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -1773,7 +1773,7 @@ These both yield the same results, so which should you use? It is instructive to
of operations on these and why method 2 (``.loc``) is much preferred over method 1 (chained ``[]``).
``dfmi['one']`` selects the first level of the columns and returns a DataFrame that is singly-indexed.
-Then another Python operation ``dfmi_with_one['second']`` selects the series indexed by ``'second'`` happens.
+Then another Python operation ``dfmi_with_one['second']`` selects the series indexed by ``'second'``.
This is indicated by the variable ``dfmi_with_one`` because pandas sees these operations as separate events.
e.g. separate calls to ``__getitem__``, so it has to treat them as linear operations, they happen one after another.
| I believe it should be either `Then another Python operation ``dfmi_with_one['second']`` happens.` or `Then another Python operation ``dfmi_with_one['second']`` selects the series indexed by ``'second'``.` .
Not `Then another Python operation ``dfmi_with_one['second']`` selects the series indexed by ``'second'`` happens.`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/20811 | 2018-04-24T16:44:13Z | 2018-04-24T19:13:29Z | 2018-04-24T19:13:29Z | 2018-04-24T19:13:31Z |
BUG: Fixed NDFrame.transform('abs') | diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 8fb74e2e87174..ac173c5182bc7 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -111,8 +111,14 @@ def get_result(self):
# string dispatch
if isinstance(self.f, compat.string_types):
- self.kwds['axis'] = self.axis
- return getattr(self.obj, self.f)(*self.args, **self.kwds)
+ # Support for `frame.transform('method')`
+ # Some methods (shift, etc.) require the axis argument, others
+ # don't, so inspect and insert if nescessary.
+ func = getattr(self.obj, self.f)
+ sig = compat.signature(func)
+ if 'axis' in sig.args:
+ self.kwds['axis'] = self.axis
+ return func(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index a057ca0879cac..af39c8f01cf73 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -4,6 +4,7 @@
import pytest
+import operator
from datetime import datetime
import warnings
@@ -880,6 +881,16 @@ def f():
with np.errstate(all='ignore'):
df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']})
+ @pytest.mark.parametrize('method', [
+ 'abs', 'shift', 'pct_change', 'cumsum', 'rank',
+ ])
+ def test_transform_method_name(self, method):
+ # https://github.com/pandas-dev/pandas/issues/19760
+ df = pd.DataFrame({"A": [-1, 2]})
+ result = df.transform(method)
+ expected = operator.methodcaller(method)(df)
+ tm.assert_frame_equal(result, expected)
+
def test_demo(self):
# demonstration tests
df = pd.DataFrame({'A': range(5), 'B': 5})
| Closes https://github.com/pandas-dev/pandas/issues/19760
So this is a hacky workaround. I think we would ideally whitelist a set of valid NDFrame.transform methods, but I don't want to break things. What would that list include?
- abs
- pct_change
- shift
- tshift
- cum* | https://api.github.com/repos/pandas-dev/pandas/pulls/20800 | 2018-04-23T18:50:57Z | 2018-04-25T20:21:41Z | 2018-04-25T20:21:41Z | 2018-04-25T20:21:44Z |
BUG: Coerce to object for mixed concat | diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 0501493e718d0..377ef7ad7e4f8 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -8,6 +8,7 @@
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_sparse,
+ is_extension_array_dtype,
is_datetimetz,
is_datetime64_dtype,
is_timedelta64_dtype,
@@ -173,6 +174,10 @@ def is_nonempty(x):
elif 'sparse' in typs:
return _concat_sparse(to_concat, axis=axis, typs=typs)
+ extensions = [is_extension_array_dtype(x) for x in to_concat]
+ if any(extensions):
+ to_concat = [np.atleast_2d(x.astype('object')) for x in to_concat]
+
if not nonempty:
# we have all empties, but may need to coerce the result dtype to
# object if we have non-numeric type operands (numpy would otherwise
@@ -210,7 +215,7 @@ def _concat_categorical(to_concat, axis=0):
def _concat_asobject(to_concat):
to_concat = [x.get_values() if is_categorical_dtype(x.dtype)
- else x.ravel() for x in to_concat]
+ else np.asarray(x).ravel() for x in to_concat]
res = _concat_compat(to_concat)
if axis == 1:
return res.reshape(1, len(res))
@@ -548,6 +553,8 @@ def convert_sparse(x, axis):
# coerce to native type
if isinstance(x, SparseArray):
x = x.get_values()
+ else:
+ x = np.asarray(x)
x = x.ravel()
if axis > 0:
x = np.atleast_2d(x)
diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py
index 40456453cb43d..cc78321dea7af 100644
--- a/pandas/tests/extension/base/reshaping.py
+++ b/pandas/tests/extension/base/reshaping.py
@@ -41,6 +41,29 @@ def test_concat_all_na_block(self, data_missing, in_frame):
expected = pd.Series(data_missing.take([1, 1, 0, 0]))
self.assert_series_equal(result, expected)
+ def test_concat_mixed_dtypes(self, data):
+ # https://github.com/pandas-dev/pandas/issues/20762
+ df1 = pd.DataFrame({'A': data[:3]})
+ df2 = pd.DataFrame({"A": [1, 2, 3]})
+ df3 = pd.DataFrame({"A": ['a', 'b', 'c']}).astype('category')
+ df4 = pd.DataFrame({"A": pd.SparseArray([1, 2, 3])})
+ dfs = [df1, df2, df3, df4]
+
+ # dataframes
+ result = pd.concat(dfs)
+ expected = pd.concat([x.astype(object) for x in dfs])
+ self.assert_frame_equal(result, expected)
+
+ # series
+ result = pd.concat([x['A'] for x in dfs])
+ expected = pd.concat([x['A'].astype(object) for x in dfs])
+ self.assert_series_equal(result, expected)
+
+ # simple test for just EA and one other
+ result = pd.concat([df1, df2])
+ expected = pd.concat([df1.astype('object'), df2.astype('object')])
+ self.assert_frame_equal(result, expected)
+
def test_align(self, data, na_value):
a = data[:3]
b = data[2:5]
| - [x] closes #20762
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/20799 | 2018-04-23T16:54:41Z | 2018-04-24T07:48:41Z | 2018-04-24T07:48:40Z | 2018-04-24T08:50:22Z |
BUG: fix to_latex() when using MultiIndex with NaN in (#14249) (v2) | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index e19aedac80213..46c972a9fa574 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -1148,6 +1148,10 @@ I/O
- Bug in :func:`read_csv` where missing values were not being handled properly when ``keep_default_na=False`` with dictionary ``na_values`` (:issue:`19227`)
- Bug in :func:`read_sas` where a file with 0 variables gave an ``AttributeError`` incorrectly. Now it gives an ``EmptyDataError`` (:issue:`18184`)
- Bug in :func:`DataFrame.to_latex()` where pairs of braces meant to serve as invisible placeholders were escaped (:issue:`18667`)
+- Bug in :func:`DataFrame.to_latex()` where a ``NaN`` in a ``MultiIndex`` would cause an ``IndexError`` or incorrect output (:issue:`14249`)
+- Bug in :func:`DataFrame.to_latex()` where a non-string index-level name would result in an ``AttributeError`` (:issue:`19981`)
+- Bug in :func:`DataFrame.to_latex()` where the combination of an index name and the `index_names=False` option would result in incorrect output (:issue:`18326`)
+- Bug in :func:`DataFrame.to_latex()` where a ``MultiIndex`` with an empty string as its name would result in incorrect output (:issue:`18669`)
- Bug in :func:`read_json` where large numeric values were causing an ``OverflowError`` (:issue:`18842`)
- Bug in :func:`DataFrame.to_parquet` where an exception was raised if the write destination is S3 (:issue:`19134`)
- :class:`Interval` now supported in :func:`DataFrame.to_excel` for all Excel file types (:issue:`19242`)
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py
index 67b0a4f0e034e..94d48e7f98286 100644
--- a/pandas/io/formats/latex.py
+++ b/pandas/io/formats/latex.py
@@ -64,35 +64,32 @@ def get_col_type(dtype):
# reestablish the MultiIndex that has been joined by _to_str_column
if self.fmt.index and isinstance(self.frame.index, MultiIndex):
+ out = self.frame.index.format(
+ adjoin=False, sparsify=self.fmt.sparsify,
+ names=self.fmt.has_index_names, na_rep=self.fmt.na_rep
+ )
+
+ # index.format will sparsify repeated entries with empty strings
+ # so pad these with some empty space
+ def pad_empties(x):
+ for pad in reversed(x):
+ if pad:
+ break
+ return [x[0]] + [i if i else ' ' * len(pad) for i in x[1:]]
+ out = (pad_empties(i) for i in out)
+
+ # Add empty spaces for each column level
clevels = self.frame.columns.nlevels
- strcols.pop(0)
- name = any(self.frame.index.names)
- cname = any(self.frame.columns.names)
- lastcol = self.frame.index.nlevels - 1
- previous_lev3 = None
- for i, lev in enumerate(self.frame.index.levels):
- lev2 = lev.format()
- blank = ' ' * len(lev2[0])
- # display column names in last index-column
- if cname and i == lastcol:
- lev3 = [x if x else '{}' for x in self.frame.columns.names]
- else:
- lev3 = [blank] * clevels
- if name:
- lev3.append(lev.name)
- current_idx_val = None
- for level_idx in self.frame.index.labels[i]:
- if ((previous_lev3 is None or
- previous_lev3[len(lev3)].isspace()) and
- lev2[level_idx] == current_idx_val):
- # same index as above row and left index was the same
- lev3.append(blank)
- else:
- # different value than above or left index different
- lev3.append(lev2[level_idx])
- current_idx_val = lev2[level_idx]
- strcols.insert(i, lev3)
- previous_lev3 = lev3
+ out = [[' ' * len(i[-1])] * clevels + i for i in out]
+
+ # Add the column names to the last index column
+ cnames = self.frame.columns.names
+ if any(cnames):
+ new_names = [i if i else '{}' for i in cnames]
+ out[self.frame.index.nlevels - 1][:clevels] = new_names
+
+ # Get rid of old multiindex column and add new ones
+ strcols = out + strcols[1:]
column_format = self.column_format
if column_format is None:
@@ -118,7 +115,7 @@ def get_col_type(dtype):
ilevels = self.frame.index.nlevels
clevels = self.frame.columns.nlevels
nlevels = clevels
- if any(self.frame.index.names):
+ if self.fmt.has_index_names and self.fmt.show_index_names:
nlevels += 1
strrows = list(zip(*strcols))
self.clinebuf = []
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index 5ebf196be094e..ddd34e0c1626d 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -621,3 +621,74 @@ def test_to_latex_multiindex_names(self, name0, name1, axes):
\end{tabular}
""" % tuple(list(col_names) + [idx_names_row])
assert observed == expected
+
+ @pytest.mark.parametrize('one_row', [True, False])
+ def test_to_latex_multiindex_nans(self, one_row):
+ # GH 14249
+ df = pd.DataFrame({'a': [None, 1], 'b': [2, 3], 'c': [4, 5]})
+ if one_row:
+ df = df.iloc[[0]]
+ observed = df.set_index(['a', 'b']).to_latex()
+ expected = r"""\begin{tabular}{llr}
+\toprule
+ & & c \\
+a & b & \\
+\midrule
+NaN & 2 & 4 \\
+"""
+ if not one_row:
+ expected += r"""1.0 & 3 & 5 \\
+"""
+ expected += r"""\bottomrule
+\end{tabular}
+"""
+ assert observed == expected
+
+ def test_to_latex_non_string_index(self):
+ # GH 19981
+ observed = pd.DataFrame([[1, 2, 3]] * 2).set_index([0, 1]).to_latex()
+ expected = r"""\begin{tabular}{llr}
+\toprule
+ & & 2 \\
+0 & 1 & \\
+\midrule
+1 & 2 & 3 \\
+ & 2 & 3 \\
+\bottomrule
+\end{tabular}
+"""
+ assert observed == expected
+
+ def test_to_latex_midrule_location(self):
+ # GH 18326
+ df = pd.DataFrame({'a': [1, 2]})
+ df.index.name = 'foo'
+ observed = df.to_latex(index_names=False)
+ expected = r"""\begin{tabular}{lr}
+\toprule
+{} & a \\
+\midrule
+0 & 1 \\
+1 & 2 \\
+\bottomrule
+\end{tabular}
+"""
+
+ assert observed == expected
+
+ def test_to_latex_multiindex_empty_name(self):
+ # GH 18669
+ mi = pd.MultiIndex.from_product([[1, 2]], names=[''])
+ df = pd.DataFrame(-1, index=mi, columns=range(4))
+ observed = df.to_latex()
+ expected = r"""\begin{tabular}{lrrrr}
+\toprule
+ & 0 & 1 & 2 & 3 \\
+{} & & & & \\
+\midrule
+1 & -1 & -1 & -1 & -1 \\
+2 & -1 & -1 & -1 & -1 \\
+\bottomrule
+\end{tabular}
+"""
+ assert observed == expected
| This is an improved version of #19910 cleaning up MultiIndex handling in the process.
- [x] closes #14249
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
~~I quickly looked at some other issues that this didn't originally aim to address and found that this PR closes #19981 and #18326 too, but I didn't add a tests or whats new entries for those (yet).~~
Edit: tests and whatsnew entries added for:
- [x] closes #19981
- [x] closes #18326
- [x] closes #18669 | https://api.github.com/repos/pandas-dev/pandas/pulls/20797 | 2018-04-23T15:53:12Z | 2018-04-25T01:28:02Z | 2018-04-25T01:28:00Z | 2018-04-25T01:29:18Z |
ENH: Implemented lazy iteration | diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 3c0dd646aa502..ba2e63c20d3f8 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -103,6 +103,7 @@ def setup(self):
self.df2 = DataFrame(np.random.randn(N * 50, 10))
self.df3 = DataFrame(np.random.randn(N, 5 * N),
columns=['C' + str(c) for c in range(N * 5)])
+ self.df4 = DataFrame(np.random.randn(N * 1000, 10))
def time_iteritems(self):
# (monitor no-copying behaviour)
@@ -119,10 +120,70 @@ def time_iteritems_indexing(self):
for col in self.df3:
self.df3[col]
+ def time_itertuples_start(self):
+ self.df4.itertuples()
+
+ def time_itertuples_read_first(self):
+ next(self.df4.itertuples())
+
def time_itertuples(self):
- for row in self.df2.itertuples():
+ for row in self.df4.itertuples():
+ pass
+
+ def time_itertuples_to_list(self):
+ list(self.df4.itertuples())
+
+ def mem_itertuples_start(self):
+ return self.df4.itertuples()
+
+ def peakmem_itertuples_start(self):
+ self.df4.itertuples()
+
+ def mem_itertuples_read_first(self):
+ return next(self.df4.itertuples())
+
+ def peakmem_itertuples(self):
+ for row in self.df4.itertuples():
+ pass
+
+ def mem_itertuples_to_list(self):
+ return list(self.df4.itertuples())
+
+ def peakmem_itertuples_to_list(self):
+ list(self.df4.itertuples())
+
+ def time_itertuples_raw_start(self):
+ self.df4.itertuples(index=False, name=None)
+
+ def time_itertuples_raw_read_first(self):
+ next(self.df4.itertuples(index=False, name=None))
+
+ def time_itertuples_raw_tuples(self):
+ for row in self.df4.itertuples(index=False, name=None):
pass
+ def time_itertuples_raw_tuples_to_list(self):
+ list(self.df4.itertuples(index=False, name=None))
+
+ def mem_itertuples_raw_start(self):
+ return self.df4.itertuples(index=False, name=None)
+
+ def peakmem_itertuples_raw_start(self):
+ self.df4.itertuples(index=False, name=None)
+
+ def peakmem_itertuples_raw_read_first(self):
+ next(self.df4.itertuples(index=False, name=None))
+
+ def peakmem_itertuples_raw(self):
+ for row in self.df4.itertuples(index=False, name=None):
+ pass
+
+ def mem_itertuples_raw_to_list(self):
+ return list(self.df4.itertuples(index=False, name=None))
+
+ def peakmem_itertuples_raw_to_list(self):
+ list(self.df4.itertuples(index=False, name=None))
+
def time_iterrows(self):
for row in self.df.iterrows():
pass
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index a2abda019812a..7090acdc37382 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1251,6 +1251,8 @@ Performance Improvements
- Fixed a performance regression on Windows with Python 3.7 of :func:`read_csv` (:issue:`23516`)
- Improved performance of :class:`Categorical` constructor for ``Series`` objects (:issue:`23814`)
- Improved performance of :meth:`~DataFrame.where` for Categorical data (:issue:`24077`)
+- Improved performance of iterating over a :class:`Series`. Using :meth:`DataFrame.itertuples` now creates iterators
+ without internally allocating lists of all elements (:issue:`20783`)
.. _whatsnew_0240.docs:
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 4a64ea0e56574..0a4111b51ba4e 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -8,7 +8,7 @@
import pandas._libs.lib as lib
import pandas.compat as compat
-from pandas.compat import PYPY, OrderedDict, builtins
+from pandas.compat import PYPY, OrderedDict, builtins, map, range
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly
@@ -1072,7 +1072,13 @@ def __iter__(self):
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
- return iter(self.tolist())
+ # We are explicity making element iterators.
+ if is_datetimelike(self._values):
+ return map(com.maybe_box_datetimelike, self._values)
+ elif is_extension_array_dtype(self._values):
+ return iter(self._values)
+ else:
+ return map(self._values.item, range(self._values.size))
@cache_readonly
def hasnans(self):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index c4537db254132..c8ef958750379 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -898,10 +898,10 @@ def itertuples(self, index=True, name="Pandas"):
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
- fields = []
+ fields = list(self.columns)
if index:
arrays.append(self.index)
- fields.append("Index")
+ fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
@@ -911,10 +911,9 @@ def itertuples(self, index=True, name="Pandas"):
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
- itertuple = collections.namedtuple(name,
- fields + list(self.columns),
- rename=True)
+ itertuple = collections.namedtuple(name, fields, rename=True)
return map(itertuple._make, zip(*arrays))
+
except Exception:
pass
| Fixes GH20783.
- [x] closes #20783
- [x] no tests needed because just internal implementation change
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
* there are some failures on existing code
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/20796 | 2018-04-23T14:52:30Z | 2018-12-25T17:59:10Z | 2018-12-25T17:59:10Z | 2018-12-25T23:03:58Z |
DOC/API: expose ExtensionArray in public api | diff --git a/doc/source/api.rst b/doc/source/api.rst
index e43632ea46bfb..93edd090d846b 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -2549,6 +2549,8 @@ objects.
api.extensions.register_dataframe_accessor
api.extensions.register_series_accessor
api.extensions.register_index_accessor
+ api.extensions.ExtensionDtype
+ api.extensions.ExtensionArray
.. This is to prevent warnings in the doc build. We don't want to encourage
.. these methods.
diff --git a/doc/source/extending.rst b/doc/source/extending.rst
index b94a43480ed93..f665b219a7bd1 100644
--- a/doc/source/extending.rst
+++ b/doc/source/extending.rst
@@ -61,7 +61,7 @@ Extension Types
.. warning::
- The ``ExtensionDtype`` and ``ExtensionArray`` APIs are new and
+ The :class:`pandas.api.extension.ExtensionDtype` and :class:`pandas.api.extension.ExtensionArray` APIs are new and
experimental. They may change between versions without warning.
Pandas defines an interface for implementing data types and arrays that *extend*
@@ -79,10 +79,10 @@ on :ref:`ecosystem.extensions`.
The interface consists of two classes.
-``ExtensionDtype``
-^^^^^^^^^^^^^^^^^^
+:class:`~pandas.api.extension.ExtensionDtype`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-An ``ExtensionDtype`` is similar to a ``numpy.dtype`` object. It describes the
+A :class:`pandas.api.extension.ExtensionDtype` is similar to a ``numpy.dtype`` object. It describes the
data type. Implementors are responsible for a few unique items like the name.
One particularly important item is the ``type`` property. This should be the
@@ -91,8 +91,8 @@ extension array for IP Address data, this might be ``ipaddress.IPv4Address``.
See the `extension dtype source`_ for interface definition.
-``ExtensionArray``
-^^^^^^^^^^^^^^^^^^
+:class:`~pandas.api.extension.ExtensionArray`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This class provides all the array-like functionality. ExtensionArrays are
limited to 1 dimension. An ExtensionArray is linked to an ExtensionDtype via the
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index e19aedac80213..e0515c5213bd4 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -365,7 +365,7 @@ for storing ip addresses.
...:
``IPArray`` isn't a normal 1-D NumPy array, but because it's a pandas
-``ExtensionArray``, it can be stored properly inside pandas' containers.
+:ref:`~pandas.api.extension.ExtensionArray, it can be stored properly inside pandas' containers.
.. code-block:: ipython
diff --git a/pandas/api/extensions/__init__.py b/pandas/api/extensions/__init__.py
index 64f5e8fb939a4..cc09204d992d8 100644
--- a/pandas/api/extensions/__init__.py
+++ b/pandas/api/extensions/__init__.py
@@ -2,3 +2,5 @@
from pandas.core.accessor import (register_dataframe_accessor, # noqa
register_index_accessor,
register_series_accessor)
+from pandas.core.arrays.base import ExtensionArray # noqa
+from pandas.core.dtypes.dtypes import ExtensionDtype # noqa
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 97a764fa7dbe8..27da3f3750283 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -222,6 +222,7 @@ def dtype(self):
@property
def shape(self):
# type: () -> Tuple[int, ...]
+ """Return a tuple of the array dimensions."""
return (len(self),)
@property
diff --git a/pandas/core/common.py b/pandas/core/common.py
index c4890dbd39ef1..b9182bfd2cbe2 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -14,10 +14,9 @@
from pandas.compat import long, zip, iteritems, PY36, OrderedDict
from pandas.core.config import get_option
from pandas.core.dtypes.generic import ABCSeries, ABCIndex
-from pandas.core.dtypes.common import _NS_DTYPE
+from pandas.core.dtypes.common import _NS_DTYPE, is_integer
from pandas.core.dtypes.inference import _iterable_not_string
from pandas.core.dtypes.missing import isna, isnull, notnull # noqa
-from pandas.api import types
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
@@ -570,7 +569,7 @@ def _random_state(state=None):
np.random.RandomState
"""
- if types.is_integer(state):
+ if is_integer(state):
return np.random.RandomState(state)
elif isinstance(state, np.random.RandomState):
return state
| cc @jorisvandenbossche do you want to include these in the public API docs? It's nice to have a place to link to I think. | https://api.github.com/repos/pandas-dev/pandas/pulls/20795 | 2018-04-23T14:21:52Z | 2018-04-24T07:51:19Z | 2018-04-24T07:51:19Z | 2018-04-24T07:51:24Z |
TST: ExtensionArrays disallow .values attribute | diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 97a764fa7dbe8..1a76797afd9b9 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -61,7 +61,7 @@ class ExtensionArray(object):
ExtensionArrays are limited to 1 dimension.
- They may be backed by none, one, or many NumPy ararys. For example,
+ They may be backed by none, one, or many NumPy arrays. For example,
``pandas.Categorical`` is an extension array backed by two arrays,
one for codes and one for categories. An array of IPv6 address may
be backed by a NumPy structured array with two fields, one for the
@@ -69,6 +69,11 @@ class ExtensionArray(object):
by some other storage type, like Python lists. Pandas makes no
assumptions on how the data are stored, just that it can be converted
to a NumPy array.
+ The ExtensionArray interface does not impose any rules on how this data
+ is stored. However, currently, the backing data cannot be stored in
+ attributes called ``.values`` or ``._values`` to ensure full compatibility
+ with pandas internals. But other names as ``.data``, ``._data``,
+ ``._items``, ... can be freely used.
"""
# '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
# Don't override this.
diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py
index 2162552e9650d..9b60652fbace3 100644
--- a/pandas/tests/extension/base/interface.py
+++ b/pandas/tests/extension/base/interface.py
@@ -50,3 +50,9 @@ def test_is_extension_array_dtype(self, data):
assert is_extension_array_dtype(data.dtype)
assert is_extension_array_dtype(pd.Series(data))
assert isinstance(data.dtype, ExtensionDtype)
+
+ def test_no_values_attribute(self, data):
+ # GH-20735: EA's with .values attribute give problems with internal
+ # code, disallowing this for now until solved
+ assert not hasattr(data, 'values')
+ assert not hasattr(data, '_values')
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index a8e88365b5648..bd7d9500fdc14 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -30,10 +30,13 @@ class DecimalArray(ExtensionArray):
def __init__(self, values):
values = np.asarray(values, dtype=object)
- self.values = values
+ self._data = values
# Some aliases for common attribute names to ensure pandas supports
# these
- self._items = self._data = self.data = self.values
+ self._items = self.data = self._data
+ # those aliases are currently not working due to assumptions
+ # in internal code (GH-20735)
+ # self._values = self.values = self.data
@classmethod
def _constructor_from_sequence(cls, scalars):
@@ -45,13 +48,13 @@ def _from_factorized(cls, values, original):
def __getitem__(self, item):
if isinstance(item, numbers.Integral):
- return self.values[item]
+ return self._data[item]
else:
- return type(self)(self.values[item])
+ return type(self)(self._data[item])
def copy(self, deep=False):
if deep:
- return type(self)(self.values.copy())
+ return type(self)(self._data.copy())
return type(self)(self)
def __setitem__(self, key, value):
@@ -59,13 +62,13 @@ def __setitem__(self, key, value):
value = [decimal.Decimal(v) for v in value]
else:
value = decimal.Decimal(value)
- self.values[key] = value
+ self._data[key] = value
def __len__(self):
- return len(self.values)
+ return len(self._data)
def __repr__(self):
- return 'DecimalArray({!r})'.format(self.values)
+ return 'DecimalArray({!r})'.format(self._data)
@property
def nbytes(self):
@@ -75,7 +78,7 @@ def nbytes(self):
return 0
def isna(self):
- return np.array([x.is_nan() for x in self.values])
+ return np.array([x.is_nan() for x in self._data])
def take(self, indexer, allow_fill=True, fill_value=None):
indexer = np.asarray(indexer)
@@ -86,7 +89,7 @@ def take(self, indexer, allow_fill=True, fill_value=None):
return type(self)([self._na_value] * len(indexer))
indexer = _ensure_platform_int(indexer)
- out = self.values.take(indexer)
+ out = self._data.take(indexer)
out[mask] = self._na_value
return type(self)(out)
@@ -97,7 +100,7 @@ def _na_value(self):
@classmethod
def _concat_same_type(cls, to_concat):
- return cls(np.concatenate([x.values for x in to_concat]))
+ return cls(np.concatenate([x._data for x in to_concat]))
def make_data():
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 33843492cb706..f5290467203bb 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -33,6 +33,13 @@ def __init__(self, values):
raise TypeError
self.data = values
+ # Some aliases for common attribute names to ensure pandas supports
+ # these
+ self._items = self._data = self.data
+ # those aliases are currently not working due to assumptions
+ # in internal code (GH-20735)
+ # self._values = self.values = self.data
+
@classmethod
def _constructor_from_sequence(cls, scalars):
return cls(scalars)
| xref https://github.com/pandas-dev/pandas/issues/20735.
Adds a test to ensure EA authors don't use the `.values` or `._values` attribute, which can give problems with pandas internals depending on how the data is stored.
| https://api.github.com/repos/pandas-dev/pandas/pulls/20794 | 2018-04-23T12:03:45Z | 2018-04-24T10:28:32Z | 2018-04-24T10:28:32Z | 2018-04-24T11:26:19Z |
DOC: add concatenation-section to text.rst | diff --git a/doc/source/text.rst b/doc/source/text.rst
index da8e40892716e..bcbf7f0ef78d7 100644
--- a/doc/source/text.rst
+++ b/doc/source/text.rst
@@ -199,6 +199,75 @@ regular expression object will raise a ``ValueError``.
---------------------------------------------------------------------------
ValueError: case and flags cannot be set when pat is a compiled regex
+.. _text.concatenate:
+
+Concatenation
+-------------
+
+There are several ways to concatenate a ``Series`` or ``Index``, either with itself or others, all based on :meth:`~Series.str.cat`,
+resp. ``Index.str.cat``.
+
+Concatenating a single Series into a string
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The content of a ``Series`` (or ``Index``) can be concatenated:
+
+.. ipython:: python
+
+ s = pd.Series(['a', 'b', 'c', 'd'])
+ s.str.cat(sep=',')
+
+If not specified, the keyword ``sep`` for the separator defaults to the empty string, ``sep=''``:
+
+.. ipython:: python
+
+ s.str.cat()
+
+By default, missing values are ignored. Using ``na_rep``, they can be given a representation:
+
+.. ipython:: python
+
+ t = pd.Series(['a', 'b', np.nan, 'd'])
+ t.str.cat(sep=',')
+ t.str.cat(sep=',', na_rep='-')
+
+Concatenating a Series and something list-like into a Series
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The first argument to :meth:`~Series.str.cat` can be a list-like object, provided that it matches the length of the calling ``Series`` (or ``Index``).
+
+.. ipython:: python
+
+ s.str.cat(['A', 'B', 'C', 'D'])
+
+Missing values on either side will result in missing values in the result as well, *unless* ``na_rep`` is specified:
+
+.. ipython:: python
+
+ s.str.cat(t)
+ s.str.cat(t, na_rep='-')
+
+Series are *not* aligned on their index before concatenation:
+
+.. ipython:: python
+
+ u = pd.Series(['b', 'd', 'e', 'c'], index=[1, 3, 4, 2])
+ # without alignment
+ s.str.cat(u)
+ # with separate alignment
+ v, w = s.align(u)
+ v.str.cat(w, na_rep='-')
+
+Concatenating a Series and many objects into a Series
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+List-likes (excluding iterators, ``dict``-views, etc.) can be arbitrarily combined in a list.
+All elements of the list must match in length to the calling ``Series`` (resp. ``Index``):
+
+.. ipython:: python
+
+ x = pd.Series([1, 2, 3, 4], index=['A', 'B', 'C', 'D'])
+ s.str.cat([['A', 'B', 'C', 'D'], s, s.values, x.index])
Indexing with ``.str``
----------------------
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 23c891ec4fcd0..47c212d9a0345 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -36,6 +36,22 @@
def _get_array_list(arr, others):
+ """
+ Auxiliary function for :func:`str_cat`
+
+ Parameters
+ ----------
+ arr : ndarray
+ The left-most ndarray of the concatenation
+ others : list, ndarray, Series
+ The rest of the content to concatenate. If list of list-likes,
+ all elements must be passable to ``np.asarray``.
+
+ Returns
+ -------
+ list
+ List of all necessary arrays
+ """
from pandas.core.series import Series
if len(others) and isinstance(com._values_from_object(others)[0],
| As requested by @jreback in #20347. | https://api.github.com/repos/pandas-dev/pandas/pulls/20790 | 2018-04-23T06:01:11Z | 2018-04-24T10:01:49Z | 2018-04-24T10:01:49Z | 2018-04-24T18:11:31Z |
DEPR: removed long deprecated input param 'axis' in .replace() | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index c194d98a89789..3320bd224d5fb 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -1019,6 +1019,7 @@ Removal of prior version deprecations/changes
``ambiguous='infer'``, and ``infer_dst=False`` to ``ambiguous='raise'`` (:issue:`7963`).
- When ``.resample()`` was changed from an eager to a lazy operation, like ``.groupby()`` in v0.18.0, we put in place compatibility (with a ``FutureWarning``),
so operations would continue to work. This is now fully removed, so a ``Resampler`` will no longer forward compat operations (:issue:`20554`)
+- Remove long deprecated ``axis=None`` parameter from ``.replace()`` (:issue:`20271`)
.. _whatsnew_0230.performance:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 83d6dee64c165..7a2bd2708b711 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3790,11 +3790,11 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
@Appender(_shared_docs['replace'] % _shared_doc_kwargs)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
- regex=False, method='pad', axis=None):
+ regex=False, method='pad'):
return super(DataFrame, self).replace(to_replace=to_replace,
value=value, inplace=inplace,
limit=limit, regex=regex,
- method=method, axis=axis)
+ method=method)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 80ba248efe9b5..96340f9e82992 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5543,9 +5543,6 @@ def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
.. versionchanged:: 0.23.0
Added to DataFrame.
- axis : None
- .. deprecated:: 0.13.0
- Has no effect and will be removed.
See Also
--------
@@ -5753,15 +5750,11 @@ def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
@Appender(_shared_docs['replace'] % _shared_doc_kwargs)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
- regex=False, method='pad', axis=None):
+ regex=False, method='pad'):
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is "
"not a bool")
- if axis is not None:
- warnings.warn('the "axis" argument is deprecated '
- 'and will be removed in'
- 'v0.13; this argument has no effect')
self._consolidate_inplace()
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 2bf96ddaedf6f..951227f381b1c 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3423,11 +3423,10 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
@Appender(generic._shared_docs['replace'] % _shared_doc_kwargs)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
- regex=False, method='pad', axis=None):
+ regex=False, method='pad'):
return super(Series, self).replace(to_replace=to_replace, value=value,
inplace=inplace, limit=limit,
- regex=regex, method=method,
- axis=axis)
+ regex=regex, method=method)
@Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
| - [x] closes additional request in PR https://github.com/pandas-dev/pandas/pull/20271
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/20789 | 2018-04-22T21:11:40Z | 2018-05-03T00:19:49Z | 2018-05-03T00:19:49Z | 2018-05-03T00:19:53Z |
BUG: Switch more size_t references to int64_t | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index bcc442189bf11..71ac8712eea0a 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -1140,6 +1140,7 @@ I/O
- Bug in :func:`read_msgpack` with a non existent file is passed in Python 2 (:issue:`15296`)
- Bug in :func:`read_csv` where a ``MultiIndex`` with duplicate columns was not being mangled appropriately (:issue:`18062`)
- Bug in :func:`read_csv` where missing values were not being handled properly when ``keep_default_na=False`` with dictionary ``na_values`` (:issue:`19227`)
+- Bug in :func:`read_csv` causing heap corruption on 32-bit, big-endian architectures (:issue:`20785`)
- Bug in :func:`read_sas` where a file with 0 variables gave an ``AttributeError`` incorrectly. Now it gives an ``EmptyDataError`` (:issue:`18184`)
- Bug in :func:`DataFrame.to_latex()` where pairs of braces meant to serve as invisible placeholders were escaped (:issue:`18667`)
- Bug in :func:`read_json` where large numeric values were causing an ``OverflowError`` (:issue:`18842`)
diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c
index 6e8c220eab6b8..25eede6c286dc 100644
--- a/pandas/_libs/src/parser/tokenizer.c
+++ b/pandas/_libs/src/parser/tokenizer.c
@@ -69,9 +69,9 @@ static void free_if_not_null(void **ptr) {
*/
-static void *grow_buffer(void *buffer, size_t length, size_t *capacity,
- size_t space, size_t elsize, int *error) {
- size_t cap = *capacity;
+static void *grow_buffer(void *buffer, int64_t length, int64_t *capacity,
+ int64_t space, int64_t elsize, int *error) {
+ int64_t cap = *capacity;
void *newbuffer = buffer;
// Can we fit potentially nbytes tokens (+ null terminators) in the stream?
@@ -169,7 +169,7 @@ int parser_cleanup(parser_t *self) {
}
int parser_init(parser_t *self) {
- size_t sz;
+ int64_t sz;
/*
Initialize data buffers
@@ -353,7 +353,7 @@ static int push_char(parser_t *self, char c) {
("push_char: ERROR!!! self->stream_len(%d) >= "
"self->stream_cap(%d)\n",
self->stream_len, self->stream_cap))
- size_t bufsize = 100;
+ int64_t bufsize = 100;
self->error_msg = (char *)malloc(bufsize);
snprintf(self->error_msg, bufsize,
"Buffer overflow caught - possible malformed input file.\n");
@@ -370,7 +370,7 @@ int P_INLINE end_field(parser_t *self) {
("end_field: ERROR!!! self->words_len(%zu) >= "
"self->words_cap(%zu)\n",
self->words_len, self->words_cap))
- size_t bufsize = 100;
+ int64_t bufsize = 100;
self->error_msg = (char *)malloc(bufsize);
snprintf(self->error_msg, bufsize,
"Buffer overflow caught - possible malformed input file.\n");
@@ -402,8 +402,8 @@ int P_INLINE end_field(parser_t *self) {
}
static void append_warning(parser_t *self, const char *msg) {
- size_t ex_length;
- size_t length = strlen(msg);
+ int64_t ex_length;
+ int64_t length = strlen(msg);
void *newptr;
if (self->warn_msg == NULL) {
@@ -423,7 +423,7 @@ static int end_line(parser_t *self) {
char *msg;
int64_t fields;
int ex_fields = self->expected_fields;
- size_t bufsize = 100; // for error or warning messages
+ int64_t bufsize = 100; // for error or warning messages
fields = self->line_fields[self->lines];
@@ -495,7 +495,7 @@ static int end_line(parser_t *self) {
fields < ex_fields) {
// might overrun the buffer when closing fields
if (make_stream_space(self, ex_fields - fields) < 0) {
- size_t bufsize = 100;
+ int64_t bufsize = 100;
self->error_msg = (char *)malloc(bufsize);
snprintf(self->error_msg, bufsize, "out of memory");
return -1;
@@ -516,7 +516,7 @@ static int end_line(parser_t *self) {
TRACE((
"end_line: ERROR!!! self->lines(%zu) >= self->lines_cap(%zu)\n",
self->lines, self->lines_cap))
- size_t bufsize = 100;
+ int64_t bufsize = 100;
self->error_msg = (char *)malloc(bufsize);
snprintf(self->error_msg, bufsize,
"Buffer overflow caught - "
@@ -577,7 +577,7 @@ static int parser_buffer_bytes(parser_t *self, size_t nbytes) {
self->datalen = bytes_read;
if (status != REACHED_EOF && self->data == NULL) {
- size_t bufsize = 200;
+ int64_t bufsize = 200;
self->error_msg = (char *)malloc(bufsize);
if (status == CALLING_READ_FAILED) {
@@ -608,7 +608,7 @@ static int parser_buffer_bytes(parser_t *self, size_t nbytes) {
if (slen >= self->stream_cap) { \
TRACE(("PUSH_CHAR: ERROR!!! slen(%d) >= stream_cap(%d)\n", slen, \
self->stream_cap)) \
- size_t bufsize = 100; \
+ int64_t bufsize = 100; \
self->error_msg = (char *)malloc(bufsize); \
snprintf(self->error_msg, bufsize, \
"Buffer overflow caught - possible malformed input file.\n");\
@@ -729,7 +729,7 @@ int tokenize_bytes(parser_t *self, size_t line_limit, int64_t start_lines) {
char *buf = self->data + self->datapos;
if (make_stream_space(self, self->datalen - self->datapos) < 0) {
- size_t bufsize = 100;
+ int64_t bufsize = 100;
self->error_msg = (char *)malloc(bufsize);
snprintf(self->error_msg, bufsize, "out of memory");
return -1;
@@ -1036,7 +1036,7 @@ int tokenize_bytes(parser_t *self, size_t line_limit, int64_t start_lines) {
PUSH_CHAR(c);
self->state = IN_FIELD;
} else {
- size_t bufsize = 100;
+ int64_t bufsize = 100;
self->error_msg = (char *)malloc(bufsize);
snprintf(self->error_msg, bufsize,
"delimiter expected after quote in quote");
@@ -1132,7 +1132,7 @@ int tokenize_bytes(parser_t *self, size_t line_limit, int64_t start_lines) {
}
static int parser_handle_eof(parser_t *self) {
- size_t bufsize = 100;
+ int64_t bufsize = 100;
TRACE(
("handling eof, datalen: %d, pstate: %d\n", self->datalen, self->state))
@@ -1177,7 +1177,7 @@ static int parser_handle_eof(parser_t *self) {
}
int parser_consume_rows(parser_t *self, size_t nrows) {
- size_t i, offset, word_deletions, char_count;
+ int64_t i, offset, word_deletions, char_count;
if (nrows > self->lines) {
nrows = self->lines;
| closes #20785
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/20786 | 2018-04-22T12:10:39Z | 2018-04-24T10:03:53Z | 2018-04-24T10:03:53Z | 2018-04-24T10:37:53Z |
DEPR: remove v018 resample compatibilitiy | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index e19aedac80213..08285ba852ff6 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -555,7 +555,6 @@ Convert to an xarray DataArray
p.to_xarray()
-
.. _whatsnew_0230.api_breaking.core_common:
pandas.core.common removals
@@ -923,7 +922,8 @@ Removal of prior version deprecations/changes
- The ``infer_dst`` keyword in :meth:`Series.tz_localize`, :meth:`DatetimeIndex.tz_localize`
and :class:`DatetimeIndex` have been removed. ``infer_dst=True`` is equivalent to
``ambiguous='infer'``, and ``infer_dst=False`` to ``ambiguous='raise'`` (:issue:`7963`).
-
+- When ``.resample()`` was changed from an eager to a lazy operation, like ``.groupby()`` in v0.18.0, we put in place compatibility (with a ``FutureWarning``),
+ so operations would continue to work. This is now fully removed, so a ``Resampler`` will no longer forward compat operations (:issue:`20554`)
.. _whatsnew_0230.performance:
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index f8d283e932f44..bc7871a0d75c1 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -62,20 +62,6 @@ class Resampler(_GroupBy):
_attributes = ['freq', 'axis', 'closed', 'label', 'convention',
'loffset', 'base', 'kind']
- # API compat of allowed attributes
- _deprecated_valids = _attributes + ['__doc__', '_cache', '_attributes',
- 'binner', 'grouper', 'groupby',
- 'sort', 'kind', 'squeeze', 'keys',
- 'group_keys', 'as_index', 'exclusions',
- '_groupby']
-
- # don't raise deprecation warning on attributes starting with these
- # patterns - prevents warnings caused by IPython introspection
- _deprecated_valid_patterns = ['_ipython', '_repr']
-
- # API compat of disallowed attributes
- _deprecated_invalids = ['iloc', 'loc', 'ix', 'iat', 'at']
-
def __init__(self, obj, groupby=None, axis=0, kind=None, **kwargs):
self.groupby = groupby
self.keys = None
@@ -100,6 +86,16 @@ def __unicode__(self):
return "{klass} [{attrs}]".format(klass=self.__class__.__name__,
attrs=', '.join(attrs))
+ def __getattr__(self, attr):
+ if attr in self._internal_names_set:
+ return object.__getattribute__(self, attr)
+ if attr in self._attributes:
+ return getattr(self.groupby, attr)
+ if attr in self.obj:
+ return self[attr]
+
+ return object.__getattribute__(self, attr)
+
@property
def obj(self):
return self.groupby.obj
@@ -124,100 +120,6 @@ def _from_selection(self):
(self.groupby.key is not None or
self.groupby.level is not None))
- def _deprecated(self, op):
- warnings.warn(("\n.resample() is now a deferred operation\n"
- "You called {op}(...) on this deferred object "
- "which materialized it into a {klass}\nby implicitly "
- "taking the mean. Use .resample(...).mean() "
- "instead").format(op=op, klass=self._typ),
- FutureWarning, stacklevel=3)
- return self.mean()
-
- def _make_deprecated_binop(op):
- # op is a string
-
- def _evaluate_numeric_binop(self, other):
- result = self._deprecated(op)
- return getattr(result, op)(other)
- return _evaluate_numeric_binop
-
- def _make_deprecated_unary(op, name):
- # op is a callable
-
- def _evaluate_numeric_unary(self):
- result = self._deprecated(name)
- return op(result)
- return _evaluate_numeric_unary
-
- def __array__(self):
- return self._deprecated('__array__').__array__()
-
- __gt__ = _make_deprecated_binop('__gt__')
- __ge__ = _make_deprecated_binop('__ge__')
- __lt__ = _make_deprecated_binop('__lt__')
- __le__ = _make_deprecated_binop('__le__')
- __eq__ = _make_deprecated_binop('__eq__')
- __ne__ = _make_deprecated_binop('__ne__')
-
- __add__ = __radd__ = _make_deprecated_binop('__add__')
- __sub__ = __rsub__ = _make_deprecated_binop('__sub__')
- __mul__ = __rmul__ = _make_deprecated_binop('__mul__')
- __floordiv__ = __rfloordiv__ = _make_deprecated_binop('__floordiv__')
- __truediv__ = __rtruediv__ = _make_deprecated_binop('__truediv__')
- if not compat.PY3:
- __div__ = __rdiv__ = _make_deprecated_binop('__div__')
- __neg__ = _make_deprecated_unary(lambda x: -x, '__neg__')
- __pos__ = _make_deprecated_unary(lambda x: x, '__pos__')
- __abs__ = _make_deprecated_unary(lambda x: np.abs(x), '__abs__')
- __inv__ = _make_deprecated_unary(lambda x: -x, '__inv__')
-
- def __getattr__(self, attr):
- if attr in self._internal_names_set:
- return object.__getattribute__(self, attr)
- if attr in self._attributes:
- return getattr(self.groupby, attr)
- if attr in self.obj:
- return self[attr]
-
- if attr in self._deprecated_invalids:
- raise ValueError(".resample() is now a deferred operation\n"
- "\tuse .resample(...).mean() instead of "
- ".resample(...)")
-
- matches_pattern = any(attr.startswith(x) for x
- in self._deprecated_valid_patterns)
- if not matches_pattern and attr not in self._deprecated_valids:
- # avoid the warning, if it's just going to be an exception
- # anyway.
- if not hasattr(self.obj, attr):
- raise AttributeError("'{}' has no attribute '{}'".format(
- type(self.obj).__name__, attr
- ))
- self = self._deprecated(attr)
-
- return object.__getattribute__(self, attr)
-
- def __setattr__(self, attr, value):
- if attr not in self._deprecated_valids:
- raise ValueError("cannot set values on {0}".format(
- self.__class__.__name__))
- object.__setattr__(self, attr, value)
-
- def __getitem__(self, key):
- try:
- return super(Resampler, self).__getitem__(key)
- except (KeyError, AbstractMethodError):
-
- # compat for deprecated
- if isinstance(self.obj, ABCSeries):
- return self._deprecated('__getitem__')[key]
-
- raise
-
- def __setitem__(self, attr, value):
- raise ValueError("cannot set items on {0}".format(
- self.__class__.__name__))
-
def _convert_obj(self, obj):
"""
provide any conversions for the object in order to correctly handle
@@ -282,11 +184,6 @@ def _assure_grouper(self):
def pipe(self, func, *args, **kwargs):
return super(Resampler, self).pipe(func, *args, **kwargs)
- def plot(self, *args, **kwargs):
- # for compat with prior versions, we want to
- # have the warnings shown here and just have this work
- return self._deprecated('plot').plot(*args, **kwargs)
-
_agg_doc = dedent("""
Examples
@@ -853,9 +750,6 @@ def size(self):
return result
-Resampler._deprecated_valids += dir(Resampler)
-
-
# downsample methods
for method in ['sum', 'prod']:
diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py
index 2180e38e24e6c..778ea73b3ef25 100644
--- a/pandas/tests/test_resample.py
+++ b/pandas/tests/test_resample.py
@@ -14,11 +14,9 @@
import pandas as pd
import pandas.tseries.offsets as offsets
import pandas.util.testing as tm
-import pandas.util._test_decorators as td
from pandas import (Series, DataFrame, Panel, Index, isna,
notna, Timestamp)
-from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame
from pandas.compat import range, lrange, zip, product, OrderedDict
from pandas.errors import UnsupportedFunctionCall
from pandas.core.groupby.groupby import DataError
@@ -28,8 +26,7 @@
from pandas.core.indexes.datetimes import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.core.indexes.period import period_range, PeriodIndex, Period
-from pandas.core.resample import (DatetimeIndex, TimeGrouper,
- DatetimeIndexResampler)
+from pandas.core.resample import DatetimeIndex, TimeGrouper
from pandas.core.indexes.timedeltas import timedelta_range, TimedeltaIndex
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, assert_index_equal)
@@ -84,122 +81,6 @@ def test_api(self):
assert isinstance(result, DataFrame)
assert len(result) == 217
- def test_api_changes_v018(self):
-
- # change from .resample(....., how=...)
- # to .resample(......).how()
-
- r = self.series.resample('H')
- assert isinstance(r, DatetimeIndexResampler)
-
- for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']:
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- result = self.series.resample('H', how=how)
- expected = getattr(self.series.resample('H'), how)()
- tm.assert_series_equal(result, expected)
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- result = self.series.resample('H', how='ohlc')
- expected = self.series.resample('H').ohlc()
- tm.assert_frame_equal(result, expected)
-
- # compat for pandas-like methods
- for how in ['sort_values', 'isna']:
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- getattr(r, how)()
-
- # invalids as these can be setting operations
- r = self.series.resample('H')
- pytest.raises(ValueError, lambda: r.iloc[0])
- pytest.raises(ValueError, lambda: r.iat[0])
- pytest.raises(ValueError, lambda: r.loc[0])
- pytest.raises(ValueError, lambda: r.loc[
- Timestamp('2013-01-01 00:00:00', offset='H')])
- pytest.raises(ValueError, lambda: r.at[
- Timestamp('2013-01-01 00:00:00', offset='H')])
-
- def f():
- r[0] = 5
-
- pytest.raises(ValueError, f)
-
- # str/repr
- r = self.series.resample('H')
- with tm.assert_produces_warning(None):
- str(r)
- with tm.assert_produces_warning(None):
- repr(r)
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- tm.assert_numpy_array_equal(np.array(r), np.array(r.mean()))
-
- # masquerade as Series/DataFrame as needed for API compat
- assert isinstance(self.series.resample('H'), ABCSeries)
- assert not isinstance(self.frame.resample('H'), ABCSeries)
- assert not isinstance(self.series.resample('H'), ABCDataFrame)
- assert isinstance(self.frame.resample('H'), ABCDataFrame)
-
- # bin numeric ops
- for op in ['__add__', '__mul__', '__truediv__', '__div__', '__sub__']:
-
- if getattr(self.series, op, None) is None:
- continue
- r = self.series.resample('H')
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- assert isinstance(getattr(r, op)(2), Series)
-
- # unary numeric ops
- for op in ['__pos__', '__neg__', '__abs__', '__inv__']:
-
- if getattr(self.series, op, None) is None:
- continue
- r = self.series.resample('H')
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- assert isinstance(getattr(r, op)(), Series)
-
- # comparison ops
- for op in ['__lt__', '__le__', '__gt__', '__ge__', '__eq__', '__ne__']:
- r = self.series.resample('H')
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- assert isinstance(getattr(r, op)(2), Series)
-
- # IPython introspection shouldn't trigger warning GH 13618
- for op in ['_repr_json', '_repr_latex',
- '_ipython_canary_method_should_not_exist_']:
- r = self.series.resample('H')
- with tm.assert_produces_warning(None):
- getattr(r, op, None)
-
- # getitem compat
- df = self.series.to_frame('foo')
-
- # same as prior versions for DataFrame
- pytest.raises(KeyError, lambda: df.resample('H')[0])
-
- # compat for Series
- # but we cannot be sure that we need a warning here
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- result = self.series.resample('H')[0]
- expected = self.series.resample('H').mean()[0]
- assert result == expected
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- result = self.series.resample('H')['2005-01-09 23:00:00']
- expected = self.series.resample('H').mean()['2005-01-09 23:00:00']
- assert result == expected
-
def test_groupby_resample_api(self):
# GH 12448
@@ -251,23 +132,6 @@ def test_pipe(self):
result = r.pipe(lambda x: x.max() - x.mean())
tm.assert_frame_equal(result, expected)
- @td.skip_if_no_mpl
- def test_plot_api(self):
- # .resample(....).plot(...)
- # hitting warnings
- # GH 12448
- s = Series(np.random.randn(60),
- index=date_range('2016-01-01', periods=60, freq='1min'))
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- result = s.resample('15min').plot()
- tm.assert_is_valid_plot_return_object(result)
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- result = s.resample('15min', how='sum').plot()
- tm.assert_is_valid_plot_return_object(result)
-
def test_getitem(self):
r = self.frame.resample('H')
@@ -301,15 +165,6 @@ def test_attribute_access(self):
r = self.frame.resample('H')
tm.assert_series_equal(r.A.sum(), r['A'].sum())
- # getting
- pytest.raises(AttributeError, lambda: r.F)
-
- # setting
- def f():
- r.F = 'bah'
-
- pytest.raises(ValueError, f)
-
def test_api_compat_before_use(self):
# make sure that we are setting the binner
@@ -3012,23 +2867,6 @@ def setup_method(self, method):
freq='s',
periods=40))
- def test_back_compat_v180(self):
-
- df = self.frame
- for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']:
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- result = df.groupby('A').resample('4s', how=how)
- expected = getattr(df.groupby('A').resample('4s'), how)()
- assert_frame_equal(result, expected)
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- result = df.groupby('A').resample('4s', how='mean',
- fill_method='ffill')
- expected = df.groupby('A').resample('4s').mean().ffill()
- assert_frame_equal(result, expected)
-
def test_tab_complete_ipython6_warning(self, ip):
from IPython.core.completer import provisionalcompleter
code = dedent("""\
| closes #20554
| https://api.github.com/repos/pandas-dev/pandas/pulls/20782 | 2018-04-21T23:10:38Z | 2018-04-24T09:58:56Z | 2018-04-24T09:58:56Z | 2018-04-24T09:59:50Z |
TST: split test_groupby.py | diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index d85719d328ff2..b2f18e11de8ee 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -15,51 +15,6 @@
import pandas.util.testing as tm
-@pytest.fixture
-def ts():
- return tm.makeTimeSeries()
-
-
-@pytest.fixture
-def tsframe():
- return DataFrame(tm.getTimeSeriesData())
-
-
-@pytest.fixture
-def df():
- return DataFrame(
- {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
- 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
- 'C': np.random.randn(8),
- 'D': np.random.randn(8)})
-
-
-@pytest.fixture
-def mframe():
- index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
- ['one', 'two', 'three']],
- labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
- [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
- names=['first', 'second'])
- return DataFrame(np.random.randn(10, 3),
- index=index,
- columns=['A', 'B', 'C'])
-
-
-@pytest.fixture
-def three_group():
- return DataFrame(
- {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar',
- 'bar', 'bar', 'foo', 'foo', 'foo'],
- 'B': ['one', 'one', 'one', 'two', 'one', 'one',
- 'one', 'two', 'two', 'two', 'one'],
- 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny',
- 'shiny', 'dull', 'shiny', 'shiny', 'shiny'],
- 'D': np.random.randn(11),
- 'E': np.random.randn(11),
- 'F': np.random.randn(11)})
-
-
def test_agg_regression1(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
@@ -87,6 +42,32 @@ def test_agg_ser_multi_key(df):
tm.assert_series_equal(results, expected)
+def test_groupby_aggregation_mixed_dtype():
+
+ # GH 6212
+ expected = DataFrame({
+ 'v1': [5, 5, 7, np.nan, 3, 3, 4, 1],
+ 'v2': [55, 55, 77, np.nan, 33, 33, 44, 11]},
+ index=MultiIndex.from_tuples([(1, 95), (1, 99), (2, 95), (2, 99),
+ ('big', 'damp'),
+ ('blue', 'dry'),
+ ('red', 'red'), ('red', 'wet')],
+ names=['by1', 'by2']))
+
+ df = DataFrame({
+ 'v1': [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
+ 'v2': [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
+ 'by1': ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan,
+ 12],
+ 'by2': ["wet", "dry", 99, 95, np.nan, "damp", 95, 99, "red", 99,
+ np.nan, np.nan]
+ })
+
+ g = df.groupby(['by1', 'by2'])
+ result = g[['v1', 'v2']].mean()
+ tm.assert_frame_equal(result, expected)
+
+
def test_agg_apply_corner(ts, tsframe):
# nothing to group, all NA
grouped = ts.groupby(ts * np.nan)
diff --git a/pandas/tests/groupby/common.py b/pandas/tests/groupby/common.py
deleted file mode 100644
index 3e99e8211b4f8..0000000000000
--- a/pandas/tests/groupby/common.py
+++ /dev/null
@@ -1,62 +0,0 @@
-""" Base setup """
-
-import pytest
-import numpy as np
-from pandas.util import testing as tm
-from pandas import DataFrame, MultiIndex
-
-
-@pytest.fixture
-def mframe():
- index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
- 'three']],
- labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
- [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
- names=['first', 'second'])
- return DataFrame(np.random.randn(10, 3), index=index,
- columns=['A', 'B', 'C'])
-
-
-@pytest.fixture
-def df():
- return DataFrame(
- {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
- 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
- 'C': np.random.randn(8),
- 'D': np.random.randn(8)})
-
-
-class MixIn(object):
-
- def setup_method(self, method):
- self.ts = tm.makeTimeSeries()
-
- self.seriesd = tm.getSeriesData()
- self.tsd = tm.getTimeSeriesData()
- self.frame = DataFrame(self.seriesd)
- self.tsframe = DataFrame(self.tsd)
-
- self.df = df()
- self.df_mixed_floats = DataFrame(
- {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
- 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
- 'C': np.random.randn(8),
- 'D': np.array(
- np.random.randn(8), dtype='float32')})
-
- self.mframe = mframe()
-
- self.three_group = DataFrame(
- {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
- 'foo', 'foo', 'foo'],
- 'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
- 'two', 'two', 'one'],
- 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
- 'dull', 'shiny', 'shiny', 'shiny'],
- 'D': np.random.randn(11),
- 'E': np.random.randn(11),
- 'F': np.random.randn(11)})
-
-
-def assert_fp_equal(a, b):
- assert (np.abs(a - b) < 1e-12).all()
diff --git a/pandas/tests/groupby/conftest.py b/pandas/tests/groupby/conftest.py
new file mode 100644
index 0000000000000..877aa835ac6f5
--- /dev/null
+++ b/pandas/tests/groupby/conftest.py
@@ -0,0 +1,77 @@
+import pytest
+import numpy as np
+from pandas import MultiIndex, DataFrame
+from pandas.util import testing as tm
+
+
+@pytest.fixture
+def mframe():
+ index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
+ 'three']],
+ labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
+ [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
+ names=['first', 'second'])
+ return DataFrame(np.random.randn(10, 3), index=index,
+ columns=['A', 'B', 'C'])
+
+
+@pytest.fixture
+def df():
+ return DataFrame(
+ {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
+ 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
+ 'C': np.random.randn(8),
+ 'D': np.random.randn(8)})
+
+
+@pytest.fixture
+def ts():
+ return tm.makeTimeSeries()
+
+
+@pytest.fixture
+def seriesd():
+ return tm.getSeriesData()
+
+
+@pytest.fixture
+def tsd():
+ return tm.getTimeSeriesData()
+
+
+@pytest.fixture
+def frame(seriesd):
+ return DataFrame(seriesd)
+
+
+@pytest.fixture
+def tsframe(tsd):
+ return DataFrame(tsd)
+
+
+@pytest.fixture
+def df_mixed_floats():
+ return DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
+ 'foo', 'bar', 'foo', 'foo'],
+ 'B': ['one', 'one', 'two', 'three',
+ 'two', 'two', 'one', 'three'],
+ 'C': np.random.randn(8),
+ 'D': np.array(
+ np.random.randn(8), dtype='float32')})
+
+
+@pytest.fixture
+def three_group():
+ return DataFrame({'A': ['foo', 'foo', 'foo',
+ 'foo', 'bar', 'bar',
+ 'bar', 'bar',
+ 'foo', 'foo', 'foo'],
+ 'B': ['one', 'one', 'one',
+ 'two', 'one', 'one', 'one', 'two',
+ 'two', 'two', 'one'],
+ 'C': ['dull', 'dull', 'shiny',
+ 'dull', 'dull', 'shiny', 'shiny',
+ 'dull', 'shiny', 'shiny', 'shiny'],
+ 'D': np.random.randn(11),
+ 'E': np.random.randn(11),
+ 'F': np.random.randn(11)})
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
new file mode 100644
index 0000000000000..5ca10fe1af9d1
--- /dev/null
+++ b/pandas/tests/groupby/test_apply.py
@@ -0,0 +1,517 @@
+import pytest
+import numpy as np
+import pandas as pd
+from datetime import datetime
+from pandas.util import testing as tm
+from pandas import DataFrame, MultiIndex, compat, Series, bdate_range, Index
+
+
+def test_apply_issues():
+ # GH 5788
+
+ s = """2011.05.16,00:00,1.40893
+2011.05.16,01:00,1.40760
+2011.05.16,02:00,1.40750
+2011.05.16,03:00,1.40649
+2011.05.17,02:00,1.40893
+2011.05.17,03:00,1.40760
+2011.05.17,04:00,1.40750
+2011.05.17,05:00,1.40649
+2011.05.18,02:00,1.40893
+2011.05.18,03:00,1.40760
+2011.05.18,04:00,1.40750
+2011.05.18,05:00,1.40649"""
+
+ df = pd.read_csv(
+ compat.StringIO(s), header=None, names=['date', 'time', 'value'],
+ parse_dates=[['date', 'time']])
+ df = df.set_index('date_time')
+
+ expected = df.groupby(df.index.date).idxmax()
+ result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
+ tm.assert_frame_equal(result, expected)
+
+ # GH 5789
+ # don't auto coerce dates
+ df = pd.read_csv(
+ compat.StringIO(s), header=None, names=['date', 'time', 'value'])
+ exp_idx = pd.Index(
+ ['2011.05.16', '2011.05.17', '2011.05.18'
+ ], dtype=object, name='date')
+ expected = Series(['00:00', '02:00', '02:00'], index=exp_idx)
+ result = df.groupby('date').apply(
+ lambda x: x['time'][x['value'].idxmax()])
+ tm.assert_series_equal(result, expected)
+
+
+def test_apply_trivial():
+ # GH 20066
+ # trivial apply: ignore input and return a constant dataframe.
+ df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
+ 'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
+ columns=['key', 'data'])
+ expected = pd.concat([df.iloc[1:], df.iloc[1:]],
+ axis=1, keys=['float64', 'object'])
+ result = df.groupby([str(x) for x in df.dtypes],
+ axis=1).apply(lambda x: df.iloc[1:])
+
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.xfail(reason=("GH 20066; function passed into apply "
+ "returns a DataFrame with the same index "
+ "as the one to create GroupBy object."))
+def test_apply_trivial_fail():
+ # GH 20066
+ # trivial apply fails if the constant dataframe has the same index
+ # with the one used to create GroupBy object.
+ df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
+ 'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
+ columns=['key', 'data'])
+ expected = pd.concat([df, df],
+ axis=1, keys=['float64', 'object'])
+ result = df.groupby([str(x) for x in df.dtypes],
+ axis=1).apply(lambda x: df)
+
+ tm.assert_frame_equal(result, expected)
+
+
+def test_fast_apply():
+ # make sure that fast apply is correctly called
+ # rather than raising any kind of error
+ # otherwise the python path will be callsed
+ # which slows things down
+ N = 1000
+ labels = np.random.randint(0, 2000, size=N)
+ labels2 = np.random.randint(0, 3, size=N)
+ df = DataFrame({'key': labels,
+ 'key2': labels2,
+ 'value1': np.random.randn(N),
+ 'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
+
+ def f(g):
+ return 1
+
+ g = df.groupby(['key', 'key2'])
+
+ grouper = g.grouper
+
+ splitter = grouper._get_splitter(g._selected_obj, axis=g.axis)
+ group_keys = grouper._get_group_keys()
+
+ values, mutated = splitter.fast_apply(f, group_keys)
+ assert not mutated
+
+
+def test_apply_with_mixed_dtype():
+ # GH3480, apply with mixed dtype on axis=1 breaks in 0.11
+ df = DataFrame({'foo1': np.random.randn(6),
+ 'foo2': ['one', 'two', 'two', 'three', 'one', 'two']})
+ result = df.apply(lambda x: x, axis=1)
+ tm.assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts())
+
+ # GH 3610 incorrect dtype conversion with as_index=False
+ df = DataFrame({"c1": [1, 2, 6, 6, 8]})
+ df["c2"] = df.c1 / 2.0
+ result1 = df.groupby("c2").mean().reset_index().c2
+ result2 = df.groupby("c2", as_index=False).mean().c2
+ tm.assert_series_equal(result1, result2)
+
+
+def test_groupby_as_index_apply(df):
+ # GH #4648 and #3417
+ df = DataFrame({'item_id': ['b', 'b', 'a', 'c', 'a', 'b'],
+ 'user_id': [1, 2, 1, 1, 3, 1],
+ 'time': range(6)})
+
+ g_as = df.groupby('user_id', as_index=True)
+ g_not_as = df.groupby('user_id', as_index=False)
+
+ res_as = g_as.head(2).index
+ res_not_as = g_not_as.head(2).index
+ exp = Index([0, 1, 2, 4])
+ tm.assert_index_equal(res_as, exp)
+ tm.assert_index_equal(res_not_as, exp)
+
+ res_as_apply = g_as.apply(lambda x: x.head(2)).index
+ res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index
+
+ # apply doesn't maintain the original ordering
+ # changed in GH5610 as the as_index=False returns a MI here
+ exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (
+ 2, 4)])
+ tp = [(1, 0), (1, 2), (2, 1), (3, 4)]
+ exp_as_apply = MultiIndex.from_tuples(tp, names=['user_id', None])
+
+ tm.assert_index_equal(res_as_apply, exp_as_apply)
+ tm.assert_index_equal(res_not_as_apply, exp_not_as_apply)
+
+ ind = Index(list('abcde'))
+ df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
+ res = df.groupby(0, as_index=False).apply(lambda x: x).index
+ tm.assert_index_equal(res, ind)
+
+
+def test_apply_concat_preserve_names(three_group):
+ grouped = three_group.groupby(['A', 'B'])
+
+ def desc(group):
+ result = group.describe()
+ result.index.name = 'stat'
+ return result
+
+ def desc2(group):
+ result = group.describe()
+ result.index.name = 'stat'
+ result = result[:len(group)]
+ # weirdo
+ return result
+
+ def desc3(group):
+ result = group.describe()
+
+ # names are different
+ result.index.name = 'stat_%d' % len(group)
+
+ result = result[:len(group)]
+ # weirdo
+ return result
+
+ result = grouped.apply(desc)
+ assert result.index.names == ('A', 'B', 'stat')
+
+ result2 = grouped.apply(desc2)
+ assert result2.index.names == ('A', 'B', 'stat')
+
+ result3 = grouped.apply(desc3)
+ assert result3.index.names == ('A', 'B', None)
+
+
+def test_apply_series_to_frame():
+ def f(piece):
+ with np.errstate(invalid='ignore'):
+ logged = np.log(piece)
+ return DataFrame({'value': piece,
+ 'demeaned': piece - piece.mean(),
+ 'logged': logged})
+
+ dr = bdate_range('1/1/2000', periods=100)
+ ts = Series(np.random.randn(100), index=dr)
+
+ grouped = ts.groupby(lambda x: x.month)
+ result = grouped.apply(f)
+
+ assert isinstance(result, DataFrame)
+ tm.assert_index_equal(result.index, ts.index)
+
+
+def test_apply_series_yield_constant(df):
+ result = df.groupby(['A', 'B'])['C'].apply(len)
+ assert result.index.names[:2] == ('A', 'B')
+
+
+def test_apply_frame_yield_constant(df):
+ # GH13568
+ result = df.groupby(['A', 'B']).apply(len)
+ assert isinstance(result, Series)
+ assert result.name is None
+
+ result = df.groupby(['A', 'B'])[['C', 'D']].apply(len)
+ assert isinstance(result, Series)
+ assert result.name is None
+
+
+def test_apply_frame_to_series(df):
+ grouped = df.groupby(['A', 'B'])
+ result = grouped.apply(len)
+ expected = grouped.count()['C']
+ tm.assert_index_equal(result.index, expected.index)
+ tm.assert_numpy_array_equal(result.values, expected.values)
+
+
+def test_apply_frame_concat_series():
+ def trans(group):
+ return group.groupby('B')['C'].sum().sort_values()[:2]
+
+ def trans2(group):
+ grouped = group.groupby(df.reindex(group.index)['B'])
+ return grouped.sum().sort_values()[:2]
+
+ df = DataFrame({'A': np.random.randint(0, 5, 1000),
+ 'B': np.random.randint(0, 5, 1000),
+ 'C': np.random.randn(1000)})
+
+ result = df.groupby('A').apply(trans)
+ exp = df.groupby('A')['C'].apply(trans2)
+ tm.assert_series_equal(result, exp, check_names=False)
+ assert result.name == 'C'
+
+
+def test_apply_transform(ts):
+ grouped = ts.groupby(lambda x: x.month)
+ result = grouped.apply(lambda x: x * 2)
+ expected = grouped.transform(lambda x: x * 2)
+ tm.assert_series_equal(result, expected)
+
+
+def test_apply_multikey_corner(tsframe):
+ grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
+
+ def f(group):
+ return group.sort_values('A')[-5:]
+
+ result = grouped.apply(f)
+ for key, group in grouped:
+ tm.assert_frame_equal(result.loc[key], f(group))
+
+
+def test_apply_chunk_view():
+ # Low level tinkering could be unsafe, make sure not
+ df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],
+ 'value': compat.lrange(9)})
+
+ # return view
+ f = lambda x: x[:2]
+
+ result = df.groupby('key', group_keys=False).apply(f)
+ expected = df.take([0, 1, 3, 4, 6, 7])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_apply_no_name_column_conflict():
+ df = DataFrame({'name': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
+ 'name2': [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
+ 'value': compat.lrange(10)[::-1]})
+
+ # it works! #2605
+ grouped = df.groupby(['name', 'name2'])
+ grouped.apply(lambda x: x.sort_values('value', inplace=True))
+
+
+def test_apply_typecast_fail():
+ df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
+ 'c': np.tile(
+ ['a', 'b', 'c'], 2),
+ 'v': np.arange(1., 7.)})
+
+ def f(group):
+ v = group['v']
+ group['v2'] = (v - v.min()) / (v.max() - v.min())
+ return group
+
+ result = df.groupby('d').apply(f)
+
+ expected = df.copy()
+ expected['v2'] = np.tile([0., 0.5, 1], 2)
+
+ tm.assert_frame_equal(result, expected)
+
+
+def test_apply_multiindex_fail():
+ index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
+ ])
+ df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
+ 'c': np.tile(['a', 'b', 'c'], 2),
+ 'v': np.arange(1., 7.)}, index=index)
+
+ def f(group):
+ v = group['v']
+ group['v2'] = (v - v.min()) / (v.max() - v.min())
+ return group
+
+ result = df.groupby('d').apply(f)
+
+ expected = df.copy()
+ expected['v2'] = np.tile([0., 0.5, 1], 2)
+
+ tm.assert_frame_equal(result, expected)
+
+
+def test_apply_corner(tsframe):
+ result = tsframe.groupby(lambda x: x.year).apply(lambda x: x * 2)
+ expected = tsframe * 2
+ tm.assert_frame_equal(result, expected)
+
+
+def test_apply_without_copy():
+ # GH 5545
+ # returning a non-copy in an applied function fails
+
+ data = DataFrame({'id_field': [100, 100, 200, 300],
+ 'category': ['a', 'b', 'c', 'c'],
+ 'value': [1, 2, 3, 4]})
+
+ def filt1(x):
+ if x.shape[0] == 1:
+ return x.copy()
+ else:
+ return x[x.category == 'c']
+
+ def filt2(x):
+ if x.shape[0] == 1:
+ return x
+ else:
+ return x[x.category == 'c']
+
+ expected = data.groupby('id_field').apply(filt1)
+ result = data.groupby('id_field').apply(filt2)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_apply_corner_cases():
+ # #535, can't use sliding iterator
+
+ N = 1000
+ labels = np.random.randint(0, 100, size=N)
+ df = DataFrame({'key': labels,
+ 'value1': np.random.randn(N),
+ 'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
+
+ grouped = df.groupby('key')
+
+ def f(g):
+ g['value3'] = g['value1'] * 2
+ return g
+
+ result = grouped.apply(f)
+ assert 'value3' in result
+
+
+def test_apply_numeric_coercion_when_datetime():
+ # In the past, group-by/apply operations have been over-eager
+ # in converting dtypes to numeric, in the presence of datetime
+ # columns. Various GH issues were filed, the reproductions
+ # for which are here.
+
+ # GH 15670
+ df = pd.DataFrame({'Number': [1, 2],
+ 'Date': ["2017-03-02"] * 2,
+ 'Str': ["foo", "inf"]})
+ expected = df.groupby(['Number']).apply(lambda x: x.iloc[0])
+ df.Date = pd.to_datetime(df.Date)
+ result = df.groupby(['Number']).apply(lambda x: x.iloc[0])
+ tm.assert_series_equal(result['Str'], expected['Str'])
+
+ # GH 15421
+ df = pd.DataFrame({'A': [10, 20, 30],
+ 'B': ['foo', '3', '4'],
+ 'T': [pd.Timestamp("12:31:22")] * 3})
+
+ def get_B(g):
+ return g.iloc[0][['B']]
+ result = df.groupby('A').apply(get_B)['B']
+ expected = df.B
+ expected.index = df.A
+ tm.assert_series_equal(result, expected)
+
+ # GH 14423
+ def predictions(tool):
+ out = pd.Series(index=['p1', 'p2', 'useTime'], dtype=object)
+ if 'step1' in list(tool.State):
+ out['p1'] = str(tool[tool.State == 'step1'].Machine.values[0])
+ if 'step2' in list(tool.State):
+ out['p2'] = str(tool[tool.State == 'step2'].Machine.values[0])
+ out['useTime'] = str(
+ tool[tool.State == 'step2'].oTime.values[0])
+ return out
+ df1 = pd.DataFrame({'Key': ['B', 'B', 'A', 'A'],
+ 'State': ['step1', 'step2', 'step1', 'step2'],
+ 'oTime': ['', '2016-09-19 05:24:33',
+ '', '2016-09-19 23:59:04'],
+ 'Machine': ['23', '36L', '36R', '36R']})
+ df2 = df1.copy()
+ df2.oTime = pd.to_datetime(df2.oTime)
+ expected = df1.groupby('Key').apply(predictions).p1
+ result = df2.groupby('Key').apply(predictions).p1
+ tm.assert_series_equal(expected, result)
+
+
+def test_time_field_bug():
+ # Test a fix for the following error related to GH issue 11324 When
+ # non-key fields in a group-by dataframe contained time-based fields
+ # that were not returned by the apply function, an exception would be
+ # raised.
+
+ df = pd.DataFrame({'a': 1, 'b': [datetime.now() for nn in range(10)]})
+
+ def func_with_no_date(batch):
+ return pd.Series({'c': 2})
+
+ def func_with_date(batch):
+ return pd.Series({'b': datetime(2015, 1, 1), 'c': 2})
+
+ dfg_no_conversion = df.groupby(by=['a']).apply(func_with_no_date)
+ dfg_no_conversion_expected = pd.DataFrame({'c': 2}, index=[1])
+ dfg_no_conversion_expected.index.name = 'a'
+
+ dfg_conversion = df.groupby(by=['a']).apply(func_with_date)
+ dfg_conversion_expected = pd.DataFrame(
+ {'b': datetime(2015, 1, 1),
+ 'c': 2}, index=[1])
+ dfg_conversion_expected.index.name = 'a'
+
+ tm.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected)
+ tm.assert_frame_equal(dfg_conversion, dfg_conversion_expected)
+
+
+def test_gb_apply_list_of_unequal_len_arrays():
+
+ # GH1738
+ df = DataFrame({'group1': ['a', 'a', 'a', 'b', 'b', 'b', 'a', 'a', 'a',
+ 'b', 'b', 'b'],
+ 'group2': ['c', 'c', 'd', 'd', 'd', 'e', 'c', 'c', 'd',
+ 'd', 'd', 'e'],
+ 'weight': [1.1, 2, 3, 4, 5, 6, 2, 4, 6, 8, 1, 2],
+ 'value': [7.1, 8, 9, 10, 11, 12, 8, 7, 6, 5, 4, 3]})
+ df = df.set_index(['group1', 'group2'])
+ df_grouped = df.groupby(level=['group1', 'group2'], sort=True)
+
+ def noddy(value, weight):
+ out = np.array(value * weight).repeat(3)
+ return out
+
+ # the kernel function returns arrays of unequal length
+ # pandas sniffs the first one, sees it's an array and not
+ # a list, and assumed the rest are of equal length
+ # and so tries a vstack
+
+ # don't die
+ df_grouped.apply(lambda x: noddy(x.value, x.weight))
+
+
+def test_groupby_apply_all_none():
+ # Tests to make sure no errors if apply function returns all None
+ # values. Issue 9684.
+ test_df = DataFrame({'groups': [0, 0, 1, 1],
+ 'random_vars': [8, 7, 4, 5]})
+
+ def test_func(x):
+ pass
+
+ result = test_df.groupby('groups').apply(test_func)
+ expected = DataFrame()
+ tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_apply_none_first():
+ # GH 12824. Tests if apply returns None first.
+ test_df1 = DataFrame({'groups': [1, 1, 1, 2], 'vars': [0, 1, 2, 3]})
+ test_df2 = DataFrame({'groups': [1, 2, 2, 2], 'vars': [0, 1, 2, 3]})
+
+ def test_func(x):
+ if x.shape[0] < 2:
+ return None
+ return x.iloc[[0, -1]]
+
+ result1 = test_df1.groupby('groups').apply(test_func)
+ result2 = test_df2.groupby('groups').apply(test_func)
+ index1 = MultiIndex.from_arrays([[1, 1], [0, 2]],
+ names=['groups', None])
+ index2 = MultiIndex.from_arrays([[2, 2], [1, 3]],
+ names=['groups', None])
+ expected1 = DataFrame({'groups': [1, 1], 'vars': [0, 2]},
+ index=index1)
+ expected2 = DataFrame({'groups': [2, 2], 'vars': [1, 3]},
+ index=index2)
+ tm.assert_frame_equal(result1, expected1)
+ tm.assert_frame_equal(result2, expected2)
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index bcd0da28b5a34..160b60e69f39d 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -9,710 +9,725 @@
import pandas as pd
from pandas import (Index, MultiIndex, CategoricalIndex,
- DataFrame, Categorical, Series, Interval)
+ DataFrame, Categorical, Series, Interval, qcut)
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas.util.testing as tm
-from .common import MixIn
-
-
-class TestGroupByCategorical(MixIn):
-
- def test_groupby(self):
-
- cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
- categories=["a", "b", "c", "d"], ordered=True)
- data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
-
- exp_index = CategoricalIndex(list('abcd'), name='b', ordered=True)
- expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index)
- result = data.groupby("b").mean()
- tm.assert_frame_equal(result, expected)
-
- raw_cat1 = Categorical(["a", "a", "b", "b"],
- categories=["a", "b", "z"], ordered=True)
- raw_cat2 = Categorical(["c", "d", "c", "d"],
- categories=["c", "d", "y"], ordered=True)
- df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
-
- # single grouper
- gb = df.groupby("A")
- exp_idx = CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True)
- expected = DataFrame({'values': Series([3, 7, 0], index=exp_idx)})
- result = gb.sum()
- tm.assert_frame_equal(result, expected)
-
- # multiple groupers
- gb = df.groupby(['A', 'B'])
- exp_index = pd.MultiIndex.from_product(
- [Categorical(["a", "b", "z"], ordered=True),
- Categorical(["c", "d", "y"], ordered=True)],
- names=['A', 'B'])
- expected = DataFrame({'values': [1, 2, np.nan, 3, 4, np.nan,
- np.nan, np.nan, np.nan]},
- index=exp_index)
- result = gb.sum()
- tm.assert_frame_equal(result, expected)
-
- # multiple groupers with a non-cat
- df = df.copy()
- df['C'] = ['foo', 'bar'] * 2
- gb = df.groupby(['A', 'B', 'C'])
- exp_index = pd.MultiIndex.from_product(
- [Categorical(["a", "b", "z"], ordered=True),
- Categorical(["c", "d", "y"], ordered=True),
- ['foo', 'bar']],
- names=['A', 'B', 'C'])
- expected = DataFrame({'values': Series(
- np.nan, index=exp_index)}).sort_index()
- expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
- result = gb.sum()
- tm.assert_frame_equal(result, expected)
-
- # GH 8623
- x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
- [1, 'John P. Doe']],
- columns=['person_id', 'person_name'])
- x['person_name'] = Categorical(x.person_name)
-
- g = x.groupby(['person_id'])
- result = g.transform(lambda x: x)
- tm.assert_frame_equal(result, x[['person_name']])
-
- result = x.drop_duplicates('person_name')
- expected = x.iloc[[0, 1]]
- tm.assert_frame_equal(result, expected)
-
- def f(x):
- return x.drop_duplicates('person_name').iloc[0]
-
- result = g.apply(f)
- expected = x.iloc[[0, 1]].copy()
- expected.index = Index([1, 2], name='person_id')
- expected['person_name'] = expected['person_name'].astype('object')
- tm.assert_frame_equal(result, expected)
-
- # GH 9921
- # Monotonic
- df = DataFrame({"a": [5, 15, 25]})
- c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
-
- result = df.a.groupby(c).transform(sum)
- tm.assert_series_equal(result, df['a'])
-
- tm.assert_series_equal(
- df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
- tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
- tm.assert_frame_equal(
- df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
-
- # Filter
- tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
- tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
-
- # Non-monotonic
- df = DataFrame({"a": [5, 15, 25, -5]})
- c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
-
- result = df.a.groupby(c).transform(sum)
- tm.assert_series_equal(result, df['a'])
-
- tm.assert_series_equal(
- df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
- tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
- tm.assert_frame_equal(
- df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
-
- # GH 9603
- df = DataFrame({'a': [1, 0, 0, 0]})
- c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list('abcd')))
- result = df.groupby(c).apply(len)
-
- exp_index = CategoricalIndex(
- c.values.categories, ordered=c.values.ordered)
- expected = Series([1, 0, 0, 0], index=exp_index)
- expected.index.name = 'a'
- tm.assert_series_equal(result, expected)
-
- def test_groupby_sort(self):
-
- # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
- # This should result in a properly sorted Series so that the plot
- # has a sorted x axis
- # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
-
- df = DataFrame({'value': np.random.randint(0, 10000, 100)})
- labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
- cat_labels = Categorical(labels, labels)
-
- df = df.sort_values(by=['value'], ascending=True)
- df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
- right=False, labels=cat_labels)
-
- res = df.groupby(['value_group'])['value_group'].count()
- exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
- exp.index = CategoricalIndex(exp.index, name=exp.index.name)
- tm.assert_series_equal(res, exp)
-
- def test_level_groupby_get_group(self):
- # GH15155
- df = DataFrame(data=np.arange(2, 22, 2),
- index=MultiIndex(
- levels=[pd.CategoricalIndex(["a", "b"]), range(10)],
- labels=[[0] * 5 + [1] * 5, range(10)],
- names=["Index1", "Index2"]))
- g = df.groupby(level=["Index1"])
-
- # expected should equal test.loc[["a"]]
- # GH15166
- expected = DataFrame(data=np.arange(2, 12, 2),
- index=pd.MultiIndex(levels=[pd.CategoricalIndex(
- ["a", "b"]), range(5)],
- labels=[[0] * 5, range(5)],
- names=["Index1", "Index2"]))
- result = g.get_group('a')
- assert_frame_equal(result, expected)
-
- def test_apply_use_categorical_name(self):
- from pandas import qcut
- cats = qcut(self.df.C, 4)
-
- def get_stats(group):
- return {'min': group.min(),
- 'max': group.max(),
- 'count': group.count(),
- 'mean': group.mean()}
-
- result = self.df.groupby(cats).D.apply(get_stats)
- assert result.index.names[0] == 'C'
-
- def test_apply_categorical_data(self):
- # GH 10138
- for ordered in [True, False]:
- dense = Categorical(list('abc'), ordered=ordered)
- # 'b' is in the categories but not in the list
- missing = Categorical(
- list('aaa'), categories=['a', 'b'], ordered=ordered)
- values = np.arange(len(dense))
- df = DataFrame({'missing': missing,
- 'dense': dense,
- 'values': values})
- grouped = df.groupby(['missing', 'dense'])
-
- # missing category 'b' should still exist in the output index
- idx = MultiIndex.from_product(
- [Categorical(['a', 'b'], ordered=ordered),
- Categorical(['a', 'b', 'c'], ordered=ordered)],
- names=['missing', 'dense'])
- expected = DataFrame([0, 1, 2, np.nan, np.nan, np.nan],
- index=idx,
- columns=['values'])
-
- assert_frame_equal(grouped.apply(lambda x: np.mean(x)), expected)
- assert_frame_equal(grouped.mean(), expected)
- assert_frame_equal(grouped.agg(np.mean), expected)
-
- # but for transform we should still get back the original index
- idx = MultiIndex.from_product([['a'], ['a', 'b', 'c']],
- names=['missing', 'dense'])
- expected = Series(1, index=idx)
- assert_series_equal(grouped.apply(lambda x: 1), expected)
-
- def test_groupby_categorical(self):
- levels = ['foo', 'bar', 'baz', 'qux']
- codes = np.random.randint(0, 4, size=100)
-
- cats = Categorical.from_codes(codes, levels, ordered=True)
-
- data = DataFrame(np.random.randn(100, 4))
-
- result = data.groupby(cats).mean()
-
- expected = data.groupby(np.asarray(cats)).mean()
- exp_idx = CategoricalIndex(levels, categories=cats.categories,
- ordered=True)
- expected = expected.reindex(exp_idx)
-
- assert_frame_equal(result, expected)
-
- grouped = data.groupby(cats)
- desc_result = grouped.describe()
-
- idx = cats.codes.argsort()
- ord_labels = np.asarray(cats).take(idx)
- ord_data = data.take(idx)
-
- exp_cats = Categorical(ord_labels, ordered=True,
- categories=['foo', 'bar', 'baz', 'qux'])
- expected = ord_data.groupby(exp_cats, sort=False).describe()
- assert_frame_equal(desc_result, expected)
-
- # GH 10460
- expc = Categorical.from_codes(np.arange(4).repeat(8),
- levels, ordered=True)
- exp = CategoricalIndex(expc)
- tm.assert_index_equal((desc_result.stack().index
- .get_level_values(0)), exp)
- exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
- '75%', 'max'] * 4)
- tm.assert_index_equal((desc_result.stack().index
- .get_level_values(1)), exp)
-
- def test_groupby_datetime_categorical(self):
- # GH9049: ensure backward compatibility
- levels = pd.date_range('2014-01-01', periods=4)
- codes = np.random.randint(0, 4, size=100)
-
- cats = Categorical.from_codes(codes, levels, ordered=True)
-
- data = DataFrame(np.random.randn(100, 4))
- result = data.groupby(cats).mean()
-
- expected = data.groupby(np.asarray(cats)).mean()
- expected = expected.reindex(levels)
- expected.index = CategoricalIndex(expected.index,
- categories=expected.index,
- ordered=True)
-
- assert_frame_equal(result, expected)
-
- grouped = data.groupby(cats)
- desc_result = grouped.describe()
-
- idx = cats.codes.argsort()
- ord_labels = cats.take_nd(idx)
- ord_data = data.take(idx)
- expected = ord_data.groupby(ord_labels).describe()
- assert_frame_equal(desc_result, expected)
- tm.assert_index_equal(desc_result.index, expected.index)
- tm.assert_index_equal(
- desc_result.index.get_level_values(0),
- expected.index.get_level_values(0))
-
- # GH 10460
- expc = Categorical.from_codes(
- np.arange(4).repeat(8), levels, ordered=True)
- exp = CategoricalIndex(expc)
- tm.assert_index_equal((desc_result.stack().index
- .get_level_values(0)), exp)
- exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
- '75%', 'max'] * 4)
- tm.assert_index_equal((desc_result.stack().index
- .get_level_values(1)), exp)
-
- def test_groupby_categorical_index(self):
-
- s = np.random.RandomState(12345)
- levels = ['foo', 'bar', 'baz', 'qux']
- codes = s.randint(0, 4, size=20)
- cats = Categorical.from_codes(codes, levels, ordered=True)
- df = DataFrame(
- np.repeat(
- np.arange(20), 4).reshape(-1, 4), columns=list('abcd'))
- df['cats'] = cats
-
- # with a cat index
- result = df.set_index('cats').groupby(level=0).sum()
- expected = df[list('abcd')].groupby(cats.codes).sum()
- expected.index = CategoricalIndex(
- Categorical.from_codes(
- [0, 1, 2, 3], levels, ordered=True), name='cats')
- assert_frame_equal(result, expected)
- # with a cat column, should produce a cat index
- result = df.groupby('cats').sum()
- expected = df[list('abcd')].groupby(cats.codes).sum()
- expected.index = CategoricalIndex(
- Categorical.from_codes(
- [0, 1, 2, 3], levels, ordered=True), name='cats')
- assert_frame_equal(result, expected)
-
- def test_groupby_describe_categorical_columns(self):
- # GH 11558
- cats = pd.CategoricalIndex(['qux', 'foo', 'baz', 'bar'],
- categories=['foo', 'bar', 'baz', 'qux'],
- ordered=True)
- df = DataFrame(np.random.randn(20, 4), columns=cats)
- result = df.groupby([1, 2, 3, 4] * 5).describe()
-
- tm.assert_index_equal(result.stack().columns, cats)
- tm.assert_categorical_equal(result.stack().columns.values, cats.values)
-
- def test_groupby_unstack_categorical(self):
- # GH11558 (example is taken from the original issue)
- df = pd.DataFrame({'a': range(10),
- 'medium': ['A', 'B'] * 5,
- 'artist': list('XYXXY') * 2})
- df['medium'] = df['medium'].astype('category')
-
- gcat = df.groupby(['artist', 'medium'])['a'].count().unstack()
- result = gcat.describe()
-
- exp_columns = pd.CategoricalIndex(['A', 'B'], ordered=False,
- name='medium')
- tm.assert_index_equal(result.columns, exp_columns)
- tm.assert_categorical_equal(result.columns.values, exp_columns.values)
-
- result = gcat['A'] + gcat['B']
- expected = pd.Series([6, 4], index=pd.Index(['X', 'Y'], name='artist'))
- tm.assert_series_equal(result, expected)
-
- def test_groupby_bins_unequal_len(self):
- # GH3011
- series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
- bins = pd.cut(series.dropna().values, 4)
-
- # len(bins) != len(series) here
- def f():
- series.groupby(bins).mean()
- pytest.raises(ValueError, f)
-
- def test_groupby_multi_categorical_as_index(self):
- # GH13204
- df = DataFrame({'cat': Categorical([1, 2, 2], [1, 2, 3]),
- 'A': [10, 11, 11],
- 'B': [101, 102, 103]})
- result = df.groupby(['cat', 'A'], as_index=False).sum()
- expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]),
- 'A': [10, 11, 10, 11, 10, 11],
- 'B': [101.0, nan, nan, 205.0, nan, nan]},
- columns=['cat', 'A', 'B'])
- tm.assert_frame_equal(result, expected)
-
- # function grouper
- f = lambda r: df.loc[r, 'A']
- result = df.groupby(['cat', f], as_index=False).sum()
- expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]),
- 'A': [10.0, nan, nan, 22.0, nan, nan],
- 'B': [101.0, nan, nan, 205.0, nan, nan]},
- columns=['cat', 'A', 'B'])
- tm.assert_frame_equal(result, expected)
-
- # another not in-axis grouper
- s = Series(['a', 'b', 'b'], name='cat2')
- result = df.groupby(['cat', s], as_index=False).sum()
- expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]),
- 'A': [10.0, nan, nan, 22.0, nan, nan],
- 'B': [101.0, nan, nan, 205.0, nan, nan]},
- columns=['cat', 'A', 'B'])
- tm.assert_frame_equal(result, expected)
-
- # GH18872: conflicting names in desired index
- pytest.raises(ValueError, lambda: df.groupby(['cat',
- s.rename('cat')]).sum())
-
- # is original index dropped?
- expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]),
- 'A': [10, 11, 10, 11, 10, 11],
- 'B': [101.0, nan, nan, 205.0, nan, nan]},
- columns=['cat', 'A', 'B'])
-
- group_columns = ['cat', 'A']
-
- for name in [None, 'X', 'B', 'cat']:
- df.index = Index(list("abc"), name=name)
-
- if name in group_columns and name in df.index.names:
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- result = df.groupby(group_columns, as_index=False).sum()
-
- else:
+def test_groupby():
+
+ cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
+ categories=["a", "b", "c", "d"], ordered=True)
+ data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
+
+ exp_index = CategoricalIndex(list('abcd'), name='b', ordered=True)
+ expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index)
+ result = data.groupby("b").mean()
+ tm.assert_frame_equal(result, expected)
+
+ raw_cat1 = Categorical(["a", "a", "b", "b"],
+ categories=["a", "b", "z"], ordered=True)
+ raw_cat2 = Categorical(["c", "d", "c", "d"],
+ categories=["c", "d", "y"], ordered=True)
+ df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
+
+ # single grouper
+ gb = df.groupby("A")
+ exp_idx = CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True)
+ expected = DataFrame({'values': Series([3, 7, 0], index=exp_idx)})
+ result = gb.sum()
+ tm.assert_frame_equal(result, expected)
+
+ # multiple groupers
+ gb = df.groupby(['A', 'B'])
+ exp_index = pd.MultiIndex.from_product(
+ [Categorical(["a", "b", "z"], ordered=True),
+ Categorical(["c", "d", "y"], ordered=True)],
+ names=['A', 'B'])
+ expected = DataFrame({'values': [1, 2, np.nan, 3, 4, np.nan,
+ np.nan, np.nan, np.nan]},
+ index=exp_index)
+ result = gb.sum()
+ tm.assert_frame_equal(result, expected)
+
+ # multiple groupers with a non-cat
+ df = df.copy()
+ df['C'] = ['foo', 'bar'] * 2
+ gb = df.groupby(['A', 'B', 'C'])
+ exp_index = pd.MultiIndex.from_product(
+ [Categorical(["a", "b", "z"], ordered=True),
+ Categorical(["c", "d", "y"], ordered=True),
+ ['foo', 'bar']],
+ names=['A', 'B', 'C'])
+ expected = DataFrame({'values': Series(
+ np.nan, index=exp_index)}).sort_index()
+ expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
+ result = gb.sum()
+ tm.assert_frame_equal(result, expected)
+
+ # GH 8623
+ x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
+ [1, 'John P. Doe']],
+ columns=['person_id', 'person_name'])
+ x['person_name'] = Categorical(x.person_name)
+
+ g = x.groupby(['person_id'])
+ result = g.transform(lambda x: x)
+ tm.assert_frame_equal(result, x[['person_name']])
+
+ result = x.drop_duplicates('person_name')
+ expected = x.iloc[[0, 1]]
+ tm.assert_frame_equal(result, expected)
+
+ def f(x):
+ return x.drop_duplicates('person_name').iloc[0]
+
+ result = g.apply(f)
+ expected = x.iloc[[0, 1]].copy()
+ expected.index = Index([1, 2], name='person_id')
+ expected['person_name'] = expected['person_name'].astype('object')
+ tm.assert_frame_equal(result, expected)
+
+ # GH 9921
+ # Monotonic
+ df = DataFrame({"a": [5, 15, 25]})
+ c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
+
+ result = df.a.groupby(c).transform(sum)
+ tm.assert_series_equal(result, df['a'])
+
+ tm.assert_series_equal(
+ df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
+ tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
+ tm.assert_frame_equal(
+ df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
+
+ # Filter
+ tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
+ tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
+
+ # Non-monotonic
+ df = DataFrame({"a": [5, 15, 25, -5]})
+ c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
+
+ result = df.a.groupby(c).transform(sum)
+ tm.assert_series_equal(result, df['a'])
+
+ tm.assert_series_equal(
+ df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
+ tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
+ tm.assert_frame_equal(
+ df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
+
+ # GH 9603
+ df = DataFrame({'a': [1, 0, 0, 0]})
+ c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list('abcd')))
+ result = df.groupby(c).apply(len)
+
+ exp_index = CategoricalIndex(
+ c.values.categories, ordered=c.values.ordered)
+ expected = Series([1, 0, 0, 0], index=exp_index)
+ expected.index.name = 'a'
+ tm.assert_series_equal(result, expected)
+
+
+def test_groupby_sort():
+
+ # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
+ # This should result in a properly sorted Series so that the plot
+ # has a sorted x axis
+ # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
+
+ df = DataFrame({'value': np.random.randint(0, 10000, 100)})
+ labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
+ cat_labels = Categorical(labels, labels)
+
+ df = df.sort_values(by=['value'], ascending=True)
+ df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
+ right=False, labels=cat_labels)
+
+ res = df.groupby(['value_group'])['value_group'].count()
+ exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
+ exp.index = CategoricalIndex(exp.index, name=exp.index.name)
+ tm.assert_series_equal(res, exp)
+
+
+def test_level_groupby_get_group():
+ # GH15155
+ df = DataFrame(data=np.arange(2, 22, 2),
+ index=MultiIndex(
+ levels=[pd.CategoricalIndex(["a", "b"]), range(10)],
+ labels=[[0] * 5 + [1] * 5, range(10)],
+ names=["Index1", "Index2"]))
+ g = df.groupby(level=["Index1"])
+
+ # expected should equal test.loc[["a"]]
+ # GH15166
+ expected = DataFrame(data=np.arange(2, 12, 2),
+ index=pd.MultiIndex(levels=[pd.CategoricalIndex(
+ ["a", "b"]), range(5)],
+ labels=[[0] * 5, range(5)],
+ names=["Index1", "Index2"]))
+ result = g.get_group('a')
+
+ assert_frame_equal(result, expected)
+
+
+def test_apply_use_categorical_name(df):
+ cats = qcut(df.C, 4)
+
+ def get_stats(group):
+ return {'min': group.min(),
+ 'max': group.max(),
+ 'count': group.count(),
+ 'mean': group.mean()}
+
+ result = df.groupby(cats).D.apply(get_stats)
+ assert result.index.names[0] == 'C'
+
+
+def test_apply_categorical_data():
+ # GH 10138
+ for ordered in [True, False]:
+ dense = Categorical(list('abc'), ordered=ordered)
+ # 'b' is in the categories but not in the list
+ missing = Categorical(
+ list('aaa'), categories=['a', 'b'], ordered=ordered)
+ values = np.arange(len(dense))
+ df = DataFrame({'missing': missing,
+ 'dense': dense,
+ 'values': values})
+ grouped = df.groupby(['missing', 'dense'])
+
+ # missing category 'b' should still exist in the output index
+ idx = MultiIndex.from_product(
+ [Categorical(['a', 'b'], ordered=ordered),
+ Categorical(['a', 'b', 'c'], ordered=ordered)],
+ names=['missing', 'dense'])
+ expected = DataFrame([0, 1, 2, np.nan, np.nan, np.nan],
+ index=idx,
+ columns=['values'])
+
+ assert_frame_equal(grouped.apply(lambda x: np.mean(x)), expected)
+ assert_frame_equal(grouped.mean(), expected)
+ assert_frame_equal(grouped.agg(np.mean), expected)
+
+ # but for transform we should still get back the original index
+ idx = MultiIndex.from_product([['a'], ['a', 'b', 'c']],
+ names=['missing', 'dense'])
+ expected = Series(1, index=idx)
+ assert_series_equal(grouped.apply(lambda x: 1), expected)
+
+
+def test_groupby_categorical():
+ levels = ['foo', 'bar', 'baz', 'qux']
+ codes = np.random.randint(0, 4, size=100)
+
+ cats = Categorical.from_codes(codes, levels, ordered=True)
+
+ data = DataFrame(np.random.randn(100, 4))
+
+ result = data.groupby(cats).mean()
+
+ expected = data.groupby(np.asarray(cats)).mean()
+ exp_idx = CategoricalIndex(levels, categories=cats.categories,
+ ordered=True)
+ expected = expected.reindex(exp_idx)
+
+ assert_frame_equal(result, expected)
+
+ grouped = data.groupby(cats)
+ desc_result = grouped.describe()
+
+ idx = cats.codes.argsort()
+ ord_labels = np.asarray(cats).take(idx)
+ ord_data = data.take(idx)
+
+ exp_cats = Categorical(ord_labels, ordered=True,
+ categories=['foo', 'bar', 'baz', 'qux'])
+ expected = ord_data.groupby(exp_cats, sort=False).describe()
+ assert_frame_equal(desc_result, expected)
+
+ # GH 10460
+ expc = Categorical.from_codes(np.arange(4).repeat(8),
+ levels, ordered=True)
+ exp = CategoricalIndex(expc)
+ tm.assert_index_equal((desc_result.stack().index
+ .get_level_values(0)), exp)
+ exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
+ '75%', 'max'] * 4)
+ tm.assert_index_equal((desc_result.stack().index
+ .get_level_values(1)), exp)
+
+
+def test_groupby_datetime_categorical():
+ # GH9049: ensure backward compatibility
+ levels = pd.date_range('2014-01-01', periods=4)
+ codes = np.random.randint(0, 4, size=100)
+
+ cats = Categorical.from_codes(codes, levels, ordered=True)
+
+ data = DataFrame(np.random.randn(100, 4))
+ result = data.groupby(cats).mean()
+
+ expected = data.groupby(np.asarray(cats)).mean()
+ expected = expected.reindex(levels)
+ expected.index = CategoricalIndex(expected.index,
+ categories=expected.index,
+ ordered=True)
+
+ assert_frame_equal(result, expected)
+
+ grouped = data.groupby(cats)
+ desc_result = grouped.describe()
+
+ idx = cats.codes.argsort()
+ ord_labels = cats.take_nd(idx)
+ ord_data = data.take(idx)
+ expected = ord_data.groupby(ord_labels).describe()
+ assert_frame_equal(desc_result, expected)
+ tm.assert_index_equal(desc_result.index, expected.index)
+ tm.assert_index_equal(
+ desc_result.index.get_level_values(0),
+ expected.index.get_level_values(0))
+
+ # GH 10460
+ expc = Categorical.from_codes(
+ np.arange(4).repeat(8), levels, ordered=True)
+ exp = CategoricalIndex(expc)
+ tm.assert_index_equal((desc_result.stack().index
+ .get_level_values(0)), exp)
+ exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
+ '75%', 'max'] * 4)
+ tm.assert_index_equal((desc_result.stack().index
+ .get_level_values(1)), exp)
+
+
+def test_groupby_categorical_index():
+
+ s = np.random.RandomState(12345)
+ levels = ['foo', 'bar', 'baz', 'qux']
+ codes = s.randint(0, 4, size=20)
+ cats = Categorical.from_codes(codes, levels, ordered=True)
+ df = DataFrame(
+ np.repeat(
+ np.arange(20), 4).reshape(-1, 4), columns=list('abcd'))
+ df['cats'] = cats
+
+ # with a cat index
+ result = df.set_index('cats').groupby(level=0).sum()
+ expected = df[list('abcd')].groupby(cats.codes).sum()
+ expected.index = CategoricalIndex(
+ Categorical.from_codes(
+ [0, 1, 2, 3], levels, ordered=True), name='cats')
+ assert_frame_equal(result, expected)
+
+ # with a cat column, should produce a cat index
+ result = df.groupby('cats').sum()
+ expected = df[list('abcd')].groupby(cats.codes).sum()
+ expected.index = CategoricalIndex(
+ Categorical.from_codes(
+ [0, 1, 2, 3], levels, ordered=True), name='cats')
+ assert_frame_equal(result, expected)
+
+
+def test_groupby_describe_categorical_columns():
+ # GH 11558
+ cats = pd.CategoricalIndex(['qux', 'foo', 'baz', 'bar'],
+ categories=['foo', 'bar', 'baz', 'qux'],
+ ordered=True)
+ df = DataFrame(np.random.randn(20, 4), columns=cats)
+ result = df.groupby([1, 2, 3, 4] * 5).describe()
+
+ tm.assert_index_equal(result.stack().columns, cats)
+ tm.assert_categorical_equal(result.stack().columns.values, cats.values)
+
+
+def test_groupby_unstack_categorical():
+ # GH11558 (example is taken from the original issue)
+ df = pd.DataFrame({'a': range(10),
+ 'medium': ['A', 'B'] * 5,
+ 'artist': list('XYXXY') * 2})
+ df['medium'] = df['medium'].astype('category')
+
+ gcat = df.groupby(['artist', 'medium'])['a'].count().unstack()
+ result = gcat.describe()
+
+ exp_columns = pd.CategoricalIndex(['A', 'B'], ordered=False,
+ name='medium')
+ tm.assert_index_equal(result.columns, exp_columns)
+ tm.assert_categorical_equal(result.columns.values, exp_columns.values)
+
+ result = gcat['A'] + gcat['B']
+ expected = pd.Series([6, 4], index=pd.Index(['X', 'Y'], name='artist'))
+ tm.assert_series_equal(result, expected)
+
+
+def test_groupby_bins_unequal_len():
+ # GH3011
+ series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
+ bins = pd.cut(series.dropna().values, 4)
+
+ # len(bins) != len(series) here
+ def f():
+ series.groupby(bins).mean()
+ pytest.raises(ValueError, f)
+
+
+def test_groupby_multi_categorical_as_index():
+ # GH13204
+ df = DataFrame({'cat': Categorical([1, 2, 2], [1, 2, 3]),
+ 'A': [10, 11, 11],
+ 'B': [101, 102, 103]})
+ result = df.groupby(['cat', 'A'], as_index=False).sum()
+ expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]),
+ 'A': [10, 11, 10, 11, 10, 11],
+ 'B': [101.0, nan, nan, 205.0, nan, nan]},
+ columns=['cat', 'A', 'B'])
+ tm.assert_frame_equal(result, expected)
+
+ # function grouper
+ f = lambda r: df.loc[r, 'A']
+ result = df.groupby(['cat', f], as_index=False).sum()
+ expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]),
+ 'A': [10.0, nan, nan, 22.0, nan, nan],
+ 'B': [101.0, nan, nan, 205.0, nan, nan]},
+ columns=['cat', 'A', 'B'])
+ tm.assert_frame_equal(result, expected)
+
+ # another not in-axis grouper
+ s = Series(['a', 'b', 'b'], name='cat2')
+ result = df.groupby(['cat', s], as_index=False).sum()
+ expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]),
+ 'A': [10.0, nan, nan, 22.0, nan, nan],
+ 'B': [101.0, nan, nan, 205.0, nan, nan]},
+ columns=['cat', 'A', 'B'])
+ tm.assert_frame_equal(result, expected)
+
+ # GH18872: conflicting names in desired index
+ pytest.raises(ValueError, lambda: df.groupby(['cat',
+ s.rename('cat')]).sum())
+
+ # is original index dropped?
+ expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]),
+ 'A': [10, 11, 10, 11, 10, 11],
+ 'B': [101.0, nan, nan, 205.0, nan, nan]},
+ columns=['cat', 'A', 'B'])
+
+ group_columns = ['cat', 'A']
+
+ for name in [None, 'X', 'B', 'cat']:
+ df.index = Index(list("abc"), name=name)
+
+ if name in group_columns and name in df.index.names:
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
result = df.groupby(group_columns, as_index=False).sum()
- tm.assert_frame_equal(result, expected, check_index_type=True)
-
- def test_groupby_preserve_categories(self):
- # GH-13179
- categories = list('abc')
-
- # ordered=True
- df = DataFrame({'A': pd.Categorical(list('ba'),
- categories=categories,
+ else:
+ result = df.groupby(group_columns, as_index=False).sum()
+
+ tm.assert_frame_equal(result, expected, check_index_type=True)
+
+
+def test_groupby_preserve_categories():
+ # GH-13179
+ categories = list('abc')
+
+ # ordered=True
+ df = DataFrame({'A': pd.Categorical(list('ba'),
+ categories=categories,
+ ordered=True)})
+ index = pd.CategoricalIndex(categories, categories, ordered=True)
+ tm.assert_index_equal(df.groupby('A', sort=True).first().index, index)
+ tm.assert_index_equal(df.groupby('A', sort=False).first().index, index)
+
+ # ordered=False
+ df = DataFrame({'A': pd.Categorical(list('ba'),
+ categories=categories,
+ ordered=False)})
+ sort_index = pd.CategoricalIndex(categories, categories, ordered=False)
+ nosort_index = pd.CategoricalIndex(list('bac'), list('bac'),
+ ordered=False)
+ tm.assert_index_equal(df.groupby('A', sort=True).first().index,
+ sort_index)
+ tm.assert_index_equal(df.groupby('A', sort=False).first().index,
+ nosort_index)
+
+
+def test_groupby_preserve_categorical_dtype():
+ # GH13743, GH13854
+ df = DataFrame({'A': [1, 2, 1, 1, 2],
+ 'B': [10, 16, 22, 28, 34],
+ 'C1': Categorical(list("abaab"),
+ categories=list("bac"),
+ ordered=False),
+ 'C2': Categorical(list("abaab"),
+ categories=list("bac"),
+ ordered=True)})
+ # single grouper
+ exp_full = DataFrame({'A': [2.0, 1.0, np.nan],
+ 'B': [25.0, 20.0, np.nan],
+ 'C1': Categorical(list("bac"),
+ categories=list("bac"),
+ ordered=False),
+ 'C2': Categorical(list("bac"),
+ categories=list("bac"),
ordered=True)})
- index = pd.CategoricalIndex(categories, categories, ordered=True)
- tm.assert_index_equal(df.groupby('A', sort=True).first().index, index)
- tm.assert_index_equal(df.groupby('A', sort=False).first().index, index)
-
- # ordered=False
- df = DataFrame({'A': pd.Categorical(list('ba'),
- categories=categories,
- ordered=False)})
- sort_index = pd.CategoricalIndex(categories, categories, ordered=False)
- nosort_index = pd.CategoricalIndex(list('bac'), list('bac'),
- ordered=False)
- tm.assert_index_equal(df.groupby('A', sort=True).first().index,
- sort_index)
- tm.assert_index_equal(df.groupby('A', sort=False).first().index,
- nosort_index)
-
- def test_groupby_preserve_categorical_dtype(self):
- # GH13743, GH13854
- df = DataFrame({'A': [1, 2, 1, 1, 2],
- 'B': [10, 16, 22, 28, 34],
- 'C1': Categorical(list("abaab"),
- categories=list("bac"),
- ordered=False),
- 'C2': Categorical(list("abaab"),
- categories=list("bac"),
- ordered=True)})
- # single grouper
- exp_full = DataFrame({'A': [2.0, 1.0, np.nan],
- 'B': [25.0, 20.0, np.nan],
- 'C1': Categorical(list("bac"),
- categories=list("bac"),
- ordered=False),
- 'C2': Categorical(list("bac"),
- categories=list("bac"),
- ordered=True)})
- for col in ['C1', 'C2']:
- result1 = df.groupby(by=col, as_index=False).mean()
- result2 = df.groupby(by=col, as_index=True).mean().reset_index()
- expected = exp_full.reindex(columns=result1.columns)
- tm.assert_frame_equal(result1, expected)
- tm.assert_frame_equal(result2, expected)
-
- # multiple grouper
- exp_full = DataFrame({'A': [1, 1, 1, 2, 2, 2],
- 'B': [np.nan, 20.0, np.nan, 25.0, np.nan,
- np.nan],
- 'C1': Categorical(list("bacbac"),
- categories=list("bac"),
- ordered=False),
- 'C2': Categorical(list("bacbac"),
- categories=list("bac"),
- ordered=True)})
- for cols in [['A', 'C1'], ['A', 'C2']]:
- result1 = df.groupby(by=cols, as_index=False).mean()
- result2 = df.groupby(by=cols, as_index=True).mean().reset_index()
- expected = exp_full.reindex(columns=result1.columns)
- tm.assert_frame_equal(result1, expected)
- tm.assert_frame_equal(result2, expected)
-
- def test_groupby_categorical_no_compress(self):
- data = Series(np.random.randn(9))
-
- codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
- cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
-
- result = data.groupby(cats).mean()
- exp = data.groupby(codes).mean()
-
- exp.index = CategoricalIndex(exp.index, categories=cats.categories,
- ordered=cats.ordered)
- assert_series_equal(result, exp)
-
- codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
- cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
-
- result = data.groupby(cats).mean()
- exp = data.groupby(codes).mean().reindex(cats.categories)
- exp.index = CategoricalIndex(exp.index, categories=cats.categories,
- ordered=cats.ordered)
- assert_series_equal(result, exp)
-
- cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
- categories=["a", "b", "c", "d"], ordered=True)
- data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
-
- result = data.groupby("b").mean()
- result = result["a"].values
- exp = np.array([1, 2, 4, np.nan])
- tm.assert_numpy_array_equal(result, exp)
-
- def test_groupby_sort_categorical(self):
- # dataframe groupby sort was being ignored # GH 8868
- df = DataFrame([['(7.5, 10]', 10, 10],
- ['(7.5, 10]', 8, 20],
- ['(2.5, 5]', 5, 30],
- ['(5, 7.5]', 6, 40],
- ['(2.5, 5]', 4, 50],
- ['(0, 2.5]', 1, 60],
- ['(5, 7.5]', 7, 70]], columns=['range', 'foo', 'bar'])
- df['range'] = Categorical(df['range'], ordered=True)
- index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]',
- '(7.5, 10]'], name='range', ordered=True)
- result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
- columns=['foo', 'bar'], index=index)
-
- col = 'range'
- assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
- # when categories is ordered, group is ordered by category's order
- assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
-
- df['range'] = Categorical(df['range'], ordered=False)
- index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]',
- '(7.5, 10]'], name='range')
- result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
- columns=['foo', 'bar'], index=index)
-
- index = CategoricalIndex(['(7.5, 10]', '(2.5, 5]', '(5, 7.5]',
- '(0, 2.5]'],
- categories=['(7.5, 10]', '(2.5, 5]',
- '(5, 7.5]', '(0, 2.5]'],
- name='range')
- result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
- index=index, columns=['foo', 'bar'])
-
- col = 'range'
- # this is an unordered categorical, but we allow this ####
- assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
- assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
-
- def test_groupby_sort_categorical_datetimelike(self):
- # GH10505
-
- # use same data as test_groupby_sort_categorical, which category is
- # corresponding to datetime.month
- df = DataFrame({'dt': [datetime(2011, 7, 1), datetime(2011, 7, 1),
- datetime(2011, 2, 1), datetime(2011, 5, 1),
- datetime(2011, 2, 1), datetime(2011, 1, 1),
- datetime(2011, 5, 1)],
- 'foo': [10, 8, 5, 6, 4, 1, 7],
- 'bar': [10, 20, 30, 40, 50, 60, 70]},
- columns=['dt', 'foo', 'bar'])
-
- # ordered=True
- df['dt'] = Categorical(df['dt'], ordered=True)
- index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
- datetime(2011, 5, 1), datetime(2011, 7, 1)]
- result_sort = DataFrame(
- [[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
- result_sort.index = CategoricalIndex(index, name='dt', ordered=True)
-
- index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
- datetime(2011, 5, 1), datetime(2011, 1, 1)]
- result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
- columns=['foo', 'bar'])
- result_nosort.index = CategoricalIndex(index, categories=index,
- name='dt', ordered=True)
-
- col = 'dt'
- assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
- # when categories is ordered, group is ordered by category's order
- assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
-
- # ordered = False
- df['dt'] = Categorical(df['dt'], ordered=False)
- index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
- datetime(2011, 5, 1), datetime(2011, 7, 1)]
- result_sort = DataFrame(
- [[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
- result_sort.index = CategoricalIndex(index, name='dt')
-
- index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
- datetime(2011, 5, 1), datetime(2011, 1, 1)]
- result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
- columns=['foo', 'bar'])
- result_nosort.index = CategoricalIndex(index, categories=index,
- name='dt')
-
- col = 'dt'
- assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
- assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
-
- def test_groupby_categorical_two_columns(self):
-
- # https://github.com/pandas-dev/pandas/issues/8138
- d = {'cat':
- pd.Categorical(["a", "b", "a", "b"], categories=["a", "b", "c"],
- ordered=True),
- 'ints': [1, 1, 2, 2],
- 'val': [10, 20, 30, 40]}
- test = pd.DataFrame(d)
-
- # Grouping on a single column
- groups_single_key = test.groupby("cat")
- res = groups_single_key.agg('mean')
-
- exp_index = pd.CategoricalIndex(["a", "b", "c"], name="cat",
- ordered=True)
- exp = DataFrame({"ints": [1.5, 1.5, np.nan], "val": [20, 30, np.nan]},
- index=exp_index)
- tm.assert_frame_equal(res, exp)
-
- # Grouping on two columns
- groups_double_key = test.groupby(["cat", "ints"])
- res = groups_double_key.agg('mean')
- exp = DataFrame({"val": [10, 30, 20, 40, np.nan, np.nan],
- "cat": pd.Categorical(["a", "a", "b", "b", "c", "c"],
- ordered=True),
- "ints": [1, 2, 1, 2, 1, 2]}).set_index(["cat", "ints"
- ])
- tm.assert_frame_equal(res, exp)
-
- # GH 10132
- for key in [('a', 1), ('b', 2), ('b', 1), ('a', 2)]:
- c, i = key
- result = groups_double_key.get_group(key)
- expected = test[(test.cat == c) & (test.ints == i)]
- assert_frame_equal(result, expected)
-
- d = {'C1': [3, 3, 4, 5], 'C2': [1, 2, 3, 4], 'C3': [10, 100, 200, 34]}
- test = pd.DataFrame(d)
- values = pd.cut(test['C1'], [1, 2, 3, 6])
- values.name = "cat"
- groups_double_key = test.groupby([values, 'C2'])
-
- res = groups_double_key.agg('mean')
- nan = np.nan
- idx = MultiIndex.from_product(
- [Categorical([Interval(1, 2), Interval(2, 3),
- Interval(3, 6)], ordered=True),
- [1, 2, 3, 4]],
- names=["cat", "C2"])
- exp = DataFrame({"C1": [nan, nan, nan, nan, 3, 3,
- nan, nan, nan, nan, 4, 5],
- "C3": [nan, nan, nan, nan, 10, 100,
- nan, nan, nan, nan, 200, 34]}, index=idx)
- tm.assert_frame_equal(res, exp)
-
- def test_empty_sum(self):
- # https://github.com/pandas-dev/pandas/issues/18678
- df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'],
- categories=['a', 'b', 'c']),
- 'B': [1, 2, 1]})
- expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A')
-
- # 0 by default
- result = df.groupby("A").B.sum()
- expected = pd.Series([3, 1, 0], expected_idx, name='B')
- tm.assert_series_equal(result, expected)
-
- # min_count=0
- result = df.groupby("A").B.sum(min_count=0)
- expected = pd.Series([3, 1, 0], expected_idx, name='B')
- tm.assert_series_equal(result, expected)
-
- # min_count=1
- result = df.groupby("A").B.sum(min_count=1)
- expected = pd.Series([3, 1, np.nan], expected_idx, name='B')
- tm.assert_series_equal(result, expected)
-
- # min_count>1
- result = df.groupby("A").B.sum(min_count=2)
- expected = pd.Series([3, np.nan, np.nan], expected_idx, name='B')
- tm.assert_series_equal(result, expected)
-
- def test_empty_prod(self):
- # https://github.com/pandas-dev/pandas/issues/18678
- df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'],
- categories=['a', 'b', 'c']),
- 'B': [1, 2, 1]})
-
- expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A')
-
- # 1 by default
- result = df.groupby("A").B.prod()
- expected = pd.Series([2, 1, 1], expected_idx, name='B')
- tm.assert_series_equal(result, expected)
-
- # min_count=0
- result = df.groupby("A").B.prod(min_count=0)
- expected = pd.Series([2, 1, 1], expected_idx, name='B')
- tm.assert_series_equal(result, expected)
-
- # min_count=1
- result = df.groupby("A").B.prod(min_count=1)
- expected = pd.Series([2, 1, np.nan], expected_idx, name='B')
- tm.assert_series_equal(result, expected)
+ for col in ['C1', 'C2']:
+ result1 = df.groupby(by=col, as_index=False).mean()
+ result2 = df.groupby(by=col, as_index=True).mean().reset_index()
+ expected = exp_full.reindex(columns=result1.columns)
+ tm.assert_frame_equal(result1, expected)
+ tm.assert_frame_equal(result2, expected)
+
+ # multiple grouper
+ exp_full = DataFrame({'A': [1, 1, 1, 2, 2, 2],
+ 'B': [np.nan, 20.0, np.nan, 25.0, np.nan,
+ np.nan],
+ 'C1': Categorical(list("bacbac"),
+ categories=list("bac"),
+ ordered=False),
+ 'C2': Categorical(list("bacbac"),
+ categories=list("bac"),
+ ordered=True)})
+ for cols in [['A', 'C1'], ['A', 'C2']]:
+ result1 = df.groupby(by=cols, as_index=False).mean()
+ result2 = df.groupby(by=cols, as_index=True).mean().reset_index()
+ expected = exp_full.reindex(columns=result1.columns)
+ tm.assert_frame_equal(result1, expected)
+ tm.assert_frame_equal(result2, expected)
+
+
+def test_groupby_categorical_no_compress():
+ data = Series(np.random.randn(9))
+
+ codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
+ cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
+
+ result = data.groupby(cats).mean()
+ exp = data.groupby(codes).mean()
+
+ exp.index = CategoricalIndex(exp.index, categories=cats.categories,
+ ordered=cats.ordered)
+ assert_series_equal(result, exp)
+
+ codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
+ cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
+
+ result = data.groupby(cats).mean()
+ exp = data.groupby(codes).mean().reindex(cats.categories)
+ exp.index = CategoricalIndex(exp.index, categories=cats.categories,
+ ordered=cats.ordered)
+ assert_series_equal(result, exp)
+
+ cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
+ categories=["a", "b", "c", "d"], ordered=True)
+ data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
+
+ result = data.groupby("b").mean()
+ result = result["a"].values
+ exp = np.array([1, 2, 4, np.nan])
+ tm.assert_numpy_array_equal(result, exp)
+
+
+def test_groupby_sort_categorical():
+ # dataframe groupby sort was being ignored # GH 8868
+ df = DataFrame([['(7.5, 10]', 10, 10],
+ ['(7.5, 10]', 8, 20],
+ ['(2.5, 5]', 5, 30],
+ ['(5, 7.5]', 6, 40],
+ ['(2.5, 5]', 4, 50],
+ ['(0, 2.5]', 1, 60],
+ ['(5, 7.5]', 7, 70]], columns=['range', 'foo', 'bar'])
+ df['range'] = Categorical(df['range'], ordered=True)
+ index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]',
+ '(7.5, 10]'], name='range', ordered=True)
+ result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
+ columns=['foo', 'bar'], index=index)
+
+ col = 'range'
+ assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
+ # when categories is ordered, group is ordered by category's order
+ assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
+
+ df['range'] = Categorical(df['range'], ordered=False)
+ index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]',
+ '(7.5, 10]'], name='range')
+ result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
+ columns=['foo', 'bar'], index=index)
+
+ index = CategoricalIndex(['(7.5, 10]', '(2.5, 5]', '(5, 7.5]',
+ '(0, 2.5]'],
+ categories=['(7.5, 10]', '(2.5, 5]',
+ '(5, 7.5]', '(0, 2.5]'],
+ name='range')
+ result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
+ index=index, columns=['foo', 'bar'])
+
+ col = 'range'
+ # this is an unordered categorical, but we allow this ####
+ assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
+ assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
+
+
+def test_groupby_sort_categorical_datetimelike():
+ # GH10505
+
+ # use same data as test_groupby_sort_categorical, which category is
+ # corresponding to datetime.month
+ df = DataFrame({'dt': [datetime(2011, 7, 1), datetime(2011, 7, 1),
+ datetime(2011, 2, 1), datetime(2011, 5, 1),
+ datetime(2011, 2, 1), datetime(2011, 1, 1),
+ datetime(2011, 5, 1)],
+ 'foo': [10, 8, 5, 6, 4, 1, 7],
+ 'bar': [10, 20, 30, 40, 50, 60, 70]},
+ columns=['dt', 'foo', 'bar'])
+
+ # ordered=True
+ df['dt'] = Categorical(df['dt'], ordered=True)
+ index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
+ datetime(2011, 5, 1), datetime(2011, 7, 1)]
+ result_sort = DataFrame(
+ [[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
+ result_sort.index = CategoricalIndex(index, name='dt', ordered=True)
+
+ index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
+ datetime(2011, 5, 1), datetime(2011, 1, 1)]
+ result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
+ columns=['foo', 'bar'])
+ result_nosort.index = CategoricalIndex(index, categories=index,
+ name='dt', ordered=True)
+
+ col = 'dt'
+ assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
+ # when categories is ordered, group is ordered by category's order
+ assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
+
+ # ordered = False
+ df['dt'] = Categorical(df['dt'], ordered=False)
+ index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
+ datetime(2011, 5, 1), datetime(2011, 7, 1)]
+ result_sort = DataFrame(
+ [[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
+ result_sort.index = CategoricalIndex(index, name='dt')
+
+ index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
+ datetime(2011, 5, 1), datetime(2011, 1, 1)]
+ result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
+ columns=['foo', 'bar'])
+ result_nosort.index = CategoricalIndex(index, categories=index,
+ name='dt')
+
+ col = 'dt'
+ assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
+ assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
+
+
+def test_groupby_categorical_two_columns():
+
+ # https://github.com/pandas-dev/pandas/issues/8138
+ d = {'cat':
+ pd.Categorical(["a", "b", "a", "b"], categories=["a", "b", "c"],
+ ordered=True),
+ 'ints': [1, 1, 2, 2],
+ 'val': [10, 20, 30, 40]}
+ test = pd.DataFrame(d)
+
+ # Grouping on a single column
+ groups_single_key = test.groupby("cat")
+ res = groups_single_key.agg('mean')
+
+ exp_index = pd.CategoricalIndex(["a", "b", "c"], name="cat",
+ ordered=True)
+ exp = DataFrame({"ints": [1.5, 1.5, np.nan], "val": [20, 30, np.nan]},
+ index=exp_index)
+ tm.assert_frame_equal(res, exp)
+
+ # Grouping on two columns
+ groups_double_key = test.groupby(["cat", "ints"])
+ res = groups_double_key.agg('mean')
+ exp = DataFrame({"val": [10, 30, 20, 40, np.nan, np.nan],
+ "cat": pd.Categorical(["a", "a", "b", "b", "c", "c"],
+ ordered=True),
+ "ints": [1, 2, 1, 2, 1, 2]}).set_index(["cat", "ints"
+ ])
+ tm.assert_frame_equal(res, exp)
+
+ # GH 10132
+ for key in [('a', 1), ('b', 2), ('b', 1), ('a', 2)]:
+ c, i = key
+ result = groups_double_key.get_group(key)
+ expected = test[(test.cat == c) & (test.ints == i)]
+ assert_frame_equal(result, expected)
+
+ d = {'C1': [3, 3, 4, 5], 'C2': [1, 2, 3, 4], 'C3': [10, 100, 200, 34]}
+ test = pd.DataFrame(d)
+ values = pd.cut(test['C1'], [1, 2, 3, 6])
+ values.name = "cat"
+ groups_double_key = test.groupby([values, 'C2'])
+
+ res = groups_double_key.agg('mean')
+ nan = np.nan
+ idx = MultiIndex.from_product(
+ [Categorical([Interval(1, 2), Interval(2, 3),
+ Interval(3, 6)], ordered=True),
+ [1, 2, 3, 4]],
+ names=["cat", "C2"])
+ exp = DataFrame({"C1": [nan, nan, nan, nan, 3, 3,
+ nan, nan, nan, nan, 4, 5],
+ "C3": [nan, nan, nan, nan, 10, 100,
+ nan, nan, nan, nan, 200, 34]}, index=idx)
+ tm.assert_frame_equal(res, exp)
+
+
+def test_empty_sum():
+ # https://github.com/pandas-dev/pandas/issues/18678
+ df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'],
+ categories=['a', 'b', 'c']),
+ 'B': [1, 2, 1]})
+ expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A')
+
+ # 0 by default
+ result = df.groupby("A").B.sum()
+ expected = pd.Series([3, 1, 0], expected_idx, name='B')
+ tm.assert_series_equal(result, expected)
+
+ # min_count=0
+ result = df.groupby("A").B.sum(min_count=0)
+ expected = pd.Series([3, 1, 0], expected_idx, name='B')
+ tm.assert_series_equal(result, expected)
+
+ # min_count=1
+ result = df.groupby("A").B.sum(min_count=1)
+ expected = pd.Series([3, 1, np.nan], expected_idx, name='B')
+ tm.assert_series_equal(result, expected)
+
+ # min_count>1
+ result = df.groupby("A").B.sum(min_count=2)
+ expected = pd.Series([3, np.nan, np.nan], expected_idx, name='B')
+ tm.assert_series_equal(result, expected)
+
+
+def test_empty_prod():
+ # https://github.com/pandas-dev/pandas/issues/18678
+ df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'],
+ categories=['a', 'b', 'c']),
+ 'B': [1, 2, 1]})
+
+ expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A')
+
+ # 1 by default
+ result = df.groupby("A").B.prod()
+ expected = pd.Series([2, 1, 1], expected_idx, name='B')
+ tm.assert_series_equal(result, expected)
+
+ # min_count=0
+ result = df.groupby("A").B.prod(min_count=0)
+ expected = pd.Series([2, 1, 1], expected_idx, name='B')
+ tm.assert_series_equal(result, expected)
+
+ # min_count=1
+ result = df.groupby("A").B.prod(min_count=1)
+ expected = pd.Series([2, 1, np.nan], expected_idx, name='B')
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_filters.py b/pandas/tests/groupby/test_filters.py
index cac6b46af8f87..873d9f6076b69 100644
--- a/pandas/tests/groupby/test_filters.py
+++ b/pandas/tests/groupby/test_filters.py
@@ -1,622 +1,576 @@
# -*- coding: utf-8 -*-
from __future__ import print_function
-from numpy import nan
-
import pytest
-from pandas import Timestamp
-from pandas.core.index import MultiIndex
-from pandas.core.api import DataFrame
-
-from pandas.core.series import Series
-
-from pandas.util.testing import (assert_frame_equal, assert_series_equal
- )
-from pandas.compat import (lmap)
-
-from pandas import compat
-
-import pandas.core.common as com
import numpy as np
-
import pandas.util.testing as tm
+from pandas import Timestamp, DataFrame, Series
import pandas as pd
-class TestGroupByFilter(object):
-
- def setup_method(self, method):
- self.ts = tm.makeTimeSeries()
-
- self.seriesd = tm.getSeriesData()
- self.tsd = tm.getTimeSeriesData()
- self.frame = DataFrame(self.seriesd)
- self.tsframe = DataFrame(self.tsd)
-
- self.df = DataFrame(
- {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
- 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
- 'C': np.random.randn(8),
- 'D': np.random.randn(8)})
-
- self.df_mixed_floats = DataFrame(
- {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
- 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
- 'C': np.random.randn(8),
- 'D': np.array(
- np.random.randn(8), dtype='float32')})
-
- index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
- 'three']],
- labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
- [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
- names=['first', 'second'])
- self.mframe = DataFrame(np.random.randn(10, 3), index=index,
- columns=['A', 'B', 'C'])
-
- self.three_group = DataFrame(
- {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
- 'foo', 'foo', 'foo'],
- 'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
- 'two', 'two', 'one'],
- 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
- 'dull', 'shiny', 'shiny', 'shiny'],
- 'D': np.random.randn(11),
- 'E': np.random.randn(11),
- 'F': np.random.randn(11)})
-
- def test_filter_series(self):
- s = pd.Series([1, 3, 20, 5, 22, 24, 7])
- expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
- expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
- grouper = s.apply(lambda x: x % 2)
- grouped = s.groupby(grouper)
- assert_series_equal(
- grouped.filter(lambda x: x.mean() < 10), expected_odd)
- assert_series_equal(
- grouped.filter(lambda x: x.mean() > 10), expected_even)
- # Test dropna=False.
- assert_series_equal(
- grouped.filter(lambda x: x.mean() < 10, dropna=False),
- expected_odd.reindex(s.index))
- assert_series_equal(
- grouped.filter(lambda x: x.mean() > 10, dropna=False),
- expected_even.reindex(s.index))
-
- def test_filter_single_column_df(self):
- df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])
- expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
- expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])
- grouper = df[0].apply(lambda x: x % 2)
- grouped = df.groupby(grouper)
- assert_frame_equal(
- grouped.filter(lambda x: x.mean() < 10), expected_odd)
- assert_frame_equal(
- grouped.filter(lambda x: x.mean() > 10), expected_even)
- # Test dropna=False.
- assert_frame_equal(
- grouped.filter(lambda x: x.mean() < 10, dropna=False),
- expected_odd.reindex(df.index))
- assert_frame_equal(
- grouped.filter(lambda x: x.mean() > 10, dropna=False),
- expected_even.reindex(df.index))
-
- def test_filter_multi_column_df(self):
- df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
- grouper = df['A'].apply(lambda x: x % 2)
- grouped = df.groupby(grouper)
- expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
- assert_frame_equal(
- grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10),
- expected)
-
- def test_filter_mixed_df(self):
- df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
- grouper = df['A'].apply(lambda x: x % 2)
- grouped = df.groupby(grouper)
- expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
- assert_frame_equal(
- grouped.filter(lambda x: x['A'].sum() > 10), expected)
-
- def test_filter_out_all_groups(self):
- s = pd.Series([1, 3, 20, 5, 22, 24, 7])
- grouper = s.apply(lambda x: x % 2)
- grouped = s.groupby(grouper)
- assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
- df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
- grouper = df['A'].apply(lambda x: x % 2)
- grouped = df.groupby(grouper)
- assert_frame_equal(
- grouped.filter(lambda x: x['A'].sum() > 1000), df.loc[[]])
-
- def test_filter_out_no_groups(self):
- s = pd.Series([1, 3, 20, 5, 22, 24, 7])
- grouper = s.apply(lambda x: x % 2)
- grouped = s.groupby(grouper)
- filtered = grouped.filter(lambda x: x.mean() > 0)
- assert_series_equal(filtered, s)
- df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
- grouper = df['A'].apply(lambda x: x % 2)
- grouped = df.groupby(grouper)
- filtered = grouped.filter(lambda x: x['A'].mean() > 0)
- assert_frame_equal(filtered, df)
-
- def test_filter_out_all_groups_in_df(self):
- # GH12768
- df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
- res = df.groupby('a')
- res = res.filter(lambda x: x['b'].sum() > 5, dropna=False)
- expected = pd.DataFrame({'a': [nan] * 3, 'b': [nan] * 3})
- assert_frame_equal(expected, res)
-
- df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
- res = df.groupby('a')
- res = res.filter(lambda x: x['b'].sum() > 5, dropna=True)
- expected = pd.DataFrame({'a': [], 'b': []}, dtype="int64")
- assert_frame_equal(expected, res)
-
- def test_filter_condition_raises(self):
- def raise_if_sum_is_zero(x):
- if x.sum() == 0:
- raise ValueError
- else:
- return x.sum() > 0
-
- s = pd.Series([-1, 0, 1, 2])
- grouper = s.apply(lambda x: x % 2)
- grouped = s.groupby(grouper)
- pytest.raises(TypeError,
- lambda: grouped.filter(raise_if_sum_is_zero))
-
- def test_filter_with_axis_in_groupby(self):
- # issue 11041
- index = pd.MultiIndex.from_product([range(10), [0, 1]])
- data = pd.DataFrame(
- np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
- result = data.groupby(level=0,
- axis=1).filter(lambda x: x.iloc[0, 0] > 10)
- expected = data.iloc[:, 12:20]
- assert_frame_equal(result, expected)
-
- def test_filter_bad_shapes(self):
- df = DataFrame({'A': np.arange(8),
- 'B': list('aabbbbcc'),
- 'C': np.arange(8)})
- s = df['B']
- g_df = df.groupby('B')
- g_s = s.groupby(s)
-
- f = lambda x: x
- pytest.raises(TypeError, lambda: g_df.filter(f))
- pytest.raises(TypeError, lambda: g_s.filter(f))
-
- f = lambda x: x == 1
- pytest.raises(TypeError, lambda: g_df.filter(f))
- pytest.raises(TypeError, lambda: g_s.filter(f))
-
- f = lambda x: np.outer(x, x)
- pytest.raises(TypeError, lambda: g_df.filter(f))
- pytest.raises(TypeError, lambda: g_s.filter(f))
-
- def test_filter_nan_is_false(self):
- df = DataFrame({'A': np.arange(8),
- 'B': list('aabbbbcc'),
- 'C': np.arange(8)})
- s = df['B']
- g_df = df.groupby(df['B'])
- g_s = s.groupby(s)
-
- f = lambda x: np.nan
- assert_frame_equal(g_df.filter(f), df.loc[[]])
- assert_series_equal(g_s.filter(f), s[[]])
-
- def test_filter_against_workaround(self):
- np.random.seed(0)
- # Series of ints
- s = Series(np.random.randint(0, 100, 1000))
- grouper = s.apply(lambda x: np.round(x, -1))
- grouped = s.groupby(grouper)
- f = lambda x: x.mean() > 10
-
- old_way = s[grouped.transform(f).astype('bool')]
- new_way = grouped.filter(f)
- assert_series_equal(new_way.sort_values(), old_way.sort_values())
-
- # Series of floats
- s = 100 * Series(np.random.random(1000))
- grouper = s.apply(lambda x: np.round(x, -1))
- grouped = s.groupby(grouper)
- f = lambda x: x.mean() > 10
- old_way = s[grouped.transform(f).astype('bool')]
- new_way = grouped.filter(f)
- assert_series_equal(new_way.sort_values(), old_way.sort_values())
-
- # Set up DataFrame of ints, floats, strings.
- from string import ascii_lowercase
- letters = np.array(list(ascii_lowercase))
- N = 1000
- random_letters = letters.take(np.random.randint(0, 26, N))
- df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),
- 'floats': N / 10 * Series(np.random.random(N)),
- 'letters': Series(random_letters)})
-
- # Group by ints; filter on floats.
- grouped = df.groupby('ints')
- old_way = df[grouped.floats.
- transform(lambda x: x.mean() > N / 20).astype('bool')]
- new_way = grouped.filter(lambda x: x['floats'].mean() > N / 20)
- assert_frame_equal(new_way, old_way)
-
- # Group by floats (rounded); filter on strings.
- grouper = df.floats.apply(lambda x: np.round(x, -1))
- grouped = df.groupby(grouper)
- old_way = df[grouped.letters.
- transform(lambda x: len(x) < N / 10).astype('bool')]
- new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
- assert_frame_equal(new_way, old_way)
-
- # Group by strings; filter on ints.
- grouped = df.groupby('letters')
- old_way = df[grouped.ints.
- transform(lambda x: x.mean() > N / 20).astype('bool')]
- new_way = grouped.filter(lambda x: x['ints'].mean() > N / 20)
- assert_frame_equal(new_way, old_way)
-
- def test_filter_using_len(self):
- # BUG GH4447
- df = DataFrame({'A': np.arange(8),
- 'B': list('aabbbbcc'),
- 'C': np.arange(8)})
- grouped = df.groupby('B')
- actual = grouped.filter(lambda x: len(x) > 2)
- expected = DataFrame(
- {'A': np.arange(2, 6),
- 'B': list('bbbb'),
- 'C': np.arange(2, 6)}, index=np.arange(2, 6))
- assert_frame_equal(actual, expected)
-
- actual = grouped.filter(lambda x: len(x) > 4)
- expected = df.loc[[]]
- assert_frame_equal(actual, expected)
-
- # Series have always worked properly, but we'll test anyway.
- s = df['B']
- grouped = s.groupby(s)
- actual = grouped.filter(lambda x: len(x) > 2)
- expected = Series(4 * ['b'], index=np.arange(2, 6), name='B')
- assert_series_equal(actual, expected)
-
- actual = grouped.filter(lambda x: len(x) > 4)
- expected = s[[]]
- assert_series_equal(actual, expected)
-
- def test_filter_maintains_ordering(self):
- # Simple case: index is sequential. #4621
- df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
- 'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
- s = df['pid']
- grouped = df.groupby('tag')
- actual = grouped.filter(lambda x: len(x) > 1)
- expected = df.iloc[[1, 2, 4, 7]]
- assert_frame_equal(actual, expected)
-
- grouped = s.groupby(df['tag'])
- actual = grouped.filter(lambda x: len(x) > 1)
- expected = s.iloc[[1, 2, 4, 7]]
- assert_series_equal(actual, expected)
-
- # Now index is sequentially decreasing.
- df.index = np.arange(len(df) - 1, -1, -1)
- s = df['pid']
- grouped = df.groupby('tag')
- actual = grouped.filter(lambda x: len(x) > 1)
- expected = df.iloc[[1, 2, 4, 7]]
- assert_frame_equal(actual, expected)
-
- grouped = s.groupby(df['tag'])
- actual = grouped.filter(lambda x: len(x) > 1)
- expected = s.iloc[[1, 2, 4, 7]]
- assert_series_equal(actual, expected)
-
- # Index is shuffled.
- SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
- df.index = df.index[SHUFFLED]
- s = df['pid']
- grouped = df.groupby('tag')
- actual = grouped.filter(lambda x: len(x) > 1)
- expected = df.iloc[[1, 2, 4, 7]]
- assert_frame_equal(actual, expected)
-
- grouped = s.groupby(df['tag'])
- actual = grouped.filter(lambda x: len(x) > 1)
- expected = s.iloc[[1, 2, 4, 7]]
- assert_series_equal(actual, expected)
-
- def test_filter_multiple_timestamp(self):
- # GH 10114
- df = DataFrame({'A': np.arange(5, dtype='int64'),
- 'B': ['foo', 'bar', 'foo', 'bar', 'bar'],
- 'C': Timestamp('20130101')})
-
- grouped = df.groupby(['B', 'C'])
-
- result = grouped['A'].filter(lambda x: True)
- assert_series_equal(df['A'], result)
-
- result = grouped['A'].transform(len)
- expected = Series([2, 3, 2, 3, 3], name='A')
- assert_series_equal(result, expected)
-
- result = grouped.filter(lambda x: True)
- assert_frame_equal(df, result)
-
- result = grouped.transform('sum')
- expected = DataFrame({'A': [2, 8, 2, 8, 8]})
- assert_frame_equal(result, expected)
-
- result = grouped.transform(len)
- expected = DataFrame({'A': [2, 3, 2, 3, 3]})
- assert_frame_equal(result, expected)
-
- def test_filter_and_transform_with_non_unique_int_index(self):
- # GH4620
- index = [1, 1, 1, 2, 1, 1, 0, 1]
- df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
- 'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
- grouped_df = df.groupby('tag')
- ser = df['pid']
- grouped_ser = ser.groupby(df['tag'])
- expected_indexes = [1, 2, 4, 7]
-
- # Filter DataFrame
- actual = grouped_df.filter(lambda x: len(x) > 1)
- expected = df.iloc[expected_indexes]
- assert_frame_equal(actual, expected)
-
- actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
- expected = df.copy()
- expected.iloc[[0, 3, 5, 6]] = np.nan
- assert_frame_equal(actual, expected)
-
- # Filter Series
- actual = grouped_ser.filter(lambda x: len(x) > 1)
- expected = ser.take(expected_indexes)
- assert_series_equal(actual, expected)
-
- actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
- NA = np.nan
- expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
- # ^ made manually because this can get confusing!
- assert_series_equal(actual, expected)
-
- # Transform Series
- actual = grouped_ser.transform(len)
- expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
- assert_series_equal(actual, expected)
-
- # Transform (a column from) DataFrameGroupBy
- actual = grouped_df.pid.transform(len)
- assert_series_equal(actual, expected)
-
- def test_filter_and_transform_with_multiple_non_unique_int_index(self):
- # GH4620
- index = [1, 1, 1, 2, 0, 0, 0, 1]
- df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
- 'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
- grouped_df = df.groupby('tag')
- ser = df['pid']
- grouped_ser = ser.groupby(df['tag'])
- expected_indexes = [1, 2, 4, 7]
-
- # Filter DataFrame
- actual = grouped_df.filter(lambda x: len(x) > 1)
- expected = df.iloc[expected_indexes]
- assert_frame_equal(actual, expected)
-
- actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
- expected = df.copy()
- expected.iloc[[0, 3, 5, 6]] = np.nan
- assert_frame_equal(actual, expected)
-
- # Filter Series
- actual = grouped_ser.filter(lambda x: len(x) > 1)
- expected = ser.take(expected_indexes)
- assert_series_equal(actual, expected)
-
- actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
- NA = np.nan
- expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
- # ^ made manually because this can get confusing!
- assert_series_equal(actual, expected)
-
- # Transform Series
- actual = grouped_ser.transform(len)
- expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
- assert_series_equal(actual, expected)
-
- # Transform (a column from) DataFrameGroupBy
- actual = grouped_df.pid.transform(len)
- assert_series_equal(actual, expected)
-
- def test_filter_and_transform_with_non_unique_float_index(self):
- # GH4620
- index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
- df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
- 'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
- grouped_df = df.groupby('tag')
- ser = df['pid']
- grouped_ser = ser.groupby(df['tag'])
- expected_indexes = [1, 2, 4, 7]
-
- # Filter DataFrame
- actual = grouped_df.filter(lambda x: len(x) > 1)
- expected = df.iloc[expected_indexes]
- assert_frame_equal(actual, expected)
-
- actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
- expected = df.copy()
- expected.iloc[[0, 3, 5, 6]] = np.nan
- assert_frame_equal(actual, expected)
-
- # Filter Series
- actual = grouped_ser.filter(lambda x: len(x) > 1)
- expected = ser.take(expected_indexes)
- assert_series_equal(actual, expected)
-
- actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
- NA = np.nan
- expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
- # ^ made manually because this can get confusing!
- assert_series_equal(actual, expected)
-
- # Transform Series
- actual = grouped_ser.transform(len)
- expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
- assert_series_equal(actual, expected)
-
- # Transform (a column from) DataFrameGroupBy
- actual = grouped_df.pid.transform(len)
- assert_series_equal(actual, expected)
-
- def test_filter_and_transform_with_non_unique_timestamp_index(self):
- # GH4620
- t0 = Timestamp('2013-09-30 00:05:00')
- t1 = Timestamp('2013-10-30 00:05:00')
- t2 = Timestamp('2013-11-30 00:05:00')
- index = [t1, t1, t1, t2, t1, t1, t0, t1]
- df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
- 'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
- grouped_df = df.groupby('tag')
- ser = df['pid']
- grouped_ser = ser.groupby(df['tag'])
- expected_indexes = [1, 2, 4, 7]
-
- # Filter DataFrame
- actual = grouped_df.filter(lambda x: len(x) > 1)
- expected = df.iloc[expected_indexes]
- assert_frame_equal(actual, expected)
-
- actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
- expected = df.copy()
- expected.iloc[[0, 3, 5, 6]] = np.nan
- assert_frame_equal(actual, expected)
-
- # Filter Series
- actual = grouped_ser.filter(lambda x: len(x) > 1)
- expected = ser.take(expected_indexes)
- assert_series_equal(actual, expected)
-
- actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
- NA = np.nan
- expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
- # ^ made manually because this can get confusing!
- assert_series_equal(actual, expected)
-
- # Transform Series
- actual = grouped_ser.transform(len)
- expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
- assert_series_equal(actual, expected)
-
- # Transform (a column from) DataFrameGroupBy
- actual = grouped_df.pid.transform(len)
- assert_series_equal(actual, expected)
-
- def test_filter_and_transform_with_non_unique_string_index(self):
- # GH4620
- index = list('bbbcbbab')
- df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
- 'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
- grouped_df = df.groupby('tag')
- ser = df['pid']
- grouped_ser = ser.groupby(df['tag'])
- expected_indexes = [1, 2, 4, 7]
-
- # Filter DataFrame
- actual = grouped_df.filter(lambda x: len(x) > 1)
- expected = df.iloc[expected_indexes]
- assert_frame_equal(actual, expected)
-
- actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
- expected = df.copy()
- expected.iloc[[0, 3, 5, 6]] = np.nan
- assert_frame_equal(actual, expected)
-
- # Filter Series
- actual = grouped_ser.filter(lambda x: len(x) > 1)
- expected = ser.take(expected_indexes)
- assert_series_equal(actual, expected)
-
- actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
- NA = np.nan
- expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
- # ^ made manually because this can get confusing!
- assert_series_equal(actual, expected)
-
- # Transform Series
- actual = grouped_ser.transform(len)
- expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
- assert_series_equal(actual, expected)
-
- # Transform (a column from) DataFrameGroupBy
- actual = grouped_df.pid.transform(len)
- assert_series_equal(actual, expected)
-
- def test_filter_has_access_to_grouped_cols(self):
- df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=['A', 'B'])
- g = df.groupby('A')
- # previously didn't have access to col A #????
- filt = g.filter(lambda x: x['A'].sum() == 2)
- assert_frame_equal(filt, df.iloc[[0, 1]])
-
- def test_filter_enforces_scalarness(self):
- df = pd.DataFrame([
- ['best', 'a', 'x'],
- ['worst', 'b', 'y'],
- ['best', 'c', 'x'],
- ['best', 'd', 'y'],
- ['worst', 'd', 'y'],
- ['worst', 'd', 'y'],
- ['best', 'd', 'z'],
- ], columns=['a', 'b', 'c'])
- with tm.assert_raises_regex(TypeError,
- 'filter function returned a.*'):
- df.groupby('c').filter(lambda g: g['a'] == 'best')
-
- def test_filter_non_bool_raises(self):
- df = pd.DataFrame([
- ['best', 'a', 1],
- ['worst', 'b', 1],
- ['best', 'c', 1],
- ['best', 'd', 1],
- ['worst', 'd', 1],
- ['worst', 'd', 1],
- ['best', 'd', 1],
- ], columns=['a', 'b', 'c'])
- with tm.assert_raises_regex(TypeError,
- 'filter function returned a.*'):
- df.groupby('a').filter(lambda g: g.c.mean())
-
- def test_filter_dropna_with_empty_groups(self):
- # GH 10780
- data = pd.Series(np.random.rand(9), index=np.repeat([1, 2, 3], 3))
- groupped = data.groupby(level=0)
- result_false = groupped.filter(lambda x: x.mean() > 1, dropna=False)
- expected_false = pd.Series([np.nan] * 9,
- index=np.repeat([1, 2, 3], 3))
- tm.assert_series_equal(result_false, expected_false)
-
- result_true = groupped.filter(lambda x: x.mean() > 1, dropna=True)
- expected_true = pd.Series(index=pd.Index([], dtype=int))
- tm.assert_series_equal(result_true, expected_true)
-
-
-def assert_fp_equal(a, b):
- assert (np.abs(a - b) < 1e-12).all()
-
-
-def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
- tups = lmap(tuple, df[keys].values)
- tups = com._asarray_tuplesafe(tups)
- expected = f(df.groupby(tups)[field])
- for k, v in compat.iteritems(expected):
- assert (result[k] == v)
+def test_filter_series():
+ s = pd.Series([1, 3, 20, 5, 22, 24, 7])
+ expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
+ expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
+ grouper = s.apply(lambda x: x % 2)
+ grouped = s.groupby(grouper)
+ tm.assert_series_equal(
+ grouped.filter(lambda x: x.mean() < 10), expected_odd)
+ tm.assert_series_equal(
+ grouped.filter(lambda x: x.mean() > 10), expected_even)
+ # Test dropna=False.
+ tm.assert_series_equal(
+ grouped.filter(lambda x: x.mean() < 10, dropna=False),
+ expected_odd.reindex(s.index))
+ tm.assert_series_equal(
+ grouped.filter(lambda x: x.mean() > 10, dropna=False),
+ expected_even.reindex(s.index))
+
+
+def test_filter_single_column_df():
+ df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])
+ expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
+ expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])
+ grouper = df[0].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ tm.assert_frame_equal(
+ grouped.filter(lambda x: x.mean() < 10), expected_odd)
+ tm.assert_frame_equal(
+ grouped.filter(lambda x: x.mean() > 10), expected_even)
+ # Test dropna=False.
+ tm.assert_frame_equal(
+ grouped.filter(lambda x: x.mean() < 10, dropna=False),
+ expected_odd.reindex(df.index))
+ tm.assert_frame_equal(
+ grouped.filter(lambda x: x.mean() > 10, dropna=False),
+ expected_even.reindex(df.index))
+
+
+def test_filter_multi_column_df():
+ df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
+ grouper = df['A'].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
+ tm.assert_frame_equal(
+ grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10),
+ expected)
+
+
+def test_filter_mixed_df():
+ df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
+ grouper = df['A'].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
+ tm.assert_frame_equal(
+ grouped.filter(lambda x: x['A'].sum() > 10), expected)
+
+
+def test_filter_out_all_groups():
+ s = pd.Series([1, 3, 20, 5, 22, 24, 7])
+ grouper = s.apply(lambda x: x % 2)
+ grouped = s.groupby(grouper)
+ tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
+ df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
+ grouper = df['A'].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ tm.assert_frame_equal(
+ grouped.filter(lambda x: x['A'].sum() > 1000), df.loc[[]])
+
+
+def test_filter_out_no_groups():
+ s = pd.Series([1, 3, 20, 5, 22, 24, 7])
+ grouper = s.apply(lambda x: x % 2)
+ grouped = s.groupby(grouper)
+ filtered = grouped.filter(lambda x: x.mean() > 0)
+ tm.assert_series_equal(filtered, s)
+ df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
+ grouper = df['A'].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ filtered = grouped.filter(lambda x: x['A'].mean() > 0)
+ tm.assert_frame_equal(filtered, df)
+
+
+def test_filter_out_all_groups_in_df():
+ # GH12768
+ df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
+ res = df.groupby('a')
+ res = res.filter(lambda x: x['b'].sum() > 5, dropna=False)
+ expected = pd.DataFrame({'a': [np.nan] * 3, 'b': [np.nan] * 3})
+ tm.assert_frame_equal(expected, res)
+
+ df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
+ res = df.groupby('a')
+ res = res.filter(lambda x: x['b'].sum() > 5, dropna=True)
+ expected = pd.DataFrame({'a': [], 'b': []}, dtype="int64")
+ tm.assert_frame_equal(expected, res)
+
+
+def test_filter_condition_raises():
+ def raise_if_sum_is_zero(x):
+ if x.sum() == 0:
+ raise ValueError
+ else:
+ return x.sum() > 0
+
+ s = pd.Series([-1, 0, 1, 2])
+ grouper = s.apply(lambda x: x % 2)
+ grouped = s.groupby(grouper)
+ pytest.raises(TypeError,
+ lambda: grouped.filter(raise_if_sum_is_zero))
+
+
+def test_filter_with_axis_in_groupby():
+ # issue 11041
+ index = pd.MultiIndex.from_product([range(10), [0, 1]])
+ data = pd.DataFrame(
+ np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
+ result = data.groupby(level=0,
+ axis=1).filter(lambda x: x.iloc[0, 0] > 10)
+ expected = data.iloc[:, 12:20]
+ tm.assert_frame_equal(result, expected)
+
+
+def test_filter_bad_shapes():
+ df = DataFrame({'A': np.arange(8),
+ 'B': list('aabbbbcc'),
+ 'C': np.arange(8)})
+ s = df['B']
+ g_df = df.groupby('B')
+ g_s = s.groupby(s)
+
+ f = lambda x: x
+ pytest.raises(TypeError, lambda: g_df.filter(f))
+ pytest.raises(TypeError, lambda: g_s.filter(f))
+
+ f = lambda x: x == 1
+ pytest.raises(TypeError, lambda: g_df.filter(f))
+ pytest.raises(TypeError, lambda: g_s.filter(f))
+
+ f = lambda x: np.outer(x, x)
+ pytest.raises(TypeError, lambda: g_df.filter(f))
+ pytest.raises(TypeError, lambda: g_s.filter(f))
+
+
+def test_filter_nan_is_false():
+ df = DataFrame({'A': np.arange(8),
+ 'B': list('aabbbbcc'),
+ 'C': np.arange(8)})
+ s = df['B']
+ g_df = df.groupby(df['B'])
+ g_s = s.groupby(s)
+
+ f = lambda x: np.nan
+ tm.assert_frame_equal(g_df.filter(f), df.loc[[]])
+ tm.assert_series_equal(g_s.filter(f), s[[]])
+
+
+def test_filter_against_workaround():
+ np.random.seed(0)
+ # Series of ints
+ s = Series(np.random.randint(0, 100, 1000))
+ grouper = s.apply(lambda x: np.round(x, -1))
+ grouped = s.groupby(grouper)
+ f = lambda x: x.mean() > 10
+
+ old_way = s[grouped.transform(f).astype('bool')]
+ new_way = grouped.filter(f)
+ tm.assert_series_equal(new_way.sort_values(), old_way.sort_values())
+
+ # Series of floats
+ s = 100 * Series(np.random.random(1000))
+ grouper = s.apply(lambda x: np.round(x, -1))
+ grouped = s.groupby(grouper)
+ f = lambda x: x.mean() > 10
+ old_way = s[grouped.transform(f).astype('bool')]
+ new_way = grouped.filter(f)
+ tm.assert_series_equal(new_way.sort_values(), old_way.sort_values())
+
+ # Set up DataFrame of ints, floats, strings.
+ from string import ascii_lowercase
+ letters = np.array(list(ascii_lowercase))
+ N = 1000
+ random_letters = letters.take(np.random.randint(0, 26, N))
+ df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),
+ 'floats': N / 10 * Series(np.random.random(N)),
+ 'letters': Series(random_letters)})
+
+ # Group by ints; filter on floats.
+ grouped = df.groupby('ints')
+ old_way = df[grouped.floats.
+ transform(lambda x: x.mean() > N / 20).astype('bool')]
+ new_way = grouped.filter(lambda x: x['floats'].mean() > N / 20)
+ tm.assert_frame_equal(new_way, old_way)
+
+ # Group by floats (rounded); filter on strings.
+ grouper = df.floats.apply(lambda x: np.round(x, -1))
+ grouped = df.groupby(grouper)
+ old_way = df[grouped.letters.
+ transform(lambda x: len(x) < N / 10).astype('bool')]
+ new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
+ tm.assert_frame_equal(new_way, old_way)
+
+ # Group by strings; filter on ints.
+ grouped = df.groupby('letters')
+ old_way = df[grouped.ints.
+ transform(lambda x: x.mean() > N / 20).astype('bool')]
+ new_way = grouped.filter(lambda x: x['ints'].mean() > N / 20)
+ tm.assert_frame_equal(new_way, old_way)
+
+
+def test_filter_using_len():
+ # BUG GH4447
+ df = DataFrame({'A': np.arange(8),
+ 'B': list('aabbbbcc'),
+ 'C': np.arange(8)})
+ grouped = df.groupby('B')
+ actual = grouped.filter(lambda x: len(x) > 2)
+ expected = DataFrame(
+ {'A': np.arange(2, 6),
+ 'B': list('bbbb'),
+ 'C': np.arange(2, 6)}, index=np.arange(2, 6))
+ tm.assert_frame_equal(actual, expected)
+
+ actual = grouped.filter(lambda x: len(x) > 4)
+ expected = df.loc[[]]
+ tm.assert_frame_equal(actual, expected)
+
+ # Series have always worked properly, but we'll test anyway.
+ s = df['B']
+ grouped = s.groupby(s)
+ actual = grouped.filter(lambda x: len(x) > 2)
+ expected = Series(4 * ['b'], index=np.arange(2, 6), name='B')
+ tm.assert_series_equal(actual, expected)
+
+ actual = grouped.filter(lambda x: len(x) > 4)
+ expected = s[[]]
+ tm.assert_series_equal(actual, expected)
+
+
+def test_filter_maintains_ordering():
+ # Simple case: index is sequential. #4621
+ df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
+ 'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
+ s = df['pid']
+ grouped = df.groupby('tag')
+ actual = grouped.filter(lambda x: len(x) > 1)
+ expected = df.iloc[[1, 2, 4, 7]]
+ tm.assert_frame_equal(actual, expected)
+
+ grouped = s.groupby(df['tag'])
+ actual = grouped.filter(lambda x: len(x) > 1)
+ expected = s.iloc[[1, 2, 4, 7]]
+ tm.assert_series_equal(actual, expected)
+
+ # Now index is sequentially decreasing.
+ df.index = np.arange(len(df) - 1, -1, -1)
+ s = df['pid']
+ grouped = df.groupby('tag')
+ actual = grouped.filter(lambda x: len(x) > 1)
+ expected = df.iloc[[1, 2, 4, 7]]
+ tm.assert_frame_equal(actual, expected)
+
+ grouped = s.groupby(df['tag'])
+ actual = grouped.filter(lambda x: len(x) > 1)
+ expected = s.iloc[[1, 2, 4, 7]]
+ tm.assert_series_equal(actual, expected)
+
+ # Index is shuffled.
+ SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
+ df.index = df.index[SHUFFLED]
+ s = df['pid']
+ grouped = df.groupby('tag')
+ actual = grouped.filter(lambda x: len(x) > 1)
+ expected = df.iloc[[1, 2, 4, 7]]
+ tm.assert_frame_equal(actual, expected)
+
+ grouped = s.groupby(df['tag'])
+ actual = grouped.filter(lambda x: len(x) > 1)
+ expected = s.iloc[[1, 2, 4, 7]]
+ tm.assert_series_equal(actual, expected)
+
+
+def test_filter_multiple_timestamp():
+ # GH 10114
+ df = DataFrame({'A': np.arange(5, dtype='int64'),
+ 'B': ['foo', 'bar', 'foo', 'bar', 'bar'],
+ 'C': Timestamp('20130101')})
+
+ grouped = df.groupby(['B', 'C'])
+
+ result = grouped['A'].filter(lambda x: True)
+ tm.assert_series_equal(df['A'], result)
+
+ result = grouped['A'].transform(len)
+ expected = Series([2, 3, 2, 3, 3], name='A')
+ tm.assert_series_equal(result, expected)
+
+ result = grouped.filter(lambda x: True)
+ tm.assert_frame_equal(df, result)
+
+ result = grouped.transform('sum')
+ expected = DataFrame({'A': [2, 8, 2, 8, 8]})
+ tm.assert_frame_equal(result, expected)
+
+ result = grouped.transform(len)
+ expected = DataFrame({'A': [2, 3, 2, 3, 3]})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_filter_and_transform_with_non_unique_int_index():
+ # GH4620
+ index = [1, 1, 1, 2, 1, 1, 0, 1]
+ df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
+ 'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
+ grouped_df = df.groupby('tag')
+ ser = df['pid']
+ grouped_ser = ser.groupby(df['tag'])
+ expected_indexes = [1, 2, 4, 7]
+
+ # Filter DataFrame
+ actual = grouped_df.filter(lambda x: len(x) > 1)
+ expected = df.iloc[expected_indexes]
+ tm.assert_frame_equal(actual, expected)
+
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
+ expected = df.copy()
+ expected.iloc[[0, 3, 5, 6]] = np.nan
+ tm.assert_frame_equal(actual, expected)
+
+ # Filter Series
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
+ expected = ser.take(expected_indexes)
+ tm.assert_series_equal(actual, expected)
+
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
+ NA = np.nan
+ expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
+ # ^ made manually because this can get confusing!
+ tm.assert_series_equal(actual, expected)
+
+ # Transform Series
+ actual = grouped_ser.transform(len)
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
+ tm.assert_series_equal(actual, expected)
+
+ # Transform (a column from) DataFrameGroupBy
+ actual = grouped_df.pid.transform(len)
+ tm.assert_series_equal(actual, expected)
+
+
+def test_filter_and_transform_with_multiple_non_unique_int_index():
+ # GH4620
+ index = [1, 1, 1, 2, 0, 0, 0, 1]
+ df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
+ 'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
+ grouped_df = df.groupby('tag')
+ ser = df['pid']
+ grouped_ser = ser.groupby(df['tag'])
+ expected_indexes = [1, 2, 4, 7]
+
+ # Filter DataFrame
+ actual = grouped_df.filter(lambda x: len(x) > 1)
+ expected = df.iloc[expected_indexes]
+ tm.assert_frame_equal(actual, expected)
+
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
+ expected = df.copy()
+ expected.iloc[[0, 3, 5, 6]] = np.nan
+ tm.assert_frame_equal(actual, expected)
+
+ # Filter Series
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
+ expected = ser.take(expected_indexes)
+ tm.assert_series_equal(actual, expected)
+
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
+ NA = np.nan
+ expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
+ # ^ made manually because this can get confusing!
+ tm.assert_series_equal(actual, expected)
+
+ # Transform Series
+ actual = grouped_ser.transform(len)
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
+ tm.assert_series_equal(actual, expected)
+
+ # Transform (a column from) DataFrameGroupBy
+ actual = grouped_df.pid.transform(len)
+ tm.assert_series_equal(actual, expected)
+
+
+def test_filter_and_transform_with_non_unique_float_index():
+ # GH4620
+ index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
+ df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
+ 'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
+ grouped_df = df.groupby('tag')
+ ser = df['pid']
+ grouped_ser = ser.groupby(df['tag'])
+ expected_indexes = [1, 2, 4, 7]
+
+ # Filter DataFrame
+ actual = grouped_df.filter(lambda x: len(x) > 1)
+ expected = df.iloc[expected_indexes]
+ tm.assert_frame_equal(actual, expected)
+
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
+ expected = df.copy()
+ expected.iloc[[0, 3, 5, 6]] = np.nan
+ tm.assert_frame_equal(actual, expected)
+
+ # Filter Series
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
+ expected = ser.take(expected_indexes)
+ tm.assert_series_equal(actual, expected)
+
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
+ NA = np.nan
+ expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
+ # ^ made manually because this can get confusing!
+ tm.assert_series_equal(actual, expected)
+
+ # Transform Series
+ actual = grouped_ser.transform(len)
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
+ tm.assert_series_equal(actual, expected)
+
+ # Transform (a column from) DataFrameGroupBy
+ actual = grouped_df.pid.transform(len)
+ tm.assert_series_equal(actual, expected)
+
+
+def test_filter_and_transform_with_non_unique_timestamp_index():
+ # GH4620
+ t0 = Timestamp('2013-09-30 00:05:00')
+ t1 = Timestamp('2013-10-30 00:05:00')
+ t2 = Timestamp('2013-11-30 00:05:00')
+ index = [t1, t1, t1, t2, t1, t1, t0, t1]
+ df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
+ 'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
+ grouped_df = df.groupby('tag')
+ ser = df['pid']
+ grouped_ser = ser.groupby(df['tag'])
+ expected_indexes = [1, 2, 4, 7]
+
+ # Filter DataFrame
+ actual = grouped_df.filter(lambda x: len(x) > 1)
+ expected = df.iloc[expected_indexes]
+ tm.assert_frame_equal(actual, expected)
+
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
+ expected = df.copy()
+ expected.iloc[[0, 3, 5, 6]] = np.nan
+ tm.assert_frame_equal(actual, expected)
+
+ # Filter Series
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
+ expected = ser.take(expected_indexes)
+ tm.assert_series_equal(actual, expected)
+
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
+ NA = np.nan
+ expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
+ # ^ made manually because this can get confusing!
+ tm.assert_series_equal(actual, expected)
+
+ # Transform Series
+ actual = grouped_ser.transform(len)
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
+ tm.assert_series_equal(actual, expected)
+
+ # Transform (a column from) DataFrameGroupBy
+ actual = grouped_df.pid.transform(len)
+ tm.assert_series_equal(actual, expected)
+
+
+def test_filter_and_transform_with_non_unique_string_index():
+ # GH4620
+ index = list('bbbcbbab')
+ df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
+ 'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
+ grouped_df = df.groupby('tag')
+ ser = df['pid']
+ grouped_ser = ser.groupby(df['tag'])
+ expected_indexes = [1, 2, 4, 7]
+
+ # Filter DataFrame
+ actual = grouped_df.filter(lambda x: len(x) > 1)
+ expected = df.iloc[expected_indexes]
+ tm.assert_frame_equal(actual, expected)
+
+ actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
+ expected = df.copy()
+ expected.iloc[[0, 3, 5, 6]] = np.nan
+ tm.assert_frame_equal(actual, expected)
+
+ # Filter Series
+ actual = grouped_ser.filter(lambda x: len(x) > 1)
+ expected = ser.take(expected_indexes)
+ tm.assert_series_equal(actual, expected)
+
+ actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
+ NA = np.nan
+ expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
+ # ^ made manually because this can get confusing!
+ tm.assert_series_equal(actual, expected)
+
+ # Transform Series
+ actual = grouped_ser.transform(len)
+ expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
+ tm.assert_series_equal(actual, expected)
+
+ # Transform (a column from) DataFrameGroupBy
+ actual = grouped_df.pid.transform(len)
+ tm.assert_series_equal(actual, expected)
+
+
+def test_filter_has_access_to_grouped_cols():
+ df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=['A', 'B'])
+ g = df.groupby('A')
+ # previously the filter predicate didn't have access to the grouped column 'A'
+ filt = g.filter(lambda x: x['A'].sum() == 2)
+ tm.assert_frame_equal(filt, df.iloc[[0, 1]])
+
+
+def test_filter_enforces_scalarness():
+ df = pd.DataFrame([
+ ['best', 'a', 'x'],
+ ['worst', 'b', 'y'],
+ ['best', 'c', 'x'],
+ ['best', 'd', 'y'],
+ ['worst', 'd', 'y'],
+ ['worst', 'd', 'y'],
+ ['best', 'd', 'z'],
+ ], columns=['a', 'b', 'c'])
+ with tm.assert_raises_regex(TypeError,
+ 'filter function returned a.*'):
+ df.groupby('c').filter(lambda g: g['a'] == 'best')
+
+
+def test_filter_non_bool_raises():
+ df = pd.DataFrame([
+ ['best', 'a', 1],
+ ['worst', 'b', 1],
+ ['best', 'c', 1],
+ ['best', 'd', 1],
+ ['worst', 'd', 1],
+ ['worst', 'd', 1],
+ ['best', 'd', 1],
+ ], columns=['a', 'b', 'c'])
+ with tm.assert_raises_regex(TypeError,
+ 'filter function returned a.*'):
+ df.groupby('a').filter(lambda g: g.c.mean())
+
+
+def test_filter_dropna_with_empty_groups():
+ # GH 10780
+ data = pd.Series(np.random.rand(9), index=np.repeat([1, 2, 3], 3))
+ groupped = data.groupby(level=0)
+ result_false = groupped.filter(lambda x: x.mean() > 1, dropna=False)
+ expected_false = pd.Series([np.nan] * 9,
+ index=np.repeat([1, 2, 3], 3))
+ tm.assert_series_equal(result_false, expected_false)
+
+ result_true = groupped.filter(lambda x: x.mean() > 1, dropna=True)
+ expected_true = pd.Series(index=pd.Index([], dtype=int))
+ tm.assert_series_equal(result_true, expected_true)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
new file mode 100644
index 0000000000000..ba1371fe9f931
--- /dev/null
+++ b/pandas/tests/groupby/test_function.py
@@ -0,0 +1,1120 @@
+import pytest
+
+import numpy as np
+import pandas as pd
+from pandas import (DataFrame, Index, compat, isna,
+ Series, MultiIndex, Timestamp, date_range)
+from pandas.errors import UnsupportedFunctionCall
+from pandas.util import testing as tm
+import pandas.core.nanops as nanops
+from string import ascii_lowercase
+from pandas.compat import product as cart_product
+
+
+@pytest.mark.parametrize("agg_func", ['any', 'all'])
+@pytest.mark.parametrize("skipna", [True, False])
+@pytest.mark.parametrize("vals", [
+ ['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
+ [1, 2, 3], [1, 0, 0], [0, 0, 0],
+ [1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
+ [True, True, True], [True, False, False], [False, False, False],
+ [np.nan, np.nan, np.nan]
+])
+def test_groupby_bool_aggs(agg_func, skipna, vals):
+ df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
+
+ # Figure out expectation using Python builtin
+ exp = getattr(compat.builtins, agg_func)(vals)
+
+ # edge case for missing data with skipna and 'any'
+ if skipna and all(isna(vals)) and agg_func == 'any':
+ exp = False
+
+ exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
+ ['a', 'b'], name='key'))
+ result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
+ tm.assert_frame_equal(result, exp_df)
+
+
+def test_max_min_non_numeric():
+ # #2700
+ aa = DataFrame({'nn': [11, 11, 22, 22],
+ 'ii': [1, 2, 3, 4],
+ 'ss': 4 * ['mama']})
+
+ result = aa.groupby('nn').max()
+ assert 'ss' in result
+
+ result = aa.groupby('nn').max(numeric_only=False)
+ assert 'ss' in result
+
+ result = aa.groupby('nn').min()
+ assert 'ss' in result
+
+ result = aa.groupby('nn').min(numeric_only=False)
+ assert 'ss' in result
+
+
+def test_intercept_builtin_sum():
+ s = Series([1., 2., np.nan, 3.])
+ grouped = s.groupby([0, 1, 2, 2])
+
+ result = grouped.agg(compat.builtins.sum)
+ result2 = grouped.apply(compat.builtins.sum)
+ expected = grouped.sum()
+ tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(result2, expected)
+
+
+def test_builtins_apply(): # GH8155
+ df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
+ columns=['jim', 'joe'])
+ df['jolie'] = np.random.randn(1000)
+
+ for keys in ['jim', ['jim', 'joe']]: # single key & multi-key
+        # exercise every builtin against BOTH grouping forms; skipping
+        # the single-key case would silently halve this test's coverage
+ for f in [max, min, sum]:
+ fname = f.__name__
+ result = df.groupby(keys).apply(f)
+ result.shape
+ ngroups = len(df.drop_duplicates(subset=keys))
+ assert result.shape == (ngroups, 3), 'invalid frame shape: '\
+ '{} (expected ({}, 3))'.format(result.shape, ngroups)
+
+ tm.assert_frame_equal(result, # numpy's equivalent function
+ df.groupby(keys).apply(getattr(np, fname)))
+
+ if f != sum:
+ expected = df.groupby(keys).agg(fname).reset_index()
+ expected.set_index(keys, inplace=True, drop=False)
+ tm.assert_frame_equal(result, expected, check_dtype=False)
+
+ tm.assert_series_equal(getattr(result, fname)(),
+ getattr(df, fname)())
+
+
+def test_arg_passthru():
+ # make sure that we are passing thru kwargs
+ # to our agg functions
+
+ # GH3668
+ # GH5724
+ df = pd.DataFrame(
+ {'group': [1, 1, 2],
+ 'int': [1, 2, 3],
+ 'float': [4., 5., 6.],
+ 'string': list('abc'),
+ 'category_string': pd.Series(list('abc')).astype('category'),
+ 'category_int': [7, 8, 9],
+ 'datetime': pd.date_range('20130101', periods=3),
+ 'datetimetz': pd.date_range('20130101',
+ periods=3,
+ tz='US/Eastern'),
+ 'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
+ columns=['group', 'int', 'float', 'string',
+ 'category_string', 'category_int',
+ 'datetime', 'datetimetz',
+ 'timedelta'])
+
+ expected_columns_numeric = Index(['int', 'float', 'category_int'])
+
+ # mean / median
+ expected = pd.DataFrame(
+ {'category_int': [7.5, 9],
+ 'float': [4.5, 6.],
+ 'timedelta': [pd.Timedelta('1.5s'),
+ pd.Timedelta('3s')],
+ 'int': [1.5, 3],
+ 'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
+ pd.Timestamp('2013-01-03 00:00:00')],
+ 'datetimetz': [
+ pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
+ pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
+ index=Index([1, 2], name='group'),
+ columns=['int', 'float', 'category_int',
+ 'datetime', 'datetimetz', 'timedelta'])
+ for attr in ['mean', 'median']:
+ f = getattr(df.groupby('group'), attr)
+ result = f()
+ tm.assert_index_equal(result.columns, expected_columns_numeric)
+
+ result = f(numeric_only=False)
+ tm.assert_frame_equal(result.reindex_like(expected), expected)
+
+ # TODO: min, max *should* handle
+ # categorical (ordered) dtype
+ expected_columns = Index(['int', 'float', 'string',
+ 'category_int',
+ 'datetime', 'datetimetz',
+ 'timedelta'])
+ for attr in ['min', 'max']:
+ f = getattr(df.groupby('group'), attr)
+ result = f()
+ tm.assert_index_equal(result.columns, expected_columns)
+
+ result = f(numeric_only=False)
+ tm.assert_index_equal(result.columns, expected_columns)
+
+ expected_columns = Index(['int', 'float', 'string',
+ 'category_string', 'category_int',
+ 'datetime', 'datetimetz',
+ 'timedelta'])
+ for attr in ['first', 'last']:
+ f = getattr(df.groupby('group'), attr)
+ result = f()
+ tm.assert_index_equal(result.columns, expected_columns)
+
+ result = f(numeric_only=False)
+ tm.assert_index_equal(result.columns, expected_columns)
+
+ expected_columns = Index(['int', 'float', 'string',
+ 'category_int', 'timedelta'])
+ for attr in ['sum']:
+ f = getattr(df.groupby('group'), attr)
+ result = f()
+ tm.assert_index_equal(result.columns, expected_columns_numeric)
+
+ result = f(numeric_only=False)
+ tm.assert_index_equal(result.columns, expected_columns)
+
+ expected_columns = Index(['int', 'float', 'category_int'])
+ for attr in ['prod', 'cumprod']:
+ f = getattr(df.groupby('group'), attr)
+ result = f()
+ tm.assert_index_equal(result.columns, expected_columns_numeric)
+
+ result = f(numeric_only=False)
+ tm.assert_index_equal(result.columns, expected_columns)
+
+ # like min, max, but don't include strings
+ expected_columns = Index(['int', 'float',
+ 'category_int',
+ 'datetime', 'datetimetz',
+ 'timedelta'])
+ for attr in ['cummin', 'cummax']:
+ f = getattr(df.groupby('group'), attr)
+ result = f()
+ # GH 15561: numeric_only=False set by default like min/max
+ tm.assert_index_equal(result.columns, expected_columns)
+
+ result = f(numeric_only=False)
+ tm.assert_index_equal(result.columns, expected_columns)
+
+ expected_columns = Index(['int', 'float', 'category_int',
+ 'timedelta'])
+ for attr in ['cumsum']:
+ f = getattr(df.groupby('group'), attr)
+ result = f()
+ tm.assert_index_equal(result.columns, expected_columns_numeric)
+
+ result = f(numeric_only=False)
+ tm.assert_index_equal(result.columns, expected_columns)
+
+
+def test_non_cython_api():
+
+ # GH5610
+ # non-cython calls should not include the grouper
+
+ df = DataFrame(
+ [[1, 2, 'foo'],
+ [1, np.nan, 'bar'],
+ [3, np.nan, 'baz']],
+ columns=['A', 'B', 'C'])
+ g = df.groupby('A')
+ gni = df.groupby('A', as_index=False)
+
+ # mad
+ expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3])
+ expected.index.name = 'A'
+ result = g.mad()
+ tm.assert_frame_equal(result, expected)
+
+ expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'],
+ index=[0, 1])
+ result = gni.mad()
+ tm.assert_frame_equal(result, expected)
+
+ # describe
+ expected_index = pd.Index([1, 3], name='A')
+ expected_col = pd.MultiIndex(levels=[['B'],
+ ['count', 'mean', 'std', 'min',
+ '25%', '50%', '75%', 'max']],
+ labels=[[0] * 8, list(range(8))])
+ expected = pd.DataFrame([[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
+ [0.0, np.nan, np.nan, np.nan, np.nan, np.nan,
+ np.nan, np.nan]],
+ index=expected_index,
+ columns=expected_col)
+ result = g.describe()
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T,
+ df[df.A == 3].describe().unstack().to_frame().T])
+ expected.index = pd.Index([0, 1])
+ result = gni.describe()
+ tm.assert_frame_equal(result, expected)
+
+ # any
+ expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
+ index=[1, 3])
+ expected.index.name = 'A'
+ result = g.any()
+ tm.assert_frame_equal(result, expected)
+
+ # idxmax
+ expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3])
+ expected.index.name = 'A'
+ result = g.idxmax()
+ tm.assert_frame_equal(result, expected)
+
+
+def test_cython_api2():
+
+ # this takes the fast apply path
+
+ # cumsum (GH5614)
+ df = DataFrame(
+ [[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
+ ], columns=['A', 'B', 'C'])
+ expected = DataFrame(
+ [[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
+ result = df.groupby('A').cumsum()
+ tm.assert_frame_equal(result, expected)
+
+ # GH 5755 - cumsum is a transformer and should ignore as_index
+ result = df.groupby('A', as_index=False).cumsum()
+ tm.assert_frame_equal(result, expected)
+
+ # GH 13994
+ result = df.groupby('A').cumsum(axis=1)
+ expected = df.cumsum(axis=1)
+ tm.assert_frame_equal(result, expected)
+ result = df.groupby('A').cumprod(axis=1)
+ expected = df.cumprod(axis=1)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_cython_median():
+ df = DataFrame(np.random.randn(1000))
+ df.values[::2] = np.nan
+
+ labels = np.random.randint(0, 50, size=1000).astype(float)
+ labels[::17] = np.nan
+
+ result = df.groupby(labels).median()
+ exp = df.groupby(labels).agg(nanops.nanmedian)
+ tm.assert_frame_equal(result, exp)
+
+ df = DataFrame(np.random.randn(1000, 5))
+ rs = df.groupby(labels).agg(np.median)
+ xp = df.groupby(labels).median()
+ tm.assert_frame_equal(rs, xp)
+
+
+def test_median_empty_bins():
+ df = pd.DataFrame(np.random.randint(0, 44, 500))
+
+ grps = range(0, 55, 5)
+ bins = pd.cut(df[0], grps)
+
+ result = df.groupby(bins).median()
+ expected = df.groupby(bins).agg(lambda x: x.median())
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("dtype", [
+ 'int8', 'int16', 'int32', 'int64', 'float32', 'float64'])
+@pytest.mark.parametrize("method,data", [
+ ('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
+ ('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
+ ('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
+ ('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
+ ('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
+ 'args': [1]}),
+ ('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
+ 'out_type': 'int64'})
+])
+def test_groupby_non_arithmetic_agg_types(dtype, method, data):
+ # GH9311, GH6620
+ df = pd.DataFrame(
+ [{'a': 1, 'b': 1},
+ {'a': 1, 'b': 2},
+ {'a': 2, 'b': 3},
+ {'a': 2, 'b': 4}])
+
+ df['b'] = df.b.astype(dtype)
+
+ if 'args' not in data:
+ data['args'] = []
+
+ if 'out_type' in data:
+ out_type = data['out_type']
+ else:
+ out_type = dtype
+
+ exp = data['df']
+ df_out = pd.DataFrame(exp)
+
+ df_out['b'] = df_out.b.astype(out_type)
+ df_out.set_index('a', inplace=True)
+
+ grpd = df.groupby('a')
+ t = getattr(grpd, method)(*data['args'])
+ tm.assert_frame_equal(t, df_out)
+
+
+def test_groupby_non_arithmetic_agg_intlike_precision():
+ # GH9311, GH6620
+ c = 24650000000000000
+
+ inputs = ((Timestamp('2011-01-15 12:50:28.502376'),
+ Timestamp('2011-01-20 12:50:28.593448')), (1 + c, 2 + c))
+
+ for i in inputs:
+ df = pd.DataFrame([{'a': 1, 'b': i[0]}, {'a': 1, 'b': i[1]}])
+
+ grp_exp = {'first': {'expected': i[0]},
+ 'last': {'expected': i[1]},
+ 'min': {'expected': i[0]},
+ 'max': {'expected': i[1]},
+ 'nth': {'expected': i[1],
+ 'args': [1]},
+ 'count': {'expected': 2}}
+
+ for method, data in compat.iteritems(grp_exp):
+ if 'args' not in data:
+ data['args'] = []
+
+ grpd = df.groupby('a')
+ res = getattr(grpd, method)(*data['args'])
+ assert res.iloc[0].b == data['expected']
+
+
+def test_fill_consistency():
+
+ # GH9221
+ # pass thru keyword arguments to the generated wrapper
+ # are set if the passed kw is None (only)
+ df = DataFrame(index=pd.MultiIndex.from_product(
+ [['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]),
+ columns=Index(
+ ['1', '2'], name='id'))
+ df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan,
+ np.nan, 22, np.nan]
+ df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan,
+ np.nan, 44, np.nan]
+
+ expected = df.groupby(level=0, axis=0).fillna(method='ffill')
+ result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
+ tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_cumprod():
+ # GH 4095
+ df = pd.DataFrame({'key': ['b'] * 10, 'value': 2})
+
+ actual = df.groupby('key')['value'].cumprod()
+ expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
+ expected.name = 'value'
+ tm.assert_series_equal(actual, expected)
+
+ df = pd.DataFrame({'key': ['b'] * 100, 'value': 2})
+ actual = df.groupby('key')['value'].cumprod()
+ # if overflows, groupby product casts to float
+ # while numpy passes back invalid values
+ df['value'] = df['value'].astype(float)
+ expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
+ expected.name = 'value'
+ tm.assert_series_equal(actual, expected)
+
+
+def test_ops_general():
+ ops = [('mean', np.mean),
+ ('median', np.median),
+ ('std', np.std),
+ ('var', np.var),
+ ('sum', np.sum),
+ ('prod', np.prod),
+ ('min', np.min),
+ ('max', np.max),
+ ('first', lambda x: x.iloc[0]),
+ ('last', lambda x: x.iloc[-1]),
+ ('count', np.size), ]
+ try:
+ from scipy.stats import sem
+ except ImportError:
+ pass
+ else:
+ ops.append(('sem', sem))
+ df = DataFrame(np.random.randn(1000))
+ labels = np.random.randint(0, 50, size=1000).astype(float)
+
+ for op, targop in ops:
+ result = getattr(df.groupby(labels), op)().astype(float)
+ expected = df.groupby(labels).agg(targop)
+ try:
+ tm.assert_frame_equal(result, expected)
+ except BaseException as exc:
+ exc.args += ('operation: %s' % op, )
+ raise
+
+
+def test_max_nan_bug():
+ raw = """,Date,app,File
+-04-23,2013-04-23 00:00:00,,log080001.log
+-05-06,2013-05-06 00:00:00,,log.log
+-05-07,2013-05-07 00:00:00,OE,xlsx"""
+
+ df = pd.read_csv(compat.StringIO(raw), parse_dates=[0])
+ gb = df.groupby('Date')
+ r = gb[['File']].max()
+ e = gb['File'].max().to_frame()
+ tm.assert_frame_equal(r, e)
+ assert not r['File'].isna().any()
+
+
+def test_nlargest():
+ a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
+ b = Series(list('a' * 5 + 'b' * 5))
+ gb = a.groupby(b)
+ r = gb.nlargest(3)
+ e = Series([
+ 7, 5, 3, 10, 9, 6
+ ], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]]))
+ tm.assert_series_equal(r, e)
+
+ a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
+ gb = a.groupby(b)
+ e = Series([
+ 3, 2, 1, 3, 3, 2
+ ], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]]))
+ tm.assert_series_equal(gb.nlargest(3, keep='last'), e)
+
+
+def test_nsmallest():
+ a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
+ b = Series(list('a' * 5 + 'b' * 5))
+ gb = a.groupby(b)
+ r = gb.nsmallest(3)
+ e = Series([
+ 1, 2, 3, 0, 4, 6
+ ], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]]))
+ tm.assert_series_equal(r, e)
+
+ a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
+ gb = a.groupby(b)
+ e = Series([
+ 0, 1, 1, 0, 1, 2
+ ], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]]))
+ tm.assert_series_equal(gb.nsmallest(3, keep='last'), e)
+
+
+def test_numpy_compat():
+ # see gh-12811
+ df = pd.DataFrame({'A': [1, 2, 1], 'B': [1, 2, 3]})
+ g = df.groupby('A')
+
+ msg = "numpy operations are not valid with groupby"
+
+ for func in ('mean', 'var', 'std', 'cumprod', 'cumsum'):
+ tm.assert_raises_regex(UnsupportedFunctionCall, msg,
+ getattr(g, func), 1, 2, 3)
+ tm.assert_raises_regex(UnsupportedFunctionCall, msg,
+ getattr(g, func), foo=1)
+
+
+def test_cummin_cummax():
+ # GH 15048
+ num_types = [np.int32, np.int64, np.float32, np.float64]
+ num_mins = [np.iinfo(np.int32).min, np.iinfo(np.int64).min,
+ np.finfo(np.float32).min, np.finfo(np.float64).min]
+ num_max = [np.iinfo(np.int32).max, np.iinfo(np.int64).max,
+ np.finfo(np.float32).max, np.finfo(np.float64).max]
+ base_df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 2, 2],
+ 'B': [3, 4, 3, 2, 2, 3, 2, 1]})
+ expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
+ expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
+
+ for dtype, min_val, max_val in zip(num_types, num_mins, num_max):
+ df = base_df.astype(dtype)
+
+ # cummin
+ expected = pd.DataFrame({'B': expected_mins}).astype(dtype)
+ result = df.groupby('A').cummin()
+ tm.assert_frame_equal(result, expected)
+ result = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
+ tm.assert_frame_equal(result, expected)
+
+ # Test cummin w/ min value for dtype
+ df.loc[[2, 6], 'B'] = min_val
+ expected.loc[[2, 3, 6, 7], 'B'] = min_val
+ result = df.groupby('A').cummin()
+ tm.assert_frame_equal(result, expected)
+ expected = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
+ tm.assert_frame_equal(result, expected)
+
+ # cummax
+ expected = pd.DataFrame({'B': expected_maxs}).astype(dtype)
+ result = df.groupby('A').cummax()
+ tm.assert_frame_equal(result, expected)
+ result = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
+ tm.assert_frame_equal(result, expected)
+
+ # Test cummax w/ max value for dtype
+ df.loc[[2, 6], 'B'] = max_val
+ expected.loc[[2, 3, 6, 7], 'B'] = max_val
+ result = df.groupby('A').cummax()
+ tm.assert_frame_equal(result, expected)
+ expected = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
+ tm.assert_frame_equal(result, expected)
+
+ # Test nan in some values
+ base_df.loc[[0, 2, 4, 6], 'B'] = np.nan
+ expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 2,
+ np.nan, 3, np.nan, 1]})
+ result = base_df.groupby('A').cummin()
+ tm.assert_frame_equal(result, expected)
+ expected = (base_df.groupby('A')
+ .B
+ .apply(lambda x: x.cummin())
+ .to_frame())
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 4,
+ np.nan, 3, np.nan, 3]})
+ result = base_df.groupby('A').cummax()
+ tm.assert_frame_equal(result, expected)
+ expected = (base_df.groupby('A')
+ .B
+ .apply(lambda x: x.cummax())
+ .to_frame())
+ tm.assert_frame_equal(result, expected)
+
+ # Test nan in entire column
+ base_df['B'] = np.nan
+ expected = pd.DataFrame({'B': [np.nan] * 8})
+ result = base_df.groupby('A').cummin()
+ tm.assert_frame_equal(expected, result)
+ result = base_df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
+ tm.assert_frame_equal(expected, result)
+ result = base_df.groupby('A').cummax()
+ tm.assert_frame_equal(expected, result)
+ result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
+ tm.assert_frame_equal(expected, result)
+
+ # GH 15561
+ df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001'])))
+ expected = pd.Series(pd.to_datetime('2001'), index=[0], name='b')
+ for method in ['cummax', 'cummin']:
+ result = getattr(df.groupby('a')['b'], method)()
+ tm.assert_series_equal(expected, result)
+
+ # GH 15635
+ df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
+ result = df.groupby('a').b.cummax()
+ expected = pd.Series([2, 1, 2], name='b')
+ tm.assert_series_equal(result, expected)
+
+ df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
+ result = df.groupby('a').b.cummin()
+ expected = pd.Series([1, 2, 1], name='b')
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize('in_vals, out_vals', [
+
+ # Basics: strictly increasing (T), strictly decreasing (F),
+ # abs val increasing (F), non-strictly increasing (T)
+ ([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1],
+ [True, False, False, True]),
+
+ # Test with inf vals
+ ([1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
+ [True, False, True, False]),
+
+ # Test with nan vals; should always be False
+ ([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
+ [False, False, False, False]),
+])
+def test_is_monotonic_increasing(in_vals, out_vals):
+ # GH 17015
+ source_dict = {
+ 'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
+ 'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
+ 'C': in_vals}
+ df = pd.DataFrame(source_dict)
+ result = df.groupby('B').C.is_monotonic_increasing
+ index = Index(list('abcd'), name='B')
+ expected = pd.Series(index=index, data=out_vals, name='C')
+ tm.assert_series_equal(result, expected)
+
+ # Also check result equal to manually taking x.is_monotonic_increasing.
+ expected = (
+ df.groupby(['B']).C.apply(lambda x: x.is_monotonic_increasing))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize('in_vals, out_vals', [
+ # Basics: strictly decreasing (T), strictly increasing (F),
+ # abs val decreasing (F), non-strictly increasing (T)
+ ([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1],
+ [True, False, False, True]),
+
+ # Test with inf vals
+ ([np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
+ [True, True, False, True]),
+
+ # Test with nan vals; should always be False
+ ([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
+ [False, False, False, False]),
+])
+def test_is_monotonic_decreasing(in_vals, out_vals):
+ # GH 17015
+ source_dict = {
+ 'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
+ 'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
+ 'C': in_vals}
+
+ df = pd.DataFrame(source_dict)
+ result = df.groupby('B').C.is_monotonic_decreasing
+ index = Index(list('abcd'), name='B')
+ expected = pd.Series(index=index, data=out_vals, name='C')
+ tm.assert_series_equal(result, expected)
+
+
+# describe
+# --------------------------------
+
+def test_apply_describe_bug(mframe):
+ grouped = mframe.groupby(level='first')
+ grouped.describe() # it works!
+
+
+def test_series_describe_multikey():
+ ts = tm.makeTimeSeries()
+ grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
+ result = grouped.describe()
+ tm.assert_series_equal(result['mean'], grouped.mean(),
+ check_names=False)
+ tm.assert_series_equal(result['std'], grouped.std(), check_names=False)
+ tm.assert_series_equal(result['min'], grouped.min(), check_names=False)
+
+
+def test_series_describe_single():
+ ts = tm.makeTimeSeries()
+ grouped = ts.groupby(lambda x: x.month)
+ result = grouped.apply(lambda x: x.describe())
+ expected = grouped.describe().stack()
+ tm.assert_series_equal(result, expected)
+
+
+def test_series_index_name(df):
+ grouped = df.loc[:, ['C']].groupby(df['A'])
+ result = grouped.agg(lambda x: x.mean())
+ assert result.index.name == 'A'
+
+
+def test_frame_describe_multikey(tsframe):
+ grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
+ result = grouped.describe()
+ desc_groups = []
+ for col in tsframe:
+ group = grouped[col].describe()
+ # GH 17464 - Remove duplicate MultiIndex levels
+ group_col = pd.MultiIndex(
+ levels=[[col], group.columns],
+ labels=[[0] * len(group.columns), range(len(group.columns))])
+ group = pd.DataFrame(group.values,
+ columns=group_col,
+ index=group.index)
+ desc_groups.append(group)
+ expected = pd.concat(desc_groups, axis=1)
+ tm.assert_frame_equal(result, expected)
+
+ groupedT = tsframe.groupby({'A': 0, 'B': 0,
+ 'C': 1, 'D': 1}, axis=1)
+ result = groupedT.describe()
+ expected = tsframe.describe().T
+ expected.index = pd.MultiIndex(
+ levels=[[0, 1], expected.index],
+ labels=[[0, 0, 1, 1], range(len(expected.index))])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_frame_describe_tupleindex():
+
+ # GH 14848 - regression from 0.19.0 to 0.19.1
+ df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
+ 'y': [10, 20, 30, 40, 50] * 3,
+ 'z': [100, 200, 300, 400, 500] * 3})
+ df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
+ df2 = df1.rename(columns={'k': 'key'})
+ pytest.raises(ValueError, lambda: df1.groupby('k').describe())
+ pytest.raises(ValueError, lambda: df2.groupby('key').describe())
+
+
+def test_frame_describe_unstacked_format():
+ # GH 4792
+ prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
+ pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
+ pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
+ volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
+ pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
+ pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
+ df = pd.DataFrame({'PRICE': prices,
+ 'VOLUME': volumes})
+ result = df.groupby('PRICE').VOLUME.describe()
+ data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
+ df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
+ expected = pd.DataFrame(data,
+ index=pd.Index([24990, 25499], name='PRICE'),
+ columns=['count', 'mean', 'std', 'min',
+ '25%', '50%', '75%', 'max'])
+ tm.assert_frame_equal(result, expected)
+
+
+# nunique
+# --------------------------------
+
+@pytest.mark.parametrize("n, m", cart_product(10 ** np.arange(2, 6),
+ (10, 100, 1000)))
+@pytest.mark.parametrize("sort, dropna", cart_product((False, True), repeat=2))
+def test_series_groupby_nunique(n, m, sort, dropna):
+
+ def check_nunique(df, keys, as_index=True):
+ gr = df.groupby(keys, as_index=as_index, sort=sort)
+ left = gr['julie'].nunique(dropna=dropna)
+
+ gr = df.groupby(keys, as_index=as_index, sort=sort)
+ right = gr['julie'].apply(Series.nunique, dropna=dropna)
+ if not as_index:
+ right = right.reset_index(drop=True)
+
+ tm.assert_series_equal(left, right, check_names=False)
+
+ days = date_range('2015-08-23', periods=10)
+
+ frame = DataFrame({'jim': np.random.choice(list(ascii_lowercase), n),
+ 'joe': np.random.choice(days, n),
+ 'julie': np.random.randint(0, m, n)})
+
+ check_nunique(frame, ['jim'])
+ check_nunique(frame, ['jim', 'joe'])
+
+ frame.loc[1::17, 'jim'] = None
+ frame.loc[3::37, 'joe'] = None
+ frame.loc[7::19, 'julie'] = None
+ frame.loc[8::19, 'julie'] = None
+ frame.loc[9::19, 'julie'] = None
+
+ check_nunique(frame, ['jim'])
+ check_nunique(frame, ['jim', 'joe'])
+ check_nunique(frame, ['jim'], as_index=False)
+ check_nunique(frame, ['jim', 'joe'], as_index=False)
+
+
+def test_nunique():
+ df = DataFrame({
+ 'A': list('abbacc'),
+ 'B': list('abxacc'),
+ 'C': list('abbacx'),
+ })
+
+ expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
+ result = df.groupby('A', as_index=False).nunique()
+ tm.assert_frame_equal(result, expected)
+
+ # as_index
+ expected.index = list('abc')
+ expected.index.name = 'A'
+ result = df.groupby('A').nunique()
+ tm.assert_frame_equal(result, expected)
+
+ # with na
+ result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
+ tm.assert_frame_equal(result, expected)
+
+ # dropna
+ expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3},
+ index=list('abc'))
+ expected.index.name = 'A'
+ result = df.replace({'x': None}).groupby('A').nunique()
+ tm.assert_frame_equal(result, expected)
+
+
+def test_nunique_with_object():
+ # GH 11077
+ data = pd.DataFrame(
+ [[100, 1, 'Alice'],
+ [200, 2, 'Bob'],
+ [300, 3, 'Charlie'],
+ [-400, 4, 'Dan'],
+ [500, 5, 'Edith']],
+ columns=['amount', 'id', 'name']
+ )
+
+ result = data.groupby(['id', 'amount'])['name'].nunique()
+ index = MultiIndex.from_arrays([data.id, data.amount])
+ expected = pd.Series([1] * 5, name='name', index=index)
+ tm.assert_series_equal(result, expected)
+
+
+def test_nunique_with_empty_series():
+ # GH 12553
+ data = pd.Series(name='name')
+ result = data.groupby(level=0).nunique()
+ expected = pd.Series(name='name', dtype='int64')
+ tm.assert_series_equal(result, expected)
+
+
+def test_nunique_with_timegrouper():
+ # GH 13453
+ test = pd.DataFrame({
+ 'time': [Timestamp('2016-06-28 09:35:35'),
+ Timestamp('2016-06-28 16:09:30'),
+ Timestamp('2016-06-28 16:46:28')],
+ 'data': ['1', '2', '3']}).set_index('time')
+ result = test.groupby(pd.Grouper(freq='h'))['data'].nunique()
+ expected = test.groupby(
+ pd.Grouper(freq='h')
+ )['data'].apply(pd.Series.nunique)
+ tm.assert_series_equal(result, expected)
+
+
+# count
+# --------------------------------
+
+def test_groupby_timedelta_cython_count():
+ df = DataFrame({'g': list('ab' * 2),
+ 'delt': np.arange(4).astype('timedelta64[ns]')})
+ expected = Series([
+ 2, 2
+ ], index=pd.Index(['a', 'b'], name='g'), name='delt')
+ result = df.groupby('g').delt.count()
+ tm.assert_series_equal(expected, result)
+
+
+def test_count():
+ n = 1 << 15
+ dr = date_range('2015-08-30', periods=n // 10, freq='T')
+
+ df = DataFrame({
+ '1st': np.random.choice(
+ list(ascii_lowercase), n),
+ '2nd': np.random.randint(0, 5, n),
+ '3rd': np.random.randn(n).round(3),
+ '4th': np.random.randint(-10, 10, n),
+ '5th': np.random.choice(dr, n),
+ '6th': np.random.randn(n).round(3),
+ '7th': np.random.randn(n).round(3),
+ '8th': np.random.choice(dr, n) - np.random.choice(dr, 1),
+ '9th': np.random.choice(
+ list(ascii_lowercase), n)
+ })
+
+ for col in df.columns.drop(['1st', '2nd', '4th']):
+ df.loc[np.random.choice(n, n // 10), col] = np.nan
+
+ df['9th'] = df['9th'].astype('category')
+
+ for key in '1st', '2nd', ['1st', '2nd']:
+ left = df.groupby(key).count()
+ right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
+ tm.assert_frame_equal(left, right)
+
+ # GH5610
+ # count counts non-nulls
+ df = pd.DataFrame([[1, 2, 'foo'],
+ [1, np.nan, 'bar'],
+ [3, np.nan, np.nan]],
+ columns=['A', 'B', 'C'])
+
+ count_as = df.groupby('A').count()
+ count_not_as = df.groupby('A', as_index=False).count()
+
+ expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'],
+ index=[1, 3])
+ expected.index.name = 'A'
+ tm.assert_frame_equal(count_not_as, expected.reset_index())
+ tm.assert_frame_equal(count_as, expected)
+
+ count_B = df.groupby('A')['B'].count()
+ tm.assert_series_equal(count_B, expected['B'])
+
+
+def test_count_object():
+ df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3, 'c': [2] * 3 + [3] * 3})
+ result = df.groupby('c').a.count()
+ expected = pd.Series([
+ 3, 3
+ ], index=pd.Index([2, 3], name='c'), name='a')
+ tm.assert_series_equal(result, expected)
+
+ df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3,
+ 'c': [2] * 3 + [3] * 3})
+ result = df.groupby('c').a.count()
+ expected = pd.Series([
+ 1, 3
+ ], index=pd.Index([2, 3], name='c'), name='a')
+ tm.assert_series_equal(result, expected)
+
+
+def test_count_cross_type():
+ # GH8169
+ vals = np.hstack((np.random.randint(0, 5, (100, 2)), np.random.randint(
+ 0, 2, (100, 2))))
+
+ df = pd.DataFrame(vals, columns=['a', 'b', 'c', 'd'])
+ df[df == 2] = np.nan
+ expected = df.groupby(['c', 'd']).count()
+
+ for t in ['float32', 'object']:
+ df['a'] = df['a'].astype(t)
+ df['b'] = df['b'].astype(t)
+ result = df.groupby(['c', 'd']).count()
+ tm.assert_frame_equal(result, expected)
+
+
+def test_lower_int_prec_count():
+ df = DataFrame({'a': np.array(
+ [0, 1, 2, 100], np.int8),
+ 'b': np.array(
+ [1, 2, 3, 6], np.uint32),
+ 'c': np.array(
+ [4, 5, 6, 8], np.int16),
+ 'grp': list('ab' * 2)})
+ result = df.groupby('grp').count()
+ expected = DataFrame({'a': [2, 2],
+ 'b': [2, 2],
+ 'c': [2, 2]}, index=pd.Index(list('ab'),
+ name='grp'))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_count_uses_size_on_exception():
+ class RaisingObjectException(Exception):
+ pass
+
+ class RaisingObject(object):
+
+ def __init__(self, msg='I will raise inside Cython'):
+ super(RaisingObject, self).__init__()
+ self.msg = msg
+
+ def __eq__(self, other):
+ # gets called in Cython to check that raising calls the method
+ raise RaisingObjectException(self.msg)
+
+ df = DataFrame({'a': [RaisingObject() for _ in range(4)],
+ 'grp': list('ab' * 2)})
+ result = df.groupby('grp').count()
+ expected = DataFrame({'a': [2, 2]}, index=pd.Index(
+ list('ab'), name='grp'))
+ tm.assert_frame_equal(result, expected)
+
+
+# size
+# --------------------------------
+
+def test_size(df):
+ grouped = df.groupby(['A', 'B'])
+ result = grouped.size()
+ for key, group in grouped:
+ assert result[key] == len(group)
+
+ grouped = df.groupby('A')
+ result = grouped.size()
+ for key, group in grouped:
+ assert result[key] == len(group)
+
+ grouped = df.groupby('B')
+ result = grouped.size()
+ for key, group in grouped:
+ assert result[key] == len(group)
+
+ df = DataFrame(np.random.choice(20, (1000, 3)), columns=list('abc'))
+ for sort, key in cart_product((False, True), ('a', 'b', ['a', 'b'])):
+ left = df.groupby(key, sort=sort).size()
+ right = df.groupby(key, sort=sort)['c'].apply(lambda a: a.shape[0])
+ tm.assert_series_equal(left, right, check_names=False)
+
+ # GH11699
+ df = DataFrame([], columns=['A', 'B'])
+ out = Series([], dtype='int64', index=Index([], name='A'))
+ tm.assert_series_equal(df.groupby('A').size(), out)
+
+
+# pipe
+# --------------------------------
+
+def test_pipe():
+ # Test the pipe method of DataFrameGroupBy.
+ # Issue #17871
+
+ random_state = np.random.RandomState(1234567890)
+
+ df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
+ 'foo', 'bar', 'foo', 'foo'],
+ 'B': random_state.randn(8),
+ 'C': random_state.randn(8)})
+
+ def f(dfgb):
+ return dfgb.B.max() - dfgb.C.min().min()
+
+ def square(srs):
+ return srs ** 2
+
+ # Note that the transformations are
+ # GroupBy -> Series
+ # Series -> Series
+ # This then chains the GroupBy.pipe and the
+ # NDFrame.pipe methods
+ result = df.groupby('A').pipe(f).pipe(square)
+
+ index = Index([u'bar', u'foo'], dtype='object', name=u'A')
+ expected = pd.Series([8.99110003361, 8.17516964785], name='B',
+ index=index)
+
+ tm.assert_series_equal(expected, result)
+
+
+def test_pipe_args():
+ # Test passing args to the pipe method of DataFrameGroupBy.
+ # Issue #17871
+
+ df = pd.DataFrame({'group': ['A', 'A', 'B', 'B', 'C'],
+ 'x': [1.0, 2.0, 3.0, 2.0, 5.0],
+ 'y': [10.0, 100.0, 1000.0, -100.0, -1000.0]})
+
+ def f(dfgb, arg1):
+ return (dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False)
+ .groupby(dfgb.grouper))
+
+ def g(dfgb, arg2):
+ return dfgb.sum() / dfgb.sum().sum() + arg2
+
+ def h(df, arg3):
+ return df.x + df.y - arg3
+
+ result = (df
+ .groupby('group')
+ .pipe(f, 0)
+ .pipe(g, 10)
+ .pipe(h, 100))
+
+ # Assert the results here
+ index = pd.Index(['A', 'B', 'C'], name='group')
+ expected = pd.Series([-79.5160891089, -78.4839108911, -80],
+ index=index)
+
+ tm.assert_series_equal(expected, result)
+
+ # test SeriesGroupby.pipe
+ ser = pd.Series([1, 1, 2, 2, 3, 3])
+ result = ser.groupby(ser).pipe(lambda grp: grp.sum() * grp.count())
+
+ expected = pd.Series([4, 8, 12], index=pd.Int64Index([1, 2, 3]))
+
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_functional.py b/pandas/tests/groupby/test_functional.py
deleted file mode 100644
index b9718663570bd..0000000000000
--- a/pandas/tests/groupby/test_functional.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# -*- coding: utf-8 -*-
-
-""" test function application """
-
-import pytest
-
-from string import ascii_lowercase
-from pandas import (date_range, Timestamp,
- Index, MultiIndex, DataFrame, Series)
-from pandas.util.testing import assert_frame_equal, assert_series_equal
-from pandas.compat import product as cart_product
-
-import numpy as np
-
-import pandas.util.testing as tm
-import pandas as pd
-from .common import MixIn
-
-
-# describe
-# --------------------------------
-
-class TestDescribe(MixIn):
-
- def test_apply_describe_bug(self):
- grouped = self.mframe.groupby(level='first')
- grouped.describe() # it works!
-
- def test_series_describe_multikey(self):
- ts = tm.makeTimeSeries()
- grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
- result = grouped.describe()
- assert_series_equal(result['mean'], grouped.mean(), check_names=False)
- assert_series_equal(result['std'], grouped.std(), check_names=False)
- assert_series_equal(result['min'], grouped.min(), check_names=False)
-
- def test_series_describe_single(self):
- ts = tm.makeTimeSeries()
- grouped = ts.groupby(lambda x: x.month)
- result = grouped.apply(lambda x: x.describe())
- expected = grouped.describe().stack()
- assert_series_equal(result, expected)
-
- def test_series_index_name(self):
- grouped = self.df.loc[:, ['C']].groupby(self.df['A'])
- result = grouped.agg(lambda x: x.mean())
- assert result.index.name == 'A'
-
- def test_frame_describe_multikey(self):
- grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
- result = grouped.describe()
- desc_groups = []
- for col in self.tsframe:
- group = grouped[col].describe()
- # GH 17464 - Remove duplicate MultiIndex levels
- group_col = pd.MultiIndex(
- levels=[[col], group.columns],
- labels=[[0] * len(group.columns), range(len(group.columns))])
- group = pd.DataFrame(group.values,
- columns=group_col,
- index=group.index)
- desc_groups.append(group)
- expected = pd.concat(desc_groups, axis=1)
- tm.assert_frame_equal(result, expected)
-
- groupedT = self.tsframe.groupby({'A': 0, 'B': 0,
- 'C': 1, 'D': 1}, axis=1)
- result = groupedT.describe()
- expected = self.tsframe.describe().T
- expected.index = pd.MultiIndex(
- levels=[[0, 1], expected.index],
- labels=[[0, 0, 1, 1], range(len(expected.index))])
- tm.assert_frame_equal(result, expected)
-
- def test_frame_describe_tupleindex(self):
-
- # GH 14848 - regression from 0.19.0 to 0.19.1
- df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
- 'y': [10, 20, 30, 40, 50] * 3,
- 'z': [100, 200, 300, 400, 500] * 3})
- df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
- df2 = df1.rename(columns={'k': 'key'})
- pytest.raises(ValueError, lambda: df1.groupby('k').describe())
- pytest.raises(ValueError, lambda: df2.groupby('key').describe())
-
- def test_frame_describe_unstacked_format(self):
- # GH 4792
- prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
- pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
- pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
- volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
- pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
- pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
- df = pd.DataFrame({'PRICE': prices,
- 'VOLUME': volumes})
- result = df.groupby('PRICE').VOLUME.describe()
- data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
- df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
- expected = pd.DataFrame(data,
- index=pd.Index([24990, 25499], name='PRICE'),
- columns=['count', 'mean', 'std', 'min',
- '25%', '50%', '75%', 'max'])
- tm.assert_frame_equal(result, expected)
-
-
-# nunique
-# --------------------------------
-
-class TestNUnique(MixIn):
-
- def test_series_groupby_nunique(self):
-
- def check_nunique(df, keys, as_index=True):
- for sort, dropna in cart_product((False, True), repeat=2):
- gr = df.groupby(keys, as_index=as_index, sort=sort)
- left = gr['julie'].nunique(dropna=dropna)
-
- gr = df.groupby(keys, as_index=as_index, sort=sort)
- right = gr['julie'].apply(Series.nunique, dropna=dropna)
- if not as_index:
- right = right.reset_index(drop=True)
-
- assert_series_equal(left, right, check_names=False)
-
- days = date_range('2015-08-23', periods=10)
-
- for n, m in cart_product(10 ** np.arange(2, 6), (10, 100, 1000)):
- frame = DataFrame({
- 'jim': np.random.choice(
- list(ascii_lowercase), n),
- 'joe': np.random.choice(days, n),
- 'julie': np.random.randint(0, m, n)
- })
-
- check_nunique(frame, ['jim'])
- check_nunique(frame, ['jim', 'joe'])
-
- frame.loc[1::17, 'jim'] = None
- frame.loc[3::37, 'joe'] = None
- frame.loc[7::19, 'julie'] = None
- frame.loc[8::19, 'julie'] = None
- frame.loc[9::19, 'julie'] = None
-
- check_nunique(frame, ['jim'])
- check_nunique(frame, ['jim', 'joe'])
- check_nunique(frame, ['jim'], as_index=False)
- check_nunique(frame, ['jim', 'joe'], as_index=False)
-
- def test_nunique(self):
- df = DataFrame({
- 'A': list('abbacc'),
- 'B': list('abxacc'),
- 'C': list('abbacx'),
- })
-
- expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
- result = df.groupby('A', as_index=False).nunique()
- tm.assert_frame_equal(result, expected)
-
- # as_index
- expected.index = list('abc')
- expected.index.name = 'A'
- result = df.groupby('A').nunique()
- tm.assert_frame_equal(result, expected)
-
- # with na
- result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
- tm.assert_frame_equal(result, expected)
-
- # dropna
- expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3},
- index=list('abc'))
- expected.index.name = 'A'
- result = df.replace({'x': None}).groupby('A').nunique()
- tm.assert_frame_equal(result, expected)
-
- def test_nunique_with_object(self):
- # GH 11077
- data = pd.DataFrame(
- [[100, 1, 'Alice'],
- [200, 2, 'Bob'],
- [300, 3, 'Charlie'],
- [-400, 4, 'Dan'],
- [500, 5, 'Edith']],
- columns=['amount', 'id', 'name']
- )
-
- result = data.groupby(['id', 'amount'])['name'].nunique()
- index = MultiIndex.from_arrays([data.id, data.amount])
- expected = pd.Series([1] * 5, name='name', index=index)
- tm.assert_series_equal(result, expected)
-
- def test_nunique_with_empty_series(self):
- # GH 12553
- data = pd.Series(name='name')
- result = data.groupby(level=0).nunique()
- expected = pd.Series(name='name', dtype='int64')
- tm.assert_series_equal(result, expected)
-
- def test_nunique_with_timegrouper(self):
- # GH 13453
- test = pd.DataFrame({
- 'time': [Timestamp('2016-06-28 09:35:35'),
- Timestamp('2016-06-28 16:09:30'),
- Timestamp('2016-06-28 16:46:28')],
- 'data': ['1', '2', '3']}).set_index('time')
- result = test.groupby(pd.Grouper(freq='h'))['data'].nunique()
- expected = test.groupby(
- pd.Grouper(freq='h')
- )['data'].apply(pd.Series.nunique)
- tm.assert_series_equal(result, expected)
-
-
-# count
-# --------------------------------
-
-class TestCount(MixIn):
-
- def test_groupby_timedelta_cython_count(self):
- df = DataFrame({'g': list('ab' * 2),
- 'delt': np.arange(4).astype('timedelta64[ns]')})
- expected = Series([
- 2, 2
- ], index=pd.Index(['a', 'b'], name='g'), name='delt')
- result = df.groupby('g').delt.count()
- tm.assert_series_equal(expected, result)
-
- def test_count(self):
- n = 1 << 15
- dr = date_range('2015-08-30', periods=n // 10, freq='T')
-
- df = DataFrame({
- '1st': np.random.choice(
- list(ascii_lowercase), n),
- '2nd': np.random.randint(0, 5, n),
- '3rd': np.random.randn(n).round(3),
- '4th': np.random.randint(-10, 10, n),
- '5th': np.random.choice(dr, n),
- '6th': np.random.randn(n).round(3),
- '7th': np.random.randn(n).round(3),
- '8th': np.random.choice(dr, n) - np.random.choice(dr, 1),
- '9th': np.random.choice(
- list(ascii_lowercase), n)
- })
-
- for col in df.columns.drop(['1st', '2nd', '4th']):
- df.loc[np.random.choice(n, n // 10), col] = np.nan
-
- df['9th'] = df['9th'].astype('category')
-
- for key in '1st', '2nd', ['1st', '2nd']:
- left = df.groupby(key).count()
- right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
- assert_frame_equal(left, right)
-
- # GH5610
- # count counts non-nulls
- df = pd.DataFrame([[1, 2, 'foo'],
- [1, np.nan, 'bar'],
- [3, np.nan, np.nan]],
- columns=['A', 'B', 'C'])
-
- count_as = df.groupby('A').count()
- count_not_as = df.groupby('A', as_index=False).count()
-
- expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'],
- index=[1, 3])
- expected.index.name = 'A'
- assert_frame_equal(count_not_as, expected.reset_index())
- assert_frame_equal(count_as, expected)
-
- count_B = df.groupby('A')['B'].count()
- assert_series_equal(count_B, expected['B'])
-
- def test_count_object(self):
- df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3, 'c': [2] * 3 + [3] * 3})
- result = df.groupby('c').a.count()
- expected = pd.Series([
- 3, 3
- ], index=pd.Index([2, 3], name='c'), name='a')
- tm.assert_series_equal(result, expected)
-
- df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3,
- 'c': [2] * 3 + [3] * 3})
- result = df.groupby('c').a.count()
- expected = pd.Series([
- 1, 3
- ], index=pd.Index([2, 3], name='c'), name='a')
- tm.assert_series_equal(result, expected)
-
- def test_count_cross_type(self): # GH8169
- vals = np.hstack((np.random.randint(0, 5, (100, 2)), np.random.randint(
- 0, 2, (100, 2))))
-
- df = pd.DataFrame(vals, columns=['a', 'b', 'c', 'd'])
- df[df == 2] = np.nan
- expected = df.groupby(['c', 'd']).count()
-
- for t in ['float32', 'object']:
- df['a'] = df['a'].astype(t)
- df['b'] = df['b'].astype(t)
- result = df.groupby(['c', 'd']).count()
- tm.assert_frame_equal(result, expected)
-
- def test_lower_int_prec_count(self):
- df = DataFrame({'a': np.array(
- [0, 1, 2, 100], np.int8),
- 'b': np.array(
- [1, 2, 3, 6], np.uint32),
- 'c': np.array(
- [4, 5, 6, 8], np.int16),
- 'grp': list('ab' * 2)})
- result = df.groupby('grp').count()
- expected = DataFrame({'a': [2, 2],
- 'b': [2, 2],
- 'c': [2, 2]}, index=pd.Index(list('ab'),
- name='grp'))
- tm.assert_frame_equal(result, expected)
-
- def test_count_uses_size_on_exception(self):
- class RaisingObjectException(Exception):
- pass
-
- class RaisingObject(object):
-
- def __init__(self, msg='I will raise inside Cython'):
- super(RaisingObject, self).__init__()
- self.msg = msg
-
- def __eq__(self, other):
- # gets called in Cython to check that raising calls the method
- raise RaisingObjectException(self.msg)
-
- df = DataFrame({'a': [RaisingObject() for _ in range(4)],
- 'grp': list('ab' * 2)})
- result = df.groupby('grp').count()
- expected = DataFrame({'a': [2, 2]}, index=pd.Index(
- list('ab'), name='grp'))
- tm.assert_frame_equal(result, expected)
-
-
-# size
-# --------------------------------
-
-class TestSize(MixIn):
-
- def test_size(self):
- grouped = self.df.groupby(['A', 'B'])
- result = grouped.size()
- for key, group in grouped:
- assert result[key] == len(group)
-
- grouped = self.df.groupby('A')
- result = grouped.size()
- for key, group in grouped:
- assert result[key] == len(group)
-
- grouped = self.df.groupby('B')
- result = grouped.size()
- for key, group in grouped:
- assert result[key] == len(group)
-
- df = DataFrame(np.random.choice(20, (1000, 3)), columns=list('abc'))
- for sort, key in cart_product((False, True), ('a', 'b', ['a', 'b'])):
- left = df.groupby(key, sort=sort).size()
- right = df.groupby(key, sort=sort)['c'].apply(lambda a: a.shape[0])
- assert_series_equal(left, right, check_names=False)
-
- # GH11699
- df = DataFrame([], columns=['A', 'B'])
- out = Series([], dtype='int64', index=Index([], name='A'))
- assert_series_equal(df.groupby('A').size(), out)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index c3400b6b710e5..bb892f92f213e 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -5,3090 +5,1672 @@
from warnings import catch_warnings
from datetime import datetime
+from decimal import Decimal
-from pandas import (date_range, bdate_range, Timestamp,
+from pandas import (date_range, Timestamp,
Index, MultiIndex, DataFrame, Series,
- concat, Panel, DatetimeIndex, read_csv)
-from pandas.core.dtypes.missing import isna
-from pandas.errors import UnsupportedFunctionCall, PerformanceWarning
-from pandas.util.testing import (assert_frame_equal, assert_index_equal,
+ Panel, DatetimeIndex, read_csv)
+from pandas.errors import PerformanceWarning
+from pandas.util.testing import (assert_frame_equal,
assert_series_equal, assert_almost_equal)
from pandas.compat import (range, lrange, StringIO, lmap, lzip, map, zip,
- builtins, OrderedDict)
+ OrderedDict)
from pandas import compat
from collections import defaultdict
import pandas.core.common as com
import numpy as np
-import pandas.core.nanops as nanops
import pandas.util.testing as tm
import pandas as pd
-from .common import MixIn
-class TestGrouper(object):
+def test_repr():
+ # GH18203
+ result = repr(pd.Grouper(key='A', level='B'))
+ expected = "Grouper(key='A', level='B', axis=0, sort=False)"
+ assert result == expected
- def test_repr(self):
- # GH18203
- result = repr(pd.Grouper(key='A', level='B'))
- expected = "Grouper(key='A', level='B', axis=0, sort=False)"
- assert result == expected
+@pytest.mark.parametrize('dtype', ['int64', 'int32', 'float64', 'float32'])
+def test_basic(dtype):
-class TestGroupBy(MixIn):
+ data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
- def test_basic(self):
- def checkit(dtype):
- data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
+ index = np.arange(9)
+ np.random.shuffle(index)
+ data = data.reindex(index)
- index = np.arange(9)
- np.random.shuffle(index)
- data = data.reindex(index)
+ grouped = data.groupby(lambda x: x // 3)
- grouped = data.groupby(lambda x: x // 3)
+ for k, v in grouped:
+ assert len(v) == 3
- for k, v in grouped:
- assert len(v) == 3
+ agged = grouped.aggregate(np.mean)
+ assert agged[1] == 1
- agged = grouped.aggregate(np.mean)
- assert agged[1] == 1
+ assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
+ assert_series_equal(agged, grouped.mean())
+ assert_series_equal(grouped.agg(np.sum), grouped.sum())
- assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
- assert_series_equal(agged, grouped.mean())
- assert_series_equal(grouped.agg(np.sum), grouped.sum())
+ expected = grouped.apply(lambda x: x * x.sum())
+ transformed = grouped.transform(lambda x: x * x.sum())
+ assert transformed[7] == 12
+ assert_series_equal(transformed, expected)
- expected = grouped.apply(lambda x: x * x.sum())
- transformed = grouped.transform(lambda x: x * x.sum())
- assert transformed[7] == 12
- assert_series_equal(transformed, expected)
+ value_grouped = data.groupby(data)
+ assert_series_equal(value_grouped.aggregate(np.mean), agged,
+ check_index_type=False)
- value_grouped = data.groupby(data)
- assert_series_equal(value_grouped.aggregate(np.mean), agged,
- check_index_type=False)
+ # complex agg
+ agged = grouped.aggregate([np.mean, np.std])
- # complex agg
- agged = grouped.aggregate([np.mean, np.std])
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ agged = grouped.aggregate({'one': np.mean, 'two': np.std})
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- agged = grouped.aggregate({'one': np.mean, 'two': np.std})
+ group_constants = {0: 10, 1: 20, 2: 30}
+ agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
+ assert agged[1] == 21
- group_constants = {0: 10, 1: 20, 2: 30}
- agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
- assert agged[1] == 21
+ # corner cases
+ pytest.raises(Exception, grouped.aggregate, lambda x: x * 2)
- # corner cases
- pytest.raises(Exception, grouped.aggregate, lambda x: x * 2)
- for dtype in ['int64', 'int32', 'float64', 'float32']:
- checkit(dtype)
+def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
+ key = mframe.index.labels[0]
+ grouped = mframe.groupby(key)
+ result = grouped.sum()
- def test_groupby_nonobject_dtype(self):
- key = self.mframe.index.labels[0]
- grouped = self.mframe.groupby(key)
- result = grouped.sum()
+ expected = mframe.groupby(key.astype('O')).sum()
+ assert_frame_equal(result, expected)
- expected = self.mframe.groupby(key.astype('O')).sum()
- assert_frame_equal(result, expected)
+ # GH 3911, mixed frame non-conversion
+ df = df_mixed_floats.copy()
+ df['value'] = lrange(len(df))
- # GH 3911, mixed frame non-conversion
- df = self.df_mixed_floats.copy()
- df['value'] = lrange(len(df))
+ def max_value(group):
+ return group.loc[group['value'].idxmax()]
- def max_value(group):
- return group.loc[group['value'].idxmax()]
+ applied = df.groupby('A').apply(max_value)
+ result = applied.get_dtype_counts().sort_values()
+ expected = Series({'float64': 2,
+ 'int64': 1,
+ 'object': 2}).sort_values()
+ assert_series_equal(result, expected)
- applied = df.groupby('A').apply(max_value)
- result = applied.get_dtype_counts().sort_values()
- expected = Series({'float64': 2,
- 'int64': 1,
- 'object': 2}).sort_values()
- assert_series_equal(result, expected)
- def test_groupby_return_type(self):
+def test_groupby_return_type():
- # GH2893, return a reduced type
- df1 = DataFrame(
- [{"val1": 1, "val2": 20},
- {"val1": 1, "val2": 19},
- {"val1": 2, "val2": 27},
- {"val1": 2, "val2": 12}
- ])
+ # GH2893, return a reduced type
+ df1 = DataFrame(
+ [{"val1": 1, "val2": 20},
+ {"val1": 1, "val2": 19},
+ {"val1": 2, "val2": 27},
+ {"val1": 2, "val2": 12}
+ ])
- def func(dataf):
- return dataf["val2"] - dataf["val2"].mean()
+ def func(dataf):
+ return dataf["val2"] - dataf["val2"].mean()
- result = df1.groupby("val1", squeeze=True).apply(func)
- assert isinstance(result, Series)
+ result = df1.groupby("val1", squeeze=True).apply(func)
+ assert isinstance(result, Series)
- df2 = DataFrame(
- [{"val1": 1, "val2": 20},
- {"val1": 1, "val2": 19},
- {"val1": 1, "val2": 27},
- {"val1": 1, "val2": 12}
- ])
+ df2 = DataFrame(
+ [{"val1": 1, "val2": 20},
+ {"val1": 1, "val2": 19},
+ {"val1": 1, "val2": 27},
+ {"val1": 1, "val2": 12}
+ ])
- def func(dataf):
- return dataf["val2"] - dataf["val2"].mean()
+ def func(dataf):
+ return dataf["val2"] - dataf["val2"].mean()
+
+ result = df2.groupby("val1", squeeze=True).apply(func)
+ assert isinstance(result, Series)
- result = df2.groupby("val1", squeeze=True).apply(func)
- assert isinstance(result, Series)
+ # GH3596, return a consistent type (regression in 0.11 from 0.10.1)
+ df = DataFrame([[1, 1], [1, 1]], columns=['X', 'Y'])
+ result = df.groupby('X', squeeze=False).count()
+ assert isinstance(result, DataFrame)
+
+ # GH5592
+ # inconcistent return type
+ df = DataFrame(dict(A=['Tiger', 'Tiger', 'Tiger', 'Lamb', 'Lamb',
+ 'Pony', 'Pony'], B=Series(
+ np.arange(7), dtype='int64'), C=date_range(
+ '20130101', periods=7)))
+
+ def f(grp):
+ return grp.iloc[0]
+
+ expected = df.groupby('A').first()[['B']]
+ result = df.groupby('A').apply(f)[['B']]
+ assert_frame_equal(result, expected)
+
+ def f(grp):
+ if grp.name == 'Tiger':
+ return None
+ return grp.iloc[0]
+
+ result = df.groupby('A').apply(f)[['B']]
+ e = expected.copy()
+ e.loc['Tiger'] = np.nan
+ assert_frame_equal(result, e)
+
+ def f(grp):
+ if grp.name == 'Pony':
+ return None
+ return grp.iloc[0]
+
+ result = df.groupby('A').apply(f)[['B']]
+ e = expected.copy()
+ e.loc['Pony'] = np.nan
+ assert_frame_equal(result, e)
+
+ # 5592 revisited, with datetimes
+ def f(grp):
+ if grp.name == 'Pony':
+ return None
+ return grp.iloc[0]
+
+ result = df.groupby('A').apply(f)[['C']]
+ e = df.groupby('A').first()[['C']]
+ e.loc['Pony'] = pd.NaT
+ assert_frame_equal(result, e)
+
+ # scalar outputs
+ def f(grp):
+ if grp.name == 'Pony':
+ return None
+ return grp.iloc[0].loc['C']
+
+ result = df.groupby('A').apply(f)
+ e = df.groupby('A').first()['C'].copy()
+ e.loc['Pony'] = np.nan
+ e.name = None
+ assert_series_equal(result, e)
- # GH3596, return a consistent type (regression in 0.11 from 0.10.1)
- df = DataFrame([[1, 1], [1, 1]], columns=['X', 'Y'])
- result = df.groupby('X', squeeze=False).count()
- assert isinstance(result, DataFrame)
- # GH5592
- # inconcistent return type
- df = DataFrame(dict(A=['Tiger', 'Tiger', 'Tiger', 'Lamb', 'Lamb',
- 'Pony', 'Pony'], B=Series(
- np.arange(7), dtype='int64'), C=date_range(
- '20130101', periods=7)))
+def test_pass_args_kwargs(ts, tsframe):
- def f(grp):
- return grp.iloc[0]
+ def f(x, q=None, axis=0):
+ return np.percentile(x, q, axis=axis)
- expected = df.groupby('A').first()[['B']]
- result = df.groupby('A').apply(f)[['B']]
- assert_frame_equal(result, expected)
+ g = lambda x: np.percentile(x, 80, axis=0)
- def f(grp):
- if grp.name == 'Tiger':
- return None
- return grp.iloc[0]
+ # Series
+ ts_grouped = ts.groupby(lambda x: x.month)
+ agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
+ apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
+ trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
+
+ agg_expected = ts_grouped.quantile(.8)
+ trans_expected = ts_grouped.transform(g)
+
+ assert_series_equal(apply_result, agg_expected)
+ assert_series_equal(agg_result, agg_expected, check_names=False)
+ assert_series_equal(trans_result, trans_expected)
+
+ agg_result = ts_grouped.agg(f, q=80)
+ apply_result = ts_grouped.apply(f, q=80)
+ trans_result = ts_grouped.transform(f, q=80)
+ assert_series_equal(agg_result, agg_expected)
+ assert_series_equal(apply_result, agg_expected)
+ assert_series_equal(trans_result, trans_expected)
+
+ # DataFrame
+ df_grouped = tsframe.groupby(lambda x: x.month)
+ agg_result = df_grouped.agg(np.percentile, 80, axis=0)
+ apply_result = df_grouped.apply(DataFrame.quantile, .8)
+ expected = df_grouped.quantile(.8)
+ assert_frame_equal(apply_result, expected)
+ assert_frame_equal(agg_result, expected, check_names=False)
+
+ agg_result = df_grouped.agg(f, q=80)
+ apply_result = df_grouped.apply(DataFrame.quantile, q=.8)
+ assert_frame_equal(agg_result, expected, check_names=False)
+ assert_frame_equal(apply_result, expected)
+
+
+def test_len():
+ df = tm.makeTimeDataFrame()
+ grouped = df.groupby([lambda x: x.year, lambda x: x.month,
+ lambda x: x.day])
+ assert len(grouped) == len(df)
- result = df.groupby('A').apply(f)[['B']]
- e = expected.copy()
- e.loc['Tiger'] = np.nan
- assert_frame_equal(result, e)
+ grouped = df.groupby([lambda x: x.year, lambda x: x.month])
+ expected = len({(x.year, x.month) for x in df.index})
+ assert len(grouped) == expected
- def f(grp):
- if grp.name == 'Pony':
- return None
- return grp.iloc[0]
+ # issue 11016
+ df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
+ assert len(df.groupby(('a'))) == 0
+ assert len(df.groupby(('b'))) == 3
+ assert len(df.groupby(['a', 'b'])) == 3
+
+
+def test_basic_regression():
+ # regression
+ T = [1.0 * x for x in lrange(1, 10) * 10][:1095]
+ result = Series(T, lrange(0, len(T)))
- result = df.groupby('A').apply(f)[['B']]
- e = expected.copy()
- e.loc['Pony'] = np.nan
- assert_frame_equal(result, e)
+ groupings = np.random.random((1100, ))
+ groupings = Series(groupings, lrange(0, len(groupings))) * 10.
- # 5592 revisited, with datetimes
- def f(grp):
- if grp.name == 'Pony':
- return None
- return grp.iloc[0]
+ grouped = result.groupby(groupings)
+ grouped.mean()
- result = df.groupby('A').apply(f)[['C']]
- e = df.groupby('A').first()[['C']]
- e.loc['Pony'] = pd.NaT
- assert_frame_equal(result, e)
- # scalar outputs
- def f(grp):
- if grp.name == 'Pony':
- return None
- return grp.iloc[0].loc['C']
-
- result = df.groupby('A').apply(f)
- e = df.groupby('A').first()['C'].copy()
- e.loc['Pony'] = np.nan
- e.name = None
- assert_series_equal(result, e)
-
- def test_apply_issues(self):
- # GH 5788
-
- s = """2011.05.16,00:00,1.40893
-2011.05.16,01:00,1.40760
-2011.05.16,02:00,1.40750
-2011.05.16,03:00,1.40649
-2011.05.17,02:00,1.40893
-2011.05.17,03:00,1.40760
-2011.05.17,04:00,1.40750
-2011.05.17,05:00,1.40649
-2011.05.18,02:00,1.40893
-2011.05.18,03:00,1.40760
-2011.05.18,04:00,1.40750
-2011.05.18,05:00,1.40649"""
-
- df = pd.read_csv(
- StringIO(s), header=None, names=['date', 'time', 'value'],
- parse_dates=[['date', 'time']])
- df = df.set_index('date_time')
-
- expected = df.groupby(df.index.date).idxmax()
- result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
- assert_frame_equal(result, expected)
-
- # GH 5789
- # don't auto coerce dates
- df = pd.read_csv(
- StringIO(s), header=None, names=['date', 'time', 'value'])
- exp_idx = pd.Index(
- ['2011.05.16', '2011.05.17', '2011.05.18'
- ], dtype=object, name='date')
- expected = Series(['00:00', '02:00', '02:00'], index=exp_idx)
- result = df.groupby('date').apply(
- lambda x: x['time'][x['value'].idxmax()])
- assert_series_equal(result, expected)
-
- def test_apply_trivial(self):
- # GH 20066
- # trivial apply: ignore input and return a constant dataframe.
- df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
- 'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
- columns=['key', 'data'])
- expected = pd.concat([df.iloc[1:], df.iloc[1:]],
- axis=1, keys=['float64', 'object'])
- result = df.groupby([str(x) for x in df.dtypes],
- axis=1).apply(lambda x: df.iloc[1:])
-
- assert_frame_equal(result, expected)
-
- @pytest.mark.xfail(reason=("GH 20066; function passed into apply "
- "returns a DataFrame with the same index "
- "as the one to create GroupBy object."))
- def test_apply_trivial_fail(self):
- # GH 20066
- # trivial apply fails if the constant dataframe has the same index
- # with the one used to create GroupBy object.
- df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
- 'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
- columns=['key', 'data'])
- expected = pd.concat([df, df],
- axis=1, keys=['float64', 'object'])
- result = df.groupby([str(x) for x in df.dtypes],
- axis=1).apply(lambda x: df)
-
- assert_frame_equal(result, expected)
-
- def test_time_field_bug(self):
- # Test a fix for the following error related to GH issue 11324 When
- # non-key fields in a group-by dataframe contained time-based fields
- # that were not returned by the apply function, an exception would be
- # raised.
-
- df = pd.DataFrame({'a': 1, 'b': [datetime.now() for nn in range(10)]})
-
- def func_with_no_date(batch):
- return pd.Series({'c': 2})
-
- def func_with_date(batch):
- return pd.Series({'b': datetime(2015, 1, 1), 'c': 2})
-
- dfg_no_conversion = df.groupby(by=['a']).apply(func_with_no_date)
- dfg_no_conversion_expected = pd.DataFrame({'c': 2}, index=[1])
- dfg_no_conversion_expected.index.name = 'a'
-
- dfg_conversion = df.groupby(by=['a']).apply(func_with_date)
- dfg_conversion_expected = pd.DataFrame(
- {'b': datetime(2015, 1, 1),
- 'c': 2}, index=[1])
- dfg_conversion_expected.index.name = 'a'
-
- tm.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected)
- tm.assert_frame_equal(dfg_conversion, dfg_conversion_expected)
-
- def test_len(self):
- df = tm.makeTimeDataFrame()
- grouped = df.groupby([lambda x: x.year, lambda x: x.month,
- lambda x: x.day])
- assert len(grouped) == len(df)
-
- grouped = df.groupby([lambda x: x.year, lambda x: x.month])
- expected = len({(x.year, x.month) for x in df.index})
- assert len(grouped) == expected
-
- # issue 11016
- df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
- assert len(df.groupby(('a'))) == 0
- assert len(df.groupby(('b'))) == 3
- assert len(df.groupby(['a', 'b'])) == 3
-
- def test_basic_regression(self):
- # regression
- T = [1.0 * x for x in lrange(1, 10) * 10][:1095]
- result = Series(T, lrange(0, len(T)))
-
- groupings = np.random.random((1100, ))
- groupings = Series(groupings, lrange(0, len(groupings))) * 10.
-
- grouped = result.groupby(groupings)
- grouped.mean()
-
- def test_with_na_groups(self):
- index = Index(np.arange(10))
-
- for dtype in ['float64', 'float32', 'int64', 'int32', 'int16', 'int8']:
- values = Series(np.ones(10), index, dtype=dtype)
- labels = Series([np.nan, 'foo', 'bar', 'bar', np.nan, np.nan,
- 'bar', 'bar', np.nan, 'foo'], index=index)
-
- # this SHOULD be an int
- grouped = values.groupby(labels)
- agged = grouped.agg(len)
- expected = Series([4, 2], index=['bar', 'foo'])
-
- assert_series_equal(agged, expected, check_dtype=False)
-
- # assert issubclass(agged.dtype.type, np.integer)
-
- # explicitly return a float from my function
- def f(x):
- return float(len(x))
-
- agged = grouped.agg(f)
- expected = Series([4, 2], index=['bar', 'foo'])
-
- assert_series_equal(agged, expected, check_dtype=False)
- assert issubclass(agged.dtype.type, np.dtype(dtype).type)
-
- def test_indices_concatenation_order(self):
-
- # GH 2808
-
- def f1(x):
- y = x[(x.b % 2) == 1] ** 2
- if y.empty:
- multiindex = MultiIndex(levels=[[]] * 2, labels=[[]] * 2,
- names=['b', 'c'])
- res = DataFrame(None, columns=['a'], index=multiindex)
- return res
- else:
- y = y.set_index(['b', 'c'])
- return y
-
- def f2(x):
- y = x[(x.b % 2) == 1] ** 2
- if y.empty:
- return DataFrame()
- else:
- y = y.set_index(['b', 'c'])
- return y
-
- def f3(x):
- y = x[(x.b % 2) == 1] ** 2
- if y.empty:
- multiindex = MultiIndex(levels=[[]] * 2, labels=[[]] * 2,
- names=['foo', 'bar'])
- res = DataFrame(None, columns=['a', 'b'], index=multiindex)
- return res
- else:
- return y
-
- df = DataFrame({'a': [1, 2, 2, 2], 'b': lrange(4), 'c': lrange(5, 9)})
-
- df2 = DataFrame({'a': [3, 2, 2, 2], 'b': lrange(4), 'c': lrange(5, 9)})
-
- # correct result
- result1 = df.groupby('a').apply(f1)
- result2 = df2.groupby('a').apply(f1)
- assert_frame_equal(result1, result2)
-
- # should fail (not the same number of levels)
- pytest.raises(AssertionError, df.groupby('a').apply, f2)
- pytest.raises(AssertionError, df2.groupby('a').apply, f2)
-
- # should fail (incorrect shape)
- pytest.raises(AssertionError, df.groupby('a').apply, f3)
- pytest.raises(AssertionError, df2.groupby('a').apply, f3)
-
- def test_attr_wrapper(self):
- grouped = self.ts.groupby(lambda x: x.weekday())
-
- result = grouped.std()
- expected = grouped.agg(lambda x: np.std(x, ddof=1))
- assert_series_equal(result, expected)
-
- # this is pretty cool
- result = grouped.describe()
- expected = {}
- for name, gp in grouped:
- expected[name] = gp.describe()
- expected = DataFrame(expected).T
- assert_frame_equal(result, expected)
-
- # get attribute
- result = grouped.dtype
- expected = grouped.agg(lambda x: x.dtype)
-
- # make sure raises error
- pytest.raises(AttributeError, getattr, grouped, 'foo')
-
- def test_frame_groupby(self):
- grouped = self.tsframe.groupby(lambda x: x.weekday())
-
- # aggregate
- aggregated = grouped.aggregate(np.mean)
- assert len(aggregated) == 5
- assert len(aggregated.columns) == 4
-
- # by string
- tscopy = self.tsframe.copy()
- tscopy['weekday'] = [x.weekday() for x in tscopy.index]
- stragged = tscopy.groupby('weekday').aggregate(np.mean)
- assert_frame_equal(stragged, aggregated, check_names=False)
-
- # transform
- grouped = self.tsframe.head(30).groupby(lambda x: x.weekday())
- transformed = grouped.transform(lambda x: x - x.mean())
- assert len(transformed) == 30
- assert len(transformed.columns) == 4
-
- # transform propagate
- transformed = grouped.transform(lambda x: x.mean())
- for name, group in grouped:
- mean = group.mean()
- for idx in group.index:
- tm.assert_series_equal(transformed.xs(idx), mean,
- check_names=False)
-
- # iterate
- for weekday, group in grouped:
- assert group.index[0].weekday() == weekday
-
- # groups / group_indices
- groups = grouped.groups
- indices = grouped.indices
-
- for k, v in compat.iteritems(groups):
- samething = self.tsframe.index.take(indices[k])
- assert (samething == v).all()
-
- def test_frame_groupby_columns(self):
- mapping = {'A': 0, 'B': 0, 'C': 1, 'D': 1}
- grouped = self.tsframe.groupby(mapping, axis=1)
-
- # aggregate
- aggregated = grouped.aggregate(np.mean)
- assert len(aggregated) == len(self.tsframe)
- assert len(aggregated.columns) == 2
-
- # transform
- tf = lambda x: x - x.mean()
- groupedT = self.tsframe.T.groupby(mapping, axis=0)
- assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
-
- # iterate
- for k, v in grouped:
- assert len(v.columns) == 2
-
- def test_frame_set_name_single(self):
- grouped = self.df.groupby('A')
-
- result = grouped.mean()
- assert result.index.name == 'A'
-
- result = self.df.groupby('A', as_index=False).mean()
- assert result.index.name != 'A'
-
- result = grouped.agg(np.mean)
- assert result.index.name == 'A'
-
- result = grouped.agg({'C': np.mean, 'D': np.std})
- assert result.index.name == 'A'
-
- result = grouped['C'].mean()
- assert result.index.name == 'A'
- result = grouped['C'].agg(np.mean)
- assert result.index.name == 'A'
- result = grouped['C'].agg([np.mean, np.std])
- assert result.index.name == 'A'
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- result = grouped['C'].agg({'foo': np.mean, 'bar': np.std})
- assert result.index.name == 'A'
-
- def test_multi_func(self):
- col1 = self.df['A']
- col2 = self.df['B']
-
- grouped = self.df.groupby([col1.get, col2.get])
- agged = grouped.mean()
- expected = self.df.groupby(['A', 'B']).mean()
-
- # TODO groupby get drops names
- assert_frame_equal(agged.loc[:, ['C', 'D']],
- expected.loc[:, ['C', 'D']],
- check_names=False)
-
- # some "groups" with no data
- df = DataFrame({'v1': np.random.randn(6),
- 'v2': np.random.randn(6),
- 'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),
- 'k2': np.array(['1', '1', '1', '2', '2', '2'])},
- index=['one', 'two', 'three', 'four', 'five', 'six'])
- # only verify that it works for now
- grouped = df.groupby(['k1', 'k2'])
- grouped.agg(np.sum)
-
- def test_multi_key_multiple_functions(self):
- grouped = self.df.groupby(['A', 'B'])['C']
-
- agged = grouped.agg([np.mean, np.std])
- expected = DataFrame({'mean': grouped.agg(np.mean),
- 'std': grouped.agg(np.std)})
- assert_frame_equal(agged, expected)
-
- def test_frame_multi_key_function_list(self):
- data = DataFrame(
- {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
- 'foo', 'foo', 'foo'],
- 'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
- 'two', 'two', 'one'],
- 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
- 'dull', 'shiny', 'shiny', 'shiny'],
- 'D': np.random.randn(11),
- 'E': np.random.randn(11),
- 'F': np.random.randn(11)})
-
- grouped = data.groupby(['A', 'B'])
- funcs = [np.mean, np.std]
- agged = grouped.agg(funcs)
- expected = concat([grouped['D'].agg(funcs), grouped['E'].agg(funcs),
- grouped['F'].agg(funcs)],
- keys=['D', 'E', 'F'], axis=1)
- assert (isinstance(agged.index, MultiIndex))
- assert (isinstance(expected.index, MultiIndex))
- assert_frame_equal(agged, expected)
-
- def test_groupby_multiple_columns(self):
- data = self.df
- grouped = data.groupby(['A', 'B'])
-
- def _check_op(op):
-
- with catch_warnings(record=True):
- result1 = op(grouped)
-
- expected = defaultdict(dict)
- for n1, gp1 in data.groupby('A'):
- for n2, gp2 in gp1.groupby('B'):
- expected[n1][n2] = op(gp2.loc[:, ['C', 'D']])
- expected = dict((k, DataFrame(v))
- for k, v in compat.iteritems(expected))
- expected = Panel.fromDict(expected).swapaxes(0, 1)
- expected.major_axis.name, expected.minor_axis.name = 'A', 'B'
-
- # a little bit crude
- for col in ['C', 'D']:
- result_col = op(grouped[col])
- exp = expected[col]
- pivoted = result1[col].unstack()
- pivoted2 = result_col.unstack()
- assert_frame_equal(pivoted.reindex_like(exp), exp)
- assert_frame_equal(pivoted2.reindex_like(exp), exp)
-
- _check_op(lambda x: x.sum())
- _check_op(lambda x: x.mean())
-
- # test single series works the same
- result = data['C'].groupby([data['A'], data['B']]).mean()
- expected = data.groupby(['A', 'B']).mean()['C']
-
- assert_series_equal(result, expected)
-
- def test_groupby_as_index_agg(self):
- grouped = self.df.groupby('A', as_index=False)
-
- # single-key
-
- result = grouped.agg(np.mean)
- expected = grouped.mean()
- assert_frame_equal(result, expected)
-
- result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', np.sum]]))
- expected2 = grouped.mean()
- expected2['D'] = grouped.sum()['D']
- assert_frame_equal(result2, expected2)
-
- grouped = self.df.groupby('A', as_index=True)
- expected3 = grouped['C'].sum()
- expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- result3 = grouped['C'].agg({'Q': np.sum})
- assert_frame_equal(result3, expected3)
-
- # multi-key
-
- grouped = self.df.groupby(['A', 'B'], as_index=False)
-
- result = grouped.agg(np.mean)
- expected = grouped.mean()
- assert_frame_equal(result, expected)
-
- result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', np.sum]]))
- expected2 = grouped.mean()
- expected2['D'] = grouped.sum()['D']
- assert_frame_equal(result2, expected2)
-
- expected3 = grouped['C'].sum()
- expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})
+@pytest.mark.parametrize('dtype', ['float64', 'float32', 'int64',
+ 'int32', 'int16', 'int8'])
+def test_with_na_groups(dtype):
+ index = Index(np.arange(10))
+ values = Series(np.ones(10), index, dtype=dtype)
+ labels = Series([np.nan, 'foo', 'bar', 'bar', np.nan, np.nan,
+ 'bar', 'bar', np.nan, 'foo'], index=index)
+
+ # this SHOULD be an int
+ grouped = values.groupby(labels)
+ agged = grouped.agg(len)
+ expected = Series([4, 2], index=['bar', 'foo'])
+
+ assert_series_equal(agged, expected, check_dtype=False)
+
+ # assert issubclass(agged.dtype.type, np.integer)
+
+ # explicitly return a float from my function
+ def f(x):
+ return float(len(x))
+
+ agged = grouped.agg(f)
+ expected = Series([4, 2], index=['bar', 'foo'])
+
+ assert_series_equal(agged, expected, check_dtype=False)
+ assert issubclass(agged.dtype.type, np.dtype(dtype).type)
+
+
+def test_indices_concatenation_order():
+
+ # GH 2808
+
+ def f1(x):
+ y = x[(x.b % 2) == 1] ** 2
+ if y.empty:
+ multiindex = MultiIndex(levels=[[]] * 2, labels=[[]] * 2,
+ names=['b', 'c'])
+ res = DataFrame(None, columns=['a'], index=multiindex)
+ return res
+ else:
+ y = y.set_index(['b', 'c'])
+ return y
+
+ def f2(x):
+ y = x[(x.b % 2) == 1] ** 2
+ if y.empty:
+ return DataFrame()
+ else:
+ y = y.set_index(['b', 'c'])
+ return y
+
+ def f3(x):
+ y = x[(x.b % 2) == 1] ** 2
+ if y.empty:
+ multiindex = MultiIndex(levels=[[]] * 2, labels=[[]] * 2,
+ names=['foo', 'bar'])
+ res = DataFrame(None, columns=['a', 'b'], index=multiindex)
+ return res
+ else:
+ return y
+
+ df = DataFrame({'a': [1, 2, 2, 2], 'b': lrange(4), 'c': lrange(5, 9)})
+
+ df2 = DataFrame({'a': [3, 2, 2, 2], 'b': lrange(4), 'c': lrange(5, 9)})
+
+ # correct result
+ result1 = df.groupby('a').apply(f1)
+ result2 = df2.groupby('a').apply(f1)
+ assert_frame_equal(result1, result2)
+
+ # should fail (not the same number of levels)
+ pytest.raises(AssertionError, df.groupby('a').apply, f2)
+ pytest.raises(AssertionError, df2.groupby('a').apply, f2)
+
+ # should fail (incorrect shape)
+ pytest.raises(AssertionError, df.groupby('a').apply, f3)
+ pytest.raises(AssertionError, df2.groupby('a').apply, f3)
+
+
+def test_attr_wrapper(ts):
+ grouped = ts.groupby(lambda x: x.weekday())
+
+ result = grouped.std()
+ expected = grouped.agg(lambda x: np.std(x, ddof=1))
+ assert_series_equal(result, expected)
+
+ # this is pretty cool
+ result = grouped.describe()
+ expected = {}
+ for name, gp in grouped:
+ expected[name] = gp.describe()
+ expected = DataFrame(expected).T
+ assert_frame_equal(result, expected)
+
+ # get attribute
+ result = grouped.dtype
+ expected = grouped.agg(lambda x: x.dtype)
+
+ # make sure raises error
+ pytest.raises(AttributeError, getattr, grouped, 'foo')
+
+
+def test_frame_groupby(tsframe):
+ grouped = tsframe.groupby(lambda x: x.weekday())
+
+ # aggregate
+ aggregated = grouped.aggregate(np.mean)
+ assert len(aggregated) == 5
+ assert len(aggregated.columns) == 4
+
+ # by string
+ tscopy = tsframe.copy()
+ tscopy['weekday'] = [x.weekday() for x in tscopy.index]
+ stragged = tscopy.groupby('weekday').aggregate(np.mean)
+ assert_frame_equal(stragged, aggregated, check_names=False)
+
+ # transform
+ grouped = tsframe.head(30).groupby(lambda x: x.weekday())
+ transformed = grouped.transform(lambda x: x - x.mean())
+ assert len(transformed) == 30
+ assert len(transformed.columns) == 4
+
+ # transform propagate
+ transformed = grouped.transform(lambda x: x.mean())
+ for name, group in grouped:
+ mean = group.mean()
+ for idx in group.index:
+ tm.assert_series_equal(transformed.xs(idx), mean,
+ check_names=False)
+
+ # iterate
+ for weekday, group in grouped:
+ assert group.index[0].weekday() == weekday
+
+ # groups / group_indices
+ groups = grouped.groups
+ indices = grouped.indices
+
+ for k, v in compat.iteritems(groups):
+ samething = tsframe.index.take(indices[k])
+ assert (samething == v).all()
+
+
+def test_frame_groupby_columns(tsframe):
+ mapping = {'A': 0, 'B': 0, 'C': 1, 'D': 1}
+ grouped = tsframe.groupby(mapping, axis=1)
+
+ # aggregate
+ aggregated = grouped.aggregate(np.mean)
+ assert len(aggregated) == len(tsframe)
+ assert len(aggregated.columns) == 2
+
+ # transform
+ tf = lambda x: x - x.mean()
+ groupedT = tsframe.T.groupby(mapping, axis=0)
+ assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
+
+ # iterate
+ for k, v in grouped:
+ assert len(v.columns) == 2
+
+
+def test_frame_set_name_single(df):
+ grouped = df.groupby('A')
+
+ result = grouped.mean()
+ assert result.index.name == 'A'
+
+ result = df.groupby('A', as_index=False).mean()
+ assert result.index.name != 'A'
+
+ result = grouped.agg(np.mean)
+ assert result.index.name == 'A'
+
+ result = grouped.agg({'C': np.mean, 'D': np.std})
+ assert result.index.name == 'A'
+
+ result = grouped['C'].mean()
+ assert result.index.name == 'A'
+ result = grouped['C'].agg(np.mean)
+ assert result.index.name == 'A'
+ result = grouped['C'].agg([np.mean, np.std])
+ assert result.index.name == 'A'
+
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = grouped['C'].agg({'foo': np.mean, 'bar': np.std})
+ assert result.index.name == 'A'
+
+
+def test_multi_func(df):
+ col1 = df['A']
+ col2 = df['B']
+
+ grouped = df.groupby([col1.get, col2.get])
+ agged = grouped.mean()
+ expected = df.groupby(['A', 'B']).mean()
+
+ # TODO groupby get drops names
+ assert_frame_equal(agged.loc[:, ['C', 'D']],
+ expected.loc[:, ['C', 'D']],
+ check_names=False)
+
+ # some "groups" with no data
+ df = DataFrame({'v1': np.random.randn(6),
+ 'v2': np.random.randn(6),
+ 'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),
+ 'k2': np.array(['1', '1', '1', '2', '2', '2'])},
+ index=['one', 'two', 'three', 'four', 'five', 'six'])
+ # only verify that it works for now
+ grouped = df.groupby(['k1', 'k2'])
+ grouped.agg(np.sum)
+
+
+def test_multi_key_multiple_functions(df):
+ grouped = df.groupby(['A', 'B'])['C']
+
+ agged = grouped.agg([np.mean, np.std])
+ expected = DataFrame({'mean': grouped.agg(np.mean),
+ 'std': grouped.agg(np.std)})
+ assert_frame_equal(agged, expected)
+
+
+def test_frame_multi_key_function_list():
+ data = DataFrame(
+ {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
+ 'foo', 'foo', 'foo'],
+ 'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
+ 'two', 'two', 'one'],
+ 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
+ 'dull', 'shiny', 'shiny', 'shiny'],
+ 'D': np.random.randn(11),
+ 'E': np.random.randn(11),
+ 'F': np.random.randn(11)})
+
+ grouped = data.groupby(['A', 'B'])
+ funcs = [np.mean, np.std]
+ agged = grouped.agg(funcs)
+ expected = pd.concat([grouped['D'].agg(funcs), grouped['E'].agg(funcs),
+ grouped['F'].agg(funcs)],
+ keys=['D', 'E', 'F'], axis=1)
+ assert (isinstance(agged.index, MultiIndex))
+ assert (isinstance(expected.index, MultiIndex))
+ assert_frame_equal(agged, expected)
+
+
+@pytest.mark.parametrize('op', [lambda x: x.sum(), lambda x: x.mean()])
+def test_groupby_multiple_columns(df, op):
+ data = df
+ grouped = data.groupby(['A', 'B'])
+
+ with catch_warnings(record=True):
+ result1 = op(grouped)
+
+ expected = defaultdict(dict)
+ for n1, gp1 in data.groupby('A'):
+ for n2, gp2 in gp1.groupby('B'):
+ expected[n1][n2] = op(gp2.loc[:, ['C', 'D']])
+ expected = dict((k, DataFrame(v))
+ for k, v in compat.iteritems(expected))
+ expected = Panel.fromDict(expected).swapaxes(0, 1)
+ expected.major_axis.name, expected.minor_axis.name = 'A', 'B'
+
+ # a little bit crude
+ for col in ['C', 'D']:
+ result_col = op(grouped[col])
+ exp = expected[col]
+ pivoted = result1[col].unstack()
+ pivoted2 = result_col.unstack()
+ assert_frame_equal(pivoted.reindex_like(exp), exp)
+ assert_frame_equal(pivoted2.reindex_like(exp), exp)
+
+ # test single series works the same
+ result = data['C'].groupby([data['A'], data['B']]).mean()
+ expected = data.groupby(['A', 'B']).mean()['C']
+
+ assert_series_equal(result, expected)
+
+
+def test_groupby_as_index_agg(df):
+ grouped = df.groupby('A', as_index=False)
+
+ # single-key
+
+ result = grouped.agg(np.mean)
+ expected = grouped.mean()
+ assert_frame_equal(result, expected)
+
+ result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', np.sum]]))
+ expected2 = grouped.mean()
+ expected2['D'] = grouped.sum()['D']
+ assert_frame_equal(result2, expected2)
+
+ grouped = df.groupby('A', as_index=True)
+ expected3 = grouped['C'].sum()
+ expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})
+
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
result3 = grouped['C'].agg({'Q': np.sum})
- assert_frame_equal(result3, expected3)
-
- # GH7115 & GH8112 & GH8582
- df = DataFrame(np.random.randint(0, 100, (50, 3)),
- columns=['jim', 'joe', 'jolie'])
- ts = Series(np.random.randint(5, 10, 50), name='jim')
-
- gr = df.groupby(ts)
- gr.nth(0) # invokes set_selection_from_grouper internally
- assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
-
- for attr in ['mean', 'max', 'count', 'idxmax', 'cumsum', 'all']:
- gr = df.groupby(ts, as_index=False)
- left = getattr(gr, attr)()
-
- gr = df.groupby(ts.values, as_index=True)
- right = getattr(gr, attr)().reset_index(drop=True)
-
- assert_frame_equal(left, right)
-
- def test_as_index_series_return_frame(self):
- grouped = self.df.groupby('A', as_index=False)
- grouped2 = self.df.groupby(['A', 'B'], as_index=False)
-
- result = grouped['C'].agg(np.sum)
- expected = grouped.agg(np.sum).loc[:, ['A', 'C']]
- assert isinstance(result, DataFrame)
- assert_frame_equal(result, expected)
-
- result2 = grouped2['C'].agg(np.sum)
- expected2 = grouped2.agg(np.sum).loc[:, ['A', 'B', 'C']]
- assert isinstance(result2, DataFrame)
- assert_frame_equal(result2, expected2)
-
- result = grouped['C'].sum()
- expected = grouped.sum().loc[:, ['A', 'C']]
- assert isinstance(result, DataFrame)
- assert_frame_equal(result, expected)
-
- result2 = grouped2['C'].sum()
- expected2 = grouped2.sum().loc[:, ['A', 'B', 'C']]
- assert isinstance(result2, DataFrame)
- assert_frame_equal(result2, expected2)
-
- # corner case
- pytest.raises(Exception, grouped['C'].__getitem__, 'D')
-
- def test_groupby_as_index_cython(self):
- data = self.df
-
- # single-key
- grouped = data.groupby('A', as_index=False)
- result = grouped.mean()
- expected = data.groupby(['A']).mean()
- expected.insert(0, 'A', expected.index)
- expected.index = np.arange(len(expected))
- assert_frame_equal(result, expected)
-
- # multi-key
- grouped = data.groupby(['A', 'B'], as_index=False)
- result = grouped.mean()
- expected = data.groupby(['A', 'B']).mean()
-
- arrays = lzip(*expected.index.values)
- expected.insert(0, 'A', arrays[0])
- expected.insert(1, 'B', arrays[1])
- expected.index = np.arange(len(expected))
- assert_frame_equal(result, expected)
-
- def test_groupby_as_index_series_scalar(self):
- grouped = self.df.groupby(['A', 'B'], as_index=False)
-
- # GH #421
-
- result = grouped['C'].agg(len)
- expected = grouped.agg(len).loc[:, ['A', 'B', 'C']]
- assert_frame_equal(result, expected)
-
- def test_groupby_as_index_corner(self):
- pytest.raises(TypeError, self.ts.groupby, lambda x: x.weekday(),
- as_index=False)
-
- pytest.raises(ValueError, self.df.groupby, lambda x: x.lower(),
- as_index=False, axis=1)
-
- def test_groupby_as_index_apply(self):
- # GH #4648 and #3417
- df = DataFrame({'item_id': ['b', 'b', 'a', 'c', 'a', 'b'],
- 'user_id': [1, 2, 1, 1, 3, 1],
- 'time': range(6)})
-
- g_as = df.groupby('user_id', as_index=True)
- g_not_as = df.groupby('user_id', as_index=False)
-
- res_as = g_as.head(2).index
- res_not_as = g_not_as.head(2).index
- exp = Index([0, 1, 2, 4])
- assert_index_equal(res_as, exp)
- assert_index_equal(res_not_as, exp)
-
- res_as_apply = g_as.apply(lambda x: x.head(2)).index
- res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index
-
- # apply doesn't maintain the original ordering
- # changed in GH5610 as the as_index=False returns a MI here
- exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (
- 2, 4)])
- tp = [(1, 0), (1, 2), (2, 1), (3, 4)]
- exp_as_apply = MultiIndex.from_tuples(tp, names=['user_id', None])
-
- assert_index_equal(res_as_apply, exp_as_apply)
- assert_index_equal(res_not_as_apply, exp_not_as_apply)
-
- ind = Index(list('abcde'))
- df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
- res = df.groupby(0, as_index=False).apply(lambda x: x).index
- assert_index_equal(res, ind)
-
- def test_groupby_multiple_key(self):
- df = tm.makeTimeDataFrame()
- grouped = df.groupby([lambda x: x.year, lambda x: x.month,
- lambda x: x.day])
- agged = grouped.sum()
- assert_almost_equal(df.values, agged.values)
-
- grouped = df.T.groupby([lambda x: x.year,
- lambda x: x.month,
- lambda x: x.day], axis=1)
-
- agged = grouped.agg(lambda x: x.sum())
- tm.assert_index_equal(agged.index, df.columns)
- assert_almost_equal(df.T.values, agged.values)
-
- agged = grouped.agg(lambda x: x.sum())
- assert_almost_equal(df.T.values, agged.values)
-
- def test_groupby_multi_corner(self):
- # test that having an all-NA column doesn't mess you up
- df = self.df.copy()
- df['bad'] = np.nan
- agged = df.groupby(['A', 'B']).mean()
-
- expected = self.df.groupby(['A', 'B']).mean()
- expected['bad'] = np.nan
-
- assert_frame_equal(agged, expected)
-
- def test_omit_nuisance(self):
- grouped = self.df.groupby('A')
-
- result = grouped.mean()
- expected = self.df.loc[:, ['A', 'C', 'D']].groupby('A').mean()
- assert_frame_equal(result, expected)
-
- agged = grouped.agg(np.mean)
- exp = grouped.mean()
- assert_frame_equal(agged, exp)
-
- df = self.df.loc[:, ['A', 'C', 'D']]
- df['E'] = datetime.now()
- grouped = df.groupby('A')
- result = grouped.agg(np.sum)
- expected = grouped.sum()
- assert_frame_equal(result, expected)
-
- # won't work with axis = 1
- grouped = df.groupby({'A': 0, 'C': 0, 'D': 1, 'E': 1}, axis=1)
- result = pytest.raises(TypeError, grouped.agg,
- lambda x: x.sum(0, numeric_only=False))
-
- def test_omit_nuisance_python_multiple(self):
- grouped = self.three_group.groupby(['A', 'B'])
-
- agged = grouped.agg(np.mean)
- exp = grouped.mean()
- assert_frame_equal(agged, exp)
-
- def test_empty_groups_corner(self):
- # handle empty groups
- df = DataFrame({'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),
- 'k2': np.array(['1', '1', '1', '2', '2', '2']),
- 'k3': ['foo', 'bar'] * 3,
- 'v1': np.random.randn(6),
- 'v2': np.random.randn(6)})
-
- grouped = df.groupby(['k1', 'k2'])
- result = grouped.agg(np.mean)
- expected = grouped.mean()
- assert_frame_equal(result, expected)
-
- grouped = self.mframe[3:5].groupby(level=0)
- agged = grouped.apply(lambda x: x.mean())
- agged_A = grouped['A'].apply(np.mean)
- assert_series_equal(agged['A'], agged_A)
- assert agged.index.name == 'first'
-
- def test_apply_concat_preserve_names(self):
- grouped = self.three_group.groupby(['A', 'B'])
-
- def desc(group):
- result = group.describe()
- result.index.name = 'stat'
- return result
-
- def desc2(group):
- result = group.describe()
- result.index.name = 'stat'
- result = result[:len(group)]
- # weirdo
- return result
-
- def desc3(group):
- result = group.describe()
-
- # names are different
- result.index.name = 'stat_%d' % len(group)
-
- result = result[:len(group)]
- # weirdo
- return result
-
- result = grouped.apply(desc)
- assert result.index.names == ('A', 'B', 'stat')
-
- result2 = grouped.apply(desc2)
- assert result2.index.names == ('A', 'B', 'stat')
-
- result3 = grouped.apply(desc3)
- assert result3.index.names == ('A', 'B', None)
-
- def test_nonsense_func(self):
- df = DataFrame([0])
- pytest.raises(Exception, df.groupby, lambda x: x + 'foo')
-
- def test_builtins_apply(self): # GH8155
- df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
- columns=['jim', 'joe'])
- df['jolie'] = np.random.randn(1000)
-
- for keys in ['jim', ['jim', 'joe']]: # single key & multi-key
- if keys == 'jim':
- continue
- for f in [max, min, sum]:
- fname = f.__name__
- result = df.groupby(keys).apply(f)
- result.shape
- ngroups = len(df.drop_duplicates(subset=keys))
- assert result.shape == (ngroups, 3), 'invalid frame shape: '\
- '{} (expected ({}, 3))'.format(result.shape, ngroups)
-
- assert_frame_equal(result, # numpy's equivalent function
- df.groupby(keys).apply(getattr(np, fname)))
-
- if f != sum:
- expected = df.groupby(keys).agg(fname).reset_index()
- expected.set_index(keys, inplace=True, drop=False)
- assert_frame_equal(result, expected, check_dtype=False)
-
- assert_series_equal(getattr(result, fname)(),
- getattr(df, fname)())
-
- def test_max_min_non_numeric(self):
- # #2700
- aa = DataFrame({'nn': [11, 11, 22, 22],
- 'ii': [1, 2, 3, 4],
- 'ss': 4 * ['mama']})
-
- result = aa.groupby('nn').max()
- assert 'ss' in result
-
- result = aa.groupby('nn').max(numeric_only=False)
- assert 'ss' in result
-
- result = aa.groupby('nn').min()
- assert 'ss' in result
-
- result = aa.groupby('nn').min(numeric_only=False)
- assert 'ss' in result
-
- def test_arg_passthru(self):
- # make sure that we are passing thru kwargs
- # to our agg functions
-
- # GH3668
- # GH5724
- df = pd.DataFrame(
- {'group': [1, 1, 2],
- 'int': [1, 2, 3],
- 'float': [4., 5., 6.],
- 'string': list('abc'),
- 'category_string': pd.Series(list('abc')).astype('category'),
- 'category_int': [7, 8, 9],
- 'datetime': pd.date_range('20130101', periods=3),
- 'datetimetz': pd.date_range('20130101',
- periods=3,
- tz='US/Eastern'),
- 'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
- columns=['group', 'int', 'float', 'string',
- 'category_string', 'category_int',
- 'datetime', 'datetimetz',
- 'timedelta'])
-
- expected_columns_numeric = Index(['int', 'float', 'category_int'])
-
- # mean / median
- expected = pd.DataFrame(
- {'category_int': [7.5, 9],
- 'float': [4.5, 6.],
- 'timedelta': [pd.Timedelta('1.5s'),
- pd.Timedelta('3s')],
- 'int': [1.5, 3],
- 'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
- pd.Timestamp('2013-01-03 00:00:00')],
- 'datetimetz': [
- pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
- pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
- index=Index([1, 2], name='group'),
- columns=['int', 'float', 'category_int',
- 'datetime', 'datetimetz', 'timedelta'])
- for attr in ['mean', 'median']:
- f = getattr(df.groupby('group'), attr)
- result = f()
- tm.assert_index_equal(result.columns, expected_columns_numeric)
-
- result = f(numeric_only=False)
- assert_frame_equal(result.reindex_like(expected), expected)
-
- # TODO: min, max *should* handle
- # categorical (ordered) dtype
- expected_columns = Index(['int', 'float', 'string',
- 'category_int',
- 'datetime', 'datetimetz',
- 'timedelta'])
- for attr in ['min', 'max']:
- f = getattr(df.groupby('group'), attr)
- result = f()
- tm.assert_index_equal(result.columns, expected_columns)
-
- result = f(numeric_only=False)
- tm.assert_index_equal(result.columns, expected_columns)
-
- expected_columns = Index(['int', 'float', 'string',
- 'category_string', 'category_int',
- 'datetime', 'datetimetz',
- 'timedelta'])
- for attr in ['first', 'last']:
- f = getattr(df.groupby('group'), attr)
- result = f()
- tm.assert_index_equal(result.columns, expected_columns)
-
- result = f(numeric_only=False)
- tm.assert_index_equal(result.columns, expected_columns)
-
- expected_columns = Index(['int', 'float', 'string',
- 'category_int', 'timedelta'])
- for attr in ['sum']:
- f = getattr(df.groupby('group'), attr)
- result = f()
- tm.assert_index_equal(result.columns, expected_columns_numeric)
-
- result = f(numeric_only=False)
- tm.assert_index_equal(result.columns, expected_columns)
-
- expected_columns = Index(['int', 'float', 'category_int'])
- for attr in ['prod', 'cumprod']:
- f = getattr(df.groupby('group'), attr)
- result = f()
- tm.assert_index_equal(result.columns, expected_columns_numeric)
-
- result = f(numeric_only=False)
- tm.assert_index_equal(result.columns, expected_columns)
-
- # like min, max, but don't include strings
- expected_columns = Index(['int', 'float',
- 'category_int',
- 'datetime', 'datetimetz',
- 'timedelta'])
- for attr in ['cummin', 'cummax']:
- f = getattr(df.groupby('group'), attr)
- result = f()
- # GH 15561: numeric_only=False set by default like min/max
- tm.assert_index_equal(result.columns, expected_columns)
-
- result = f(numeric_only=False)
- tm.assert_index_equal(result.columns, expected_columns)
-
- expected_columns = Index(['int', 'float', 'category_int',
- 'timedelta'])
- for attr in ['cumsum']:
- f = getattr(df.groupby('group'), attr)
- result = f()
- tm.assert_index_equal(result.columns, expected_columns_numeric)
-
- result = f(numeric_only=False)
- tm.assert_index_equal(result.columns, expected_columns)
-
- def test_wrap_aggregated_output_multindex(self):
- df = self.mframe.T
- df['baz', 'two'] = 'peekaboo'
-
- keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
- agged = df.groupby(keys).agg(np.mean)
- assert isinstance(agged.columns, MultiIndex)
-
- def aggfun(ser):
- if ser.name == ('foo', 'one'):
- raise TypeError
- else:
- return ser.sum()
-
- agged2 = df.groupby(keys).aggregate(aggfun)
- assert len(agged2.columns) + 1 == len(df.columns)
-
- def test_groupby_level_apply(self):
- frame = self.mframe
-
- result = frame.groupby(level=0).count()
- assert result.index.name == 'first'
- result = frame.groupby(level=1).count()
- assert result.index.name == 'second'
-
- result = frame['A'].groupby(level=0).count()
- assert result.index.name == 'first'
-
- def test_groupby_level_mapper(self):
- frame = self.mframe
- deleveled = frame.reset_index()
-
- mapper0 = {'foo': 0, 'bar': 0, 'baz': 1, 'qux': 1}
- mapper1 = {'one': 0, 'two': 0, 'three': 1}
-
- result0 = frame.groupby(mapper0, level=0).sum()
- result1 = frame.groupby(mapper1, level=1).sum()
-
- mapped_level0 = np.array([mapper0.get(x) for x in deleveled['first']])
- mapped_level1 = np.array([mapper1.get(x) for x in deleveled['second']])
- expected0 = frame.groupby(mapped_level0).sum()
- expected1 = frame.groupby(mapped_level1).sum()
- expected0.index.name, expected1.index.name = 'first', 'second'
-
- assert_frame_equal(result0, expected0)
- assert_frame_equal(result1, expected1)
-
- def test_groupby_level_nonmulti(self):
- # GH 1313, GH 13901
- s = Series([1, 2, 3, 10, 4, 5, 20, 6],
- Index([1, 2, 3, 1, 4, 5, 2, 6], name='foo'))
- expected = Series([11, 22, 3, 4, 5, 6],
- Index(range(1, 7), name='foo'))
-
- result = s.groupby(level=0).sum()
- tm.assert_series_equal(result, expected)
- result = s.groupby(level=[0]).sum()
- tm.assert_series_equal(result, expected)
- result = s.groupby(level=-1).sum()
- tm.assert_series_equal(result, expected)
- result = s.groupby(level=[-1]).sum()
- tm.assert_series_equal(result, expected)
-
- pytest.raises(ValueError, s.groupby, level=1)
- pytest.raises(ValueError, s.groupby, level=-2)
- pytest.raises(ValueError, s.groupby, level=[])
- pytest.raises(ValueError, s.groupby, level=[0, 0])
- pytest.raises(ValueError, s.groupby, level=[0, 1])
- pytest.raises(ValueError, s.groupby, level=[1])
-
- def test_groupby_complex(self):
- # GH 12902
- a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
- expected = Series((1 + 2j, 5 + 10j))
-
- result = a.groupby(level=0).sum()
- assert_series_equal(result, expected)
-
- result = a.sum(level=0)
- assert_series_equal(result, expected)
-
- def test_apply_series_to_frame(self):
- def f(piece):
- with np.errstate(invalid='ignore'):
- logged = np.log(piece)
- return DataFrame({'value': piece,
- 'demeaned': piece - piece.mean(),
- 'logged': logged})
-
- dr = bdate_range('1/1/2000', periods=100)
- ts = Series(np.random.randn(100), index=dr)
-
- grouped = ts.groupby(lambda x: x.month)
- result = grouped.apply(f)
-
- assert isinstance(result, DataFrame)
- tm.assert_index_equal(result.index, ts.index)
-
- def test_apply_series_yield_constant(self):
- result = self.df.groupby(['A', 'B'])['C'].apply(len)
- assert result.index.names[:2] == ('A', 'B')
-
- def test_apply_frame_yield_constant(self):
- # GH13568
- result = self.df.groupby(['A', 'B']).apply(len)
- assert isinstance(result, Series)
- assert result.name is None
-
- result = self.df.groupby(['A', 'B'])[['C', 'D']].apply(len)
- assert isinstance(result, Series)
- assert result.name is None
-
- def test_apply_frame_to_series(self):
- grouped = self.df.groupby(['A', 'B'])
- result = grouped.apply(len)
- expected = grouped.count()['C']
- tm.assert_index_equal(result.index, expected.index)
- tm.assert_numpy_array_equal(result.values, expected.values)
-
- def test_apply_frame_concat_series(self):
- def trans(group):
- return group.groupby('B')['C'].sum().sort_values()[:2]
-
- def trans2(group):
- grouped = group.groupby(df.reindex(group.index)['B'])
- return grouped.sum().sort_values()[:2]
-
- df = DataFrame({'A': np.random.randint(0, 5, 1000),
- 'B': np.random.randint(0, 5, 1000),
- 'C': np.random.randn(1000)})
-
- result = df.groupby('A').apply(trans)
- exp = df.groupby('A')['C'].apply(trans2)
- assert_series_equal(result, exp, check_names=False)
- assert result.name == 'C'
-
- def test_apply_transform(self):
- grouped = self.ts.groupby(lambda x: x.month)
- result = grouped.apply(lambda x: x * 2)
- expected = grouped.transform(lambda x: x * 2)
- assert_series_equal(result, expected)
-
- def test_apply_multikey_corner(self):
- grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
-
- def f(group):
- return group.sort_values('A')[-5:]
-
- result = grouped.apply(f)
- for key, group in grouped:
- assert_frame_equal(result.loc[key], f(group))
-
- def test_mutate_groups(self):
-
- # GH3380
-
- mydf = DataFrame({
- 'cat1': ['a'] * 8 + ['b'] * 6,
- 'cat2': ['c'] * 2 + ['d'] * 2 + ['e'] * 2 + ['f'] * 2 + ['c'] * 2 +
- ['d'] * 2 + ['e'] * 2,
- 'cat3': lmap(lambda x: 'g%s' % x, lrange(1, 15)),
- 'val': np.random.randint(100, size=14),
- })
-
- def f_copy(x):
- x = x.copy()
- x['rank'] = x.val.rank(method='min')
- return x.groupby('cat2')['rank'].min()
-
- def f_no_copy(x):
- x['rank'] = x.val.rank(method='min')
- return x.groupby('cat2')['rank'].min()
-
- grpby_copy = mydf.groupby('cat1').apply(f_copy)
- grpby_no_copy = mydf.groupby('cat1').apply(f_no_copy)
- assert_series_equal(grpby_copy, grpby_no_copy)
-
- def test_no_mutate_but_looks_like(self):
-
- # GH 8467
- # first show's mutation indicator
- # second does not, but should yield the same results
- df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3], 'value': range(9)})
-
- result1 = df.groupby('key', group_keys=True).apply(lambda x: x[:].key)
- result2 = df.groupby('key', group_keys=True).apply(lambda x: x.key)
- assert_series_equal(result1, result2)
-
- def test_apply_chunk_view(self):
- # Low level tinkering could be unsafe, make sure not
- df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],
- 'value': lrange(9)})
-
- # return view
- f = lambda x: x[:2]
-
- result = df.groupby('key', group_keys=False).apply(f)
- expected = df.take([0, 1, 3, 4, 6, 7])
- assert_frame_equal(result, expected)
-
- def test_apply_no_name_column_conflict(self):
- df = DataFrame({'name': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
- 'name2': [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
- 'value': lrange(10)[::-1]})
-
- # it works! #2605
- grouped = df.groupby(['name', 'name2'])
- grouped.apply(lambda x: x.sort_values('value', inplace=True))
-
- def test_groupby_series_indexed_differently(self):
- s1 = Series([5.0, -9.0, 4.0, 100., -5., 55., 6.7],
- index=Index(['a', 'b', 'c', 'd', 'e', 'f', 'g']))
- s2 = Series([1.0, 1.0, 4.0, 5.0, 5.0, 7.0],
- index=Index(['a', 'b', 'd', 'f', 'g', 'h']))
-
- grouped = s1.groupby(s2)
- agged = grouped.mean()
- exp = s1.groupby(s2.reindex(s1.index).get).mean()
- assert_series_equal(agged, exp)
-
- def test_groupby_with_hier_columns(self):
- tuples = list(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux',
- 'qux'], ['one', 'two', 'one', 'two', 'one', 'two',
- 'one', 'two']]))
- index = MultiIndex.from_tuples(tuples)
- columns = MultiIndex.from_tuples([('A', 'cat'), ('B', 'dog'), (
- 'B', 'cat'), ('A', 'dog')])
- df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
-
- result = df.groupby(level=0).mean()
- tm.assert_index_equal(result.columns, columns)
-
- result = df.groupby(level=0, axis=1).mean()
- tm.assert_index_equal(result.index, df.index)
-
- result = df.groupby(level=0).agg(np.mean)
- tm.assert_index_equal(result.columns, columns)
-
- result = df.groupby(level=0).apply(lambda x: x.mean())
- tm.assert_index_equal(result.columns, columns)
-
- result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
- tm.assert_index_equal(result.columns, Index(['A', 'B']))
- tm.assert_index_equal(result.index, df.index)
-
- # add a nuisance column
- sorted_columns, _ = columns.sortlevel(0)
- df['A', 'foo'] = 'bar'
- result = df.groupby(level=0).mean()
- tm.assert_index_equal(result.columns, df.columns[:-1])
-
- def test_pass_args_kwargs(self):
- from numpy import percentile
-
- def f(x, q=None, axis=0):
- return percentile(x, q, axis=axis)
-
- g = lambda x: percentile(x, 80, axis=0)
-
- # Series
- ts_grouped = self.ts.groupby(lambda x: x.month)
- agg_result = ts_grouped.agg(percentile, 80, axis=0)
- apply_result = ts_grouped.apply(percentile, 80, axis=0)
- trans_result = ts_grouped.transform(percentile, 80, axis=0)
-
- agg_expected = ts_grouped.quantile(.8)
- trans_expected = ts_grouped.transform(g)
-
- assert_series_equal(apply_result, agg_expected)
- assert_series_equal(agg_result, agg_expected, check_names=False)
- assert_series_equal(trans_result, trans_expected)
-
- agg_result = ts_grouped.agg(f, q=80)
- apply_result = ts_grouped.apply(f, q=80)
- trans_result = ts_grouped.transform(f, q=80)
- assert_series_equal(agg_result, agg_expected)
- assert_series_equal(apply_result, agg_expected)
- assert_series_equal(trans_result, trans_expected)
-
- # DataFrame
- df_grouped = self.tsframe.groupby(lambda x: x.month)
- agg_result = df_grouped.agg(percentile, 80, axis=0)
- apply_result = df_grouped.apply(DataFrame.quantile, .8)
- expected = df_grouped.quantile(.8)
- assert_frame_equal(apply_result, expected)
- assert_frame_equal(agg_result, expected, check_names=False)
-
- agg_result = df_grouped.agg(f, q=80)
- apply_result = df_grouped.apply(DataFrame.quantile, q=.8)
- assert_frame_equal(agg_result, expected, check_names=False)
- assert_frame_equal(apply_result, expected)
-
- def test_non_cython_api(self):
-
- # GH5610
- # non-cython calls should not include the grouper
-
- df = DataFrame(
- [[1, 2, 'foo'],
- [1, np.nan, 'bar'],
- [3, np.nan, 'baz']],
- columns=['A', 'B', 'C'])
- g = df.groupby('A')
- gni = df.groupby('A', as_index=False)
-
- # mad
- expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3])
- expected.index.name = 'A'
- result = g.mad()
- assert_frame_equal(result, expected)
-
- expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'],
- index=[0, 1])
- result = gni.mad()
- assert_frame_equal(result, expected)
-
- # describe
- expected_index = pd.Index([1, 3], name='A')
- expected_col = pd.MultiIndex(levels=[['B'],
- ['count', 'mean', 'std', 'min',
- '25%', '50%', '75%', 'max']],
- labels=[[0] * 8, list(range(8))])
- expected = pd.DataFrame([[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
- [0.0, np.nan, np.nan, np.nan, np.nan, np.nan,
- np.nan, np.nan]],
- index=expected_index,
- columns=expected_col)
- result = g.describe()
- assert_frame_equal(result, expected)
-
- expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T,
- df[df.A == 3].describe().unstack().to_frame().T])
- expected.index = pd.Index([0, 1])
- result = gni.describe()
- assert_frame_equal(result, expected)
-
- # any
- expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
- index=[1, 3])
- expected.index.name = 'A'
- result = g.any()
- assert_frame_equal(result, expected)
-
- # idxmax
- expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3])
- expected.index.name = 'A'
- result = g.idxmax()
- assert_frame_equal(result, expected)
-
- def test_cython_api2(self):
-
- # this takes the fast apply path
-
- # cumsum (GH5614)
- df = DataFrame(
- [[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
- ], columns=['A', 'B', 'C'])
- expected = DataFrame(
- [[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
- result = df.groupby('A').cumsum()
- assert_frame_equal(result, expected)
-
- # GH 5755 - cumsum is a transformer and should ignore as_index
- result = df.groupby('A', as_index=False).cumsum()
- assert_frame_equal(result, expected)
-
- # GH 13994
- result = df.groupby('A').cumsum(axis=1)
- expected = df.cumsum(axis=1)
- assert_frame_equal(result, expected)
- result = df.groupby('A').cumprod(axis=1)
- expected = df.cumprod(axis=1)
- assert_frame_equal(result, expected)
-
- def test_grouping_ndarray(self):
- grouped = self.df.groupby(self.df['A'].values)
-
- result = grouped.sum()
- expected = self.df.groupby('A').sum()
- assert_frame_equal(result, expected, check_names=False
- ) # Note: no names when grouping by value
-
- def test_apply_typecast_fail(self):
- df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
- 'c': np.tile(
- ['a', 'b', 'c'], 2),
- 'v': np.arange(1., 7.)})
-
- def f(group):
- v = group['v']
- group['v2'] = (v - v.min()) / (v.max() - v.min())
- return group
-
- result = df.groupby('d').apply(f)
-
- expected = df.copy()
- expected['v2'] = np.tile([0., 0.5, 1], 2)
-
- assert_frame_equal(result, expected)
-
- def test_apply_multiindex_fail(self):
- index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
- ])
- df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
- 'c': np.tile(['a', 'b', 'c'], 2),
- 'v': np.arange(1., 7.)}, index=index)
-
- def f(group):
- v = group['v']
- group['v2'] = (v - v.min()) / (v.max() - v.min())
- return group
-
- result = df.groupby('d').apply(f)
-
- expected = df.copy()
- expected['v2'] = np.tile([0., 0.5, 1], 2)
-
- assert_frame_equal(result, expected)
-
- def test_apply_corner(self):
- result = self.tsframe.groupby(lambda x: x.year).apply(lambda x: x * 2)
- expected = self.tsframe * 2
- assert_frame_equal(result, expected)
-
- def test_apply_without_copy(self):
- # GH 5545
- # returning a non-copy in an applied function fails
-
- data = DataFrame({'id_field': [100, 100, 200, 300],
- 'category': ['a', 'b', 'c', 'c'],
- 'value': [1, 2, 3, 4]})
-
- def filt1(x):
- if x.shape[0] == 1:
- return x.copy()
- else:
- return x[x.category == 'c']
-
- def filt2(x):
- if x.shape[0] == 1:
- return x
- else:
- return x[x.category == 'c']
-
- expected = data.groupby('id_field').apply(filt1)
- result = data.groupby('id_field').apply(filt2)
- assert_frame_equal(result, expected)
-
- def test_apply_corner_cases(self):
- # #535, can't use sliding iterator
-
- N = 1000
- labels = np.random.randint(0, 100, size=N)
- df = DataFrame({'key': labels,
- 'value1': np.random.randn(N),
- 'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
-
- grouped = df.groupby('key')
-
- def f(g):
- g['value3'] = g['value1'] * 2
- return g
-
- result = grouped.apply(f)
- assert 'value3' in result
+ assert_frame_equal(result3, expected3)
+
+ # multi-key
+
+ grouped = df.groupby(['A', 'B'], as_index=False)
+
+ result = grouped.agg(np.mean)
+ expected = grouped.mean()
+ assert_frame_equal(result, expected)
+
+ result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', np.sum]]))
+ expected2 = grouped.mean()
+ expected2['D'] = grouped.sum()['D']
+ assert_frame_equal(result2, expected2)
+
+ expected3 = grouped['C'].sum()
+ expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})
+ result3 = grouped['C'].agg({'Q': np.sum})
+ assert_frame_equal(result3, expected3)
+
+ # GH7115 & GH8112 & GH8582
+ df = DataFrame(np.random.randint(0, 100, (50, 3)),
+ columns=['jim', 'joe', 'jolie'])
+ ts = Series(np.random.randint(5, 10, 50), name='jim')
+
+ gr = df.groupby(ts)
+ gr.nth(0) # invokes set_selection_from_grouper internally
+ assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
+
+ for attr in ['mean', 'max', 'count', 'idxmax', 'cumsum', 'all']:
+ gr = df.groupby(ts, as_index=False)
+ left = getattr(gr, attr)()
+
+ gr = df.groupby(ts.values, as_index=True)
+ right = getattr(gr, attr)().reset_index(drop=True)
+
+ assert_frame_equal(left, right)
+
+
+def test_as_index_series_return_frame(df):
+ grouped = df.groupby('A', as_index=False)
+ grouped2 = df.groupby(['A', 'B'], as_index=False)
+
+ result = grouped['C'].agg(np.sum)
+ expected = grouped.agg(np.sum).loc[:, ['A', 'C']]
+ assert isinstance(result, DataFrame)
+ assert_frame_equal(result, expected)
+
+ result2 = grouped2['C'].agg(np.sum)
+ expected2 = grouped2.agg(np.sum).loc[:, ['A', 'B', 'C']]
+ assert isinstance(result2, DataFrame)
+ assert_frame_equal(result2, expected2)
+
+ result = grouped['C'].sum()
+ expected = grouped.sum().loc[:, ['A', 'C']]
+ assert isinstance(result, DataFrame)
+ assert_frame_equal(result, expected)
+
+ result2 = grouped2['C'].sum()
+ expected2 = grouped2.sum().loc[:, ['A', 'B', 'C']]
+ assert isinstance(result2, DataFrame)
+ assert_frame_equal(result2, expected2)
+
+ # corner case
+ pytest.raises(Exception, grouped['C'].__getitem__, 'D')
+
+
+def test_groupby_as_index_cython(df):
+ data = df
+
+ # single-key
+ grouped = data.groupby('A', as_index=False)
+ result = grouped.mean()
+ expected = data.groupby(['A']).mean()
+ expected.insert(0, 'A', expected.index)
+ expected.index = np.arange(len(expected))
+ assert_frame_equal(result, expected)
+
+ # multi-key
+ grouped = data.groupby(['A', 'B'], as_index=False)
+ result = grouped.mean()
+ expected = data.groupby(['A', 'B']).mean()
+
+ arrays = lzip(*expected.index.values)
+ expected.insert(0, 'A', arrays[0])
+ expected.insert(1, 'B', arrays[1])
+ expected.index = np.arange(len(expected))
+ assert_frame_equal(result, expected)
+
+
+def test_groupby_as_index_series_scalar(df):
+ grouped = df.groupby(['A', 'B'], as_index=False)
+
+ # GH #421
+
+ result = grouped['C'].agg(len)
+ expected = grouped.agg(len).loc[:, ['A', 'B', 'C']]
+ assert_frame_equal(result, expected)
+
+
+def test_groupby_as_index_corner(df, ts):
+ pytest.raises(TypeError, ts.groupby, lambda x: x.weekday(),
+ as_index=False)
+
+ pytest.raises(ValueError, df.groupby, lambda x: x.lower(),
+ as_index=False, axis=1)
+
+
+def test_groupby_multiple_key(df):
+ df = tm.makeTimeDataFrame()
+ grouped = df.groupby([lambda x: x.year, lambda x: x.month,
+ lambda x: x.day])
+ agged = grouped.sum()
+ assert_almost_equal(df.values, agged.values)
+
+ grouped = df.T.groupby([lambda x: x.year,
+ lambda x: x.month,
+ lambda x: x.day], axis=1)
+
+ agged = grouped.agg(lambda x: x.sum())
+ tm.assert_index_equal(agged.index, df.columns)
+ assert_almost_equal(df.T.values, agged.values)
+
+ agged = grouped.agg(lambda x: x.sum())
+ assert_almost_equal(df.T.values, agged.values)
+
+
+def test_groupby_multi_corner(df):
+ # test that having an all-NA column doesn't mess you up
+ df = df.copy()
+ df['bad'] = np.nan
+ agged = df.groupby(['A', 'B']).mean()
+
+ expected = df.groupby(['A', 'B']).mean()
+ expected['bad'] = np.nan
+
+ assert_frame_equal(agged, expected)
+
+
+def test_omit_nuisance(df):
+ grouped = df.groupby('A')
+
+ result = grouped.mean()
+ expected = df.loc[:, ['A', 'C', 'D']].groupby('A').mean()
+ assert_frame_equal(result, expected)
+
+ agged = grouped.agg(np.mean)
+ exp = grouped.mean()
+ assert_frame_equal(agged, exp)
+
+ df = df.loc[:, ['A', 'C', 'D']]
+ df['E'] = datetime.now()
+ grouped = df.groupby('A')
+ result = grouped.agg(np.sum)
+ expected = grouped.sum()
+ assert_frame_equal(result, expected)
+
+ # won't work with axis = 1
+ grouped = df.groupby({'A': 0, 'C': 0, 'D': 1, 'E': 1}, axis=1)
+ result = pytest.raises(TypeError, grouped.agg,
+ lambda x: x.sum(0, numeric_only=False))
+
+
+def test_omit_nuisance_python_multiple(three_group):
+ grouped = three_group.groupby(['A', 'B'])
+
+ agged = grouped.agg(np.mean)
+ exp = grouped.mean()
+ assert_frame_equal(agged, exp)
+
+
+def test_empty_groups_corner(mframe):
+ # handle empty groups
+ df = DataFrame({'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),
+ 'k2': np.array(['1', '1', '1', '2', '2', '2']),
+ 'k3': ['foo', 'bar'] * 3,
+ 'v1': np.random.randn(6),
+ 'v2': np.random.randn(6)})
+
+ grouped = df.groupby(['k1', 'k2'])
+ result = grouped.agg(np.mean)
+ expected = grouped.mean()
+ assert_frame_equal(result, expected)
+
+ grouped = mframe[3:5].groupby(level=0)
+ agged = grouped.apply(lambda x: x.mean())
+ agged_A = grouped['A'].apply(np.mean)
+ assert_series_equal(agged['A'], agged_A)
+ assert agged.index.name == 'first'
+
+
+def test_nonsense_func():
+ df = DataFrame([0])
+ pytest.raises(Exception, df.groupby, lambda x: x + 'foo')
+
+
+def test_wrap_aggregated_output_multindex(mframe):
+ df = mframe.T
+ df['baz', 'two'] = 'peekaboo'
+
+ keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
+ agged = df.groupby(keys).agg(np.mean)
+ assert isinstance(agged.columns, MultiIndex)
+
+ def aggfun(ser):
+ if ser.name == ('foo', 'one'):
+ raise TypeError
+ else:
+ return ser.sum()
+
+ agged2 = df.groupby(keys).aggregate(aggfun)
+ assert len(agged2.columns) + 1 == len(df.columns)
+
+
+def test_groupby_level_apply(mframe):
+
+ result = mframe.groupby(level=0).count()
+ assert result.index.name == 'first'
+ result = mframe.groupby(level=1).count()
+ assert result.index.name == 'second'
+
+ result = mframe['A'].groupby(level=0).count()
+ assert result.index.name == 'first'
+
+
+def test_groupby_level_mapper(mframe):
+ deleveled = mframe.reset_index()
+
+ mapper0 = {'foo': 0, 'bar': 0, 'baz': 1, 'qux': 1}
+ mapper1 = {'one': 0, 'two': 0, 'three': 1}
+
+ result0 = mframe.groupby(mapper0, level=0).sum()
+ result1 = mframe.groupby(mapper1, level=1).sum()
+
+ mapped_level0 = np.array([mapper0.get(x) for x in deleveled['first']])
+ mapped_level1 = np.array([mapper1.get(x) for x in deleveled['second']])
+ expected0 = mframe.groupby(mapped_level0).sum()
+ expected1 = mframe.groupby(mapped_level1).sum()
+ expected0.index.name, expected1.index.name = 'first', 'second'
+
+ assert_frame_equal(result0, expected0)
+ assert_frame_equal(result1, expected1)
+
+
+def test_groupby_level_nonmulti():
+ # GH 1313, GH 13901
+ s = Series([1, 2, 3, 10, 4, 5, 20, 6],
+ Index([1, 2, 3, 1, 4, 5, 2, 6], name='foo'))
+ expected = Series([11, 22, 3, 4, 5, 6],
+ Index(range(1, 7), name='foo'))
+
+ result = s.groupby(level=0).sum()
+ tm.assert_series_equal(result, expected)
+ result = s.groupby(level=[0]).sum()
+ tm.assert_series_equal(result, expected)
+ result = s.groupby(level=-1).sum()
+ tm.assert_series_equal(result, expected)
+ result = s.groupby(level=[-1]).sum()
+ tm.assert_series_equal(result, expected)
+
+ pytest.raises(ValueError, s.groupby, level=1)
+ pytest.raises(ValueError, s.groupby, level=-2)
+ pytest.raises(ValueError, s.groupby, level=[])
+ pytest.raises(ValueError, s.groupby, level=[0, 0])
+ pytest.raises(ValueError, s.groupby, level=[0, 1])
+ pytest.raises(ValueError, s.groupby, level=[1])
+
+
+def test_groupby_complex():
+ # GH 12902
+ a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
+ expected = Series((1 + 2j, 5 + 10j))
+
+ result = a.groupby(level=0).sum()
+ assert_series_equal(result, expected)
+
+ result = a.sum(level=0)
+ assert_series_equal(result, expected)
+
+
+def test_mutate_groups():
+
+ # GH3380
+
+ df = DataFrame({
+ 'cat1': ['a'] * 8 + ['b'] * 6,
+ 'cat2': ['c'] * 2 + ['d'] * 2 + ['e'] * 2 + ['f'] * 2 + ['c'] * 2 +
+ ['d'] * 2 + ['e'] * 2,
+ 'cat3': lmap(lambda x: 'g%s' % x, lrange(1, 15)),
+ 'val': np.random.randint(100, size=14),
+ })
+
+ def f_copy(x):
+ x = x.copy()
+ x['rank'] = x.val.rank(method='min')
+ return x.groupby('cat2')['rank'].min()
+
+ def f_no_copy(x):
+ x['rank'] = x.val.rank(method='min')
+ return x.groupby('cat2')['rank'].min()
+
+ grpby_copy = df.groupby('cat1').apply(f_copy)
+ grpby_no_copy = df.groupby('cat1').apply(f_no_copy)
+ assert_series_equal(grpby_copy, grpby_no_copy)
+
+
+def test_no_mutate_but_looks_like():
+
+ # GH 8467
+ # first show's mutation indicator
+ # second does not, but should yield the same results
+ df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3], 'value': range(9)})
+
+ result1 = df.groupby('key', group_keys=True).apply(lambda x: x[:].key)
+ result2 = df.groupby('key', group_keys=True).apply(lambda x: x.key)
+ assert_series_equal(result1, result2)
+
+
+def test_groupby_series_indexed_differently():
+ s1 = Series([5.0, -9.0, 4.0, 100., -5., 55., 6.7],
+ index=Index(['a', 'b', 'c', 'd', 'e', 'f', 'g']))
+ s2 = Series([1.0, 1.0, 4.0, 5.0, 5.0, 7.0],
+ index=Index(['a', 'b', 'd', 'f', 'g', 'h']))
+
+ grouped = s1.groupby(s2)
+ agged = grouped.mean()
+ exp = s1.groupby(s2.reindex(s1.index).get).mean()
+ assert_series_equal(agged, exp)
+
+
+def test_groupby_with_hier_columns():
+ tuples = list(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux',
+ 'qux'], ['one', 'two', 'one', 'two', 'one', 'two',
+ 'one', 'two']]))
+ index = MultiIndex.from_tuples(tuples)
+ columns = MultiIndex.from_tuples([('A', 'cat'), ('B', 'dog'), (
+ 'B', 'cat'), ('A', 'dog')])
+ df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
+
+ result = df.groupby(level=0).mean()
+ tm.assert_index_equal(result.columns, columns)
+
+ result = df.groupby(level=0, axis=1).mean()
+ tm.assert_index_equal(result.index, df.index)
+
+ result = df.groupby(level=0).agg(np.mean)
+ tm.assert_index_equal(result.columns, columns)
+
+ result = df.groupby(level=0).apply(lambda x: x.mean())
+ tm.assert_index_equal(result.columns, columns)
+
+ result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
+ tm.assert_index_equal(result.columns, Index(['A', 'B']))
+ tm.assert_index_equal(result.index, df.index)
+
+ # add a nuisance column
+ sorted_columns, _ = columns.sortlevel(0)
+ df['A', 'foo'] = 'bar'
+ result = df.groupby(level=0).mean()
+ tm.assert_index_equal(result.columns, df.columns[:-1])
- def test_groupby_wrong_multi_labels(self):
- data = """index,foo,bar,baz,spam,data
+
+def test_grouping_ndarray(df):
+ grouped = df.groupby(df['A'].values)
+
+ result = grouped.sum()
+ expected = df.groupby('A').sum()
+ assert_frame_equal(result, expected, check_names=False
+ ) # Note: no names when grouping by value
+
+
+def test_groupby_wrong_multi_labels():
+ data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
- data = read_csv(StringIO(data), index_col=0)
-
- grouped = data.groupby(['foo', 'bar', 'baz', 'spam'])
-
- result = grouped.agg(np.mean)
- expected = grouped.mean()
- assert_frame_equal(result, expected)
-
- def test_groupby_series_with_name(self):
- result = self.df.groupby(self.df['A']).mean()
- result2 = self.df.groupby(self.df['A'], as_index=False).mean()
- assert result.index.name == 'A'
- assert 'A' in result2
-
- result = self.df.groupby([self.df['A'], self.df['B']]).mean()
- result2 = self.df.groupby([self.df['A'], self.df['B']],
- as_index=False).mean()
- assert result.index.names == ('A', 'B')
- assert 'A' in result2
- assert 'B' in result2
-
- def test_seriesgroupby_name_attr(self):
- # GH 6265
- result = self.df.groupby('A')['C']
- assert result.count().name == 'C'
- assert result.mean().name == 'C'
-
- testFunc = lambda x: np.sum(x) * 2
- assert result.agg(testFunc).name == 'C'
-
- def test_consistency_name(self):
- # GH 12363
-
- df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
- 'foo', 'bar', 'foo', 'foo'],
- 'B': ['one', 'one', 'two', 'two',
- 'two', 'two', 'one', 'two'],
- 'C': np.random.randn(8) + 1.0,
- 'D': np.arange(8)})
-
- expected = df.groupby(['A']).B.count()
- result = df.B.groupby(df.A).count()
- assert_series_equal(result, expected)
-
- def test_groupby_name_propagation(self):
- # GH 6124
- def summarize(df, name=None):
- return Series({'count': 1, 'mean': 2, 'omissions': 3, }, name=name)
-
- def summarize_random_name(df):
- # Provide a different name for each Series. In this case, groupby
- # should not attempt to propagate the Series name since they are
- # inconsistent.
- return Series({
- 'count': 1,
- 'mean': 2,
- 'omissions': 3,
- }, name=df.iloc[0]['A'])
-
- metrics = self.df.groupby('A').apply(summarize)
- assert metrics.columns.name is None
- metrics = self.df.groupby('A').apply(summarize, 'metrics')
- assert metrics.columns.name == 'metrics'
- metrics = self.df.groupby('A').apply(summarize_random_name)
- assert metrics.columns.name is None
-
- def test_groupby_nonstring_columns(self):
- df = DataFrame([np.arange(10) for x in range(10)])
- grouped = df.groupby(0)
- result = grouped.mean()
- expected = df.groupby(df[0]).mean()
- assert_frame_equal(result, expected)
-
- def test_groupby_mixed_type_columns(self):
- # GH 13432, unorderable types in py3
- df = DataFrame([[0, 1, 2]], columns=['A', 'B', 0])
- expected = DataFrame([[1, 2]], columns=['B', 0],
- index=Index([0], name='A'))
-
- result = df.groupby('A').first()
- tm.assert_frame_equal(result, expected)
-
- result = df.groupby('A').sum()
- tm.assert_frame_equal(result, expected)
-
- def test_cython_grouper_series_bug_noncontig(self):
- arr = np.empty((100, 100))
- arr.fill(np.nan)
- obj = Series(arr[:, 0], index=lrange(100))
- inds = np.tile(lrange(10), 10)
-
- result = obj.groupby(inds).agg(Series.median)
- assert result.isna().all()
-
- def test_series_grouper_noncontig_index(self):
- index = Index(tm.rands_array(10, 100))
-
- values = Series(np.random.randn(50), index=index[::2])
- labels = np.random.randint(0, 5, 50)
-
- # it works!
- grouped = values.groupby(labels)
-
- # accessing the index elements causes segfault
- f = lambda x: len(set(map(id, x.index)))
- grouped.agg(f)
-
- def test_convert_objects_leave_decimal_alone(self):
-
- from decimal import Decimal
-
- s = Series(lrange(5))
- labels = np.array(['a', 'b', 'c', 'd', 'e'], dtype='O')
-
- def convert_fast(x):
- return Decimal(str(x.mean()))
-
- def convert_force_pure(x):
- # base will be length 0
- assert (len(x.base) > 0)
- return Decimal(str(x.mean()))
-
- grouped = s.groupby(labels)
-
- result = grouped.agg(convert_fast)
- assert result.dtype == np.object_
- assert isinstance(result[0], Decimal)
-
- result = grouped.agg(convert_force_pure)
- assert result.dtype == np.object_
- assert isinstance(result[0], Decimal)
-
- def test_fast_apply(self):
- # make sure that fast apply is correctly called
- # rather than raising any kind of error
- # otherwise the python path will be callsed
- # which slows things down
- N = 1000
- labels = np.random.randint(0, 2000, size=N)
- labels2 = np.random.randint(0, 3, size=N)
- df = DataFrame({'key': labels,
- 'key2': labels2,
- 'value1': np.random.randn(N),
- 'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
-
- def f(g):
- return 1
-
- g = df.groupby(['key', 'key2'])
-
- grouper = g.grouper
-
- splitter = grouper._get_splitter(g._selected_obj, axis=g.axis)
- group_keys = grouper._get_group_keys()
-
- values, mutated = splitter.fast_apply(f, group_keys)
- assert not mutated
-
- def test_apply_with_mixed_dtype(self):
- # GH3480, apply with mixed dtype on axis=1 breaks in 0.11
- df = DataFrame({'foo1': np.random.randn(6),
- 'foo2': ['one', 'two', 'two', 'three', 'one', 'two']})
- result = df.apply(lambda x: x, axis=1)
- assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts())
-
- # GH 3610 incorrect dtype conversion with as_index=False
- df = DataFrame({"c1": [1, 2, 6, 6, 8]})
- df["c2"] = df.c1 / 2.0
- result1 = df.groupby("c2").mean().reset_index().c2
- result2 = df.groupby("c2", as_index=False).mean().c2
- assert_series_equal(result1, result2)
-
- def test_groupby_aggregation_mixed_dtype(self):
-
- # GH 6212
- expected = DataFrame({
- 'v1': [5, 5, 7, np.nan, 3, 3, 4, 1],
- 'v2': [55, 55, 77, np.nan, 33, 33, 44, 11]},
- index=MultiIndex.from_tuples([(1, 95), (1, 99), (2, 95), (2, 99),
- ('big', 'damp'),
- ('blue', 'dry'),
- ('red', 'red'), ('red', 'wet')],
- names=['by1', 'by2']))
-
- df = DataFrame({
- 'v1': [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
- 'v2': [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
- 'by1': ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan,
- 12],
- 'by2': ["wet", "dry", 99, 95, np.nan, "damp", 95, 99, "red", 99,
- np.nan, np.nan]
- })
-
- g = df.groupby(['by1', 'by2'])
- result = g[['v1', 'v2']].mean()
- assert_frame_equal(result, expected)
-
- def test_groupby_dtype_inference_empty(self):
- # GH 6733
- df = DataFrame({'x': [], 'range': np.arange(0, dtype='int64')})
- assert df['x'].dtype == np.float64
-
- result = df.groupby('x').first()
- exp_index = Index([], name='x', dtype=np.float64)
- expected = DataFrame({'range': Series(
- [], index=exp_index, dtype='int64')})
- assert_frame_equal(result, expected, by_blocks=True)
-
- def test_groupby_list_infer_array_like(self):
- result = self.df.groupby(list(self.df['A'])).mean()
- expected = self.df.groupby(self.df['A']).mean()
- assert_frame_equal(result, expected, check_names=False)
-
- pytest.raises(Exception, self.df.groupby, list(self.df['A'][:-1]))
-
- # pathological case of ambiguity
- df = DataFrame({'foo': [0, 1],
- 'bar': [3, 4],
- 'val': np.random.randn(2)})
-
- result = df.groupby(['foo', 'bar']).mean()
- expected = df.groupby([df['foo'], df['bar']]).mean()[['val']]
-
- def test_groupby_keys_same_size_as_index(self):
- # GH 11185
- freq = 's'
- index = pd.date_range(start=pd.Timestamp('2015-09-29T11:34:44-0700'),
- periods=2, freq=freq)
- df = pd.DataFrame([['A', 10], ['B', 15]], columns=[
- 'metric', 'values'
- ], index=index)
- result = df.groupby([pd.Grouper(level=0, freq=freq), 'metric']).mean()
- expected = df.set_index([df.index, 'metric'])
-
- assert_frame_equal(result, expected)
-
- def test_groupby_one_row(self):
- # GH 11741
- df1 = pd.DataFrame(np.random.randn(1, 4), columns=list('ABCD'))
- pytest.raises(KeyError, df1.groupby, 'Z')
- df2 = pd.DataFrame(np.random.randn(2, 4), columns=list('ABCD'))
- pytest.raises(KeyError, df2.groupby, 'Z')
-
- def test_groupby_nat_exclude(self):
- # GH 6992
- df = pd.DataFrame(
- {'values': np.random.randn(8),
- 'dt': [np.nan, pd.Timestamp('2013-01-01'), np.nan, pd.Timestamp(
- '2013-02-01'), np.nan, pd.Timestamp('2013-02-01'), np.nan,
- pd.Timestamp('2013-01-01')],
- 'str': [np.nan, 'a', np.nan, 'a', np.nan, 'a', np.nan, 'b']})
- grouped = df.groupby('dt')
-
- expected = [pd.Index([1, 7]), pd.Index([3, 5])]
- keys = sorted(grouped.groups.keys())
- assert len(keys) == 2
- for k, e in zip(keys, expected):
- # grouped.groups keys are np.datetime64 with system tz
- # not to be affected by tz, only compare values
- tm.assert_index_equal(grouped.groups[k], e)
-
- # confirm obj is not filtered
- tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
- assert grouped.ngroups == 2
-
- expected = {
- Timestamp('2013-01-01 00:00:00'): np.array([1, 7], dtype=np.int64),
- Timestamp('2013-02-01 00:00:00'): np.array([3, 5], dtype=np.int64)
- }
-
- for k in grouped.indices:
- tm.assert_numpy_array_equal(grouped.indices[k], expected[k])
-
- tm.assert_frame_equal(
- grouped.get_group(Timestamp('2013-01-01')), df.iloc[[1, 7]])
- tm.assert_frame_equal(
- grouped.get_group(Timestamp('2013-02-01')), df.iloc[[3, 5]])
+ data = read_csv(StringIO(data), index_col=0)
+
+ grouped = data.groupby(['foo', 'bar', 'baz', 'spam'])
+
+ result = grouped.agg(np.mean)
+ expected = grouped.mean()
+ assert_frame_equal(result, expected)
+
+
+def test_groupby_series_with_name(df):
+ result = df.groupby(df['A']).mean()
+ result2 = df.groupby(df['A'], as_index=False).mean()
+ assert result.index.name == 'A'
+ assert 'A' in result2
+
+ result = df.groupby([df['A'], df['B']]).mean()
+ result2 = df.groupby([df['A'], df['B']],
+ as_index=False).mean()
+ assert result.index.names == ('A', 'B')
+ assert 'A' in result2
+ assert 'B' in result2
+
+
+def test_seriesgroupby_name_attr(df):
+ # GH 6265
+ result = df.groupby('A')['C']
+ assert result.count().name == 'C'
+ assert result.mean().name == 'C'
+
+ testFunc = lambda x: np.sum(x) * 2
+ assert result.agg(testFunc).name == 'C'
+
+
+def test_consistency_name():
+ # GH 12363
+
+ df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
+ 'foo', 'bar', 'foo', 'foo'],
+ 'B': ['one', 'one', 'two', 'two',
+ 'two', 'two', 'one', 'two'],
+ 'C': np.random.randn(8) + 1.0,
+ 'D': np.arange(8)})
+
+ expected = df.groupby(['A']).B.count()
+ result = df.B.groupby(df.A).count()
+ assert_series_equal(result, expected)
+
+
+def test_groupby_name_propagation(df):
+ # GH 6124
+ def summarize(df, name=None):
+ return Series({'count': 1, 'mean': 2, 'omissions': 3, }, name=name)
+
+ def summarize_random_name(df):
+ # Provide a different name for each Series. In this case, groupby
+ # should not attempt to propagate the Series name since they are
+ # inconsistent.
+ return Series({
+ 'count': 1,
+ 'mean': 2,
+ 'omissions': 3,
+ }, name=df.iloc[0]['A'])
+
+ metrics = df.groupby('A').apply(summarize)
+ assert metrics.columns.name is None
+ metrics = df.groupby('A').apply(summarize, 'metrics')
+ assert metrics.columns.name == 'metrics'
+ metrics = df.groupby('A').apply(summarize_random_name)
+ assert metrics.columns.name is None
+
+
+def test_groupby_nonstring_columns():
+ df = DataFrame([np.arange(10) for x in range(10)])
+ grouped = df.groupby(0)
+ result = grouped.mean()
+ expected = df.groupby(df[0]).mean()
+ assert_frame_equal(result, expected)
+
+
+def test_groupby_mixed_type_columns():
+ # GH 13432, unorderable types in py3
+ df = DataFrame([[0, 1, 2]], columns=['A', 'B', 0])
+ expected = DataFrame([[1, 2]], columns=['B', 0],
+ index=Index([0], name='A'))
+
+ result = df.groupby('A').first()
+ tm.assert_frame_equal(result, expected)
+
+ result = df.groupby('A').sum()
+ tm.assert_frame_equal(result, expected)
+
+
+def test_cython_grouper_series_bug_noncontig():
+ arr = np.empty((100, 100))
+ arr.fill(np.nan)
+ obj = Series(arr[:, 0], index=lrange(100))
+ inds = np.tile(lrange(10), 10)
+
+ result = obj.groupby(inds).agg(Series.median)
+ assert result.isna().all()
+
+
+def test_series_grouper_noncontig_index():
+ index = Index(tm.rands_array(10, 100))
+
+ values = Series(np.random.randn(50), index=index[::2])
+ labels = np.random.randint(0, 5, 50)
+
+ # it works!
+ grouped = values.groupby(labels)
+
+ # accessing the index elements causes segfault
+ f = lambda x: len(set(map(id, x.index)))
+ grouped.agg(f)
+
+def test_convert_objects_leave_decimal_alone():
+
+ s = Series(lrange(5))
+ labels = np.array(['a', 'b', 'c', 'd', 'e'], dtype='O')
+
+ def convert_fast(x):
+ return Decimal(str(x.mean()))
+
+ def convert_force_pure(x):
+ # base will be length 0
+ assert (len(x.base) > 0)
+ return Decimal(str(x.mean()))
+
+ grouped = s.groupby(labels)
+
+ result = grouped.agg(convert_fast)
+ assert result.dtype == np.object_
+ assert isinstance(result[0], Decimal)
+
+ result = grouped.agg(convert_force_pure)
+ assert result.dtype == np.object_
+ assert isinstance(result[0], Decimal)
+
+
+def test_groupby_dtype_inference_empty():
+ # GH 6733
+ df = DataFrame({'x': [], 'range': np.arange(0, dtype='int64')})
+ assert df['x'].dtype == np.float64
+
+ result = df.groupby('x').first()
+ exp_index = Index([], name='x', dtype=np.float64)
+ expected = DataFrame({'range': Series(
+ [], index=exp_index, dtype='int64')})
+ assert_frame_equal(result, expected, by_blocks=True)
+
+
+def test_groupby_list_infer_array_like(df):
+ result = df.groupby(list(df['A'])).mean()
+ expected = df.groupby(df['A']).mean()
+ assert_frame_equal(result, expected, check_names=False)
+
+ pytest.raises(Exception, df.groupby, list(df['A'][:-1]))
+
+ # pathological case of ambiguity
+ df = DataFrame({'foo': [0, 1],
+ 'bar': [3, 4],
+ 'val': np.random.randn(2)})
+
+ result = df.groupby(['foo', 'bar']).mean()
+ expected = df.groupby([df['foo'], df['bar']]).mean()[['val']]
+
+
+def test_groupby_keys_same_size_as_index():
+ # GH 11185
+ freq = 's'
+ index = pd.date_range(start=pd.Timestamp('2015-09-29T11:34:44-0700'),
+ periods=2, freq=freq)
+ df = pd.DataFrame([['A', 10], ['B', 15]], columns=[
+ 'metric', 'values'
+ ], index=index)
+ result = df.groupby([pd.Grouper(level=0, freq=freq), 'metric']).mean()
+ expected = df.set_index([df.index, 'metric'])
+
+ assert_frame_equal(result, expected)
+
+
+def test_groupby_one_row():
+ # GH 11741
+ df1 = pd.DataFrame(np.random.randn(1, 4), columns=list('ABCD'))
+ pytest.raises(KeyError, df1.groupby, 'Z')
+ df2 = pd.DataFrame(np.random.randn(2, 4), columns=list('ABCD'))
+ pytest.raises(KeyError, df2.groupby, 'Z')
+
+
+def test_groupby_nat_exclude():
+ # GH 6992
+ df = pd.DataFrame(
+ {'values': np.random.randn(8),
+ 'dt': [np.nan, pd.Timestamp('2013-01-01'), np.nan, pd.Timestamp(
+ '2013-02-01'), np.nan, pd.Timestamp('2013-02-01'), np.nan,
+ pd.Timestamp('2013-01-01')],
+ 'str': [np.nan, 'a', np.nan, 'a', np.nan, 'a', np.nan, 'b']})
+ grouped = df.groupby('dt')
+
+ expected = [pd.Index([1, 7]), pd.Index([3, 5])]
+ keys = sorted(grouped.groups.keys())
+ assert len(keys) == 2
+ for k, e in zip(keys, expected):
+ # grouped.groups keys are np.datetime64 with system tz
+ # not to be affected by tz, only compare values
+ tm.assert_index_equal(grouped.groups[k], e)
+
+ # confirm obj is not filtered
+ tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
+ assert grouped.ngroups == 2
+
+ expected = {
+ Timestamp('2013-01-01 00:00:00'): np.array([1, 7], dtype=np.int64),
+ Timestamp('2013-02-01 00:00:00'): np.array([3, 5], dtype=np.int64)
+ }
+
+ for k in grouped.indices:
+ tm.assert_numpy_array_equal(grouped.indices[k], expected[k])
+
+ tm.assert_frame_equal(
+ grouped.get_group(Timestamp('2013-01-01')), df.iloc[[1, 7]])
+ tm.assert_frame_equal(
+ grouped.get_group(Timestamp('2013-02-01')), df.iloc[[3, 5]])
+
+ pytest.raises(KeyError, grouped.get_group, pd.NaT)
+
+ nan_df = DataFrame({'nan': [np.nan, np.nan, np.nan],
+ 'nat': [pd.NaT, pd.NaT, pd.NaT]})
+ assert nan_df['nan'].dtype == 'float64'
+ assert nan_df['nat'].dtype == 'datetime64[ns]'
+
+ for key in ['nan', 'nat']:
+ grouped = nan_df.groupby(key)
+ assert grouped.groups == {}
+ assert grouped.ngroups == 0
+ assert grouped.indices == {}
+ pytest.raises(KeyError, grouped.get_group, np.nan)
pytest.raises(KeyError, grouped.get_group, pd.NaT)
- nan_df = DataFrame({'nan': [np.nan, np.nan, np.nan],
- 'nat': [pd.NaT, pd.NaT, pd.NaT]})
- assert nan_df['nan'].dtype == 'float64'
- assert nan_df['nat'].dtype == 'datetime64[ns]'
-
- for key in ['nan', 'nat']:
- grouped = nan_df.groupby(key)
- assert grouped.groups == {}
- assert grouped.ngroups == 0
- assert grouped.indices == {}
- pytest.raises(KeyError, grouped.get_group, np.nan)
- pytest.raises(KeyError, grouped.get_group, pd.NaT)
-
- def test_sparse_friendly(self):
- sdf = self.df[['C', 'D']].to_sparse()
- with catch_warnings(record=True):
- panel = tm.makePanel()
- tm.add_nans(panel)
-
- def _check_work(gp):
- gp.mean()
- gp.agg(np.mean)
- dict(iter(gp))
-
- # it works!
- _check_work(sdf.groupby(lambda x: x // 2))
- _check_work(sdf['C'].groupby(lambda x: x // 2))
- _check_work(sdf.groupby(self.df['A']))
-
- # do this someday
- # _check_work(panel.groupby(lambda x: x.month, axis=1))
-
- def test_panel_groupby(self):
- with catch_warnings(record=True):
- self.panel = tm.makePanel()
- tm.add_nans(self.panel)
- grouped = self.panel.groupby({'ItemA': 0, 'ItemB': 0, 'ItemC': 1},
- axis='items')
- agged = grouped.mean()
- agged2 = grouped.agg(lambda x: x.mean('items'))
-
- tm.assert_panel_equal(agged, agged2)
-
- tm.assert_index_equal(agged.items, Index([0, 1]))
-
- grouped = self.panel.groupby(lambda x: x.month, axis='major')
- agged = grouped.mean()
-
- exp = Index(sorted(list(set(self.panel.major_axis.month))))
- tm.assert_index_equal(agged.major_axis, exp)
-
- grouped = self.panel.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
- axis='minor')
- agged = grouped.mean()
- tm.assert_index_equal(agged.minor_axis, Index([0, 1]))
-
- def test_groupby_2d_malformed(self):
- d = DataFrame(index=lrange(2))
- d['group'] = ['g1', 'g2']
- d['zeros'] = [0, 0]
- d['ones'] = [1, 1]
- d['label'] = ['l1', 'l2']
- tmp = d.groupby(['group']).mean()
- res_values = np.array([[0, 1], [0, 1]], dtype=np.int64)
- tm.assert_index_equal(tmp.columns, Index(['zeros', 'ones']))
- tm.assert_numpy_array_equal(tmp.values, res_values)
-
- def test_int32_overflow(self):
- B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)
- ))
- A = np.arange(25000)
- df = DataFrame({'A': A,
- 'B': B,
- 'C': A,
- 'D': B,
- 'E': np.random.randn(25000)})
-
- left = df.groupby(['A', 'B', 'C', 'D']).sum()
- right = df.groupby(['D', 'C', 'B', 'A']).sum()
- assert len(left) == len(right)
-
- def test_groupby_sort_multi(self):
- df = DataFrame({'a': ['foo', 'bar', 'baz'],
- 'b': [3, 2, 1],
- 'c': [0, 1, 2],
- 'd': np.random.randn(3)})
-
- tups = lmap(tuple, df[['a', 'b', 'c']].values)
- tups = com._asarray_tuplesafe(tups)
- result = df.groupby(['a', 'b', 'c'], sort=True).sum()
- tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
- tups = lmap(tuple, df[['c', 'a', 'b']].values)
- tups = com._asarray_tuplesafe(tups)
- result = df.groupby(['c', 'a', 'b'], sort=True).sum()
- tm.assert_numpy_array_equal(result.index.values, tups)
+def test_sparse_friendly(df):
+ sdf = df[['C', 'D']].to_sparse()
+ with catch_warnings(record=True):
+ panel = tm.makePanel()
+ tm.add_nans(panel)
+
+ def _check_work(gp):
+ gp.mean()
+ gp.agg(np.mean)
+ dict(iter(gp))
+
+ # it works!
+ _check_work(sdf.groupby(lambda x: x // 2))
+ _check_work(sdf['C'].groupby(lambda x: x // 2))
+ _check_work(sdf.groupby(df['A']))
+
+ # do this someday
+ # _check_work(panel.groupby(lambda x: x.month, axis=1))
+
+
+def test_panel_groupby():
+ with catch_warnings(record=True):
+ panel = tm.makePanel()
+ tm.add_nans(panel)
+ grouped = panel.groupby({'ItemA': 0, 'ItemB': 0, 'ItemC': 1},
+ axis='items')
+ agged = grouped.mean()
+ agged2 = grouped.agg(lambda x: x.mean('items'))
+
+ tm.assert_panel_equal(agged, agged2)
+
+ tm.assert_index_equal(agged.items, Index([0, 1]))
- tups = lmap(tuple, df[['b', 'c', 'a']].values)
+ grouped = panel.groupby(lambda x: x.month, axis='major')
+ agged = grouped.mean()
+
+ exp = Index(sorted(list(set(panel.major_axis.month))))
+ tm.assert_index_equal(agged.major_axis, exp)
+
+ grouped = panel.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
+ axis='minor')
+ agged = grouped.mean()
+ tm.assert_index_equal(agged.minor_axis, Index([0, 1]))
+
+
+def test_groupby_2d_malformed():
+ d = DataFrame(index=lrange(2))
+ d['group'] = ['g1', 'g2']
+ d['zeros'] = [0, 0]
+ d['ones'] = [1, 1]
+ d['label'] = ['l1', 'l2']
+ tmp = d.groupby(['group']).mean()
+ res_values = np.array([[0, 1], [0, 1]], dtype=np.int64)
+ tm.assert_index_equal(tmp.columns, Index(['zeros', 'ones']))
+ tm.assert_numpy_array_equal(tmp.values, res_values)
+
+
+def test_int32_overflow():
+ B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)
+ ))
+ A = np.arange(25000)
+ df = DataFrame({'A': A,
+ 'B': B,
+ 'C': A,
+ 'D': B,
+ 'E': np.random.randn(25000)})
+
+ left = df.groupby(['A', 'B', 'C', 'D']).sum()
+ right = df.groupby(['D', 'C', 'B', 'A']).sum()
+ assert len(left) == len(right)
+
+
+def test_groupby_sort_multi():
+ df = DataFrame({'a': ['foo', 'bar', 'baz'],
+ 'b': [3, 2, 1],
+ 'c': [0, 1, 2],
+ 'd': np.random.randn(3)})
+
+ tups = lmap(tuple, df[['a', 'b', 'c']].values)
+ tups = com._asarray_tuplesafe(tups)
+ result = df.groupby(['a', 'b', 'c'], sort=True).sum()
+ tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
+
+ tups = lmap(tuple, df[['c', 'a', 'b']].values)
+ tups = com._asarray_tuplesafe(tups)
+ result = df.groupby(['c', 'a', 'b'], sort=True).sum()
+ tm.assert_numpy_array_equal(result.index.values, tups)
+
+ tups = lmap(tuple, df[['b', 'c', 'a']].values)
+ tups = com._asarray_tuplesafe(tups)
+ result = df.groupby(['b', 'c', 'a'], sort=True).sum()
+ tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
+
+ df = DataFrame({'a': [0, 1, 2, 0, 1, 2],
+ 'b': [0, 0, 0, 1, 1, 1],
+ 'd': np.random.randn(6)})
+ grouped = df.groupby(['a', 'b'])['d']
+ result = grouped.sum()
+
+ def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
+ tups = lmap(tuple, df[keys].values)
tups = com._asarray_tuplesafe(tups)
- result = df.groupby(['b', 'c', 'a'], sort=True).sum()
- tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
-
- df = DataFrame({'a': [0, 1, 2, 0, 1, 2],
- 'b': [0, 0, 0, 1, 1, 1],
- 'd': np.random.randn(6)})
- grouped = df.groupby(['a', 'b'])['d']
- result = grouped.sum()
- _check_groupby(df, result, ['a', 'b'], 'd')
-
- def test_intercept_builtin_sum(self):
- s = Series([1., 2., np.nan, 3.])
- grouped = s.groupby([0, 1, 2, 2])
-
- result = grouped.agg(builtins.sum)
- result2 = grouped.apply(builtins.sum)
- expected = grouped.sum()
- assert_series_equal(result, expected)
- assert_series_equal(result2, expected)
-
- def test_rank_apply(self):
- lev1 = tm.rands_array(10, 100)
- lev2 = tm.rands_array(10, 130)
- lab1 = np.random.randint(0, 100, size=500)
- lab2 = np.random.randint(0, 130, size=500)
-
- df = DataFrame({'value': np.random.randn(500),
- 'key1': lev1.take(lab1),
- 'key2': lev2.take(lab2)})
-
- result = df.groupby(['key1', 'key2']).value.rank()
-
- expected = []
- for key, piece in df.groupby(['key1', 'key2']):
- expected.append(piece.value.rank())
- expected = concat(expected, axis=0)
- expected = expected.reindex(result.index)
- assert_series_equal(result, expected)
-
- result = df.groupby(['key1', 'key2']).value.rank(pct=True)
-
- expected = []
- for key, piece in df.groupby(['key1', 'key2']):
- expected.append(piece.value.rank(pct=True))
- expected = concat(expected, axis=0)
- expected = expected.reindex(result.index)
- assert_series_equal(result, expected)
-
- @pytest.mark.parametrize("grps", [
- ['qux'], ['qux', 'quux']])
- @pytest.mark.parametrize("vals", [
- [2, 2, 8, 2, 6],
- [pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'),
- pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'),
- pd.Timestamp('2018-01-06')]])
- @pytest.mark.parametrize("ties_method,ascending,pct,exp", [
- ('average', True, False, [2., 2., 5., 2., 4.]),
- ('average', True, True, [0.4, 0.4, 1.0, 0.4, 0.8]),
- ('average', False, False, [4., 4., 1., 4., 2.]),
- ('average', False, True, [.8, .8, .2, .8, .4]),
- ('min', True, False, [1., 1., 5., 1., 4.]),
- ('min', True, True, [0.2, 0.2, 1.0, 0.2, 0.8]),
- ('min', False, False, [3., 3., 1., 3., 2.]),
- ('min', False, True, [.6, .6, .2, .6, .4]),
- ('max', True, False, [3., 3., 5., 3., 4.]),
- ('max', True, True, [0.6, 0.6, 1.0, 0.6, 0.8]),
- ('max', False, False, [5., 5., 1., 5., 2.]),
- ('max', False, True, [1., 1., .2, 1., .4]),
- ('first', True, False, [1., 2., 5., 3., 4.]),
- ('first', True, True, [0.2, 0.4, 1.0, 0.6, 0.8]),
- ('first', False, False, [3., 4., 1., 5., 2.]),
- ('first', False, True, [.6, .8, .2, 1., .4]),
- ('dense', True, False, [1., 1., 3., 1., 2.]),
- ('dense', True, True, [0.2, 0.2, 0.6, 0.2, 0.4]),
- ('dense', False, False, [3., 3., 1., 3., 2.]),
- ('dense', False, True, [.6, .6, .2, .6, .4]),
- ])
- def test_rank_args(self, grps, vals, ties_method, ascending, pct, exp):
- key = np.repeat(grps, len(vals))
- vals = vals * len(grps)
- df = DataFrame({'key': key, 'val': vals})
- result = df.groupby('key').rank(method=ties_method,
- ascending=ascending, pct=pct)
-
- exp_df = DataFrame(exp * len(grps), columns=['val'])
- assert_frame_equal(result, exp_df)
-
- @pytest.mark.parametrize("grps", [
- ['qux'], ['qux', 'quux']])
- @pytest.mark.parametrize("vals", [
- [-np.inf, -np.inf, np.nan, 1., np.nan, np.inf, np.inf],
- ])
- @pytest.mark.parametrize("ties_method,ascending,na_option,exp", [
- ('average', True, 'keep', [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]),
- ('average', True, 'top', [3.5, 3.5, 1.5, 5., 1.5, 6.5, 6.5]),
- ('average', True, 'bottom', [1.5, 1.5, 6.5, 3., 6.5, 4.5, 4.5]),
- ('average', False, 'keep', [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]),
- ('average', False, 'top', [6.5, 6.5, 1.5, 5., 1.5, 3.5, 3.5]),
- ('average', False, 'bottom', [4.5, 4.5, 6.5, 3., 6.5, 1.5, 1.5]),
- ('min', True, 'keep', [1., 1., np.nan, 3., np.nan, 4., 4.]),
- ('min', True, 'top', [3., 3., 1., 5., 1., 6., 6.]),
- ('min', True, 'bottom', [1., 1., 6., 3., 6., 4., 4.]),
- ('min', False, 'keep', [4., 4., np.nan, 3., np.nan, 1., 1.]),
- ('min', False, 'top', [6., 6., 1., 5., 1., 3., 3.]),
- ('min', False, 'bottom', [4., 4., 6., 3., 6., 1., 1.]),
- ('max', True, 'keep', [2., 2., np.nan, 3., np.nan, 5., 5.]),
- ('max', True, 'top', [4., 4., 2., 5., 2., 7., 7.]),
- ('max', True, 'bottom', [2., 2., 7., 3., 7., 5., 5.]),
- ('max', False, 'keep', [5., 5., np.nan, 3., np.nan, 2., 2.]),
- ('max', False, 'top', [7., 7., 2., 5., 2., 4., 4.]),
- ('max', False, 'bottom', [5., 5., 7., 3., 7., 2., 2.]),
- ('first', True, 'keep', [1., 2., np.nan, 3., np.nan, 4., 5.]),
- ('first', True, 'top', [3., 4., 1., 5., 2., 6., 7.]),
- ('first', True, 'bottom', [1., 2., 6., 3., 7., 4., 5.]),
- ('first', False, 'keep', [4., 5., np.nan, 3., np.nan, 1., 2.]),
- ('first', False, 'top', [6., 7., 1., 5., 2., 3., 4.]),
- ('first', False, 'bottom', [4., 5., 6., 3., 7., 1., 2.]),
- ('dense', True, 'keep', [1., 1., np.nan, 2., np.nan, 3., 3.]),
- ('dense', True, 'top', [2., 2., 1., 3., 1., 4., 4.]),
- ('dense', True, 'bottom', [1., 1., 4., 2., 4., 3., 3.]),
- ('dense', False, 'keep', [3., 3., np.nan, 2., np.nan, 1., 1.]),
- ('dense', False, 'top', [4., 4., 1., 3., 1., 2., 2.]),
- ('dense', False, 'bottom', [3., 3., 4., 2., 4., 1., 1.])
- ])
- def test_infs_n_nans(self, grps, vals, ties_method, ascending, na_option,
- exp):
- # GH 20561
- key = np.repeat(grps, len(vals))
- vals = vals * len(grps)
- df = DataFrame({'key': key, 'val': vals})
- result = df.groupby('key').rank(method=ties_method,
- ascending=ascending,
- na_option=na_option)
- exp_df = DataFrame(exp * len(grps), columns=['val'])
- assert_frame_equal(result, exp_df)
-
- @pytest.mark.parametrize("grps", [
- ['qux'], ['qux', 'quux']])
- @pytest.mark.parametrize("vals", [
- [2, 2, np.nan, 8, 2, 6, np.nan, np.nan], # floats
- [pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'), np.nan,
- pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'),
- pd.Timestamp('2018-01-06'), np.nan, np.nan]
- ])
- @pytest.mark.parametrize("ties_method,ascending,na_option,pct,exp", [
- ('average', True, 'keep', False,
- [2., 2., np.nan, 5., 2., 4., np.nan, np.nan]),
- ('average', True, 'keep', True,
- [0.4, 0.4, np.nan, 1.0, 0.4, 0.8, np.nan, np.nan]),
- ('average', False, 'keep', False,
- [4., 4., np.nan, 1., 4., 2., np.nan, np.nan]),
- ('average', False, 'keep', True,
- [.8, 0.8, np.nan, 0.2, 0.8, 0.4, np.nan, np.nan]),
- ('min', True, 'keep', False,
- [1., 1., np.nan, 5., 1., 4., np.nan, np.nan]),
- ('min', True, 'keep', True,
- [0.2, 0.2, np.nan, 1.0, 0.2, 0.8, np.nan, np.nan]),
- ('min', False, 'keep', False,
- [3., 3., np.nan, 1., 3., 2., np.nan, np.nan]),
- ('min', False, 'keep', True,
- [.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
- ('max', True, 'keep', False,
- [3., 3., np.nan, 5., 3., 4., np.nan, np.nan]),
- ('max', True, 'keep', True,
- [0.6, 0.6, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
- ('max', False, 'keep', False,
- [5., 5., np.nan, 1., 5., 2., np.nan, np.nan]),
- ('max', False, 'keep', True,
- [1., 1., np.nan, 0.2, 1., 0.4, np.nan, np.nan]),
- ('first', True, 'keep', False,
- [1., 2., np.nan, 5., 3., 4., np.nan, np.nan]),
- ('first', True, 'keep', True,
- [0.2, 0.4, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
- ('first', False, 'keep', False,
- [3., 4., np.nan, 1., 5., 2., np.nan, np.nan]),
- ('first', False, 'keep', True,
- [.6, 0.8, np.nan, 0.2, 1., 0.4, np.nan, np.nan]),
- ('dense', True, 'keep', False,
- [1., 1., np.nan, 3., 1., 2., np.nan, np.nan]),
- ('dense', True, 'keep', True,
- [0.2, 0.2, np.nan, 0.6, 0.2, 0.4, np.nan, np.nan]),
- ('dense', False, 'keep', False,
- [3., 3., np.nan, 1., 3., 2., np.nan, np.nan]),
- ('dense', False, 'keep', True,
- [.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
- ('average', True, 'no_na', False, [2., 2., 7., 5., 2., 4., 7., 7.]),
- ('average', True, 'no_na', True,
- [0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875]),
- ('average', False, 'no_na', False, [4., 4., 7., 1., 4., 2., 7., 7.]),
- ('average', False, 'no_na', True,
- [0.5, 0.5, 0.875, 0.125, 0.5, 0.25, 0.875, 0.875]),
- ('min', True, 'no_na', False, [1., 1., 6., 5., 1., 4., 6., 6.]),
- ('min', True, 'no_na', True,
- [0.125, 0.125, 0.75, 0.625, 0.125, 0.5, 0.75, 0.75]),
- ('min', False, 'no_na', False, [3., 3., 6., 1., 3., 2., 6., 6.]),
- ('min', False, 'no_na', True,
- [0.375, 0.375, 0.75, 0.125, 0.375, 0.25, 0.75, 0.75]),
- ('max', True, 'no_na', False, [3., 3., 8., 5., 3., 4., 8., 8.]),
- ('max', True, 'no_na', True,
- [0.375, 0.375, 1., 0.625, 0.375, 0.5, 1., 1.]),
- ('max', False, 'no_na', False, [5., 5., 8., 1., 5., 2., 8., 8.]),
- ('max', False, 'no_na', True,
- [0.625, 0.625, 1., 0.125, 0.625, 0.25, 1., 1.]),
- ('first', True, 'no_na', False, [1., 2., 6., 5., 3., 4., 7., 8.]),
- ('first', True, 'no_na', True,
- [0.125, 0.25, 0.75, 0.625, 0.375, 0.5, 0.875, 1.]),
- ('first', False, 'no_na', False, [3., 4., 6., 1., 5., 2., 7., 8.]),
- ('first', False, 'no_na', True,
- [0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.]),
- ('dense', True, 'no_na', False, [1., 1., 4., 3., 1., 2., 4., 4.]),
- ('dense', True, 'no_na', True,
- [0.125, 0.125, 0.5, 0.375, 0.125, 0.25, 0.5, 0.5]),
- ('dense', False, 'no_na', False, [3., 3., 4., 1., 3., 2., 4., 4.]),
- ('dense', False, 'no_na', True,
- [0.375, 0.375, 0.5, 0.125, 0.375, 0.25, 0.5, 0.5])
- ])
- def test_rank_args_missing(self, grps, vals, ties_method, ascending,
- na_option, pct, exp):
- key = np.repeat(grps, len(vals))
- vals = vals * len(grps)
- df = DataFrame({'key': key, 'val': vals})
- result = df.groupby('key').rank(method=ties_method,
- ascending=ascending,
- na_option=na_option, pct=pct)
-
- exp_df = DataFrame(exp * len(grps), columns=['val'])
- assert_frame_equal(result, exp_df)
-
- @pytest.mark.parametrize("pct,exp", [
- (False, [3., 3., 3., 3., 3.]),
- (True, [.6, .6, .6, .6, .6])])
- def test_rank_resets_each_group(self, pct, exp):
- df = DataFrame(
- {'key': ['a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b'],
- 'val': [1] * 10}
- )
- result = df.groupby('key').rank(pct=pct)
- exp_df = DataFrame(exp * 2, columns=['val'])
- assert_frame_equal(result, exp_df)
-
- def test_rank_avg_even_vals(self):
- df = DataFrame({'key': ['a'] * 4, 'val': [1] * 4})
- result = df.groupby('key').rank()
- exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=['val'])
- assert_frame_equal(result, exp_df)
-
- @pytest.mark.parametrize("ties_method", [
- 'average', 'min', 'max', 'first', 'dense'])
- @pytest.mark.parametrize("ascending", [True, False])
- @pytest.mark.parametrize("na_option", ["keep", "top", "bottom"])
- @pytest.mark.parametrize("pct", [True, False])
- @pytest.mark.parametrize("vals", [
- ['bar', 'bar', 'foo', 'bar', 'baz'],
- ['bar', np.nan, 'foo', np.nan, 'baz']
- ])
- def test_rank_object_raises(self, ties_method, ascending, na_option,
- pct, vals):
- df = DataFrame({'key': ['foo'] * 5, 'val': vals})
- with tm.assert_raises_regex(TypeError, "not callable"):
- df.groupby('key').rank(method=ties_method,
- ascending=ascending,
- na_option=na_option, pct=pct)
-
- @pytest.mark.parametrize("agg_func", ['any', 'all'])
- @pytest.mark.parametrize("skipna", [True, False])
- @pytest.mark.parametrize("vals", [
- ['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
- [1, 2, 3], [1, 0, 0], [0, 0, 0],
- [1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
- [True, True, True], [True, False, False], [False, False, False],
- [np.nan, np.nan, np.nan]
- ])
- def test_groupby_bool_aggs(self, agg_func, skipna, vals):
- df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
-
- # Figure out expectation using Python builtin
- exp = getattr(compat.builtins, agg_func)(vals)
-
- # edge case for missing data with skipna and 'any'
- if skipna and all(isna(vals)) and agg_func == 'any':
- exp = False
-
- exp_df = DataFrame([exp] * 2, columns=['val'], index=pd.Index(
- ['a', 'b'], name='key'))
- result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
- assert_frame_equal(result, exp_df)
-
- def test_dont_clobber_name_column(self):
- df = DataFrame({'key': ['a', 'a', 'a', 'b', 'b', 'b'],
- 'name': ['foo', 'bar', 'baz'] * 2})
-
- result = df.groupby('key').apply(lambda x: x)
- assert_frame_equal(result, df)
-
- def test_skip_group_keys(self):
- from pandas import concat
-
- tsf = tm.makeTimeDataFrame()
-
- grouped = tsf.groupby(lambda x: x.month, group_keys=False)
- result = grouped.apply(lambda x: x.sort_values(by='A')[:3])
-
- pieces = []
- for key, group in grouped:
- pieces.append(group.sort_values(by='A')[:3])
-
- expected = concat(pieces)
- assert_frame_equal(result, expected)
-
- grouped = tsf['A'].groupby(lambda x: x.month, group_keys=False)
- result = grouped.apply(lambda x: x.sort_values()[:3])
-
- pieces = []
- for key, group in grouped:
- pieces.append(group.sort_values()[:3])
-
- expected = concat(pieces)
- assert_series_equal(result, expected)
-
- def test_no_nonsense_name(self):
- # GH #995
- s = self.frame['C'].copy()
- s.name = None
-
- result = s.groupby(self.frame['A']).agg(np.sum)
- assert result.name is None
-
- def test_multifunc_sum_bug(self):
- # GH #1065
- x = DataFrame(np.arange(9).reshape(3, 3))
- x['test'] = 0
- x['fl'] = [1.3, 1.5, 1.6]
-
- grouped = x.groupby('test')
- result = grouped.agg({'fl': 'sum', 2: 'size'})
- assert result['fl'].dtype == np.float64
-
- def test_handle_dict_return_value(self):
- def f(group):
- return {'max': group.max(), 'min': group.min()}
-
- def g(group):
- return Series({'max': group.max(), 'min': group.min()})
-
- result = self.df.groupby('A')['C'].apply(f)
- expected = self.df.groupby('A')['C'].apply(g)
-
- assert isinstance(result, Series)
- assert_series_equal(result, expected)
-
- def test_set_group_name(self):
- def f(group):
- assert group.name is not None
- return group
-
- def freduce(group):
- assert group.name is not None
- return group.sum()
-
- def foo(x):
- return freduce(x)
-
- def _check_all(grouped):
- # make sure all these work
- grouped.apply(f)
- grouped.aggregate(freduce)
- grouped.aggregate({'C': freduce, 'D': freduce})
- grouped.transform(f)
-
- grouped['C'].apply(f)
- grouped['C'].aggregate(freduce)
- grouped['C'].aggregate([freduce, foo])
- grouped['C'].transform(f)
+ expected = f(df.groupby(tups)[field])
+ for k, v in compat.iteritems(expected):
+ assert (result[k] == v)
- _check_all(self.df.groupby('A'))
- _check_all(self.df.groupby(['A', 'B']))
-
- def test_group_name_available_in_inference_pass(self):
- # gh-15062
- df = pd.DataFrame({'a': [0, 0, 1, 1, 2, 2], 'b': np.arange(6)})
-
- names = []
-
- def f(group):
- names.append(group.name)
- return group.copy()
-
- df.groupby('a', sort=False, group_keys=False).apply(f)
- # we expect 2 zeros because we call ``f`` once to see if a faster route
- # can be used.
- expected_names = [0, 0, 1, 2]
- assert names == expected_names
-
- def test_no_dummy_key_names(self):
- # see gh-1291
- result = self.df.groupby(self.df['A'].values).sum()
- assert result.index.name is None
-
- result = self.df.groupby([self.df['A'].values, self.df['B'].values
- ]).sum()
- assert result.index.names == (None, None)
-
- def test_groupby_sort_multiindex_series(self):
- # series multiindex groupby sort argument was not being passed through
- # _compress_group_index
- # GH 9444
- index = MultiIndex(levels=[[1, 2], [1, 2]],
- labels=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
- names=['a', 'b'])
- mseries = Series([0, 1, 2, 3, 4, 5], index=index)
- index = MultiIndex(levels=[[1, 2], [1, 2]],
- labels=[[0, 0, 1], [1, 0, 0]], names=['a', 'b'])
- mseries_result = Series([0, 2, 4], index=index)
-
- result = mseries.groupby(level=['a', 'b'], sort=False).first()
- assert_series_equal(result, mseries_result)
- result = mseries.groupby(level=['a', 'b'], sort=True).first()
- assert_series_equal(result, mseries_result.sort_index())
-
- def test_groupby_reindex_inside_function(self):
-
- periods = 1000
- ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods)
- df = DataFrame({'high': np.arange(
- periods), 'low': np.arange(periods)}, index=ind)
-
- def agg_before(hour, func, fix=False):
- """
- Run an aggregate func on the subset of data.
- """
-
- def _func(data):
- d = data.loc[data.index.map(
- lambda x: x.hour < 11)].dropna()
- if fix:
- data[data.index[0]]
- if len(d) == 0:
- return None
- return func(d)
-
- return _func
-
- def afunc(data):
- d = data.select(lambda x: x.hour < 11).dropna()
- return np.max(d)
-
- grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
- closure_bad = grouped.agg({'high': agg_before(11, np.max)})
- closure_good = grouped.agg({'high': agg_before(11, np.max, True)})
-
- assert_frame_equal(closure_bad, closure_good)
-
- def test_cython_median(self):
- df = DataFrame(np.random.randn(1000))
- df.values[::2] = np.nan
-
- labels = np.random.randint(0, 50, size=1000).astype(float)
- labels[::17] = np.nan
-
- result = df.groupby(labels).median()
- exp = df.groupby(labels).agg(nanops.nanmedian)
- assert_frame_equal(result, exp)
-
- df = DataFrame(np.random.randn(1000, 5))
- rs = df.groupby(labels).agg(np.median)
- xp = df.groupby(labels).median()
- assert_frame_equal(rs, xp)
-
- def test_median_empty_bins(self):
- df = pd.DataFrame(np.random.randint(0, 44, 500))
-
- grps = range(0, 55, 5)
- bins = pd.cut(df[0], grps)
-
- result = df.groupby(bins).median()
- expected = df.groupby(bins).agg(lambda x: x.median())
- assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize("dtype", [
- 'int8', 'int16', 'int32', 'int64', 'float32', 'float64'])
- @pytest.mark.parametrize("method,data", [
- ('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
- ('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
- ('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
- ('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
- ('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
- 'args': [1]}),
- ('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
- 'out_type': 'int64'})
- ])
- def test_groupby_non_arithmetic_agg_types(self, dtype, method, data):
- # GH9311, GH6620
- df = pd.DataFrame(
- [{'a': 1, 'b': 1},
- {'a': 1, 'b': 2},
- {'a': 2, 'b': 3},
- {'a': 2, 'b': 4}])
-
- df['b'] = df.b.astype(dtype)
-
- if 'args' not in data:
- data['args'] = []
-
- if 'out_type' in data:
- out_type = data['out_type']
- else:
- out_type = dtype
-
- exp = data['df']
- df_out = pd.DataFrame(exp)
-
- df_out['b'] = df_out.b.astype(out_type)
- df_out.set_index('a', inplace=True)
-
- grpd = df.groupby('a')
- t = getattr(grpd, method)(*data['args'])
- assert_frame_equal(t, df_out)
-
- def test_groupby_non_arithmetic_agg_intlike_precision(self):
- # GH9311, GH6620
- c = 24650000000000000
-
- inputs = ((Timestamp('2011-01-15 12:50:28.502376'),
- Timestamp('2011-01-20 12:50:28.593448')), (1 + c, 2 + c))
-
- for i in inputs:
- df = pd.DataFrame([{'a': 1, 'b': i[0]}, {'a': 1, 'b': i[1]}])
-
- grp_exp = {'first': {'expected': i[0]},
- 'last': {'expected': i[1]},
- 'min': {'expected': i[0]},
- 'max': {'expected': i[1]},
- 'nth': {'expected': i[1],
- 'args': [1]},
- 'count': {'expected': 2}}
-
- for method, data in compat.iteritems(grp_exp):
- if 'args' not in data:
- data['args'] = []
-
- grpd = df.groupby('a')
- res = getattr(grpd, method)(*data['args'])
- assert res.iloc[0].b == data['expected']
-
- def test_groupby_multiindex_missing_pair(self):
- # GH9049
- df = DataFrame({'group1': ['a', 'a', 'a', 'b'],
- 'group2': ['c', 'c', 'd', 'c'],
- 'value': [1, 1, 1, 5]})
- df = df.set_index(['group1', 'group2'])
- df_grouped = df.groupby(level=['group1', 'group2'], sort=True)
-
- res = df_grouped.agg('sum')
- idx = MultiIndex.from_tuples(
- [('a', 'c'), ('a', 'd'), ('b', 'c')], names=['group1', 'group2'])
- exp = DataFrame([[2], [1], [5]], index=idx, columns=['value'])
-
- tm.assert_frame_equal(res, exp)
-
- def test_groupby_multiindex_not_lexsorted(self):
- # GH 11640
-
- # define the lexsorted version
- lexsorted_mi = MultiIndex.from_tuples(
- [('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
- lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
- assert lexsorted_df.columns.is_lexsorted()
-
- # define the non-lexsorted version
- not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
- data=[[1, 'b1', 'c1', 3],
- [1, 'b2', 'c2', 4]])
- not_lexsorted_df = not_lexsorted_df.pivot_table(
- index='a', columns=['b', 'c'], values='d')
- not_lexsorted_df = not_lexsorted_df.reset_index()
- assert not not_lexsorted_df.columns.is_lexsorted()
-
- # compare the results
- tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
-
- expected = lexsorted_df.groupby('a').mean()
- with tm.assert_produces_warning(PerformanceWarning):
- result = not_lexsorted_df.groupby('a').mean()
- tm.assert_frame_equal(expected, result)
-
- # a transforming function should work regardless of sort
- # GH 14776
- df = DataFrame({'x': ['a', 'a', 'b', 'a'],
- 'y': [1, 1, 2, 2],
- 'z': [1, 2, 3, 4]}).set_index(['x', 'y'])
- assert not df.index.is_lexsorted()
-
- for level in [0, 1, [0, 1]]:
- for sort in [False, True]:
- result = df.groupby(level=level, sort=sort).apply(
- DataFrame.drop_duplicates)
- expected = df
- tm.assert_frame_equal(expected, result)
-
- result = df.sort_index().groupby(level=level, sort=sort).apply(
- DataFrame.drop_duplicates)
- expected = df.sort_index()
- tm.assert_frame_equal(expected, result)
-
- def test_gb_apply_list_of_unequal_len_arrays(self):
-
- # GH1738
- df = DataFrame({'group1': ['a', 'a', 'a', 'b', 'b', 'b', 'a', 'a', 'a',
- 'b', 'b', 'b'],
- 'group2': ['c', 'c', 'd', 'd', 'd', 'e', 'c', 'c', 'd',
- 'd', 'd', 'e'],
- 'weight': [1.1, 2, 3, 4, 5, 6, 2, 4, 6, 8, 1, 2],
- 'value': [7.1, 8, 9, 10, 11, 12, 8, 7, 6, 5, 4, 3]})
- df = df.set_index(['group1', 'group2'])
- df_grouped = df.groupby(level=['group1', 'group2'], sort=True)
-
- def noddy(value, weight):
- out = np.array(value * weight).repeat(3)
- return out
-
- # the kernel function returns arrays of unequal length
- # pandas sniffs the first one, sees it's an array and not
- # a list, and assumed the rest are of equal length
- # and so tries a vstack
-
- # don't die
- df_grouped.apply(lambda x: noddy(x.value, x.weight))
-
- def test_fill_constistency(self):
-
- # GH9221
- # pass thru keyword arguments to the generated wrapper
- # are set if the passed kw is None (only)
- df = DataFrame(index=pd.MultiIndex.from_product(
- [['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]),
- columns=Index(
- ['1', '2'], name='id'))
- df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan,
- np.nan, 22, np.nan]
- df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan,
- np.nan, 44, np.nan]
-
- expected = df.groupby(level=0, axis=0).fillna(method='ffill')
- result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
- assert_frame_equal(result, expected)
-
- def test_index_label_overlaps_location(self):
- # checking we don't have any label/location confusion in the
- # the wake of GH5375
- df = DataFrame(list('ABCDE'), index=[2, 0, 2, 1, 1])
- g = df.groupby(list('ababb'))
- actual = g.filter(lambda x: len(x) > 2)
- expected = df.iloc[[1, 3, 4]]
- assert_frame_equal(actual, expected)
-
- ser = df[0]
- g = ser.groupby(list('ababb'))
- actual = g.filter(lambda x: len(x) > 2)
- expected = ser.take([1, 3, 4])
- assert_series_equal(actual, expected)
-
- # ... and again, with a generic Index of floats
- df.index = df.index.astype(float)
- g = df.groupby(list('ababb'))
- actual = g.filter(lambda x: len(x) > 2)
- expected = df.iloc[[1, 3, 4]]
- assert_frame_equal(actual, expected)
-
- ser = df[0]
- g = ser.groupby(list('ababb'))
- actual = g.filter(lambda x: len(x) > 2)
- expected = ser.take([1, 3, 4])
- assert_series_equal(actual, expected)
-
- def test_groupby_cumprod(self):
- # GH 4095
- df = pd.DataFrame({'key': ['b'] * 10, 'value': 2})
-
- actual = df.groupby('key')['value'].cumprod()
- expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
- expected.name = 'value'
- tm.assert_series_equal(actual, expected)
-
- df = pd.DataFrame({'key': ['b'] * 100, 'value': 2})
- actual = df.groupby('key')['value'].cumprod()
- # if overflows, groupby product casts to float
- # while numpy passes back invalid values
- df['value'] = df['value'].astype(float)
- expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
- expected.name = 'value'
- tm.assert_series_equal(actual, expected)
-
- def test_ops_general(self):
- ops = [('mean', np.mean),
- ('median', np.median),
- ('std', np.std),
- ('var', np.var),
- ('sum', np.sum),
- ('prod', np.prod),
- ('min', np.min),
- ('max', np.max),
- ('first', lambda x: x.iloc[0]),
- ('last', lambda x: x.iloc[-1]),
- ('count', np.size), ]
- try:
- from scipy.stats import sem
- except ImportError:
- pass
- else:
- ops.append(('sem', sem))
- df = DataFrame(np.random.randn(1000))
- labels = np.random.randint(0, 50, size=1000).astype(float)
-
- for op, targop in ops:
- result = getattr(df.groupby(labels), op)().astype(float)
- expected = df.groupby(labels).agg(targop)
- try:
- tm.assert_frame_equal(result, expected)
- except BaseException as exc:
- exc.args += ('operation: %s' % op, )
- raise
-
- def test_max_nan_bug(self):
- raw = """,Date,app,File
-2013-04-23,2013-04-23 00:00:00,,log080001.log
-2013-05-06,2013-05-06 00:00:00,,log.log
-2013-05-07,2013-05-07 00:00:00,OE,xlsx"""
-
- df = pd.read_csv(StringIO(raw), parse_dates=[0])
- gb = df.groupby('Date')
- r = gb[['File']].max()
- e = gb['File'].max().to_frame()
- tm.assert_frame_equal(r, e)
- assert not r['File'].isna().any()
-
- def test_nlargest(self):
- a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
- b = Series(list('a' * 5 + 'b' * 5))
- gb = a.groupby(b)
- r = gb.nlargest(3)
- e = Series([
- 7, 5, 3, 10, 9, 6
- ], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]]))
- tm.assert_series_equal(r, e)
-
- a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
- gb = a.groupby(b)
- e = Series([
- 3, 2, 1, 3, 3, 2
- ], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]]))
- assert_series_equal(gb.nlargest(3, keep='last'), e)
-
- def test_nsmallest(self):
- a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
- b = Series(list('a' * 5 + 'b' * 5))
- gb = a.groupby(b)
- r = gb.nsmallest(3)
- e = Series([
- 1, 2, 3, 0, 4, 6
- ], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]]))
- tm.assert_series_equal(r, e)
-
- a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
- gb = a.groupby(b)
- e = Series([
- 0, 1, 1, 0, 1, 2
- ], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]]))
- assert_series_equal(gb.nsmallest(3, keep='last'), e)
-
- def test_transform_doesnt_clobber_ints(self):
- # GH 7972
- n = 6
- x = np.arange(n)
- df = DataFrame({'a': x // 2, 'b': 2.0 * x, 'c': 3.0 * x})
- df2 = DataFrame({'a': x // 2 * 1.0, 'b': 2.0 * x, 'c': 3.0 * x})
-
- gb = df.groupby('a')
- result = gb.transform('mean')
-
- gb2 = df2.groupby('a')
- expected = gb2.transform('mean')
- tm.assert_frame_equal(result, expected)
-
- def test_groupby_apply_all_none(self):
- # Tests to make sure no errors if apply function returns all None
- # values. Issue 9684.
- test_df = DataFrame({'groups': [0, 0, 1, 1],
- 'random_vars': [8, 7, 4, 5]})
-
- def test_func(x):
- pass
-
- result = test_df.groupby('groups').apply(test_func)
- expected = DataFrame()
- tm.assert_frame_equal(result, expected)
-
- def test_groupby_apply_none_first(self):
- # GH 12824. Tests if apply returns None first.
- test_df1 = DataFrame({'groups': [1, 1, 1, 2], 'vars': [0, 1, 2, 3]})
- test_df2 = DataFrame({'groups': [1, 2, 2, 2], 'vars': [0, 1, 2, 3]})
-
- def test_func(x):
- if x.shape[0] < 2:
+ _check_groupby(df, result, ['a', 'b'], 'd')
+
+
+def test_dont_clobber_name_column():
+ df = DataFrame({'key': ['a', 'a', 'a', 'b', 'b', 'b'],
+ 'name': ['foo', 'bar', 'baz'] * 2})
+
+ result = df.groupby('key').apply(lambda x: x)
+ assert_frame_equal(result, df)
+
+
+def test_skip_group_keys():
+
+ tsf = tm.makeTimeDataFrame()
+
+ grouped = tsf.groupby(lambda x: x.month, group_keys=False)
+ result = grouped.apply(lambda x: x.sort_values(by='A')[:3])
+
+ pieces = []
+ for key, group in grouped:
+ pieces.append(group.sort_values(by='A')[:3])
+
+ expected = pd.concat(pieces)
+ assert_frame_equal(result, expected)
+
+ grouped = tsf['A'].groupby(lambda x: x.month, group_keys=False)
+ result = grouped.apply(lambda x: x.sort_values()[:3])
+
+ pieces = []
+ for key, group in grouped:
+ pieces.append(group.sort_values()[:3])
+
+ expected = pd.concat(pieces)
+ assert_series_equal(result, expected)
+
+
+def test_no_nonsense_name(frame):
+ # GH #995
+ s = frame['C'].copy()
+ s.name = None
+
+ result = s.groupby(frame['A']).agg(np.sum)
+ assert result.name is None
+
+
+def test_multifunc_sum_bug():
+ # GH #1065
+ x = DataFrame(np.arange(9).reshape(3, 3))
+ x['test'] = 0
+ x['fl'] = [1.3, 1.5, 1.6]
+
+ grouped = x.groupby('test')
+ result = grouped.agg({'fl': 'sum', 2: 'size'})
+ assert result['fl'].dtype == np.float64
+
+
+def test_handle_dict_return_value(df):
+ def f(group):
+ return {'max': group.max(), 'min': group.min()}
+
+ def g(group):
+ return Series({'max': group.max(), 'min': group.min()})
+
+ result = df.groupby('A')['C'].apply(f)
+ expected = df.groupby('A')['C'].apply(g)
+
+ assert isinstance(result, Series)
+ assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize('grouper', ['A', ['A', 'B']])
+def test_set_group_name(df, grouper):
+ def f(group):
+ assert group.name is not None
+ return group
+
+ def freduce(group):
+ assert group.name is not None
+ return group.sum()
+
+ def foo(x):
+ return freduce(x)
+
+ grouped = df.groupby(grouper)
+
+ # make sure all these work
+ grouped.apply(f)
+ grouped.aggregate(freduce)
+ grouped.aggregate({'C': freduce, 'D': freduce})
+ grouped.transform(f)
+
+ grouped['C'].apply(f)
+ grouped['C'].aggregate(freduce)
+ grouped['C'].aggregate([freduce, foo])
+ grouped['C'].transform(f)
+
+
+def test_group_name_available_in_inference_pass():
+ # gh-15062
+ df = pd.DataFrame({'a': [0, 0, 1, 1, 2, 2], 'b': np.arange(6)})
+
+ names = []
+
+ def f(group):
+ names.append(group.name)
+ return group.copy()
+
+ df.groupby('a', sort=False, group_keys=False).apply(f)
+ # we expect 2 zeros because we call ``f`` once to see if a faster route
+ # can be used.
+ expected_names = [0, 0, 1, 2]
+ assert names == expected_names
+
+
+def test_no_dummy_key_names(df):
+ # see gh-1291
+ result = df.groupby(df['A'].values).sum()
+ assert result.index.name is None
+
+ result = df.groupby([df['A'].values, df['B'].values]).sum()
+ assert result.index.names == (None, None)
+
+
+def test_groupby_sort_multiindex_series():
+ # series multiindex groupby sort argument was not being passed through
+ # _compress_group_index
+ # GH 9444
+ index = MultiIndex(levels=[[1, 2], [1, 2]],
+ labels=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
+ names=['a', 'b'])
+ mseries = Series([0, 1, 2, 3, 4, 5], index=index)
+ index = MultiIndex(levels=[[1, 2], [1, 2]],
+ labels=[[0, 0, 1], [1, 0, 0]], names=['a', 'b'])
+ mseries_result = Series([0, 2, 4], index=index)
+
+ result = mseries.groupby(level=['a', 'b'], sort=False).first()
+ assert_series_equal(result, mseries_result)
+ result = mseries.groupby(level=['a', 'b'], sort=True).first()
+ assert_series_equal(result, mseries_result.sort_index())
+
+
+def test_groupby_reindex_inside_function():
+
+ periods = 1000
+ ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods)
+ df = DataFrame({'high': np.arange(
+ periods), 'low': np.arange(periods)}, index=ind)
+
+ def agg_before(hour, func, fix=False):
+ """
+ Run an aggregate func on the subset of data.
+ """
+
+ def _func(data):
+ d = data.loc[data.index.map(
+ lambda x: x.hour < 11)].dropna()
+ if fix:
+ data[data.index[0]]
+ if len(d) == 0:
return None
- return x.iloc[[0, -1]]
-
- result1 = test_df1.groupby('groups').apply(test_func)
- result2 = test_df2.groupby('groups').apply(test_func)
- index1 = MultiIndex.from_arrays([[1, 1], [0, 2]],
- names=['groups', None])
- index2 = MultiIndex.from_arrays([[2, 2], [1, 3]],
- names=['groups', None])
- expected1 = DataFrame({'groups': [1, 1], 'vars': [0, 2]},
- index=index1)
- expected2 = DataFrame({'groups': [2, 2], 'vars': [1, 3]},
- index=index2)
- tm.assert_frame_equal(result1, expected1)
- tm.assert_frame_equal(result2, expected2)
-
- def test_groupby_preserves_sort(self):
- # Test to ensure that groupby always preserves sort order of original
- # object. Issue #8588 and #9651
-
- df = DataFrame(
- {'int_groups': [3, 1, 0, 1, 0, 3, 3, 3],
- 'string_groups': ['z', 'a', 'z', 'a', 'a', 'g', 'g', 'g'],
- 'ints': [8, 7, 4, 5, 2, 9, 1, 1],
- 'floats': [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],
- 'strings': ['z', 'd', 'a', 'e', 'word', 'word2', '42', '47']})
-
- # Try sorting on different types and with different group types
- for sort_column in ['ints', 'floats', 'strings', ['ints', 'floats'],
- ['ints', 'strings']]:
- for group_column in ['int_groups', 'string_groups',
- ['int_groups', 'string_groups']]:
-
- df = df.sort_values(by=sort_column)
-
- g = df.groupby(group_column)
-
- def test_sort(x):
- assert_frame_equal(x, x.sort_values(by=sort_column))
-
- g.apply(test_sort)
-
- def test_numpy_compat(self):
- # see gh-12811
- df = pd.DataFrame({'A': [1, 2, 1], 'B': [1, 2, 3]})
- g = df.groupby('A')
-
- msg = "numpy operations are not valid with groupby"
-
- for func in ('mean', 'var', 'std', 'cumprod', 'cumsum'):
- tm.assert_raises_regex(UnsupportedFunctionCall, msg,
- getattr(g, func), 1, 2, 3)
- tm.assert_raises_regex(UnsupportedFunctionCall, msg,
- getattr(g, func), foo=1)
-
- def test_group_shift_with_null_key(self):
- # This test is designed to replicate the segfault in issue #13813.
- n_rows = 1200
-
- # Generate a moderately large dataframe with occasional missing
- # values in column `B`, and then group by [`A`, `B`]. This should
- # force `-1` in `labels` array of `g.grouper.group_info` exactly
- # at those places, where the group-by key is partially missing.
- df = DataFrame([(i % 12, i % 3 if i % 3 else np.nan, i)
- for i in range(n_rows)], dtype=float,
- columns=["A", "B", "Z"], index=None)
- g = df.groupby(["A", "B"])
-
- expected = DataFrame([(i + 12 if i % 3 and i < n_rows - 12
- else np.nan)
- for i in range(n_rows)], dtype=float,
- columns=["Z"], index=None)
- result = g.shift(-1)
-
- assert_frame_equal(result, expected)
-
- def test_pivot_table_values_key_error(self):
- # This test is designed to replicate the error in issue #14938
- df = pd.DataFrame({'eventDate':
- pd.date_range(pd.datetime.today(),
- periods=20, freq='M').tolist(),
- 'thename': range(0, 20)})
-
- df['year'] = df.set_index('eventDate').index.year
- df['month'] = df.set_index('eventDate').index.month
-
- with pytest.raises(KeyError):
- df.reset_index().pivot_table(index='year', columns='month',
- values='badname', aggfunc='count')
-
- def test_cummin_cummax(self):
- # GH 15048
- num_types = [np.int32, np.int64, np.float32, np.float64]
- num_mins = [np.iinfo(np.int32).min, np.iinfo(np.int64).min,
- np.finfo(np.float32).min, np.finfo(np.float64).min]
- num_max = [np.iinfo(np.int32).max, np.iinfo(np.int64).max,
- np.finfo(np.float32).max, np.finfo(np.float64).max]
- base_df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 2, 2],
- 'B': [3, 4, 3, 2, 2, 3, 2, 1]})
- expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
- expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
-
- for dtype, min_val, max_val in zip(num_types, num_mins, num_max):
- df = base_df.astype(dtype)
-
- # cummin
- expected = pd.DataFrame({'B': expected_mins}).astype(dtype)
- result = df.groupby('A').cummin()
- tm.assert_frame_equal(result, expected)
- result = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
- tm.assert_frame_equal(result, expected)
-
- # Test cummin w/ min value for dtype
- df.loc[[2, 6], 'B'] = min_val
- expected.loc[[2, 3, 6, 7], 'B'] = min_val
- result = df.groupby('A').cummin()
- tm.assert_frame_equal(result, expected)
- expected = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
- tm.assert_frame_equal(result, expected)
-
- # cummax
- expected = pd.DataFrame({'B': expected_maxs}).astype(dtype)
- result = df.groupby('A').cummax()
- tm.assert_frame_equal(result, expected)
- result = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
- tm.assert_frame_equal(result, expected)
-
- # Test cummax w/ max value for dtype
- df.loc[[2, 6], 'B'] = max_val
- expected.loc[[2, 3, 6, 7], 'B'] = max_val
- result = df.groupby('A').cummax()
- tm.assert_frame_equal(result, expected)
- expected = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
- tm.assert_frame_equal(result, expected)
-
- # Test nan in some values
- base_df.loc[[0, 2, 4, 6], 'B'] = np.nan
- expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 2,
- np.nan, 3, np.nan, 1]})
- result = base_df.groupby('A').cummin()
- tm.assert_frame_equal(result, expected)
- expected = (base_df.groupby('A')
- .B
- .apply(lambda x: x.cummin())
- .to_frame())
- tm.assert_frame_equal(result, expected)
-
- expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 4,
- np.nan, 3, np.nan, 3]})
- result = base_df.groupby('A').cummax()
- tm.assert_frame_equal(result, expected)
- expected = (base_df.groupby('A')
- .B
- .apply(lambda x: x.cummax())
- .to_frame())
- tm.assert_frame_equal(result, expected)
-
- # Test nan in entire column
- base_df['B'] = np.nan
- expected = pd.DataFrame({'B': [np.nan] * 8})
- result = base_df.groupby('A').cummin()
- tm.assert_frame_equal(expected, result)
- result = base_df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
- tm.assert_frame_equal(expected, result)
- result = base_df.groupby('A').cummax()
- tm.assert_frame_equal(expected, result)
- result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
- tm.assert_frame_equal(expected, result)
-
- # GH 15561
- df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001'])))
- expected = pd.Series(pd.to_datetime('2001'), index=[0], name='b')
- for method in ['cummax', 'cummin']:
- result = getattr(df.groupby('a')['b'], method)()
- tm.assert_series_equal(expected, result)
-
- # GH 15635
- df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
- result = df.groupby('a').b.cummax()
- expected = pd.Series([2, 1, 2], name='b')
- tm.assert_series_equal(result, expected)
-
- df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
- result = df.groupby('a').b.cummin()
- expected = pd.Series([1, 2, 1], name='b')
- tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize('in_vals, out_vals', [
-
- # Basics: strictly increasing (T), strictly decreasing (F),
- # abs val increasing (F), non-strictly increasing (T)
- ([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1],
- [True, False, False, True]),
-
- # Test with inf vals
- ([1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
- [True, False, True, False]),
-
- # Test with nan vals; should always be False
- ([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
- [False, False, False, False]),
- ])
- def test_is_monotonic_increasing(self, in_vals, out_vals):
- # GH 17015
- source_dict = {
- 'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
- 'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
- 'C': in_vals}
- df = pd.DataFrame(source_dict)
- result = df.groupby('B').C.is_monotonic_increasing
- index = Index(list('abcd'), name='B')
- expected = pd.Series(index=index, data=out_vals, name='C')
- tm.assert_series_equal(result, expected)
-
- # Also check result equal to manually taking x.is_monotonic_increasing.
- expected = (
- df.groupby(['B']).C.apply(lambda x: x.is_monotonic_increasing))
- tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize('in_vals, out_vals', [
- # Basics: strictly decreasing (T), strictly increasing (F),
- # abs val decreasing (F), non-strictly increasing (T)
- ([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1],
- [True, False, False, True]),
-
- # Test with inf vals
- ([np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
- [True, True, False, True]),
-
- # Test with nan vals; should always be False
- ([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
- [False, False, False, False]),
- ])
- def test_is_monotonic_decreasing(self, in_vals, out_vals):
- # GH 17015
- source_dict = {
- 'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
- 'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
- 'C': in_vals}
-
- df = pd.DataFrame(source_dict)
- result = df.groupby('B').C.is_monotonic_decreasing
- index = Index(list('abcd'), name='B')
- expected = pd.Series(index=index, data=out_vals, name='C')
- tm.assert_series_equal(result, expected)
-
- def test_apply_numeric_coercion_when_datetime(self):
- # In the past, group-by/apply operations have been over-eager
- # in converting dtypes to numeric, in the presence of datetime
- # columns. Various GH issues were filed, the reproductions
- # for which are here.
-
- # GH 15670
- df = pd.DataFrame({'Number': [1, 2],
- 'Date': ["2017-03-02"] * 2,
- 'Str': ["foo", "inf"]})
- expected = df.groupby(['Number']).apply(lambda x: x.iloc[0])
- df.Date = pd.to_datetime(df.Date)
- result = df.groupby(['Number']).apply(lambda x: x.iloc[0])
- tm.assert_series_equal(result['Str'], expected['Str'])
-
- # GH 15421
- df = pd.DataFrame({'A': [10, 20, 30],
- 'B': ['foo', '3', '4'],
- 'T': [pd.Timestamp("12:31:22")] * 3})
-
- def get_B(g):
- return g.iloc[0][['B']]
- result = df.groupby('A').apply(get_B)['B']
- expected = df.B
- expected.index = df.A
- tm.assert_series_equal(result, expected)
-
- # GH 14423
- def predictions(tool):
- out = pd.Series(index=['p1', 'p2', 'useTime'], dtype=object)
- if 'step1' in list(tool.State):
- out['p1'] = str(tool[tool.State == 'step1'].Machine.values[0])
- if 'step2' in list(tool.State):
- out['p2'] = str(tool[tool.State == 'step2'].Machine.values[0])
- out['useTime'] = str(
- tool[tool.State == 'step2'].oTime.values[0])
- return out
- df1 = pd.DataFrame({'Key': ['B', 'B', 'A', 'A'],
- 'State': ['step1', 'step2', 'step1', 'step2'],
- 'oTime': ['', '2016-09-19 05:24:33',
- '', '2016-09-19 23:59:04'],
- 'Machine': ['23', '36L', '36R', '36R']})
- df2 = df1.copy()
- df2.oTime = pd.to_datetime(df2.oTime)
- expected = df1.groupby('Key').apply(predictions).p1
- result = df2.groupby('Key').apply(predictions).p1
- tm.assert_series_equal(expected, result)
-
- def test_pipe(self):
- # Test the pipe method of DataFrameGroupBy.
- # Issue #17871
-
- random_state = np.random.RandomState(1234567890)
-
- df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
- 'foo', 'bar', 'foo', 'foo'],
- 'B': random_state.randn(8),
- 'C': random_state.randn(8)})
-
- def f(dfgb):
- return dfgb.B.max() - dfgb.C.min().min()
-
- def square(srs):
- return srs ** 2
-
- # Note that the transformations are
- # GroupBy -> Series
- # Series -> Series
- # This then chains the GroupBy.pipe and the
- # NDFrame.pipe methods
- result = df.groupby('A').pipe(f).pipe(square)
-
- index = Index([u'bar', u'foo'], dtype='object', name=u'A')
- expected = pd.Series([8.99110003361, 8.17516964785], name='B',
- index=index)
-
- assert_series_equal(expected, result)
-
- def test_pipe_args(self):
- # Test passing args to the pipe method of DataFrameGroupBy.
- # Issue #17871
-
- df = pd.DataFrame({'group': ['A', 'A', 'B', 'B', 'C'],
- 'x': [1.0, 2.0, 3.0, 2.0, 5.0],
- 'y': [10.0, 100.0, 1000.0, -100.0, -1000.0]})
-
- def f(dfgb, arg1):
- return (dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False)
- .groupby(dfgb.grouper))
-
- def g(dfgb, arg2):
- return dfgb.sum() / dfgb.sum().sum() + arg2
-
- def h(df, arg3):
- return df.x + df.y - arg3
-
- result = (df
- .groupby('group')
- .pipe(f, 0)
- .pipe(g, 10)
- .pipe(h, 100))
-
- # Assert the results here
- index = pd.Index(['A', 'B', 'C'], name='group')
- expected = pd.Series([-79.5160891089, -78.4839108911, -80],
- index=index)
-
- assert_series_equal(expected, result)
-
- # test SeriesGroupby.pipe
- ser = pd.Series([1, 1, 2, 2, 3, 3])
- result = ser.groupby(ser).pipe(lambda grp: grp.sum() * grp.count())
-
- expected = pd.Series([4, 8, 12], index=pd.Int64Index([1, 2, 3]))
-
- assert_series_equal(result, expected)
-
- def test_empty_dataframe_groupby(self):
- # GH8093
- df = DataFrame(columns=['A', 'B', 'C'])
-
- result = df.groupby('A').sum()
- expected = DataFrame(columns=['B', 'C'], dtype=np.float64)
- expected.index.name = 'A'
-
- assert_frame_equal(result, expected)
-
- def test_tuple_warns(self):
- # https://github.com/pandas-dev/pandas/issues/18314
- df = pd.DataFrame({('a', 'b'): [1, 1, 2, 2], 'a': [1, 1, 1, 2],
- 'b': [1, 2, 2, 2], 'c': [1, 1, 1, 1]})
- with tm.assert_produces_warning(FutureWarning) as w:
- df[['a', 'b', 'c']].groupby(('a', 'b')).c.mean()
-
- assert "Interpreting tuple 'by' as a list" in str(w[0].message)
+ return func(d)
+
+ return _func
+
+ def afunc(data):
+ d = data.select(lambda x: x.hour < 11).dropna()
+ return np.max(d)
+
+ grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
+ closure_bad = grouped.agg({'high': agg_before(11, np.max)})
+ closure_good = grouped.agg({'high': agg_before(11, np.max, True)})
+
+ assert_frame_equal(closure_bad, closure_good)
+
+
+def test_groupby_multiindex_missing_pair():
+ # GH9049
+ df = DataFrame({'group1': ['a', 'a', 'a', 'b'],
+ 'group2': ['c', 'c', 'd', 'c'],
+ 'value': [1, 1, 1, 5]})
+ df = df.set_index(['group1', 'group2'])
+ df_grouped = df.groupby(level=['group1', 'group2'], sort=True)
+
+ res = df_grouped.agg('sum')
+ idx = MultiIndex.from_tuples(
+ [('a', 'c'), ('a', 'd'), ('b', 'c')], names=['group1', 'group2'])
+ exp = DataFrame([[2], [1], [5]], index=idx, columns=['value'])
+
+ tm.assert_frame_equal(res, exp)
+
+
+def test_groupby_multiindex_not_lexsorted():
+ # GH 11640
+
+ # define the lexsorted version
+ lexsorted_mi = MultiIndex.from_tuples(
+ [('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
+ lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
+ assert lexsorted_df.columns.is_lexsorted()
+
+ # define the non-lexsorted version
+ not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
+ data=[[1, 'b1', 'c1', 3],
+ [1, 'b2', 'c2', 4]])
+ not_lexsorted_df = not_lexsorted_df.pivot_table(
+ index='a', columns=['b', 'c'], values='d')
+ not_lexsorted_df = not_lexsorted_df.reset_index()
+ assert not not_lexsorted_df.columns.is_lexsorted()
+
+ # compare the results
+ tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
+
+ expected = lexsorted_df.groupby('a').mean()
+ with tm.assert_produces_warning(PerformanceWarning):
+ result = not_lexsorted_df.groupby('a').mean()
+ tm.assert_frame_equal(expected, result)
+
+ # a transforming function should work regardless of sort
+ # GH 14776
+ df = DataFrame({'x': ['a', 'a', 'b', 'a'],
+ 'y': [1, 1, 2, 2],
+ 'z': [1, 2, 3, 4]}).set_index(['x', 'y'])
+ assert not df.index.is_lexsorted()
+
+ for level in [0, 1, [0, 1]]:
+ for sort in [False, True]:
+ result = df.groupby(level=level, sort=sort).apply(
+ DataFrame.drop_duplicates)
+ expected = df
+ tm.assert_frame_equal(expected, result)
+
+ result = df.sort_index().groupby(level=level, sort=sort).apply(
+ DataFrame.drop_duplicates)
+ expected = df.sort_index()
+ tm.assert_frame_equal(expected, result)
+
+
+def test_index_label_overlaps_location():
+ # checking we don't have any label/location confusion in the
+    # wake of GH5375
+ df = DataFrame(list('ABCDE'), index=[2, 0, 2, 1, 1])
+ g = df.groupby(list('ababb'))
+ actual = g.filter(lambda x: len(x) > 2)
+ expected = df.iloc[[1, 3, 4]]
+ assert_frame_equal(actual, expected)
+
+ ser = df[0]
+ g = ser.groupby(list('ababb'))
+ actual = g.filter(lambda x: len(x) > 2)
+ expected = ser.take([1, 3, 4])
+ assert_series_equal(actual, expected)
+
+ # ... and again, with a generic Index of floats
+ df.index = df.index.astype(float)
+ g = df.groupby(list('ababb'))
+ actual = g.filter(lambda x: len(x) > 2)
+ expected = df.iloc[[1, 3, 4]]
+ assert_frame_equal(actual, expected)
+
+ ser = df[0]
+ g = ser.groupby(list('ababb'))
+ actual = g.filter(lambda x: len(x) > 2)
+ expected = ser.take([1, 3, 4])
+ assert_series_equal(actual, expected)
+
+
+def test_transform_doesnt_clobber_ints():
+ # GH 7972
+ n = 6
+ x = np.arange(n)
+ df = DataFrame({'a': x // 2, 'b': 2.0 * x, 'c': 3.0 * x})
+ df2 = DataFrame({'a': x // 2 * 1.0, 'b': 2.0 * x, 'c': 3.0 * x})
+
+ gb = df.groupby('a')
+ result = gb.transform('mean')
+
+ gb2 = df2.groupby('a')
+ expected = gb2.transform('mean')
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize('sort_column', ['ints', 'floats', 'strings',
+ ['ints', 'floats'],
+ ['ints', 'strings']])
+@pytest.mark.parametrize('group_column', ['int_groups', 'string_groups',
+ ['int_groups', 'string_groups']])
+def test_groupby_preserves_sort(sort_column, group_column):
+ # Test to ensure that groupby always preserves sort order of original
+ # object. Issue #8588 and #9651
+
+ df = DataFrame(
+ {'int_groups': [3, 1, 0, 1, 0, 3, 3, 3],
+ 'string_groups': ['z', 'a', 'z', 'a', 'a', 'g', 'g', 'g'],
+ 'ints': [8, 7, 4, 5, 2, 9, 1, 1],
+ 'floats': [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],
+ 'strings': ['z', 'd', 'a', 'e', 'word', 'word2', '42', '47']})
+
+ # Try sorting on different types and with different group types
+
+ df = df.sort_values(by=sort_column)
+ g = df.groupby(group_column)
+
+ def test_sort(x):
+ assert_frame_equal(x, x.sort_values(by=sort_column))
+ g.apply(test_sort)
+
+
+def test_group_shift_with_null_key():
+ # This test is designed to replicate the segfault in issue #13813.
+ n_rows = 1200
+
+ # Generate a moderately large dataframe with occasional missing
+ # values in column `B`, and then group by [`A`, `B`]. This should
+ # force `-1` in `labels` array of `g.grouper.group_info` exactly
+ # at those places, where the group-by key is partially missing.
+ df = DataFrame([(i % 12, i % 3 if i % 3 else np.nan, i)
+ for i in range(n_rows)], dtype=float,
+ columns=["A", "B", "Z"], index=None)
+ g = df.groupby(["A", "B"])
+
+ expected = DataFrame([(i + 12 if i % 3 and i < n_rows - 12
+ else np.nan)
+ for i in range(n_rows)], dtype=float,
+ columns=["Z"], index=None)
+ result = g.shift(-1)
+
+ assert_frame_equal(result, expected)
+
- with tm.assert_produces_warning(None):
- df.groupby(('a', 'b')).c.mean()
+def test_pivot_table_values_key_error():
+ # This test is designed to replicate the error in issue #14938
+ df = pd.DataFrame({'eventDate':
+ pd.date_range(pd.datetime.today(),
+ periods=20, freq='M').tolist(),
+ 'thename': range(0, 20)})
- def test_tuple_warns_unhashable(self):
- # https://github.com/pandas-dev/pandas/issues/18314
- business_dates = date_range(start='4/1/2014', end='6/30/2014',
- freq='B')
- df = DataFrame(1, index=business_dates, columns=['a', 'b'])
+ df['year'] = df.set_index('eventDate').index.year
+ df['month'] = df.set_index('eventDate').index.month
+
+ with pytest.raises(KeyError):
+ df.reset_index().pivot_table(index='year', columns='month',
+ values='badname', aggfunc='count')
- with tm.assert_produces_warning(FutureWarning) as w:
- df.groupby((df.index.year, df.index.month)).nth([0, 3, -1])
- assert "Interpreting tuple 'by' as a list" in str(w[0].message)
+def test_empty_dataframe_groupby():
+ # GH8093
+ df = DataFrame(columns=['A', 'B', 'C'])
- def test_tuple_correct_keyerror(self):
- # https://github.com/pandas-dev/pandas/issues/18798
- df = pd.DataFrame(1, index=range(3),
- columns=pd.MultiIndex.from_product([[1, 2],
- [3, 4]]))
- with tm.assert_raises_regex(KeyError, "(7, 8)"):
- df.groupby((7, 8)).mean()
+ result = df.groupby('A').sum()
+ expected = DataFrame(columns=['B', 'C'], dtype=np.float64)
+ expected.index.name = 'A'
+ assert_frame_equal(result, expected)
-def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
- tups = lmap(tuple, df[keys].values)
- tups = com._asarray_tuplesafe(tups)
- expected = f(df.groupby(tups)[field])
- for k, v in compat.iteritems(expected):
- assert (result[k] == v)
+
+def test_tuple_warns():
+ # https://github.com/pandas-dev/pandas/issues/18314
+ df = pd.DataFrame({('a', 'b'): [1, 1, 2, 2], 'a': [1, 1, 1, 2],
+ 'b': [1, 2, 2, 2], 'c': [1, 1, 1, 1]})
+ with tm.assert_produces_warning(FutureWarning) as w:
+ df[['a', 'b', 'c']].groupby(('a', 'b')).c.mean()
+
+ assert "Interpreting tuple 'by' as a list" in str(w[0].message)
+
+ with tm.assert_produces_warning(None):
+ df.groupby(('a', 'b')).c.mean()
+
+
+def test_tuple_warns_unhashable():
+ # https://github.com/pandas-dev/pandas/issues/18314
+ business_dates = date_range(start='4/1/2014', end='6/30/2014',
+ freq='B')
+ df = DataFrame(1, index=business_dates, columns=['a', 'b'])
+
+ with tm.assert_produces_warning(FutureWarning) as w:
+ df.groupby((df.index.year, df.index.month)).nth([0, 3, -1])
+
+ assert "Interpreting tuple 'by' as a list" in str(w[0].message)
+
+
+def test_tuple_correct_keyerror():
+ # https://github.com/pandas-dev/pandas/issues/18798
+ df = pd.DataFrame(1, index=range(3),
+ columns=pd.MultiIndex.from_product([[1, 2],
+ [3, 4]]))
+ with tm.assert_raises_regex(KeyError, "(7, 8)"):
+ df.groupby((7, 8)).mean()
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 57becd342d370..743237f5b386c 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -9,6 +9,7 @@
Index, MultiIndex, DataFrame, Series, CategoricalIndex)
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal)
+from pandas.core.groupby.groupby import Grouping
from pandas.compat import lrange, long
from pandas import compat
@@ -16,13 +17,12 @@
import pandas.util.testing as tm
import pandas as pd
-from .common import MixIn
# selection
# --------------------------------
-class TestSelection(MixIn):
+class TestSelection():
def test_select_bad_cols(self):
df = DataFrame([[1, 2]], columns=['A', 'B'])
@@ -48,14 +48,14 @@ def test_groupby_duplicated_column_errormsg(self):
assert c.columns.nlevels == 1
assert c.columns.size == 3
- def test_column_select_via_attr(self):
- result = self.df.groupby('A').C.sum()
- expected = self.df.groupby('A')['C'].sum()
+ def test_column_select_via_attr(self, df):
+ result = df.groupby('A').C.sum()
+ expected = df.groupby('A')['C'].sum()
assert_series_equal(result, expected)
- self.df['mean'] = 1.5
- result = self.df.groupby('A').mean()
- expected = self.df.groupby('A').agg(np.mean)
+ df['mean'] = 1.5
+ result = df.groupby('A').mean()
+ expected = df.groupby('A').agg(np.mean)
assert_frame_equal(result, expected)
def test_getitem_list_of_columns(self):
@@ -96,7 +96,7 @@ def test_getitem_numeric_column_names(self):
# grouping
# --------------------------------
-class TestGrouping(MixIn):
+class TestGrouping():
def test_grouper_index_types(self):
# related GH5375
@@ -291,17 +291,17 @@ def test_grouper_getting_correct_binner(self):
names=['one', 'two']))
assert_frame_equal(result, expected)
- def test_grouper_iter(self):
- assert sorted(self.df.groupby('A').grouper) == ['bar', 'foo']
+ def test_grouper_iter(self, df):
+ assert sorted(df.groupby('A').grouper) == ['bar', 'foo']
- def test_empty_groups(self):
+ def test_empty_groups(self, df):
# see gh-1048
- pytest.raises(ValueError, self.df.groupby, [])
+ pytest.raises(ValueError, df.groupby, [])
- def test_groupby_grouper(self):
- grouped = self.df.groupby('A')
+ def test_groupby_grouper(self, df):
+ grouped = df.groupby('A')
- result = self.df.groupby(grouped.grouper).mean()
+ result = df.groupby(grouped.grouper).mean()
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
@@ -339,10 +339,9 @@ def test_groupby_grouper_f_sanity_checked(self):
pytest.raises(AssertionError, ts.groupby, lambda key: key[0:6])
- def test_grouping_error_on_multidim_input(self):
- from pandas.core.groupby.groupby import Grouping
+ def test_grouping_error_on_multidim_input(self, df):
pytest.raises(ValueError,
- Grouping, self.df.index, self.df[['A', 'A']])
+ Grouping, df.index, df[['A', 'A']])
def test_multiindex_passthru(self):
@@ -354,26 +353,25 @@ def test_multiindex_passthru(self):
result = df.groupby(axis=1, level=[0, 1]).first()
assert_frame_equal(result, df)
- def test_multiindex_negative_level(self):
+ def test_multiindex_negative_level(self, mframe):
# GH 13901
- result = self.mframe.groupby(level=-1).sum()
- expected = self.mframe.groupby(level='second').sum()
+ result = mframe.groupby(level=-1).sum()
+ expected = mframe.groupby(level='second').sum()
assert_frame_equal(result, expected)
- result = self.mframe.groupby(level=-2).sum()
- expected = self.mframe.groupby(level='first').sum()
+ result = mframe.groupby(level=-2).sum()
+ expected = mframe.groupby(level='first').sum()
assert_frame_equal(result, expected)
- result = self.mframe.groupby(level=[-2, -1]).sum()
- expected = self.mframe
+ result = mframe.groupby(level=[-2, -1]).sum()
+ expected = mframe
assert_frame_equal(result, expected)
- result = self.mframe.groupby(level=[-1, 'first']).sum()
- expected = self.mframe.groupby(level=['second', 'first']).sum()
+ result = mframe.groupby(level=[-1, 'first']).sum()
+ expected = mframe.groupby(level=['second', 'first']).sum()
assert_frame_equal(result, expected)
- def test_multifunc_select_col_integer_cols(self):
- df = self.df
+ def test_multifunc_select_col_integer_cols(self, df):
df.columns = np.arange(len(df.columns))
# it works!
@@ -428,9 +426,9 @@ def test_groupby_multiindex_tuple(self):
tm.assert_dict_equal(expected, result)
@pytest.mark.parametrize('sort', [True, False])
- def test_groupby_level(self, sort):
+ def test_groupby_level(self, sort, mframe, df):
# GH 17537
- frame = self.mframe
+ frame = mframe
deleveled = frame.reset_index()
result0 = frame.groupby(level=0, sort=sort).sum()
@@ -464,7 +462,7 @@ def test_groupby_level(self, sort):
assert_frame_equal(result1, expected1.T)
# raise exception for non-MultiIndex
- pytest.raises(ValueError, self.df.groupby, level=1)
+ pytest.raises(ValueError, df.groupby, level=1)
def test_groupby_level_index_names(self):
# GH4014 this used to raise ValueError since 'exp'>1 (in py2)
@@ -496,9 +494,9 @@ def test_groupby_level_with_nas(self, sort):
expected = Series([6., 18.], index=[0.0, 1.0])
assert_series_equal(result, expected)
- def test_groupby_args(self):
+ def test_groupby_args(self, mframe):
# PR8618 and issue 8015
- frame = self.mframe
+ frame = mframe
def j():
frame.groupby()
@@ -516,14 +514,14 @@ def k():
[True, [2, 2, 2, 0, 0, 1, 1, 3, 3, 3]],
[False, [0, 0, 0, 1, 1, 2, 2, 3, 3, 3]]
])
- def test_level_preserve_order(self, sort, labels):
+ def test_level_preserve_order(self, sort, labels, mframe):
# GH 17537
- grouped = self.mframe.groupby(level=0, sort=sort)
+ grouped = mframe.groupby(level=0, sort=sort)
exp_labels = np.array(labels, np.intp)
assert_almost_equal(grouped.grouper.labels[0], exp_labels)
- def test_grouping_labels(self):
- grouped = self.mframe.groupby(self.mframe.index.get_level_values(0))
+ def test_grouping_labels(self, mframe):
+ grouped = mframe.groupby(mframe.index.get_level_values(0))
exp_labels = np.array([2, 2, 2, 0, 0, 1, 1, 3, 3, 3], dtype=np.intp)
assert_almost_equal(grouped.grouper.labels[0], exp_labels)
@@ -531,7 +529,7 @@ def test_grouping_labels(self):
# get_group
# --------------------------------
-class TestGetGroup(MixIn):
+class TestGetGroup():
def test_get_group(self):
with catch_warnings(record=True):
@@ -638,29 +636,28 @@ def test_gb_key_len_equal_axis_len(self):
# groups & iteration
# --------------------------------
-class TestIteration(MixIn):
+class TestIteration():
- def test_groups(self):
- grouped = self.df.groupby(['A'])
+ def test_groups(self, df):
+ grouped = df.groupby(['A'])
groups = grouped.groups
assert groups is grouped.groups # caching works
for k, v in compat.iteritems(grouped.groups):
- assert (self.df.loc[v]['A'] == k).all()
+ assert (df.loc[v]['A'] == k).all()
- grouped = self.df.groupby(['A', 'B'])
+ grouped = df.groupby(['A', 'B'])
groups = grouped.groups
assert groups is grouped.groups # caching works
for k, v in compat.iteritems(grouped.groups):
- assert (self.df.loc[v]['A'] == k[0]).all()
- assert (self.df.loc[v]['B'] == k[1]).all()
+ assert (df.loc[v]['A'] == k[0]).all()
+ assert (df.loc[v]['B'] == k[1]).all()
- def test_grouping_is_iterable(self):
+ def test_grouping_is_iterable(self, tsframe):
# this code path isn't used anywhere else
# not sure it's useful
- grouped = self.tsframe.groupby([lambda x: x.weekday(), lambda x: x.year
- ])
+ grouped = tsframe.groupby([lambda x: x.weekday(), lambda x: x.year])
# test it works
for g in grouped.grouper.groupings[0]:
@@ -682,7 +679,7 @@ def test_multi_iter(self):
assert e2 == two
assert_series_equal(three, e3)
- def test_multi_iter_frame(self):
+ def test_multi_iter_frame(self, three_group):
k1 = np.array(['b', 'b', 'b', 'a', 'a', 'a'])
k2 = np.array(['1', '2', '1', '2', '1', '2'])
df = DataFrame({'v1': np.random.randn(6),
@@ -715,7 +712,7 @@ def test_multi_iter_frame(self):
assert len(groups) == 2
# axis = 1
- three_levels = self.three_group.groupby(['A', 'B', 'C']).mean()
+ three_levels = three_group.groupby(['A', 'B', 'C']).mean()
grouped = three_levels.T.groupby(axis=1, level=(1, 2))
for key, group in grouped:
pass
@@ -733,13 +730,13 @@ def test_multi_iter_panel(self):
expected = wp.reindex(major=exp_axis)
assert_panel_equal(group, expected)
- def test_dictify(self):
- dict(iter(self.df.groupby('A')))
- dict(iter(self.df.groupby(['A', 'B'])))
- dict(iter(self.df['C'].groupby(self.df['A'])))
- dict(iter(self.df['C'].groupby([self.df['A'], self.df['B']])))
- dict(iter(self.df.groupby('A')['C']))
- dict(iter(self.df.groupby(['A', 'B'])['C']))
+ def test_dictify(self, df):
+ dict(iter(df.groupby('A')))
+ dict(iter(df.groupby(['A', 'B'])))
+ dict(iter(df['C'].groupby(df['A'])))
+ dict(iter(df['C'].groupby([df['A'], df['B']])))
+ dict(iter(df.groupby('A')['C']))
+ dict(iter(df.groupby(['A', 'B'])['C']))
def test_groupby_with_small_elem(self):
# GH 8542
diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py
index ccde545b5b8e9..a32ba9ad76f14 100644
--- a/pandas/tests/groupby/test_nth.py
+++ b/pandas/tests/groupby/test_nth.py
@@ -7,314 +7,316 @@
assert_produces_warning,
assert_series_equal)
-from .common import MixIn
-
-
-class TestNth(MixIn):
-
- def test_first_last_nth(self):
- # tests for first / last / nth
- grouped = self.df.groupby('A')
- first = grouped.first()
- expected = self.df.loc[[1, 0], ['B', 'C', 'D']]
- expected.index = Index(['bar', 'foo'], name='A')
- expected = expected.sort_index()
- assert_frame_equal(first, expected)
-
- nth = grouped.nth(0)
- assert_frame_equal(nth, expected)
-
- last = grouped.last()
- expected = self.df.loc[[5, 7], ['B', 'C', 'D']]
- expected.index = Index(['bar', 'foo'], name='A')
- assert_frame_equal(last, expected)
-
- nth = grouped.nth(-1)
- assert_frame_equal(nth, expected)
-
- nth = grouped.nth(1)
- expected = self.df.loc[[2, 3], ['B', 'C', 'D']].copy()
- expected.index = Index(['foo', 'bar'], name='A')
- expected = expected.sort_index()
- assert_frame_equal(nth, expected)
-
- # it works!
- grouped['B'].first()
- grouped['B'].last()
- grouped['B'].nth(0)
-
- self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan
- assert isna(grouped['B'].first()['foo'])
- assert isna(grouped['B'].last()['foo'])
- assert isna(grouped['B'].nth(0)['foo'])
-
- # v0.14.0 whatsnew
- df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
- g = df.groupby('A')
- result = g.first()
- expected = df.iloc[[1, 2]].set_index('A')
- assert_frame_equal(result, expected)
-
- expected = df.iloc[[1, 2]].set_index('A')
- result = g.nth(0, dropna='any')
- assert_frame_equal(result, expected)
-
- def test_first_last_nth_dtypes(self):
-
- df = self.df_mixed_floats.copy()
- df['E'] = True
- df['F'] = 1
-
- # tests for first / last / nth
- grouped = df.groupby('A')
- first = grouped.first()
- expected = df.loc[[1, 0], ['B', 'C', 'D', 'E', 'F']]
- expected.index = Index(['bar', 'foo'], name='A')
- expected = expected.sort_index()
- assert_frame_equal(first, expected)
-
- last = grouped.last()
- expected = df.loc[[5, 7], ['B', 'C', 'D', 'E', 'F']]
- expected.index = Index(['bar', 'foo'], name='A')
- expected = expected.sort_index()
- assert_frame_equal(last, expected)
-
- nth = grouped.nth(1)
- expected = df.loc[[3, 2], ['B', 'C', 'D', 'E', 'F']]
- expected.index = Index(['bar', 'foo'], name='A')
- expected = expected.sort_index()
- assert_frame_equal(nth, expected)
-
- # GH 2763, first/last shifting dtypes
- idx = lrange(10)
- idx.append(9)
- s = Series(data=lrange(11), index=idx, name='IntCol')
- assert s.dtype == 'int64'
- f = s.groupby(level=0).first()
- assert f.dtype == 'int64'
-
- def test_nth(self):
- df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
- g = df.groupby('A')
-
- assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))
- assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))
- assert_frame_equal(g.nth(2), df.loc[[]].set_index('A'))
- assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))
- assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))
- assert_frame_equal(g.nth(-3), df.loc[[]].set_index('A'))
- assert_series_equal(g.B.nth(0), df.set_index('A').B.iloc[[0, 2]])
- assert_series_equal(g.B.nth(1), df.set_index('A').B.iloc[[1]])
- assert_frame_equal(g[['B']].nth(0),
- df.loc[[0, 2], ['A', 'B']].set_index('A'))
-
- exp = df.set_index('A')
- assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])
- assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])
-
- exp['B'] = np.nan
- assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])
- assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])
-
- # out of bounds, regression from 0.13.1
- # GH 6621
- df = DataFrame({'color': {0: 'green',
- 1: 'green',
- 2: 'red',
- 3: 'red',
- 4: 'red'},
- 'food': {0: 'ham',
- 1: 'eggs',
- 2: 'eggs',
- 3: 'ham',
- 4: 'pork'},
- 'two': {0: 1.5456590000000001,
- 1: -0.070345000000000005,
- 2: -2.4004539999999999,
- 3: 0.46206000000000003,
- 4: 0.52350799999999997},
- 'one': {0: 0.56573799999999996,
- 1: -0.9742360000000001,
- 2: 1.033801,
- 3: -0.78543499999999999,
- 4: 0.70422799999999997}}).set_index(['color',
- 'food'])
-
- result = df.groupby(level=0, as_index=False).nth(2)
- expected = df.iloc[[-1]]
- assert_frame_equal(result, expected)
-
- result = df.groupby(level=0, as_index=False).nth(3)
- expected = df.loc[[]]
- assert_frame_equal(result, expected)
-
- # GH 7559
- # from the vbench
- df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype='int64')
- s = df[1]
- g = df[0]
- expected = s.groupby(g).first()
- expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
- assert_series_equal(expected2, expected, check_names=False)
- assert expected.name == 1
- assert expected2.name == 1
-
- # validate first
- v = s[g == 1].iloc[0]
- assert expected.iloc[0] == v
- assert expected2.iloc[0] == v
-
- # this is NOT the same as .first (as sorted is default!)
- # as it keeps the order in the series (and not the group order)
- # related GH 7287
- expected = s.groupby(g, sort=False).first()
- result = s.groupby(g, sort=False).nth(0, dropna='all')
- assert_series_equal(result, expected)
-
- # doc example
- df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
- g = df.groupby('A')
- # PR 17493, related to issue 11038
- # test Series.nth with True for dropna produces FutureWarning
- with assert_produces_warning(FutureWarning):
- result = g.B.nth(0, dropna=True)
- expected = g.B.first()
- assert_series_equal(result, expected)
-
- # test multiple nth values
- df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],
- columns=['A', 'B'])
- g = df.groupby('A')
-
- assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))
- assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))
- assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))
- assert_frame_equal(
- g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))
- assert_frame_equal(
- g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
- assert_frame_equal(
- g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
- assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))
- assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index('A'))
-
- business_dates = pd.date_range(start='4/1/2014', end='6/30/2014',
- freq='B')
- df = DataFrame(1, index=business_dates, columns=['a', 'b'])
- # get the first, fourth and last two business days for each month
- key = [df.index.year, df.index.month]
- result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
- expected_dates = pd.to_datetime(
- ['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30', '2014/5/1',
- '2014/5/6', '2014/5/29', '2014/5/30', '2014/6/2', '2014/6/5',
- '2014/6/27', '2014/6/30'])
- expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)
- assert_frame_equal(result, expected)
-
- def test_nth_multi_index(self):
- # PR 9090, related to issue 8979
- # test nth on MultiIndex, should match .first()
- grouped = self.three_group.groupby(['A', 'B'])
- result = grouped.nth(0)
- expected = grouped.first()
- assert_frame_equal(result, expected)
-
- def test_nth_multi_index_as_expected(self):
- # PR 9090, related to issue 8979
- # test nth on MultiIndex
- three_group = DataFrame(
- {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
- 'foo', 'foo', 'foo'],
- 'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
- 'two', 'two', 'one'],
- 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
- 'dull', 'shiny', 'shiny', 'shiny']})
- grouped = three_group.groupby(['A', 'B'])
- result = grouped.nth(0)
- expected = DataFrame(
- {'C': ['dull', 'dull', 'dull', 'dull']},
- index=MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo'],
- ['one', 'two', 'one', 'two']],
- names=['A', 'B']))
- assert_frame_equal(result, expected)
-
- def test_groupby_head_tail(self):
- df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
- g_as = df.groupby('A', as_index=True)
- g_not_as = df.groupby('A', as_index=False)
-
- # as_index= False, much easier
- assert_frame_equal(df.loc[[0, 2]], g_not_as.head(1))
- assert_frame_equal(df.loc[[1, 2]], g_not_as.tail(1))
-
- empty_not_as = DataFrame(columns=df.columns,
- index=pd.Index([], dtype=df.index.dtype))
- empty_not_as['A'] = empty_not_as['A'].astype(df.A.dtype)
- empty_not_as['B'] = empty_not_as['B'].astype(df.B.dtype)
- assert_frame_equal(empty_not_as, g_not_as.head(0))
- assert_frame_equal(empty_not_as, g_not_as.tail(0))
- assert_frame_equal(empty_not_as, g_not_as.head(-1))
- assert_frame_equal(empty_not_as, g_not_as.tail(-1))
-
- assert_frame_equal(df, g_not_as.head(7)) # contains all
- assert_frame_equal(df, g_not_as.tail(7))
-
- # as_index=True, (used to be different)
- df_as = df
-
- assert_frame_equal(df_as.loc[[0, 2]], g_as.head(1))
- assert_frame_equal(df_as.loc[[1, 2]], g_as.tail(1))
-
- empty_as = DataFrame(index=df_as.index[:0], columns=df.columns)
- empty_as['A'] = empty_not_as['A'].astype(df.A.dtype)
- empty_as['B'] = empty_not_as['B'].astype(df.B.dtype)
- assert_frame_equal(empty_as, g_as.head(0))
- assert_frame_equal(empty_as, g_as.tail(0))
- assert_frame_equal(empty_as, g_as.head(-1))
- assert_frame_equal(empty_as, g_as.tail(-1))
-
- assert_frame_equal(df_as, g_as.head(7)) # contains all
- assert_frame_equal(df_as, g_as.tail(7))
-
- # test with selection
- assert_frame_equal(g_as[[]].head(1), df_as.loc[[0, 2], []])
- assert_frame_equal(g_as[['A']].head(1), df_as.loc[[0, 2], ['A']])
- assert_frame_equal(g_as[['B']].head(1), df_as.loc[[0, 2], ['B']])
- assert_frame_equal(g_as[['A', 'B']].head(1), df_as.loc[[0, 2]])
-
- assert_frame_equal(g_not_as[[]].head(1), df_as.loc[[0, 2], []])
- assert_frame_equal(g_not_as[['A']].head(1), df_as.loc[[0, 2], ['A']])
- assert_frame_equal(g_not_as[['B']].head(1), df_as.loc[[0, 2], ['B']])
- assert_frame_equal(g_not_as[['A', 'B']].head(1), df_as.loc[[0, 2]])
-
- def test_group_selection_cache(self):
- # GH 12839 nth, head, and tail should return same result consistently
- df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
- expected = df.iloc[[0, 2]].set_index('A')
-
- g = df.groupby('A')
- result1 = g.head(n=2)
- result2 = g.nth(0)
- assert_frame_equal(result1, df)
- assert_frame_equal(result2, expected)
-
- g = df.groupby('A')
- result1 = g.tail(n=2)
- result2 = g.nth(0)
- assert_frame_equal(result1, df)
- assert_frame_equal(result2, expected)
-
- g = df.groupby('A')
- result1 = g.nth(0)
- result2 = g.head(n=2)
- assert_frame_equal(result1, expected)
- assert_frame_equal(result2, df)
-
- g = df.groupby('A')
- result1 = g.nth(0)
- result2 = g.tail(n=2)
- assert_frame_equal(result1, expected)
- assert_frame_equal(result2, df)
+
+def test_first_last_nth(df):
+ # tests for first / last / nth
+ grouped = df.groupby('A')
+ first = grouped.first()
+ expected = df.loc[[1, 0], ['B', 'C', 'D']]
+ expected.index = Index(['bar', 'foo'], name='A')
+ expected = expected.sort_index()
+ assert_frame_equal(first, expected)
+
+ nth = grouped.nth(0)
+ assert_frame_equal(nth, expected)
+
+ last = grouped.last()
+ expected = df.loc[[5, 7], ['B', 'C', 'D']]
+ expected.index = Index(['bar', 'foo'], name='A')
+ assert_frame_equal(last, expected)
+
+ nth = grouped.nth(-1)
+ assert_frame_equal(nth, expected)
+
+ nth = grouped.nth(1)
+ expected = df.loc[[2, 3], ['B', 'C', 'D']].copy()
+ expected.index = Index(['foo', 'bar'], name='A')
+ expected = expected.sort_index()
+ assert_frame_equal(nth, expected)
+
+ # it works!
+ grouped['B'].first()
+ grouped['B'].last()
+ grouped['B'].nth(0)
+
+ df.loc[df['A'] == 'foo', 'B'] = np.nan
+ assert isna(grouped['B'].first()['foo'])
+ assert isna(grouped['B'].last()['foo'])
+ assert isna(grouped['B'].nth(0)['foo'])
+
+ # v0.14.0 whatsnew
+ df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
+ g = df.groupby('A')
+ result = g.first()
+ expected = df.iloc[[1, 2]].set_index('A')
+ assert_frame_equal(result, expected)
+
+ expected = df.iloc[[1, 2]].set_index('A')
+ result = g.nth(0, dropna='any')
+ assert_frame_equal(result, expected)
+
+
+def test_first_last_nth_dtypes(df_mixed_floats):
+
+ df = df_mixed_floats.copy()
+ df['E'] = True
+ df['F'] = 1
+
+ # tests for first / last / nth
+ grouped = df.groupby('A')
+ first = grouped.first()
+ expected = df.loc[[1, 0], ['B', 'C', 'D', 'E', 'F']]
+ expected.index = Index(['bar', 'foo'], name='A')
+ expected = expected.sort_index()
+ assert_frame_equal(first, expected)
+
+ last = grouped.last()
+ expected = df.loc[[5, 7], ['B', 'C', 'D', 'E', 'F']]
+ expected.index = Index(['bar', 'foo'], name='A')
+ expected = expected.sort_index()
+ assert_frame_equal(last, expected)
+
+ nth = grouped.nth(1)
+ expected = df.loc[[3, 2], ['B', 'C', 'D', 'E', 'F']]
+ expected.index = Index(['bar', 'foo'], name='A')
+ expected = expected.sort_index()
+ assert_frame_equal(nth, expected)
+
+ # GH 2763, first/last shifting dtypes
+ idx = lrange(10)
+ idx.append(9)
+ s = Series(data=lrange(11), index=idx, name='IntCol')
+ assert s.dtype == 'int64'
+ f = s.groupby(level=0).first()
+ assert f.dtype == 'int64'
+
+
+def test_nth():
+ df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
+ g = df.groupby('A')
+
+ assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))
+ assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))
+ assert_frame_equal(g.nth(2), df.loc[[]].set_index('A'))
+ assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))
+ assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))
+ assert_frame_equal(g.nth(-3), df.loc[[]].set_index('A'))
+ assert_series_equal(g.B.nth(0), df.set_index('A').B.iloc[[0, 2]])
+ assert_series_equal(g.B.nth(1), df.set_index('A').B.iloc[[1]])
+ assert_frame_equal(g[['B']].nth(0),
+ df.loc[[0, 2], ['A', 'B']].set_index('A'))
+
+ exp = df.set_index('A')
+ assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])
+ assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])
+
+ exp['B'] = np.nan
+ assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])
+ assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])
+
+ # out of bounds, regression from 0.13.1
+ # GH 6621
+ df = DataFrame({'color': {0: 'green',
+ 1: 'green',
+ 2: 'red',
+ 3: 'red',
+ 4: 'red'},
+ 'food': {0: 'ham',
+ 1: 'eggs',
+ 2: 'eggs',
+ 3: 'ham',
+ 4: 'pork'},
+ 'two': {0: 1.5456590000000001,
+ 1: -0.070345000000000005,
+ 2: -2.4004539999999999,
+ 3: 0.46206000000000003,
+ 4: 0.52350799999999997},
+ 'one': {0: 0.56573799999999996,
+ 1: -0.9742360000000001,
+ 2: 1.033801,
+ 3: -0.78543499999999999,
+ 4: 0.70422799999999997}}).set_index(['color',
+ 'food'])
+
+ result = df.groupby(level=0, as_index=False).nth(2)
+ expected = df.iloc[[-1]]
+ assert_frame_equal(result, expected)
+
+ result = df.groupby(level=0, as_index=False).nth(3)
+ expected = df.loc[[]]
+ assert_frame_equal(result, expected)
+
+ # GH 7559
+ # from the vbench
+ df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype='int64')
+ s = df[1]
+ g = df[0]
+ expected = s.groupby(g).first()
+ expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
+ assert_series_equal(expected2, expected, check_names=False)
+ assert expected.name == 1
+ assert expected2.name == 1
+
+ # validate first
+ v = s[g == 1].iloc[0]
+ assert expected.iloc[0] == v
+ assert expected2.iloc[0] == v
+
+ # this is NOT the same as .first (as sorted is default!)
+ # as it keeps the order in the series (and not the group order)
+ # related GH 7287
+ expected = s.groupby(g, sort=False).first()
+ result = s.groupby(g, sort=False).nth(0, dropna='all')
+ assert_series_equal(result, expected)
+
+ # doc example
+ df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
+ g = df.groupby('A')
+ # PR 17493, related to issue 11038
+ # test Series.nth with True for dropna produces FutureWarning
+ with assert_produces_warning(FutureWarning):
+ result = g.B.nth(0, dropna=True)
+ expected = g.B.first()
+ assert_series_equal(result, expected)
+
+ # test multiple nth values
+ df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],
+ columns=['A', 'B'])
+ g = df.groupby('A')
+
+ assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))
+ assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))
+ assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))
+ assert_frame_equal(
+ g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))
+ assert_frame_equal(
+ g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
+ assert_frame_equal(
+ g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
+ assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))
+ assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index('A'))
+
+ business_dates = pd.date_range(start='4/1/2014', end='6/30/2014',
+ freq='B')
+ df = DataFrame(1, index=business_dates, columns=['a', 'b'])
+ # get the first, fourth and last two business days for each month
+ key = [df.index.year, df.index.month]
+ result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
+ expected_dates = pd.to_datetime(
+ ['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30', '2014/5/1',
+ '2014/5/6', '2014/5/29', '2014/5/30', '2014/6/2', '2014/6/5',
+ '2014/6/27', '2014/6/30'])
+ expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)
+ assert_frame_equal(result, expected)
+
+
+def test_nth_multi_index(three_group):
+ # PR 9090, related to issue 8979
+ # test nth on MultiIndex, should match .first()
+ grouped = three_group.groupby(['A', 'B'])
+ result = grouped.nth(0)
+ expected = grouped.first()
+ assert_frame_equal(result, expected)
+
+
+def test_nth_multi_index_as_expected():
+ # PR 9090, related to issue 8979
+ # test nth on MultiIndex
+ three_group = DataFrame(
+ {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
+ 'foo', 'foo', 'foo'],
+ 'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
+ 'two', 'two', 'one'],
+ 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
+ 'dull', 'shiny', 'shiny', 'shiny']})
+ grouped = three_group.groupby(['A', 'B'])
+ result = grouped.nth(0)
+ expected = DataFrame(
+ {'C': ['dull', 'dull', 'dull', 'dull']},
+ index=MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo'],
+ ['one', 'two', 'one', 'two']],
+ names=['A', 'B']))
+ assert_frame_equal(result, expected)
+
+
+def test_groupby_head_tail():
+ df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
+ g_as = df.groupby('A', as_index=True)
+ g_not_as = df.groupby('A', as_index=False)
+
+ # as_index= False, much easier
+ assert_frame_equal(df.loc[[0, 2]], g_not_as.head(1))
+ assert_frame_equal(df.loc[[1, 2]], g_not_as.tail(1))
+
+ empty_not_as = DataFrame(columns=df.columns,
+ index=pd.Index([], dtype=df.index.dtype))
+ empty_not_as['A'] = empty_not_as['A'].astype(df.A.dtype)
+ empty_not_as['B'] = empty_not_as['B'].astype(df.B.dtype)
+ assert_frame_equal(empty_not_as, g_not_as.head(0))
+ assert_frame_equal(empty_not_as, g_not_as.tail(0))
+ assert_frame_equal(empty_not_as, g_not_as.head(-1))
+ assert_frame_equal(empty_not_as, g_not_as.tail(-1))
+
+ assert_frame_equal(df, g_not_as.head(7)) # contains all
+ assert_frame_equal(df, g_not_as.tail(7))
+
+ # as_index=True, (used to be different)
+ df_as = df
+
+ assert_frame_equal(df_as.loc[[0, 2]], g_as.head(1))
+ assert_frame_equal(df_as.loc[[1, 2]], g_as.tail(1))
+
+ empty_as = DataFrame(index=df_as.index[:0], columns=df.columns)
+ empty_as['A'] = empty_not_as['A'].astype(df.A.dtype)
+ empty_as['B'] = empty_not_as['B'].astype(df.B.dtype)
+ assert_frame_equal(empty_as, g_as.head(0))
+ assert_frame_equal(empty_as, g_as.tail(0))
+ assert_frame_equal(empty_as, g_as.head(-1))
+ assert_frame_equal(empty_as, g_as.tail(-1))
+
+ assert_frame_equal(df_as, g_as.head(7)) # contains all
+ assert_frame_equal(df_as, g_as.tail(7))
+
+ # test with selection
+ assert_frame_equal(g_as[[]].head(1), df_as.loc[[0, 2], []])
+ assert_frame_equal(g_as[['A']].head(1), df_as.loc[[0, 2], ['A']])
+ assert_frame_equal(g_as[['B']].head(1), df_as.loc[[0, 2], ['B']])
+ assert_frame_equal(g_as[['A', 'B']].head(1), df_as.loc[[0, 2]])
+
+ assert_frame_equal(g_not_as[[]].head(1), df_as.loc[[0, 2], []])
+ assert_frame_equal(g_not_as[['A']].head(1), df_as.loc[[0, 2], ['A']])
+ assert_frame_equal(g_not_as[['B']].head(1), df_as.loc[[0, 2], ['B']])
+ assert_frame_equal(g_not_as[['A', 'B']].head(1), df_as.loc[[0, 2]])
+
+
+def test_group_selection_cache():
+ # GH 12839 nth, head, and tail should return same result consistently
+ df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
+ expected = df.iloc[[0, 2]].set_index('A')
+
+ g = df.groupby('A')
+ result1 = g.head(n=2)
+ result2 = g.nth(0)
+ assert_frame_equal(result1, df)
+ assert_frame_equal(result2, expected)
+
+ g = df.groupby('A')
+ result1 = g.tail(n=2)
+ result2 = g.nth(0)
+ assert_frame_equal(result1, df)
+ assert_frame_equal(result2, expected)
+
+ g = df.groupby('A')
+ result1 = g.nth(0)
+ result2 = g.head(n=2)
+ assert_frame_equal(result1, expected)
+ assert_frame_equal(result2, df)
+
+ g = df.groupby('A')
+ result1 = g.nth(0)
+ result2 = g.tail(n=2)
+ assert_frame_equal(result1, expected)
+ assert_frame_equal(result2, df)
def test_nth_empty():
diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py
new file mode 100644
index 0000000000000..6ad8b4905abff
--- /dev/null
+++ b/pandas/tests/groupby/test_rank.py
@@ -0,0 +1,254 @@
+import pytest
+import numpy as np
+import pandas as pd
+from pandas import DataFrame, concat
+from pandas.util import testing as tm
+
+
+def test_rank_apply():
+ lev1 = tm.rands_array(10, 100)
+ lev2 = tm.rands_array(10, 130)
+ lab1 = np.random.randint(0, 100, size=500)
+ lab2 = np.random.randint(0, 130, size=500)
+
+ df = DataFrame({'value': np.random.randn(500),
+ 'key1': lev1.take(lab1),
+ 'key2': lev2.take(lab2)})
+
+ result = df.groupby(['key1', 'key2']).value.rank()
+
+ expected = []
+ for key, piece in df.groupby(['key1', 'key2']):
+ expected.append(piece.value.rank())
+ expected = concat(expected, axis=0)
+ expected = expected.reindex(result.index)
+ tm.assert_series_equal(result, expected)
+
+ result = df.groupby(['key1', 'key2']).value.rank(pct=True)
+
+ expected = []
+ for key, piece in df.groupby(['key1', 'key2']):
+ expected.append(piece.value.rank(pct=True))
+ expected = concat(expected, axis=0)
+ expected = expected.reindex(result.index)
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("grps", [
+ ['qux'], ['qux', 'quux']])
+@pytest.mark.parametrize("vals", [
+ [2, 2, 8, 2, 6],
+ [pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'),
+ pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'),
+ pd.Timestamp('2018-01-06')]])
+@pytest.mark.parametrize("ties_method,ascending,pct,exp", [
+ ('average', True, False, [2., 2., 5., 2., 4.]),
+ ('average', True, True, [0.4, 0.4, 1.0, 0.4, 0.8]),
+ ('average', False, False, [4., 4., 1., 4., 2.]),
+ ('average', False, True, [.8, .8, .2, .8, .4]),
+ ('min', True, False, [1., 1., 5., 1., 4.]),
+ ('min', True, True, [0.2, 0.2, 1.0, 0.2, 0.8]),
+ ('min', False, False, [3., 3., 1., 3., 2.]),
+ ('min', False, True, [.6, .6, .2, .6, .4]),
+ ('max', True, False, [3., 3., 5., 3., 4.]),
+ ('max', True, True, [0.6, 0.6, 1.0, 0.6, 0.8]),
+ ('max', False, False, [5., 5., 1., 5., 2.]),
+ ('max', False, True, [1., 1., .2, 1., .4]),
+ ('first', True, False, [1., 2., 5., 3., 4.]),
+ ('first', True, True, [0.2, 0.4, 1.0, 0.6, 0.8]),
+ ('first', False, False, [3., 4., 1., 5., 2.]),
+ ('first', False, True, [.6, .8, .2, 1., .4]),
+ ('dense', True, False, [1., 1., 3., 1., 2.]),
+ ('dense', True, True, [0.2, 0.2, 0.6, 0.2, 0.4]),
+ ('dense', False, False, [3., 3., 1., 3., 2.]),
+ ('dense', False, True, [.6, .6, .2, .6, .4]),
+])
+def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
+ key = np.repeat(grps, len(vals))
+ vals = vals * len(grps)
+ df = DataFrame({'key': key, 'val': vals})
+ result = df.groupby('key').rank(method=ties_method,
+ ascending=ascending, pct=pct)
+
+ exp_df = DataFrame(exp * len(grps), columns=['val'])
+ tm.assert_frame_equal(result, exp_df)
+
+
+@pytest.mark.parametrize("grps", [
+ ['qux'], ['qux', 'quux']])
+@pytest.mark.parametrize("vals", [
+ [-np.inf, -np.inf, np.nan, 1., np.nan, np.inf, np.inf],
+])
+@pytest.mark.parametrize("ties_method,ascending,na_option,exp", [
+ ('average', True, 'keep', [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]),
+ ('average', True, 'top', [3.5, 3.5, 1.5, 5., 1.5, 6.5, 6.5]),
+ ('average', True, 'bottom', [1.5, 1.5, 6.5, 3., 6.5, 4.5, 4.5]),
+ ('average', False, 'keep', [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]),
+ ('average', False, 'top', [6.5, 6.5, 1.5, 5., 1.5, 3.5, 3.5]),
+ ('average', False, 'bottom', [4.5, 4.5, 6.5, 3., 6.5, 1.5, 1.5]),
+ ('min', True, 'keep', [1., 1., np.nan, 3., np.nan, 4., 4.]),
+ ('min', True, 'top', [3., 3., 1., 5., 1., 6., 6.]),
+ ('min', True, 'bottom', [1., 1., 6., 3., 6., 4., 4.]),
+ ('min', False, 'keep', [4., 4., np.nan, 3., np.nan, 1., 1.]),
+ ('min', False, 'top', [6., 6., 1., 5., 1., 3., 3.]),
+ ('min', False, 'bottom', [4., 4., 6., 3., 6., 1., 1.]),
+ ('max', True, 'keep', [2., 2., np.nan, 3., np.nan, 5., 5.]),
+ ('max', True, 'top', [4., 4., 2., 5., 2., 7., 7.]),
+ ('max', True, 'bottom', [2., 2., 7., 3., 7., 5., 5.]),
+ ('max', False, 'keep', [5., 5., np.nan, 3., np.nan, 2., 2.]),
+ ('max', False, 'top', [7., 7., 2., 5., 2., 4., 4.]),
+ ('max', False, 'bottom', [5., 5., 7., 3., 7., 2., 2.]),
+ ('first', True, 'keep', [1., 2., np.nan, 3., np.nan, 4., 5.]),
+ ('first', True, 'top', [3., 4., 1., 5., 2., 6., 7.]),
+ ('first', True, 'bottom', [1., 2., 6., 3., 7., 4., 5.]),
+ ('first', False, 'keep', [4., 5., np.nan, 3., np.nan, 1., 2.]),
+ ('first', False, 'top', [6., 7., 1., 5., 2., 3., 4.]),
+ ('first', False, 'bottom', [4., 5., 6., 3., 7., 1., 2.]),
+ ('dense', True, 'keep', [1., 1., np.nan, 2., np.nan, 3., 3.]),
+ ('dense', True, 'top', [2., 2., 1., 3., 1., 4., 4.]),
+ ('dense', True, 'bottom', [1., 1., 4., 2., 4., 3., 3.]),
+ ('dense', False, 'keep', [3., 3., np.nan, 2., np.nan, 1., 1.]),
+ ('dense', False, 'top', [4., 4., 1., 3., 1., 2., 2.]),
+ ('dense', False, 'bottom', [3., 3., 4., 2., 4., 1., 1.])
+])
+def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
+ # GH 20561
+ key = np.repeat(grps, len(vals))
+ vals = vals * len(grps)
+ df = DataFrame({'key': key, 'val': vals})
+ result = df.groupby('key').rank(method=ties_method,
+ ascending=ascending,
+ na_option=na_option)
+ exp_df = DataFrame(exp * len(grps), columns=['val'])
+ tm.assert_frame_equal(result, exp_df)
+
+
+@pytest.mark.parametrize("grps", [
+ ['qux'], ['qux', 'quux']])
+@pytest.mark.parametrize("vals", [
+ [2, 2, np.nan, 8, 2, 6, np.nan, np.nan], # floats
+ [pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'), np.nan,
+ pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'),
+ pd.Timestamp('2018-01-06'), np.nan, np.nan]
+])
+@pytest.mark.parametrize("ties_method,ascending,na_option,pct,exp", [
+ ('average', True, 'keep', False,
+ [2., 2., np.nan, 5., 2., 4., np.nan, np.nan]),
+ ('average', True, 'keep', True,
+ [0.4, 0.4, np.nan, 1.0, 0.4, 0.8, np.nan, np.nan]),
+ ('average', False, 'keep', False,
+ [4., 4., np.nan, 1., 4., 2., np.nan, np.nan]),
+ ('average', False, 'keep', True,
+ [.8, 0.8, np.nan, 0.2, 0.8, 0.4, np.nan, np.nan]),
+ ('min', True, 'keep', False,
+ [1., 1., np.nan, 5., 1., 4., np.nan, np.nan]),
+ ('min', True, 'keep', True,
+ [0.2, 0.2, np.nan, 1.0, 0.2, 0.8, np.nan, np.nan]),
+ ('min', False, 'keep', False,
+ [3., 3., np.nan, 1., 3., 2., np.nan, np.nan]),
+ ('min', False, 'keep', True,
+ [.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
+ ('max', True, 'keep', False,
+ [3., 3., np.nan, 5., 3., 4., np.nan, np.nan]),
+ ('max', True, 'keep', True,
+ [0.6, 0.6, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
+ ('max', False, 'keep', False,
+ [5., 5., np.nan, 1., 5., 2., np.nan, np.nan]),
+ ('max', False, 'keep', True,
+ [1., 1., np.nan, 0.2, 1., 0.4, np.nan, np.nan]),
+ ('first', True, 'keep', False,
+ [1., 2., np.nan, 5., 3., 4., np.nan, np.nan]),
+ ('first', True, 'keep', True,
+ [0.2, 0.4, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
+ ('first', False, 'keep', False,
+ [3., 4., np.nan, 1., 5., 2., np.nan, np.nan]),
+ ('first', False, 'keep', True,
+ [.6, 0.8, np.nan, 0.2, 1., 0.4, np.nan, np.nan]),
+ ('dense', True, 'keep', False,
+ [1., 1., np.nan, 3., 1., 2., np.nan, np.nan]),
+ ('dense', True, 'keep', True,
+ [0.2, 0.2, np.nan, 0.6, 0.2, 0.4, np.nan, np.nan]),
+ ('dense', False, 'keep', False,
+ [3., 3., np.nan, 1., 3., 2., np.nan, np.nan]),
+ ('dense', False, 'keep', True,
+ [.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
+ ('average', True, 'no_na', False, [2., 2., 7., 5., 2., 4., 7., 7.]),
+ ('average', True, 'no_na', True,
+ [0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875]),
+ ('average', False, 'no_na', False, [4., 4., 7., 1., 4., 2., 7., 7.]),
+ ('average', False, 'no_na', True,
+ [0.5, 0.5, 0.875, 0.125, 0.5, 0.25, 0.875, 0.875]),
+ ('min', True, 'no_na', False, [1., 1., 6., 5., 1., 4., 6., 6.]),
+ ('min', True, 'no_na', True,
+ [0.125, 0.125, 0.75, 0.625, 0.125, 0.5, 0.75, 0.75]),
+ ('min', False, 'no_na', False, [3., 3., 6., 1., 3., 2., 6., 6.]),
+ ('min', False, 'no_na', True,
+ [0.375, 0.375, 0.75, 0.125, 0.375, 0.25, 0.75, 0.75]),
+ ('max', True, 'no_na', False, [3., 3., 8., 5., 3., 4., 8., 8.]),
+ ('max', True, 'no_na', True,
+ [0.375, 0.375, 1., 0.625, 0.375, 0.5, 1., 1.]),
+ ('max', False, 'no_na', False, [5., 5., 8., 1., 5., 2., 8., 8.]),
+ ('max', False, 'no_na', True,
+ [0.625, 0.625, 1., 0.125, 0.625, 0.25, 1., 1.]),
+ ('first', True, 'no_na', False, [1., 2., 6., 5., 3., 4., 7., 8.]),
+ ('first', True, 'no_na', True,
+ [0.125, 0.25, 0.75, 0.625, 0.375, 0.5, 0.875, 1.]),
+ ('first', False, 'no_na', False, [3., 4., 6., 1., 5., 2., 7., 8.]),
+ ('first', False, 'no_na', True,
+ [0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.]),
+ ('dense', True, 'no_na', False, [1., 1., 4., 3., 1., 2., 4., 4.]),
+ ('dense', True, 'no_na', True,
+ [0.125, 0.125, 0.5, 0.375, 0.125, 0.25, 0.5, 0.5]),
+ ('dense', False, 'no_na', False, [3., 3., 4., 1., 3., 2., 4., 4.]),
+ ('dense', False, 'no_na', True,
+ [0.375, 0.375, 0.5, 0.125, 0.375, 0.25, 0.5, 0.5])
+])
+def test_rank_args_missing(grps, vals, ties_method, ascending,
+ na_option, pct, exp):
+ key = np.repeat(grps, len(vals))
+ vals = vals * len(grps)
+ df = DataFrame({'key': key, 'val': vals})
+ result = df.groupby('key').rank(method=ties_method,
+ ascending=ascending,
+ na_option=na_option, pct=pct)
+
+ exp_df = DataFrame(exp * len(grps), columns=['val'])
+ tm.assert_frame_equal(result, exp_df)
+
+
+@pytest.mark.parametrize("pct,exp", [
+ (False, [3., 3., 3., 3., 3.]),
+ (True, [.6, .6, .6, .6, .6])])
+def test_rank_resets_each_group(pct, exp):
+ df = DataFrame(
+ {'key': ['a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b'],
+ 'val': [1] * 10}
+ )
+ result = df.groupby('key').rank(pct=pct)
+ exp_df = DataFrame(exp * 2, columns=['val'])
+ tm.assert_frame_equal(result, exp_df)
+
+
+def test_rank_avg_even_vals():
+ df = DataFrame({'key': ['a'] * 4, 'val': [1] * 4})
+ result = df.groupby('key').rank()
+ exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=['val'])
+ tm.assert_frame_equal(result, exp_df)
+
+
+@pytest.mark.parametrize("ties_method", [
+ 'average', 'min', 'max', 'first', 'dense'])
+@pytest.mark.parametrize("ascending", [True, False])
+@pytest.mark.parametrize("na_option", ["keep", "top", "bottom"])
+@pytest.mark.parametrize("pct", [True, False])
+@pytest.mark.parametrize("vals", [
+ ['bar', 'bar', 'foo', 'bar', 'baz'],
+ ['bar', np.nan, 'foo', np.nan, 'baz']
+])
+def test_rank_object_raises(ties_method, ascending, na_option,
+ pct, vals):
+ df = DataFrame({'key': ['foo'] * 5, 'val': vals})
+ with tm.assert_raises_regex(TypeError, "not callable"):
+ df.groupby('key').rank(method=ties_method,
+ ascending=ascending,
+ na_option=na_option, pct=pct)
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index 390b99d0fab1c..626057c1ea760 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -10,728 +10,758 @@
_ensure_platform_int, is_timedelta64_dtype)
from pandas.compat import StringIO
from pandas._libs import groupby
-from .common import MixIn, assert_fp_equal
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.groupby.groupby import DataError
from pandas.core.config import option_context
-class TestGroupBy(MixIn):
-
- def test_transform(self):
- data = Series(np.arange(9) // 3, index=np.arange(9))
-
- index = np.arange(9)
- np.random.shuffle(index)
- data = data.reindex(index)
-
- grouped = data.groupby(lambda x: x // 3)
-
- transformed = grouped.transform(lambda x: x * x.sum())
- assert transformed[7] == 12
-
- # GH 8046
- # make sure that we preserve the input order
-
- df = DataFrame(
- np.arange(6, dtype='int64').reshape(
- 3, 2), columns=["a", "b"], index=[0, 2, 1])
- key = [0, 0, 1]
- expected = df.sort_index().groupby(key).transform(
- lambda x: x - x.mean()).groupby(key).mean()
- result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(
- key).mean()
- assert_frame_equal(result, expected)
-
- def demean(arr):
- return arr - arr.mean()
-
- people = DataFrame(np.random.randn(5, 5),
- columns=['a', 'b', 'c', 'd', 'e'],
- index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
- key = ['one', 'two', 'one', 'two', 'one']
- result = people.groupby(key).transform(demean).groupby(key).mean()
- expected = people.groupby(key).apply(demean).groupby(key).mean()
- assert_frame_equal(result, expected)
-
- # GH 8430
- df = tm.makeTimeDataFrame()
- g = df.groupby(pd.Grouper(freq='M'))
- g.transform(lambda x: x - 1)
-
- # GH 9700
- df = DataFrame({'a': range(5, 10), 'b': range(5)})
- result = df.groupby('a').transform(max)
- expected = DataFrame({'b': range(5)})
- tm.assert_frame_equal(result, expected)
-
- def test_transform_fast(self):
-
- df = DataFrame({'id': np.arange(100000) / 3,
- 'val': np.random.randn(100000)})
-
- grp = df.groupby('id')['val']
-
- values = np.repeat(grp.mean().values,
- _ensure_platform_int(grp.count().values))
- expected = pd.Series(values, index=df.index, name='val')
-
- result = grp.transform(np.mean)
- assert_series_equal(result, expected)
-
- result = grp.transform('mean')
- assert_series_equal(result, expected)
-
- # GH 12737
- df = pd.DataFrame({'grouping': [0, 1, 1, 3], 'f': [1.1, 2.1, 3.1, 4.5],
- 'd': pd.date_range('2014-1-1', '2014-1-4'),
- 'i': [1, 2, 3, 4]},
- columns=['grouping', 'f', 'i', 'd'])
- result = df.groupby('grouping').transform('first')
-
- dates = [pd.Timestamp('2014-1-1'), pd.Timestamp('2014-1-2'),
- pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-4')]
- expected = pd.DataFrame({'f': [1.1, 2.1, 2.1, 4.5],
- 'd': dates,
- 'i': [1, 2, 2, 4]},
- columns=['f', 'i', 'd'])
- assert_frame_equal(result, expected)
-
- # selection
- result = df.groupby('grouping')[['f', 'i']].transform('first')
- expected = expected[['f', 'i']]
- assert_frame_equal(result, expected)
-
- # dup columns
- df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['g', 'a', 'a'])
- result = df.groupby('g').transform('first')
- expected = df.drop('g', axis=1)
- assert_frame_equal(result, expected)
-
- def test_transform_broadcast(self):
- grouped = self.ts.groupby(lambda x: x.month)
- result = grouped.transform(np.mean)
-
- tm.assert_index_equal(result.index, self.ts.index)
- for _, gp in grouped:
- assert_fp_equal(result.reindex(gp.index), gp.mean())
-
- grouped = self.tsframe.groupby(lambda x: x.month)
- result = grouped.transform(np.mean)
- tm.assert_index_equal(result.index, self.tsframe.index)
- for _, gp in grouped:
- agged = gp.mean()
- res = result.reindex(gp.index)
- for col in self.tsframe:
- assert_fp_equal(res[col], agged[col])
-
- # group columns
- grouped = self.tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
- axis=1)
- result = grouped.transform(np.mean)
- tm.assert_index_equal(result.index, self.tsframe.index)
- tm.assert_index_equal(result.columns, self.tsframe.columns)
- for _, gp in grouped:
- agged = gp.mean(1)
- res = result.reindex(columns=gp.columns)
- for idx in gp.index:
- assert_fp_equal(res.xs(idx), agged[idx])
-
- def test_transform_axis(self):
-
- # make sure that we are setting the axes
- # correctly when on axis=0 or 1
- # in the presence of a non-monotonic indexer
- # GH12713
-
- base = self.tsframe.iloc[0:5]
- r = len(base.index)
- c = len(base.columns)
- tso = DataFrame(np.random.randn(r, c),
- index=base.index,
- columns=base.columns,
- dtype='float64')
- # monotonic
- ts = tso
- grouped = ts.groupby(lambda x: x.weekday())
- result = ts - grouped.transform('mean')
- expected = grouped.apply(lambda x: x - x.mean())
- assert_frame_equal(result, expected)
-
- ts = ts.T
- grouped = ts.groupby(lambda x: x.weekday(), axis=1)
- result = ts - grouped.transform('mean')
- expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
- assert_frame_equal(result, expected)
-
- # non-monotonic
- ts = tso.iloc[[1, 0] + list(range(2, len(base)))]
- grouped = ts.groupby(lambda x: x.weekday())
- result = ts - grouped.transform('mean')
- expected = grouped.apply(lambda x: x - x.mean())
- assert_frame_equal(result, expected)
-
- ts = ts.T
- grouped = ts.groupby(lambda x: x.weekday(), axis=1)
- result = ts - grouped.transform('mean')
- expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
- assert_frame_equal(result, expected)
-
- def test_transform_dtype(self):
- # GH 9807
- # Check transform dtype output is preserved
- df = DataFrame([[1, 3], [2, 3]])
- result = df.groupby(1).transform('mean')
- expected = DataFrame([[1.5], [1.5]])
- assert_frame_equal(result, expected)
-
- def test_transform_bug(self):
- # GH 5712
- # transforming on a datetime column
- df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
- result = df.groupby('A')['B'].transform(
- lambda x: x.rank(ascending=False))
- expected = Series(np.arange(5, 0, step=-1), name='B')
- assert_series_equal(result, expected)
-
- def test_transform_numeric_to_boolean(self):
- # GH 16875
- # inconsistency in transforming boolean values
- expected = pd.Series([True, True], name='A')
-
- df = pd.DataFrame({'A': [1.1, 2.2], 'B': [1, 2]})
- result = df.groupby('B').A.transform(lambda x: True)
- assert_series_equal(result, expected)
-
- df = pd.DataFrame({'A': [1, 2], 'B': [1, 2]})
- result = df.groupby('B').A.transform(lambda x: True)
- assert_series_equal(result, expected)
-
- def test_transform_datetime_to_timedelta(self):
- # GH 15429
- # transforming a datetime to timedelta
- df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
- expected = pd.Series([
- Timestamp('20130101') - Timestamp('20130101')] * 5, name='A')
-
- # this does date math without changing result type in transform
- base_time = df['A'][0]
- result = df.groupby('A')['A'].transform(
- lambda x: x.max() - x.min() + base_time) - base_time
- assert_series_equal(result, expected)
-
- # this does date math and causes the transform to return timedelta
- result = df.groupby('A')['A'].transform(lambda x: x.max() - x.min())
- assert_series_equal(result, expected)
-
- def test_transform_datetime_to_numeric(self):
- # GH 10972
- # convert dt to float
- df = DataFrame({
- 'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
- result = df.groupby('a').b.transform(
- lambda x: x.dt.dayofweek - x.dt.dayofweek.mean())
-
- expected = Series([-0.5, 0.5], name='b')
- assert_series_equal(result, expected)
-
- # convert dt to int
- df = DataFrame({
- 'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
- result = df.groupby('a').b.transform(
- lambda x: x.dt.dayofweek - x.dt.dayofweek.min())
-
- expected = Series([0, 1], name='b')
- assert_series_equal(result, expected)
-
- def test_transform_casting(self):
- # 13046
- data = """
- idx A ID3 DATETIME
- 0 B-028 b76cd912ff "2014-10-08 13:43:27"
- 1 B-054 4a57ed0b02 "2014-10-08 14:26:19"
- 2 B-076 1a682034f8 "2014-10-08 14:29:01"
- 3 B-023 b76cd912ff "2014-10-08 18:39:34"
- 4 B-023 f88g8d7sds "2014-10-08 18:40:18"
- 5 B-033 b76cd912ff "2014-10-08 18:44:30"
- 6 B-032 b76cd912ff "2014-10-08 18:46:00"
- 7 B-037 b76cd912ff "2014-10-08 18:52:15"
- 8 B-046 db959faf02 "2014-10-08 18:59:59"
- 9 B-053 b76cd912ff "2014-10-08 19:17:48"
- 10 B-065 b76cd912ff "2014-10-08 19:21:38"
- """
- df = pd.read_csv(StringIO(data), sep=r'\s+',
- index_col=[0], parse_dates=['DATETIME'])
-
- result = df.groupby('ID3')['DATETIME'].transform(lambda x: x.diff())
- assert is_timedelta64_dtype(result.dtype)
-
- result = df[['ID3', 'DATETIME']].groupby('ID3').transform(
- lambda x: x.diff())
- assert is_timedelta64_dtype(result.DATETIME.dtype)
-
- def test_transform_multiple(self):
- grouped = self.ts.groupby([lambda x: x.year, lambda x: x.month])
-
- grouped.transform(lambda x: x * 2)
- grouped.transform(np.mean)
-
- def test_dispatch_transform(self):
- df = self.tsframe[::5].reindex(self.tsframe.index)
-
- grouped = df.groupby(lambda x: x.month)
-
- filled = grouped.fillna(method='pad')
- fillit = lambda x: x.fillna(method='pad')
- expected = df.groupby(lambda x: x.month).transform(fillit)
- assert_frame_equal(filled, expected)
-
- def test_transform_select_columns(self):
- f = lambda x: x.mean()
- result = self.df.groupby('A')['C', 'D'].transform(f)
-
- selection = self.df[['C', 'D']]
- expected = selection.groupby(self.df['A']).transform(f)
-
- assert_frame_equal(result, expected)
-
- def test_transform_exclude_nuisance(self):
-
- # this also tests orderings in transform between
- # series/frame to make sure it's consistent
- expected = {}
- grouped = self.df.groupby('A')
- expected['C'] = grouped['C'].transform(np.mean)
- expected['D'] = grouped['D'].transform(np.mean)
- expected = DataFrame(expected)
- result = self.df.groupby('A').transform(np.mean)
-
- assert_frame_equal(result, expected)
-
- def test_transform_function_aliases(self):
- result = self.df.groupby('A').transform('mean')
- expected = self.df.groupby('A').transform(np.mean)
- assert_frame_equal(result, expected)
-
- result = self.df.groupby('A')['C'].transform('mean')
- expected = self.df.groupby('A')['C'].transform(np.mean)
- assert_series_equal(result, expected)
-
- def test_series_fast_transform_date(self):
- # GH 13191
- df = pd.DataFrame({'grouping': [np.nan, 1, 1, 3],
- 'd': pd.date_range('2014-1-1', '2014-1-4')})
- result = df.groupby('grouping')['d'].transform('first')
- dates = [pd.NaT, pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-2'),
- pd.Timestamp('2014-1-4')]
- expected = pd.Series(dates, name='d')
- assert_series_equal(result, expected)
-
- def test_transform_length(self):
- # GH 9697
- df = pd.DataFrame({'col1': [1, 1, 2, 2], 'col2': [1, 2, 3, np.nan]})
- expected = pd.Series([3.0] * 4)
-
- def nsum(x):
- return np.nansum(x)
-
- results = [df.groupby('col1').transform(sum)['col2'],
- df.groupby('col1')['col2'].transform(sum),
- df.groupby('col1').transform(nsum)['col2'],
- df.groupby('col1')['col2'].transform(nsum)]
- for result in results:
- assert_series_equal(result, expected, check_names=False)
-
- def test_transform_coercion(self):
-
- # 14457
- # when we are transforming be sure to not coerce
- # via assignment
- df = pd.DataFrame(dict(A=['a', 'a'], B=[0, 1]))
- g = df.groupby('A')
-
- expected = g.transform(np.mean)
- result = g.transform(lambda x: np.mean(x))
- assert_frame_equal(result, expected)
-
- def test_groupby_transform_with_int(self):
-
- # GH 3740, make sure that we might upcast on item-by-item transform
-
- # floats
- df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=Series(1, dtype='float64'),
- C=Series(
- [1, 2, 3, 1, 2, 3], dtype='float64'), D='foo'))
- with np.errstate(all='ignore'):
- result = df.groupby('A').transform(
- lambda x: (x - x.mean()) / x.std())
- expected = DataFrame(dict(B=np.nan, C=Series(
- [-1, 0, 1, -1, 0, 1], dtype='float64')))
- assert_frame_equal(result, expected)
-
- # int case
- df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1,
- C=[1, 2, 3, 1, 2, 3], D='foo'))
- with np.errstate(all='ignore'):
- result = df.groupby('A').transform(
- lambda x: (x - x.mean()) / x.std())
- expected = DataFrame(dict(B=np.nan, C=[-1, 0, 1, -1, 0, 1]))
- assert_frame_equal(result, expected)
-
- # int that needs float conversion
- s = Series([2, 3, 4, 10, 5, -1])
- df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1, C=s, D='foo'))
- with np.errstate(all='ignore'):
- result = df.groupby('A').transform(
- lambda x: (x - x.mean()) / x.std())
-
- s1 = s.iloc[0:3]
- s1 = (s1 - s1.mean()) / s1.std()
- s2 = s.iloc[3:6]
- s2 = (s2 - s2.mean()) / s2.std()
- expected = DataFrame(dict(B=np.nan, C=concat([s1, s2])))
- assert_frame_equal(result, expected)
-
- # int downcasting
- result = df.groupby('A').transform(lambda x: x * 2 / 2)
- expected = DataFrame(dict(B=1, C=[2, 3, 4, 10, 5, -1]))
- assert_frame_equal(result, expected)
-
- def test_groupby_transform_with_nan_group(self):
- # GH 9941
- df = pd.DataFrame({'a': range(10),
- 'b': [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]})
- result = df.groupby(df.b)['a'].transform(max)
- expected = pd.Series([1., 1., 2., 3., np.nan, 6., 6., 9., 9., 9.],
- name='a')
- assert_series_equal(result, expected)
-
- def test_transform_mixed_type(self):
- index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
- ])
- df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
- 'c': np.tile(['a', 'b', 'c'], 2),
- 'v': np.arange(1., 7.)}, index=index)
-
- def f(group):
- group['g'] = group['d'] * 2
- return group[:1]
-
- grouped = df.groupby('c')
- result = grouped.apply(f)
-
- assert result['d'].dtype == np.float64
-
- # this is by definition a mutating operation!
- with option_context('mode.chained_assignment', None):
- for key, group in grouped:
- res = f(group)
- assert_frame_equal(res, result.loc[key])
-
- def test_cython_group_transform_algos(self):
- # GH 4095
- dtypes = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint32,
- np.uint64, np.float32, np.float64]
-
- ops = [(groupby.group_cumprod_float64, np.cumproduct, [np.float64]),
- (groupby.group_cumsum, np.cumsum, dtypes)]
-
- is_datetimelike = False
- for pd_op, np_op, dtypes in ops:
- for dtype in dtypes:
- data = np.array([[1], [2], [3], [4]], dtype=dtype)
- ans = np.zeros_like(data)
- labels = np.array([0, 0, 0, 0], dtype=np.int64)
- pd_op(ans, data, labels, is_datetimelike)
- tm.assert_numpy_array_equal(np_op(data), ans[:, 0],
- check_dtype=False)
-
- # with nans
- labels = np.array([0, 0, 0, 0, 0], dtype=np.int64)
-
- data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')
- actual = np.zeros_like(data)
- actual.fill(np.nan)
- groupby.group_cumprod_float64(actual, data, labels, is_datetimelike)
- expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')
- tm.assert_numpy_array_equal(actual[:, 0], expected)
-
- actual = np.zeros_like(data)
- actual.fill(np.nan)
- groupby.group_cumsum(actual, data, labels, is_datetimelike)
- expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')
- tm.assert_numpy_array_equal(actual[:, 0], expected)
-
- # timedelta
- is_datetimelike = True
- data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]
- actual = np.zeros_like(data, dtype='int64')
- groupby.group_cumsum(actual, data.view('int64'), labels,
- is_datetimelike)
- expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(
- 2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),
- np.timedelta64(5, 'ns')])
- tm.assert_numpy_array_equal(actual[:, 0].view('m8[ns]'), expected)
-
- @pytest.mark.parametrize(
- "op, args, targop",
- [('cumprod', (), lambda x: x.cumprod()),
- ('cumsum', (), lambda x: x.cumsum()),
- ('shift', (-1, ), lambda x: x.shift(-1)),
- ('shift', (1, ), lambda x: x.shift())])
- def test_cython_transform_series(self, op, args, targop):
- # GH 4095
- s = Series(np.random.randn(1000))
- s_missing = s.copy()
- s_missing.iloc[2:10] = np.nan
- labels = np.random.randint(0, 50, size=1000).astype(float)
-
- # series
- for data in [s, s_missing]:
- # print(data.head())
- expected = data.groupby(labels).transform(targop)
-
- tm.assert_series_equal(
+def assert_fp_equal(a, b):
+ assert (np.abs(a - b) < 1e-12).all()
+
+
+def test_transform():
+ data = Series(np.arange(9) // 3, index=np.arange(9))
+
+ index = np.arange(9)
+ np.random.shuffle(index)
+ data = data.reindex(index)
+
+ grouped = data.groupby(lambda x: x // 3)
+
+ transformed = grouped.transform(lambda x: x * x.sum())
+ assert transformed[7] == 12
+
+ # GH 8046
+ # make sure that we preserve the input order
+
+ df = DataFrame(
+ np.arange(6, dtype='int64').reshape(
+ 3, 2), columns=["a", "b"], index=[0, 2, 1])
+ key = [0, 0, 1]
+ expected = df.sort_index().groupby(key).transform(
+ lambda x: x - x.mean()).groupby(key).mean()
+ result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(
+ key).mean()
+ assert_frame_equal(result, expected)
+
+ def demean(arr):
+ return arr - arr.mean()
+
+ people = DataFrame(np.random.randn(5, 5),
+ columns=['a', 'b', 'c', 'd', 'e'],
+ index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
+ key = ['one', 'two', 'one', 'two', 'one']
+ result = people.groupby(key).transform(demean).groupby(key).mean()
+ expected = people.groupby(key).apply(demean).groupby(key).mean()
+ assert_frame_equal(result, expected)
+
+ # GH 8430
+ df = tm.makeTimeDataFrame()
+ g = df.groupby(pd.Grouper(freq='M'))
+ g.transform(lambda x: x - 1)
+
+ # GH 9700
+ df = DataFrame({'a': range(5, 10), 'b': range(5)})
+ result = df.groupby('a').transform(max)
+ expected = DataFrame({'b': range(5)})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_transform_fast():
+
+ df = DataFrame({'id': np.arange(100000) / 3,
+ 'val': np.random.randn(100000)})
+
+ grp = df.groupby('id')['val']
+
+ values = np.repeat(grp.mean().values,
+ _ensure_platform_int(grp.count().values))
+ expected = pd.Series(values, index=df.index, name='val')
+
+ result = grp.transform(np.mean)
+ assert_series_equal(result, expected)
+
+ result = grp.transform('mean')
+ assert_series_equal(result, expected)
+
+ # GH 12737
+ df = pd.DataFrame({'grouping': [0, 1, 1, 3], 'f': [1.1, 2.1, 3.1, 4.5],
+ 'd': pd.date_range('2014-1-1', '2014-1-4'),
+ 'i': [1, 2, 3, 4]},
+ columns=['grouping', 'f', 'i', 'd'])
+ result = df.groupby('grouping').transform('first')
+
+ dates = [pd.Timestamp('2014-1-1'), pd.Timestamp('2014-1-2'),
+ pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-4')]
+ expected = pd.DataFrame({'f': [1.1, 2.1, 2.1, 4.5],
+ 'd': dates,
+ 'i': [1, 2, 2, 4]},
+ columns=['f', 'i', 'd'])
+ assert_frame_equal(result, expected)
+
+ # selection
+ result = df.groupby('grouping')[['f', 'i']].transform('first')
+ expected = expected[['f', 'i']]
+ assert_frame_equal(result, expected)
+
+ # dup columns
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['g', 'a', 'a'])
+ result = df.groupby('g').transform('first')
+ expected = df.drop('g', axis=1)
+ assert_frame_equal(result, expected)
+
+
+def test_transform_broadcast(tsframe, ts):
+ grouped = ts.groupby(lambda x: x.month)
+ result = grouped.transform(np.mean)
+
+ tm.assert_index_equal(result.index, ts.index)
+ for _, gp in grouped:
+ assert_fp_equal(result.reindex(gp.index), gp.mean())
+
+ grouped = tsframe.groupby(lambda x: x.month)
+ result = grouped.transform(np.mean)
+ tm.assert_index_equal(result.index, tsframe.index)
+ for _, gp in grouped:
+ agged = gp.mean()
+ res = result.reindex(gp.index)
+ for col in tsframe:
+ assert_fp_equal(res[col], agged[col])
+
+ # group columns
+ grouped = tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
+ axis=1)
+ result = grouped.transform(np.mean)
+ tm.assert_index_equal(result.index, tsframe.index)
+ tm.assert_index_equal(result.columns, tsframe.columns)
+ for _, gp in grouped:
+ agged = gp.mean(1)
+ res = result.reindex(columns=gp.columns)
+ for idx in gp.index:
+ assert_fp_equal(res.xs(idx), agged[idx])
+
+
+def test_transform_axis(tsframe):
+
+ # make sure that we are setting the axes
+ # correctly when on axis=0 or 1
+ # in the presence of a non-monotonic indexer
+ # GH12713
+
+ base = tsframe.iloc[0:5]
+ r = len(base.index)
+ c = len(base.columns)
+ tso = DataFrame(np.random.randn(r, c),
+ index=base.index,
+ columns=base.columns,
+ dtype='float64')
+ # monotonic
+ ts = tso
+ grouped = ts.groupby(lambda x: x.weekday())
+ result = ts - grouped.transform('mean')
+ expected = grouped.apply(lambda x: x - x.mean())
+ assert_frame_equal(result, expected)
+
+ ts = ts.T
+ grouped = ts.groupby(lambda x: x.weekday(), axis=1)
+ result = ts - grouped.transform('mean')
+ expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
+ assert_frame_equal(result, expected)
+
+ # non-monotonic
+ ts = tso.iloc[[1, 0] + list(range(2, len(base)))]
+ grouped = ts.groupby(lambda x: x.weekday())
+ result = ts - grouped.transform('mean')
+ expected = grouped.apply(lambda x: x - x.mean())
+ assert_frame_equal(result, expected)
+
+ ts = ts.T
+ grouped = ts.groupby(lambda x: x.weekday(), axis=1)
+ result = ts - grouped.transform('mean')
+ expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
+ assert_frame_equal(result, expected)
+
+
+def test_transform_dtype():
+ # GH 9807
+ # Check transform dtype output is preserved
+ df = DataFrame([[1, 3], [2, 3]])
+ result = df.groupby(1).transform('mean')
+ expected = DataFrame([[1.5], [1.5]])
+ assert_frame_equal(result, expected)
+
+
+def test_transform_bug():
+ # GH 5712
+ # transforming on a datetime column
+ df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
+ result = df.groupby('A')['B'].transform(
+ lambda x: x.rank(ascending=False))
+ expected = Series(np.arange(5, 0, step=-1), name='B')
+ assert_series_equal(result, expected)
+
+
+def test_transform_numeric_to_boolean():
+ # GH 16875
+ # inconsistency in transforming boolean values
+ expected = pd.Series([True, True], name='A')
+
+ df = pd.DataFrame({'A': [1.1, 2.2], 'B': [1, 2]})
+ result = df.groupby('B').A.transform(lambda x: True)
+ assert_series_equal(result, expected)
+
+ df = pd.DataFrame({'A': [1, 2], 'B': [1, 2]})
+ result = df.groupby('B').A.transform(lambda x: True)
+ assert_series_equal(result, expected)
+
+
+def test_transform_datetime_to_timedelta():
+ # GH 15429
+ # transforming a datetime to timedelta
+ df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
+ expected = pd.Series([
+ Timestamp('20130101') - Timestamp('20130101')] * 5, name='A')
+
+ # this does date math without changing result type in transform
+ base_time = df['A'][0]
+ result = df.groupby('A')['A'].transform(
+ lambda x: x.max() - x.min() + base_time) - base_time
+ assert_series_equal(result, expected)
+
+ # this does date math and causes the transform to return timedelta
+ result = df.groupby('A')['A'].transform(lambda x: x.max() - x.min())
+ assert_series_equal(result, expected)
+
+
+def test_transform_datetime_to_numeric():
+ # GH 10972
+ # convert dt to float
+ df = DataFrame({
+ 'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
+ result = df.groupby('a').b.transform(
+ lambda x: x.dt.dayofweek - x.dt.dayofweek.mean())
+
+ expected = Series([-0.5, 0.5], name='b')
+ assert_series_equal(result, expected)
+
+ # convert dt to int
+ df = DataFrame({
+ 'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
+ result = df.groupby('a').b.transform(
+ lambda x: x.dt.dayofweek - x.dt.dayofweek.min())
+
+ expected = Series([0, 1], name='b')
+ assert_series_equal(result, expected)
+
+
+def test_transform_casting():
+ # 13046
+ data = """
+ idx A ID3 DATETIME
+ 0 B-028 b76cd912ff "2014-10-08 13:43:27"
+ 1 B-054 4a57ed0b02 "2014-10-08 14:26:19"
+ 2 B-076 1a682034f8 "2014-10-08 14:29:01"
+ 3 B-023 b76cd912ff "2014-10-08 18:39:34"
+ 4 B-023 f88g8d7sds "2014-10-08 18:40:18"
+ 5 B-033 b76cd912ff "2014-10-08 18:44:30"
+ 6 B-032 b76cd912ff "2014-10-08 18:46:00"
+ 7 B-037 b76cd912ff "2014-10-08 18:52:15"
+ 8 B-046 db959faf02 "2014-10-08 18:59:59"
+ 9 B-053 b76cd912ff "2014-10-08 19:17:48"
+ 10 B-065 b76cd912ff "2014-10-08 19:21:38"
+ """
+ df = pd.read_csv(StringIO(data), sep=r'\s+',
+ index_col=[0], parse_dates=['DATETIME'])
+
+ result = df.groupby('ID3')['DATETIME'].transform(lambda x: x.diff())
+ assert is_timedelta64_dtype(result.dtype)
+
+ result = df[['ID3', 'DATETIME']].groupby('ID3').transform(
+ lambda x: x.diff())
+ assert is_timedelta64_dtype(result.DATETIME.dtype)
+
+
+def test_transform_multiple(ts):
+ grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
+
+ grouped.transform(lambda x: x * 2)
+ grouped.transform(np.mean)
+
+
+def test_dispatch_transform(tsframe):
+ df = tsframe[::5].reindex(tsframe.index)
+
+ grouped = df.groupby(lambda x: x.month)
+
+ filled = grouped.fillna(method='pad')
+ fillit = lambda x: x.fillna(method='pad')
+ expected = df.groupby(lambda x: x.month).transform(fillit)
+ assert_frame_equal(filled, expected)
+
+
+def test_transform_select_columns(df):
+ f = lambda x: x.mean()
+ result = df.groupby('A')['C', 'D'].transform(f)
+
+ selection = df[['C', 'D']]
+ expected = selection.groupby(df['A']).transform(f)
+
+ assert_frame_equal(result, expected)
+
+
+def test_transform_exclude_nuisance(df):
+
+ # this also tests orderings in transform between
+ # series/frame to make sure it's consistent
+ expected = {}
+ grouped = df.groupby('A')
+ expected['C'] = grouped['C'].transform(np.mean)
+ expected['D'] = grouped['D'].transform(np.mean)
+ expected = DataFrame(expected)
+ result = df.groupby('A').transform(np.mean)
+
+ assert_frame_equal(result, expected)
+
+
+def test_transform_function_aliases(df):
+ result = df.groupby('A').transform('mean')
+ expected = df.groupby('A').transform(np.mean)
+ assert_frame_equal(result, expected)
+
+ result = df.groupby('A')['C'].transform('mean')
+ expected = df.groupby('A')['C'].transform(np.mean)
+ assert_series_equal(result, expected)
+
+
+def test_series_fast_transform_date():
+ # GH 13191
+ df = pd.DataFrame({'grouping': [np.nan, 1, 1, 3],
+ 'd': pd.date_range('2014-1-1', '2014-1-4')})
+ result = df.groupby('grouping')['d'].transform('first')
+ dates = [pd.NaT, pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-2'),
+ pd.Timestamp('2014-1-4')]
+ expected = pd.Series(dates, name='d')
+ assert_series_equal(result, expected)
+
+
+def test_transform_length():
+ # GH 9697
+ df = pd.DataFrame({'col1': [1, 1, 2, 2], 'col2': [1, 2, 3, np.nan]})
+ expected = pd.Series([3.0] * 4)
+
+ def nsum(x):
+ return np.nansum(x)
+
+ results = [df.groupby('col1').transform(sum)['col2'],
+ df.groupby('col1')['col2'].transform(sum),
+ df.groupby('col1').transform(nsum)['col2'],
+ df.groupby('col1')['col2'].transform(nsum)]
+ for result in results:
+ assert_series_equal(result, expected, check_names=False)
+
+
+def test_transform_coercion():
+
+ # 14457
+ # when we are transforming be sure to not coerce
+ # via assignment
+ df = pd.DataFrame(dict(A=['a', 'a'], B=[0, 1]))
+ g = df.groupby('A')
+
+ expected = g.transform(np.mean)
+ result = g.transform(lambda x: np.mean(x))
+ assert_frame_equal(result, expected)
+
+
+def test_groupby_transform_with_int():
+
+ # GH 3740, make sure that we might upcast on item-by-item transform
+
+ # floats
+ df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=Series(1, dtype='float64'),
+ C=Series(
+ [1, 2, 3, 1, 2, 3], dtype='float64'), D='foo'))
+ with np.errstate(all='ignore'):
+ result = df.groupby('A').transform(
+ lambda x: (x - x.mean()) / x.std())
+ expected = DataFrame(dict(B=np.nan, C=Series(
+ [-1, 0, 1, -1, 0, 1], dtype='float64')))
+ assert_frame_equal(result, expected)
+
+ # int case
+ df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1,
+ C=[1, 2, 3, 1, 2, 3], D='foo'))
+ with np.errstate(all='ignore'):
+ result = df.groupby('A').transform(
+ lambda x: (x - x.mean()) / x.std())
+ expected = DataFrame(dict(B=np.nan, C=[-1, 0, 1, -1, 0, 1]))
+ assert_frame_equal(result, expected)
+
+ # int that needs float conversion
+ s = Series([2, 3, 4, 10, 5, -1])
+ df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1, C=s, D='foo'))
+ with np.errstate(all='ignore'):
+ result = df.groupby('A').transform(
+ lambda x: (x - x.mean()) / x.std())
+
+ s1 = s.iloc[0:3]
+ s1 = (s1 - s1.mean()) / s1.std()
+ s2 = s.iloc[3:6]
+ s2 = (s2 - s2.mean()) / s2.std()
+ expected = DataFrame(dict(B=np.nan, C=concat([s1, s2])))
+ assert_frame_equal(result, expected)
+
+ # int downcasting
+ result = df.groupby('A').transform(lambda x: x * 2 / 2)
+ expected = DataFrame(dict(B=1, C=[2, 3, 4, 10, 5, -1]))
+ assert_frame_equal(result, expected)
+
+
+def test_groupby_transform_with_nan_group():
+ # GH 9941
+ df = pd.DataFrame({'a': range(10),
+ 'b': [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]})
+ result = df.groupby(df.b)['a'].transform(max)
+ expected = pd.Series([1., 1., 2., 3., np.nan, 6., 6., 9., 9., 9.],
+ name='a')
+ assert_series_equal(result, expected)
+
+
+def test_transform_mixed_type():
+ index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
+ ])
+ df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
+ 'c': np.tile(['a', 'b', 'c'], 2),
+ 'v': np.arange(1., 7.)}, index=index)
+
+ def f(group):
+ group['g'] = group['d'] * 2
+ return group[:1]
+
+ grouped = df.groupby('c')
+ result = grouped.apply(f)
+
+ assert result['d'].dtype == np.float64
+
+ # this is by definition a mutating operation!
+ with option_context('mode.chained_assignment', None):
+ for key, group in grouped:
+ res = f(group)
+ assert_frame_equal(res, result.loc[key])
+
+
+def test_cython_group_transform_algos():
+ # GH 4095
+ dtypes = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint32,
+ np.uint64, np.float32, np.float64]
+
+ ops = [(groupby.group_cumprod_float64, np.cumproduct, [np.float64]),
+ (groupby.group_cumsum, np.cumsum, dtypes)]
+
+ is_datetimelike = False
+ for pd_op, np_op, dtypes in ops:
+ for dtype in dtypes:
+ data = np.array([[1], [2], [3], [4]], dtype=dtype)
+ ans = np.zeros_like(data)
+ labels = np.array([0, 0, 0, 0], dtype=np.int64)
+ pd_op(ans, data, labels, is_datetimelike)
+ tm.assert_numpy_array_equal(np_op(data), ans[:, 0],
+ check_dtype=False)
+
+ # with nans
+ labels = np.array([0, 0, 0, 0, 0], dtype=np.int64)
+
+ data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')
+ actual = np.zeros_like(data)
+ actual.fill(np.nan)
+ groupby.group_cumprod_float64(actual, data, labels, is_datetimelike)
+ expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')
+ tm.assert_numpy_array_equal(actual[:, 0], expected)
+
+ actual = np.zeros_like(data)
+ actual.fill(np.nan)
+ groupby.group_cumsum(actual, data, labels, is_datetimelike)
+ expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')
+ tm.assert_numpy_array_equal(actual[:, 0], expected)
+
+ # timedelta
+ is_datetimelike = True
+ data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]
+ actual = np.zeros_like(data, dtype='int64')
+ groupby.group_cumsum(actual, data.view('int64'), labels,
+ is_datetimelike)
+ expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(
+ 2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),
+ np.timedelta64(5, 'ns')])
+ tm.assert_numpy_array_equal(actual[:, 0].view('m8[ns]'), expected)
+
+
+@pytest.mark.parametrize(
+ "op, args, targop",
+ [('cumprod', (), lambda x: x.cumprod()),
+ ('cumsum', (), lambda x: x.cumsum()),
+ ('shift', (-1, ), lambda x: x.shift(-1)),
+ ('shift', (1, ), lambda x: x.shift())])
+def test_cython_transform_series(op, args, targop):
+ # GH 4095
+ s = Series(np.random.randn(1000))
+ s_missing = s.copy()
+ s_missing.iloc[2:10] = np.nan
+ labels = np.random.randint(0, 50, size=1000).astype(float)
+
+ # series
+ for data in [s, s_missing]:
+ # print(data.head())
+ expected = data.groupby(labels).transform(targop)
+
+ tm.assert_series_equal(
+ expected,
+ data.groupby(labels).transform(op, *args))
+ tm.assert_series_equal(expected, getattr(
+ data.groupby(labels), op)(*args))
+
+
+@pytest.mark.parametrize("op", ['cumprod', 'cumsum'])
+@pytest.mark.parametrize("skipna", [False, True])
+@pytest.mark.parametrize('input, exp', [
+ # When everything is NaN
+ ({'key': ['b'] * 10, 'value': np.nan},
+ pd.Series([np.nan] * 10, name='value')),
+ # When there is a single NaN
+ ({'key': ['b'] * 10 + ['a'] * 2,
+ 'value': [3] * 3 + [np.nan] + [3] * 8},
+ {('cumprod', False): [3.0, 9.0, 27.0] + [np.nan] * 7 + [3.0, 9.0],
+ ('cumprod', True): [3.0, 9.0, 27.0, np.nan, 81., 243., 729.,
+ 2187., 6561., 19683., 3.0, 9.0],
+ ('cumsum', False): [3.0, 6.0, 9.0] + [np.nan] * 7 + [3.0, 6.0],
+ ('cumsum', True): [3.0, 6.0, 9.0, np.nan, 12., 15., 18.,
+ 21., 24., 27., 3.0, 6.0]})])
+def test_groupby_cum_skipna(op, skipna, input, exp):
+ df = pd.DataFrame(input)
+ result = df.groupby('key')['value'].transform(op, skipna=skipna)
+ if isinstance(exp, dict):
+ expected = exp[(op, skipna)]
+ else:
+ expected = exp
+ expected = pd.Series(expected, name='value')
+ tm.assert_series_equal(expected, result)
+
+
+@pytest.mark.parametrize(
+ "op, args, targop",
+ [('cumprod', (), lambda x: x.cumprod()),
+ ('cumsum', (), lambda x: x.cumsum()),
+ ('shift', (-1, ), lambda x: x.shift(-1)),
+ ('shift', (1, ), lambda x: x.shift())])
+def test_cython_transform_frame(op, args, targop):
+ s = Series(np.random.randn(1000))
+ s_missing = s.copy()
+ s_missing.iloc[2:10] = np.nan
+ labels = np.random.randint(0, 50, size=1000).astype(float)
+ strings = list('qwertyuiopasdfghjklz')
+ strings_missing = strings[:]
+ strings_missing[5] = np.nan
+ df = DataFrame({'float': s,
+ 'float_missing': s_missing,
+ 'int': [1, 1, 1, 1, 2] * 200,
+ 'datetime': pd.date_range('1990-1-1', periods=1000),
+ 'timedelta': pd.timedelta_range(1, freq='s',
+ periods=1000),
+ 'string': strings * 50,
+ 'string_missing': strings_missing * 50},
+ columns=['float', 'float_missing', 'int', 'datetime',
+ 'timedelta', 'string', 'string_missing'])
+ df['cat'] = df['string'].astype('category')
+
+ df2 = df.copy()
+ df2.index = pd.MultiIndex.from_product([range(100), range(10)])
+
+ # DataFrame - Single and MultiIndex,
+ # group by values, index level, columns
+ for df in [df, df2]:
+ for gb_target in [dict(by=labels), dict(level=0), dict(by='string')
+ ]: # dict(by='string_missing')]:
+ # dict(by=['int','string'])]:
+
+ gb = df.groupby(**gb_target)
+ # whitelisted methods set the selection before applying
+ # bit a of hack to make sure the cythonized shift
+ # is equivalent to pre 0.17.1 behavior
+ if op == 'shift':
+ gb._set_group_selection()
+
+ if op != 'shift' and 'int' not in gb_target:
+ # numeric apply fastpath promotes dtype so have
+ # to apply separately and concat
+ i = gb[['int']].apply(targop)
+ f = gb[['float', 'float_missing']].apply(targop)
+ expected = pd.concat([f, i], axis=1)
+ else:
+ expected = gb.apply(targop)
+
+ expected = expected.sort_index(axis=1)
+ tm.assert_frame_equal(expected,
+ gb.transform(op, *args).sort_index(
+ axis=1))
+ tm.assert_frame_equal(
expected,
- data.groupby(labels).transform(op, *args))
- tm.assert_series_equal(expected, getattr(
- data.groupby(labels), op)(*args))
-
- @pytest.mark.parametrize("op", ['cumprod', 'cumsum'])
- @pytest.mark.parametrize("skipna", [False, True])
- @pytest.mark.parametrize('input, exp', [
- # When everything is NaN
- ({'key': ['b'] * 10, 'value': np.nan},
- pd.Series([np.nan] * 10, name='value')),
- # When there is a single NaN
- ({'key': ['b'] * 10 + ['a'] * 2,
- 'value': [3] * 3 + [np.nan] + [3] * 8},
- {('cumprod', False): [3.0, 9.0, 27.0] + [np.nan] * 7 + [3.0, 9.0],
- ('cumprod', True): [3.0, 9.0, 27.0, np.nan, 81., 243., 729.,
- 2187., 6561., 19683., 3.0, 9.0],
- ('cumsum', False): [3.0, 6.0, 9.0] + [np.nan] * 7 + [3.0, 6.0],
- ('cumsum', True): [3.0, 6.0, 9.0, np.nan, 12., 15., 18.,
- 21., 24., 27., 3.0, 6.0]})])
- def test_groupby_cum_skipna(self, op, skipna, input, exp):
- df = pd.DataFrame(input)
- result = df.groupby('key')['value'].transform(op, skipna=skipna)
- if isinstance(exp, dict):
- expected = exp[(op, skipna)]
- else:
- expected = exp
- expected = pd.Series(expected, name='value')
- tm.assert_series_equal(expected, result)
-
- @pytest.mark.parametrize(
- "op, args, targop",
- [('cumprod', (), lambda x: x.cumprod()),
- ('cumsum', (), lambda x: x.cumsum()),
- ('shift', (-1, ), lambda x: x.shift(-1)),
- ('shift', (1, ), lambda x: x.shift())])
- def test_cython_transform_frame(self, op, args, targop):
- s = Series(np.random.randn(1000))
- s_missing = s.copy()
- s_missing.iloc[2:10] = np.nan
- labels = np.random.randint(0, 50, size=1000).astype(float)
- strings = list('qwertyuiopasdfghjklz')
- strings_missing = strings[:]
- strings_missing[5] = np.nan
- df = DataFrame({'float': s,
- 'float_missing': s_missing,
- 'int': [1, 1, 1, 1, 2] * 200,
- 'datetime': pd.date_range('1990-1-1', periods=1000),
- 'timedelta': pd.timedelta_range(1, freq='s',
- periods=1000),
- 'string': strings * 50,
- 'string_missing': strings_missing * 50},
- columns=['float', 'float_missing', 'int', 'datetime',
- 'timedelta', 'string', 'string_missing'])
- df['cat'] = df['string'].astype('category')
-
- df2 = df.copy()
- df2.index = pd.MultiIndex.from_product([range(100), range(10)])
-
- # DataFrame - Single and MultiIndex,
- # group by values, index level, columns
- for df in [df, df2]:
- for gb_target in [dict(by=labels), dict(level=0), dict(by='string')
- ]: # dict(by='string_missing')]:
- # dict(by=['int','string'])]:
-
- gb = df.groupby(**gb_target)
- # whitelisted methods set the selection before applying
- # bit a of hack to make sure the cythonized shift
- # is equivalent to pre 0.17.1 behavior
- if op == 'shift':
- gb._set_group_selection()
-
- if op != 'shift' and 'int' not in gb_target:
- # numeric apply fastpath promotes dtype so have
- # to apply separately and concat
- i = gb[['int']].apply(targop)
- f = gb[['float', 'float_missing']].apply(targop)
- expected = pd.concat([f, i], axis=1)
+ getattr(gb, op)(*args).sort_index(axis=1))
+ # individual columns
+ for c in df:
+ if c not in ['float', 'int', 'float_missing'
+ ] and op != 'shift':
+ pytest.raises(DataError, gb[c].transform, op)
+ pytest.raises(DataError, getattr(gb[c], op))
else:
- expected = gb.apply(targop)
-
- expected = expected.sort_index(axis=1)
- tm.assert_frame_equal(expected,
- gb.transform(op, *args).sort_index(
- axis=1))
- tm.assert_frame_equal(
- expected,
- getattr(gb, op)(*args).sort_index(axis=1))
- # individual columns
- for c in df:
- if c not in ['float', 'int', 'float_missing'
- ] and op != 'shift':
- pytest.raises(DataError, gb[c].transform, op)
- pytest.raises(DataError, getattr(gb[c], op))
- else:
- expected = gb[c].apply(targop)
- expected.name = c
- tm.assert_series_equal(expected,
- gb[c].transform(op, *args))
- tm.assert_series_equal(expected,
- getattr(gb[c], op)(*args))
-
- def test_transform_with_non_scalar_group(self):
- # GH 10165
- cols = pd.MultiIndex.from_tuples([
- ('syn', 'A'), ('mis', 'A'), ('non', 'A'),
- ('syn', 'C'), ('mis', 'C'), ('non', 'C'),
- ('syn', 'T'), ('mis', 'T'), ('non', 'T'),
- ('syn', 'G'), ('mis', 'G'), ('non', 'G')])
- df = pd.DataFrame(np.random.randint(1, 10, (4, 12)),
- columns=cols,
- index=['A', 'C', 'G', 'T'])
- tm.assert_raises_regex(ValueError, 'transform must return '
- 'a scalar value for each '
- 'group.*',
- df.groupby(axis=1, level=1).transform,
- lambda z: z.div(z.sum(axis=1), axis=0))
-
- @pytest.mark.parametrize('cols,exp,comp_func', [
- ('a', pd.Series([1, 1, 1], name='a'), tm.assert_series_equal),
- (['a', 'c'], pd.DataFrame({'a': [1, 1, 1], 'c': [1, 1, 1]}),
- tm.assert_frame_equal)
- ])
- @pytest.mark.parametrize('agg_func', [
- 'count', 'rank', 'size'])
- def test_transform_numeric_ret(self, cols, exp, comp_func, agg_func):
- if agg_func == 'size' and isinstance(cols, list):
- pytest.xfail("'size' transformation not supported with "
- "NDFrameGroupy")
-
- # GH 19200
- df = pd.DataFrame(
- {'a': pd.date_range('2018-01-01', periods=3),
- 'b': range(3),
- 'c': range(7, 10)})
-
- result = df.groupby('b')[cols].transform(agg_func)
-
- if agg_func == 'rank':
- exp = exp.astype('float')
-
- comp_func(result, exp)
-
- @pytest.mark.parametrize("mix_groupings", [True, False])
- @pytest.mark.parametrize("as_series", [True, False])
- @pytest.mark.parametrize("val1,val2", [
- ('foo', 'bar'), (1, 2), (1., 2.)])
- @pytest.mark.parametrize("fill_method,limit,exp_vals", [
- ("ffill", None,
- [np.nan, np.nan, 'val1', 'val1', 'val1', 'val2', 'val2', 'val2']),
- ("ffill", 1,
- [np.nan, np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan]),
- ("bfill", None,
- ['val1', 'val1', 'val1', 'val2', 'val2', 'val2', np.nan, np.nan]),
- ("bfill", 1,
- [np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan, np.nan])
- ])
- def test_group_fill_methods(self, mix_groupings, as_series, val1, val2,
- fill_method, limit, exp_vals):
- vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan]
- _exp_vals = list(exp_vals)
- # Overwrite placeholder values
- for index, exp_val in enumerate(_exp_vals):
- if exp_val == 'val1':
- _exp_vals[index] = val1
- elif exp_val == 'val2':
- _exp_vals[index] = val2
-
- # Need to modify values and expectations depending on the
- # Series / DataFrame that we ultimately want to generate
- if mix_groupings: # ['a', 'b', 'a, 'b', ...]
- keys = ['a', 'b'] * len(vals)
-
- def interweave(list_obj):
- temp = list()
- for x in list_obj:
- temp.extend([x, x])
-
- return temp
-
- _exp_vals = interweave(_exp_vals)
- vals = interweave(vals)
- else: # ['a', 'a', 'a', ... 'b', 'b', 'b']
- keys = ['a'] * len(vals) + ['b'] * len(vals)
- _exp_vals = _exp_vals * 2
- vals = vals * 2
-
- df = DataFrame({'key': keys, 'val': vals})
- if as_series:
- result = getattr(
- df.groupby('key')['val'], fill_method)(limit=limit)
- exp = Series(_exp_vals, name='val')
- assert_series_equal(result, exp)
- else:
- result = getattr(df.groupby('key'), fill_method)(limit=limit)
- exp = DataFrame({'key': keys, 'val': _exp_vals})
- assert_frame_equal(result, exp)
-
- @pytest.mark.parametrize("test_series", [True, False])
- @pytest.mark.parametrize("periods,fill_method,limit", [
- (1, 'ffill', None), (1, 'ffill', 1),
- (1, 'bfill', None), (1, 'bfill', 1),
- (-1, 'ffill', None), (-1, 'ffill', 1),
- (-1, 'bfill', None), (-1, 'bfill', 1)])
- def test_pct_change(self, test_series, periods, fill_method, limit):
- vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
- exp_vals = Series(vals).pct_change(periods=periods,
- fill_method=fill_method,
- limit=limit).tolist()
-
- df = DataFrame({'key': ['a'] * len(vals) + ['b'] * len(vals),
- 'vals': vals * 2})
- grp = df.groupby('key')
-
- def get_result(grp_obj):
- return grp_obj.pct_change(periods=periods,
- fill_method=fill_method,
- limit=limit)
-
- if test_series:
- exp = pd.Series(exp_vals * 2)
- exp.name = 'vals'
- grp = grp['vals']
- result = get_result(grp)
- tm.assert_series_equal(result, exp)
- else:
- exp = DataFrame({'vals': exp_vals * 2})
- result = get_result(grp)
- tm.assert_frame_equal(result, exp)
-
- @pytest.mark.parametrize("func", [np.any, np.all])
- def test_any_all_np_func(self, func):
- # GH 20653
- df = pd.DataFrame([['foo', True],
- [np.nan, True],
- ['foo', True]], columns=['key', 'val'])
-
- exp = pd.Series([True, np.nan, True], name='val')
-
- res = df.groupby('key')['val'].transform(func)
- tm.assert_series_equal(res, exp)
+ expected = gb[c].apply(targop)
+ expected.name = c
+ tm.assert_series_equal(expected,
+ gb[c].transform(op, *args))
+ tm.assert_series_equal(expected,
+ getattr(gb[c], op)(*args))
+
+
+def test_transform_with_non_scalar_group():
+ # GH 10165
+ cols = pd.MultiIndex.from_tuples([
+ ('syn', 'A'), ('mis', 'A'), ('non', 'A'),
+ ('syn', 'C'), ('mis', 'C'), ('non', 'C'),
+ ('syn', 'T'), ('mis', 'T'), ('non', 'T'),
+ ('syn', 'G'), ('mis', 'G'), ('non', 'G')])
+ df = pd.DataFrame(np.random.randint(1, 10, (4, 12)),
+ columns=cols,
+ index=['A', 'C', 'G', 'T'])
+ tm.assert_raises_regex(ValueError, 'transform must return '
+ 'a scalar value for each '
+ 'group.*',
+ df.groupby(axis=1, level=1).transform,
+ lambda z: z.div(z.sum(axis=1), axis=0))
+
+
+@pytest.mark.parametrize('cols,exp,comp_func', [
+ ('a', pd.Series([1, 1, 1], name='a'), tm.assert_series_equal),
+ (['a', 'c'], pd.DataFrame({'a': [1, 1, 1], 'c': [1, 1, 1]}),
+ tm.assert_frame_equal)
+])
+@pytest.mark.parametrize('agg_func', [
+ 'count', 'rank', 'size'])
+def test_transform_numeric_ret(cols, exp, comp_func, agg_func):
+ if agg_func == 'size' and isinstance(cols, list):
+ pytest.xfail("'size' transformation not supported with "
+ "NDFrameGroupy")
+
+ # GH 19200
+ df = pd.DataFrame(
+ {'a': pd.date_range('2018-01-01', periods=3),
+ 'b': range(3),
+ 'c': range(7, 10)})
+
+ result = df.groupby('b')[cols].transform(agg_func)
+
+ if agg_func == 'rank':
+ exp = exp.astype('float')
+
+ comp_func(result, exp)
+
+
+@pytest.mark.parametrize("mix_groupings", [True, False])
+@pytest.mark.parametrize("as_series", [True, False])
+@pytest.mark.parametrize("val1,val2", [
+ ('foo', 'bar'), (1, 2), (1., 2.)])
+@pytest.mark.parametrize("fill_method,limit,exp_vals", [
+ ("ffill", None,
+ [np.nan, np.nan, 'val1', 'val1', 'val1', 'val2', 'val2', 'val2']),
+ ("ffill", 1,
+ [np.nan, np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan]),
+ ("bfill", None,
+ ['val1', 'val1', 'val1', 'val2', 'val2', 'val2', np.nan, np.nan]),
+ ("bfill", 1,
+ [np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan, np.nan])
+])
+def test_group_fill_methods(mix_groupings, as_series, val1, val2,
+ fill_method, limit, exp_vals):
+ vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan]
+ _exp_vals = list(exp_vals)
+ # Overwrite placeholder values
+ for index, exp_val in enumerate(_exp_vals):
+ if exp_val == 'val1':
+ _exp_vals[index] = val1
+ elif exp_val == 'val2':
+ _exp_vals[index] = val2
+
+ # Need to modify values and expectations depending on the
+ # Series / DataFrame that we ultimately want to generate
+ if mix_groupings: # ['a', 'b', 'a, 'b', ...]
+ keys = ['a', 'b'] * len(vals)
+
+ def interweave(list_obj):
+ temp = list()
+ for x in list_obj:
+ temp.extend([x, x])
+
+ return temp
+
+ _exp_vals = interweave(_exp_vals)
+ vals = interweave(vals)
+ else: # ['a', 'a', 'a', ... 'b', 'b', 'b']
+ keys = ['a'] * len(vals) + ['b'] * len(vals)
+ _exp_vals = _exp_vals * 2
+ vals = vals * 2
+
+ df = DataFrame({'key': keys, 'val': vals})
+ if as_series:
+ result = getattr(
+ df.groupby('key')['val'], fill_method)(limit=limit)
+ exp = Series(_exp_vals, name='val')
+ assert_series_equal(result, exp)
+ else:
+ result = getattr(df.groupby('key'), fill_method)(limit=limit)
+ exp = DataFrame({'key': keys, 'val': _exp_vals})
+ assert_frame_equal(result, exp)
+
+
+@pytest.mark.parametrize("test_series", [True, False])
+@pytest.mark.parametrize("periods,fill_method,limit", [
+ (1, 'ffill', None), (1, 'ffill', 1),
+ (1, 'bfill', None), (1, 'bfill', 1),
+ (-1, 'ffill', None), (-1, 'ffill', 1),
+ (-1, 'bfill', None), (-1, 'bfill', 1)])
+def test_pct_change(test_series, periods, fill_method, limit):
+ vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
+ exp_vals = Series(vals).pct_change(periods=periods,
+ fill_method=fill_method,
+ limit=limit).tolist()
+
+ df = DataFrame({'key': ['a'] * len(vals) + ['b'] * len(vals),
+ 'vals': vals * 2})
+ grp = df.groupby('key')
+
+ def get_result(grp_obj):
+ return grp_obj.pct_change(periods=periods,
+ fill_method=fill_method,
+ limit=limit)
+
+ if test_series:
+ exp = pd.Series(exp_vals * 2)
+ exp.name = 'vals'
+ grp = grp['vals']
+ result = get_result(grp)
+ tm.assert_series_equal(result, exp)
+ else:
+ exp = DataFrame({'vals': exp_vals * 2})
+ result = get_result(grp)
+ tm.assert_frame_equal(result, exp)
+
+
+@pytest.mark.parametrize("func", [np.any, np.all])
+def test_any_all_np_func(func):
+ # GH 20653
+ df = pd.DataFrame([['foo', True],
+ [np.nan, True],
+ ['foo', True]], columns=['key', 'val'])
+
+ exp = pd.Series([True, np.nan, True], name='val')
+
+ res = df.groupby('key')['val'].transform(func)
+ tm.assert_series_equal(res, exp)
| closes #20696 | https://api.github.com/repos/pandas-dev/pandas/pulls/20781 | 2018-04-21T19:00:09Z | 2018-04-21T22:26:28Z | 2018-04-21T22:26:28Z | 2018-04-21T22:26:28Z |
Accessor registry | diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index 06c4068f86bfe..c638b9e4ea117 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -148,6 +148,7 @@ def decorator(accessor):
UserWarning,
stacklevel=2)
setattr(cls, name, CachedAccessor(name, accessor))
+ cls._accessors.add(name)
return accessor
return decorator
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 41d67c15c55b5..b67ed9cfd2241 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -326,6 +326,7 @@ def _constructor(self):
_constructor_sliced = Series
_deprecations = NDFrame._deprecations | frozenset(
['sortlevel', 'get_value', 'set_value', 'from_csv', 'from_items'])
+ _accessors = set()
@property
def _constructor_expanddim(self):
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 12bb09e8f8a8a..27668f925ce23 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -242,7 +242,8 @@ class Index(IndexOpsMixin, PandasObject):
_engine_type = libindex.ObjectEngine
- _accessors = frozenset(['str'])
+ _accessors = set(['str'])
+
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 2ed4c99b7a998..13e94f971d003 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -153,7 +153,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
Copy input data
"""
_metadata = ['name']
- _accessors = frozenset(['dt', 'cat', 'str'])
+ _accessors = set(['dt', 'cat', 'str'])
_deprecations = generic.NDFrame._deprecations | frozenset(
['asobject', 'sortlevel', 'reshape', 'get_value', 'set_value',
'from_csv', 'valid'])
diff --git a/pandas/tests/test_register_accessor.py b/pandas/tests/test_register_accessor.py
index fe0cf4c9b38af..33b9798b7606a 100644
--- a/pandas/tests/test_register_accessor.py
+++ b/pandas/tests/test_register_accessor.py
@@ -17,6 +17,7 @@ def ensure_removed(obj, attr):
delattr(obj, attr)
except AttributeError:
pass
+ obj._accessors.discard(attr)
class MyAccessor(object):
@@ -38,13 +39,14 @@ def method(self):
(pd.DataFrame, pd.api.extensions.register_dataframe_accessor),
(pd.Index, pd.api.extensions.register_index_accessor)
])
-def test_series_register(obj, registrar):
+def test_register(obj, registrar):
with ensure_removed(obj, 'mine'):
before = set(dir(obj))
registrar('mine')(MyAccessor)
assert obj([]).mine.prop == 'item'
after = set(dir(obj))
assert (before ^ after) == {'mine'}
+ assert 'mine' in obj._accessors
def test_accessor_works():
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index 3863451757709..aed3eb2f1226d 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -182,7 +182,8 @@ def doc_parameters(self):
@property
def signature_parameters(self):
if (inspect.isclass(self.method_obj)
- and self.method_name.split('.')[-1] in {'dt', 'str', 'cat'}):
+ and self.method_name.split('.')[-1] in
+ self.method_obj._accessors):
# accessor classes have a signature, but don't want to show this
return tuple()
try:
| Closes #19963
cc @datapythonista I think this would have solved your issues w/ the validation script. | https://api.github.com/repos/pandas-dev/pandas/pulls/20780 | 2018-04-21T18:34:10Z | 2018-04-23T12:49:43Z | 2018-04-23T12:49:43Z | 2018-04-23T12:49:48Z |
ENH: Implement mode(dropna=False) | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 55e76512b2440..6cbc19cca99e1 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -13,6 +13,7 @@ New features
Other Enhancements
^^^^^^^^^^^^^^^^^^
- :func:`to_datetime` now supports the ``%Z`` and ``%z`` directive when passed into ``format`` (:issue:`13486`)
+- :func:`Series.mode` and :func:`DataFrame.mode` now support the ``dropna`` parameter which can be used to specify whether NaN/NaT values should be considered (:issue:`17534`)
- :func:`to_csv` now supports ``compression`` keyword when a file handle is passed. (:issue:`21227`)
-
diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in
index c97639481f12c..521e564447c59 100644
--- a/pandas/_libs/hashtable_func_helper.pxi.in
+++ b/pandas/_libs/hashtable_func_helper.pxi.in
@@ -288,7 +288,8 @@ def ismember_{{dtype}}({{scalar}}[:] arr, {{scalar}}[:] values, bint hasnans=0):
{{py:
# dtype, ctype, table_type, npy_dtype
-dtypes = [('int64', 'int64_t', 'int64', 'int64'),
+dtypes = [('float64', 'float64_t', 'float64', 'float64'),
+ ('int64', 'int64_t', 'int64', 'int64'),
('uint64', 'uint64_t', 'uint64', 'uint64'),
('object', 'object', 'pymap', 'object_')]
}}
@@ -302,11 +303,11 @@ dtypes = [('int64', 'int64_t', 'int64', 'int64'),
{{if dtype == 'object'}}
-def mode_{{dtype}}(ndarray[{{ctype}}] values):
+def mode_{{dtype}}(ndarray[{{ctype}}] values, bint dropna):
{{else}}
-def mode_{{dtype}}({{ctype}}[:] values):
+def mode_{{dtype}}({{ctype}}[:] values, bint dropna):
{{endif}}
cdef:
int count, max_count = 1
@@ -317,9 +318,9 @@ def mode_{{dtype}}({{ctype}}[:] values):
table = kh_init_{{table_type}}()
{{if dtype == 'object'}}
- build_count_table_{{dtype}}(values, table, 1)
+ build_count_table_{{dtype}}(values, table, dropna)
{{else}}
- build_count_table_{{dtype}}(values, table, 0)
+ build_count_table_{{dtype}}(values, table, dropna)
{{endif}}
modes = np.empty(table.n_buckets, dtype=np.{{npy_dtype}})
@@ -329,7 +330,6 @@ def mode_{{dtype}}({{ctype}}[:] values):
for k in range(table.n_buckets):
if kh_exist_{{table_type}}(table, k):
count = table.vals[k]
-
if count == max_count:
j += 1
elif count > max_count:
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 88bc497f9f22d..0b0fa69588784 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -25,8 +25,8 @@
is_bool_dtype, needs_i8_conversion,
is_datetimetz,
is_datetime64_any_dtype, is_datetime64tz_dtype,
- is_timedelta64_dtype, is_interval_dtype,
- is_scalar, is_list_like,
+ is_timedelta64_dtype, is_datetimelike,
+ is_interval_dtype, is_scalar, is_list_like,
_ensure_platform_int, _ensure_object,
_ensure_float64, _ensure_uint64,
_ensure_int64)
@@ -798,7 +798,7 @@ def duplicated(values, keep='first'):
return f(values, keep=keep)
-def mode(values):
+def mode(values, dropna=True):
"""
Returns the mode(s) of an array.
@@ -806,6 +806,10 @@ def mode(values):
----------
values : array-like
Array over which to check for duplicate values.
+ dropna : boolean, default True
+ Don't consider counts of NaN/NaT.
+
+ .. versionadded:: 0.24.0
Returns
-------
@@ -818,20 +822,18 @@ def mode(values):
# categorical is a fast-path
if is_categorical_dtype(values):
-
if isinstance(values, Series):
- return Series(values.values.mode(), name=values.name)
- return values.mode()
+ return Series(values.values.mode(dropna=dropna), name=values.name)
+ return values.mode(dropna=dropna)
- values, dtype, ndtype = _ensure_data(values)
+ if dropna and is_datetimelike(values):
+ mask = values.isnull()
+ values = values[~mask]
- # TODO: this should support float64
- if ndtype not in ['int64', 'uint64', 'object']:
- ndtype = 'object'
- values = _ensure_object(values)
+ values, dtype, ndtype = _ensure_data(values)
f = getattr(htable, "mode_{dtype}".format(dtype=ndtype))
- result = f(values)
+ result = f(values, dropna=dropna)
try:
result = np.sort(result)
except TypeError as e:
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index a1a8f098b582e..30f9c56d24f02 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2118,20 +2118,30 @@ def max(self, numeric_only=None, **kwargs):
else:
return self.categories[pointer]
- def mode(self):
+ def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
+ Parameters
+ ----------
+ dropna : boolean, default True
+ Don't consider counts of NaN/NaT.
+
+ .. versionadded:: 0.24.0
+
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
- good = self._codes != -1
- values = sorted(htable.mode_int64(_ensure_int64(self._codes[good])))
+ values = self._codes
+ if dropna:
+ good = self._codes != -1
+ values = self._codes[good]
+ values = sorted(htable.mode_int64(_ensure_int64(values), dropna))
result = self._constructor(values=values, categories=self.categories,
ordered=self.ordered, fastpath=True)
return result
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1c7339a91c2fd..e75d76cf612e9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7038,7 +7038,7 @@ def _get_agg_axis(self, axis_num):
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
- def mode(self, axis=0, numeric_only=False):
+ def mode(self, axis=0, numeric_only=False, dropna=True):
"""
Gets the mode(s) of each element along the axis selected. Adds a row
for each mode per label, fills in gaps with nan.
@@ -7056,6 +7056,10 @@ def mode(self, axis=0, numeric_only=False):
* 1 or 'columns' : get mode of each row
numeric_only : boolean, default False
if True, only apply to numeric columns
+ dropna : boolean, default True
+ Don't consider counts of NaN/NaT.
+
+ .. versionadded:: 0.24.0
Returns
-------
@@ -7072,7 +7076,7 @@ def mode(self, axis=0, numeric_only=False):
data = self if not numeric_only else self._get_numeric_data()
def f(s):
- return s.mode()
+ return s.mode(dropna=dropna)
return data.apply(f, axis=axis)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d59401414181f..4cf29319f703e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1431,17 +1431,24 @@ def count(self, level=None):
return self._constructor(out, index=lev,
dtype='int64').__finalize__(self)
- def mode(self):
+ def mode(self, dropna=True):
"""Return the mode(s) of the dataset.
Always returns Series even if only one value is returned.
+ Parameters
+ -------
+ dropna : boolean, default True
+ Don't consider counts of NaN/NaT.
+
+ .. versionadded:: 0.24.0
+
Returns
-------
modes : Series (sorted)
"""
# TODO: Add option for bins like value_counts()
- return algorithms.mode(self)
+ return algorithms.mode(self, dropna=dropna)
def unique(self):
"""
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index d1a4a5f615b86..b8f1acc2aa679 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -15,11 +15,11 @@
from pandas.compat import lrange, product, PY35
from pandas import (compat, isna, notna, DataFrame, Series,
MultiIndex, date_range, Timestamp, Categorical,
- _np_version_under1p12, _np_version_under1p15)
+ _np_version_under1p12, _np_version_under1p15,
+ to_datetime, to_timedelta)
import pandas as pd
import pandas.core.nanops as nanops
import pandas.core.algorithms as algorithms
-import pandas.io.formats.printing as printing
import pandas.util.testing as tm
import pandas.util._test_decorators as td
@@ -840,54 +840,74 @@ def wrapper(x):
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
- def test_mode(self):
- df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11],
- "B": [10, 10, 10, np.nan, 3, 4],
- "C": [8, 8, 8, 9, 9, 9],
- "D": np.arange(6, dtype='int64'),
- "E": [8, 8, 1, 1, 3, 3]})
- tm.assert_frame_equal(df[["A"]].mode(),
- pd.DataFrame({"A": [12]}))
- expected = pd.Series([0, 1, 2, 3, 4, 5], dtype='int64', name='D').\
- to_frame()
- tm.assert_frame_equal(df[["D"]].mode(), expected)
- expected = pd.Series([1, 3, 8], dtype='int64', name='E').to_frame()
- tm.assert_frame_equal(df[["E"]].mode(), expected)
- tm.assert_frame_equal(df[["A", "B"]].mode(),
- pd.DataFrame({"A": [12], "B": [10.]}))
- tm.assert_frame_equal(df.mode(),
- pd.DataFrame({"A": [12, np.nan, np.nan, np.nan,
- np.nan, np.nan],
- "B": [10, np.nan, np.nan, np.nan,
- np.nan, np.nan],
- "C": [8, 9, np.nan, np.nan, np.nan,
- np.nan],
- "D": [0, 1, 2, 3, 4, 5],
- "E": [1, 3, 8, np.nan, np.nan,
- np.nan]}))
-
- # outputs in sorted order
- df["C"] = list(reversed(df["C"]))
- printing.pprint_thing(df["C"])
- printing.pprint_thing(df["C"].mode())
- a, b = (df[["A", "B", "C"]].mode(),
- pd.DataFrame({"A": [12, np.nan],
- "B": [10, np.nan],
- "C": [8, 9]}))
- printing.pprint_thing(a)
- printing.pprint_thing(b)
- tm.assert_frame_equal(a, b)
- # should work with heterogeneous types
- df = pd.DataFrame({"A": np.arange(6, dtype='int64'),
- "B": pd.date_range('2011', periods=6),
- "C": list('abcdef')})
- exp = pd.DataFrame({"A": pd.Series(np.arange(6, dtype='int64'),
- dtype=df["A"].dtype),
- "B": pd.Series(pd.date_range('2011', periods=6),
- dtype=df["B"].dtype),
- "C": pd.Series(list('abcdef'),
- dtype=df["C"].dtype)})
- tm.assert_frame_equal(df.mode(), exp)
+ @pytest.mark.parametrize("dropna, expected", [
+ (True, {'A': [12],
+ 'B': [10.0],
+ 'C': [1.0],
+ 'D': ['a'],
+ 'E': Categorical(['a'], categories=['a']),
+ 'F': to_datetime(['2000-1-2']),
+ 'G': to_timedelta(['1 days'])}),
+ (False, {'A': [12],
+ 'B': [10.0],
+ 'C': [np.nan],
+ 'D': np.array([np.nan], dtype=object),
+ 'E': Categorical([np.nan], categories=['a']),
+ 'F': [pd.NaT],
+ 'G': to_timedelta([pd.NaT])}),
+ (True, {'H': [8, 9, np.nan, np.nan],
+ 'I': [8, 9, np.nan, np.nan],
+ 'J': [1, np.nan, np.nan, np.nan],
+ 'K': Categorical(['a', np.nan, np.nan, np.nan],
+ categories=['a']),
+ 'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
+ 'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
+ 'N': [0, 1, 2, 3]}),
+ (False, {'H': [8, 9, np.nan, np.nan],
+ 'I': [8, 9, np.nan, np.nan],
+ 'J': [1, np.nan, np.nan, np.nan],
+ 'K': Categorical([np.nan, 'a', np.nan, np.nan],
+ categories=['a']),
+ 'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
+ 'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
+ 'N': [0, 1, 2, 3]})
+ ])
+ def test_mode_dropna(self, dropna, expected):
+
+ df = DataFrame({"A": [12, 12, 19, 11],
+ "B": [10, 10, np.nan, 3],
+ "C": [1, np.nan, np.nan, np.nan],
+ "D": [np.nan, np.nan, 'a', np.nan],
+ "E": Categorical([np.nan, np.nan, 'a', np.nan]),
+ "F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
+ "G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
+ "H": [8, 8, 9, 9],
+ "I": [9, 9, 8, 8],
+ "J": [1, 1, np.nan, np.nan],
+ "K": Categorical(['a', np.nan, 'a', np.nan]),
+ "L": to_datetime(['2000-1-2', '2000-1-2',
+ 'NaT', 'NaT']),
+ "M": to_timedelta(['1 days', 'nan',
+ '1 days', 'nan']),
+ "N": np.arange(4, dtype='int64')})
+
+ result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
+ expected = DataFrame(expected)
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.skipif(not compat.PY3, reason="only PY3")
+ def test_mode_sortwarning(self):
+ # Check for the warning that is raised when the mode
+ # results cannot be sorted
+
+ df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
+ expected = DataFrame({'A': ['a', np.nan]})
+
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
+ result = df.mode(dropna=False)
+ result = result.sort_values(by='A').reset_index(drop=True)
+
+ tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
from datetime import timedelta
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 6ea40329f4bc3..14ae1ef42865a 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -225,102 +225,6 @@ def test_median(self):
int_ts = Series(np.ones(10, dtype=int), index=lrange(10))
tm.assert_almost_equal(np.median(int_ts), int_ts.median())
- def test_mode(self):
- # No mode should be found.
- exp = Series([], dtype=np.float64)
- tm.assert_series_equal(Series([]).mode(), exp)
-
- exp = Series([1], dtype=np.int64)
- tm.assert_series_equal(Series([1]).mode(), exp)
-
- exp = Series(['a', 'b', 'c'], dtype=np.object)
- tm.assert_series_equal(Series(['a', 'b', 'c']).mode(), exp)
-
- # Test numerical data types.
- exp_single = [1]
- data_single = [1] * 5 + [2] * 3
-
- exp_multi = [1, 3]
- data_multi = [1] * 5 + [2] * 3 + [3] * 5
-
- for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
- s = Series(data_single, dtype=dt)
- exp = Series(exp_single, dtype=dt)
- tm.assert_series_equal(s.mode(), exp)
-
- s = Series(data_multi, dtype=dt)
- exp = Series(exp_multi, dtype=dt)
- tm.assert_series_equal(s.mode(), exp)
-
- # Test string and object types.
- exp = ['b']
- data = ['a'] * 2 + ['b'] * 3
-
- s = Series(data, dtype='c')
- exp = Series(exp, dtype='c')
- tm.assert_series_equal(s.mode(), exp)
-
- exp = ['bar']
- data = ['foo'] * 2 + ['bar'] * 3
-
- for dt in [str, object]:
- s = Series(data, dtype=dt)
- exp = Series(exp, dtype=dt)
- tm.assert_series_equal(s.mode(), exp)
-
- # Test datetime types.
- exp = Series(['1900-05-03', '2011-01-03',
- '2013-01-02'], dtype='M8[ns]')
- s = Series(['2011-01-03', '2013-01-02',
- '1900-05-03'], dtype='M8[ns]')
- tm.assert_series_equal(s.mode(), exp)
-
- exp = Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]')
- s = Series(['2011-01-03', '2013-01-02', '1900-05-03',
- '2011-01-03', '2013-01-02'], dtype='M8[ns]')
- tm.assert_series_equal(s.mode(), exp)
-
- # gh-5986: Test timedelta types.
- exp = Series(['-1 days', '0 days', '1 days'], dtype='timedelta64[ns]')
- s = Series(['1 days', '-1 days', '0 days'],
- dtype='timedelta64[ns]')
- tm.assert_series_equal(s.mode(), exp)
-
- exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
- s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
- '2 min', '2 min'], dtype='timedelta64[ns]')
- tm.assert_series_equal(s.mode(), exp)
-
- # Test mixed dtype.
- exp = Series(['foo'])
- s = Series([1, 'foo', 'foo'])
- tm.assert_series_equal(s.mode(), exp)
-
- # Test for uint64 overflow.
- exp = Series([2**63], dtype=np.uint64)
- s = Series([1, 2**63, 2**63], dtype=np.uint64)
- tm.assert_series_equal(s.mode(), exp)
-
- exp = Series([1, 2**63], dtype=np.uint64)
- s = Series([1, 2**63], dtype=np.uint64)
- tm.assert_series_equal(s.mode(), exp)
-
- # Test category dtype.
- c = Categorical([1, 2])
- exp = Categorical([1, 2], categories=[1, 2])
- exp = Series(exp, dtype='category')
- tm.assert_series_equal(Series(c).mode(), exp)
-
- c = Categorical([1, 'a', 'a'])
- exp = Categorical(['a'], categories=[1, 'a'])
- exp = Series(exp, dtype='category')
- tm.assert_series_equal(Series(c).mode(), exp)
-
- c = Categorical([1, 1, 2, 3, 3])
- exp = Categorical([1, 3], categories=[1, 2, 3])
- exp = Series(exp, dtype='category')
- tm.assert_series_equal(Series(c).mode(), exp)
-
def test_prod(self):
self._check_stat_op('prod', np.prod)
@@ -1866,6 +1770,180 @@ def s_main_dtypes():
return df
+class TestMode(object):
+
+ @pytest.mark.parametrize('dropna, expected', [
+ (True, Series([], dtype=np.float64)),
+ (False, Series([], dtype=np.float64))
+ ])
+ def test_mode_empty(self, dropna, expected):
+ s = Series([], dtype=np.float64)
+ result = s.mode(dropna)
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize('dropna, data, expected', [
+ (True, [1, 1, 1, 2], [1]),
+ (True, [1, 1, 1, 2, 3, 3, 3], [1, 3]),
+ (False, [1, 1, 1, 2], [1]),
+ (False, [1, 1, 1, 2, 3, 3, 3], [1, 3]),
+ ])
+ @pytest.mark.parametrize(
+ 'dt',
+ list(np.typecodes['AllInteger'] + np.typecodes['Float'])
+ )
+ def test_mode_numerical(self, dropna, data, expected, dt):
+ s = Series(data, dtype=dt)
+ result = s.mode(dropna)
+ expected = Series(expected, dtype=dt)
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize('dropna, expected', [
+ (True, [1.0]),
+ (False, [1, np.nan]),
+ ])
+ def test_mode_numerical_nan(self, dropna, expected):
+ s = Series([1, 1, 2, np.nan, np.nan])
+ result = s.mode(dropna)
+ expected = Series(expected)
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize('dropna, expected1, expected2, expected3', [
+ (True, ['b'], ['bar'], ['nan']),
+ (False, ['b'], [np.nan], ['nan'])
+ ])
+ def test_mode_str_obj(self, dropna, expected1, expected2, expected3):
+ # Test string and object types.
+ data = ['a'] * 2 + ['b'] * 3
+
+ s = Series(data, dtype='c')
+ result = s.mode(dropna)
+ expected1 = Series(expected1, dtype='c')
+ tm.assert_series_equal(result, expected1)
+
+ data = ['foo', 'bar', 'bar', np.nan, np.nan, np.nan]
+
+ s = Series(data, dtype=object)
+ result = s.mode(dropna)
+ expected2 = Series(expected2, dtype=object)
+ tm.assert_series_equal(result, expected2)
+
+ data = ['foo', 'bar', 'bar', np.nan, np.nan, np.nan]
+
+ s = Series(data, dtype=str)
+ result = s.mode(dropna)
+ expected3 = Series(expected3, dtype=str)
+ tm.assert_series_equal(result, expected3)
+
+ @pytest.mark.parametrize('dropna, expected1, expected2', [
+ (True, ['foo'], ['foo']),
+ (False, ['foo'], [np.nan])
+ ])
+ def test_mode_mixeddtype(self, dropna, expected1, expected2):
+ s = Series([1, 'foo', 'foo'])
+ result = s.mode(dropna)
+ expected = Series(expected1)
+ tm.assert_series_equal(result, expected)
+
+ s = Series([1, 'foo', 'foo', np.nan, np.nan, np.nan])
+ result = s.mode(dropna)
+ expected = Series(expected2, dtype=object)
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize('dropna, expected1, expected2', [
+ (True, ['1900-05-03', '2011-01-03', '2013-01-02'],
+ ['2011-01-03', '2013-01-02']),
+ (False, [np.nan], [np.nan, '2011-01-03', '2013-01-02']),
+ ])
+ def test_mode_datetime(self, dropna, expected1, expected2):
+ s = Series(['2011-01-03', '2013-01-02',
+ '1900-05-03', 'nan', 'nan'], dtype='M8[ns]')
+ result = s.mode(dropna)
+ expected1 = Series(expected1, dtype='M8[ns]')
+ tm.assert_series_equal(result, expected1)
+
+ s = Series(['2011-01-03', '2013-01-02', '1900-05-03',
+ '2011-01-03', '2013-01-02', 'nan', 'nan'],
+ dtype='M8[ns]')
+ result = s.mode(dropna)
+ expected2 = Series(expected2, dtype='M8[ns]')
+ tm.assert_series_equal(result, expected2)
+
+ @pytest.mark.parametrize('dropna, expected1, expected2', [
+ (True, ['-1 days', '0 days', '1 days'], ['2 min', '1 day']),
+ (False, [np.nan], [np.nan, '2 min', '1 day']),
+ ])
+ def test_mode_timedelta(self, dropna, expected1, expected2):
+ # gh-5986: Test timedelta types.
+
+ s = Series(['1 days', '-1 days', '0 days', 'nan', 'nan'],
+ dtype='timedelta64[ns]')
+ result = s.mode(dropna)
+ expected1 = Series(expected1, dtype='timedelta64[ns]')
+ tm.assert_series_equal(result, expected1)
+
+ s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
+ '2 min', '2 min', 'nan', 'nan'],
+ dtype='timedelta64[ns]')
+ result = s.mode(dropna)
+ expected2 = Series(expected2, dtype='timedelta64[ns]')
+ tm.assert_series_equal(result, expected2)
+
+ @pytest.mark.parametrize('dropna, expected1, expected2, expected3', [
+ (True, Categorical([1, 2], categories=[1, 2]),
+ Categorical(['a'], categories=[1, 'a']),
+ Categorical([3, 1], categories=[3, 2, 1], ordered=True)),
+ (False, Categorical([np.nan], categories=[1, 2]),
+ Categorical([np.nan, 'a'], categories=[1, 'a']),
+ Categorical([np.nan, 3, 1], categories=[3, 2, 1], ordered=True)),
+ ])
+ def test_mode_category(self, dropna, expected1, expected2, expected3):
+ s = Series(Categorical([1, 2, np.nan, np.nan]))
+ result = s.mode(dropna)
+ expected1 = Series(expected1, dtype='category')
+ tm.assert_series_equal(result, expected1)
+
+ s = Series(Categorical([1, 'a', 'a', np.nan, np.nan]))
+ result = s.mode(dropna)
+ expected2 = Series(expected2, dtype='category')
+ tm.assert_series_equal(result, expected2)
+
+ s = Series(Categorical([1, 1, 2, 3, 3, np.nan, np.nan],
+ categories=[3, 2, 1], ordered=True))
+ result = s.mode(dropna)
+ expected3 = Series(expected3, dtype='category')
+ tm.assert_series_equal(result, expected3)
+
+ @pytest.mark.parametrize('dropna, expected1, expected2', [
+ (True, [2**63], [1, 2**63]),
+ (False, [2**63], [1, 2**63])
+ ])
+ def test_mode_intoverflow(self, dropna, expected1, expected2):
+ # Test for uint64 overflow.
+ s = Series([1, 2**63, 2**63], dtype=np.uint64)
+ result = s.mode(dropna)
+ expected1 = Series(expected1, dtype=np.uint64)
+ tm.assert_series_equal(result, expected1)
+
+ s = Series([1, 2**63], dtype=np.uint64)
+ result = s.mode(dropna)
+ expected2 = Series(expected2, dtype=np.uint64)
+ tm.assert_series_equal(result, expected2)
+
+ @pytest.mark.skipif(not compat.PY3, reason="only PY3")
+ def test_mode_sortwarning(self):
+ # Check for the warning that is raised when the mode
+ # results cannot be sorted
+
+ expected = Series(['foo', np.nan])
+ s = Series([1, 'foo', 'foo', np.nan, np.nan])
+
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
+ result = s.mode(dropna=False)
+ result = result.sort_values().reset_index(drop=True)
+
+ tm.assert_series_equal(result, expected)
+
+
class TestNLargestNSmallest(object):
@pytest.mark.parametrize(
@@ -1994,26 +2072,6 @@ def test_min_max(self):
assert np.isnan(_min)
assert _max == 1
- def test_mode(self):
- s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
- categories=[5, 4, 3, 2, 1], ordered=True))
- res = s.mode()
- exp = Series(Categorical([5], categories=[
- 5, 4, 3, 2, 1], ordered=True))
- tm.assert_series_equal(res, exp)
- s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
- categories=[5, 4, 3, 2, 1], ordered=True))
- res = s.mode()
- exp = Series(Categorical([5, 1], categories=[
- 5, 4, 3, 2, 1], ordered=True))
- tm.assert_series_equal(res, exp)
- s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
- ordered=True))
- res = s.mode()
- exp = Series(Categorical([5, 4, 3, 2, 1], categories=[5, 4, 3, 2, 1],
- ordered=True))
- tm.assert_series_equal(res, exp)
-
def test_value_counts(self):
# GH 12835
cats = Categorical(list('abcccb'), categories=list('cabd'))
| - [x] closes #17534
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
There are other mode tests in ``tests/test_algos.py`` but I'm not sure if I should test the ``dropna`` parameter in this file too. The tests seem very similar to those in ``tests/series/test_analytics.py``. | https://api.github.com/repos/pandas-dev/pandas/pulls/20779 | 2018-04-21T17:15:28Z | 2018-05-31T21:44:30Z | 2018-05-31T21:44:29Z | 2018-06-02T12:07:26Z |
BUG: Fix KeyError in merge on CategoricalIndex | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index f5e3b1a35acb5..9cdda2d14669c 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -1327,6 +1327,7 @@ Sparse
Reshaping
^^^^^^^^^
+- Bug in :func:`DataFrame.merge` where referencing a ``CategoricalIndex`` by name, where the ``by`` kwarg would ``KeyError`` (:issue:`20777`)
- Bug in :func:`DataFrame.stack` which fails trying to sort mixed type levels under Python 3 (:issue:`18310`)
- Bug in :func:`DataFrame.unstack` which casts int to float if ``columns`` is a ``MultiIndex`` with unused levels (:issue:`17845`)
- Bug in :func:`DataFrame.unstack` which raises an error if ``index`` is a ``MultiIndex`` with unused labels on the unstacked level (:issue:`18562`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index a03d892432b51..e8f74cf58a262 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1585,6 +1585,8 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,
if is_sparse(arr):
arr = arr.get_values()
+ elif isinstance(arr, (ABCIndexClass, ABCSeries)):
+ arr = arr.values
arr = np.asarray(arr)
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 7b1a0875bba59..0204e655bfa2c 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -705,8 +705,7 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
take_right = self.right[name]._values
elif left_indexer is not None \
- and isinstance(self.left_join_keys[i], np.ndarray):
-
+ and is_array_like(self.left_join_keys[i]):
take_left = self.left_join_keys[i]
take_right = self.right_join_keys[i]
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index f3827ac251cf0..436fe8f9f5d7e 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -1,26 +1,27 @@
# pylint: disable=E1103
-import pytest
-from datetime import datetime, date
-from numpy.random import randn
-from numpy import nan
-import numpy as np
import random
import re
+from collections import OrderedDict
+from datetime import date, datetime
+
+import numpy as np
+import pytest
+from numpy import nan
+from numpy.random import randn
import pandas as pd
+import pandas.util.testing as tm
+from pandas import (Categorical, CategoricalIndex, DataFrame, DatetimeIndex,
+ Float64Index, Index, Int64Index, MultiIndex, RangeIndex,
+ Series, UInt64Index)
+from pandas.api.types import CategoricalDtype as CDT
from pandas.compat import lrange, lzip
+from pandas.core.dtypes.common import is_categorical_dtype, is_object_dtype
+from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.reshape.concat import concat
-from pandas.core.reshape.merge import merge, MergeError
+from pandas.core.reshape.merge import MergeError, merge
from pandas.util.testing import assert_frame_equal, assert_series_equal
-from pandas.core.dtypes.dtypes import CategoricalDtype
-from pandas.core.dtypes.common import (
- is_categorical_dtype,
- is_object_dtype,
-)
-from pandas import DataFrame, Index, MultiIndex, Series, Categorical
-import pandas.util.testing as tm
-from pandas.api.types import CategoricalDtype as CDT
N = 50
NGROUPS = 8
@@ -813,7 +814,7 @@ def test_validation(self):
# Dups on right
right_w_dups = right.append(pd.DataFrame({'a': ['e'], 'c': ['moo']},
- index=[4]))
+ index=[4]))
merge(left, right_w_dups, left_index=True, right_index=True,
validate='one_to_many')
@@ -1388,17 +1389,24 @@ def test_merge_datetime_index(self, klass):
if klass is not None:
on_vector = klass(on_vector)
- expected = DataFrame({"a": [1, 2, 3]})
-
- if klass == np.asarray:
- # The join key is added for ndarray.
- expected["key_1"] = [2016, 2017, 2018]
+ expected = DataFrame(
+ OrderedDict([
+ ("a", [1, 2, 3]),
+ ("key_1", [2016, 2017, 2018]),
+ ])
+ )
result = df.merge(df, on=["a", on_vector], how="inner")
tm.assert_frame_equal(result, expected)
- expected = DataFrame({"a_x": [1, 2, 3],
- "a_y": [1, 2, 3]})
+ expected = DataFrame(
+ OrderedDict([
+ ("key_0", [2016, 2017, 2018]),
+ ("a_x", [1, 2, 3]),
+ ("a_y", [1, 2, 3]),
+ ])
+ )
+
result = df.merge(df, on=[df.index.year], how="inner")
tm.assert_frame_equal(result, expected)
@@ -1427,7 +1435,7 @@ def test_different(self, right_vals):
# We allow merging on object and categorical cols and cast
# categorical cols to object
if (is_categorical_dtype(right['A'].dtype) or
- is_object_dtype(right['A'].dtype)):
+ is_object_dtype(right['A'].dtype)):
result = pd.merge(left, right, on='A')
assert is_object_dtype(result.A.dtype)
@@ -1826,3 +1834,26 @@ def test_merge_on_indexes(self, left_df, right_df, how, sort, expected):
how=how,
sort=sort)
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ 'index', [
+ CategoricalIndex(['A', 'B'], categories=['A', 'B'], name='index_col'),
+ Float64Index([1.0, 2.0], name='index_col'),
+ Int64Index([1, 2], name='index_col'),
+ UInt64Index([1, 2], name='index_col'),
+ RangeIndex(start=0, stop=2, name='index_col'),
+ DatetimeIndex(["2018-01-01", "2018-01-02"], name='index_col'),
+ ], ids=lambda x: type(x).__name__)
+def test_merge_index_types(index):
+ # gh-20777
+ # assert key access is consistent across index types
+ left = DataFrame({"left_data": [1, 2]}, index=index)
+ right = DataFrame({"right_data": [1.0, 2.0]}, index=index)
+
+ result = left.merge(right, on=['index_col'])
+
+ expected = DataFrame(
+ OrderedDict([('left_data', [1, 2]), ('right_data', [1.0, 2.0])]),
+ index=index)
+ assert_frame_equal(result, expected)
| For categorical type indices a `KeyError` is raised when the index level is used during a merge on an index level
Example:
```
import pandas as pd
left = pd.DataFrame(
{"left_data": [1, 2]},
index=pd.CategoricalIndex(data=["A", "B"], categories=["A", "B"], name='index_col')
)
right = pd.DataFrame(
{"right_data": [1.0, 2.0]},
index=pd.CategoricalIndex(data=["A", "B"], categories=["A", "B"], name='index_col')
)
result = left.merge(right, on=['index_col'])
```
With this fix, the behavior of the test `test_merge_datetime_index(self, klass)` changed, though. IMHO, the behavior in this PR is more consistent since it is the same for all input types but I'm not sure what the actual behavior should be and I couldn't find a section in the documentation explaining this path.
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/20777 | 2018-04-21T15:39:14Z | 2018-05-03T10:37:12Z | 2018-05-03T10:37:12Z | 2018-05-03T10:37:18Z |
DOC: Add interpolate to fillna 'See Also' section | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d3ab7afc025c9..2b683d3a606b1 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5289,6 +5289,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
See Also
--------
+ interpolate : Fill NaN values using interpolation.
reindex, asfreq
Returns
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 0d0023b9f67d3..f8d283e932f44 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -659,6 +659,7 @@ def fillna(self, method, limit=None):
pad : Forward fill NaN values in the resampled data.
nearest : Fill NaN values in the resampled data
with nearest neighbor starting from center.
+ interpolate : Fill NaN values using interpolation.
pandas.Series.fillna : Fill NaN values in the Series using the
specified method, which can be 'bfill' and 'ffill'.
pandas.DataFrame.fillna : Fill NaN values in the DataFrame using the
| https://api.github.com/repos/pandas-dev/pandas/pulls/20776 | 2018-04-21T10:20:17Z | 2018-04-21T17:23:14Z | 2018-04-21T17:23:14Z | 2018-04-21T17:26:53Z | |
BUG: Fix freq setter for DatetimeIndex/TimedeltaIndex and deprecate for PeriodIndex | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index c128058858c17..4330552e0eb11 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -890,8 +890,10 @@ Deprecations
of the ``Series`` and ``Index`` classes have been deprecated and will be
removed in a future version (:issue:`20419`).
- ``DatetimeIndex.offset`` is deprecated. Use ``DatetimeIndex.freq`` instead (:issue:`20716`)
+- Setting ``PeriodIndex.freq`` (which was not guaranteed to work correctly) is deprecated. Use :meth:`PeriodIndex.asfreq` instead (:issue:`20678`)
- ``Index.get_duplicates()`` is deprecated and will be removed in a future version (:issue:`20239`)
+
.. _whatsnew_0230.prior_deprecations:
Removal of prior version deprecations/changes
@@ -1046,6 +1048,7 @@ Datetimelike
- Bug in :func:`to_datetime` where passing an out-of-bounds datetime with ``errors='coerce'`` and ``utc=True`` would raise ``OutOfBoundsDatetime`` instead of parsing to ``NaT`` (:issue:`19612`)
- Bug in :class:`DatetimeIndex` and :class:`TimedeltaIndex` addition and subtraction where name of the returned object was not always set consistently. (:issue:`19744`)
- Bug in :class:`DatetimeIndex` and :class:`TimedeltaIndex` addition and subtraction where operations with numpy arrays raised ``TypeError`` (:issue:`19847`)
+- Bug in :class:`DatetimeIndex` and :class:`TimedeltaIndex` where setting the ``freq`` attribute was not fully supported (:issue:`20678`)
Timedelta
^^^^^^^^^
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 51cd1837fecca..158b272384ae8 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -205,6 +205,43 @@ def floor(self, freq):
def ceil(self, freq):
return self._round(freq, np.ceil)
+ @classmethod
+ def _validate_frequency(cls, index, freq, **kwargs):
+ """
+ Validate that a frequency is compatible with the values of a given
+ DatetimeIndex or TimedeltaIndex
+
+ Parameters
+ ----------
+ index : DatetimeIndex or TimedeltaIndex
+ The index on which to determine if the given frequency is valid
+ freq : DateOffset
+ The frequency to validate
+ """
+ inferred = index.inferred_freq
+ if index.empty or inferred == freq.freqstr:
+ return None
+
+ on_freq = cls._generate(
+ index[0], None, len(index), None, freq, **kwargs)
+ if not np.array_equal(index.asi8, on_freq.asi8):
+ msg = ('Inferred frequency {infer} from passed values does not '
+ 'conform to passed frequency {passed}')
+ raise ValueError(msg.format(infer=inferred, passed=freq.freqstr))
+
+ @property
+ def freq(self):
+ """Return the frequency object if it is set, otherwise None"""
+ return self._freq
+
+ @freq.setter
+ def freq(self, value):
+ if value is not None:
+ value = frequencies.to_offset(value)
+ self._validate_frequency(self, value)
+
+ self._freq = value
+
class DatetimeIndexOpsMixin(object):
""" common ops mixin to support a unified interface datetimelike Index """
@@ -401,7 +438,7 @@ def __getitem__(self, key):
@property
def freqstr(self):
"""
- Return the frequency object as a string if its set, otherwise None
+ Return the frequency object as a string if it is set, otherwise None
"""
if self.freq is None:
return None
@@ -410,7 +447,7 @@ def freqstr(self):
@cache_readonly
def inferred_freq(self):
"""
- Tryies to return a string representing a frequency guess,
+ Tries to return a string representing a frequency guess,
generated by infer_freq. Returns None if it can't autodetect the
frequency.
"""
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index e0e7ba3e8b518..720718e78d50e 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -454,15 +454,7 @@ def __new__(cls, data=None,
if verify_integrity and len(subarr) > 0:
if freq is not None and not freq_infer:
- inferred = subarr.inferred_freq
- if inferred != freq.freqstr:
- on_freq = cls._generate(subarr[0], None, len(subarr), None,
- freq, tz=tz, ambiguous=ambiguous)
- if not np.array_equal(subarr.asi8, on_freq.asi8):
- raise ValueError('Inferred frequency {0} from passed '
- 'dates does not conform to passed '
- 'frequency {1}'
- .format(inferred, freq.freqstr))
+ cls._validate_frequency(subarr, freq, ambiguous=ambiguous)
if freq_infer:
inferred = subarr.inferred_freq
@@ -836,7 +828,7 @@ def __setstate__(self, state):
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
- self.freq = own_state[1]
+ self._freq = own_state[1]
self._tz = timezones.tz_standardize(own_state[2])
# provide numpy < 1.7 compat
@@ -1726,16 +1718,6 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None):
else:
raise
- @property
- def freq(self):
- """get/set the frequency of the Index"""
- return self._freq
-
- @freq.setter
- def freq(self, value):
- """get/set the frequency of the Index"""
- self._freq = value
-
@property
def offset(self):
"""get/set the frequency of the Index"""
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 4a224d4e6ee7f..b9e8f9028dbf7 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -219,7 +219,7 @@ class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
_is_numeric_dtype = False
_infer_as_myclass = True
- freq = None
+ _freq = None
_engine_type = libindex.PeriodEngine
@@ -367,7 +367,7 @@ def _from_ordinals(cls, values, name=None, freq=None, **kwargs):
result.name = name
if freq is None:
raise ValueError('freq is not specified and cannot be inferred')
- result.freq = Period._maybe_convert_freq(freq)
+ result._freq = Period._maybe_convert_freq(freq)
result._reset_identity()
return result
@@ -560,6 +560,19 @@ def is_full(self):
values = self.values
return ((values[1:] - values[:-1]) < 2).all()
+ @property
+ def freq(self):
+ """Return the frequency object if it is set, otherwise None"""
+ return self._freq
+
+ @freq.setter
+ def freq(self, value):
+ msg = ('Setting PeriodIndex.freq has been deprecated and will be '
+ 'removed in a future version; use PeriodIndex.asfreq instead. '
+ 'The PeriodIndex.freq setter is not guaranteed to work.')
+ warnings.warn(msg, FutureWarning, stacklevel=2)
+ self._freq = value
+
def asfreq(self, freq=None, how='E'):
"""
Convert the PeriodIndex to the specified frequency `freq`.
@@ -1060,7 +1073,7 @@ def __setstate__(self, state):
np.ndarray.__setstate__(data, nd_state)
# backcompat
- self.freq = Period._maybe_convert_freq(own_state[1])
+ self._freq = Period._maybe_convert_freq(own_state[1])
else: # pragma: no cover
data = np.empty(state)
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 9757d775201cc..6b278fc35c831 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -28,7 +28,8 @@
import pandas.core.common as com
import pandas.core.dtypes.concat as _concat
from pandas.util._decorators import Appender, Substitution, deprecate_kwarg
-from pandas.core.indexes.datetimelike import TimelikeOps, DatetimeIndexOpsMixin
+from pandas.core.indexes.datetimelike import (
+ TimelikeOps, DatetimeIndexOpsMixin)
from pandas.core.tools.timedeltas import (
to_timedelta, _coerce_scalar_to_timedelta_type)
from pandas.tseries.offsets import Tick, DateOffset
@@ -195,7 +196,7 @@ def _add_comparison_methods(cls):
_is_numeric_dtype = True
_infer_as_myclass = True
- freq = None
+ _freq = None
def __new__(cls, data=None, unit=None, freq=None, start=None, end=None,
periods=None, closed=None, dtype=None, copy=False,
@@ -251,15 +252,7 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None,
if verify_integrity and len(data) > 0:
if freq is not None and not freq_infer:
index = cls._simple_new(data, name=name)
- inferred = index.inferred_freq
- if inferred != freq.freqstr:
- on_freq = cls._generate(
- index[0], None, len(index), name, freq)
- if not np.array_equal(index.asi8, on_freq.asi8):
- raise ValueError('Inferred frequency {0} from passed '
- 'timedeltas does not conform to '
- 'passed frequency {1}'
- .format(inferred, freq.freqstr))
+ cls._validate_frequency(index, freq)
index.freq = freq
return index
@@ -327,7 +320,7 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs):
result = object.__new__(cls)
result._data = values
result.name = name
- result.freq = freq
+ result._freq = freq
result._reset_identity()
return result
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 3c7d5d37e98f3..c6334e70a1d2c 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -9,8 +9,9 @@
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp,
date_range, _np_version_under1p10, Index,
bdate_range)
-from pandas.tseries.offsets import BMonthEnd, CDay, BDay
+from pandas.tseries.offsets import BMonthEnd, CDay, BDay, Day, Hour
from pandas.tests.test_base import Ops
+from pandas.core.dtypes.generic import ABCDateOffset
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
@@ -405,6 +406,38 @@ def test_equals(self):
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
+ @pytest.mark.parametrize('values', [
+ ['20180101', '20180103', '20180105'], []])
+ @pytest.mark.parametrize('freq', [
+ '2D', Day(2), '2B', BDay(2), '48H', Hour(48)])
+ @pytest.mark.parametrize('tz', [None, 'US/Eastern'])
+ def test_freq_setter(self, values, freq, tz):
+ # GH 20678
+ idx = DatetimeIndex(values, tz=tz)
+
+ # can set to an offset, converting from string if necessary
+ idx.freq = freq
+ assert idx.freq == freq
+ assert isinstance(idx.freq, ABCDateOffset)
+
+ # can reset to None
+ idx.freq = None
+ assert idx.freq is None
+
+ def test_freq_setter_errors(self):
+ # GH 20678
+ idx = DatetimeIndex(['20180101', '20180103', '20180105'])
+
+ # setting with an incompatible freq
+ msg = ('Inferred frequency 2D from passed values does not conform to '
+ 'passed frequency 5D')
+ with tm.assert_raises_regex(ValueError, msg):
+ idx.freq = '5D'
+
+ # setting with non-freq string
+ with tm.assert_raises_regex(ValueError, 'Invalid frequency'):
+ idx.freq = 'foo'
+
def test_offset_deprecated(self):
# GH 20716
idx = pd.DatetimeIndex(['20180101', '20180102'])
diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index 7d117b0b626cf..85aa3f6a38fb3 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -401,6 +401,18 @@ def test_equals(self, freq):
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
+ def test_freq_setter_deprecated(self):
+ # GH 20678
+ idx = pd.period_range('2018Q1', periods=4, freq='Q')
+
+ # no warning for getter
+ with tm.assert_produces_warning(None):
+ idx.freq
+
+ # warning for setter
+ with tm.assert_produces_warning(FutureWarning):
+ idx.freq = pd.offsets.Day()
+
class TestPeriodIndexSeriesMethods(object):
""" Test PeriodIndex and Period Series Ops consistency """
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 49737e5359c2f..2e257bb8a500a 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -11,6 +11,8 @@
_np_version_under1p10)
from pandas._libs.tslib import iNaT
from pandas.tests.test_base import Ops
+from pandas.tseries.offsets import Day, Hour
+from pandas.core.dtypes.generic import ABCDateOffset
class TestTimedeltaIndexOps(Ops):
@@ -306,6 +308,40 @@ def test_equals(self):
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
+ @pytest.mark.parametrize('values', [['0 days', '2 days', '4 days'], []])
+ @pytest.mark.parametrize('freq', ['2D', Day(2), '48H', Hour(48)])
+ def test_freq_setter(self, values, freq):
+ # GH 20678
+ idx = TimedeltaIndex(values)
+
+ # can set to an offset, converting from string if necessary
+ idx.freq = freq
+ assert idx.freq == freq
+ assert isinstance(idx.freq, ABCDateOffset)
+
+ # can reset to None
+ idx.freq = None
+ assert idx.freq is None
+
+ def test_freq_setter_errors(self):
+ # GH 20678
+ idx = TimedeltaIndex(['0 days', '2 days', '4 days'])
+
+ # setting with an incompatible freq
+ msg = ('Inferred frequency 2D from passed values does not conform to '
+ 'passed frequency 5D')
+ with tm.assert_raises_regex(ValueError, msg):
+ idx.freq = '5D'
+
+ # setting with a non-fixed frequency
+ msg = '<2 \* BusinessDays> is a non-fixed frequency'
+ with tm.assert_raises_regex(ValueError, msg):
+ idx.freq = '2B'
+
+ # setting with non-freq string
+ with tm.assert_raises_regex(ValueError, 'Invalid frequency'):
+ idx.freq = 'foo'
+
class TestTimedeltas(object):
| - [X] closes #20678
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
For `DatetimeIndex`/`TimedeltaIndex`:
- Setting `freq` with a string alias now works
- Frequencies are validated with invalid frequencies raising
- Same behavior as when the constructor is called with an invalid frequency
For `PeriodIndex`:
- Setting `freq` is deprecated in favor of `asfreq`
- The behavior of the setter would be ambiguous without the extra `asfreq` params
- Left the existing (buggy) `freq` setter behavior as-is
- Didn't seem worthwhile to try fixing it
- I don't think that the setter was widely used for `PeriodIndex` anyways
- Didn't see any warnings in the tests | https://api.github.com/repos/pandas-dev/pandas/pulls/20772 | 2018-04-20T23:07:38Z | 2018-04-30T17:53:13Z | 2018-04-30T17:53:12Z | 2018-04-30T18:02:17Z |
Emit warning for missing labels in Multiindex.loc[[...]] (and more) | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index e19aedac80213..a05fca94a14e5 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -873,6 +873,7 @@ Deprecations
- The ``is_copy`` attribute is deprecated and will be removed in a future version (:issue:`18801`).
- ``IntervalIndex.from_intervals`` is deprecated in favor of the :class:`IntervalIndex` constructor (:issue:`19263`)
- ``DataFrame.from_items`` is deprecated. Use :func:`DataFrame.from_dict` instead, or ``DataFrame.from_dict(OrderedDict())`` if you wish to preserve the key order (:issue:`17320`, :issue:`17312`)
+- Indexing a :class:`MultiIndex` or a :class:`FloatIndex` with a list containing some missing keys will now show a :class:`FutureWarning`, which is consistent with other types of indexes (:issue:`17758`).
- The ``broadcast`` parameter of ``.apply()`` is deprecated in favor of ``result_type='broadcast'`` (:issue:`18577`)
- The ``reduce`` parameter of ``.apply()`` is deprecated in favor of ``result_type='reduce'`` (:issue:`18577`)
@@ -1107,6 +1108,8 @@ Indexing
- :func:`Index.to_series` now accepts ``index`` and ``name`` kwargs (:issue:`18699`)
- :func:`DatetimeIndex.to_series` now accepts ``index`` and ``name`` kwargs (:issue:`18699`)
- Bug in indexing non-scalar value from ``Series`` having non-unique ``Index`` will return value flattened (:issue:`17610`)
+- Bug in indexing with iterator containing only missing keys, which raised no error (:issue:`20748`)
+- Fixed inconsistency in ``.ix`` between list and scalar keys when the index has integer dtype and does not include the desired keys (:issue:`20753`)
- Bug in ``__setitem__`` when indexing a :class:`DataFrame` with a 2-d boolean ndarray (:issue:`18582`)
- Bug in ``str.extractall`` when there were no matches empty :class:`Index` was returned instead of appropriate :class:`MultiIndex` (:issue:`19034`)
- Bug in :class:`IntervalIndex` where empty and purely NA data was constructed inconsistently depending on the construction method (:issue:`18421`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 12bb09e8f8a8a..3eb7f0d8b5ba7 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4881,6 +4881,9 @@ def _ensure_index(index_like, copy=False):
if hasattr(index_like, 'name'):
return Index(index_like, name=index_like.name, copy=copy)
+ if is_iterator(index_like):
+ index_like = list(index_like)
+
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 2eb52ecc6bcc7..d5f0ca862bfd1 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -16,6 +16,8 @@
_is_unorderable_exception,
_ensure_platform_int)
from pandas.core.dtypes.missing import isna, _infer_fill_value
+from pandas.errors import AbstractMethodError
+from pandas.util._decorators import Appender
from pandas.core.index import Index, MultiIndex
@@ -186,33 +188,43 @@ def __setitem__(self, key, value):
indexer = self._get_setitem_indexer(key)
self._setitem_with_indexer(indexer, value)
- def _has_valid_type(self, k, axis):
- raise NotImplementedError()
+ def _validate_key(self, key, axis):
+ """
+ Ensure that key is valid for current indexer.
+
+ Parameters
+ ----------
+ key : scalar, slice or list-like
+ The key requested
+
+ axis : int
+ Dimension on which the indexing is being made
+
+ Raises
+ ------
+ TypeError
+ If the key (or some element of it) has wrong type
+
+ IndexError
+ If the key (or some element of it) is out of bounds
+
+ KeyError
+ If the key was not found
+ """
+ raise AbstractMethodError()
def _has_valid_tuple(self, key):
""" check the key for valid keys across my indexer """
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
- if not self._has_valid_type(k, i):
+ try:
+ self._validate_key(k, i)
+ except ValueError:
raise ValueError("Location based indexing can only have "
"[{types}] types"
.format(types=self._valid_types))
- def _should_validate_iterable(self, axis=None):
- """ return a boolean whether this axes needs validation for a passed
- iterable
- """
- if axis is None:
- axis = self.axis or 0
- ax = self.obj._get_axis(axis)
- if isinstance(ax, MultiIndex):
- return False
- elif ax.is_floating():
- return False
-
- return True
-
def _is_nested_tuple_indexer(self, tup):
if any(isinstance(ax, MultiIndex) for ax in self.obj.axes):
return any(is_nested_tuple(tup, ax) for ax in self.obj.axes)
@@ -906,38 +918,34 @@ def _multi_take(self, tup):
"""
try:
o = self.obj
- d = {a: self._convert_for_reindex(t, axis=o._get_axis_number(a))
- for t, a in zip(tup, o._AXIS_ORDERS)}
- return o.reindex(**d)
- except(KeyError, IndexingError):
- raise self._exception
+ d = {}
+ for key, axis in zip(tup, o._AXIS_ORDERS):
+ ax = o._get_axis(axis)
+ # Have the index compute an indexer or return None
+ # if it cannot handle:
+ indexer, keyarr = ax._convert_listlike_indexer(key,
+ kind=self.name)
+ # We only act on all found values:
+ if indexer is not None and (indexer != -1).all():
+ self._validate_read_indexer(key, indexer, axis)
+ d[axis] = (ax[indexer], indexer)
+ continue
+
+ # If we are trying to get actual keys from empty Series, we
+ # patiently wait for a KeyError later on - otherwise, convert
+ if len(ax) or not len(key):
+ key = self._convert_for_reindex(key, axis)
+ indexer = ax.get_indexer_for(key)
+ keyarr = ax.reindex(keyarr)[0]
+ self._validate_read_indexer(keyarr, indexer,
+ o._get_axis_number(axis))
+ d[axis] = (keyarr, indexer)
+ return o._reindex_with_indexers(d, copy=True, allow_dups=True)
+ except (KeyError, IndexingError) as detail:
+ raise self._exception(detail)
def _convert_for_reindex(self, key, axis=None):
- if axis is None:
- axis = self.axis or 0
- labels = self.obj._get_axis(axis)
-
- if com.is_bool_indexer(key):
- key = check_bool_indexer(labels, key)
- return labels[key]
- else:
- if isinstance(key, Index):
- keyarr = labels._convert_index_indexer(key)
- else:
- # asarray can be unsafe, NumPy strings are weird
- keyarr = com._asarray_tuplesafe(key)
-
- if is_integer_dtype(keyarr):
- # Cast the indexer to uint64 if possible so
- # that the values returned from indexing are
- # also uint64.
- keyarr = labels._convert_arr_indexer(keyarr)
-
- if not labels.is_integer():
- keyarr = _ensure_platform_int(keyarr)
- return labels.take(keyarr)
-
- return keyarr
+ return key
def _handle_lowerdim_multi_index_axis0(self, tup):
# we have an axis0 multi-index, handle or raise
@@ -1073,8 +1081,9 @@ def _getitem_axis(self, key, axis=None):
if axis is None:
axis = self.axis or 0
- if self._should_validate_iterable(axis):
- self._has_valid_type(key, axis)
+ if is_iterator(key):
+ key = list(key)
+ self._validate_key(key, axis)
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
@@ -1110,8 +1119,7 @@ def _getitem_iterable(self, key, axis=None):
if axis is None:
axis = self.axis or 0
- if self._should_validate_iterable(axis):
- self._has_valid_type(key, axis)
+ self._validate_key(key, axis)
labels = self.obj._get_axis(axis)
@@ -1125,19 +1133,18 @@ def _getitem_iterable(self, key, axis=None):
indexer, keyarr = labels._convert_listlike_indexer(
key, kind=self.name)
if indexer is not None and (indexer != -1).all():
+ self._validate_read_indexer(key, indexer, axis)
return self.obj.take(indexer, axis=axis)
+ ax = self.obj._get_axis(axis)
# existing labels are unique and indexer are unique
if labels.is_unique and Index(keyarr).is_unique:
+ indexer = ax.get_indexer_for(key)
+ self._validate_read_indexer(key, indexer, axis)
- try:
- return self.obj.reindex(keyarr, axis=axis)
- except AttributeError:
-
- # Series
- if axis != 0:
- raise AssertionError('axis must be 0')
- return self.obj.reindex(keyarr)
+ d = {axis: [ax.reindex(keyarr)[0], indexer]}
+ return self.obj._reindex_with_indexers(d, copy=True,
+ allow_dups=True)
# existing labels are non-unique
else:
@@ -1154,15 +1161,68 @@ def _getitem_iterable(self, key, axis=None):
result = self.obj._take(indexer[indexer != -1], axis=axis,
convert=False)
+ self._validate_read_indexer(key, new_indexer, axis)
result = result._reindex_with_indexers(
{axis: [new_target, new_indexer]},
copy=True, allow_dups=True)
else:
+ self._validate_read_indexer(key, indexer, axis)
result = self.obj._take(indexer, axis=axis)
return result
+ def _validate_read_indexer(self, key, indexer, axis):
+ """
+ Check that indexer can be used to return a result (e.g. at least one
+ element was found, unless the list of keys was actually empty).
+
+ Parameters
+ ----------
+ key : list-like
+ Target labels (only used to show correct error message)
+ indexer: array-like of booleans
+ Indices corresponding to the key (with -1 indicating not found)
+ axis: int
+ Dimension on which the indexing is being made
+
+ Raises
+ ------
+ KeyError
+ If at least one key was requested none was found.
+ """
+
+ ax = self.obj._get_axis(axis)
+
+ if len(key) == 0:
+ return
+
+ # Count missing values:
+ missing = (indexer < 0).sum()
+
+ if missing:
+ if missing == len(indexer):
+ raise KeyError(
+ u"None of [{key}] are in the [{axis}]".format(
+ key=key, axis=self.obj._get_axis_name(axis)))
+
+ # we skip the warning on Categorical/Interval
+ # as this check is actually done (check for
+ # non-missing values), but a bit later in the
+ # code, so we want to avoid warning & then
+ # just raising
+
+ _missing_key_warning = textwrap.dedent("""
+ Passing list-likes to .loc or [] with any missing label will raise
+ KeyError in the future, you can use .reindex() as an alternative.
+
+ See the documentation here:
+ https://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike""") # noqa
+
+ if not (ax.is_categorical() or ax.is_interval()):
+ warnings.warn(_missing_key_warning,
+ FutureWarning, stacklevel=5)
+
def _convert_to_indexer(self, obj, axis=None, is_setter=False):
"""
Convert indexing key into something we can use to do actual fancy
@@ -1337,7 +1397,8 @@ def __init__(self, name, obj):
DeprecationWarning, stacklevel=2)
super(_IXIndexer, self).__init__(name, obj)
- def _has_valid_type(self, key, axis):
+ @Appender(_NDFrameIndexer._validate_key.__doc__)
+ def _validate_key(self, key, axis):
if isinstance(key, slice):
return True
@@ -1353,6 +1414,49 @@ def _has_valid_type(self, key, axis):
return True
+ def _convert_for_reindex(self, key, axis=None):
+ """
+ Transform a list of keys into a new array ready to be used as axis of
+ the object we return (e.g. including NaNs).
+
+ Parameters
+ ----------
+ key : list-like
+ Target labels
+ axis: int
+ Where the indexing is being made
+
+ Returns
+ -------
+ list-like of labels
+ """
+
+ if axis is None:
+ axis = self.axis or 0
+ labels = self.obj._get_axis(axis)
+
+ if com.is_bool_indexer(key):
+ key = check_bool_indexer(labels, key)
+ return labels[key]
+
+ if isinstance(key, Index):
+ keyarr = labels._convert_index_indexer(key)
+ else:
+ # asarray can be unsafe, NumPy strings are weird
+ keyarr = com._asarray_tuplesafe(key)
+
+ if is_integer_dtype(keyarr):
+ # Cast the indexer to uint64 if possible so
+ # that the values returned from indexing are
+ # also uint64.
+ keyarr = labels._convert_arr_indexer(keyarr)
+
+ if not labels.is_integer():
+ keyarr = _ensure_platform_int(keyarr)
+ return labels.take(keyarr)
+
+ return keyarr
+
class _LocationIndexer(_NDFrameIndexer):
_exception = Exception
@@ -1656,7 +1760,8 @@ class _LocIndexer(_LocationIndexer):
"index is integers), listlike of labels, boolean")
_exception = KeyError
- def _has_valid_type(self, key, axis):
+ @Appender(_NDFrameIndexer._validate_key.__doc__)
+ def _validate_key(self, key, axis):
ax = self.obj._get_axis(axis)
# valid for a label where all labels are in the index
@@ -1665,48 +1770,12 @@ def _has_valid_type(self, key, axis):
# boolean
if isinstance(key, slice):
- return True
+ return
elif com.is_bool_indexer(key):
- return True
-
- elif is_list_like_indexer(key):
-
- # mi is just a passthru
- if isinstance(key, tuple) and isinstance(ax, MultiIndex):
- return True
-
- if not is_iterator(key) and len(key):
-
- # True indicates missing values
- missing = ax.get_indexer_for(key) < 0
-
- if np.any(missing):
- if len(key) == 1 or np.all(missing):
- raise KeyError(
- u"None of [{key}] are in the [{axis}]".format(
- key=key, axis=self.obj._get_axis_name(axis)))
- else:
-
- # we skip the warning on Categorical/Interval
- # as this check is actually done (check for
- # non-missing values), but a bit later in the
- # code, so we want to avoid warning & then
- # just raising
- _missing_key_warning = textwrap.dedent("""
- Passing list-likes to .loc or [] with any missing label will raise
- KeyError in the future, you can use .reindex() as an alternative.
-
- See the documentation here:
- https://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike""") # noqa
+ return
- if not (ax.is_categorical() or ax.is_interval()):
- warnings.warn(_missing_key_warning,
- FutureWarning, stacklevel=5)
-
- return True
-
- else:
+ elif not is_list_like_indexer(key):
def error():
if isna(key):
@@ -1729,8 +1798,6 @@ def error():
except:
error()
- return True
-
def _is_scalar_access(self, key):
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
@@ -1789,11 +1856,14 @@ def _getitem_axis(self, key, axis=None):
if axis is None:
axis = self.axis or 0
+ if is_iterator(key):
+ key = list(key)
+
labels = self.obj._get_axis(axis)
key = self._get_partial_string_timestamp_match_key(key, labels)
if isinstance(key, slice):
- self._has_valid_type(key, axis)
+ self._validate_key(key, axis)
return self._get_slice_axis(key, axis=axis)
elif com.is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
@@ -1839,7 +1909,7 @@ def _getitem_axis(self, key, axis=None):
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
- self._has_valid_type(key, axis)
+ self._validate_key(key, axis)
return self._get_label(key, axis=axis)
@@ -1871,7 +1941,7 @@ class _iLocIndexer(_LocationIndexer):
"point is EXCLUDED), listlike of integers, boolean array")
_exception = IndexError
- def _has_valid_type(self, key, axis):
+ def _validate_key(self, key, axis):
if com.is_bool_indexer(key):
if hasattr(key, 'index') and isinstance(key.index, Index):
if key.index.inferred_type == 'integer':
@@ -1880,15 +1950,26 @@ def _has_valid_type(self, key, axis):
"is not available")
raise ValueError("iLocation based boolean indexing cannot use "
"an indexable as a mask")
- return True
+ return
if isinstance(key, slice):
- return True
+ return
elif is_integer(key):
- return self._is_valid_integer(key, axis)
+ self._validate_integer(key, axis)
+ elif isinstance(key, tuple):
+ # a tuple should already have been caught by this point
+ # so don't treat a tuple as a valid indexer
+ raise IndexingError('Too many indexers')
elif is_list_like_indexer(key):
- return self._is_valid_list_like(key, axis)
- return False
+ # check that the key does not exceed the maximum size of the index
+ arr = np.array(key)
+ l = len(self.obj._get_axis(axis))
+
+ if len(arr) and (arr.max() >= l or arr.min() < -l):
+ raise IndexError("positional indexers are out-of-bounds")
+ else:
+ raise ValueError("Can only index by location with "
+ "a [{types}]".format(types=self._valid_types))
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
@@ -1920,33 +2001,13 @@ def _getitem_scalar(self, key):
values = self.obj._get_value(*key, takeable=True)
return values
- def _is_valid_integer(self, key, axis):
+ def _validate_integer(self, key, axis):
# return a boolean if we have a valid integer indexer
ax = self.obj._get_axis(axis)
l = len(ax)
if key >= l or key < -l:
raise IndexError("single positional indexer is out-of-bounds")
- return True
-
- def _is_valid_list_like(self, key, axis):
- # return a boolean if we are a valid list-like (e.g. that we don't
- # have out-of-bounds values)
-
- # a tuple should already have been caught by this point
- # so don't treat a tuple as a valid indexer
- if isinstance(key, tuple):
- raise IndexingError('Too many indexers')
-
- # coerce the key to not exceed the maximum size of the index
- arr = np.array(key)
- ax = self.obj._get_axis(axis)
- l = len(ax)
- if (hasattr(arr, '__len__') and len(arr) and
- (arr.max() >= l or arr.min() < -l)):
- raise IndexError("positional indexers are out-of-bounds")
-
- return True
def _getitem_tuple(self, tup):
@@ -2017,17 +2078,13 @@ def _getitem_axis(self, key, axis=None):
axis = self.axis or 0
if isinstance(key, slice):
- self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
if isinstance(key, list):
- try:
- key = np.asarray(key)
- except TypeError: # pragma: no cover
- pass
+ key = np.asarray(key)
if com.is_bool_indexer(key):
- self._has_valid_type(key, axis)
+ self._validate_key(key, axis)
return self._getbool_axis(key, axis=axis)
# a list of integers
@@ -2043,7 +2100,7 @@ def _getitem_axis(self, key, axis=None):
"non-integer key")
# validate the location
- self._is_valid_integer(key, axis)
+ self._validate_integer(key, axis)
return self._get_loc(key, axis=axis)
@@ -2059,11 +2116,12 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False):
elif is_float(obj):
return self._convert_scalar_indexer(obj, axis)
- elif self._has_valid_type(obj, axis):
+ try:
+ self._validate_key(obj, axis)
return obj
-
- raise ValueError("Can only index by location with "
- "a [{types}]".format(types=self._valid_types))
+ except ValueError:
+ raise ValueError("Can only index by location with "
+ "a [{types}]".format(types=self._valid_types))
class _ScalarAccessIndexer(_NDFrameIndexer):
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index 2cefbea722098..28299fbe61daf 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -715,6 +715,9 @@ def _reindex_with_indexers(self, reindexers, method=None, fill_value=None,
if fill_value is None:
fill_value = np.nan
+ reindexers = {self._get_axis_number(a): val
+ for (a, val) in compat.iteritems(reindexers)}
+
index, row_indexer = reindexers.get(0, (None, None))
columns, col_indexer = reindexers.get(1, (None, None))
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index a8b81b1b03552..617833b2d71fd 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -396,8 +396,12 @@ def test_getitem_setitem_ix_negative_integers(self):
assert (self.frame['D'] == 0).all()
df = DataFrame(np.random.randn(8, 4))
- with catch_warnings(record=True):
- assert isna(df.ix[:, [-1]].values).all()
+ # ix does label-based indexing when having an integer index
+ with pytest.raises(KeyError):
+ df.ix[[-1]]
+
+ with pytest.raises(KeyError):
+ df.ix[:, [-1]]
# #1942
a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)])
diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index ded16224aedf2..f0ba1851b28dd 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -1,12 +1,13 @@
""" common utilities """
import itertools
-from warnings import catch_warnings
+from warnings import catch_warnings, filterwarnings
import numpy as np
from pandas.compat import lrange
from pandas.core.dtypes.common import is_scalar
-from pandas import Series, DataFrame, Panel, date_range, UInt64Index
+from pandas import (Series, DataFrame, Panel, date_range, UInt64Index,
+ Float64Index, MultiIndex)
from pandas.util import testing as tm
from pandas.io.formats.printing import pprint_thing
@@ -29,7 +30,7 @@ class Base(object):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
- 'ts', 'floats', 'empty', 'ts_rev'])
+ 'ts', 'floats', 'empty', 'ts_rev', 'multi'])
def setup_method(self, method):
@@ -54,6 +55,32 @@ def setup_method(self, method):
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
+ self.series_floats = Series(np.random.rand(4),
+ index=Float64Index(range(0, 8, 2)))
+ self.frame_floats = DataFrame(np.random.randn(4, 4),
+ index=Float64Index(range(0, 8, 2)),
+ columns=Float64Index(range(0, 12, 3)))
+ with catch_warnings(record=True):
+ self.panel_floats = Panel(np.random.rand(4, 4, 4),
+ items=Float64Index(range(0, 8, 2)),
+ major_axis=Float64Index(range(0, 12, 3)),
+ minor_axis=Float64Index(range(0, 16, 4)))
+
+ m_idces = [MultiIndex.from_product([[1, 2], [3, 4]]),
+ MultiIndex.from_product([[5, 6], [7, 8]]),
+ MultiIndex.from_product([[9, 10], [11, 12]])]
+
+ self.series_multi = Series(np.random.rand(4),
+ index=m_idces[0])
+ self.frame_multi = DataFrame(np.random.randn(4, 4),
+ index=m_idces[0],
+ columns=m_idces[1])
+ with catch_warnings(record=True):
+ self.panel_multi = Panel(np.random.rand(4, 4, 4),
+ items=m_idces[0],
+ major_axis=m_idces[1],
+ minor_axis=m_idces[2])
+
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
@@ -273,7 +300,8 @@ def _call(obj=obj):
# Panel deprecations
if isinstance(obj, Panel):
- with catch_warnings(record=True):
+ with catch_warnings():
+ filterwarnings("ignore", "\nPanel*", FutureWarning)
_call()
else:
_call()
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index e3f93924aca0d..32a56aeafc6ad 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -685,17 +685,23 @@ def test_floating_misc(self):
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
- result1 = s[[1.6, 5, 10]]
- result2 = s.loc[[1.6, 5, 10]]
- result3 = s.loc[[1.6, 5, 10]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result1 = s[[1.6, 5, 10]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result2 = s.loc[[1.6, 5, 10]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result3 = s.loc[[1.6, 5, 10]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series(
[np.nan, 2, 4], index=[1.6, 5, 10]))
- result1 = s[[0, 1, 2]]
- result2 = s.loc[[0, 1, 2]]
- result3 = s.loc[[0, 1, 2]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result1 = s[[0, 1, 2]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result2 = s.loc[[0, 1, 2]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result3 = s.loc[[0, 1, 2]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series(
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index c66310d10ebdc..77f865c66cd10 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -19,6 +19,7 @@
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas import NaT, DataFrame, Index, Series, MultiIndex
import pandas.util.testing as tm
+from pandas.compat import PY2
from pandas.tests.indexing.common import Base, _mklbl
@@ -192,15 +193,10 @@ def test_dups_fancy_indexing(self):
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
- # inconsistent returns for unique/duplicate indices when values are
- # missing
- df = DataFrame(np.random.randn(4, 3), index=list('ABCD'))
- expected = df.reindex(['E'])
-
+ # List containing only missing label
dfnu = DataFrame(np.random.randn(5, 3), index=list('AABCD'))
- with catch_warnings(record=True):
- result = dfnu.ix[['E']]
- tm.assert_frame_equal(result, expected)
+ with pytest.raises(KeyError):
+ dfnu.ix[['E']]
# ToDo: check_index_type can be True after GH 11497
@@ -225,6 +221,9 @@ def test_dups_fancy_indexing(self):
result = df.loc[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
+ @pytest.mark.skipif(PY2,
+ reason="GH-20770. Py2 unreliable warnings catching.")
+ def test_dups_fancy_indexing2(self):
# GH 5835
# dups on index and missing values
df = DataFrame(
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 39f4d2b7bd395..6ccff7e898a6a 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -12,6 +12,7 @@
from pandas.util import testing as tm
from pandas.tests.indexing.common import Base
from pandas.api.types import is_scalar
+from pandas.compat import PY2
class TestLoc(Base):
@@ -120,7 +121,7 @@ def test_loc_getitem_label_out_of_range(self):
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
- typs=['floats'], fails=TypeError)
+ typs=['floats'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
@@ -128,7 +129,7 @@ def test_loc_getitem_label_out_of_range(self):
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
- axes=0, fails=TypeError)
+ axes=0, fails=KeyError)
def test_loc_getitem_label_list(self):
@@ -152,17 +153,32 @@ def test_loc_getitem_label_list(self):
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
+ @pytest.mark.skipif(PY2, reason=("Catching warnings unreliable with "
+ "Python 2 (GH #20770)"))
def test_loc_getitem_label_list_with_missing(self):
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
- typs=['ints', 'uints'], axes=0, fails=KeyError)
+ self.check_result('list lbl', 'loc', [0, 2, 10], 'ix', [0, 2, 10],
+ typs=['ints', 'uints', 'floats'],
+ axes=0, fails=KeyError)
+
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
- typs=['ints', 'uints'], axes=1, fails=KeyError)
- self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
- typs=['ints', 'uints'], axes=2, fails=KeyError)
+ typs=['ints', 'uints', 'floats'],
+ axes=1, fails=KeyError)
+
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
+ typs=['ints', 'uints', 'floats'],
+ axes=2, fails=KeyError)
+
+ # GH 17758 - MultiIndex and missing keys
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ self.check_result('list lbl', 'loc', [(1, 3), (1, 4), (2, 5)],
+ 'ix', [(1, 3), (1, 4), (2, 5)],
+ typs=['multi'],
+ axes=0)
def test_getitem_label_list_with_missing(self):
s = Series(range(3), index=['a', 'b', 'c'])
diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py
index b964ec3874998..6df63c3981af3 100644
--- a/pandas/tests/series/indexing/test_numeric.py
+++ b/pandas/tests/series/indexing/test_numeric.py
@@ -57,14 +57,29 @@ def test_get_nan():
assert s.get(np.nan) is None
assert s.get(np.nan, default='Missing') == 'Missing'
- # ensure that fixing the above hasn't broken get
+
+def test_get_nan_multiple():
+ # GH 8569
+ # ensure that fixing "test_get_nan" above hasn't broken get
# with multiple elements
+ s = pd.Float64Index(range(10)).to_series()
+
+ idx = [2, 30]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ assert_series_equal(s.get(idx),
+ Series([2, np.nan], index=idx))
+
+ idx = [2, np.nan]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ assert_series_equal(s.get(idx),
+ Series([2, np.nan], index=idx))
+
+ # GH 17295 - all missing keys
idx = [20, 30]
- assert_series_equal(s.get(idx),
- Series([np.nan] * 2, index=idx))
+ assert(s.get(idx) is None)
+
idx = [np.nan, np.nan]
- assert_series_equal(s.get(idx),
- Series([np.nan] * 2, index=idx))
+ assert(s.get(idx) is None)
def test_delitem():
| - [x] closes #17758
- [x] closes #20748
- [x] closes #20753
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Main changes:
- the presence of at least one key (and of all of them, for the temporary warning) is now checked _after_ building the indexer, using the indexer itself, in a unique place
- for this to be possible, ``obj.reindex`` was replaced with ``obj._reindex_with_indexers``
- ``_multi_take`` is uglier than before... but it's ugly anyway, and must be removed in a future refactoring (in which first indexers are built in all possible code paths, _then_ values are extracted)
- ``_has_valid_type`` was mostly used in "assertion mode" (without looking at its return value), so I changed it to ``_validate_key`` and now it is _only_ used in "assertion mode"
Asv (``--bench indexing``) give
```
before after ratio
[3a2e9e6c] [25711d3d]
+ 162±0.2ms 273±5ms 1.68 indexing.NumericSeriesIndexing.time_getitem_array(<class 'pandas.core.indexes.numeric.Float64Index'>)
+ 162±1ms 269±0.7ms 1.66 indexing.NumericSeriesIndexing.time_loc_array(<class 'pandas.core.indexes.numeric.Float64Index'>)
+ 162±0.6ms 267±1ms 1.65 indexing.NumericSeriesIndexing.time_ix_list_like(<class 'pandas.core.indexes.numeric.Float64Index'>)
+ 164±2ms 270±1ms 1.64 indexing.NumericSeriesIndexing.time_ix_array(<class 'pandas.core.indexes.numeric.Float64Index'>)
+ 166±0.6ms 273±0.6ms 1.64 indexing.NumericSeriesIndexing.time_getitem_lists(<class 'pandas.core.indexes.numeric.Float64Index'>)
+ 163±0.5ms 267±0.8ms 1.64 indexing.NumericSeriesIndexing.time_getitem_list_like(<class 'pandas.core.indexes.numeric.Float64Index'>)
+ 163±3ms 267±0.4ms 1.64 indexing.NumericSeriesIndexing.time_loc_list_like(<class 'pandas.core.indexes.numeric.Float64Index'>)
+ 412±3μs 664±4μs 1.61 indexing.NumericSeriesIndexing.time_ix_list_like(<class 'pandas.core.indexes.numeric.Int64Index'>)
+ 1.08±0ms 1.42±0.05ms 1.32 indexing.NumericSeriesIndexing.time_ix_array(<class 'pandas.core.indexes.numeric.Int64Index'>)
+ 1.14±0.01ms 1.31±0.02ms 1.15 indexing.PanelIndexing.time_subset
- 133±0.2ms 121±0.4ms 0.91 frame_methods.Iteration.time_iteritems_indexing
- 442±2ns 399±2ns 0.90 indexing.MethodLookup.time_lookup_iloc
- 145±0.8μs 130±0.8μs 0.90 indexing.IntervalIndexing.time_getitem_list
- 151±2μs 134±1μs 0.89 indexing.AssignTimeseriesIndex.time_frame_assign_timeseries_index
- 48.5±0.8μs 41.2±0.1μs 0.85 indexing.IntervalIndexing.time_getitem_scalar
- 44.3±1μs 35.6±0.1μs 0.80 indexing.NumericSeriesIndexing.time_iloc_list_like(<class 'pandas.core.indexes.numeric.Float64Index'>)
- 3.81±0.02ms 2.06±0.02ms 0.54 indexing.DataFrameNumericIndexing.time_loc_dups
SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.
```
notice that ``FloatIndex`` was _not_ being checked at all (i.e. #17758 applied to it too), now it is.
In any case, as I wrote above, there are several improvements still to be made, but I want to go gradually because the complexity of this PR is already pretty high for me. | https://api.github.com/repos/pandas-dev/pandas/pulls/20770 | 2018-04-20T18:16:11Z | 2018-05-01T14:13:08Z | 2018-05-01T14:13:07Z | 2018-05-01T23:02:07Z |
Use conda-env for CI environments | diff --git a/.travis.yml b/.travis.yml
index e4dab4eb53afb..4e25380a7d941 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -34,33 +34,32 @@ matrix:
- os: osx
language: generic
env:
- - JOB="3.5_OSX" TEST_ARGS="--skip-slow --skip-network"
+ - JOB="3.5, OSX" ENV_FILE="ci/travis-35-osx.yaml" TEST_ARGS="--skip-slow --skip-network"
- dist: trusty
env:
- - JOB="2.7_LOCALE" LOCALE_OVERRIDE="zh_CN.UTF-8" SLOW=true
+ - JOB="2.7, locale, slow, old NumPy" ENV_FILE="ci/travis-27-locale.yaml" LOCALE_OVERRIDE="zh_CN.UTF-8" SLOW=true
addons:
apt:
packages:
- language-pack-zh-hans
- dist: trusty
env:
- - JOB="2.7" TEST_ARGS="--skip-slow" LINT=true
+ - JOB="2.7, lint" ENV_FILE="ci/travis-27.yaml" TEST_ARGS="--skip-slow" LINT=true
addons:
apt:
packages:
- python-gtk2
- # In allow_failures
- dist: trusty
env:
- - JOB="3.6" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" CONDA_FORGE=true COVERAGE=true
+ - JOB="3.6, coverage" ENV_FILE="ci/travis-36.yaml" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" COVERAGE=true
# In allow_failures
- dist: trusty
env:
- - JOB="3.6_SLOW" SLOW=true
+ - JOB="3.6, slow" ENV_FILE="ci/travis-36-slow.yaml" SLOW=true
# In allow_failures
- dist: trusty
env:
- - JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate"
+ - JOB="3.6, NumPy dev" ENV_FILE="ci/travis-36-numpydev.yaml" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate"
addons:
apt:
packages:
@@ -68,21 +67,21 @@ matrix:
# In allow_failures
- dist: trusty
env:
- - JOB="3.6_DOC" DOC=true
+ - JOB="3.6, doc" ENV_FILE="ci/travis-36-doc.yaml" DOC=true
allow_failures:
- dist: trusty
env:
- - JOB="3.6_SLOW" SLOW=true
+ - JOB="3.6, slow" ENV_FILE="ci/travis-36-slow.yaml" SLOW=true
- dist: trusty
env:
- - JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate"
+ - JOB="3.6, NumPy dev" ENV_FILE="ci/travis-36-numpydev.yaml" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate"
addons:
apt:
packages:
- xsel
- dist: trusty
env:
- - JOB="3.6_DOC" DOC=true
+ - JOB="3.6, doc" ENV_FILE="ci/travis-36-doc.yaml" DOC=true
before_install:
- echo "before_install"
diff --git a/appveyor.yml b/appveyor.yml
index ba001208864a8..f70fc829ec971 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -73,19 +73,12 @@ install:
- cmd: conda info -a
# create our env
- - cmd: conda create -n pandas python=%PYTHON_VERSION% cython pytest>=3.1.0 pytest-xdist
+ - cmd: conda env create -q -n pandas --file=ci\appveyor-%CONDA_PY%.yaml
- cmd: activate pandas
- - cmd: pip install moto
- - SET REQ=ci\requirements-%PYTHON_VERSION%_WIN.run
- - cmd: echo "installing requirements from %REQ%"
- - cmd: conda install -n pandas --file=%REQ%
- cmd: conda list -n pandas
- - cmd: echo "installing requirements from %REQ% - done"
-
- # add some pip only reqs to the env
- - SET REQ=ci\requirements-%PYTHON_VERSION%_WIN.pip
- - cmd: echo "installing requirements from %REQ%"
- - cmd: pip install -Ur %REQ%
+ # uninstall pandas if it's present
+ - cmd: conda remove pandas -y --force & exit 0
+ - cmd: pip uninstall -y pandas & exit 0
# build em using the local source checkout in the correct windows env
- cmd: '%CMD_IN_ENV% python setup.py build_ext --inplace'
diff --git a/ci/appveyor-27.yaml b/ci/appveyor-27.yaml
new file mode 100644
index 0000000000000..84107c605b14f
--- /dev/null
+++ b/ci/appveyor-27.yaml
@@ -0,0 +1,29 @@
+name: pandas
+channels:
+ - defaults
+ - conda-forge
+dependencies:
+ - beautifulsoup4
+ - bottleneck
+ - dateutil
+ - html5lib
+ - jinja2=2.8
+ - lxml
+ - matplotlib
+ - numexpr
+ - numpy=1.10*
+ - openpyxl
+ - pytables==3.2.2
+ - python=2.7.*
+ - pytz
+ - s3fs
+ - scipy
+ - sqlalchemy
+ - xlrd
+ - xlsxwriter
+ - xlwt
+ # universal
+ - cython
+ - pytest
+ - pytest-xdist
+ - moto
diff --git a/ci/appveyor-36.yaml b/ci/appveyor-36.yaml
new file mode 100644
index 0000000000000..5e370de39958a
--- /dev/null
+++ b/ci/appveyor-36.yaml
@@ -0,0 +1,27 @@
+name: pandas
+channels:
+ - defaults
+ - conda-forge
+dependencies:
+ - blosc
+ - bottleneck
+ - fastparquet
+ - feather-format
+ - matplotlib
+ - numexpr
+ - numpy=1.13*
+ - openpyxl
+ - pyarrow
+ - pytables
+ - python-dateutil
+ - python=3.6.*
+ - pytz
+ - scipy
+ - thrift=0.10*
+ - xlrd
+ - xlsxwriter
+ - xlwt
+ # universal
+ - cython
+ - pytest
+ - pytest-xdist
diff --git a/ci/circle-27-compat.yaml b/ci/circle-27-compat.yaml
new file mode 100644
index 0000000000000..81a48d4edf11c
--- /dev/null
+++ b/ci/circle-27-compat.yaml
@@ -0,0 +1,28 @@
+name: pandas
+channels:
+ - defaults
+ - conda-forge
+dependencies:
+ - bottleneck=1.0.0
+ - cython=0.24
+ - jinja2=2.8
+ - numexpr=2.4.4 # we test that we correctly don't use an unsupported numexpr
+ - numpy=1.9.2
+ - openpyxl
+ - psycopg2
+ - pytables=3.2.2
+ - python-dateutil=2.5.0
+ - python=2.7*
+ - pytz=2013b
+ - scipy=0.14.0
+ - sqlalchemy=0.7.8
+ - xlrd=0.9.2
+ - xlsxwriter=0.5.2
+ - xlwt=0.7.5
+ # universal
+ - pytest
+ - pytest-xdist
+ - pip:
+ - html5lib==1.0b2
+ - beautifulsoup4==4.2.1
+ - pymysql==0.6.0
diff --git a/ci/circle-35-ascii.yaml b/ci/circle-35-ascii.yaml
new file mode 100644
index 0000000000000..602c414b49bb2
--- /dev/null
+++ b/ci/circle-35-ascii.yaml
@@ -0,0 +1,13 @@
+name: pandas
+channels:
+ - defaults
+dependencies:
+ - cython
+ - nomkl
+ - numpy
+ - python-dateutil
+ - python=3.5*
+ - pytz
+ # universal
+ - pytest
+ - pytest-xdist
diff --git a/ci/circle-36-locale.yaml b/ci/circle-36-locale.yaml
new file mode 100644
index 0000000000000..cc852c1e2aeeb
--- /dev/null
+++ b/ci/circle-36-locale.yaml
@@ -0,0 +1,33 @@
+name: pandas
+channels:
+ - defaults
+ - conda-forge
+dependencies:
+ - beautifulsoup4
+ - cython
+ - html5lib
+ - ipython
+ - jinja2
+ - lxml
+ - matplotlib
+ - nomkl
+ - numexpr
+ - numpy
+ - openpyxl
+ - psycopg2
+ - pymysql
+ - pytables
+ - python-dateutil
+ - python=3.6*
+ - pytz
+ - s3fs
+ - scipy
+ - sqlalchemy
+ - xarray
+ - xlrd
+ - xlsxwriter
+ - xlwt
+ # universal
+ - pytest
+ - pytest-xdist
+ - moto
diff --git a/ci/circle-36-locale_slow.yaml b/ci/circle-36-locale_slow.yaml
new file mode 100644
index 0000000000000..cc852c1e2aeeb
--- /dev/null
+++ b/ci/circle-36-locale_slow.yaml
@@ -0,0 +1,33 @@
+name: pandas
+channels:
+ - defaults
+ - conda-forge
+dependencies:
+ - beautifulsoup4
+ - cython
+ - html5lib
+ - ipython
+ - jinja2
+ - lxml
+ - matplotlib
+ - nomkl
+ - numexpr
+ - numpy
+ - openpyxl
+ - psycopg2
+ - pymysql
+ - pytables
+ - python-dateutil
+ - python=3.6*
+ - pytz
+ - s3fs
+ - scipy
+ - sqlalchemy
+ - xarray
+ - xlrd
+ - xlsxwriter
+ - xlwt
+ # universal
+ - pytest
+ - pytest-xdist
+ - moto
diff --git a/ci/install_circle.sh b/ci/install_circle.sh
index fd79f907625e9..5ffff84c88488 100755
--- a/ci/install_circle.sh
+++ b/ci/install_circle.sh
@@ -46,10 +46,6 @@ echo "[environmental variable file]"
cat $ENVS_FILE
source $ENVS_FILE
-export REQ_BUILD=ci/requirements-${JOB}.build
-export REQ_RUN=ci/requirements-${JOB}.run
-export REQ_PIP=ci/requirements-${JOB}.pip
-
# edit the locale override if needed
if [ -n "$LOCALE_OVERRIDE" ]; then
echo "[Adding locale to the first line of pandas/__init__.py]"
@@ -62,25 +58,23 @@ if [ -n "$LOCALE_OVERRIDE" ]; then
fi
# create envbuild deps
-echo "[create env: ${REQ_BUILD}]"
-time conda create -n pandas -q --file=${REQ_BUILD} || exit 1
-time conda install -n pandas pytest>=3.1.0 || exit 1
+echo "[create env]"
+time conda env create -q -n pandas --file="${ENV_FILE}" || exit 1
source activate pandas
-time pip install moto || exit 1
+
+# remove any installed pandas package
+# w/o removing anything else
+echo
+echo "[removing installed pandas]"
+conda remove pandas -y --force
+pip uninstall -y pandas
# build but don't install
echo "[build em]"
time python setup.py build_ext --inplace || exit 1
-# we may have run installations
-echo "[conda installs: ${REQ_RUN}]"
-if [ -e ${REQ_RUN} ]; then
- time conda install -q --file=${REQ_RUN} || exit 1
-fi
+echo
+echo "[show environment]"
-# we may have additional pip installs
-echo "[pip installs: ${REQ_PIP}]"
-if [ -e ${REQ_PIP} ]; then
- pip install -r $REQ_PIP
-fi
+conda list
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index 9ccb4baf25505..fd4a36f86db6c 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -50,16 +50,6 @@ conda config --set ssl_verify false || exit 1
conda config --set quiet true --set always_yes true --set changeps1 false || exit 1
conda update -q conda
-echo
-echo "[add channels]"
-conda config --remove channels defaults || exit 1
-conda config --add channels defaults || exit 1
-
-if [ "$CONDA_FORGE" ]; then
- # add conda-forge channel as priority
- conda config --add channels conda-forge || exit 1
-fi
-
# Useful for debugging any issues with conda
conda info -a || exit 1
@@ -90,55 +80,10 @@ echo
echo "[create env]"
# create our environment
-REQ="ci/requirements-${JOB}.build"
-time conda create -n pandas --file=${REQ} || exit 1
+time conda env create -q -n pandas --file="${ENV_FILE}" || exit 1
source activate pandas
-# may have addtl installation instructions for this build
-echo
-echo "[build addtl installs]"
-REQ="ci/requirements-${JOB}.build.sh"
-if [ -e ${REQ} ]; then
- time bash $REQ || exit 1
-fi
-
-time conda install -n pandas pytest>=3.1.0
-time pip install -q pytest-xdist moto
-
-if [ "$LINT" ]; then
- conda install flake8=3.4.1
- pip install cpplint
-fi
-
-if [ "$COVERAGE" ]; then
- pip install coverage pytest-cov
-fi
-
-# we may have run installations
-echo
-echo "[conda installs]"
-REQ="ci/requirements-${JOB}.run"
-if [ -e ${REQ} ]; then
- time conda install -n pandas --file=${REQ} || exit 1
-fi
-
-# we may have additional pip installs
-echo
-echo "[pip installs]"
-REQ="ci/requirements-${JOB}.pip"
-if [ -e ${REQ} ]; then
- pip install -r $REQ
-fi
-
-# may have addtl installation instructions for this build
-echo
-echo "[addtl installs]"
-REQ="ci/requirements-${JOB}.sh"
-if [ -e ${REQ} ]; then
- time bash $REQ || exit 1
-fi
-
# remove any installed pandas package
# w/o removing anything else
echo
@@ -156,8 +101,8 @@ echo "[running setup.py develop]"
python setup.py develop || exit 1
echo
-echo "[show pandas]"
-conda list pandas
+echo "[show environment]"
+conda list
echo
echo "[done]"
diff --git a/ci/requirements-2.7.build b/ci/requirements-2.7.build
deleted file mode 100644
index 17d34f3895c64..0000000000000
--- a/ci/requirements-2.7.build
+++ /dev/null
@@ -1,6 +0,0 @@
-python=2.7*
-python-dateutil=2.5.0
-pytz=2013b
-nomkl
-numpy=1.13*
-cython=0.24
diff --git a/ci/requirements-2.7.pip b/ci/requirements-2.7.pip
deleted file mode 100644
index 876d9e978fa84..0000000000000
--- a/ci/requirements-2.7.pip
+++ /dev/null
@@ -1,10 +0,0 @@
-blosc
-pandas-gbq
-html5lib
-beautifulsoup4
-pathlib
-backports.lzma
-py
-PyCrypto
-mock
-ipython
diff --git a/ci/requirements-2.7.run b/ci/requirements-2.7.run
deleted file mode 100644
index 7c10b98fb6e14..0000000000000
--- a/ci/requirements-2.7.run
+++ /dev/null
@@ -1,20 +0,0 @@
-python-dateutil=2.5.0
-pytz=2013b
-numpy
-xlwt=0.7.5
-numexpr
-pytables
-matplotlib
-openpyxl=2.4.0
-xlrd=0.9.2
-sqlalchemy=0.9.6
-lxml
-scipy
-xlsxwriter=0.5.2
-s3fs
-bottleneck
-psycopg2
-patsy
-pymysql=0.6.3
-jinja2=2.8
-xarray=0.8.0
diff --git a/ci/requirements-2.7.sh b/ci/requirements-2.7.sh
deleted file mode 100644
index 95169e5dcce57..0000000000000
--- a/ci/requirements-2.7.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-source activate pandas
-
-echo "install 27"
-
-conda install -n pandas -c conda-forge feather-format pyarrow=0.4.1 jemalloc=4.5.0.post fastparquet
diff --git a/ci/requirements-2.7_COMPAT.build b/ci/requirements-2.7_COMPAT.build
deleted file mode 100644
index 0a83a7346e8b5..0000000000000
--- a/ci/requirements-2.7_COMPAT.build
+++ /dev/null
@@ -1,5 +0,0 @@
-python=2.7*
-numpy=1.9.2
-cython=0.24
-python-dateutil=2.5.0
-pytz=2013b
diff --git a/ci/requirements-2.7_COMPAT.pip b/ci/requirements-2.7_COMPAT.pip
deleted file mode 100644
index 0e154dbc07525..0000000000000
--- a/ci/requirements-2.7_COMPAT.pip
+++ /dev/null
@@ -1,4 +0,0 @@
-html5lib==1.0b2
-beautifulsoup4==4.2.1
-openpyxl
-argparse
diff --git a/ci/requirements-2.7_COMPAT.run b/ci/requirements-2.7_COMPAT.run
deleted file mode 100644
index c3daed6e6e1da..0000000000000
--- a/ci/requirements-2.7_COMPAT.run
+++ /dev/null
@@ -1,14 +0,0 @@
-numpy=1.9.2
-python-dateutil=2.5.0
-pytz=2013b
-scipy=0.14.0
-xlwt=0.7.5
-xlrd=0.9.2
-bottleneck=1.0.0
-numexpr=2.4.4 # we test that we correctly don't use an unsupported numexpr
-pytables=3.2.2
-psycopg2
-pymysql=0.6.0
-sqlalchemy=0.7.8
-xlsxwriter=0.5.2
-jinja2=2.8
diff --git a/ci/requirements-2.7_LOCALE.build b/ci/requirements-2.7_LOCALE.build
deleted file mode 100644
index a6f2e25387910..0000000000000
--- a/ci/requirements-2.7_LOCALE.build
+++ /dev/null
@@ -1,5 +0,0 @@
-python=2.7*
-python-dateutil
-pytz=2013b
-numpy=1.9.2
-cython=0.24
diff --git a/ci/requirements-2.7_LOCALE.pip b/ci/requirements-2.7_LOCALE.pip
deleted file mode 100644
index 1b825bbf492ca..0000000000000
--- a/ci/requirements-2.7_LOCALE.pip
+++ /dev/null
@@ -1,3 +0,0 @@
-html5lib==1.0b2
-beautifulsoup4==4.2.1
-blosc
diff --git a/ci/requirements-2.7_LOCALE.run b/ci/requirements-2.7_LOCALE.run
deleted file mode 100644
index 0a809a7dd6e5d..0000000000000
--- a/ci/requirements-2.7_LOCALE.run
+++ /dev/null
@@ -1,12 +0,0 @@
-python-dateutil
-pytz
-numpy=1.9.2
-xlwt=0.7.5
-openpyxl=2.4.0
-xlsxwriter=0.5.2
-xlrd=0.9.2
-bottleneck=1.0.0
-matplotlib=1.4.3
-sqlalchemy=0.8.1
-lxml
-scipy
diff --git a/ci/requirements-2.7_WIN.pip b/ci/requirements-2.7_WIN.pip
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/ci/requirements-2.7_WIN.run b/ci/requirements-2.7_WIN.run
deleted file mode 100644
index c4ca7fc736bb1..0000000000000
--- a/ci/requirements-2.7_WIN.run
+++ /dev/null
@@ -1,18 +0,0 @@
-dateutil
-pytz
-numpy=1.10*
-xlwt
-numexpr
-pytables==3.2.2
-matplotlib
-openpyxl
-xlrd
-sqlalchemy
-lxml
-scipy
-xlsxwriter
-s3fs
-bottleneck
-html5lib
-beautifulsoup4
-jinja2=2.8
diff --git a/ci/requirements-3.5.build b/ci/requirements-3.5.build
deleted file mode 100644
index f7befe3b31865..0000000000000
--- a/ci/requirements-3.5.build
+++ /dev/null
@@ -1,6 +0,0 @@
-python=3.5*
-python-dateutil
-pytz
-nomkl
-numpy
-cython
diff --git a/ci/requirements-3.5.pip b/ci/requirements-3.5.pip
deleted file mode 100644
index c9565f2173070..0000000000000
--- a/ci/requirements-3.5.pip
+++ /dev/null
@@ -1,2 +0,0 @@
-xarray==0.9.1
-pandas_gbq
diff --git a/ci/requirements-3.5.run b/ci/requirements-3.5.run
deleted file mode 100644
index 669cf437f2164..0000000000000
--- a/ci/requirements-3.5.run
+++ /dev/null
@@ -1,20 +0,0 @@
-pytz
-numpy
-openpyxl
-xlsxwriter
-xlrd
-xlwt
-scipy
-numexpr
-pytables
-html5lib
-lxml
-matplotlib
-jinja2
-bottleneck
-sqlalchemy
-pymysql
-psycopg2
-s3fs
-beautifulsoup4
-ipython
diff --git a/ci/requirements-3.5.sh b/ci/requirements-3.5.sh
deleted file mode 100644
index 529e1e8742722..0000000000000
--- a/ci/requirements-3.5.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-source activate pandas
-
-echo "install 35"
-
-# pip install python-dateutil to get latest
-conda remove -n pandas python-dateutil --force
-pip install python-dateutil
-
-conda install -n pandas -c conda-forge feather-format pyarrow=0.7.1
diff --git a/ci/requirements-3.5_ASCII.build b/ci/requirements-3.5_ASCII.build
deleted file mode 100644
index f7befe3b31865..0000000000000
--- a/ci/requirements-3.5_ASCII.build
+++ /dev/null
@@ -1,6 +0,0 @@
-python=3.5*
-python-dateutil
-pytz
-nomkl
-numpy
-cython
diff --git a/ci/requirements-3.5_ASCII.run b/ci/requirements-3.5_ASCII.run
deleted file mode 100644
index b9d543f557d06..0000000000000
--- a/ci/requirements-3.5_ASCII.run
+++ /dev/null
@@ -1,3 +0,0 @@
-python-dateutil
-pytz
-numpy
diff --git a/ci/requirements-3.5_OSX.build b/ci/requirements-3.5_OSX.build
deleted file mode 100644
index f5bc01b67a20a..0000000000000
--- a/ci/requirements-3.5_OSX.build
+++ /dev/null
@@ -1,4 +0,0 @@
-python=3.5*
-nomkl
-numpy=1.10.4
-cython
diff --git a/ci/requirements-3.5_OSX.pip b/ci/requirements-3.5_OSX.pip
deleted file mode 100644
index d1fc1fe24a079..0000000000000
--- a/ci/requirements-3.5_OSX.pip
+++ /dev/null
@@ -1 +0,0 @@
-python-dateutil==2.5.3
diff --git a/ci/requirements-3.5_OSX.run b/ci/requirements-3.5_OSX.run
deleted file mode 100644
index 1d83474d10f2f..0000000000000
--- a/ci/requirements-3.5_OSX.run
+++ /dev/null
@@ -1,16 +0,0 @@
-pytz
-numpy=1.10.4
-openpyxl
-xlsxwriter
-xlrd
-xlwt
-numexpr
-pytables
-html5lib
-lxml
-matplotlib
-jinja2
-bottleneck
-xarray
-s3fs
-beautifulsoup4
diff --git a/ci/requirements-3.5_OSX.sh b/ci/requirements-3.5_OSX.sh
deleted file mode 100644
index c2978b175968c..0000000000000
--- a/ci/requirements-3.5_OSX.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-source activate pandas
-
-echo "install 35_OSX"
-
-conda install -n pandas -c conda-forge feather-format==0.3.1 fastparquet
diff --git a/ci/requirements-3.6.build b/ci/requirements-3.6.build
deleted file mode 100644
index 1c4b46aea3865..0000000000000
--- a/ci/requirements-3.6.build
+++ /dev/null
@@ -1,6 +0,0 @@
-python=3.6*
-python-dateutil
-pytz
-nomkl
-numpy
-cython
diff --git a/ci/requirements-3.6.pip b/ci/requirements-3.6.pip
deleted file mode 100644
index 753a60d6c119a..0000000000000
--- a/ci/requirements-3.6.pip
+++ /dev/null
@@ -1 +0,0 @@
-brotlipy
diff --git a/ci/requirements-3.6.run b/ci/requirements-3.6.run
deleted file mode 100644
index 822144a80bc9a..0000000000000
--- a/ci/requirements-3.6.run
+++ /dev/null
@@ -1,25 +0,0 @@
-python-dateutil
-pytz
-numpy
-scipy
-openpyxl
-xlsxwriter
-xlrd
-xlwt
-numexpr
-pytables
-matplotlib
-lxml
-html5lib
-jinja2
-sqlalchemy
-pymysql
-feather-format
-pyarrow
-psycopg2
-python-snappy
-fastparquet
-beautifulsoup4
-s3fs
-xarray
-ipython
diff --git a/ci/requirements-3.6.sh b/ci/requirements-3.6.sh
deleted file mode 100644
index f5c3dbf59a29d..0000000000000
--- a/ci/requirements-3.6.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-source activate pandas
-
-echo "[install 3.6 downstream deps]"
-
-conda install -n pandas -c conda-forge pandas-datareader xarray geopandas seaborn statsmodels scikit-learn dask
diff --git a/ci/requirements-3.6_DOC.build b/ci/requirements-3.6_DOC.build
deleted file mode 100644
index bc72eed2a0d4e..0000000000000
--- a/ci/requirements-3.6_DOC.build
+++ /dev/null
@@ -1,5 +0,0 @@
-python=3.6*
-python-dateutil
-pytz
-numpy=1.13*
-cython
diff --git a/ci/requirements-3.6_DOC.run b/ci/requirements-3.6_DOC.run
deleted file mode 100644
index 084f38ce17eb2..0000000000000
--- a/ci/requirements-3.6_DOC.run
+++ /dev/null
@@ -1,25 +0,0 @@
-ipython
-ipykernel
-ipywidgets
-sphinx
-nbconvert
-nbformat
-notebook
-matplotlib
-seaborn
-scipy
-lxml
-beautifulsoup4
-html5lib
-pytables
-python-snappy
-openpyxl
-xlrd
-xlwt
-xlsxwriter
-sqlalchemy
-numexpr
-bottleneck
-statsmodels
-xarray
-pyqt
diff --git a/ci/requirements-3.6_DOC.sh b/ci/requirements-3.6_DOC.sh
deleted file mode 100644
index aec0f62148622..0000000000000
--- a/ci/requirements-3.6_DOC.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-source activate pandas
-
-echo "[install DOC_BUILD deps]"
-
-pip install pandas-gbq
-
-conda install -n pandas -c conda-forge feather-format pyarrow nbsphinx pandoc fastparquet
-
-conda install -n pandas -c r r rpy2 --yes
diff --git a/ci/requirements-3.6_LOCALE.build b/ci/requirements-3.6_LOCALE.build
deleted file mode 100644
index 1c4b46aea3865..0000000000000
--- a/ci/requirements-3.6_LOCALE.build
+++ /dev/null
@@ -1,6 +0,0 @@
-python=3.6*
-python-dateutil
-pytz
-nomkl
-numpy
-cython
diff --git a/ci/requirements-3.6_LOCALE.pip b/ci/requirements-3.6_LOCALE.pip
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/ci/requirements-3.6_LOCALE.run b/ci/requirements-3.6_LOCALE.run
deleted file mode 100644
index ad54284c6f7e3..0000000000000
--- a/ci/requirements-3.6_LOCALE.run
+++ /dev/null
@@ -1,22 +0,0 @@
-python-dateutil
-pytz
-numpy
-scipy
-openpyxl
-xlsxwriter
-xlrd
-xlwt
-numexpr
-pytables
-matplotlib
-lxml
-html5lib
-jinja2
-sqlalchemy
-pymysql
-# feather-format (not available on defaults ATM)
-psycopg2
-beautifulsoup4
-s3fs
-xarray
-ipython
diff --git a/ci/requirements-3.6_LOCALE_SLOW.build b/ci/requirements-3.6_LOCALE_SLOW.build
deleted file mode 100644
index 1c4b46aea3865..0000000000000
--- a/ci/requirements-3.6_LOCALE_SLOW.build
+++ /dev/null
@@ -1,6 +0,0 @@
-python=3.6*
-python-dateutil
-pytz
-nomkl
-numpy
-cython
diff --git a/ci/requirements-3.6_LOCALE_SLOW.pip b/ci/requirements-3.6_LOCALE_SLOW.pip
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/ci/requirements-3.6_LOCALE_SLOW.run b/ci/requirements-3.6_LOCALE_SLOW.run
deleted file mode 100644
index ad54284c6f7e3..0000000000000
--- a/ci/requirements-3.6_LOCALE_SLOW.run
+++ /dev/null
@@ -1,22 +0,0 @@
-python-dateutil
-pytz
-numpy
-scipy
-openpyxl
-xlsxwriter
-xlrd
-xlwt
-numexpr
-pytables
-matplotlib
-lxml
-html5lib
-jinja2
-sqlalchemy
-pymysql
-# feather-format (not available on defaults ATM)
-psycopg2
-beautifulsoup4
-s3fs
-xarray
-ipython
diff --git a/ci/requirements-3.6_NUMPY_DEV.build b/ci/requirements-3.6_NUMPY_DEV.build
deleted file mode 100644
index 336fbe86b57d8..0000000000000
--- a/ci/requirements-3.6_NUMPY_DEV.build
+++ /dev/null
@@ -1,2 +0,0 @@
-python=3.6*
-pytz
diff --git a/ci/requirements-3.6_NUMPY_DEV.build.sh b/ci/requirements-3.6_NUMPY_DEV.build.sh
deleted file mode 100644
index fd79142c5cebb..0000000000000
--- a/ci/requirements-3.6_NUMPY_DEV.build.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-source activate pandas
-
-echo "install numpy master wheel"
-
-# remove the system installed numpy
-pip uninstall numpy -y
-
-# install numpy wheel from master
-PRE_WHEELS="https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com"
-pip install --pre --upgrade --timeout=60 -f $PRE_WHEELS numpy scipy
-
-# install dateutil from master
-pip install -U git+git://github.com/dateutil/dateutil.git
-
-# cython via pip
-pip install cython
-
-true
diff --git a/ci/requirements-3.6_NUMPY_DEV.pip b/ci/requirements-3.6_NUMPY_DEV.pip
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/ci/requirements-3.6_NUMPY_DEV.run b/ci/requirements-3.6_NUMPY_DEV.run
deleted file mode 100644
index af44f198c687e..0000000000000
--- a/ci/requirements-3.6_NUMPY_DEV.run
+++ /dev/null
@@ -1 +0,0 @@
-pytz
diff --git a/ci/requirements-3.6_SLOW.build b/ci/requirements-3.6_SLOW.build
deleted file mode 100644
index bdcfe28105866..0000000000000
--- a/ci/requirements-3.6_SLOW.build
+++ /dev/null
@@ -1,5 +0,0 @@
-python=3.6*
-python-dateutil
-pytz
-numpy
-cython
diff --git a/ci/requirements-3.6_SLOW.pip b/ci/requirements-3.6_SLOW.pip
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/ci/requirements-3.6_SLOW.run b/ci/requirements-3.6_SLOW.run
deleted file mode 100644
index ab5253ad99e51..0000000000000
--- a/ci/requirements-3.6_SLOW.run
+++ /dev/null
@@ -1,19 +0,0 @@
-python-dateutil
-pytz
-numpy
-matplotlib
-scipy
-patsy
-xlwt
-openpyxl
-xlsxwriter
-xlrd
-numexpr
-pytables
-sqlalchemy
-lxml
-s3fs
-psycopg2
-pymysql
-html5lib
-beautifulsoup4
diff --git a/ci/requirements-3.6_WIN.pip b/ci/requirements-3.6_WIN.pip
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/ci/requirements-3.6_WIN.run b/ci/requirements-3.6_WIN.run
deleted file mode 100644
index 3042888763863..0000000000000
--- a/ci/requirements-3.6_WIN.run
+++ /dev/null
@@ -1,17 +0,0 @@
-python-dateutil
-pytz
-numpy=1.13*
-bottleneck
-openpyxl
-xlsxwriter
-xlrd
-xlwt
-scipy
-feather-format
-numexpr
-pytables
-matplotlib
-blosc
-thrift=0.10*
-fastparquet
-pyarrow
diff --git a/ci/travis-27-locale.yaml b/ci/travis-27-locale.yaml
new file mode 100644
index 0000000000000..1312c1296d46a
--- /dev/null
+++ b/ci/travis-27-locale.yaml
@@ -0,0 +1,27 @@
+name: pandas
+channels:
+ - defaults
+ - conda-forge
+dependencies:
+ - bottleneck=1.0.0
+ - cython=0.24
+ - lxml
+ - matplotlib=1.4.3
+ - numpy=1.9.2
+ - openpyxl=2.4.0
+ - python-dateutil
+ - python-blosc
+ - python=2.7
+ - pytz
+ - pytz=2013b
+ - scipy
+ - sqlalchemy=0.8.1
+ - xlrd=0.9.2
+ - xlsxwriter=0.5.2
+ - xlwt=0.7.5
+ # universal
+ - pytest
+ - pytest-xdist
+ - pip:
+ - html5lib==1.0b2
+ - beautifulsoup4==4.2.1
diff --git a/ci/travis-27.yaml b/ci/travis-27.yaml
new file mode 100644
index 0000000000000..22b993a2da886
--- /dev/null
+++ b/ci/travis-27.yaml
@@ -0,0 +1,49 @@
+name: pandas
+channels:
+ - defaults
+ - conda-forge
+dependencies:
+ - beautifulsoup4
+ - bottleneck
+ - cython=0.24
+ - fastparquet
+ - feather-format
+ - flake8=3.4.1
+ - html5lib
+ - ipython
+ - jemalloc=4.5.0.post
+ - jinja2=2.8
+ - lxml
+ - matplotlib
+ - mock
+ - nomkl
+ - numexpr
+ - numpy=1.13*
+ - openpyxl=2.4.0
+ - patsy
+ - psycopg2
+ - py
+ - pyarrow=0.4.1
+ - PyCrypto
+ - pymysql=0.6.3
+ - pytables
+ - python-blosc
+ - python-dateutil=2.5.0
+ - python=2.7*
+ - pytz=2013b
+ - s3fs
+ - scipy
+ - sqlalchemy=0.9.6
+ - xarray=0.8.0
+ - xlrd=0.9.2
+ - xlsxwriter=0.5.2
+ - xlwt=0.7.5
+ # universal
+ - pytest
+ - pytest-xdist
+ - moto
+ - pip:
+ - backports.lzma
+ - cpplint
+ - pandas-gbq
+ - pathlib
diff --git a/ci/travis-35-osx.yaml b/ci/travis-35-osx.yaml
new file mode 100644
index 0000000000000..e74abac4c9775
--- /dev/null
+++ b/ci/travis-35-osx.yaml
@@ -0,0 +1,27 @@
+name: pandas
+channels:
+ - defaults
+dependencies:
+ - beautifulsoup4
+ - bottleneck
+ - cython
+ - html5lib
+ - jinja2
+ - lxml
+ - matplotlib
+ - nomkl
+ - numexpr
+ - numpy=1.10.4
+ - openpyxl
+ - pytables
+ - python=3.5*
+ - pytz
+ - xarray
+ - xlrd
+ - xlsxwriter
+ - xlwt
+ # universal
+ - pytest
+ - pytest-xdist
+ - pip:
+ - python-dateutil==2.5.3
diff --git a/ci/travis-36-doc.yaml b/ci/travis-36-doc.yaml
new file mode 100644
index 0000000000000..c22dddbe0ba3f
--- /dev/null
+++ b/ci/travis-36-doc.yaml
@@ -0,0 +1,45 @@
+name: pandas
+channels:
+ - defaults
+ - conda-forge
+ - r
+dependencies:
+ - beautifulsoup4
+ - bottleneck
+ - cython
+ - fastparquet
+ - feather-format
+ - html5lib
+ - ipykernel
+ - ipython
+ - ipywidgets
+ - lxml
+ - matplotlib
+ - nbconvert
+ - nbformat
+ - nbsphinx
+ - notebook
+ - numexpr
+ - numpy=1.13*
+ - openpyxl
+ - pandoc
+ - pyqt
+ - pytables
+ - python-dateutil
+ - python-snappy
+ - python=3.6*
+ - pytz
+ - r
+ - rpy2
+ - scipy
+ - seaborn
+ - sphinx
+ - sqlalchemy
+ - statsmodels
+ - xarray
+ - xlrd
+ - xlsxwriter
+ - xlwt
+ # universal
+ - pytest
+ - pytest-xdist
diff --git a/ci/travis-36-numpydev.yaml b/ci/travis-36-numpydev.yaml
new file mode 100644
index 0000000000000..455d65feb4242
--- /dev/null
+++ b/ci/travis-36-numpydev.yaml
@@ -0,0 +1,16 @@
+name: pandas
+channels:
+ - defaults
+dependencies:
+ - python=3.6*
+ - pytz
+ - Cython
+ # universal
+ - pytest
+ - pytest-xdist
+ - pip:
+ - "git+git://github.com/dateutil/dateutil.git"
+ - "-f https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com"
+ - "--pre"
+ - "numpy"
+ - "scipy"
diff --git a/ci/travis-36-slow.yaml b/ci/travis-36-slow.yaml
new file mode 100644
index 0000000000000..6c475dc48723c
--- /dev/null
+++ b/ci/travis-36-slow.yaml
@@ -0,0 +1,30 @@
+name: pandas
+channels:
+ - defaults
+ - conda-forge
+dependencies:
+ - beautifulsoup4
+ - cython
+ - html5lib
+ - lxml
+ - matplotlib
+ - numexpr
+ - numpy
+ - openpyxl
+ - patsy
+ - psycopg2
+ - pymysql
+ - pytables
+ - python-dateutil
+ - python=3.6*
+ - pytz
+ - s3fs
+ - scipy
+ - sqlalchemy
+ - xlrd
+ - xlsxwriter
+ - xlwt
+ # universal
+ - pytest
+ - pytest-xdist
+ - moto
diff --git a/ci/travis-36.yaml b/ci/travis-36.yaml
new file mode 100644
index 0000000000000..fe057e714761e
--- /dev/null
+++ b/ci/travis-36.yaml
@@ -0,0 +1,47 @@
+name: pandas
+channels:
+ - defaults
+ - conda-forge
+dependencies:
+ - beautifulsoup4
+ - cython
+ - dask
+ - fastparquet
+ - feather-format
+ - geopandas
+ - html5lib
+ - ipython
+ - jinja2
+ - lxml
+ - matplotlib
+ - nomkl
+ - numexpr
+ - numpy
+ - openpyxl
+ - pandas-datareader
+ - psycopg2
+ - pyarrow
+ - pymysql
+ - pytables
+ - python-dateutil
+ - python-snappy
+ - python=3.6*
+ - pytz
+ - s3fs
+ - scikit-learn
+ - scipy
+ - seaborn
+ - sqlalchemy
+ - statsmodels
+ - xarray
+ - xlrd
+ - xlsxwriter
+ - xlwt
+ # universal
+ - pytest
+ - pytest-xdist
+ - pytest-cov
+ - moto
+ - pip:
+ - brotlipy
+ - coverage
diff --git a/circle.yml b/circle.yml
index 9d49145af54e3..66415defba6fe 100644
--- a/circle.yml
+++ b/circle.yml
@@ -21,13 +21,13 @@ dependencies:
- >
case $CIRCLE_NODE_INDEX in
0)
- sudo apt-get install language-pack-it && ./ci/install_circle.sh JOB="2.7_COMPAT" LOCALE_OVERRIDE="it_IT.UTF-8" ;;
+ sudo apt-get install language-pack-it && ./ci/install_circle.sh JOB="2.7_COMPAT" ENV_FILE="ci/circle-27-compat.yaml" LOCALE_OVERRIDE="it_IT.UTF-8" ;;
1)
- sudo apt-get install language-pack-zh-hans && ./ci/install_circle.sh JOB="3.6_LOCALE" LOCALE_OVERRIDE="zh_CN.UTF-8" ;;
+ sudo apt-get install language-pack-zh-hans && ./ci/install_circle.sh JOB="3.6_LOCALE" ENV_FILE="ci/circle-36-locale.yaml" LOCALE_OVERRIDE="zh_CN.UTF-8" ;;
2)
- sudo apt-get install language-pack-zh-hans && ./ci/install_circle.sh JOB="3.6_LOCALE_SLOW" LOCALE_OVERRIDE="zh_CN.UTF-8" ;;
+ sudo apt-get install language-pack-zh-hans && ./ci/install_circle.sh JOB="3.6_LOCALE_SLOW" ENV_FILE="ci/circle-36-locale_slow.yaml" LOCALE_OVERRIDE="zh_CN.UTF-8" ;;
3)
- ./ci/install_circle.sh JOB="3.5_ASCII" LOCALE_OVERRIDE="C" ;;
+ ./ci/install_circle.sh JOB="3.5_ASCII" ENV_FILE="ci/circle-35-ascii.yaml" LOCALE_OVERRIDE="C" ;;
esac
- ./ci/show_circle.sh
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index 416535aac3a4c..fdf45f307e953 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -183,7 +183,6 @@ def test_read_csv_handles_boto_s3_object(self,
expected = read_csv(tips_file)
tm.assert_frame_equal(result, expected)
- @pytest.mark.xfail(reason="buggy s3 / moto interaction on CI: gh-20720")
def test_read_csv_chunked_download(self, s3_resource, caplog):
# 8 MB, S3FS usees 5MB chunks
df = DataFrame(np.random.randn(100000, 4), columns=list('abcd'))
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 41a1db57c954b..11cbea8ce6331 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -432,7 +432,6 @@ def test_categorical_unsupported(self, pa_lt_070):
df = pd.DataFrame({'a': pd.Categorical(list('abc'))})
self.check_error_on_write(df, pa, NotImplementedError)
- @pytest.mark.xfail(reason="buggy s3 / moto interaction on CI: gh-20720")
def test_s3_roundtrip(self, df_compat, s3_resource, pa):
# GH #19134
check_round_trip(df_compat, pa,
@@ -499,7 +498,6 @@ def test_filter_row_groups(self, fp):
result = read_parquet(path, fp, filters=[('a', '==', 0)])
assert len(result) == 1
- @pytest.mark.xfail(reason="buggy s3 / moto interaction on CI: gh-20720")
def test_s3_roundtrip(self, df_compat, s3_resource, fp):
# GH #19134
check_round_trip(df_compat, fp,
| Closes https://github.com/pandas-dev/pandas/issues/20720
The old setup where things were installed in multiple places resulted in some strange environments, with multiple copies of the same package in `site-packages`.
This attempts to reduce the environment definition to a single file per CI job, and the environment installation to 3 steps
- create the environment
- uninstall pandas if it's present (installing a pandas dependency)
- build pandas from source & install
Travis only for now. Will move over the appveyor / circle ones too. | https://api.github.com/repos/pandas-dev/pandas/pulls/20767 | 2018-04-20T15:17:18Z | 2018-04-24T01:35:30Z | 2018-04-24T01:35:29Z | 2018-04-24T13:58:35Z |
DOC: Clean up badges in README | diff --git a/README.md b/README.md
index 36323410854b0..78e9b93ae535f 100644
--- a/README.md
+++ b/README.md
@@ -9,18 +9,33 @@
<table>
<tr>
<td>Latest Release</td>
- <td><img src="https://img.shields.io/pypi/v/pandas.svg" alt="latest release" /></td>
+ <td>
+ <a href="https://pypi.python.org/pypi/pandas/">
+ <img src="https://img.shields.io/pypi/v/pandas.svg" alt="latest release" />
+ </a>
+ </td>
</tr>
<td></td>
- <td><img src="https://anaconda.org/conda-forge/pandas/badges/version.svg" alt="latest release" /></td>
+ <td>
+ <a href="https://anaconda.org/anaconda/pandas/">
+ <img src="https://anaconda.org/conda-forge/pandas/badges/version.svg" alt="latest release" />
+ </a>
+</td>
</tr>
<tr>
<td>Package Status</td>
- <td><img src="https://img.shields.io/pypi/status/pandas.svg" alt="status" /></td>
+ <td>
+ <a href="https://pypi.python.org/pypi/pandas/">
+ <img src="https://img.shields.io/pypi/status/pandas.svg" alt="status" /></td>
+ </a>
</tr>
<tr>
<td>License</td>
- <td><img src="https://img.shields.io/pypi/l/pandas.svg" alt="license" /></td>
+ <td>
+ <a href="https://github.com/pandas-dev/pandas/blob/master/LICENSE">
+ <img src="https://img.shields.io/pypi/l/pandas.svg" alt="license" />
+ </a>
+</td>
</tr>
<tr>
<td>Build Status</td>
@@ -50,20 +65,12 @@
<td>Coverage</td>
<td>
<a href="https://codecov.io/gh/pandas-dev/pandas">
- <img src="https://codecov.io/github/pandas-dev/pandas/coverage.svg?branch=master" alt="coverage" />
- </a>
- </td>
-</tr>
-<tr>
- <td>Conda</td>
- <td>
- <a href="https://pandas.pydata.org">
- <img src="http://pubbadges.s3-website-us-east-1.amazonaws.com/pkgs-downloads-pandas.png" alt="conda default downloads" />
+ <img src="https://codecov.io/github/pandas-dev/pandas/coverage.svg?branch=master" alt="coverage" />
</a>
</td>
</tr>
<tr>
- <td>Conda-forge</td>
+ <td>Downloads</td>
<td>
<a href="https://pandas.pydata.org">
<img src="https://anaconda.org/conda-forge/pandas/badges/downloads.svg" alt="conda-forge downloads" />
@@ -71,16 +78,16 @@
</td>
</tr>
<tr>
- <td>PyPI</td>
- <td>
- <a href="https://pypi.python.org/pypi/pandas/">
- <img src="https://img.shields.io/pypi/dm/pandas.svg" alt="pypi downloads" />
- </a>
- </td>
+ <td>Gitter</td>
+ <td>
+ <a href="https://gitter.im/pydata/pandas">
+ <img src="https://badges.gitter.im/Join%20Chat.svg"
+ </a>
+ </td>
</tr>
</table>
-[](https://gitter.im/pydata/pandas?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
## What is it
| Since we have pypi and conda covered in latest release and the links to them are no longer available. It probably make sense to remove these two rows. | https://api.github.com/repos/pandas-dev/pandas/pulls/20749 | 2018-04-19T17:07:50Z | 2018-04-20T10:12:37Z | 2018-04-20T10:12:37Z | 2018-04-20T10:12:42Z |
TST: correct constructor in extension array tests | diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 97a764fa7dbe8..bb910b687cd0f 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -27,7 +27,7 @@ class ExtensionArray(object):
The interface includes the following abstract methods that must be
implemented by subclasses:
- * _constructor_from_sequence
+ * _from_sequence
* _from_factorized
* __getitem__
* __len__
@@ -78,7 +78,7 @@ class ExtensionArray(object):
# Constructors
# ------------------------------------------------------------------------
@classmethod
- def _constructor_from_sequence(cls, scalars):
+ def _from_sequence(cls, scalars):
"""Construct a new ExtensionArray from a sequence of scalars.
Parameters
@@ -365,7 +365,7 @@ def fillna(self, value=None, method=None, limit=None):
func = pad_1d if method == 'pad' else backfill_1d
new_values = func(self.astype(object), limit=limit,
mask=mask)
- new_values = self._constructor_from_sequence(new_values)
+ new_values = self._from_sequence(new_values)
else:
# fill with value
new_values = self.copy()
@@ -384,7 +384,7 @@ def unique(self):
from pandas import unique
uniques = unique(self.astype(object))
- return self._constructor_from_sequence(uniques)
+ return self._from_sequence(uniques)
def _values_for_factorize(self):
# type: () -> Tuple[ndarray, Any]
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index b5a4785fd98a6..599161521f3a7 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -422,7 +422,7 @@ def _constructor(self):
return Categorical
@classmethod
- def _constructor_from_sequence(cls, scalars):
+ def _from_sequence(cls, scalars):
return Categorical(scalars)
def copy(self):
diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py
index 5ac3a84517fe9..489a430bb4020 100644
--- a/pandas/tests/extension/base/constructors.py
+++ b/pandas/tests/extension/base/constructors.py
@@ -11,7 +11,7 @@ class BaseConstructorsTests(BaseExtensionTests):
def test_array_from_scalars(self, data):
scalars = [data[0], data[1], data[2]]
- result = data._constructor_from_sequence(scalars)
+ result = data._from_sequence(scalars)
assert isinstance(result, type(data))
def test_series_constructor(self, data):
diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py
index 4e2a65eba06dc..ac156900671a6 100644
--- a/pandas/tests/extension/base/getitem.py
+++ b/pandas/tests/extension/base/getitem.py
@@ -145,7 +145,7 @@ def test_take_series(self, data):
s = pd.Series(data)
result = s.take([0, -1])
expected = pd.Series(
- data._constructor_from_sequence([data[0], data[len(data) - 1]]),
+ data._from_sequence([data[0], data[len(data) - 1]]),
index=[0, len(data) - 1])
self.assert_series_equal(result, expected)
@@ -158,12 +158,11 @@ def test_reindex(self, data, na_value):
n = len(data)
result = s.reindex([-1, 0, n])
expected = pd.Series(
- data._constructor_from_sequence([na_value, data[0], na_value]),
+ data._from_sequence([na_value, data[0], na_value]),
index=[-1, 0, n])
self.assert_series_equal(result, expected)
result = s.reindex([n, n + 1])
- expected = pd.Series(
- data._constructor_from_sequence([na_value, na_value]),
- index=[n, n + 1])
+ expected = pd.Series(data._from_sequence([na_value, na_value]),
+ index=[n, n + 1])
self.assert_series_equal(result, expected)
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 430c571aab0a4..c5436aa731d50 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -76,7 +76,7 @@ def test_sort_values_frame(self, data_for_sorting, ascending):
@pytest.mark.parametrize('box', [pd.Series, lambda x: x])
@pytest.mark.parametrize('method', [lambda x: x.unique(), pd.unique])
def test_unique(self, data, box, method):
- duplicated = box(data._constructor_from_sequence([data[0], data[0]]))
+ duplicated = box(data._from_sequence([data[0], data[0]]))
result = method(duplicated)
diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py
index d3360eb199a89..f6cee9af0b722 100644
--- a/pandas/tests/extension/base/missing.py
+++ b/pandas/tests/extension/base/missing.py
@@ -70,7 +70,8 @@ def test_fillna_series(self, data_missing):
ser = pd.Series(data_missing)
result = ser.fillna(fill_value)
- expected = pd.Series(type(data_missing)([fill_value, fill_value]))
+ expected = pd.Series(
+ data_missing._from_sequence([fill_value, fill_value]))
self.assert_series_equal(result, expected)
# Fill with a series
@@ -89,7 +90,8 @@ def test_fillna_series_method(self, data_missing, method):
data_missing = type(data_missing)(data_missing[::-1])
result = pd.Series(data_missing).fillna(method=method)
- expected = pd.Series(type(data_missing)([fill_value, fill_value]))
+ expected = pd.Series(
+ data_missing._from_sequence([fill_value, fill_value]))
self.assert_series_equal(result, expected)
@@ -102,7 +104,7 @@ def test_fillna_frame(self, data_missing):
}).fillna(fill_value)
expected = pd.DataFrame({
- "A": type(data_missing)([fill_value, fill_value]),
+ "A": data_missing._from_sequence([fill_value, fill_value]),
"B": [1, 2],
})
diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py
index f50222b82df0f..40456453cb43d 100644
--- a/pandas/tests/extension/base/reshaping.py
+++ b/pandas/tests/extension/base/reshaping.py
@@ -47,8 +47,8 @@ def test_align(self, data, na_value):
r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3]))
# Assumes that the ctor can take a list of scalars of the type
- e1 = pd.Series(type(data)(list(a) + [na_value]))
- e2 = pd.Series(type(data)([na_value] + list(b)))
+ e1 = pd.Series(data._from_sequence(list(a) + [na_value]))
+ e2 = pd.Series(data._from_sequence([na_value] + list(b)))
self.assert_series_equal(r1, e1)
self.assert_series_equal(r2, e2)
@@ -60,8 +60,8 @@ def test_align_frame(self, data, na_value):
)
# Assumes that the ctor can take a list of scalars of the type
- e1 = pd.DataFrame({'A': type(data)(list(a) + [na_value])})
- e2 = pd.DataFrame({'A': type(data)([na_value] + list(b))})
+ e1 = pd.DataFrame({'A': data._from_sequence(list(a) + [na_value])})
+ e2 = pd.DataFrame({'A': data._from_sequence([na_value] + list(b))})
self.assert_frame_equal(r1, e1)
self.assert_frame_equal(r2, e2)
@@ -71,9 +71,8 @@ def test_align_series_frame(self, data, na_value):
df = pd.DataFrame({"col": np.arange(len(ser) + 1)})
r1, r2 = ser.align(df)
- e1 = pd.Series(
- data._constructor_from_sequence(list(data) + [na_value]),
- name=ser.name)
+ e1 = pd.Series(data._from_sequence(list(data) + [na_value]),
+ name=ser.name)
self.assert_series_equal(r1, e1)
self.assert_frame_equal(r2, df)
@@ -105,14 +104,13 @@ def test_merge(self, data, na_value):
res = pd.merge(df1, df2)
exp = pd.DataFrame(
{'int1': [1, 1, 2], 'int2': [1, 2, 3], 'key': [0, 0, 1],
- 'ext': data._constructor_from_sequence(
- [data[0], data[0], data[1]])})
+ 'ext': data._from_sequence([data[0], data[0], data[1]])})
self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']])
res = pd.merge(df1, df2, how='outer')
exp = pd.DataFrame(
{'int1': [1, 1, 2, 3, np.nan], 'int2': [1, 2, 3, np.nan, 4],
'key': [0, 0, 1, 2, 3],
- 'ext': data._constructor_from_sequence(
+ 'ext': data._from_sequence(
[data[0], data[0], data[1], data[2], na_value])})
self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']])
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index e91345b504d86..4e27f1eca538f 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -27,7 +27,7 @@ def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
ser = pd.Series(data)
value = [data[0]]
if as_array:
- value = type(data)(value)
+ value = data._from_sequence(value)
xpr = 'cannot set using a {} indexer with a different length'
with tm.assert_raises_regex(ValueError, xpr.format('list-like')):
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index a8e88365b5648..28e667a776c7b 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -36,7 +36,7 @@ def __init__(self, values):
self._items = self._data = self.data = self.values
@classmethod
- def _constructor_from_sequence(cls, scalars):
+ def _from_sequence(cls, scalars):
return cls(scalars)
@classmethod
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 33843492cb706..1f7fcbdfd585a 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -34,7 +34,7 @@ def __init__(self, values):
self.data = values
@classmethod
- def _constructor_from_sequence(cls, scalars):
+ def _from_sequence(cls, scalars):
return cls(scalars)
@classmethod
@@ -45,9 +45,7 @@ def __getitem__(self, item):
if isinstance(item, numbers.Integral):
return self.data[item]
elif isinstance(item, np.ndarray) and item.dtype == 'bool':
- return self._constructor_from_sequence([
- x for x, m in zip(self, item) if m
- ])
+ return self._from_sequence([x for x, m in zip(self, item) if m])
elif isinstance(item, collections.Iterable):
# fancy indexing
return type(self)([self.data[i] for i in item])
@@ -95,7 +93,7 @@ def take(self, indexer, allow_fill=True, fill_value=None):
except IndexError:
raise IndexError("Index is out of bounds or cannot do a "
"non-empty take from an empty array.")
- return self._constructor_from_sequence(output)
+ return self._from_sequence(output)
def copy(self, deep=False):
return type(self)(self.data[:])
| Replace `type(data)(..)` with `data._constructor_from_sequence`.
A bit annoying the name is that long :-), but this is the correct usage.
cc @TomAugspurger | https://api.github.com/repos/pandas-dev/pandas/pulls/20746 | 2018-04-19T14:37:58Z | 2018-04-23T14:06:24Z | 2018-04-23T14:06:24Z | 2018-04-23T14:06:28Z |
Fix pd.merge to preserve ExtensionArrays dtypes | diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 3a90feb7ccd7d..c45838e6040a9 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1807,7 +1807,7 @@ def _get_dtype(arr_or_dtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype)
- elif isinstance(arr_or_dtype, CategoricalDtype):
+ elif isinstance(arr_or_dtype, ExtensionDtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, DatetimeTZDtype):
return arr_or_dtype
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 37d11296400be..e98899b2f5c1a 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -5541,8 +5541,14 @@ def concatenate_join_units(join_units, concat_axis, copy):
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
- if copy and concat_values.base is not None:
- concat_values = concat_values.copy()
+ if copy:
+ if isinstance(concat_values, np.ndarray):
+ # non-reindexed (=not yet copied) arrays are made into a view
+ # in JoinUnit.get_reindexed_values
+ if concat_values.base is not None:
+ concat_values = concat_values.copy()
+ else:
+ concat_values = concat_values.copy()
else:
concat_values = _concat._concat_compat(to_concat, axis=concat_axis)
@@ -5823,7 +5829,7 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
- elif self.block.is_categorical:
+ elif self.block.is_extension:
values = self.block.values
else:
# No dtype upcasting is done here, it will be performed during
diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py
index efc22c19a3eef..f50222b82df0f 100644
--- a/pandas/tests/extension/base/reshaping.py
+++ b/pandas/tests/extension/base/reshaping.py
@@ -95,3 +95,24 @@ def test_set_frame_overwrite_object(self, data):
df = pd.DataFrame({"A": [1] * len(data)}, dtype=object)
df['A'] = data
assert df.dtypes['A'] == data.dtype
+
+ def test_merge(self, data, na_value):
+ # GH-20743
+ df1 = pd.DataFrame({'ext': data[:3], 'int1': [1, 2, 3],
+ 'key': [0, 1, 2]})
+ df2 = pd.DataFrame({'int2': [1, 2, 3, 4], 'key': [0, 0, 1, 3]})
+
+ res = pd.merge(df1, df2)
+ exp = pd.DataFrame(
+ {'int1': [1, 1, 2], 'int2': [1, 2, 3], 'key': [0, 0, 1],
+ 'ext': data._constructor_from_sequence(
+ [data[0], data[0], data[1]])})
+ self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']])
+
+ res = pd.merge(df1, df2, how='outer')
+ exp = pd.DataFrame(
+ {'int1': [1, 1, 2, 3, np.nan], 'int2': [1, 2, 3, np.nan, 4],
+ 'key': [0, 0, 1, 2, 3],
+ 'ext': data._constructor_from_sequence(
+ [data[0], data[0], data[1], data[2], na_value])})
+ self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']])
diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py
index 27c156c15203f..6ebe700f13be0 100644
--- a/pandas/tests/extension/category/test_categorical.py
+++ b/pandas/tests/extension/category/test_categorical.py
@@ -75,6 +75,10 @@ def test_align(self, data, na_value):
def test_align_frame(self, data, na_value):
pass
+ @pytest.mark.skip(reason="Unobserved categories preseved in concat.")
+ def test_merge(self, data, na_value):
+ pass
+
class TestGetitem(base.BaseGetitemTests):
@pytest.mark.skip(reason="Backwards compatibility")
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index d509170565e1a..53d74cd6d38cb 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -72,6 +72,14 @@ def assert_series_equal(self, left, right, *args, **kwargs):
def assert_frame_equal(self, left, right, *args, **kwargs):
# TODO(EA): select_dtypes
+ tm.assert_index_equal(
+ left.columns, right.columns,
+ exact=kwargs.get('check_column_type', 'equiv'),
+ check_names=kwargs.get('check_names', True),
+ check_exact=kwargs.get('check_exact', False),
+ check_categorical=kwargs.get('check_categorical', True),
+ obj='{obj}.columns'.format(obj=kwargs.get('obj', 'DataFrame')))
+
decimals = (left.dtypes == 'decimal').index
for col in decimals:
| Closes https://github.com/pandas-dev/pandas/issues/20743 | https://api.github.com/repos/pandas-dev/pandas/pulls/20745 | 2018-04-19T14:09:48Z | 2018-04-22T14:53:46Z | 2018-04-22T14:53:45Z | 2018-04-22T14:53:46Z |
DOC: use apply(raw=True) in docs to silence warning | diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index 4285767654e25..ff06c369e1897 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -323,7 +323,7 @@ compute the mean absolute deviation on a rolling basis:
mad = lambda x: np.fabs(x - x.mean()).mean()
@savefig rolling_apply_ex.png
- s.rolling(window=60).apply(mad).plot(style='k')
+ s.rolling(window=60).apply(mad, raw=True).plot(style='k')
.. _stats.rolling_window:
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 4e61228d5c0ad..893642410af02 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -496,7 +496,7 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
def Red(x):
return functools.reduce(CumRet,x,1.0)
- S.expanding().apply(Red)
+ S.expanding().apply(Red, raw=True)
`Replacing some values with mean of the rest of a group
| cc @jreback doc follow-up on https://github.com/pandas-dev/pandas/pull/20584 | https://api.github.com/repos/pandas-dev/pandas/pulls/20741 | 2018-04-19T08:38:34Z | 2018-04-19T10:07:35Z | 2018-04-19T10:07:35Z | 2018-04-19T10:07:35Z |
Fix more tests expecting little-endian | diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py
index 590f28b275aec..20cd8b43478d2 100644
--- a/pandas/tests/dtypes/test_cast.py
+++ b/pandas/tests/dtypes/test_cast.py
@@ -191,9 +191,9 @@ def testinfer_dtype_from_scalar_errors(self):
(pd.Categorical(list('aabc')), 'category', True),
(pd.Categorical([1, 2, 3]), 'category', True),
(Timestamp('20160101'), np.object_, False),
- (np.datetime64('2016-01-01'), np.dtype('<M8[D]'), False),
+ (np.datetime64('2016-01-01'), np.dtype('=M8[D]'), False),
(pd.date_range('20160101', periods=3),
- np.dtype('<M8[ns]'), False),
+ np.dtype('=M8[ns]'), False),
(pd.date_range('20160101', periods=3, tz='US/Eastern'),
'datetime64[ns, US/Eastern]', True),
(pd.Series([1., 2, 3]), np.float64, False),
diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py
index 32b8a6e2b6b86..2472022b862bc 100644
--- a/pandas/tests/frame/test_convert_to.py
+++ b/pandas/tests/frame/test_convert_to.py
@@ -161,7 +161,7 @@ def test_to_records_with_unicode_column_names(self):
expected = np.rec.array(
[(0, 1.0)],
dtype={"names": ["index", u"accented_name_é"],
- "formats": ['<i8', '<f8']}
+ "formats": ['=i8', '=f8']}
)
tm.assert_almost_equal(result, expected)
diff --git a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py
index c3926cc5f1633..daf44a559cf5c 100644
--- a/pandas/tests/indexes/period/test_formats.py
+++ b/pandas/tests/indexes/period/test_formats.py
@@ -13,7 +13,7 @@ def test_to_native_types():
# First, with no arguments.
expected = np.array(['2017-01-01', '2017-01-02',
- '2017-01-03'], dtype='<U10')
+ '2017-01-03'], dtype='=U10')
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
@@ -23,14 +23,14 @@ def test_to_native_types():
tm.assert_numpy_array_equal(result, expected)
# Make sure slicing works
- expected = np.array(['2017-01-01', '2017-01-03'], dtype='<U10')
+ expected = np.array(['2017-01-01', '2017-01-03'], dtype='=U10')
result = index.to_native_types([0, 2])
tm.assert_numpy_array_equal(result, expected)
# Make sure date formatting works
expected = np.array(['01-2017-01', '01-2017-02',
- '01-2017-03'], dtype='<U10')
+ '01-2017-03'], dtype='=U10')
result = index.to_native_types(date_format='%m-%Y-%d')
tm.assert_numpy_array_equal(result, expected)
| As in #14832, use = (native) instead of < (little-endian)
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/20738 | 2018-04-18T19:14:32Z | 2018-04-19T10:12:49Z | 2018-04-19T10:12:49Z | 2018-04-19T13:30:39Z |
DOC: add coverage href to README.md | diff --git a/README.md b/README.md
index 86cf95508a5d9..36323410854b0 100644
--- a/README.md
+++ b/README.md
@@ -48,7 +48,11 @@
</tr>
<tr>
<td>Coverage</td>
- <td><img src="https://codecov.io/github/pandas-dev/pandas/coverage.svg?branch=master" alt="coverage" /></td>
+ <td>
+ <a href="https://codecov.io/gh/pandas-dev/pandas">
+ <img src="https://codecov.io/github/pandas-dev/pandas/coverage.svg?branch=master" alt="coverage" />
+ </a>
+ </td>
</tr>
<tr>
<td>Conda</td>
| https://api.github.com/repos/pandas-dev/pandas/pulls/20736 | 2018-04-18T18:23:18Z | 2018-04-19T10:15:33Z | 2018-04-19T10:15:33Z | 2018-04-19T17:29:39Z | |
BUG: unexpected assign by a single-element list (GH19474) | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index cb96c7093c005..1c9849730edd6 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -1113,6 +1113,7 @@ Indexing
- Bug in :meth:`DataFrame.first_valid_index` and :meth:`DataFrame.last_valid_index` in presence of entire rows of NaNs in the middle of values (:issue:`20499`).
- Bug in :class:`IntervalIndex` where some indexing operations were not supported for overlapping or non-monotonic ``uint64`` data (:issue:`20636`)
- Bug in ``Series.is_unique`` where extraneous output in stderr is shown if Series contains objects with ``__ne__`` defined (:issue:`20661`)
+- Bug in ``.loc`` assignment with a single-element list-like incorrectly assigns as a list (:issue:`19474`)
- Bug in partial string indexing on a ``Series/DataFrame`` with a monotonic decreasing ``DatetimeIndex`` (:issue:`19362`)
MultiIndex
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 5240a4703c242..2eb52ecc6bcc7 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -532,7 +532,8 @@ def setter(item, v):
def can_do_equal_len():
""" return True if we have an equal len settable """
- if not len(labels) == 1 or not np.iterable(value):
+ if (not len(labels) == 1 or not np.iterable(value) or
+ is_scalar(plane_indexer[0])):
return False
l = len(value)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index a5506abe8f355..f1178d44dbfe0 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -10,6 +10,7 @@
from pandas import Series, DataFrame, date_range, concat, isna
from pandas.util import testing as tm
from pandas.tests.indexing.common import Base
+from pandas.api.types import is_scalar
class TestiLoc(Base):
@@ -526,6 +527,21 @@ def test_iloc_setitem_list_of_lists(self):
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
+ @pytest.mark.parametrize(
+ 'indexer', [[0], slice(None, 1, None), np.array([0])])
+ @pytest.mark.parametrize(
+ 'value', [['Z'], np.array(['Z'])])
+ def test_iloc_setitem_with_scalar_index(self, indexer, value):
+ # GH #19474
+ # assigning like "df.iloc[0, [0]] = ['Z']" should be evaluated
+ # elementwisely, not using "setter('A', ['Z'])".
+
+ df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+ df.iloc[0, indexer] = value
+ result = df.iloc[0, 0]
+
+ assert is_scalar(result) and result == 'Z'
+
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 86a5a82441ee8..39f4d2b7bd395 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -11,6 +11,7 @@
from pandas import Series, DataFrame, Timestamp, date_range, MultiIndex, Index
from pandas.util import testing as tm
from pandas.tests.indexing.common import Base
+from pandas.api.types import is_scalar
class TestLoc(Base):
@@ -555,6 +556,21 @@ def test_loc_setitem_frame_multiples(self):
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
+ @pytest.mark.parametrize(
+ 'indexer', [['A'], slice(None, 'A', None), np.array(['A'])])
+ @pytest.mark.parametrize(
+ 'value', [['Z'], np.array(['Z'])])
+ def test_loc_setitem_with_scalar_index(self, indexer, value):
+ # GH #19474
+ # assigning like "df.loc[0, ['A']] = ['Z']" should be evaluated
+ # elementwisely, not using "setter('A', ['Z'])".
+
+ df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+ df.loc[0, indexer] = value
+ result = df.loc[0, 'A']
+
+ assert is_scalar(result) and result == 'Z'
+
def test_loc_coerceion(self):
# 12411
| - [x] closes #19474
- [x] tests added / passed (some failed independently of my changes)
- [x] passes `git diff master --name-only -- "*.py" | flake8`
- [x] whatsnew entry
I thought it is a straightforward way to evaluate all of `df.loc[0, ['A']] = ['X']`, `df.loc[0, ['A', 'B']] = ['X', 'Y']`, ...(more-columns case) the same way in `else` block at line 590 of pandas/core/indexing.py.
Please let me know bad or ungrammatical points. | https://api.github.com/repos/pandas-dev/pandas/pulls/20732 | 2018-04-18T14:22:41Z | 2018-04-21T16:39:43Z | 2018-04-21T16:39:43Z | 2018-05-09T20:59:37Z |
DEPR: Deprecate DatetimeIndex.offset in favor of DatetimeIndex.freq | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 641214550a3b7..2975b2a53c3a8 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -877,6 +877,7 @@ Deprecations
- The ``convert_datetime64`` parameter in :func:`DataFrame.to_records` has been deprecated and will be removed in a future version. The NumPy bug motivating this parameter has been resolved. The default value for this parameter has also changed from ``True`` to ``None`` (:issue:`18160`).
- :func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`,
:func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` have deprecated passing an ``np.array`` by default. One will need to pass the new ``raw`` parameter to be explicit about what is passed (:issue:`20584`)
+- ``DatetimeIndex.offset`` is deprecated. Use ``DatetimeIndex.freq`` instead (:issue:`20716`)
.. _whatsnew_0230.prior_deprecations:
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 88ea3511d4ee3..e0e7ba3e8b518 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -302,7 +302,7 @@ def _add_comparison_methods(cls):
_engine_type = libindex.DatetimeEngine
tz = None
- offset = None
+ _freq = None
_comparables = ['name', 'freqstr', 'tz']
_attributes = ['name', 'freq', 'tz']
@@ -415,7 +415,7 @@ def __new__(cls, data=None,
subarr = data.values
if freq is None:
- freq = data.offset
+ freq = data.freq
verify_integrity = False
else:
if data.dtype != _NS_DTYPE:
@@ -467,12 +467,12 @@ def __new__(cls, data=None,
if freq_infer:
inferred = subarr.inferred_freq
if inferred:
- subarr.offset = to_offset(inferred)
+ subarr.freq = to_offset(inferred)
return subarr._deepcopy_if_needed(ref_to_data, copy)
@classmethod
- def _generate(cls, start, end, periods, name, offset,
+ def _generate(cls, start, end, periods, name, freq,
tz=None, normalize=False, ambiguous='raise', closed=None):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and '
@@ -535,7 +535,7 @@ def _generate(cls, start, end, periods, name, offset,
else:
_normalized = _normalized and end.time() == _midnight
- if hasattr(offset, 'delta') and offset != offsets.Day():
+ if hasattr(freq, 'delta') and freq != offsets.Day():
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is None:
@@ -551,11 +551,11 @@ def _generate(cls, start, end, periods, name, offset,
if end.tz is None and start.tz is not None:
end = end.tz_localize(start.tz, ambiguous=False)
- if _use_cached_range(offset, _normalized, start, end):
+ if _use_cached_range(freq, _normalized, start, end):
index = cls._cached_range(start, end, periods=periods,
- offset=offset, name=name)
+ freq=freq, name=name)
else:
- index = _generate_regular_range(start, end, periods, offset)
+ index = _generate_regular_range(start, end, periods, freq)
else:
@@ -574,11 +574,11 @@ def _generate(cls, start, end, periods, name, offset,
if end.tz is None and start.tz is not None:
start = start.replace(tzinfo=None)
- if _use_cached_range(offset, _normalized, start, end):
+ if _use_cached_range(freq, _normalized, start, end):
index = cls._cached_range(start, end, periods=periods,
- offset=offset, name=name)
+ freq=freq, name=name)
else:
- index = _generate_regular_range(start, end, periods, offset)
+ index = _generate_regular_range(start, end, periods, freq)
if tz is not None and getattr(index, 'tz', None) is None:
index = conversion.tz_localize_to_utc(_ensure_int64(index), tz,
@@ -596,12 +596,12 @@ def _generate(cls, start, end, periods, name, offset,
index = index[1:]
if not right_closed and len(index) and index[-1] == end:
index = index[:-1]
- index = cls._simple_new(index, name=name, freq=offset, tz=tz)
+ index = cls._simple_new(index, name=name, freq=freq, tz=tz)
return index
@property
def _box_func(self):
- return lambda x: Timestamp(x, freq=self.offset, tz=self.tz)
+ return lambda x: Timestamp(x, freq=self.freq, tz=self.tz)
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
@@ -647,7 +647,7 @@ def _simple_new(cls, values, name=None, freq=None, tz=None,
result = object.__new__(cls)
result._data = values
result.name = name
- result.offset = freq
+ result._freq = freq
result._tz = timezones.maybe_get_tz(tz)
result._tz = timezones.tz_standardize(result._tz)
result._reset_identity()
@@ -734,7 +734,7 @@ def _has_same_tz(self, other):
return zzone == vzone
@classmethod
- def _cached_range(cls, start=None, end=None, periods=None, offset=None,
+ def _cached_range(cls, start=None, end=None, periods=None, freq=None,
name=None):
if start is None and end is None:
# I somewhat believe this should never be raised externally
@@ -747,30 +747,30 @@ def _cached_range(cls, start=None, end=None, periods=None, offset=None,
raise TypeError(
'Must either specify period or provide both start and end.')
- if offset is None:
+ if freq is None:
# This can't happen with external-facing code
- raise TypeError('Must provide offset.')
+ raise TypeError('Must provide freq.')
drc = _daterange_cache
- if offset not in _daterange_cache:
- xdr = generate_range(offset=offset, start=_CACHE_START,
+ if freq not in _daterange_cache:
+ xdr = generate_range(offset=freq, start=_CACHE_START,
end=_CACHE_END)
arr = tools.to_datetime(list(xdr), box=False)
cachedRange = DatetimeIndex._simple_new(arr)
- cachedRange.offset = offset
+ cachedRange.freq = freq
cachedRange = cachedRange.tz_localize(None)
cachedRange.name = None
- drc[offset] = cachedRange
+ drc[freq] = cachedRange
else:
- cachedRange = drc[offset]
+ cachedRange = drc[freq]
if start is None:
if not isinstance(end, Timestamp):
raise AssertionError('end must be an instance of Timestamp')
- end = offset.rollback(end)
+ end = freq.rollback(end)
endLoc = cachedRange.get_loc(end) + 1
startLoc = endLoc - periods
@@ -778,23 +778,23 @@ def _cached_range(cls, start=None, end=None, periods=None, offset=None,
if not isinstance(start, Timestamp):
raise AssertionError('start must be an instance of Timestamp')
- start = offset.rollforward(start)
+ start = freq.rollforward(start)
startLoc = cachedRange.get_loc(start)
endLoc = startLoc + periods
else:
- if not offset.onOffset(start):
- start = offset.rollforward(start)
+ if not freq.onOffset(start):
+ start = freq.rollforward(start)
- if not offset.onOffset(end):
- end = offset.rollback(end)
+ if not freq.onOffset(end):
+ end = freq.rollback(end)
startLoc = cachedRange.get_loc(start)
endLoc = cachedRange.get_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
- indexSlice.offset = offset
+ indexSlice.freq = freq
return indexSlice
@@ -836,7 +836,7 @@ def __setstate__(self, state):
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
- self.offset = own_state[1]
+ self.freq = own_state[1]
self._tz = timezones.tz_standardize(own_state[2])
# provide numpy < 1.7 compat
@@ -1184,7 +1184,7 @@ def union(self, other):
result._tz = timezones.tz_standardize(this.tz)
if (result.freq is None and
(this.freq is not None or other.freq is not None)):
- result.offset = to_offset(result.inferred_freq)
+ result.freq = to_offset(result.inferred_freq)
return result
def to_perioddelta(self, freq):
@@ -1232,7 +1232,7 @@ def union_many(self, others):
this._tz = timezones.tz_standardize(tz)
if this.freq is None:
- this.offset = to_offset(this.inferred_freq)
+ this.freq = to_offset(this.inferred_freq)
return this
def join(self, other, how='left', level=None, return_indexers=False,
@@ -1271,7 +1271,7 @@ def _maybe_utc_convert(self, other):
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, DatetimeIndex) and
- self.offset == other.offset and
+ self.freq == other.freq and
self._can_fast_union(other)):
joined = self._shallow_copy(joined)
joined.name = name
@@ -1284,9 +1284,9 @@ def _can_fast_union(self, other):
if not isinstance(other, DatetimeIndex):
return False
- offset = self.offset
+ freq = self.freq
- if offset is None or offset != other.offset:
+ if freq is None or freq != other.freq:
return False
if not self.is_monotonic or not other.is_monotonic:
@@ -1306,10 +1306,10 @@ def _can_fast_union(self, other):
# Only need to "adjoin", not overlap
try:
- return (right_start == left_end + offset) or right_start in left
+ return (right_start == left_end + freq) or right_start in left
except (ValueError):
- # if we are comparing an offset that does not propagate timezones
+ # if we are comparing a freq that does not propagate timezones
# this will raise
return False
@@ -1329,7 +1329,7 @@ def _fast_union(self, other):
left_start, left_end = left[0], left[-1]
right_end = right[-1]
- if not self.offset._should_cache():
+ if not self.freq._should_cache():
# concatenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
@@ -1341,7 +1341,7 @@ def _fast_union(self, other):
else:
return type(self)(start=left_start,
end=max(left_end, right_end),
- freq=left.offset)
+ freq=left.freq)
def __iter__(self):
"""
@@ -1393,18 +1393,18 @@ def intersection(self, other):
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
- result.offset = to_offset(result.inferred_freq)
+ result.freq = to_offset(result.inferred_freq)
return result
- elif (other.offset is None or self.offset is None or
- other.offset != self.offset or
- not other.offset.isAnchored() or
+ elif (other.freq is None or self.freq is None or
+ other.freq != self.freq or
+ not other.freq.isAnchored() or
(not self.is_monotonic or not other.is_monotonic)):
result = Index.intersection(self, other)
result = self._shallow_copy(result._values, name=result.name,
tz=result.tz, freq=None)
if result.freq is None:
- result.offset = to_offset(result.inferred_freq)
+ result.freq = to_offset(result.inferred_freq)
return result
if len(self) == 0:
@@ -1729,12 +1729,28 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None):
@property
def freq(self):
"""get/set the frequency of the Index"""
- return self.offset
+ return self._freq
@freq.setter
def freq(self, value):
"""get/set the frequency of the Index"""
- self.offset = value
+ self._freq = value
+
+ @property
+ def offset(self):
+ """get/set the frequency of the Index"""
+ msg = ('DatetimeIndex.offset has been deprecated and will be removed '
+ 'in a future version; use DatetimeIndex.freq instead.')
+ warnings.warn(msg, FutureWarning, stacklevel=2)
+ return self.freq
+
+ @offset.setter
+ def offset(self, value):
+ """get/set the frequency of the Index"""
+ msg = ('DatetimeIndex.offset has been deprecated and will be removed '
+ 'in a future version; use DatetimeIndex.freq instead.')
+ warnings.warn(msg, FutureWarning, stacklevel=2)
+ self.freq = value
year = _field_accessor('year', 'Y', "The year of the datetime")
month = _field_accessor('month', 'M',
@@ -2525,9 +2541,9 @@ def day_name(self, locale=None):
DatetimeIndex._add_datetimelike_methods()
-def _generate_regular_range(start, end, periods, offset):
- if isinstance(offset, Tick):
- stride = offset.nanos
+def _generate_regular_range(start, end, periods, freq):
+ if isinstance(freq, Tick):
+ stride = freq.nanos
if periods is None:
b = Timestamp(start).value
# cannot just use e = Timestamp(end) + 1 because arange breaks when
@@ -2558,7 +2574,7 @@ def _generate_regular_range(start, end, periods, offset):
end = end.to_pydatetime()
xdr = generate_range(start=start, end=end,
- periods=periods, offset=offset)
+ periods=periods, offset=freq)
dates = list(xdr)
# utc = len(dates) > 0 and dates[0].tzinfo is not None
@@ -2855,9 +2871,9 @@ def _in_range(start, end, rng_start, rng_end):
return start > rng_start and end < rng_end
-def _use_cached_range(offset, _normalized, start, end):
- return (offset._should_cache() and
- not (offset._normalize_cache and not _normalized) and
+def _use_cached_range(freq, _normalized, start, end):
+ return (freq._should_cache() and
+ not (freq._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end))
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py
index 1cf854ad4a926..dae69a86910af 100644
--- a/pandas/tests/indexes/datetimes/test_construction.py
+++ b/pandas/tests/indexes/datetimes/test_construction.py
@@ -598,16 +598,16 @@ def test_datetimeindex_constructor_misc(self):
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=offsets.Week(weekday=6))
assert len(idx1) == len(idx2)
- assert idx1.offset == idx2.offset
+ assert idx1.freq == idx2.freq
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=offsets.QuarterBegin(startingMonth=1))
assert len(idx1) == len(idx2)
- assert idx1.offset == idx2.offset
+ assert idx1.freq == idx2.freq
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=offsets.BQuarterEnd(startingMonth=12))
assert len(idx1) == len(idx2)
- assert idx1.offset == idx2.offset
+ assert idx1.freq == idx2.freq
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index d2ec465468dfb..2dfd4ae3e6e3a 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -331,21 +331,21 @@ def test_naive_aware_conflicts(self):
aware.join(naive)
def test_cached_range(self):
- DatetimeIndex._cached_range(START, END, offset=BDay())
- DatetimeIndex._cached_range(START, periods=20, offset=BDay())
- DatetimeIndex._cached_range(end=START, periods=20, offset=BDay())
+ DatetimeIndex._cached_range(START, END, freq=BDay())
+ DatetimeIndex._cached_range(START, periods=20, freq=BDay())
+ DatetimeIndex._cached_range(end=START, periods=20, freq=BDay())
- with tm.assert_raises_regex(TypeError, "offset"):
+ with tm.assert_raises_regex(TypeError, "freq"):
DatetimeIndex._cached_range(START, END)
with tm.assert_raises_regex(TypeError, "specify period"):
- DatetimeIndex._cached_range(START, offset=BDay())
+ DatetimeIndex._cached_range(START, freq=BDay())
with tm.assert_raises_regex(TypeError, "specify period"):
- DatetimeIndex._cached_range(end=END, offset=BDay())
+ DatetimeIndex._cached_range(end=END, freq=BDay())
with tm.assert_raises_regex(TypeError, "start or end"):
- DatetimeIndex._cached_range(periods=20, offset=BDay())
+ DatetimeIndex._cached_range(periods=20, freq=BDay())
def test_cached_range_bug(self):
rng = date_range('2010-09-01 05:00:00', periods=50,
@@ -393,7 +393,7 @@ def test_daterange_bug_456(self):
# GH #456
rng1 = bdate_range('12/5/2011', '12/5/2011')
rng2 = bdate_range('12/2/2011', '12/5/2011')
- rng2.offset = BDay()
+ rng2.freq = BDay()
result = rng1.union(rng2)
assert isinstance(result, DatetimeIndex)
@@ -605,27 +605,27 @@ def test_constructor(self):
bdate_range('2011-1-1', '2012-1-1', 'C')
def test_cached_range(self):
- DatetimeIndex._cached_range(START, END, offset=CDay())
+ DatetimeIndex._cached_range(START, END, freq=CDay())
DatetimeIndex._cached_range(START, periods=20,
- offset=CDay())
+ freq=CDay())
DatetimeIndex._cached_range(end=START, periods=20,
- offset=CDay())
+ freq=CDay())
# with pytest.raises(TypeError):
- with tm.assert_raises_regex(TypeError, "offset"):
+ with tm.assert_raises_regex(TypeError, "freq"):
DatetimeIndex._cached_range(START, END)
# with pytest.raises(TypeError):
with tm.assert_raises_regex(TypeError, "specify period"):
- DatetimeIndex._cached_range(START, offset=CDay())
+ DatetimeIndex._cached_range(START, freq=CDay())
# with pytest.raises(TypeError):
with tm.assert_raises_regex(TypeError, "specify period"):
- DatetimeIndex._cached_range(end=END, offset=CDay())
+ DatetimeIndex._cached_range(end=END, freq=CDay())
# with pytest.raises(TypeError):
with tm.assert_raises_regex(TypeError, "start or end"):
- DatetimeIndex._cached_range(periods=20, offset=CDay())
+ DatetimeIndex._cached_range(periods=20, freq=CDay())
def test_misc(self):
end = datetime(2009, 5, 13)
@@ -640,7 +640,7 @@ def test_daterange_bug_456(self):
# GH #456
rng1 = bdate_range('12/5/2011', '12/5/2011', freq='C')
rng2 = bdate_range('12/2/2011', '12/5/2011', freq='C')
- rng2.offset = CDay()
+ rng2.freq = CDay()
result = rng1.union(rng2)
assert isinstance(result, DatetimeIndex)
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index af65a8618d30f..dd192db4b0eb3 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -53,10 +53,10 @@ def test_dti_business_getitem(self):
exp = DatetimeIndex(rng.view(np.ndarray)[:5])
tm.assert_index_equal(smaller, exp)
- assert smaller.offset == rng.offset
+ assert smaller.freq == rng.freq
sliced = rng[::5]
- assert sliced.offset == BDay() * 5
+ assert sliced.freq == BDay() * 5
fancy_indexed = rng[[4, 3, 2, 1, 0]]
assert len(fancy_indexed) == 5
@@ -77,10 +77,10 @@ def test_dti_custom_getitem(self):
smaller = rng[:5]
exp = DatetimeIndex(rng.view(np.ndarray)[:5])
tm.assert_index_equal(smaller, exp)
- assert smaller.offset == rng.offset
+ assert smaller.freq == rng.freq
sliced = rng[::5]
- assert sliced.offset == CDay() * 5
+ assert sliced.freq == CDay() * 5
fancy_indexed = rng[[4, 3, 2, 1, 0]]
assert len(fancy_indexed) == 5
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 8986828399a98..3c7d5d37e98f3 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -405,6 +405,18 @@ def test_equals(self):
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
+ def test_offset_deprecated(self):
+ # GH 20716
+ idx = pd.DatetimeIndex(['20180101', '20180102'])
+
+ # getter deprecated
+ with tm.assert_produces_warning(FutureWarning):
+ idx.offset
+
+ # setter deprecated
+ with tm.assert_produces_warning(FutureWarning):
+ idx.offset = BDay()
+
class TestBusinessDatetimeIndex(object):
@@ -420,7 +432,7 @@ def test_comparison(self):
def test_pickle_unpickle(self):
unpickled = tm.round_trip_pickle(self.rng)
- assert unpickled.offset is not None
+ assert unpickled.freq is not None
def test_copy(self):
cp = self.rng.copy()
@@ -430,15 +442,15 @@ def test_copy(self):
def test_shift(self):
shifted = self.rng.shift(5)
assert shifted[0] == self.rng[5]
- assert shifted.offset == self.rng.offset
+ assert shifted.freq == self.rng.freq
shifted = self.rng.shift(-5)
assert shifted[5] == self.rng[0]
- assert shifted.offset == self.rng.offset
+ assert shifted.freq == self.rng.freq
shifted = self.rng.shift(0)
assert shifted[0] == self.rng[0]
- assert shifted.offset == self.rng.offset
+ assert shifted.freq == self.rng.freq
rng = date_range(START, END, freq=BMonthEnd())
shifted = rng.shift(1, freq=BDay())
@@ -485,15 +497,15 @@ def test_shift(self):
shifted = self.rng.shift(5)
assert shifted[0] == self.rng[5]
- assert shifted.offset == self.rng.offset
+ assert shifted.freq == self.rng.freq
shifted = self.rng.shift(-5)
assert shifted[5] == self.rng[0]
- assert shifted.offset == self.rng.offset
+ assert shifted.freq == self.rng.freq
shifted = self.rng.shift(0)
assert shifted[0] == self.rng[0]
- assert shifted.offset == self.rng.offset
+ assert shifted.freq == self.rng.freq
# PerformanceWarning
with warnings.catch_warnings(record=True):
@@ -503,7 +515,7 @@ def test_shift(self):
def test_pickle_unpickle(self):
unpickled = tm.round_trip_pickle(self.rng)
- assert unpickled.offset is not None
+ assert unpickled.freq is not None
def test_equals(self):
assert not self.rng.equals(list(self.rng))
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index 84632e59e2bfb..cb9364edc0cc3 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -357,7 +357,7 @@ def test_intersection(self):
expected = rng[10:25]
tm.assert_index_equal(the_int, expected)
assert isinstance(the_int, DatetimeIndex)
- assert the_int.offset == rng.offset
+ assert the_int.freq == rng.freq
the_int = rng1.intersection(rng2.view(DatetimeIndex))
tm.assert_index_equal(the_int, expected)
| - [X] closes #20716
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/20730 | 2018-04-18T03:02:10Z | 2018-04-19T12:43:21Z | 2018-04-19T12:43:21Z | 2018-09-24T17:24:30Z |
[ENH] Add read support for Google Cloud Storage | diff --git a/ci/appveyor-27.yaml b/ci/appveyor-27.yaml
index cfc6a796bd77e..10511ac0e00ca 100644
--- a/ci/appveyor-27.yaml
+++ b/ci/appveyor-27.yaml
@@ -6,6 +6,7 @@ dependencies:
- beautifulsoup4
- bottleneck
- dateutil
+ - gcsfs
- html5lib
- jinja2=2.8
- lxml
diff --git a/ci/check_imports.py b/ci/check_imports.py
index d6f24ebcc4d3e..3f09290f8c375 100644
--- a/ci/check_imports.py
+++ b/ci/check_imports.py
@@ -5,6 +5,7 @@
blacklist = {
'bs4',
+ 'gcsfs',
'html5lib',
'ipython',
'jinja2'
diff --git a/ci/circle-36-locale_slow.yaml b/ci/circle-36-locale_slow.yaml
index cc852c1e2aeeb..f44e98e1ee09d 100644
--- a/ci/circle-36-locale_slow.yaml
+++ b/ci/circle-36-locale_slow.yaml
@@ -5,6 +5,7 @@ channels:
dependencies:
- beautifulsoup4
- cython
+ - gcsfs
- html5lib
- ipython
- jinja2
diff --git a/ci/requirements-optional-conda.txt b/ci/requirements-optional-conda.txt
index e8cfcdf80f2e8..9e4e8e99b5205 100644
--- a/ci/requirements-optional-conda.txt
+++ b/ci/requirements-optional-conda.txt
@@ -3,6 +3,7 @@ blosc
bottleneck
fastparquet
feather-format
+gcsfs
html5lib
ipython>=5.6.0
ipykernel
diff --git a/ci/requirements-optional-pip.txt b/ci/requirements-optional-pip.txt
index 877c52fa0b4fd..3cce3f5339883 100644
--- a/ci/requirements-optional-pip.txt
+++ b/ci/requirements-optional-pip.txt
@@ -5,6 +5,7 @@ blosc
bottleneck
fastparquet
feather-format
+gcsfs
html5lib
ipython>=5.6.0
ipykernel
diff --git a/ci/travis-27.yaml b/ci/travis-27.yaml
index 22b993a2da886..482b888b88062 100644
--- a/ci/travis-27.yaml
+++ b/ci/travis-27.yaml
@@ -9,6 +9,7 @@ dependencies:
- fastparquet
- feather-format
- flake8=3.4.1
+ - gcsfs
- html5lib
- ipython
- jemalloc=4.5.0.post
diff --git a/ci/travis-36.yaml b/ci/travis-36.yaml
index 006276ba1a65f..ff4f1a4a86f99 100644
--- a/ci/travis-36.yaml
+++ b/ci/travis-36.yaml
@@ -8,6 +8,7 @@ dependencies:
- dask
- fastparquet
- feather-format
+ - gcsfs
- geopandas
- html5lib
- ipython
diff --git a/doc/source/install.rst b/doc/source/install.rst
index fa6b9f4fc7f4d..a8c5194124829 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -276,6 +276,7 @@ Optional Dependencies
* `Jinja2 <http://jinja.pocoo.org/>`__: Template engine for conditional HTML formatting.
* `s3fs <http://s3fs.readthedocs.io/>`__: necessary for Amazon S3 access (s3fs >= 0.0.7).
* `blosc <https://pypi.org/project/blosc>`__: for msgpack compression using ``blosc``
+* `gcsfs <http://gcsfs.readthedocs.io/>`__: necessary for Google Cloud Storage access (gcsfs >= 0.1.0).
* One of
`qtpy <https://github.com/spyder-ide/qtpy>`__ (requires PyQt or PySide),
`PyQt5 <https://www.riverbankcomputing.com/software/pyqt/download5>`__,
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index a63276efc5b7c..0fe036a2ee70f 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -18,7 +18,7 @@ Other Enhancements
- :func:`Series.mode` and :func:`DataFrame.mode` now support the ``dropna`` parameter which can be used to specify whether NaN/NaT values should be considered (:issue:`17534`)
- :func:`to_csv` now supports ``compression`` keyword when a file handle is passed. (:issue:`21227`)
- :meth:`Index.droplevel` is now implemented also for flat indexes, for compatibility with :class:`MultiIndex` (:issue:`21115`)
-
+- Added support for reading from Google Cloud Storage via the ``gcsfs`` library (:issue:`19454`)
.. _whatsnew_0240.api_breaking:
diff --git a/pandas/conftest.py b/pandas/conftest.py
index d6b18db4e71f2..b4a599758417c 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1,3 +1,5 @@
+import importlib
+
import pytest
import numpy as np
@@ -249,3 +251,17 @@ def any_int_dtype(request):
"""
return request.param
+
+
+@pytest.fixture
+def mock():
+ """
+ Fixture providing the 'mock' module.
+
+ Uses 'unittest.mock' for Python 3. Attempts to import the 3rd party 'mock'
+ package for Python 2, skipping if not present.
+ """
+ if PY3:
+ return importlib.import_module("unittest.mock")
+ else:
+ return pytest.importorskip("mock")
diff --git a/pandas/io/common.py b/pandas/io/common.py
index ac9077f2db50e..6d579fc8a8a09 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -88,7 +88,7 @@ def _is_url(url):
"""
try:
return parse_url(url).scheme in _VALID_URLS
- except:
+ except Exception:
return False
@@ -165,7 +165,15 @@ def is_s3_url(url):
"""Check for an s3, s3n, or s3a url"""
try:
return parse_url(url).scheme in ['s3', 's3n', 's3a']
- except: # noqa
+ except Exception:
+ return False
+
+
+def is_gcs_url(url):
+ """Check for a gcs url"""
+ try:
+ return parse_url(url).scheme in ['gcs', 'gs']
+ except Exception:
return False
@@ -208,6 +216,13 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
compression=compression,
mode=mode)
+ if is_gcs_url(filepath_or_buffer):
+ from pandas.io import gcs
+ return gcs.get_filepath_or_buffer(filepath_or_buffer,
+ encoding=encoding,
+ compression=compression,
+ mode=mode)
+
if isinstance(filepath_or_buffer, (compat.string_types,
compat.binary_type,
mmap.mmap)):
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index e86d33742b266..793a95ffb0ee7 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -46,7 +46,7 @@
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object, pandas ExcelFile, or xlrd workbook.
The string could be a URL. Valid URL schemes include http, ftp, s3,
- and file. For file URLs, a host is expected. For instance, a local
+ gcs, and file. For file URLs, a host is expected. For instance, a local
file could be file://localhost/path/to/workbook.xlsx
sheet_name : string, int, mixed list of strings/ints, or None, default 0
diff --git a/pandas/io/gcs.py b/pandas/io/gcs.py
new file mode 100644
index 0000000000000..aa1cb648f05d1
--- /dev/null
+++ b/pandas/io/gcs.py
@@ -0,0 +1,16 @@
+""" GCS support for remote file interactivity """
+try:
+ import gcsfs
+except ImportError:
+ raise ImportError("The gcsfs library is required to handle GCS files")
+
+
+def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
+ compression=None, mode=None):
+
+ if mode is None:
+ mode = 'rb'
+
+ fs = gcsfs.GCSFileSystem()
+ filepath_or_buffer = fs.open(filepath_or_buffer, mode)
+ return filepath_or_buffer, None, compression, True
diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 1627b2f4d3ec3..9992be521d61f 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -231,9 +231,9 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
Parameters
----------
path_or_buf : a valid JSON string or file-like, default: None
- The string could be a URL. Valid URL schemes include http, ftp, s3, and
- file. For file URLs, a host is expected. For instance, a local file
- could be ``file://localhost/path/to/table.json``
+ The string could be a URL. Valid URL schemes include http, ftp, s3,
+ gcs, and file. For file URLs, a host is expected. For instance, a local
+ file could be ``file://localhost/path/to/table.json``
orient : string,
Indication of expected JSON string format.
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index b4f5d67530fbd..65527ac1b278f 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -20,7 +20,7 @@
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
-from pandas.compat import u, PY2, PY3, StringIO, lrange
+from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
@@ -128,7 +128,7 @@ def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
-def test_is_file_like():
+def test_is_file_like(mock):
class MockFile(object):
pass
@@ -166,10 +166,7 @@ class MockFile(object):
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
-
- if PY3:
- from unittest import mock
- assert not is_file(mock.Mock())
+ assert not is_file(mock.Mock())
@pytest.mark.parametrize(
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index b39122e5e7906..6e1d3575a1481 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -1546,7 +1546,7 @@ def test_file_handles(self):
assert not m.closed
m.close()
- def test_invalid_file_buffer(self):
+ def test_invalid_file_buffer(self, mock):
# see gh-15337
class InvalidBuffer(object):
@@ -1577,11 +1577,8 @@ def seek(self, pos, whence=0):
tm.assert_frame_equal(result, expected)
- if PY3:
- from unittest import mock
-
- with tm.assert_raises_regex(ValueError, msg):
- self.read_csv(mock.Mock())
+ with tm.assert_raises_regex(ValueError, msg):
+ self.read_csv(mock.Mock())
@tm.capture_stderr
def test_skip_bad_lines(self):
diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py
new file mode 100644
index 0000000000000..251c93df0733d
--- /dev/null
+++ b/pandas/tests/io/test_gcs.py
@@ -0,0 +1,47 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame, date_range, read_csv
+from pandas.compat import StringIO
+from pandas.io.common import is_gcs_url
+from pandas.util import _test_decorators as td
+from pandas.util.testing import assert_frame_equal
+
+
+def test_is_gcs_url():
+ assert is_gcs_url("gcs://pandas/somethingelse.com")
+ assert is_gcs_url("gs://pandas/somethingelse.com")
+ assert not is_gcs_url("s3://pandas/somethingelse.com")
+
+
+@td.skip_if_no('gcsfs')
+def test_read_csv_gcs(mock):
+ df1 = DataFrame({'int': [1, 3], 'float': [2.0, np.nan], 'str': ['t', 's'],
+ 'dt': date_range('2018-06-18', periods=2)})
+ with mock.patch('gcsfs.GCSFileSystem') as MockFileSystem:
+ instance = MockFileSystem.return_value
+ instance.open.return_value = StringIO(df1.to_csv(index=False))
+ df2 = read_csv('gs://test/test.csv', parse_dates=['dt'])
+
+ assert_frame_equal(df1, df2)
+
+
+@td.skip_if_no('gcsfs')
+def test_gcs_get_filepath_or_buffer(mock):
+ df1 = DataFrame({'int': [1, 3], 'float': [2.0, np.nan], 'str': ['t', 's'],
+ 'dt': date_range('2018-06-18', periods=2)})
+ with mock.patch('pandas.io.gcs.get_filepath_or_buffer') as MockGetFilepath:
+ MockGetFilepath.return_value = (StringIO(df1.to_csv(index=False)),
+ None, None, False)
+ df2 = read_csv('gs://test/test.csv', parse_dates=['dt'])
+
+ assert_frame_equal(df1, df2)
+ assert MockGetFilepath.called
+
+
+@pytest.mark.skipif(td.safe_import('gcsfs'),
+ reason='Only check when gcsfs not installed')
+def test_gcs_not_present_exception():
+ with pytest.raises(ImportError) as e:
+ read_csv('gs://test/test.csv')
+ assert 'gcsfs library is required' in str(e.value)
diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index 83c1433bf5c39..01198fc541e0c 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -96,6 +96,7 @@ def show_versions(as_json=False):
("fastparquet", lambda mod: mod.__version__),
("pandas_gbq", lambda mod: mod.__version__),
("pandas_datareader", lambda mod: mod.__version__),
+ ("gcsfs", lambda mod: mod.__version__),
]
deps_blob = list()
| - [x] closes #19454
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Couple of remaining issues:
- I had to do some weird logging magic to suppress the output of `gcsfs` when catching authentication exceptions
- Proper Parquet support seems to require more specialization so I didn't try
- I'm not sure how to properly test this functionality without a `pandas-test` bucket and/or a mock GCS library like `moto`
cc @martindurant who might have some thoughts on other things I am doing wrong | https://api.github.com/repos/pandas-dev/pandas/pulls/20729 | 2018-04-18T01:48:25Z | 2018-06-26T14:59:24Z | 2018-06-26T14:59:24Z | 2018-07-02T15:37:17Z |
BUG: Fix a bug in plotting when using color array. #20726 | diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index ce629816a14c3..067f2e08ce72e 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1595,6 +1595,7 @@ Plotting
- Bug in :func:`DataFrame.plot.scatter` and :func:`DataFrame.plot.hexbin` caused x-axis label and ticklabels to disappear when colorbar was on in IPython inline backend (:issue:`10611`, :issue:`10678`, and :issue:`20455`)
- Bug in plotting a Series with datetimes using :func:`matplotlib.axes.Axes.scatter` (:issue:`22039`)
+- Bug in validating color parameter caused extra color to be appended to the given color array. This happened to multiple plotting functions using matplotlib. (:issue:`20726`)
Groupby/Resample/Rolling
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py
index 930c3d1775ad8..d50fa48c92cf5 100644
--- a/pandas/plotting/_style.py
+++ b/pandas/plotting/_style.py
@@ -81,7 +81,10 @@ def _maybe_valid_colors(colors):
# mpl will raise error any of them is invalid
pass
- if len(colors) != num_colors:
+ # Append more colors by cycling if there is not enough color.
+ # Extra colors will be ignored by matplotlib if there are more colors
+ # than needed and nothing needs to be done here.
+ if len(colors) < num_colors:
try:
multiple = num_colors // len(colors) - 1
except ZeroDivisionError:
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 9ae3e7fc423f4..5d9ece09c9dcc 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -309,3 +309,21 @@ def test_get_standard_colors_random_seed(self):
color1 = _get_standard_colors(1, color_type='random')
color2 = _get_standard_colors(1, color_type='random')
assert color1 == color2
+
+ def test_get_standard_colors_no_appending(self):
+ # GH20726
+
+ # Make sure not to add more colors so that matplotlib can cycle
+ # correctly.
+ from matplotlib import cm
+ color_before = cm.gnuplot(range(5))
+ color_after = plotting._style._get_standard_colors(
+ 1, color=color_before)
+ assert len(color_after) == len(color_before)
+
+ df = DataFrame(np.random.randn(48, 4), columns=list("ABCD"))
+
+ color_list = cm.gnuplot(np.linspace(0, 1, 16))
+ p = df.A.plot.bar(figsize=(16, 7), color=color_list)
+ assert (p.patches[1].get_facecolor()
+ == p.patches[17].get_facecolor())
| - [x] closes #20726
- [x] tests added / passed
- [x] passes `git diff master --name-only -- "*.py" | flake8`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/20727 | 2018-04-17T23:50:24Z | 2018-12-31T13:21:18Z | 2018-12-31T13:21:17Z | 2018-12-31T13:21:21Z |
DOC: add Raises, Examples and See Also sections to methods at_time/between_time/first/last | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index eb6c212731822..05e0028047941 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -916,6 +916,10 @@ Datetimelike API Changes
- :class:`CacheableOffset` and :class:`WeekDay` are no longer available in the ``pandas.tseries.offsets`` module (:issue:`17830`)
- ``pandas.tseries.frequencies.get_freq_group()`` and ``pandas.tseries.frequencies.DAYS`` are removed from the public API (:issue:`18034`)
- :func:`Series.truncate` and :func:`DataFrame.truncate` will raise a ``ValueError`` if the index is not sorted instead of an unhelpful ``KeyError`` (:issue:`17935`)
+- :attr:`Series.first` and :attr:`DataFrame.first` will now raise a ``TypeError``
+ rather than ``NotImplementedError`` when index is not a :class:`DatetimeIndex`` (:issue:`20725`).
+- :attr:`Series.last` and :attr:`DateFrame.last` will now raise a ``TypeError``
+ rather than ``NotImplementedError`` when index is not a :class:`DatetimeIndex`` (:issue:`20725`).
- Restricted ``DateOffset`` keyword arguments. Previously, ``DateOffset`` subclasses allowed arbitrary keyword arguments which could lead to unexpected behavior. Now, only valid arguments will be accepted. (:issue:`17176`, :issue:`18226`).
- :func:`pandas.merge` provides a more informative error message when trying to merge on timezone-aware and timezone-naive columns (:issue:`15800`)
- For :class:`DatetimeIndex` and :class:`TimedeltaIndex` with ``freq=None``, addition or subtraction of integer-dtyped array or ``Index`` will raise ``NullFrequencyError`` instead of ``TypeError`` (:issue:`19895`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 343f36eabc0d7..9e4eda1bc4dc7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6761,6 +6761,11 @@ def at_time(self, time, asof=False):
"""
Select values at particular time of day (e.g. 9:30AM).
+ Raises
+ ------
+ TypeError
+ If the index is not a :class:`DatetimeIndex`
+
Parameters
----------
time : datetime.time or string
@@ -6768,6 +6773,30 @@ def at_time(self, time, asof=False):
Returns
-------
values_at_time : type of caller
+
+ Examples
+ --------
+ >>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
+ >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
+ >>> ts
+ A
+ 2018-04-09 00:00:00 1
+ 2018-04-09 12:00:00 2
+ 2018-04-10 00:00:00 3
+ 2018-04-10 12:00:00 4
+
+ >>> ts.at_time('12:00')
+ A
+ 2018-04-09 12:00:00 2
+ 2018-04-10 12:00:00 4
+
+ See Also
+ --------
+ between_time : Select values between particular times of the day
+ first : Select initial periods of time series based on a date offset
+ last : Select final periods of time series based on a date offset
+ DatetimeIndex.indexer_at_time : Get just the index locations for
+ values at particular time of the day
"""
try:
indexer = self.index.indexer_at_time(time, asof=asof)
@@ -6780,6 +6809,14 @@ def between_time(self, start_time, end_time, include_start=True,
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
+ By setting ``start_time`` to be later than ``end_time``,
+ you can get the times that are *not* between the two times.
+
+ Raises
+ ------
+ TypeError
+ If the index is not a :class:`DatetimeIndex`
+
Parameters
----------
start_time : datetime.time or string
@@ -6790,6 +6827,38 @@ def between_time(self, start_time, end_time, include_start=True,
Returns
-------
values_between_time : type of caller
+
+ Examples
+ --------
+ >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
+ >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
+ >>> ts
+ A
+ 2018-04-09 00:00:00 1
+ 2018-04-10 00:20:00 2
+ 2018-04-11 00:40:00 3
+ 2018-04-12 01:00:00 4
+
+ >>> ts.between_time('0:15', '0:45')
+ A
+ 2018-04-10 00:20:00 2
+ 2018-04-11 00:40:00 3
+
+ You get the times that are *not* between two times by setting
+ ``start_time`` later than ``end_time``:
+
+ >>> ts.between_time('0:45', '0:15')
+ A
+ 2018-04-09 00:00:00 1
+ 2018-04-12 01:00:00 4
+
+ See Also
+ --------
+ at_time : Select values at a particular time of the day
+ first : Select initial periods of time series based on a date offset
+ last : Select final periods of time series based on a date offset
+ DatetimeIndex.indexer_between_time : Get just the index locations for
+ values between particular times of the day
"""
try:
indexer = self.index.indexer_between_time(
@@ -7043,22 +7112,50 @@ def first(self, offset):
Convenience method for subsetting initial periods of time series data
based on a date offset.
+ Raises
+ ------
+ TypeError
+ If the index is not a :class:`DatetimeIndex`
+
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Examples
--------
- ts.first('10D') -> First 10 days
+ >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
+ >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
+ >>> ts
+ A
+ 2018-04-09 1
+ 2018-04-11 2
+ 2018-04-13 3
+ 2018-04-15 4
+
+ Get the rows for the first 3 days:
+
+ >>> ts.first('3D')
+ A
+ 2018-04-09 1
+ 2018-04-11 2
+
+ Notice the data for 3 first calender days were returned, not the first
+ 3 days observed in the dataset, and therefore data for 2018-04-13 was
+ not returned.
Returns
-------
subset : type of caller
+
+ See Also
+ --------
+ last : Select final periods of time series based on a date offset
+ at_time : Select values at a particular time of the day
+ between_time : Select values between particular times of the day
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
- raise NotImplementedError("'first' only supports a DatetimeIndex "
- "index")
+ raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
@@ -7079,22 +7176,50 @@ def last(self, offset):
Convenience method for subsetting final periods of time series data
based on a date offset.
+ Raises
+ ------
+ TypeError
+ If the index is not a :class:`DatetimeIndex`
+
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Examples
--------
- ts.last('5M') -> Last 5 months
+ >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
+ >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
+ >>> ts
+ A
+ 2018-04-09 1
+ 2018-04-11 2
+ 2018-04-13 3
+ 2018-04-15 4
+
+ Get the rows for the last 3 days:
+
+ >>> ts.last('3D')
+ A
+ 2018-04-13 3
+ 2018-04-15 4
+
+ Notice the data for 3 last calender days were returned, not the last
+ 3 observed days in the dataset, and therefore data for 2018-04-11 was
+ not returned.
Returns
-------
subset : type of caller
+
+ See Also
+ --------
+ first : Select initial periods of time series based on a date offset
+ at_time : Select values at a particular time of the day
+ between_time : Select values between particular times of the day
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
- raise NotImplementedError("'last' only supports a DatetimeIndex "
- "index")
+ raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index e9ab443a978f8..1b5aa3b45f3b5 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -2368,15 +2368,23 @@ def tz_localize(self, tz, ambiguous='raise', errors='raise'):
def indexer_at_time(self, time, asof=False):
"""
- Select values at particular time of day (e.g. 9:30AM)
+ Returns index locations of index values at particular time of day
+ (e.g. 9:30AM).
Parameters
----------
time : datetime.time or string
+ datetime.time or string in appropriate format ("%H:%M", "%H%M",
+ "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
+ "%I%M%S%p").
Returns
-------
- values_at_time : TimeSeries
+ values_at_time : array of integers
+
+ See Also
+ --------
+ indexer_between_time, DataFrame.at_time
"""
from dateutil.parser import parse
@@ -2398,24 +2406,25 @@ def indexer_at_time(self, time, asof=False):
def indexer_between_time(self, start_time, end_time, include_start=True,
include_end=True):
"""
- Select values between particular times of day (e.g., 9:00-9:30AM).
-
- Return values of the index between two times. If start_time or
- end_time are strings then tseries.tools.to_time is used to convert to
- a time object.
+ Return index locations of values between particular times of day
+ (e.g., 9:00-9:30AM).
Parameters
----------
start_time, end_time : datetime.time, str
datetime.time or string in appropriate format ("%H:%M", "%H%M",
"%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
- "%I%M%S%p")
+ "%I%M%S%p").
include_start : boolean, default True
include_end : boolean, default True
Returns
-------
- values_between_time : TimeSeries
+ values_between_time : array of integers
+
+ See Also
+ --------
+ indexer_at_time, DataFrame.between_time
"""
start_time = tools.to_time(start_time)
end_time = tools.to_time(end_time)
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py
index 277c3c9bc5c23..90fbc6e628369 100644
--- a/pandas/tests/frame/test_timeseries.py
+++ b/pandas/tests/frame/test_timeseries.py
@@ -539,7 +539,59 @@ def test_first_last_valid(self):
assert frame.first_valid_index().freq == frame.index.freq
assert frame.last_valid_index().freq == frame.index.freq
- def test_at_time_frame(self):
+ def test_first_subset(self):
+ ts = tm.makeTimeDataFrame(freq='12h')
+ result = ts.first('10d')
+ assert len(result) == 20
+
+ ts = tm.makeTimeDataFrame(freq='D')
+ result = ts.first('10d')
+ assert len(result) == 10
+
+ result = ts.first('3M')
+ expected = ts[:'3/31/2000']
+ assert_frame_equal(result, expected)
+
+ result = ts.first('21D')
+ expected = ts[:21]
+ assert_frame_equal(result, expected)
+
+ result = ts[:0].first('3M')
+ assert_frame_equal(result, ts[:0])
+
+ def test_first_raises(self):
+ # GH20725
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ with pytest.raises(TypeError): # index is not a DatetimeIndex
+ df.first('1D')
+
+ def test_last_subset(self):
+ ts = tm.makeTimeDataFrame(freq='12h')
+ result = ts.last('10d')
+ assert len(result) == 20
+
+ ts = tm.makeTimeDataFrame(nper=30, freq='D')
+ result = ts.last('10d')
+ assert len(result) == 10
+
+ result = ts.last('21D')
+ expected = ts['2000-01-10':]
+ assert_frame_equal(result, expected)
+
+ result = ts.last('21D')
+ expected = ts[-21:]
+ assert_frame_equal(result, expected)
+
+ result = ts[:0].last('3M')
+ assert_frame_equal(result, ts[:0])
+
+ def test_last_raises(self):
+ # GH20725
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ with pytest.raises(TypeError): # index is not a DatetimeIndex
+ df.last('1D')
+
+ def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
@@ -569,7 +621,13 @@ def test_at_time_frame(self):
rs = ts.at_time('16:00')
assert len(rs) == 0
- def test_between_time_frame(self):
+ def test_at_time_raises(self):
+ # GH20725
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ with pytest.raises(TypeError): # index is not a DatetimeIndex
+ df.at_time('00:00')
+
+ def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
@@ -629,6 +687,12 @@ def test_between_time_frame(self):
else:
assert (t < etime) or (t >= stime)
+ def test_between_time_raises(self):
+ # GH20725
+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+ with pytest.raises(TypeError): # index is not a DatetimeIndex
+ df.between_time(start_time='00:00', end_time='12:00')
+
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT,
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index 8e537b137baaf..376b4d71f81e8 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -628,6 +628,12 @@ def test_first_subset(self):
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
+ def test_first_raises(self):
+ # GH20725
+ ser = pd.Series('a b c'.split())
+ with pytest.raises(TypeError): # index is not a DatetimeIndex
+ ser.first('1D')
+
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
@@ -648,6 +654,12 @@ def test_last_subset(self):
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
+ def test_last_raises(self):
+ # GH20725
+ ser = pd.Series('a b c'.split())
+ with pytest.raises(TypeError): # index is not a DatetimeIndex
+ ser.last('1D')
+
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
@@ -696,6 +708,12 @@ def test_at_time(self):
rs = ts.at_time('16:00')
assert len(rs) == 0
+ def test_at_time_raises(self):
+ # GH20725
+ ser = pd.Series('a b c'.split())
+ with pytest.raises(TypeError): # index is not a DatetimeIndex
+ ser.at_time('00:00')
+
def test_between(self):
series = Series(date_range('1/1/2000', periods=10))
left, right = series[[2, 7]]
@@ -764,6 +782,12 @@ def test_between_time(self):
else:
assert (t < etime) or (t >= stime)
+ def test_between_time_raises(self):
+ # GH20725
+ ser = pd.Series('a b c'.split())
+ with pytest.raises(TypeError): # index is not a DatetimeIndex
+ ser.between_time(start_time='00:00', end_time='12:00')
+
def test_between_time_types(self):
# GH11818
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
| Added note that methods ``at_time``/``between_time``/``first``/``last`` requires that the index is a DateTimeIndex.
Added functioning examples to the method doc strings.
Added See Also section to the mentioned method doc string.
EDIT: The doc strings for ``DatetimeIndex.indexer_at_time`` and ``DatetimeIndex.indexer_between_time`` said they return a timeseries, while in reality they return arrays. I've corrected that, and made some minor updates to those doc strings in addition. | https://api.github.com/repos/pandas-dev/pandas/pulls/20725 | 2018-04-17T20:08:51Z | 2018-05-07T10:43:14Z | 2018-05-07T10:43:14Z | 2018-05-18T18:09:05Z |
DEPR: Series ndarray properties (strides, data, base, itemsize, flags) | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index e19aedac80213..1b3b5fcac7bcf 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -885,6 +885,9 @@ Deprecations
- The ``convert_datetime64`` parameter in :func:`DataFrame.to_records` has been deprecated and will be removed in a future version. The NumPy bug motivating this parameter has been resolved. The default value for this parameter has also changed from ``True`` to ``None`` (:issue:`18160`).
- :func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`,
:func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` have deprecated passing an ``np.array`` by default. One will need to pass the new ``raw`` parameter to be explicit about what is passed (:issue:`20584`)
+- The ``data``, ``base``, ``strides``, ``flags`` and ``itemsize`` properties
+ of the ``Series`` and ``Index`` classes have been deprecated and will be
+ removed in a future version (:issue:`20419`).
- ``DatetimeIndex.offset`` is deprecated. Use ``DatetimeIndex.freq`` instead (:issue:`20716`)
.. _whatsnew_0230.prior_deprecations:
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 065a5782aced1..491d8a628f1d6 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1491,6 +1491,8 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,
if is_sparse(arr):
arr = arr.get_values()
+ arr = np.asarray(arr)
+
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.int64)
dtype, fill_value = arr.dtype, arr.dtype.type()
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 0d55fa8b97aae..9ca1c8bea4db7 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -737,11 +737,17 @@ def item(self):
@property
def data(self):
""" return the data pointer of the underlying data """
+ warnings.warn("{obj}.data is deprecated and will be removed "
+ "in a future version".format(obj=type(self).__name__),
+ FutureWarning, stacklevel=2)
return self.values.data
@property
def itemsize(self):
""" return the size of the dtype of the item of the underlying data """
+ warnings.warn("{obj}.itemsize is deprecated and will be removed "
+ "in a future version".format(obj=type(self).__name__),
+ FutureWarning, stacklevel=2)
return self._ndarray_values.itemsize
@property
@@ -752,6 +758,9 @@ def nbytes(self):
@property
def strides(self):
""" return the strides of the underlying data """
+ warnings.warn("{obj}.strudes is deprecated and will be removed "
+ "in a future version".format(obj=type(self).__name__),
+ FutureWarning, stacklevel=2)
return self._ndarray_values.strides
@property
@@ -762,6 +771,9 @@ def size(self):
@property
def flags(self):
""" return the ndarray.flags for the underlying data """
+ warnings.warn("{obj}.flags is deprecated and will be removed "
+ "in a future version".format(obj=type(self).__name__),
+ FutureWarning, stacklevel=2)
return self.values.flags
@property
@@ -769,6 +781,9 @@ def base(self):
""" return the base object if the memory of the underlying data is
shared
"""
+ warnings.warn("{obj}.base is deprecated and will be removed "
+ "in a future version".format(obj=type(self).__name__),
+ FutureWarning, stacklevel=2)
return self.values.base
@property
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 12bb09e8f8a8a..734b002691d1c 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -548,6 +548,9 @@ def _deepcopy_if_needed(self, orig, copy=False):
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
+ if not isinstance(orig, np.ndarray):
+ # orig is a DatetimeIndex
+ orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index e98899b2f5c1a..a266ea620bd9f 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2816,6 +2816,12 @@ def _maybe_coerce_values(self, values, dtype=None):
return values
+ @property
+ def is_view(self):
+ """ return a boolean if I am possibly a view """
+ # check the ndarray values of the DatetimeIndex values
+ return self.values.values.base is not None
+
def copy(self, deep=True, mgr=None):
""" copy constructor """
values = self.values
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py
index 7c6cb5b9615cb..a10f7f6e46210 100644
--- a/pandas/tests/groupby/aggregate/test_other.py
+++ b/pandas/tests/groupby/aggregate/test_other.py
@@ -328,7 +328,7 @@ def test_series_agg_multi_pure_python():
'F': np.random.randn(11)})
def bad(x):
- assert (len(x.base) > 0)
+ assert (len(x.values.base) > 0)
return 'foo'
result = data.groupby(['A', 'B']).agg(bad)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index bb892f92f213e..e05f9de5ea7f4 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1066,7 +1066,7 @@ def convert_fast(x):
def convert_force_pure(x):
# base will be length 0
- assert (len(x.base) > 0)
+ assert (len(x.values.base) > 0)
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 758f3f0ef9ebc..f78bd583288a4 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -24,7 +24,7 @@
class Base(object):
""" base class for index sub-class tests """
_holder = None
- _compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
+ _compat_props = ['shape', 'ndim', 'size', 'nbytes']
def setup_indices(self):
for name, idx in self.indices.items():
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 984f37042d600..e189b6e856cb7 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -30,7 +30,7 @@
class TestMultiIndex(Base):
_holder = MultiIndex
- _compat_props = ['shape', 'ndim', 'size', 'itemsize']
+ _compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 1ebeef072fdc5..8990834ebe91a 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -22,7 +22,7 @@
class TestRangeIndex(Numeric):
_holder = RangeIndex
- _compat_props = ['shape', 'ndim', 'size', 'itemsize']
+ _compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
self.indices = dict(index=RangeIndex(0, 20, 2, name='foo'),
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 9338aba90d7cb..7fbf7ec05e91e 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -466,7 +466,11 @@ def test_copy(self, mgr):
# view assertion
assert cp_blk.equals(blk)
- assert cp_blk.values.base is blk.values.base
+ if isinstance(blk.values, np.ndarray):
+ assert cp_blk.values.base is blk.values.base
+ else:
+ # DatetimeTZBlock has DatetimeIndex values
+ assert cp_blk.values.values.base is blk.values.values.base
cp = mgr.copy(deep=True)
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
@@ -474,8 +478,8 @@ def test_copy(self, mgr):
# copy assertion we either have a None for a base or in case of
# some blocks it is an array (e.g. datetimetz), but was copied
assert cp_blk.equals(blk)
- if cp_blk.values.base is not None and blk.values.base is not None:
- assert cp_blk.values.base is not blk.values.base
+ if not isinstance(cp_blk.values, np.ndarray):
+ assert cp_blk.values.values.base is not blk.values.values.base
else:
assert cp_blk.values.base is None and blk.values.base is None
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index c4c02c0bf6f17..a5d83c1c26948 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -316,16 +316,22 @@ def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
- for p in ['shape', 'dtype', 'flags', 'T',
- 'strides', 'itemsize', 'nbytes']:
+ for p in ['shape', 'dtype', 'T', 'nbytes']:
assert getattr(o, p, None) is not None
- assert hasattr(o, 'base')
+ # deprecated properties
+ for p in ['flags', 'strides', 'itemsize']:
+ with tm.assert_produces_warning(FutureWarning):
+ assert getattr(o, p, None) is not None
+
+ with tm.assert_produces_warning(FutureWarning):
+ assert hasattr(o, 'base')
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
- assert o.data is not None
+ with tm.assert_produces_warning(FutureWarning):
+ assert o.data is not None
except ValueError:
pass
| - [x] closes #20419
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/20721 | 2018-04-17T14:50:25Z | 2018-04-25T21:07:22Z | 2018-04-25T21:07:22Z | 2020-05-23T14:54:48Z |
PKG: remove pyproject.toml for now | diff --git a/pyproject.toml b/pyproject.toml
deleted file mode 100644
index f0d57d1d808a2..0000000000000
--- a/pyproject.toml
+++ /dev/null
@@ -1,9 +0,0 @@
-[build-system]
-requires = [
- "wheel",
- "setuptools",
- "Cython", # required for VCS build, optional for released source
- "numpy==1.9.3; python_version=='3.5'",
- "numpy==1.12.1; python_version=='3.6'",
- "numpy==1.13.1; python_version>='3.7'",
-]
| Since this gives problems, it might be better to remove this until support in pip is better?
Eg I also have errors on the geopandas CI when installing pandas master with pip due to this.
Scipy did something similar: https://github.com/scipy/scipy/pull/8735 | https://api.github.com/repos/pandas-dev/pandas/pulls/20718 | 2018-04-17T09:18:17Z | 2018-04-18T08:52:02Z | 2018-04-18T08:52:02Z | 2018-04-18T08:52:09Z |
DOC: Change the indent for the pydoc of apply() function. | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 1d6f770d92795..2ed4c99b7a998 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3130,7 +3130,7 @@ def apply(self, func, convert_dtype=True, args=(), **kwds):
>>> def add_custom_values(x, **kwargs):
... for month in kwargs:
... x+=kwargs[month]
- ... return x
+ ... return x
>>> series.apply(add_custom_values, june=30, july=20, august=25)
London 95
| report a bug of pydoc of apply function. | https://api.github.com/repos/pandas-dev/pandas/pulls/20715 | 2018-04-16T22:21:19Z | 2018-04-17T10:34:14Z | 2018-04-17T10:34:14Z | 2018-04-17T22:18:33Z |
DOC: Correct documentation to GroupBy.rank | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 7c89cab6b1428..8c20d62117e25 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1841,24 +1841,27 @@ def cumcount(self, ascending=True):
@Appender(_doc_template)
def rank(self, method='average', ascending=True, na_option='keep',
pct=False, axis=0):
- """Provides the rank of values within each group
+ """
+ Provides the rank of values within each group.
Parameters
----------
- method : {'average', 'min', 'max', 'first', 'dense'}, efault 'average'
+ method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
- method : {'keep', 'top', 'bottom'}, default 'keep'
+ ascending : boolean, default True
+ False for ranks by high (1) to low (N)
+ na_option : {'keep', 'top', 'bottom'}, default 'keep'
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
- ascending : boolean, default True
- False for ranks by high (1) to low (N)
pct : boolean, default False
Compute percentage rank of data within each group
+ axis : int, default 0
+ The axis of the object over which to compute the rank.
Returns
-----
| Closes #20694.
cc @peterpanmj | https://api.github.com/repos/pandas-dev/pandas/pulls/20708 | 2018-04-15T19:49:41Z | 2018-04-16T10:36:20Z | 2018-04-16T10:36:20Z | 2018-04-16T11:30:41Z |
DOC: Various EA docs | diff --git a/doc/source/extending.rst b/doc/source/extending.rst
index 25c4ba4a4a2a3..b94a43480ed93 100644
--- a/doc/source/extending.rst
+++ b/doc/source/extending.rst
@@ -57,6 +57,13 @@ If you write a custom accessor, make a pull request adding it to our
Extension Types
---------------
+.. versionadded:: 0.23.0
+
+.. warning::
+
+ The ``ExtensionDtype`` and ``ExtensionArray`` APIs are new and
+ experimental. They may change between versions without warning.
+
Pandas defines an interface for implementing data types and arrays that *extend*
NumPy's type system. Pandas itself uses the extension system for some types
that aren't built into NumPy (categorical, period, interval, datetime with
@@ -106,6 +113,24 @@ by some other storage type, like Python lists.
See the `extension array source`_ for the interface definition. The docstrings
and comments contain guidance for properly implementing the interface.
+We provide a test suite for ensuring that your extension arrays satisfy the expected
+behavior. To use the test suite, you must provide several pytest fixtures and inherit
+from the base test class. The required fixtures are found in
+https://github.com/pandas-dev/pandas/blob/master/pandas/tests/extension/conftest.py.
+
+To use a test, subclass it:
+
+.. code-block:: python
+
+ from pandas.tests.extension import base
+
+ class TestConstructors(base.BaseConstructorsTests):
+ pass
+
+
+See https://github.com/pandas-dev/pandas/blob/master/pandas/tests/extension/base/__init__.py
+for a list of all the tests available.
+
.. _extension dtype source: https://github.com/pandas-dev/pandas/blob/master/pandas/core/dtypes/base.py
.. _extension array source: https://github.com/pandas-dev/pandas/blob/master/pandas/core/arrays/base.py
diff --git a/doc/source/install.rst b/doc/source/install.rst
index fdb22a8dc3380..ce825cefafae4 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -15,7 +15,7 @@ Instructions for installing from source,
`PyPI <http://pypi.python.org/pypi/pandas>`__, `ActivePython <https://www.activestate.com/activepython/downloads>`__, various Linux distributions, or a
`development version <http://github.com/pandas-dev/pandas>`__ are also provided.
-.. _install.dropping_27:
+.. _install.dropping-27:
Plan for dropping Python 2.7
----------------------------
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index c708b477e42f4..43dce44183e3f 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -11,7 +11,7 @@ version.
.. warning::
Starting January 1, 2019, pandas feature releases will support Python 3 only.
- See :ref:`here <install.dropping_27>` for more.
+ See :ref:`install.dropping-27` for more.
.. _whatsnew_0230.enhancements:
@@ -306,8 +306,8 @@ Supplying a ``CategoricalDtype`` will make the categories in each column consist
.. _whatsnew_023.enhancements.extension:
-Extending Pandas with Custom Types
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Extending Pandas with Custom Types (Experimental)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Pandas now supports storing array-like objects that aren't necessarily 1-D NumPy
arrays as columns in a DataFrame or values in a Series. This allows third-party
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index c281bd80cb274..2eaad3980cf08 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -1,4 +1,10 @@
-"""An interface for extending pandas with custom arrays."""
+"""An interface for extending pandas with custom arrays.
+
+.. warning::
+
+ This is an experimental API and subject to breaking changes
+ without warning.
+"""
import numpy as np
from pandas.errors import AbstractMethodError
@@ -14,12 +20,15 @@ class ExtensionArray(object):
with a custom type and will not attempt to coerce them to objects. They
may be stored directly inside a :class:`DataFrame` or :class:`Series`.
+ .. versionadded:: 0.23.0
+
Notes
-----
The interface includes the following abstract methods that must be
implemented by subclasses:
* _constructor_from_sequence
+ * _from_factorized
* __getitem__
* __len__
* dtype
@@ -30,11 +39,21 @@ class ExtensionArray(object):
* _concat_same_type
Some additional methods are available to satisfy pandas' internal, private
- block API.
+ block API:
* _can_hold_na
* _formatting_values
+ Some methods require casting the ExtensionArray to an ndarray of Python
+ objects with ``self.astype(object)``, which may be expensive. When
+ performance is a concern, we highly recommend overriding the following
+ methods:
+
+ * fillna
+ * unique
+ * factorize / _values_for_factorize
+ * argsort / _values_for_argsort
+
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
@@ -50,10 +69,6 @@ class ExtensionArray(object):
by some other storage type, like Python lists. Pandas makes no
assumptions on how the data are stored, just that it can be converted
to a NumPy array.
-
- Extension arrays should be able to be constructed with instances of
- the class, i.e. ``ExtensionArray(extension_array)`` should return
- an instance, not error.
"""
# '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
# Don't override this.
| Closes https://github.com/pandas-dev/pandas/issues/20668
| https://api.github.com/repos/pandas-dev/pandas/pulls/20707 | 2018-04-15T19:20:19Z | 2018-04-21T16:30:41Z | 2018-04-21T16:30:41Z | 2018-04-21T16:30:45Z |
HDFStore.select_column error reporting | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index eee0f1997d081..8f7291bc1a28c 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -1111,6 +1111,7 @@ I/O
- Bug in :meth:`pandas.io.json.json_normalize` where subrecords are not properly normalized if any subrecords values are NoneType (:issue:`20030`)
- Bug in ``usecols`` parameter in :func:`pandas.io.read_csv` and :func:`pandas.io.read_table` where error is not raised correctly when passing a string. (:issue:`20529`)
- Bug in :func:`HDFStore.keys` when reading a file with a softlink causes exception (:issue:`20523`)
+- Bug in :func:`HDFStore.select_column` where a key which is not a valid store raised an ``AttributeError`` instead of a ``KeyError`` (:issue:`17912`)
Plotting
^^^^^^^^
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index f9a496edb45a3..4004a6ea8f6ff 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -887,7 +887,10 @@ def remove(self, key, where=None, start=None, stop=None):
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
- except:
+ except KeyError:
+ # the key is not a valid store, re-raising KeyError
+ raise
+ except Exception:
if where is not None:
raise ValueError(
@@ -899,9 +902,6 @@ def remove(self, key, where=None, start=None, stop=None):
s._f_remove(recursive=True)
return None
- if s is None:
- raise KeyError('No object named %s in the file' % key)
-
# remove the node
if com._all_none(where, start, stop):
s.group._f_remove(recursive=True)
@@ -1094,7 +1094,8 @@ def get_storer(self, key):
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
if group is None:
- return None
+ raise KeyError('No object named {} in the file'.format(key))
+
s = self._create_storer(group)
s.infer_axes()
return s
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index b34723d6cf72c..a6a38e005b9b6 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -3836,8 +3836,15 @@ def test_read_column(self):
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
- store.append('df', df)
+ # GH 17912
+ # HDFStore.select_column should raise a KeyError
+ # exception if the key is not a valid store
+ with pytest.raises(KeyError,
+ message='No object named index in the file'):
+ store.select_column('df', 'index')
+
+ store.append('df', df)
# error
pytest.raises(KeyError, store.select_column, 'df', 'foo')
| - [x] closes #17912
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/20705 | 2018-04-15T15:01:41Z | 2018-04-16T10:30:07Z | 2018-04-16T10:30:07Z | 2018-04-16T10:30:16Z |
Revert DOC/CI: temp pin matplotlib for doc build #20045 | diff --git a/ci/requirements-3.6_DOC.run b/ci/requirements-3.6_DOC.run
index fa9cab32c0ac2..084f38ce17eb2 100644
--- a/ci/requirements-3.6_DOC.run
+++ b/ci/requirements-3.6_DOC.run
@@ -5,7 +5,7 @@ sphinx
nbconvert
nbformat
notebook
-matplotlib=2.1*
+matplotlib
seaborn
scipy
lxml
| Closes https://github.com/pandas-dev/pandas/issues/20031
Need to check the output of `merging.rst`. | https://api.github.com/repos/pandas-dev/pandas/pulls/20704 | 2018-04-15T12:39:36Z | 2018-04-15T18:57:28Z | 2018-04-15T18:57:28Z | 2018-04-15T18:57:39Z |
PERF: enhance MultiIndex.remove_unused_levels when no levels are unused | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index eee0f1997d081..772e80e160579 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -901,6 +901,7 @@ Performance Improvements
- :func:`Series` / :func:`DataFrame` tab completion limits to 100 values, for better performance. (:issue:`18587`)
- Improved performance of :func:`DataFrame.median` with ``axis=1`` when bottleneck is not installed (:issue:`16468`)
- Improved performance of :func:`MultiIndex.get_loc` for large indexes, at the cost of a reduction in performance for small ones (:issue:`18519`)
+- Improved performance of :func:`MultiIndex.remove_unused_levels` when there are no unused levels, at the cost of a reduction in performance when there are (:issue:`19289`)
- Improved performance of pairwise ``.rolling()`` and ``.expanding()`` with ``.cov()`` and ``.corr()`` operations (:issue:`17917`)
- Improved performance of :func:`pandas.core.groupby.GroupBy.rank` (:issue:`15779`)
- Improved performance of variable ``.rolling()`` on ``.min()`` and ``.max()`` (:issue:`19521`)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index d4b9545999bc7..8098f7bb7d246 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1476,27 +1476,35 @@ def remove_unused_levels(self):
changed = False
for lev, lab in zip(self.levels, self.labels):
- uniques = algos.unique(lab)
- na_idx = np.where(uniques == -1)[0]
-
- # nothing unused
- if len(uniques) != len(lev) + len(na_idx):
+ # Since few levels are typically unused, bincount() is more
+ # efficient than unique() - however it only accepts positive values
+ # (and drops order):
+ uniques = np.where(np.bincount(lab + 1) > 0)[0] - 1
+ has_na = int(len(uniques) and (uniques[0] == -1))
+
+ if len(uniques) != len(lev) + has_na:
+ # We have unused levels
changed = True
- if len(na_idx):
+ # Recalculate uniques, now preserving order.
+ # Can easily be cythonized by exploiting the already existing
+ # "uniques" and stop parsing "lab" when all items are found:
+ uniques = algos.unique(lab)
+ if has_na:
+ na_idx = np.where(uniques == -1)[0]
# Just ensure that -1 is in first position:
uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
# labels get mapped from uniques to 0:len(uniques)
# -1 (if present) is mapped to last position
- label_mapping = np.zeros(len(lev) + len(na_idx))
+ label_mapping = np.zeros(len(lev) + has_na)
# ... and reassigned value -1:
- label_mapping[uniques] = np.arange(len(uniques)) - len(na_idx)
+ label_mapping[uniques] = np.arange(len(uniques)) - has_na
lab = label_mapping[lab]
# new levels are simple
- lev = lev.take(uniques[len(na_idx):])
+ lev = lev.take(uniques[has_na:])
new_levels.append(lev)
new_labels.append(lab)
| - [x] closes #19289
- [x] tests passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This uses ``np.bincount`` rather than ``algos.unique`` to check _if_ there are unused levels: since levels indexes are adjacent integers, the former is faster (~50% in my tests). It then fallbacks to ``algos.unique`` if there are unused levels.
The result is that asv tests on ``--bench reshape`` give
```
before after ratio
[d5d5a718] [499ae6bf]
- 1.43±0.01ms 1.19±0.01ms 0.83 reshape.SparseIndex.time_unstack
```
There is a penalty (of the same order of magnitude) for cases in which _there are_ unused levels, but I think this is worth because:
- there is room for cythonizing and bringing this penalty close to zero in the average case (see comment in code)
- I think the case of no unused level is much more frequent
- if people want to avoid this penalty they can just clean their index from unused levels once for all
- in any case, asv tests on ``--bench multiindex_object`` (where the index in ``Duplicates`` always has unused levels) do not find any change in performance | https://api.github.com/repos/pandas-dev/pandas/pulls/20703 | 2018-04-15T10:40:06Z | 2018-04-15T18:50:34Z | 2018-04-15T18:50:34Z | 2018-04-15T22:37:25Z |
DOC: Addition to pd.DataFrame.assign() docstring (#20085) | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9e57579ddfc05..5e3b71a7d88fa 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3168,7 +3168,8 @@ def insert(self, loc, column, value, allow_duplicates=False):
def assign(self, **kwargs):
r"""
Assign new columns to a DataFrame, returning a new object
- (a copy) with all the original columns in addition to the new ones.
+ (a copy) with the new columns added to the original ones.
+ Existing columns that are re-assigned will be overwritten.
Parameters
----------
| - [ ] closes #20085
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Adding information to pd.DataFrame.assign() docstring that re-assigning of existing columns will overwrite the same.
| https://api.github.com/repos/pandas-dev/pandas/pulls/20702 | 2018-04-15T09:19:23Z | 2018-04-15T20:25:21Z | 2018-04-15T20:25:20Z | 2018-04-16T17:05:15Z |
DEP: Add 'python_requires' to setup.py to drop 3.4 support | diff --git a/ci/environment-dev.yaml b/ci/environment-dev.yaml
index 1337fc54e9aac..f9f9208519d61 100644
--- a/ci/environment-dev.yaml
+++ b/ci/environment-dev.yaml
@@ -11,5 +11,5 @@ dependencies:
- python-dateutil>=2.5.0
- python=3
- pytz
- - setuptools>=3.3
+ - setuptools>=24.2.0
- sphinx
diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt
index fcbe0da5de305..3430e778a4573 100644
--- a/ci/requirements_dev.txt
+++ b/ci/requirements_dev.txt
@@ -7,5 +7,5 @@ moto
pytest>=3.1
python-dateutil>=2.5.0
pytz
-setuptools>=3.3
-sphinx
\ No newline at end of file
+setuptools>=24.2.0
+sphinx
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 82a97ba7b04e1..8795081c8dbfe 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -223,7 +223,7 @@ installed), make sure you have `pytest
Dependencies
------------
-* `setuptools <https://setuptools.readthedocs.io/en/latest/>`__: 3.3.0 or higher
+* `setuptools <https://setuptools.readthedocs.io/en/latest/>`__: 24.2.0 or higher
* `NumPy <http://www.numpy.org>`__: 1.9.0 or higher
* `python-dateutil <//https://dateutil.readthedocs.io/en/stable/>`__: 2.5.0 or higher
* `pytz <http://pytz.sourceforge.net/>`__
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 408a52e0526ee..4c599a8b686ba 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -435,6 +435,8 @@ If installed, we now require:
+-----------------+-----------------+----------+---------------+
| beautifulsoup4 | 4.2.1 | | :issue:`20082`|
+-----------------+-----------------+----------+---------------+
+| setuptools | 24.2.0 | | :issue:`20698`|
++-----------------+-----------------+----------+---------------+
.. _whatsnew_0230.api_breaking.dict_insertion_order:
diff --git a/setup.py b/setup.py
index 7fb5358d0950b..973b4c0abcde2 100755
--- a/setup.py
+++ b/setup.py
@@ -748,4 +748,5 @@ def pxd(name):
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms='any',
+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
**setuptools_kwargs)
| - [x] closes #20697
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This is a small change to the setup.py that *should* stop newer versions of pandas from being downloaded in environments with older/unsupported versions of python. I followed the instructions here: https://packaging.python.org/guides/dropping-older-python-versions/ | https://api.github.com/repos/pandas-dev/pandas/pulls/20698 | 2018-04-14T23:55:18Z | 2018-04-16T10:34:36Z | 2018-04-16T10:34:36Z | 2018-04-17T08:01:50Z |
BUG: Series.is_unique has extra output if contains objects with __ne__ defined | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 1f75de598330d..32166db08f8ad 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -1068,6 +1068,7 @@ Indexing
- Bug in :meth:`Index.difference` when taking difference of an ``Index`` with itself (:issue:`20040`)
- Bug in :meth:`DataFrame.first_valid_index` and :meth:`DataFrame.last_valid_index` in presence of entire rows of NaNs in the middle of values (:issue:`20499`).
- Bug in :class:`IntervalIndex` where some indexing operations were not supported for overlapping or non-monotonic ``uint64`` data (:issue:`20636`)
+- Bug in ``Series.is_unique`` where extraneous output in stderr is shown if Series contains objects with ``__ne__`` defined (:issue:`20661`)
MultiIndex
^^^^^^^^^^
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx
index 15d93374da3a9..b9a72a0c8285f 100644
--- a/pandas/_libs/hashtable.pyx
+++ b/pandas/_libs/hashtable.pyx
@@ -39,7 +39,6 @@ from khash cimport (
kh_put_pymap, kh_resize_pymap)
-from util cimport _checknan
cimport util
from missing cimport checknull
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index eca66f78499db..dbeb8bda3e454 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -870,7 +870,7 @@ cdef class PyObjectHashTable(HashTable):
for i in range(n):
val = values[i]
hash(val)
- if not _checknan(val):
+ if not checknull(val):
k = kh_get_pymap(self.table, <PyObject*>val)
if k == self.table.n_buckets:
kh_put_pymap(self.table, <PyObject*>val, &ret)
diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/src/util.pxd
index 5030b742849f8..d8249ec130f4d 100644
--- a/pandas/_libs/src/util.pxd
+++ b/pandas/_libs/src/util.pxd
@@ -159,8 +159,5 @@ cdef inline bint _checknull(object val):
except ValueError:
return False
-cdef inline bint _checknan(object val):
- return not cnp.PyArray_Check(val) and val != val
-
cdef inline bint is_period_object(object val):
return getattr(val, '_typ', '_typ') == 'period'
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index f93aaf2115601..6ea40329f4bc3 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1594,6 +1594,22 @@ def test_is_unique(self):
s = Series(np.arange(1000))
assert s.is_unique
+ def test_is_unique_class_ne(self, capsys):
+ # GH 20661
+ class Foo(object):
+ def __init__(self, val):
+ self._value = val
+
+ def __ne__(self, other):
+ raise Exception("NEQ not supported")
+
+ li = [Foo(i) for i in range(5)]
+ s = pd.Series(li, index=[i for i in range(5)])
+ _, err = capsys.readouterr()
+ s.is_unique
+ _, err = capsys.readouterr()
+ assert len(err) == 0
+
def test_is_monotonic(self):
s = Series(np.random.randint(0, 10, size=1000))
|
- [x] closes #20661
- [x] tests added / passed
- series/test/series/test_analytics.py:test_is_unique_class_ne
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Simple change from `_checknan` to `checknull` in pandas/_libs/hashtable_class_helper.pxi.in
Checked performance, and there is no difference. | https://api.github.com/repos/pandas-dev/pandas/pulls/20691 | 2018-04-13T21:17:04Z | 2018-04-15T19:03:35Z | 2018-04-15T19:03:34Z | 2018-04-15T22:53:51Z |
CLN: OrderedDict -> Dict | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index d436385ba61ce..977fac518e863 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9,7 +9,7 @@
labeling information
"""
import collections
-from collections import OrderedDict, abc
+from collections import abc
from io import StringIO
import itertools
import sys
@@ -8189,10 +8189,10 @@ def isin(self, values):
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
- new_data = OrderedDict()
+ new_data = {}
for index, s in data.items():
for col, v in s.items():
- new_data[col] = new_data.get(col, OrderedDict())
+ new_data[col] = new_data.get(col, {})
new_data[col][index] = v
return new_data
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index c442f0d9bf66c..44254f54cbc7a 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -1,5 +1,4 @@
import abc
-from collections import OrderedDict
from datetime import date, datetime, timedelta
from io import BytesIO
import os
@@ -429,9 +428,9 @@ def parse(
sheets = [sheet_name]
# handle same-type duplicates.
- sheets = list(OrderedDict.fromkeys(sheets).keys())
+ sheets = list(dict.fromkeys(sheets).keys())
- output = OrderedDict()
+ output = {}
for asheetname in sheets:
if verbose:
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index bd5e215730397..1f90bb12e11a3 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -10,7 +10,6 @@
http://www.statsmodels.org/devel/
"""
-from collections import OrderedDict
import datetime
from io import BytesIO
import os
@@ -1677,7 +1676,7 @@ def read(
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
- data = DataFrame.from_dict(OrderedDict(data_formatted))
+ data = DataFrame.from_dict(dict(data_formatted))
del data_formatted
data = self._do_convert_missing(data, convert_missing)
@@ -1716,7 +1715,7 @@ def any_startswith(x: str) -> bool:
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
- data = DataFrame.from_dict(OrderedDict(retyped_data))
+ data = DataFrame.from_dict(dict(retyped_data))
if index_col is not None:
data = data.set_index(data.pop(index_col))
@@ -1846,7 +1845,7 @@ def _do_convert_categoricals(
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
- data = DataFrame.from_dict(OrderedDict(cat_converted_data))
+ data = DataFrame.from_dict(dict(cat_converted_data))
return data
@property
@@ -2195,7 +2194,7 @@ def _prepare_categoricals(self, data):
data_formatted.append((col, values))
else:
data_formatted.append((col, data[col]))
- return DataFrame.from_dict(OrderedDict(data_formatted))
+ return DataFrame.from_dict(dict(data_formatted))
def _replace_nans(self, data):
# return data
@@ -2674,7 +2673,7 @@ def __init__(self, df, columns, version=117, byteorder=None):
self.df = df
self.columns = columns
- self._gso_table = OrderedDict((("", (0, 0)),))
+ self._gso_table = {"": (0, 0)}
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
@@ -2704,7 +2703,7 @@ def generate_table(self):
Returns
-------
- gso_table : OrderedDict
+ gso_table : dict
Ordered dictionary using the string found as keys
and their lookup position (v,o) as values
gso_df : DataFrame
@@ -2762,7 +2761,7 @@ def generate_blob(self, gso_table):
Parameters
----------
- gso_table : OrderedDict
+ gso_table : dict
Ordered dictionary (str, vo)
Returns
@@ -2992,7 +2991,7 @@ def _write_map(self):
the map with 0s. The second call writes the final map locations when
all blocks have been written."""
if self._map is None:
- self._map = OrderedDict(
+ self._map = dict(
(
("stata_data", 0),
("map", self._file.tell()),
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index e4de2147586f5..9543c9d5b59de 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -1,7 +1,6 @@
"""
test .agg behavior / note that .apply is tested generally in test_groupby.py
"""
-from collections import OrderedDict
import functools
import numpy as np
@@ -175,18 +174,14 @@ def test_aggregate_str_func(tsframe, groupbyfunc):
tm.assert_frame_equal(result, expected)
# group frame by function dict
- result = grouped.agg(
- OrderedDict([["A", "var"], ["B", "std"], ["C", "mean"], ["D", "sem"]])
- )
+ result = grouped.agg({"A": "var", "B": "std", "C": "mean", "D": "sem"})
expected = DataFrame(
- OrderedDict(
- [
- ["A", grouped["A"].var()],
- ["B", grouped["B"].std()],
- ["C", grouped["C"].mean()],
- ["D", grouped["D"].sem()],
- ]
- )
+ {
+ "A": grouped["A"].var(),
+ "B": grouped["B"].std(),
+ "C": grouped["C"].mean(),
+ "D": grouped["D"].sem(),
+ }
)
tm.assert_frame_equal(result, expected)
@@ -261,22 +256,20 @@ def test_multiple_functions_tuples_and_non_tuples(df):
def test_more_flexible_frame_multi_function(df):
grouped = df.groupby("A")
- exmean = grouped.agg(OrderedDict([["C", np.mean], ["D", np.mean]]))
- exstd = grouped.agg(OrderedDict([["C", np.std], ["D", np.std]]))
+ exmean = grouped.agg({"C": np.mean, "D": np.mean})
+ exstd = grouped.agg({"C": np.std, "D": np.std})
expected = concat([exmean, exstd], keys=["mean", "std"], axis=1)
expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1)
- d = OrderedDict([["C", [np.mean, np.std]], ["D", [np.mean, np.std]]])
+ d = {"C": [np.mean, np.std], "D": [np.mean, np.std]}
result = grouped.aggregate(d)
tm.assert_frame_equal(result, expected)
# be careful
- result = grouped.aggregate(OrderedDict([["C", np.mean], ["D", [np.mean, np.std]]]))
- expected = grouped.aggregate(
- OrderedDict([["C", np.mean], ["D", [np.mean, np.std]]])
- )
+ result = grouped.aggregate({"C": np.mean, "D": [np.mean, np.std]})
+ expected = grouped.aggregate({"C": np.mean, "D": [np.mean, np.std]})
tm.assert_frame_equal(result, expected)
def foo(x):
@@ -288,13 +281,11 @@ def bar(x):
# this uses column selection & renaming
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
- d = OrderedDict(
- [["C", np.mean], ["D", OrderedDict([["foo", np.mean], ["bar", np.std]])]]
- )
+ d = dict([["C", np.mean], ["D", dict([["foo", np.mean], ["bar", np.std]])]])
grouped.aggregate(d)
# But without renaming, these functions are OK
- d = OrderedDict([["C", [np.mean]], ["D", [foo, bar]]])
+ d = {"C": [np.mean], "D": [foo, bar]}
grouped.aggregate(d)
@@ -303,26 +294,20 @@ def test_multi_function_flexible_mix(df):
grouped = df.groupby("A")
# Expected
- d = OrderedDict(
- [["C", OrderedDict([["foo", "mean"], ["bar", "std"]])], ["D", {"sum": "sum"}]]
- )
+ d = {"C": {"foo": "mean", "bar": "std"}, "D": {"sum": "sum"}}
# this uses column selection & renaming
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate(d)
# Test 1
- d = OrderedDict(
- [["C", OrderedDict([["foo", "mean"], ["bar", "std"]])], ["D", "sum"]]
- )
+ d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
# this uses column selection & renaming
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate(d)
# Test 2
- d = OrderedDict(
- [["C", OrderedDict([["foo", "mean"], ["bar", "std"]])], ["D", ["sum"]]]
- )
+ d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
# this uses column selection & renaming
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate(d)
@@ -642,9 +627,7 @@ def test_maybe_mangle_lambdas_args(self):
assert func["A"][0](0, 2, b=3) == (0, 2, 3)
def test_maybe_mangle_lambdas_named(self):
- func = OrderedDict(
- [("C", np.mean), ("D", OrderedDict([("foo", np.mean), ("bar", np.mean)]))]
- )
+ func = {"C": np.mean, "D": {"foo": np.mean, "bar": np.mean}}
result = _maybe_mangle_lambdas(func)
assert result == func
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py
index f14384928b979..765bc3bab5d4a 100644
--- a/pandas/tests/groupby/aggregate/test_other.py
+++ b/pandas/tests/groupby/aggregate/test_other.py
@@ -2,7 +2,6 @@
test all other .agg behavior
"""
-from collections import OrderedDict
import datetime as dt
from functools import partial
@@ -96,8 +95,7 @@ def test_agg_period_index():
index = period_range(start="1999-01", periods=5, freq="M")
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
- series = [("s1", s1), ("s2", s2)]
- df = DataFrame.from_dict(OrderedDict(series))
+ df = DataFrame.from_dict({"s1": s1, "s2": s2})
grouped = df.groupby(df.index.month)
list(grouped)
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 5f78e4860f1e9..89ffcd9ee313e 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -1,4 +1,3 @@
-from collections import OrderedDict
from datetime import datetime
import numpy as np
@@ -1204,7 +1203,7 @@ def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
# GH 24880
expected = Series(data=data, index=index, name="C")
result = df_cat.groupby(["A", "B"], observed=observed)["C"].apply(
- lambda x: OrderedDict([("min", x.min()), ("max", x.max())])
+ lambda x: {"min": x.min(), "max": x.max()}
)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index a6b9b0e35f865..3a16642641fca 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1,4 +1,3 @@
-from collections import OrderedDict
from datetime import datetime
from decimal import Decimal
from io import StringIO
@@ -598,7 +597,7 @@ def test_groupby_as_index_agg(df):
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
- result2 = grouped.agg(OrderedDict([["C", np.mean], ["D", np.sum]]))
+ result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
@@ -617,7 +616,7 @@ def test_groupby_as_index_agg(df):
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
- result2 = grouped.agg(OrderedDict([["C", np.mean], ["D", np.sum]]))
+ result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
diff --git a/pandas/tests/indexes/multi/test_constructor.py b/pandas/tests/indexes/multi/test_constructor.py
index c0ec889d170d6..90e993a807bd2 100644
--- a/pandas/tests/indexes/multi/test_constructor.py
+++ b/pandas/tests/indexes/multi/test_constructor.py
@@ -1,5 +1,3 @@
-from collections import OrderedDict
-
import numpy as np
import pytest
@@ -654,14 +652,12 @@ def test_from_frame_error(non_frame):
def test_from_frame_dtype_fidelity():
# GH 22420
df = pd.DataFrame(
- OrderedDict(
- [
- ("dates", pd.date_range("19910905", periods=6, tz="US/Eastern")),
- ("a", [1, 1, 1, 2, 2, 2]),
- ("b", pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True)),
- ("c", ["x", "x", "y", "z", "x", "y"]),
- ]
- )
+ {
+ "dates": pd.date_range("19910905", periods=6, tz="US/Eastern"),
+ "a": [1, 1, 1, 2, 2, 2],
+ "b": pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True),
+ "c": ["x", "x", "y", "z", "x", "y"],
+ }
)
original_dtypes = df.dtypes.to_dict()
diff --git a/pandas/tests/indexes/multi/test_conversion.py b/pandas/tests/indexes/multi/test_conversion.py
index a0b17ae8924b7..fab4f72dc153b 100644
--- a/pandas/tests/indexes/multi/test_conversion.py
+++ b/pandas/tests/indexes/multi/test_conversion.py
@@ -1,5 +1,3 @@
-from collections import OrderedDict
-
import numpy as np
import pytest
@@ -107,14 +105,12 @@ def test_to_frame_dtype_fidelity():
original_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)}
expected_df = pd.DataFrame(
- OrderedDict(
- [
- ("dates", pd.date_range("19910905", periods=6, tz="US/Eastern")),
- ("a", [1, 1, 1, 2, 2, 2]),
- ("b", pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True)),
- ("c", ["x", "x", "y", "z", "x", "y"]),
- ]
- )
+ {
+ "dates": pd.date_range("19910905", periods=6, tz="US/Eastern"),
+ "a": [1, 1, 1, 2, 2, 2],
+ "b": pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True),
+ "c": ["x", "x", "y", "z", "x", "y"],
+ }
)
df = mi.to_frame(index=False)
df_dtypes = df.dtypes.to_dict()
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index d5b23653e8a72..a7e2363ec422e 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -2,7 +2,6 @@
The tests in this package are to ensure the proper resultant dtypes of
set operations.
"""
-from collections import OrderedDict
import itertools as it
import numpy as np
@@ -16,14 +15,12 @@
from pandas.tests.indexes.conftest import indices_dict
import pandas.util.testing as tm
-COMPATIBLE_INCONSISTENT_PAIRS = OrderedDict(
- [
- ((Int64Index, RangeIndex), (tm.makeIntIndex, tm.makeRangeIndex)),
- ((Float64Index, Int64Index), (tm.makeFloatIndex, tm.makeIntIndex)),
- ((Float64Index, RangeIndex), (tm.makeFloatIndex, tm.makeIntIndex)),
- ((Float64Index, UInt64Index), (tm.makeFloatIndex, tm.makeUIntIndex)),
- ]
-)
+COMPATIBLE_INCONSISTENT_PAIRS = {
+ (Int64Index, RangeIndex): (tm.makeIntIndex, tm.makeRangeIndex),
+ (Float64Index, Int64Index): (tm.makeFloatIndex, tm.makeIntIndex),
+ (Float64Index, RangeIndex): (tm.makeFloatIndex, tm.makeIntIndex),
+ (Float64Index, UInt64Index): (tm.makeFloatIndex, tm.makeUIntIndex),
+}
@pytest.fixture(params=it.combinations(indices_dict, 2), ids="-".join)
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 2cc80a6e5565d..4203d0b0241ff 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -1,4 +1,3 @@
-from collections import OrderedDict
import datetime as dt
from datetime import datetime
import gzip
@@ -1029,7 +1028,7 @@ def test_categorical_order(self, file):
cols.append((col, pd.Categorical.from_codes(codes, labels)))
else:
cols.append((col, pd.Series(labels, dtype=np.float32)))
- expected = DataFrame.from_dict(OrderedDict(cols))
+ expected = DataFrame.from_dict(dict(cols))
# Read with and with out categoricals, ensure order is identical
file = getattr(self, file)
| - xref https://github.com/pandas-dev/pandas/issues/30469
- [x] follow up from https://github.com/pandas-dev/pandas/pull/29212
- [x] passes `black pandas`
As of CPython 3.6 the implementation of dict maintains insertion order.
This became a language feature in python 3.7.
Note in python 3.8 support for reversed(dict) was added - but we don't seem to use that in the use cases i've eliminated.
There is more to do here. | https://api.github.com/repos/pandas-dev/pandas/pulls/29923 | 2019-11-29T00:28:51Z | 2019-12-01T17:50:19Z | 2019-12-01T17:50:19Z | 2019-12-27T00:58:07Z |
BUG: loc-indexing with a CategoricalIndex with non-string categories | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 3e72072eae303..afecc7fa4e41b 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -742,6 +742,7 @@ Indexing
- Fix assignment of column via `.loc` with numpy non-ns datetime type (:issue:`27395`)
- Bug in :meth:`Float64Index.astype` where ``np.inf`` was not handled properly when casting to an integer dtype (:issue:`28475`)
- :meth:`Index.union` could fail when the left contained duplicates (:issue:`28257`)
+- Bug when indexing with ``.loc`` where the index was a :class:`CategoricalIndex` with integer and float categories, a ValueError was raised (:issue:`17569`)
- :meth:`Index.get_indexer_non_unique` could fail with `TypeError` in some cases, such as when searching for ints in a string index (:issue:`28257`)
- Bug in :meth:`Float64Index.get_loc` incorrectly raising ``TypeError`` instead of ``KeyError`` (:issue:`29189`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 1587d97ffb52c..0f97bb327f525 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2953,11 +2953,11 @@ def _convert_scalar_indexer(self, key, kind=None):
"unicode",
"mixed",
]:
- return self._invalid_indexer("label", key)
+ self._invalid_indexer("label", key)
elif kind in ["loc"] and is_integer(key):
if not self.holds_integer():
- return self._invalid_indexer("label", key)
+ self._invalid_indexer("label", key)
return key
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index d061f61effff3..2cc853ecf568b 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -696,9 +696,11 @@ def get_indexer_non_unique(self, target):
@Appender(_index_shared_docs["_convert_scalar_indexer"])
def _convert_scalar_indexer(self, key, kind=None):
- if self.categories._defer_to_indexing:
- return self.categories._convert_scalar_indexer(key, kind=kind)
-
+ if kind == "loc":
+ try:
+ return self.categories._convert_scalar_indexer(key, kind=kind)
+ except TypeError:
+ self._invalid_indexer("label", key)
return super()._convert_scalar_indexer(key, kind=kind)
@Appender(_index_shared_docs["_convert_list_indexer"])
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index ab3b0ed13b5c0..bc3ee1c59f76c 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -12,7 +12,9 @@
Index,
Interval,
Series,
+ Timedelta,
Timestamp,
+ conftest,
)
from pandas.api.types import CategoricalDtype as CDT
import pandas.util.testing as tm
@@ -80,6 +82,13 @@ def test_loc_scalar(self):
with pytest.raises(TypeError, match=msg):
df.loc["d", "C"] = 10
+ msg = (
+ r"cannot do label indexing on <class 'pandas\.core\.indexes\.category"
+ r"\.CategoricalIndex'> with these indexers \[1\] of <class 'int'>"
+ )
+ with pytest.raises(TypeError, match=msg):
+ df.loc[1]
+
def test_getitem_scalar(self):
cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
@@ -754,3 +763,56 @@ def test_map_with_dict_or_series(self):
output = cur_index.map(mapper)
# Order of categories in output can be different
tm.assert_index_equal(expected, output)
+
+ @pytest.mark.parametrize(
+ "idx_values",
+ [
+ # python types
+ [1, 2, 3],
+ [-1, -2, -3],
+ [1.5, 2.5, 3.5],
+ [-1.5, -2.5, -3.5],
+ # numpy int/uint
+ *[np.array([1, 2, 3], dtype=dtype) for dtype in conftest.ALL_INT_DTYPES],
+ # numpy floats
+ *[np.array([1.5, 2.5, 3.5], dtype=dtyp) for dtyp in conftest.FLOAT_DTYPES],
+ # numpy object
+ np.array([1, "b", 3.5], dtype=object),
+ # pandas scalars
+ [Interval(1, 4), Interval(4, 6), Interval(6, 9)],
+ [Timestamp(2019, 1, 1), Timestamp(2019, 2, 1), Timestamp(2019, 3, 1)],
+ [Timedelta(1, "d"), Timedelta(2, "d"), Timedelta(3, "D")],
+ # pandas Integer arrays
+ *[pd.array([1, 2, 3], dtype=dtype) for dtype in conftest.ALL_EA_INT_DTYPES],
+ # other pandas arrays
+ pd.IntervalIndex.from_breaks([1, 4, 6, 9]).array,
+ pd.date_range("2019-01-01", periods=3).array,
+ pd.timedelta_range(start="1d", periods=3).array,
+ ],
+ )
+ def test_loc_with_non_string_categories(self, idx_values, ordered_fixture):
+ # GH-17569
+ cat_idx = CategoricalIndex(idx_values, ordered=ordered_fixture)
+ df = DataFrame({"A": ["foo", "bar", "baz"]}, index=cat_idx)
+
+ # scalar selection
+ result = df.loc[idx_values[0]]
+ expected = Series(["foo"], index=["A"], name=idx_values[0])
+ tm.assert_series_equal(result, expected)
+
+ # list selection
+ result = df.loc[idx_values[:2]]
+ expected = DataFrame(["foo", "bar"], index=cat_idx[:2], columns=["A"])
+ tm.assert_frame_equal(result, expected)
+
+ # scalar assignment
+ result = df.copy()
+ result.loc[idx_values[0]] = "qux"
+ expected = DataFrame({"A": ["qux", "bar", "baz"]}, index=cat_idx)
+ tm.assert_frame_equal(result, expected)
+
+ # list assignment
+ result = df.copy()
+ result.loc[idx_values[:2], "A"] = ["qux", "qux2"]
+ expected = DataFrame({"A": ["qux", "qux2", "baz"]}, index=cat_idx)
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 0a3b513ff0167..cdacecc6c79d3 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -100,7 +100,12 @@ def test_scalar_non_numeric(self):
idxr(s)[3.0]
# label based can be a TypeError or KeyError
- if s.index.inferred_type in ["string", "unicode", "mixed"]:
+ if s.index.inferred_type in {
+ "categorical",
+ "string",
+ "unicode",
+ "mixed",
+ }:
error = KeyError
msg = r"^3$"
else:
| - [x] closes #17569
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Bug when indexing with ``.loc`` and the index is a CategoricalIndex with integer or float categories. | https://api.github.com/repos/pandas-dev/pandas/pulls/29922 | 2019-11-28T22:29:17Z | 2019-12-11T13:18:32Z | 2019-12-11T13:18:31Z | 2019-12-11T13:57:19Z |
Document S3 and GCS path functionality of DataFrame.to_csv() | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index fa47a5944f7bf..0112b51e42923 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -34,7 +34,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like
binary;`SPSS <https://en.wikipedia.org/wiki/SPSS>`__;:ref:`read_spss<io.spss_reader>`;
binary;`Python Pickle Format <https://docs.python.org/3/library/pickle.html>`__;:ref:`read_pickle<io.pickle>`;:ref:`to_pickle<io.pickle>`
SQL;`SQL <https://en.wikipedia.org/wiki/SQL>`__;:ref:`read_sql<io.sql>`;:ref:`to_sql<io.sql>`
- SQL;`Google Big Query <https://en.wikipedia.org/wiki/BigQuery>`__;:ref:`read_gbq<io.bigquery>`;:ref:`to_gbq<io.bigquery>`
+ SQL;`Google BigQuery <https://en.wikipedia.org/wiki/BigQuery>`__;:ref:`read_gbq<io.bigquery>`;:ref:`to_gbq<io.bigquery>`
:ref:`Here <io.perf>` is an informal performance comparison for some of these IO methods.
| - [x] closes #8508
I made more changes than absolutely necessary for documenting this. It's a bit more complete and it reads a more similar to `pd.read_csv()` | https://api.github.com/repos/pandas-dev/pandas/pulls/29920 | 2019-11-28T20:08:28Z | 2019-12-17T18:14:09Z | 2019-12-17T18:14:09Z | 2019-12-17T18:14:19Z |
DEPR: remove itemsize, data, base, flags, strides | diff --git a/doc/redirects.csv b/doc/redirects.csv
index f124fdb840ce0..a305a1a50a31a 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -622,7 +622,6 @@ generated/pandas.Index.asi8,../reference/api/pandas.Index.asi8
generated/pandas.Index.asof,../reference/api/pandas.Index.asof
generated/pandas.Index.asof_locs,../reference/api/pandas.Index.asof_locs
generated/pandas.Index.astype,../reference/api/pandas.Index.astype
-generated/pandas.Index.base,../reference/api/pandas.Index.base
generated/pandas.Index.contains,../reference/api/pandas.Index.contains
generated/pandas.Index.copy,../reference/api/pandas.Index.copy
generated/pandas.Index.data,../reference/api/pandas.Index.data
@@ -639,7 +638,6 @@ generated/pandas.Index.empty,../reference/api/pandas.Index.empty
generated/pandas.Index.equals,../reference/api/pandas.Index.equals
generated/pandas.Index.factorize,../reference/api/pandas.Index.factorize
generated/pandas.Index.fillna,../reference/api/pandas.Index.fillna
-generated/pandas.Index.flags,../reference/api/pandas.Index.flags
generated/pandas.Index.format,../reference/api/pandas.Index.format
generated/pandas.Index.get_duplicates,../reference/api/pandas.Index.get_duplicates
generated/pandas.Index.get_indexer_for,../reference/api/pandas.Index.get_indexer_for
@@ -679,7 +677,6 @@ generated/pandas.Index.is_object,../reference/api/pandas.Index.is_object
generated/pandas.Index.is_type_compatible,../reference/api/pandas.Index.is_type_compatible
generated/pandas.Index.is_unique,../reference/api/pandas.Index.is_unique
generated/pandas.Index.item,../reference/api/pandas.Index.item
-generated/pandas.Index.itemsize,../reference/api/pandas.Index.itemsize
generated/pandas.Index.join,../reference/api/pandas.Index.join
generated/pandas.Index.map,../reference/api/pandas.Index.map
generated/pandas.Index.max,../reference/api/pandas.Index.max
@@ -711,7 +708,6 @@ generated/pandas.Index.sort,../reference/api/pandas.Index.sort
generated/pandas.Index.sortlevel,../reference/api/pandas.Index.sortlevel
generated/pandas.Index.sort_values,../reference/api/pandas.Index.sort_values
generated/pandas.Index.str,../reference/api/pandas.Index.str
-generated/pandas.Index.strides,../reference/api/pandas.Index.strides
generated/pandas.Index.summary,../reference/api/pandas.Index.summary
generated/pandas.Index.symmetric_difference,../reference/api/pandas.Index.symmetric_difference
generated/pandas.Index.take,../reference/api/pandas.Index.take
@@ -1106,7 +1102,6 @@ generated/pandas.Series.at,../reference/api/pandas.Series.at
generated/pandas.Series.at_time,../reference/api/pandas.Series.at_time
generated/pandas.Series.autocorr,../reference/api/pandas.Series.autocorr
generated/pandas.Series.axes,../reference/api/pandas.Series.axes
-generated/pandas.Series.base,../reference/api/pandas.Series.base
generated/pandas.Series.between,../reference/api/pandas.Series.between
generated/pandas.Series.between_time,../reference/api/pandas.Series.between_time
generated/pandas.Series.bfill,../reference/api/pandas.Series.bfill
@@ -1215,7 +1210,6 @@ generated/pandas.Series.fillna,../reference/api/pandas.Series.fillna
generated/pandas.Series.filter,../reference/api/pandas.Series.filter
generated/pandas.Series.first,../reference/api/pandas.Series.first
generated/pandas.Series.first_valid_index,../reference/api/pandas.Series.first_valid_index
-generated/pandas.Series.flags,../reference/api/pandas.Series.flags
generated/pandas.Series.floordiv,../reference/api/pandas.Series.floordiv
generated/pandas.Series.from_array,../reference/api/pandas.Series.from_array
generated/pandas.Series.from_csv,../reference/api/pandas.Series.from_csv
@@ -1248,7 +1242,6 @@ generated/pandas.Series.isnull,../reference/api/pandas.Series.isnull
generated/pandas.Series.is_unique,../reference/api/pandas.Series.is_unique
generated/pandas.Series.item,../reference/api/pandas.Series.item
generated/pandas.Series.items,../reference/api/pandas.Series.items
-generated/pandas.Series.itemsize,../reference/api/pandas.Series.itemsize
generated/pandas.Series.__iter__,../reference/api/pandas.Series.__iter__
generated/pandas.Series.iteritems,../reference/api/pandas.Series.iteritems
generated/pandas.Series.ix,../reference/api/pandas.Series.ix
@@ -1361,7 +1354,6 @@ generated/pandas.Series.str.find,../reference/api/pandas.Series.str.find
generated/pandas.Series.str.get_dummies,../reference/api/pandas.Series.str.get_dummies
generated/pandas.Series.str.get,../reference/api/pandas.Series.str.get
generated/pandas.Series.str,../reference/api/pandas.Series.str
-generated/pandas.Series.strides,../reference/api/pandas.Series.strides
generated/pandas.Series.str.index,../reference/api/pandas.Series.str.index
generated/pandas.Series.str.isalnum,../reference/api/pandas.Series.str.isalnum
generated/pandas.Series.str.isalpha,../reference/api/pandas.Series.str.isalpha
diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst
index 448f020cfa56f..8edea28c17318 100644
--- a/doc/source/reference/indexing.rst
+++ b/doc/source/reference/indexing.rst
@@ -42,9 +42,6 @@ Properties
Index.ndim
Index.size
Index.empty
- Index.strides
- Index.itemsize
- Index.base
Index.T
Index.memory_usage
diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index 528cc8a0c3920..91843c7975a2c 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -33,16 +33,11 @@ Attributes
Series.nbytes
Series.ndim
Series.size
- Series.strides
- Series.itemsize
- Series.base
Series.T
Series.memory_usage
Series.hasnans
- Series.flags
Series.empty
Series.dtypes
- Series.data
Series.name
Series.put
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 05aba65888c55..217c5a49ac960 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -463,6 +463,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
- Changed the default ``fill_value`` in :meth:`Categorical.take` from ``True`` to ``False`` (:issue:`20841`)
- Changed the default value for the `raw` argument in :func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`,
- :func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` to ``False`` (:issue:`20584`)
+- Removed the previously deprecated :attr:`Series.base`, :attr:`Index.base`, :attr:`Categorical.base`, :attr:`Series.flags`, :attr:`Index.flags`, :attr:`PeriodArray.flags`, :attr:`Series.strides`, :attr:`Index.strides`, :attr:`Series.itemsize`, :attr:`Index.itemsize`, :attr:`Series.data`, :attr:`Index.data` (:issue:`20721`)
- Changed :meth:`Timedelta.resolution` to match the behavior of the standard library ``datetime.timedelta.resolution``, for the old behavior, use :meth:`Timedelta.resolution_string` (:issue:`26839`)
- Removed previously deprecated :attr:`Timestamp.weekday_name`, :attr:`DatetimeIndex.weekday_name`, and :attr:`Series.dt.weekday_name` (:issue:`18164`)
- Removed previously deprecated ``errors`` argument in :meth:`Timestamp.tz_localize`, :meth:`DatetimeIndex.tz_localize`, and :meth:`Series.tz_localize` (:issue:`22644`)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index bb4ceea420d8d..ec1f5d2d6214c 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -527,13 +527,6 @@ def tolist(self) -> list:
to_list = tolist
- @property
- def base(self) -> None:
- """
- compat, we are always our own object
- """
- return None
-
@classmethod
def _from_inferred_categories(
cls, inferred_categories, inferred_codes, dtype, true_values=None
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 41a8c48452647..1012abd0b5d13 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -618,14 +618,6 @@ def astype(self, dtype, copy=True):
return self.asfreq(dtype.freq)
return super().astype(dtype, copy=copy)
- @property
- def flags(self):
- # TODO: remove
- # We need this since reduction.SeriesBinGrouper uses values.flags
- # Ideally, we wouldn't be passing objects down there in the first
- # place.
- return self._data.flags
-
# ------------------------------------------------------------------
# Arithmetic Methods
_create_comparison_method = classmethod(_period_array_cmp)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 83d6ac76cdd98..066a7628be364 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -628,15 +628,7 @@ class IndexOpsMixin:
# ndarray compatibility
__array_priority__ = 1000
_deprecations: FrozenSet[str] = frozenset(
- [
- "tolist", # tolist is not deprecated, just suppressed in the __dir__
- "base",
- "data",
- "item",
- "itemsize",
- "flags",
- "strides",
- ]
+ ["tolist", "item"] # tolist is not deprecated, just suppressed in the __dir__
)
def transpose(self, *args, **kwargs):
@@ -707,36 +699,6 @@ def item(self):
)
return self.values.item()
- @property
- def data(self):
- """
- Return the data pointer of the underlying data.
-
- .. deprecated:: 0.23.0
- """
- warnings.warn(
- "{obj}.data is deprecated and will be removed "
- "in a future version".format(obj=type(self).__name__),
- FutureWarning,
- stacklevel=2,
- )
- return self.values.data
-
- @property
- def itemsize(self):
- """
- Return the size of the dtype of the item of the underlying data.
-
- .. deprecated:: 0.23.0
- """
- warnings.warn(
- "{obj}.itemsize is deprecated and will be removed "
- "in a future version".format(obj=type(self).__name__),
- FutureWarning,
- stacklevel=2,
- )
- return self._ndarray_values.itemsize
-
@property
def nbytes(self):
"""
@@ -744,21 +706,6 @@ def nbytes(self):
"""
return self._values.nbytes
- @property
- def strides(self):
- """
- Return the strides of the underlying data.
-
- .. deprecated:: 0.23.0
- """
- warnings.warn(
- "{obj}.strides is deprecated and will be removed "
- "in a future version".format(obj=type(self).__name__),
- FutureWarning,
- stacklevel=2,
- )
- return self._ndarray_values.strides
-
@property
def size(self):
"""
@@ -766,36 +713,6 @@ def size(self):
"""
return len(self._values)
- @property
- def flags(self):
- """
- Return the ndarray.flags for the underlying data.
-
- .. deprecated:: 0.23.0
- """
- warnings.warn(
- "{obj}.flags is deprecated and will be removed "
- "in a future version".format(obj=type(self).__name__),
- FutureWarning,
- stacklevel=2,
- )
- return self.values.flags
-
- @property
- def base(self):
- """
- Return the base object if the memory of the underlying data is shared.
-
- .. deprecated:: 0.23.0
- """
- warnings.warn(
- "{obj}.base is deprecated and will be removed "
- "in a future version".format(obj=type(self).__name__),
- FutureWarning,
- stacklevel=2,
- )
- return self.values.base
-
@property
def array(self) -> ExtensionArray:
"""
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index a9e119f3c5f87..56957b2f879ec 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -2,7 +2,6 @@
from operator import le, lt
import textwrap
from typing import Any, Optional, Tuple, Union
-import warnings
import numpy as np
@@ -455,19 +454,6 @@ def size(self):
# Avoid materializing ndarray[Interval]
return self._data.size
- @property
- def itemsize(self):
- msg = (
- "IntervalIndex.itemsize is deprecated and will be removed in "
- "a future version"
- )
- warnings.warn(msg, FutureWarning, stacklevel=2)
-
- # suppress the warning from the underlying left/right itemsize
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- return self.left.itemsize + self.right.itemsize
-
def __len__(self) -> int:
return len(self.left)
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index cdd0e600c888d..b3476dcb12abd 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -911,20 +911,9 @@ def __setstate__(self, state):
_unpickle_compat = __setstate__
- @property
- def flags(self):
- """ return the ndarray.flags for the underlying data """
- warnings.warn(
- "{obj}.flags is deprecated and will be removed "
- "in a future version".format(obj=type(self).__name__),
- FutureWarning,
- stacklevel=2,
- )
- return self._ndarray_values.flags
-
def item(self):
"""
- return the first element of the underlying data as a python
+ Return the first element of the underlying data as a python
scalar
.. deprecated:: 0.25.0
@@ -943,30 +932,6 @@ def item(self):
# copy numpy's message here because Py26 raises an IndexError
raise ValueError("can only convert an array of size 1 to a Python scalar")
- @property
- def data(self):
- """ return the data pointer of the underlying data """
- warnings.warn(
- "{obj}.data is deprecated and will be removed "
- "in a future version".format(obj=type(self).__name__),
- FutureWarning,
- stacklevel=2,
- )
- return np.asarray(self._data).data
-
- @property
- def base(self):
- """ return the base object if the memory of the underlying data is
- shared
- """
- warnings.warn(
- "{obj}.base is deprecated and will be removed "
- "in a future version".format(obj=type(self).__name__),
- FutureWarning,
- stacklevel=2,
- )
- return np.asarray(self._data)
-
def memory_usage(self, deep=False):
result = super().memory_usage(deep=deep)
if hasattr(self, "_cache") and "_int64index" in self._cache:
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index f3c8c5cb6efa1..6ad7dfb22f2b3 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -836,17 +836,6 @@ def test_nbytes(self):
expected = 64 # 4 * 8 * 2
assert result == expected
- def test_itemsize(self):
- # GH 19209
- left = np.arange(0, 4, dtype="i8")
- right = np.arange(1, 5, dtype="i8")
- expected = 16 # 8 * 2
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- result = IntervalIndex.from_arrays(left, right).itemsize
-
- assert result == expected
-
@pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"])
def test_set_closed(self, name, closed, new_closed):
# GH 21670
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index 84f98a55376f7..86219d77542af 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -788,8 +788,10 @@ def test_ensure_copied_data(self, indices):
# Index.__new__ is honored.
#
# Must be tested separately from other indexes because
- # self.value is not an ndarray.
- _base = lambda ar: ar if ar.base is None else ar.base
+ # self.values is not an ndarray.
+ # GH#29918 Index.base has been removed
+ # FIXME: is this test still meaningful?
+ _base = lambda ar: ar if getattr(ar, "base", None) is None else ar.base
result = CategoricalIndex(indices.values, copy=True)
tm.assert_index_equal(indices, result)
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index e65388be2ba7d..d515a015cdbec 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -333,20 +333,8 @@ def test_ndarray_compat_properties(self):
assert getattr(o, p, None) is not None
# deprecated properties
- for p in ["flags", "strides", "itemsize"]:
- with tm.assert_produces_warning(FutureWarning):
- assert getattr(o, p, None) is not None
-
- with tm.assert_produces_warning(FutureWarning):
- assert hasattr(o, "base")
-
- # If we have a datetime-like dtype then needs a view to work
- # but the user is responsible for that
- try:
- with tm.assert_produces_warning(FutureWarning):
- assert o.data is not None
- except ValueError:
- pass
+ for p in ["flags", "strides", "itemsize", "base", "data"]:
+ assert not hasattr(o, p)
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
| and un-deprecated `item`, though im not sure where that goes in the whatsnew | https://api.github.com/repos/pandas-dev/pandas/pulls/29918 | 2019-11-28T17:21:52Z | 2019-11-30T17:47:09Z | 2019-11-30T17:47:09Z | 2019-11-30T17:53:19Z |
CI: Highlighting flake8 and grep errors in GitHub Actions | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 7c6c98d910492..4e25fd0ddb5ea 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -34,17 +34,13 @@ function invgrep {
#
# This is useful for the CI, as we want to fail if one of the patterns
# that we want to avoid is found by grep.
- if [[ "$AZURE" == "true" ]]; then
- set -o pipefail
- grep -n "$@" | awk -F ":" '{print "##vso[task.logissue type=error;sourcepath=" $1 ";linenumber=" $2 ";] Found unwanted pattern: " $3}'
- else
- grep "$@"
- fi
- return $((! $?))
+ grep -n "$@" | sed "s/^/$INVGREP_PREPEND/" | sed "s/$/$INVGREP_APPEND/" ; EXIT_STATUS=${PIPESTATUS[0]}
+ return $((! $EXIT_STATUS))
}
-if [[ "$AZURE" == "true" ]]; then
- FLAKE8_FORMAT="##vso[task.logissue type=error;sourcepath=%(path)s;linenumber=%(row)s;columnnumber=%(col)s;code=%(code)s;]%(text)s"
+if [[ "$GITHUB_ACTIONS" == "true" ]]; then
+ FLAKE8_FORMAT="##[error]%(path)s:%(row)s:%(col)s:%(code):%(text)s"
+ INVGREP_PREPEND="##[error]"
else
FLAKE8_FORMAT="default"
fi
@@ -199,14 +195,10 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Check that no file in the repo contains trailing whitespaces' ; echo $MSG
- set -o pipefail
- if [[ "$AZURE" == "true" ]]; then
- # we exclude all c/cpp files as the c/cpp files of pandas code base are tested when Linting .c and .h files
- ! grep -n '--exclude=*.'{svg,c,cpp,html,js} --exclude-dir=env -RI "\s$" * | awk -F ":" '{print "##vso[task.logissue type=error;sourcepath=" $1 ";linenumber=" $2 ";] Tailing whitespaces found: " $3}'
- else
- ! grep -n '--exclude=*.'{svg,c,cpp,html,js} --exclude-dir=env -RI "\s$" * | awk -F ":" '{print $1 ":" $2 ":Tailing whitespaces found: " $3}'
- fi
+ INVGREP_APPEND=" <- trailing whitespaces found"
+ invgrep -RI --exclude=\*.{svg,c,cpp,html,js} --exclude-dir=env "\s$" *
RET=$(($RET + $?)) ; echo $MSG "DONE"
+ unset INVGREP_APPEND
fi
### CODE ###
| Our code checks genereated the output in a way that Azure pipelines was highlighting the errors. In GitHub actions it's possible to do more or less the same, but with a different syntax. Changing the syntax here, and simplifying some tricky parts in the script.
Introducing some errors, so we can see how it looks. | https://api.github.com/repos/pandas-dev/pandas/pulls/29915 | 2019-11-28T14:08:30Z | 2019-11-29T22:36:09Z | 2019-11-29T22:36:09Z | 2019-11-29T22:36:12Z |
DEPR: Series.cat.categorical, Series.cat.index, Series.cat.name | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 8a9b9db140409..4522e5b0ad0d7 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -406,6 +406,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
- Removed the previously deprecated "fastpath" keyword from the :class:`Index` constructor (:issue:`23110`)
- Removed the previously deprecated :meth:`Series.get_value`, :meth:`Series.set_value`, :meth:`DataFrame.get_value`, :meth:`DataFrame.set_value` (:issue:`17739`)
- Changed the the default value of `inplace` in :meth:`DataFrame.set_index` and :meth:`Series.set_axis`. It now defaults to False (:issue:`27600`)
+- Removed the previously deprecated :attr:`Series.cat.categorical`, :attr:`Series.cat.index`, :attr:`Series.cat.name` (:issue:`24751`)
- Removed support for nested renaming in :meth:`DataFrame.aggregate`, :meth:`Series.aggregate`, :meth:`DataFrameGroupBy.aggregate`, :meth:`SeriesGroupBy.aggregate`, :meth:`Rolling.aggregate` (:issue:`18529`)
- Passing ``datetime64`` data to :class:`TimedeltaIndex` or ``timedelta64`` data to ``DatetimeIndex`` now raises ``TypeError`` (:issue:`23539`, :issue:`23937`)
- A tuple passed to :meth:`DataFrame.groupby` is now exclusively treated as a single key (:issue:`18314`)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 6cc3f660fb425..f20308be1ee09 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2583,43 +2583,6 @@ def _delegate_method(self, name, *args, **kwargs):
if res is not None:
return Series(res, index=self._index, name=self._name)
- @property
- def categorical(self):
- # Note: Upon deprecation, `test_tab_completion_with_categorical` will
- # need to be updated. `categorical` will need to be removed from
- # `ok_for_cat`.
- warn(
- "`Series.cat.categorical` has been deprecated. Use the "
- "attributes on 'Series.cat' directly instead.",
- FutureWarning,
- stacklevel=2,
- )
- return self._parent
-
- @property
- def name(self):
- # Note: Upon deprecation, `test_tab_completion_with_categorical` will
- # need to be updated. `name` will need to be removed from
- # `ok_for_cat`.
- warn(
- "`Series.cat.name` has been deprecated. Use `Series.name` instead.",
- FutureWarning,
- stacklevel=2,
- )
- return self._name
-
- @property
- def index(self):
- # Note: Upon deprecation, `test_tab_completion_with_categorical` will
- # need to be updated. `index` will need to be removed from
- # ok_for_cat`.
- warn(
- "`Series.cat.index` has been deprecated. Use `Series.index` instead.",
- FutureWarning,
- stacklevel=2,
- )
- return self._index
-
# utility routines
diff --git a/pandas/tests/arrays/categorical/test_warnings.py b/pandas/tests/arrays/categorical/test_warnings.py
index 53733770ed954..29bd5252dbe3a 100644
--- a/pandas/tests/arrays/categorical/test_warnings.py
+++ b/pandas/tests/arrays/categorical/test_warnings.py
@@ -1,6 +1,5 @@
import pytest
-import pandas as pd
import pandas.util.testing as tm
@@ -15,15 +14,3 @@ def test_tab_complete_warning(self, ip):
with tm.assert_produces_warning(None):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("c.", 1))
-
- def test_CategoricalAccessor_categorical_deprecation(self):
- with tm.assert_produces_warning(FutureWarning):
- pd.Series(["a", "b"], dtype="category").cat.categorical
-
- def test_CategoricalAccessor_name_deprecation(self):
- with tm.assert_produces_warning(FutureWarning):
- pd.Series(["a", "b"], dtype="category").cat.name
-
- def test_CategoricalAccessor_index_deprecation(self):
- with tm.assert_produces_warning(FutureWarning):
- pd.Series(["a", "b"], dtype="category").cat.index
| https://api.github.com/repos/pandas-dev/pandas/pulls/29914 | 2019-11-28T04:56:09Z | 2019-11-28T07:46:47Z | 2019-11-28T07:46:47Z | 2019-11-28T16:06:19Z | |
DEPR: CategoricalBlock.where casting to object | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index e54397e635c77..5f1a73f6d268c 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -418,6 +418,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
- Removed :meth:`DataFrame.as_blocks`, :meth:`Series.as_blocks`, `DataFrame.blocks`, :meth:`Series.blocks` (:issue:`17656`)
- :meth:`pandas.Series.str.cat` now defaults to aligning ``others``, using ``join='left'`` (:issue:`27611`)
- :meth:`pandas.Series.str.cat` does not accept list-likes *within* list-likes anymore (:issue:`27611`)
+- :meth:`Series.where` with ``Categorical`` dtype (or :meth:`DataFrame.where` with ``Categorical`` column) no longer allows setting new categories (:issue:`24114`)
- :func:`core.internals.blocks.make_block` no longer accepts the "fastpath" keyword(:issue:`19265`)
- :meth:`Block.make_block_same_class` no longer accepts the "dtype" keyword(:issue:`19434`)
- Removed the previously deprecated :meth:`ExtensionArray._formatting_values`. Use :attr:`ExtensionArray._formatter` instead. (:issue:`23601`)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 2d6ffb7277742..d4e876940fa49 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -2887,37 +2887,6 @@ def concat_same_type(self, to_concat, placement=None):
values, placement=placement or slice(0, len(values), 1), ndim=self.ndim
)
- def where(
- self,
- other,
- cond,
- align=True,
- errors="raise",
- try_cast: bool = False,
- axis: int = 0,
- ) -> List["Block"]:
- # TODO(CategoricalBlock.where):
- # This can all be deleted in favor of ExtensionBlock.where once
- # we enforce the deprecation.
- object_msg = (
- "Implicitly converting categorical to object-dtype ndarray. "
- "One or more of the values in 'other' are not present in this "
- "categorical's categories. A future version of pandas will raise "
- "a ValueError when 'other' contains different categories.\n\n"
- "To preserve the current behavior, add the new categories to "
- "the categorical before calling 'where', or convert the "
- "categorical to a different dtype."
- )
- try:
- # Attempt to do preserve categorical dtype.
- result = super().where(other, cond, align, errors, try_cast, axis)
- except (TypeError, ValueError):
- warnings.warn(object_msg, FutureWarning, stacklevel=6)
- result = self.astype(object).where(
- other, cond, align=align, errors=errors, try_cast=try_cast, axis=axis
- )
- return result
-
def replace(
self,
to_replace,
diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py
index 6edd7fd00b707..f929eb24c9f19 100644
--- a/pandas/tests/arrays/categorical/test_indexing.py
+++ b/pandas/tests/arrays/categorical/test_indexing.py
@@ -206,13 +206,11 @@ def test_where_other_categorical(self):
expected = pd.Series(Categorical(["a", "c", "c"], dtype=ser.dtype))
tm.assert_series_equal(result, expected)
- def test_where_warns(self):
+ def test_where_new_category_raises(self):
ser = pd.Series(Categorical(["a", "b", "c"]))
- with tm.assert_produces_warning(FutureWarning):
- result = ser.where([True, False, True], "d")
-
- expected = pd.Series(np.array(["a", "d", "c"], dtype="object"))
- tm.assert_series_equal(result, expected)
+ msg = "Cannot setitem on a Categorical with a new category"
+ with pytest.raises(ValueError, match=msg):
+ ser.where([True, False, True], "d")
def test_where_ordered_differs_rasies(self):
ser = pd.Series(
@@ -221,11 +219,8 @@ def test_where_ordered_differs_rasies(self):
other = Categorical(
["b", "c", "a"], categories=["a", "c", "b", "d"], ordered=True
)
- with tm.assert_produces_warning(FutureWarning):
- result = ser.where([True, False, True], other)
-
- expected = pd.Series(np.array(["a", "c", "c"], dtype=object))
- tm.assert_series_equal(result, expected)
+ with pytest.raises(ValueError, match="without identical categories"):
+ ser.where([True, False, True], other)
@pytest.mark.parametrize("index", [True, False])
| the whatsnew note here seems awkward, suggestions welcome | https://api.github.com/repos/pandas-dev/pandas/pulls/29913 | 2019-11-28T04:02:47Z | 2019-11-29T22:36:50Z | 2019-11-29T22:36:49Z | 2019-11-29T23:23:07Z |
DEPR: categorical.take allow_fill default | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index bb4a24e1b4eb5..435f99fc2cf3d 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -365,6 +365,7 @@ Deprecations
is equivalent to ``arr[idx.get_loc(idx_val)] = val``, which should be used instead (:issue:`28621`).
- :func:`is_extension_type` is deprecated, :func:`is_extension_array_dtype` should be used instead (:issue:`29457`)
- :func:`eval` keyword argument "truediv" is deprecated and will be removed in a future version (:issue:`29812`)
+- :meth:`Categorical.take_nd` is deprecated, use :meth:`Categorical.take` instead (:issue:`27745`)
.. _whatsnew_1000.prior_deprecations:
@@ -455,6 +456,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
- In :func:`concat` the default value for ``sort`` has been changed from ``None`` to ``False`` (:issue:`20613`)
- Removed previously deprecated "raise_conflict" argument from :meth:`DataFrame.update`, use "errors" instead (:issue:`23585`)
- Removed previously deprecated keyword "n" from :meth:`DatetimeIndex.shift`, :meth:`TimedeltaIndex.shift`, :meth:`PeriodIndex.shift`, use "periods" instead (:issue:`22458`)
+- Changed the default ``fill_value`` in :meth:`Categorical.take` from ``True`` to ``False`` (:issue:`20841`)
- Changed the default value for the `raw` argument in :func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`,
- :func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` to ``False`` (:issue:`20584`)
- Changed :meth:`Timedelta.resolution` to match the behavior of the standard library ``datetime.timedelta.resolution``, for the old behavior, use :meth:`Timedelta.resolution_string` (:issue:`26839`)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index f20308be1ee09..bb4ceea420d8d 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1,6 +1,5 @@
import operator
from shutil import get_terminal_size
-import textwrap
from typing import Type, Union, cast
from warnings import warn
@@ -59,18 +58,6 @@
from .base import ExtensionArray, _extension_array_shared_docs, try_cast_to_ea
-_take_msg = textwrap.dedent(
- """\
- Interpreting negative values in 'indexer' as missing values.
- In the future, this will change to meaning positional indices
- from the right.
-
- Use 'allow_fill=True' to retain the previous behavior and silence this
- warning.
-
- Use 'allow_fill=False' to accept the new behavior."""
-)
-
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
@@ -1829,7 +1816,7 @@ def fillna(self, value=None, method=None, limit=None):
return self._constructor(codes, dtype=self.dtype, fastpath=True)
- def take_nd(self, indexer, allow_fill=None, fill_value=None):
+ def take(self, indexer, allow_fill: bool = False, fill_value=None):
"""
Take elements from the Categorical.
@@ -1838,7 +1825,7 @@ def take_nd(self, indexer, allow_fill=None, fill_value=None):
indexer : sequence of int
The indices in `self` to take. The meaning of negative values in
`indexer` depends on the value of `allow_fill`.
- allow_fill : bool, default None
+ allow_fill : bool, default False
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
@@ -1849,11 +1836,9 @@ def take_nd(self, indexer, allow_fill=None, fill_value=None):
(the default). These values are set to `fill_value`. Any other
other negative values raise a ``ValueError``.
- .. versionchanged:: 0.23.0
+ .. versionchanged:: 1.0.0
- Deprecated the default value of `allow_fill`. The deprecated
- default is ``True``. In the future, this will change to
- ``False``.
+ Default value changed from ``True`` to ``False``.
fill_value : object
The value to use for `indices` that are missing (-1), when
@@ -1903,10 +1888,6 @@ def take_nd(self, indexer, allow_fill=None, fill_value=None):
will raise a ``TypeError``.
"""
indexer = np.asarray(indexer, dtype=np.intp)
- if allow_fill is None:
- if (indexer < 0).any():
- warn(_take_msg, FutureWarning, stacklevel=2)
- allow_fill = True
dtype = self.dtype
@@ -1927,7 +1908,14 @@ def take_nd(self, indexer, allow_fill=None, fill_value=None):
result = type(self).from_codes(codes, dtype=dtype)
return result
- take = take_nd
+ def take_nd(self, indexer, allow_fill: bool = False, fill_value=None):
+ # GH#27745 deprecate alias that other EAs dont have
+ warn(
+ "Categorical.take_nd is deprecated, use Categorical.take instead",
+ FutureWarning,
+ stacklevel=2,
+ )
+ return self.take(indexer, allow_fill=allow_fill, fill_value=fill_value)
def __len__(self) -> int:
"""
diff --git a/pandas/tests/arrays/categorical/test_algos.py b/pandas/tests/arrays/categorical/test_algos.py
index e076015c5f61d..dce3c4e4d5e98 100644
--- a/pandas/tests/arrays/categorical/test_algos.py
+++ b/pandas/tests/arrays/categorical/test_algos.py
@@ -89,10 +89,12 @@ def test_isin_empty(empty):
class TestTake:
# https://github.com/pandas-dev/pandas/issues/20664
- def test_take_warns(self):
+ def test_take_default_allow_fill(self):
cat = pd.Categorical(["a", "b"])
- with tm.assert_produces_warning(FutureWarning):
- cat.take([0, -1])
+ with tm.assert_produces_warning(None):
+ result = cat.take([0, -1])
+
+ assert result.equals(cat)
def test_take_positive_no_warning(self):
cat = pd.Categorical(["a", "b"])
@@ -158,3 +160,8 @@ def test_take_fill_value_new_raises(self):
xpr = r"'fill_value' \('d'\) is not in this Categorical's categories."
with pytest.raises(TypeError, match=xpr):
cat.take([0, 1, -1], fill_value="d", allow_fill=True)
+
+ def test_take_nd_deprecated(self):
+ cat = pd.Categorical(["a", "b", "c"])
+ with tm.assert_produces_warning(FutureWarning):
+ cat.take_nd([0, 1])
| - [x] closes #27745
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Also deprecates take_nd as an alias for take. | https://api.github.com/repos/pandas-dev/pandas/pulls/29912 | 2019-11-28T03:52:45Z | 2019-11-29T22:39:28Z | 2019-11-29T22:39:28Z | 2019-11-29T23:19:14Z |
DEPR: Remove errors argument in tz_localize | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index e54397e635c77..1468718b16170 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -458,6 +458,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
- Changed the default value for the `raw` argument in :func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`,
- :func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` to ``False`` (:issue:`20584`)
- Removed previously deprecated :attr:`Timestamp.weekday_name`, :attr:`DatetimeIndex.weekday_name`, and :attr:`Series.dt.weekday_name` (:issue:`18164`)
+- Removed previously deprecated ``errors`` argument in :meth:`Timestamp.tz_localize`, :meth:`DatetimeIndex.tz_localize`, and :meth:`Series.tz_localize` (:issue:`22644`)
-
.. _whatsnew_1000.performance:
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 966f72dcd7889..76a694c64e1fb 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -720,18 +720,6 @@ default 'raise'
nonexistent times.
.. versionadded:: 0.24.0
- errors : 'raise', 'coerce', default None
- Determine how errors should be handled.
-
- The behavior is as follows:
-
- * 'raise' will raise a NonExistentTimeError if a timestamp is not
- valid in the specified timezone (e.g. due to a transition from
- or to DST time). Use ``nonexistent='raise'`` instead.
- * 'coerce' will return NaT if the timestamp can not be converted
- into the specified timezone. Use ``nonexistent='NaT'`` instead.
-
- .. deprecated:: 0.24.0
Returns
-------
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 08e504ada789e..e7dc911ff0bae 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -753,8 +753,7 @@ timedelta}, default 'raise'
# GH#21336, GH#21365
return Timedelta(nanoseconds=1)
- def tz_localize(self, tz, ambiguous='raise', nonexistent='raise',
- errors=None):
+ def tz_localize(self, tz, ambiguous='raise', nonexistent='raise'):
"""
Convert naive Timestamp to local time zone, or remove
timezone from tz-aware Timestamp.
@@ -797,18 +796,6 @@ default 'raise'
nonexistent times.
.. versionadded:: 0.24.0
- errors : 'raise', 'coerce', default None
- Determine how errors should be handled.
-
- The behavior is as follows:
-
- * 'raise' will raise a NonExistentTimeError if a timestamp is not
- valid in the specified timezone (e.g. due to a transition from
- or to DST time). Use ``nonexistent='raise'`` instead.
- * 'coerce' will return NaT if the timestamp can not be converted
- into the specified timezone. Use ``nonexistent='NaT'`` instead.
-
- .. deprecated:: 0.24.0
Returns
-------
@@ -822,19 +809,6 @@ default 'raise'
if ambiguous == 'infer':
raise ValueError('Cannot infer offset with only one time.')
- if errors is not None:
- warnings.warn("The errors argument is deprecated and will be "
- "removed in a future release. Use "
- "nonexistent='NaT' or nonexistent='raise' "
- "instead.", FutureWarning)
- if errors == 'coerce':
- nonexistent = 'NaT'
- elif errors == 'raise':
- nonexistent = 'raise'
- else:
- raise ValueError("The errors argument must be either 'coerce' "
- "or 'raise'.")
-
nonexistent_options = ('raise', 'NaT', 'shift_forward',
'shift_backward')
if nonexistent not in nonexistent_options and not isinstance(
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 07cbaa8cd1eb6..47f236c19ffe7 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -955,7 +955,7 @@ def tz_convert(self, tz):
dtype = tz_to_dtype(tz)
return self._simple_new(self.asi8, dtype=dtype, freq=self.freq)
- def tz_localize(self, tz, ambiguous="raise", nonexistent="raise", errors=None):
+ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"):
"""
Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
@@ -1004,17 +1004,6 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise", errors=None):
.. versionadded:: 0.24.0
- errors : {'raise', 'coerce'}, default None
- The method to handle errors:
-
- - 'raise' will raise a NonExistentTimeError if a timestamp is not
- valid in the specified time zone (e.g. due to a transition from
- or to DST time). Use ``nonexistent='raise'`` instead.
- - 'coerce' will return NaT if the timestamp can not be converted
- to the specified time zone. Use ``nonexistent='NaT'`` instead.
-
- .. deprecated:: 0.24.0
-
Returns
-------
Same type as self
@@ -1105,23 +1094,6 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise", errors=None):
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, 'Europe/Warsaw']
"""
- if errors is not None:
- warnings.warn(
- "The errors argument is deprecated and will be "
- "removed in a future release. Use "
- "nonexistent='NaT' or nonexistent='raise' "
- "instead.",
- FutureWarning,
- )
- if errors == "coerce":
- nonexistent = "NaT"
- elif errors == "raise":
- nonexistent = "raise"
- else:
- raise ValueError(
- "The errors argument must be either 'coerce' or 'raise'."
- )
-
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index 059dbb00019d8..3f942f9b79428 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -323,13 +323,9 @@ def test_dti_tz_localize_nonexistent_raise_coerce(self):
index.tz_localize(tz=tz)
with pytest.raises(pytz.NonExistentTimeError):
- with tm.assert_produces_warning(FutureWarning):
- index.tz_localize(tz=tz, errors="raise")
+ index.tz_localize(tz=tz, nonexistent="raise")
- with tm.assert_produces_warning(
- FutureWarning, clear=FutureWarning, check_stacklevel=False
- ):
- result = index.tz_localize(tz=tz, errors="coerce")
+ result = index.tz_localize(tz=tz, nonexistent="NaT")
test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"]
dti = to_datetime(test_times, utc=True)
expected = dti.tz_convert("US/Eastern")
@@ -704,20 +700,6 @@ def test_dti_tz_localize_nonexistent_shift_invalid(self, offset, tz_type):
with pytest.raises(ValueError, match=msg):
dti.tz_localize(tz, nonexistent=timedelta(seconds=offset))
- @pytest.mark.filterwarnings("ignore::FutureWarning")
- def test_dti_tz_localize_errors_deprecation(self):
- # GH 22644
- tz = "Europe/Warsaw"
- n = 60
- dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min")
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- with pytest.raises(ValueError):
- dti.tz_localize(tz, errors="foo")
- # make sure errors='coerce' gets mapped correctly to nonexistent
- result = dti.tz_localize(tz, errors="coerce")
- expected = dti.tz_localize(tz, nonexistent="NaT")
- tm.assert_index_equal(result, expected)
-
# -------------------------------------------------------------
# DatetimeIndex.normalize
diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py
index 250f48b7e711b..6537f6ccd8432 100644
--- a/pandas/tests/scalar/timestamp/test_timezones.py
+++ b/pandas/tests/scalar/timestamp/test_timezones.py
@@ -14,7 +14,6 @@
import pandas.util._test_decorators as td
from pandas import NaT, Timestamp
-import pandas.util.testing as tm
class TestTimestampTZOperations:
@@ -80,7 +79,6 @@ def test_tz_localize_ambiguous(self):
("2015-03-29 02:30", "Europe/Belgrade"),
],
)
- @pytest.mark.filterwarnings("ignore::FutureWarning")
def test_tz_localize_nonexistent(self, stamp, tz):
# GH#13057
ts = Timestamp(stamp)
@@ -88,36 +86,21 @@ def test_tz_localize_nonexistent(self, stamp, tz):
ts.tz_localize(tz)
# GH 22644
with pytest.raises(NonExistentTimeError):
- with tm.assert_produces_warning(FutureWarning):
- ts.tz_localize(tz, errors="raise")
- with tm.assert_produces_warning(FutureWarning):
- assert ts.tz_localize(tz, errors="coerce") is NaT
+ ts.tz_localize(tz, nonexistent="raise")
+ assert ts.tz_localize(tz, nonexistent="NaT") is NaT
- def test_tz_localize_errors_ambiguous(self):
+ def test_tz_localize_ambiguous_raise(self):
# GH#13057
ts = Timestamp("2015-11-1 01:00")
with pytest.raises(AmbiguousTimeError):
- with tm.assert_produces_warning(FutureWarning):
- ts.tz_localize("US/Pacific", errors="coerce")
+ ts.tz_localize("US/Pacific", ambiguous="raise")
- @pytest.mark.filterwarnings("ignore::FutureWarning")
- def test_tz_localize_errors_invalid_arg(self):
+ def test_tz_localize_nonexistent_invalid_arg(self):
# GH 22644
tz = "Europe/Warsaw"
ts = Timestamp("2015-03-29 02:00:00")
with pytest.raises(ValueError):
- with tm.assert_produces_warning(FutureWarning):
- ts.tz_localize(tz, errors="foo")
-
- def test_tz_localize_errors_coerce(self):
- # GH 22644
- # make sure errors='coerce' gets mapped correctly to nonexistent
- tz = "Europe/Warsaw"
- ts = Timestamp("2015-03-29 02:00:00")
- with tm.assert_produces_warning(FutureWarning):
- result = ts.tz_localize(tz, errors="coerce")
- expected = ts.tz_localize(tz, nonexistent="NaT")
- assert result is expected
+ ts.tz_localize(tz, nonexistent="foo")
@pytest.mark.parametrize(
"stamp",
diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py
index c16e2864b131f..c03101265f7e7 100644
--- a/pandas/tests/series/test_timezones.py
+++ b/pandas/tests/series/test_timezones.py
@@ -33,21 +33,6 @@ def test_series_tz_localize(self):
with pytest.raises(TypeError, match="Already tz-aware"):
ts.tz_localize("US/Eastern")
- @pytest.mark.filterwarnings("ignore::FutureWarning")
- def test_tz_localize_errors_deprecation(self):
- # GH 22644
- tz = "Europe/Warsaw"
- n = 60
- rng = date_range(start="2015-03-29 02:00:00", periods=n, freq="min")
- ts = Series(rng)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- with pytest.raises(ValueError):
- ts.dt.tz_localize(tz, errors="foo")
- # make sure errors='coerce' gets mapped correctly to nonexistent
- result = ts.dt.tz_localize(tz, errors="coerce")
- expected = ts.dt.tz_localize(tz, nonexistent="NaT")
- tm.assert_series_equal(result, expected)
-
def test_series_tz_localize_ambiguous_bool(self):
# make sure that we are correctly accepting bool values as ambiguous
| - [x] xref #22644
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/29911 | 2019-11-28T03:43:07Z | 2019-11-29T17:39:40Z | 2019-11-29T17:39:40Z | 2019-11-29T19:50:48Z |
BUG/DEPR: Timestamp/Timedelta resolution | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 14f36a808c468..bb4a24e1b4eb5 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -457,6 +457,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
- Removed previously deprecated keyword "n" from :meth:`DatetimeIndex.shift`, :meth:`TimedeltaIndex.shift`, :meth:`PeriodIndex.shift`, use "periods" instead (:issue:`22458`)
- Changed the default value for the `raw` argument in :func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`,
- :func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` to ``False`` (:issue:`20584`)
+- Changed :meth:`Timedelta.resolution` to match the behavior of the standard library ``datetime.timedelta.resolution``, for the old behavior, use :meth:`Timedelta.resolution_string` (:issue:`26839`)
- Removed previously deprecated :attr:`Timestamp.weekday_name`, :attr:`DatetimeIndex.weekday_name`, and :attr:`Series.dt.weekday_name` (:issue:`18164`)
- Removed previously deprecated ``errors`` argument in :meth:`Timestamp.tz_localize`, :meth:`DatetimeIndex.tz_localize`, and :meth:`Series.tz_localize` (:issue:`22644`)
-
@@ -516,6 +517,7 @@ Datetimelike
- Bug in :func:`pandas._config.localization.get_locales` where the ``locales -a`` encodes the locales list as windows-1252 (:issue:`23638`, :issue:`24760`, :issue:`27368`)
- Bug in :meth:`Series.var` failing to raise ``TypeError`` when called with ``timedelta64[ns]`` dtype (:issue:`28289`)
- Bug in :meth:`DatetimeIndex.strftime` and :meth:`Series.dt.strftime` where ``NaT`` was converted to the string ``'NaT'`` instead of ``np.nan`` (:issue:`29578`)
+- Bug in :attr:`Timestamp.resolution` being a property instead of a class attribute (:issue:`29910`)
Timedelta
^^^^^^^^^
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 48a2a05011ab5..726d664c1ebea 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1005,56 +1005,6 @@ cdef class _Timedelta(timedelta):
else:
return "D"
- @property
- def resolution(self):
- """
- Return a string representing the lowest timedelta resolution.
-
- Each timedelta has a defined resolution that represents the lowest OR
- most granular level of precision. Each level of resolution is
- represented by a short string as defined below:
-
- Resolution: Return value
-
- * Days: 'D'
- * Hours: 'H'
- * Minutes: 'T'
- * Seconds: 'S'
- * Milliseconds: 'L'
- * Microseconds: 'U'
- * Nanoseconds: 'N'
-
- Returns
- -------
- str
- Timedelta resolution.
-
- Examples
- --------
- >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns')
- >>> td.resolution
- 'N'
-
- >>> td = pd.Timedelta('1 days 2 min 3 us')
- >>> td.resolution
- 'U'
-
- >>> td = pd.Timedelta('2 min 3 s')
- >>> td.resolution
- 'S'
-
- >>> td = pd.Timedelta(36, unit='us')
- >>> td.resolution
- 'U'
- """
- # See GH#21344
- warnings.warn("Timedelta.resolution is deprecated, in a future "
- "version will behave like the standard library "
- "datetime.timedelta.resolution attribute. "
- "Use Timedelta.resolution_string instead.",
- FutureWarning)
- return self.resolution_string
-
@property
def nanoseconds(self):
"""
@@ -1602,3 +1552,4 @@ cdef _broadcast_floordiv_td64(int64_t value, object other,
# resolution in ns
Timedelta.min = Timedelta(np.iinfo(np.int64).min + 1)
Timedelta.max = Timedelta(np.iinfo(np.int64).max)
+Timedelta.resolution = Timedelta(nanoseconds=1)
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index e7dc911ff0bae..fbe71a0a6d198 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -744,15 +744,6 @@ timedelta}, default 'raise'
"""
return bool(ccalendar.is_leapyear(self.year))
- @property
- def resolution(self):
- """
- Return resolution describing the smallest difference between two
- times that can be represented by Timestamp object_state.
- """
- # GH#21336, GH#21365
- return Timedelta(nanoseconds=1)
-
def tz_localize(self, tz, ambiguous='raise', nonexistent='raise'):
"""
Convert naive Timestamp to local time zone, or remove
@@ -1036,3 +1027,4 @@ cdef int64_t _NS_LOWER_BOUND = -9223372036854775000
# Resolution is in nanoseconds
Timestamp.min = Timestamp(_NS_LOWER_BOUND)
Timestamp.max = Timestamp(_NS_UPPER_BOUND)
+Timestamp.resolution = Timedelta(nanoseconds=1) # GH#21336, GH#21365
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index d4881ff0e1747..5a5724401029c 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -804,9 +804,13 @@ def test_resolution_string(self):
def test_resolution_deprecated(self):
# GH#21344
td = Timedelta(days=4, hours=3)
- with tm.assert_produces_warning(FutureWarning) as w:
- td.resolution
- assert "Use Timedelta.resolution_string instead" in str(w[0].message)
+ result = td.resolution
+ assert result == Timedelta(nanoseconds=1)
+
+ # Check that the attribute is available on the class, mirroring
+ # the stdlib timedelta behavior
+ result = Timedelta.resolution
+ assert result == Timedelta(nanoseconds=1)
@pytest.mark.parametrize(
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index a33afc8b3ccca..d6251ffc7940d 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -192,6 +192,10 @@ def test_resolution(self):
dt = Timestamp("2100-01-01 00:00:00")
assert dt.resolution == Timedelta(nanoseconds=1)
+ # Check that the attribute is available on the class, mirroring
+ # the stdlib datetime behavior
+ assert Timestamp.resolution == Timedelta(nanoseconds=1)
+
class TestTimestampConstructors:
def test_constructor(self):
| Timedelta.resolution used to return a string, but that was deprecated in 0.25.0. Enforce that deprecation to return Timedelta(nanoseconds=1), mirroring the stdlib timedelta.resolution behavior.
Timestamp.resolution correctly mirrors datetime.resolution by returning Timedelta(nanoseconds=1), but it is a property whereas for the stdlib it is a class attribute. This PR makes it a class attribute for Timestamp. | https://api.github.com/repos/pandas-dev/pandas/pulls/29910 | 2019-11-28T01:12:37Z | 2019-11-29T21:07:56Z | 2019-11-29T21:07:56Z | 2019-11-29T21:11:59Z |
Added FutureWarning to Series.str.__iter__ | diff --git a/doc/source/whatsnew/v0.12.0.rst b/doc/source/whatsnew/v0.12.0.rst
index 0a74d67486715..86ff338536f80 100644
--- a/doc/source/whatsnew/v0.12.0.rst
+++ b/doc/source/whatsnew/v0.12.0.rst
@@ -417,6 +417,7 @@ Bug fixes
original ``Series`` or ``NaN``. For example,
.. ipython:: python
+ :okwarning:
strs = 'go', 'bow', 'joe', 'slow'
ds = pd.Series(strs)
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 5c9543580be26..9a41531855245 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -440,6 +440,7 @@ Other API changes
- :meth:`Series.dropna` has dropped its ``**kwargs`` argument in favor of a single ``how`` parameter.
Supplying anything else than ``how`` to ``**kwargs`` raised a ``TypeError`` previously (:issue:`29388`)
- When testing pandas, the new minimum required version of pytest is 5.0.1 (:issue:`29664`)
+- :meth:`Series.str.__iter__` was deprecated and will be removed in future releases (:issue:`28277`).
.. _whatsnew_1000.api.documentation:
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 6cc102dce3b9c..040a28ea2ee8c 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -2091,6 +2091,11 @@ def __getitem__(self, key):
return self.get(key)
def __iter__(self):
+ warnings.warn(
+ "Columnar iteration over characters will be deprecated in future releases.",
+ FutureWarning,
+ stacklevel=2,
+ )
i = 0
g = self.get(i)
while g.notna().any():
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index cf52e286a47a5..d08c8b1cc13e5 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -327,17 +327,18 @@ def test_iter(self):
strs = "google", "wikimedia", "wikipedia", "wikitravel"
ds = Series(strs)
- for s in ds.str:
- # iter must yield a Series
- assert isinstance(s, Series)
+ with tm.assert_produces_warning(FutureWarning):
+ for s in ds.str:
+ # iter must yield a Series
+ assert isinstance(s, Series)
- # indices of each yielded Series should be equal to the index of
- # the original Series
- tm.assert_index_equal(s.index, ds.index)
+ # indices of each yielded Series should be equal to the index of
+ # the original Series
+ tm.assert_index_equal(s.index, ds.index)
- for el in s:
- # each element of the series is either a basestring/str or nan
- assert isinstance(el, str) or isna(el)
+ for el in s:
+ # each element of the series is either a basestring/str or nan
+ assert isinstance(el, str) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
@@ -349,8 +350,9 @@ def test_iter_empty(self):
i, s = 100, 1
- for i, s in enumerate(ds.str):
- pass
+ with tm.assert_produces_warning(FutureWarning):
+ for i, s in enumerate(ds.str):
+ pass
# nothing to iterate over so nothing defined values should remain
# unchanged
@@ -360,8 +362,9 @@ def test_iter_empty(self):
def test_iter_single_element(self):
ds = Series(["a"])
- for i, s in enumerate(ds.str):
- pass
+ with tm.assert_produces_warning(FutureWarning):
+ for i, s in enumerate(ds.str):
+ pass
assert not i
tm.assert_series_equal(ds, s)
@@ -371,8 +374,9 @@ def test_iter_object_try_string(self):
i, s = 100, "h"
- for i, s in enumerate(ds.str):
- pass
+ with tm.assert_produces_warning(FutureWarning):
+ for i, s in enumerate(ds.str):
+ pass
assert i == 100
assert s == "h"
| - [x] #28277
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/29909 | 2019-11-27T23:57:39Z | 2019-12-08T17:55:03Z | 2019-12-08T17:55:02Z | 2023-12-11T23:53:30Z |
TYP: Annotated pandas/core/indexing.py | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index b52015b738c6e..2523d7b2d56b1 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -240,7 +240,7 @@ def _has_valid_tuple(self, key: Tuple):
"[{types}] types".format(types=self._valid_types)
)
- def _is_nested_tuple_indexer(self, tup: Tuple):
+ def _is_nested_tuple_indexer(self, tup: Tuple) -> bool:
if any(isinstance(ax, ABCMultiIndex) for ax in self.obj.axes):
return any(is_nested_tuple(tup, ax) for ax in self.obj.axes)
return False
@@ -273,10 +273,10 @@ def _convert_slice_indexer(self, key: slice, axis: int):
ax = self.obj._get_axis(min(axis, self.ndim - 1))
return ax._convert_slice_indexer(key, kind=self.name)
- def _has_valid_setitem_indexer(self, indexer):
+ def _has_valid_setitem_indexer(self, indexer) -> bool:
return True
- def _has_valid_positional_setitem_indexer(self, indexer):
+ def _has_valid_positional_setitem_indexer(self, indexer) -> bool:
""" validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally
"""
@@ -1318,7 +1318,7 @@ def __init__(self, name, obj):
super().__init__(name, obj)
@Appender(_NDFrameIndexer._validate_key.__doc__)
- def _validate_key(self, key, axis: int):
+ def _validate_key(self, key, axis: int) -> bool:
if isinstance(key, slice):
return True
@@ -1689,7 +1689,7 @@ def _validate_key(self, key, axis: int):
if not is_list_like_indexer(key):
self._convert_scalar_indexer(key, axis)
- def _is_scalar_access(self, key: Tuple):
+ def _is_scalar_access(self, key: Tuple) -> bool:
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
@@ -2002,7 +2002,7 @@ def _validate_key(self, key, axis: int):
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
- def _is_scalar_access(self, key: Tuple):
+ def _is_scalar_access(self, key: Tuple) -> bool:
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
@@ -2026,7 +2026,7 @@ def _getitem_scalar(self, key):
values = self.obj._get_value(*key, takeable=True)
return values
- def _validate_integer(self, key: int, axis: int):
+ def _validate_integer(self, key: int, axis: int) -> None:
"""
Check that 'key' is a valid position in the desired axis.
@@ -2452,7 +2452,7 @@ def maybe_convert_ix(*args):
return args
-def is_nested_tuple(tup, labels):
+def is_nested_tuple(tup, labels) -> bool:
# check for a compatible nested tuple and multiindexes among the axes
if not isinstance(tup, tuple):
return False
@@ -2465,12 +2465,12 @@ def is_nested_tuple(tup, labels):
return False
-def is_label_like(key):
+def is_label_like(key) -> bool:
# select a label or row
return not isinstance(key, slice) and not is_list_like_indexer(key)
-def need_slice(obj):
+def need_slice(obj) -> bool:
return (
obj.start is not None
or obj.stop is not None
@@ -2491,7 +2491,7 @@ def _non_reducing_slice(slice_):
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]
- def pred(part):
+ def pred(part) -> bool:
# true when slice does *not* reduce, False when part is a tuple,
# i.e. MultiIndex slice
return (isinstance(part, slice) or is_list_like(part)) and not isinstance(
@@ -2523,7 +2523,7 @@ def _maybe_numeric_slice(df, slice_, include_bool=False):
return slice_
-def _can_do_equal_len(labels, value, plane_indexer, lplane_indexer, obj):
+def _can_do_equal_len(labels, value, plane_indexer, lplane_indexer, obj) -> bool:
""" return True if we have an equal len settable """
if not len(labels) == 1 or not np.iterable(value) or is_scalar(plane_indexer[0]):
return False
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/29908 | 2019-11-27T23:44:17Z | 2019-11-30T17:48:11Z | 2019-11-30T17:48:11Z | 2019-12-01T13:18:16Z |
CI: Making benchmark errors easier to find | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index b689da8e39ff0..f68080d05bea6 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -80,15 +80,18 @@ jobs:
git fetch upstream
if git diff upstream/master --name-only | grep -q "^asv_bench/"; then
asv machine --yes
- ASV_OUTPUT="$(asv dev)"
- if [[ $(echo "$ASV_OUTPUT" | grep "failed") ]]; then
- echo "##vso[task.logissue type=error]Benchmarks run with errors"
- echo "$ASV_OUTPUT"
+ asv dev | sed "/failed$/ s/^/##[error]/" | tee benchmarks.log
+ if grep "failed" benchmarks.log > /dev/null ; then
exit 1
- else
- echo "Benchmarks run without errors"
fi
else
echo "Benchmarks did not run, no changes detected"
fi
if: true
+
+ - name: Publish benchmarks artifact
+ uses: actions/upload-artifact@master
+ with:
+ name: Benchmarks log
+ path: asv_bench/benchmarks.log
+ if: failure()
| - [X] xref https://github.com/pandas-dev/pandas/pull/29900#issuecomment-559278405
It takes a while to find the failing line of the benchmarks logs at the moment. This should highlight the failing ones, and also publish the log as an artifact so it can be analyzed easier.
CC: @jbrockmendel | https://api.github.com/repos/pandas-dev/pandas/pulls/29907 | 2019-11-27T23:33:08Z | 2019-11-29T22:50:20Z | 2019-11-29T22:50:20Z | 2019-11-29T23:37:13Z |
CI: Fix broken asv | diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index 498774034d642..ba0b51922fd31 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -113,7 +113,7 @@ class InferFreq:
def setup(self, freq):
if freq is None:
self.idx = date_range(start="1/1/1700", freq="D", periods=10000)
- self.idx.freq = None
+ self.idx._data._freq = None
else:
self.idx = date_range(start="1/1/1700", freq=freq, periods=10000)
| i think this is causing fails in unrelated PRs | https://api.github.com/repos/pandas-dev/pandas/pulls/29906 | 2019-11-27T22:46:14Z | 2019-11-28T04:16:14Z | 2019-11-28T04:16:14Z | 2019-11-28T04:20:53Z |
STY: x.__class__ to type(x) #batch-6 | diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 0620f2b9aae49..9c0bceb1d5110 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -309,9 +309,8 @@ def apply_index(self, i):
if type(self) is not DateOffset:
raise NotImplementedError(
- "DateOffset subclass {name} "
- "does not have a vectorized "
- "implementation".format(name=self.__class__.__name__)
+ f"DateOffset subclass {type(self).__name__} "
+ "does not have a vectorized implementation"
)
kwds = self.kwds
relativedelta_fast = {
@@ -402,7 +401,7 @@ def rollback(self, dt):
"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
- dt = dt - self.__class__(1, normalize=self.normalize, **self.kwds)
+ dt = dt - type(self)(1, normalize=self.normalize, **self.kwds)
return dt
def rollforward(self, dt):
@@ -416,7 +415,7 @@ def rollforward(self, dt):
"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
- dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)
+ dt = dt + type(self)(1, normalize=self.normalize, **self.kwds)
return dt
def onOffset(self, dt):
diff --git a/pandas/util/_depr_module.py b/pandas/util/_depr_module.py
index 45e7db9281837..ae3c6359d20e0 100644
--- a/pandas/util/_depr_module.py
+++ b/pandas/util/_depr_module.py
@@ -32,7 +32,7 @@ def __init__(self, deprmod, deprmodto=None, removals=None, moved=None):
self.moved = moved
# For introspection purposes.
- self.self_dir = frozenset(dir(self.__class__))
+ self.self_dir = frozenset(dir(type(self)))
def __dir__(self):
deprmodule = self._import_deprmod()
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index bcd12eba1651a..9adbf4cee5d74 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -713,7 +713,7 @@ def repr_class(x):
return x
try:
- return x.__class__.__name__
+ return type(x).__name__
except AttributeError:
return repr(type(x))
@@ -780,17 +780,17 @@ def assert_is_valid_plot_return_object(objs):
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
- "one of 'objs' is not a matplotlib Axes instance, type "
- "encountered {name!r}"
- ).format(name=el.__class__.__name__)
+ "one of 'objs' is not a matplotlib Axes instance, "
+ f"type encountered {repr(type(el).__name__)}"
+ )
assert isinstance(el, (plt.Axes, dict)), msg
else:
- assert isinstance(objs, (plt.Artist, tuple, dict)), (
- "objs is neither an ndarray of Artist instances nor a "
- 'single Artist instance, tuple, or dict, "objs" is a {name!r}'.format(
- name=objs.__class__.__name__
- )
+ msg = (
+ "objs is neither an ndarray of Artist instances nor a single "
+ "ArtistArtist instance, tuple, or dict, 'objs' is a "
+ f"{repr(type(objs).__name__)}"
)
+ assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def isiterable(obj):
| - [x] ref https://github.com/pandas-dev/pandas/pull/29816
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/29905 | 2019-11-27T22:40:16Z | 2019-11-29T16:48:03Z | 2019-11-29T16:48:03Z | 2019-11-30T10:12:54Z |
STY: x.__class__ to type(x) #batch-5 | diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index e6e38ce9921f5..42244626749b9 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -38,7 +38,7 @@ def test_str(self):
idx.name = "foo"
assert not "length={}".format(len(idx)) in str(idx)
assert "'foo'" in str(idx)
- assert idx.__class__.__name__ in str(idx)
+ assert type(idx).__name__ in str(idx)
if hasattr(idx, "tz"):
if idx.tz is not None:
diff --git a/pandas/tests/indexes/multi/test_missing.py b/pandas/tests/indexes/multi/test_missing.py
index 15bbd2ce97c3c..31de40512c474 100644
--- a/pandas/tests/indexes/multi/test_missing.py
+++ b/pandas/tests/indexes/multi/test_missing.py
@@ -42,9 +42,9 @@ def test_fillna(idx):
values[1] = np.nan
if isinstance(index, PeriodIndex):
- idx = index.__class__(values, freq=index.freq)
+ idx = type(index)(values, freq=index.freq)
else:
- idx = index.__class__(values)
+ idx = type(index)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
@@ -115,7 +115,7 @@ def test_hasnans_isnans(idx):
values = index.values
values[1] = np.nan
- index = idx.__class__(values)
+ index = type(idx)(values)
expected = np.array([False] * len(index), dtype=bool)
expected[1] = True
diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py
index 50a12baf352d9..501c2a4d8edcc 100644
--- a/pandas/tests/indexes/period/test_partial_slicing.py
+++ b/pandas/tests/indexes/period/test_partial_slicing.py
@@ -123,7 +123,7 @@ def test_range_slice_outofbounds(self):
for idx in [didx, pidx]:
df = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx)
- empty = DataFrame(index=idx.__class__([], freq="D"), columns=["units"])
+ empty = DataFrame(index=type(idx)([], freq="D"), columns=["units"])
empty["units"] = empty["units"].astype("int64")
tm.assert_frame_equal(df["2013/09/01":"2013/09/30"], empty)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 1f99ba7ad01db..77d81a4a9566e 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -752,7 +752,7 @@ def test_fancy(self):
@pytest.mark.parametrize("dtype", [np.int_, np.bool_])
def test_empty_fancy(self, index, dtype):
empty_arr = np.array([], dtype=dtype)
- empty_index = index.__class__([])
+ empty_index = type(index)([])
assert index[[]].identical(empty_index)
assert index[empty_arr].identical(empty_index)
@@ -762,7 +762,7 @@ def test_empty_fancy_raises(self, index):
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
empty_farr = np.array([], dtype=np.float_)
- empty_index = index.__class__([])
+ empty_index = type(index)([])
assert index[[]].identical(empty_index)
# np.ndarray only accepts ndarray of int & bool dtypes, so should Index
@@ -2446,8 +2446,8 @@ def test_copy_name(self):
# GH12309
index = self.create_index()
- first = index.__class__(index, copy=True, name="mario")
- second = first.__class__(first, copy=False)
+ first = type(index)(index, copy=True, name="mario")
+ second = type(first)(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index bb8339439d339..63f1ef7595f31 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -949,7 +949,7 @@ def test_append_preserve_index_name(self):
all_indexes = indexes_can_append + indexes_cannot_append_with_other
- @pytest.mark.parametrize("index", all_indexes, ids=lambda x: x.__class__.__name__)
+ @pytest.mark.parametrize("index", all_indexes, ids=lambda x: type(x).__name__)
def test_append_same_columns_type(self, index):
# GH18359
@@ -979,7 +979,7 @@ def test_append_same_columns_type(self, index):
@pytest.mark.parametrize(
"df_columns, series_index",
combinations(indexes_can_append, r=2),
- ids=lambda x: x.__class__.__name__,
+ ids=lambda x: type(x).__name__,
)
def test_append_different_columns_types(self, df_columns, series_index):
# GH18359
@@ -1004,12 +1004,12 @@ def test_append_different_columns_types(self, df_columns, series_index):
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
- "index_can_append", indexes_can_append, ids=lambda x: x.__class__.__name__
+ "index_can_append", indexes_can_append, ids=lambda x: type(x).__name__
)
@pytest.mark.parametrize(
"index_cannot_append_with_other",
indexes_cannot_append_with_other,
- ids=lambda x: x.__class__.__name__,
+ ids=lambda x: type(x).__name__,
)
def test_append_different_columns_types_raises(
self, index_can_append, index_cannot_append_with_other
diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py
index bdbfa333ef33a..eb4f3273f8713 100644
--- a/pandas/tests/series/test_apply.py
+++ b/pandas/tests/series/test_apply.py
@@ -92,7 +92,7 @@ def test_apply_box(self):
s = pd.Series(vals)
assert s.dtype == "datetime64[ns]"
# boxed value must be Timestamp instance
- res = s.apply(lambda x: "{0}_{1}_{2}".format(x.__class__.__name__, x.day, x.tz))
+ res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
@@ -102,7 +102,7 @@ def test_apply_box(self):
]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
- res = s.apply(lambda x: "{0}_{1}_{2}".format(x.__class__.__name__, x.day, x.tz))
+ res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
@@ -110,7 +110,7 @@ def test_apply_box(self):
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
s = pd.Series(vals)
assert s.dtype == "timedelta64[ns]"
- res = s.apply(lambda x: "{0}_{1}".format(x.__class__.__name__, x.days))
+ res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
exp = pd.Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
@@ -118,7 +118,7 @@ def test_apply_box(self):
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = pd.Series(vals)
assert s.dtype == "Period[M]"
- res = s.apply(lambda x: "{0}_{1}".format(x.__class__.__name__, x.freqstr))
+ res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
exp = pd.Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
@@ -614,7 +614,7 @@ def test_map_box(self):
s = pd.Series(vals)
assert s.dtype == "datetime64[ns]"
# boxed value must be Timestamp instance
- res = s.map(lambda x: "{0}_{1}_{2}".format(x.__class__.__name__, x.day, x.tz))
+ res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
@@ -624,7 +624,7 @@ def test_map_box(self):
]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
- res = s.map(lambda x: "{0}_{1}_{2}".format(x.__class__.__name__, x.day, x.tz))
+ res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
@@ -632,7 +632,7 @@ def test_map_box(self):
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
s = pd.Series(vals)
assert s.dtype == "timedelta64[ns]"
- res = s.map(lambda x: "{0}_{1}".format(x.__class__.__name__, x.days))
+ res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
exp = pd.Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
@@ -640,7 +640,7 @@ def test_map_box(self):
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = pd.Series(vals)
assert s.dtype == "Period[M]"
- res = s.map(lambda x: "{0}_{1}".format(x.__class__.__name__, x.freqstr))
+ res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
exp = pd.Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index f24bb9e72aef5..e65388be2ba7d 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -400,7 +400,7 @@ def test_value_counts_unique_nunique(self):
result = o.unique()
if isinstance(o, Index):
- assert isinstance(result, o.__class__)
+ assert isinstance(result, type(o))
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
diff --git a/pandas/tests/tseries/holiday/test_holiday.py b/pandas/tests/tseries/holiday/test_holiday.py
index 06869fcd7a4f8..7748b965f8962 100644
--- a/pandas/tests/tseries/holiday/test_holiday.py
+++ b/pandas/tests/tseries/holiday/test_holiday.py
@@ -238,7 +238,7 @@ class TestCalendar(AbstractHolidayCalendar):
rules = []
calendar = get_calendar("TestCalendar")
- assert TestCalendar == calendar.__class__
+ assert TestCalendar == type(calendar)
def test_factory():
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index d70780741aa88..ae78d5a55bb5e 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -358,7 +358,7 @@ def _check_offsetfunc_works(self, offset, funcname, dt, expected, normalize=Fals
ts = Timestamp(dt) + Nano(5)
if (
- offset_s.__class__.__name__ == "DateOffset"
+ type(offset_s).__name__ == "DateOffset"
and (funcname == "apply" or normalize)
and ts.nanosecond > 0
):
@@ -395,7 +395,7 @@ def _check_offsetfunc_works(self, offset, funcname, dt, expected, normalize=Fals
ts = Timestamp(dt, tz=tz) + Nano(5)
if (
- offset_s.__class__.__name__ == "DateOffset"
+ type(offset_s).__name__ == "DateOffset"
and (funcname == "apply" or normalize)
and ts.nanosecond > 0
):
diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index 9417dc4b48499..2e5477ea00e39 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -363,7 +363,7 @@ def __init__(self, name=None, rules=None):
"""
super().__init__()
if name is None:
- name = self.__class__.__name__
+ name = type(self).__name__
self.name = name
if rules is not None:
| - [x] ref https://github.com/pandas-dev/pandas/pull/29816
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/29904 | 2019-11-27T22:09:56Z | 2019-11-29T16:46:37Z | 2019-11-29T16:46:37Z | 2019-11-30T10:12:33Z |
Convert core/indexes/base.py to f-strings | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 4a3fa26c3460e..aa2326eeab8fa 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -120,7 +120,7 @@ def cmp_method(self, other):
return result
return ops.invalid_comparison(self, other, op)
- name = "__{name}__".format(name=op.__name__)
+ name = f"__{op.__name__}__"
return set_function_name(cmp_method, name, cls)
@@ -136,7 +136,7 @@ def index_arithmetic_method(self, other):
return (Index(result[0]), Index(result[1]))
return Index(result)
- name = "__{name}__".format(name=op.__name__)
+ name = f"__{op.__name__}__"
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
@@ -441,7 +441,7 @@ def __new__(
except IncompatibleFrequency:
pass
if kwargs:
- raise TypeError(f"Unexpected keyword arguments {set(kwargs)!r}")
+ raise TypeError(f"Unexpected keyword arguments {repr(set(kwargs))}")
return cls._simple_new(subarr, name, **kwargs)
elif hasattr(data, "__array__"):
@@ -768,8 +768,7 @@ def astype(self, dtype, copy=True):
self.values.astype(dtype, copy=copy), name=self.name, dtype=dtype
)
except (TypeError, ValueError):
- msg = "Cannot cast {name} to dtype {dtype}"
- raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
+ raise TypeError(f"Cannot cast {type(self).__name__} to dtype {dtype}")
_index_shared_docs[
"take"
@@ -814,8 +813,10 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
)
else:
if allow_fill and fill_value is not None:
- msg = "Unable to fill values because {0} cannot contain NA"
- raise ValueError(msg.format(type(self).__name__))
+ cls_name = type(self).__name__
+ raise ValueError(
+ f"Unable to fill values because {cls_name} cannot contain NA"
+ )
taken = self.values.take(indices)
return self._shallow_copy(taken)
@@ -1286,9 +1287,7 @@ def _set_names(self, values, level=None):
# All items in 'name' need to be hashable:
for name in values:
if not is_hashable(name):
- raise TypeError(
- "{}.name must be a hashable type".format(type(self).__name__)
- )
+ raise TypeError(f"{type(self).__name__}.name must be a hashable type")
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
@@ -1456,13 +1455,11 @@ def _validate_index_level(self, level):
)
elif level > 0:
raise IndexError(
- "Too many levels: Index has only 1 level, not %d" % (level + 1)
+ f"Too many levels: Index has only 1 level, not {level + 1}"
)
elif level != self.name:
raise KeyError(
- "Requested level ({}) does not match index name ({})".format(
- level, self.name
- )
+ f"Requested level ({level}) does not match index name ({self.name})"
)
def _get_level_number(self, level):
@@ -1558,9 +1555,8 @@ def droplevel(self, level=0):
return self
if len(level) >= self.nlevels:
raise ValueError(
- "Cannot remove {} levels from an index with {} "
- "levels: at least one level must be "
- "left.".format(len(level), self.nlevels)
+ f"Cannot remove {len(level)} levels from an index with {self.nlevels} "
+ "levels: at least one level must be left."
)
# The two checks above guarantee that here self is a MultiIndex
@@ -2014,7 +2010,7 @@ def fillna(self, value=None, downcast=None):
@Appender(_index_shared_docs["dropna"])
def dropna(self, how="any"):
if how not in ("any", "all"):
- raise ValueError("invalid how option: {0}".format(how))
+ raise ValueError(f"invalid how option: {how}")
if self.hasnans:
return self._shallow_copy(self.values[~self._isnan])
@@ -2288,10 +2284,8 @@ def __xor__(self, other):
def __nonzero__(self):
raise ValueError(
- "The truth value of a {0} is ambiguous. "
- "Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(
- type(self).__name__
- )
+ f"The truth value of a {type(self).__name__} is ambiguous. "
+ "Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
@@ -2354,7 +2348,7 @@ def _validate_sort_keyword(self, sort):
if sort not in [None, False]:
raise ValueError(
"The 'sort' keyword only takes the values of "
- "None or False; {0} was passed.".format(sort)
+ f"None or False; {sort} was passed."
)
def union(self, other, sort=None):
@@ -2481,10 +2475,9 @@ def _union(self, other, sort):
if sort is None:
try:
result = algos.safe_sort(result)
- except TypeError as e:
+ except TypeError as err:
warnings.warn(
- "{}, sort order is undefined for "
- "incomparable objects".format(e),
+ f"{err}, sort order is undefined for incomparable objects",
RuntimeWarning,
stacklevel=3,
)
@@ -2939,8 +2932,8 @@ def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
if limit is not None:
raise ValueError(
- "limit argument for %r method only well-defined "
- "if index and target are monotonic" % method
+ f"limit argument for {repr(method)} method only well-defined "
+ "if index and target are monotonic"
)
side = "left" if method == "pad" else "right"
@@ -3227,10 +3220,8 @@ def _invalid_indexer(self, form, key):
Consistent invalid indexer message.
"""
raise TypeError(
- "cannot do {form} indexing on {klass} with these "
- "indexers [{key}] of {kind}".format(
- form=form, klass=type(self), key=key, kind=type(key)
- )
+ f"cannot do {form} indexing on {type(self)} with these "
+ f"indexers [{key}] of {type(key)}"
)
# --------------------------------------------------------------------
@@ -3992,8 +3983,8 @@ def _scalar_data_error(cls, data):
# We return the TypeError so that we can raise it from the constructor
# in order to keep mypy happy
return TypeError(
- "{0}(...) must be called with a collection of some "
- "kind, {1} was passed".format(cls.__name__, repr(data))
+ f"{cls.__name__}(...) must be called with a collection of some "
+ f"kind, {repr(data)} was passed"
)
@classmethod
@@ -4037,8 +4028,7 @@ def _assert_can_do_op(self, value):
Check value is valid for scalar op.
"""
if not is_scalar(value):
- msg = "'value' must be a scalar, passed: {0}"
- raise TypeError(msg.format(type(value).__name__))
+ raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}")
def _is_memory_usage_qualified(self) -> bool:
"""
@@ -4113,7 +4103,7 @@ def contains(self, key) -> bool:
return key in self
def __hash__(self):
- raise TypeError("unhashable type: %r" % type(self).__name__)
+ raise TypeError(f"unhashable type: {repr(type(self).__name__)}")
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
@@ -5052,8 +5042,8 @@ def get_slice_bound(self, label, side, kind):
slc = lib.maybe_indices_to_slice(slc.astype("i8"), len(self))
if isinstance(slc, np.ndarray):
raise KeyError(
- "Cannot get %s slice bound for non-unique "
- "label: %r" % (side, original_label)
+ f"Cannot get {side} slice bound for non-unique "
+ f"label: {repr(original_label)}"
)
if isinstance(slc, slice):
@@ -5211,7 +5201,7 @@ def drop(self, labels, errors="raise"):
mask = indexer == -1
if mask.any():
if errors != "ignore":
- raise KeyError("{} not found in axis".format(labels[mask]))
+ raise KeyError(f"{labels[mask]} not found in axis")
indexer = indexer[~mask]
return self.delete(indexer)
| https://api.github.com/repos/pandas-dev/pandas/pulls/29903 | 2019-11-27T21:58:59Z | 2019-12-01T22:24:50Z | 2019-12-01T22:24:49Z | 2019-12-01T22:24:50Z | |
STY: x.__class__ to type(x) #batch-4 | diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index b18f0db622b3e..f8f5d337185c4 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -352,7 +352,7 @@ def to_string(self) -> str:
if len(series) == 0:
return "{name}([], {footer})".format(
- name=self.series.__class__.__name__, footer=footer
+ name=type(self.series).__name__, footer=footer
)
fmt_index, have_header = self._get_formatted_index()
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index a4f1488fb6b69..8218799129952 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -321,7 +321,7 @@ def format_object_summary(
if display_width is None:
display_width = get_option("display.width") or 80
if name is None:
- name = obj.__class__.__name__
+ name = type(obj).__name__
if indent_for_name:
name_len = len(name)
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index 253441ab25813..bb7b00571b0df 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -404,7 +404,7 @@ def encode(obj):
if isinstance(obj, RangeIndex):
return {
"typ": "range_index",
- "klass": obj.__class__.__name__,
+ "klass": type(obj).__name__,
"name": getattr(obj, "name", None),
"start": obj._range.start,
"stop": obj._range.stop,
@@ -413,7 +413,7 @@ def encode(obj):
elif isinstance(obj, PeriodIndex):
return {
"typ": "period_index",
- "klass": obj.__class__.__name__,
+ "klass": type(obj).__name__,
"name": getattr(obj, "name", None),
"freq": getattr(obj, "freqstr", None),
"dtype": obj.dtype.name,
@@ -429,7 +429,7 @@ def encode(obj):
obj = obj.tz_convert("UTC")
return {
"typ": "datetime_index",
- "klass": obj.__class__.__name__,
+ "klass": type(obj).__name__,
"name": getattr(obj, "name", None),
"dtype": obj.dtype.name,
"data": convert(obj.asi8),
@@ -444,7 +444,7 @@ def encode(obj):
typ = "interval_array"
return {
"typ": typ,
- "klass": obj.__class__.__name__,
+ "klass": type(obj).__name__,
"name": getattr(obj, "name", None),
"left": getattr(obj, "left", None),
"right": getattr(obj, "right", None),
@@ -453,7 +453,7 @@ def encode(obj):
elif isinstance(obj, MultiIndex):
return {
"typ": "multi_index",
- "klass": obj.__class__.__name__,
+ "klass": type(obj).__name__,
"names": getattr(obj, "names", None),
"dtype": obj.dtype.name,
"data": convert(obj.values),
@@ -462,7 +462,7 @@ def encode(obj):
else:
return {
"typ": "index",
- "klass": obj.__class__.__name__,
+ "klass": type(obj).__name__,
"name": getattr(obj, "name", None),
"dtype": obj.dtype.name,
"data": convert(obj.values),
@@ -472,7 +472,7 @@ def encode(obj):
elif isinstance(obj, Categorical):
return {
"typ": "category",
- "klass": obj.__class__.__name__,
+ "klass": type(obj).__name__,
"name": getattr(obj, "name", None),
"codes": obj.codes,
"categories": obj.categories,
@@ -483,7 +483,7 @@ def encode(obj):
elif isinstance(obj, Series):
return {
"typ": "series",
- "klass": obj.__class__.__name__,
+ "klass": type(obj).__name__,
"name": getattr(obj, "name", None),
"index": obj.index,
"dtype": obj.dtype.name,
@@ -498,7 +498,7 @@ def encode(obj):
# the block manager
return {
"typ": "block_manager",
- "klass": obj.__class__.__name__,
+ "klass": type(obj).__name__,
"axes": data.axes,
"blocks": [
{
@@ -506,7 +506,7 @@ def encode(obj):
"values": convert(b.values),
"shape": b.values.shape,
"dtype": b.dtype.name,
- "klass": b.__class__.__name__,
+ "klass": type(b).__name__,
"compress": compressor,
}
for b in data.blocks
@@ -553,7 +553,7 @@ def encode(obj):
elif isinstance(obj, BlockIndex):
return {
"typ": "block_index",
- "klass": obj.__class__.__name__,
+ "klass": type(obj).__name__,
"blocs": obj.blocs,
"blengths": obj.blengths,
"length": obj.length,
@@ -561,7 +561,7 @@ def encode(obj):
elif isinstance(obj, IntIndex):
return {
"typ": "int_index",
- "klass": obj.__class__.__name__,
+ "klass": type(obj).__name__,
"indices": obj.indices,
"length": obj.length,
}
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index bf7aa5970519f..fb63bdcaaa876 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3692,7 +3692,7 @@ def create_axes(
# the non_index_axes info
info = _get_info(self.info, i)
info["names"] = list(a.names)
- info["type"] = a.__class__.__name__
+ info["type"] = type(a).__name__
self.non_index_axes.append((i, append_axis))
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index bd5e215730397..59bb4e3bf236a 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -856,12 +856,11 @@ def __str__(self) -> str:
return self.string
def __repr__(self) -> str:
- # not perfect :-/
- return "{cls}({obj})".format(cls=self.__class__, obj=self)
+ return f"{type(self)}({self})"
def __eq__(self, other) -> bool:
return (
- isinstance(other, self.__class__)
+ isinstance(other, type(self))
and self.string == other.string
and self.value == other.value
)
diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py
index a5040c8cfc2fc..d1e1717225e15 100644
--- a/pandas/tests/extension/base/dtype.py
+++ b/pandas/tests/extension/base/dtype.py
@@ -96,7 +96,7 @@ def test_eq(self, dtype):
assert dtype != "anonther_type"
def test_construct_from_string(self, dtype):
- dtype_instance = dtype.__class__.construct_from_string(dtype.name)
- assert isinstance(dtype_instance, dtype.__class__)
+ dtype_instance = type(dtype).construct_from_string(dtype.name)
+ assert isinstance(dtype_instance, type(dtype))
with pytest.raises(TypeError):
- dtype.__class__.construct_from_string("another_type")
+ type(dtype).construct_from_string("another_type")
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py
index 5e4fb6d69e52c..20d06ef2e5647 100644
--- a/pandas/tests/extension/base/ops.py
+++ b/pandas/tests/extension/base/ops.py
@@ -123,9 +123,7 @@ def test_direct_arith_with_series_returns_not_implemented(self, data):
result = data.__add__(other)
assert result is NotImplemented
else:
- raise pytest.skip(
- "{} does not implement add".format(data.__class__.__name__)
- )
+ raise pytest.skip(f"{type(data).__name__} does not implement add")
class BaseComparisonOpsTests(BaseOpsUtil):
@@ -169,6 +167,4 @@ def test_direct_arith_with_series_returns_not_implemented(self, data):
result = data.__eq__(other)
assert result is NotImplemented
else:
- raise pytest.skip(
- "{} does not implement __eq__".format(data.__class__.__name__)
- )
+ raise pytest.skip(f"{type(data).__name__} does not implement __eq__")
diff --git a/pandas/tests/extension/base/printing.py b/pandas/tests/extension/base/printing.py
index 0f10efbf32a49..5d17a4b0cbee2 100644
--- a/pandas/tests/extension/base/printing.py
+++ b/pandas/tests/extension/base/printing.py
@@ -18,7 +18,7 @@ def test_array_repr(self, data, size):
data = type(data)._concat_same_type([data] * 5)
result = repr(data)
- assert data.__class__.__name__ in result
+ assert type(data).__name__ in result
assert "Length: {}".format(len(data)) in result
assert str(data.dtype) in result
if size == "big":
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index 3c97a87c95bd2..26a3c738750ca 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -642,7 +642,7 @@ def test_applymap_box(self):
}
)
- result = df.applymap(lambda x: "{0}".format(x.__class__.__name__))
+ result = df.applymap(lambda x: type(x).__name__)
expected = pd.DataFrame(
{
"a": ["Timestamp", "Timestamp"],
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index c35c4c3568f74..102949fe3f05e 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -244,7 +244,7 @@ def test_str(self):
idx = self.create_index()
idx.name = "foo"
assert "'foo'" in str(idx)
- assert idx.__class__.__name__ in str(idx)
+ assert type(idx).__name__ in str(idx)
def test_repr_max_seq_item_setting(self):
# GH10182
@@ -260,8 +260,8 @@ def test_copy_name(self, indices):
if isinstance(indices, MultiIndex):
return
- first = indices.__class__(indices, copy=True, name="mario")
- second = first.__class__(first, copy=False)
+ first = type(indices)(indices, copy=True, name="mario")
+ second = type(first)(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
@@ -292,7 +292,7 @@ def test_ensure_copied_data(self, indices):
# MultiIndex and CategoricalIndex are tested separately
return
- index_type = indices.__class__
+ index_type = type(indices)
result = index_type(indices.values, copy=True, **init_kwargs)
tm.assert_index_equal(indices, result)
tm.assert_numpy_array_equal(
@@ -502,7 +502,7 @@ def test_difference_base(self, sort, indices):
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
if isinstance(indices, (DatetimeIndex, TimedeltaIndex)):
- assert result.__class__ == answer.__class__
+ assert type(result) == type(answer)
tm.assert_numpy_array_equal(
result.sort_values().asi8, answer.sort_values().asi8
)
@@ -677,9 +677,9 @@ def test_hasnans_isnans(self, indices):
values[1] = np.nan
if isinstance(indices, PeriodIndex):
- idx = indices.__class__(values, freq=indices.freq)
+ idx = type(indices)(values, freq=indices.freq)
else:
- idx = indices.__class__(values)
+ idx = type(indices)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
@@ -716,9 +716,9 @@ def test_fillna(self, indices):
values[1] = np.nan
if isinstance(indices, PeriodIndex):
- idx = indices.__class__(values, freq=indices.freq)
+ idx = type(indices)(values, freq=indices.freq)
else:
- idx = indices.__class__(values)
+ idx = type(indices)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
| - [x] ref https://github.com/pandas-dev/pandas/pull/29816
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/29902 | 2019-11-27T21:37:33Z | 2019-11-29T16:45:12Z | 2019-11-29T16:45:12Z | 2019-11-30T10:12:13Z |
DEPR: Categorical.ravel, get_dtype_counts, dtype_str, to_dense | diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index eb9a0e83271f1..9647693d4ed6b 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -565,7 +565,7 @@ def setup(self):
def time_frame_get_dtype_counts(self):
with warnings.catch_warnings(record=True):
- self.df.get_dtype_counts()
+ self.df._data.get_dtype_counts()
def time_info(self):
self.df.info()
diff --git a/doc/redirects.csv b/doc/redirects.csv
index 46021d052679f..4d171bc3d400d 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -358,7 +358,6 @@ generated/pandas.DataFrame.from_dict,../reference/api/pandas.DataFrame.from_dict
generated/pandas.DataFrame.from_items,../reference/api/pandas.DataFrame.from_items
generated/pandas.DataFrame.from_records,../reference/api/pandas.DataFrame.from_records
generated/pandas.DataFrame.ge,../reference/api/pandas.DataFrame.ge
-generated/pandas.DataFrame.get_dtype_counts,../reference/api/pandas.DataFrame.get_dtype_counts
generated/pandas.DataFrame.get,../reference/api/pandas.DataFrame.get
generated/pandas.DataFrame.get_value,../reference/api/pandas.DataFrame.get_value
generated/pandas.DataFrame.get_values,../reference/api/pandas.DataFrame.get_values
@@ -486,7 +485,6 @@ generated/pandas.DataFrame.T,../reference/api/pandas.DataFrame.T
generated/pandas.DataFrame.timetuple,../reference/api/pandas.DataFrame.timetuple
generated/pandas.DataFrame.to_clipboard,../reference/api/pandas.DataFrame.to_clipboard
generated/pandas.DataFrame.to_csv,../reference/api/pandas.DataFrame.to_csv
-generated/pandas.DataFrame.to_dense,../reference/api/pandas.DataFrame.to_dense
generated/pandas.DataFrame.to_dict,../reference/api/pandas.DataFrame.to_dict
generated/pandas.DataFrame.to_excel,../reference/api/pandas.DataFrame.to_excel
generated/pandas.DataFrame.to_feather,../reference/api/pandas.DataFrame.to_feather
@@ -632,7 +630,6 @@ generated/pandas.Index.drop,../reference/api/pandas.Index.drop
generated/pandas.Index.droplevel,../reference/api/pandas.Index.droplevel
generated/pandas.Index.dropna,../reference/api/pandas.Index.dropna
generated/pandas.Index.dtype,../reference/api/pandas.Index.dtype
-generated/pandas.Index.dtype_str,../reference/api/pandas.Index.dtype_str
generated/pandas.Index.duplicated,../reference/api/pandas.Index.duplicated
generated/pandas.Index.empty,../reference/api/pandas.Index.empty
generated/pandas.Index.equals,../reference/api/pandas.Index.equals
@@ -1046,7 +1043,6 @@ generated/pandas.Series.floordiv,../reference/api/pandas.Series.floordiv
generated/pandas.Series.from_array,../reference/api/pandas.Series.from_array
generated/pandas.Series.from_csv,../reference/api/pandas.Series.from_csv
generated/pandas.Series.ge,../reference/api/pandas.Series.ge
-generated/pandas.Series.get_dtype_counts,../reference/api/pandas.Series.get_dtype_counts
generated/pandas.Series.get,../reference/api/pandas.Series.get
generated/pandas.Series.get_value,../reference/api/pandas.Series.get_value
generated/pandas.Series.get_values,../reference/api/pandas.Series.get_values
@@ -1235,7 +1231,6 @@ generated/pandas.Series.T,../reference/api/pandas.Series.T
generated/pandas.Series.timetuple,../reference/api/pandas.Series.timetuple
generated/pandas.Series.to_clipboard,../reference/api/pandas.Series.to_clipboard
generated/pandas.Series.to_csv,../reference/api/pandas.Series.to_csv
-generated/pandas.Series.to_dense,../reference/api/pandas.Series.to_dense
generated/pandas.Series.to_dict,../reference/api/pandas.Series.to_dict
generated/pandas.Series.to_excel,../reference/api/pandas.Series.to_excel
generated/pandas.Series.to_frame,../reference/api/pandas.Series.to_frame
diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst
index 5c860f2d4cb03..2604af4e33a89 100644
--- a/doc/source/reference/frame.rst
+++ b/doc/source/reference/frame.rst
@@ -28,7 +28,6 @@ Attributes and underlying data
:toctree: api/
DataFrame.dtypes
- DataFrame.get_dtype_counts
DataFrame.select_dtypes
DataFrame.values
DataFrame.get_values
@@ -363,7 +362,6 @@ Serialization / IO / conversion
DataFrame.to_msgpack
DataFrame.to_gbq
DataFrame.to_records
- DataFrame.to_dense
DataFrame.to_string
DataFrame.to_clipboard
DataFrame.style
diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst
index 8edea28c17318..c155b5e3fcb37 100644
--- a/doc/source/reference/indexing.rst
+++ b/doc/source/reference/indexing.rst
@@ -32,7 +32,6 @@ Properties
Index.has_duplicates
Index.hasnans
Index.dtype
- Index.dtype_str
Index.inferred_type
Index.is_all_dates
Index.shape
diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index 91843c7975a2c..2485b94ab4d09 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -579,7 +579,6 @@ Serialization / IO / conversion
Series.to_sql
Series.to_msgpack
Series.to_json
- Series.to_dense
Series.to_string
Series.to_clipboard
Series.to_latex
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 3f8d9d3916797..7d3f61ccf4e9f 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -459,6 +459,10 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
- In :func:`concat` the default value for ``sort`` has been changed from ``None`` to ``False`` (:issue:`20613`)
- Removed previously deprecated "raise_conflict" argument from :meth:`DataFrame.update`, use "errors" instead (:issue:`23585`)
- Removed previously deprecated keyword "n" from :meth:`DatetimeIndex.shift`, :meth:`TimedeltaIndex.shift`, :meth:`PeriodIndex.shift`, use "periods" instead (:issue:`22458`)
+- Removed the previously deprecated :meth:`Series.to_dense`, :meth:`DataFrame.to_dense` (:issue:`26684`)
+- Removed the previously deprecated :meth:`Index.dtype_str`, use ``str(index.dtype)`` instead (:issue:`27106`)
+- :meth:`Categorical.ravel` returns a :class:`Categorical` instead of a ``ndarray`` (:issue:`27199`)
+- Removed previously deprecated :meth:`Series.get_dtype_counts` and :meth:`DataFrame.get_dtype_counts` (:issue:`27145`)
- Changed the default ``fill_value`` in :meth:`Categorical.take` from ``True`` to ``False`` (:issue:`20841`)
- Changed the default value for the `raw` argument in :func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`,
- :func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` to ``False`` (:issue:`20584`)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index ec1f5d2d6214c..0dc972011833a 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1689,24 +1689,6 @@ def _values_for_rank(self):
)
return values
- def ravel(self, order="C"):
- """
- Return a flattened (numpy) array.
-
- For internal compatibility with numpy arrays.
-
- Returns
- -------
- numpy.array
- """
- warn(
- "Categorical.ravel will return a Categorical object instead "
- "of an ndarray in a future version.",
- FutureWarning,
- stacklevel=2,
- )
- return np.array(self)
-
def view(self, dtype=None):
if dtype is not None:
raise NotImplementedError(dtype)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index d981a1d6e4aa4..43810df18b0aa 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -269,10 +269,6 @@ def is_sparse(arr) -> bool:
bool
Whether or not the array-like is a pandas sparse array.
- See Also
- --------
- Series.to_dense : Return dense representation of a Series.
-
Examples
--------
Returns `True` if the parameter is a 1-D pandas sparse array.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b13aee238efb3..42b8214e07d49 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -171,7 +171,7 @@ class NDFrame(PandasObject, SelectionMixin):
]
_internal_names_set: Set[str] = set(_internal_names)
_accessors: Set[str] = set()
- _deprecations: FrozenSet[str] = frozenset(["get_dtype_counts", "get_values", "ix"])
+ _deprecations: FrozenSet[str] = frozenset(["get_values", "ix"])
_metadata: List[str] = []
_is_copy = None
_data: BlockManager
@@ -1988,26 +1988,6 @@ def __array_wrap__(self, result, context=None):
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
- def to_dense(self):
- """
- Return dense representation of Series/DataFrame (as opposed to sparse).
-
- .. deprecated:: 0.25.0
-
- Returns
- -------
- %(klass)s
- Dense %(klass)s.
- """
- warnings.warn(
- "DataFrame/Series.to_dense is deprecated "
- "and will be removed in a future version",
- FutureWarning,
- stacklevel=2,
- )
- # compat
- return self
-
# ----------------------------------------------------------------------
# Picklability
@@ -5520,51 +5500,6 @@ def get_values(self):
def _internal_get_values(self):
return self.values
- def get_dtype_counts(self):
- """
- Return counts of unique dtypes in this object.
-
- .. deprecated:: 0.25.0
-
- Use `.dtypes.value_counts()` instead.
-
- Returns
- -------
- dtype : Series
- Series with the count of columns with each dtype.
-
- See Also
- --------
- dtypes : Return the dtypes in this object.
-
- Examples
- --------
- >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]
- >>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])
- >>> df
- str int float
- 0 a 1 1.0
- 1 b 2 2.0
- 2 c 3 3.0
-
- >>> df.get_dtype_counts()
- float64 1
- int64 1
- object 1
- dtype: int64
- """
- warnings.warn(
- "`get_dtype_counts` has been deprecated and will be "
- "removed in a future version. For DataFrames use "
- "`.dtypes.value_counts()",
- FutureWarning,
- stacklevel=2,
- )
-
- from pandas import Series
-
- return Series(self._data.get_dtype_counts())
-
@property
def dtypes(self):
"""
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 4a3fa26c3460e..c2352c94f1316 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -214,7 +214,7 @@ class Index(IndexOpsMixin, PandasObject):
_deprecations: FrozenSet[str] = (
PandasObject._deprecations
| IndexOpsMixin._deprecations
- | frozenset(["asobject", "contains", "dtype_str", "get_values", "set_value"])
+ | frozenset(["contains", "get_values", "set_value"])
)
# To hand over control to subclasses
@@ -670,21 +670,6 @@ def dtype(self):
"""
return self._data.dtype
- @property
- def dtype_str(self):
- """
- Return the dtype str of the underlying data.
-
- .. deprecated:: 0.25.0
- """
- warnings.warn(
- "`dtype_str` has been deprecated. Call `str` on the "
- "dtype attribute instead.",
- FutureWarning,
- stacklevel=2,
- )
- return str(self.dtype)
-
def ravel(self, order="C"):
"""
Return an ndarray of the flattened values of the underlying data.
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index 528053aa8c7f1..dff1e58641ade 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -93,10 +93,7 @@ class TestConstructors(base.BaseConstructorsTests):
class TestReshaping(base.BaseReshapingTests):
- def test_ravel(self, data):
- # GH#27199 Categorical.ravel returns self until after deprecation cycle
- with tm.assert_produces_warning(FutureWarning):
- data.ravel()
+ pass
class TestGetitem(base.BaseGetitemTests):
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index c180511e31619..0912a8901dc6a 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -950,23 +950,3 @@ def test_axis_classmethods(self, box):
assert obj._get_axis_number(v) == box._get_axis_number(v)
assert obj._get_axis_name(v) == box._get_axis_name(v)
assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v)
-
- def test_deprecated_to_dense(self):
- # GH 26557: DEPR
- # Deprecated 0.25.0
-
- df = pd.DataFrame({"A": [1, 2, 3]})
- with tm.assert_produces_warning(FutureWarning):
- result = df.to_dense()
- tm.assert_frame_equal(result, df)
-
- ser = pd.Series([1, 2, 3])
- with tm.assert_produces_warning(FutureWarning):
- result = ser.to_dense()
- tm.assert_series_equal(result, ser)
-
- def test_deprecated_get_dtype_counts(self):
- # GH 18262
- df = DataFrame([1])
- with tm.assert_produces_warning(FutureWarning):
- df.get_dtype_counts()
diff --git a/pandas/tests/indexes/multi/test_format.py b/pandas/tests/indexes/multi/test_format.py
index a7f58b9ea78bd..3a8063aed8d20 100644
--- a/pandas/tests/indexes/multi/test_format.py
+++ b/pandas/tests/indexes/multi/test_format.py
@@ -7,13 +7,6 @@
import pandas.util.testing as tm
-def test_dtype_str(indices):
- with tm.assert_produces_warning(FutureWarning):
- dtype = indices.dtype_str
- assert isinstance(dtype, str)
- assert dtype == str(indices.dtype)
-
-
def test_format(idx):
idx.format()
idx[:0].format()
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index d75bd7bb21827..a07a87080804f 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -156,17 +156,6 @@ def test_shallow_copy_changing_freq_raises(self):
with pytest.raises(IncompatibleFrequency, match=msg):
pi._shallow_copy(pi, freq="H")
- def test_dtype_str(self):
- pi = pd.PeriodIndex([], freq="M")
- with tm.assert_produces_warning(FutureWarning):
- assert pi.dtype_str == "period[M]"
- assert pi.dtype_str == str(pi.dtype)
-
- with tm.assert_produces_warning(FutureWarning):
- pi = pd.PeriodIndex([], freq="3M")
- assert pi.dtype_str == "period[3M]"
- assert pi.dtype_str == str(pi.dtype)
-
def test_view_asi8(self):
idx = pd.PeriodIndex([], freq="M")
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index 558ba04b657a1..9e60b91db5e18 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -158,12 +158,6 @@ def test_set_name_methods(self, indices):
assert indices.name == name
assert indices.names == [name]
- def test_dtype_str(self, indices):
- with tm.assert_produces_warning(FutureWarning):
- dtype = indices.dtype_str
- assert isinstance(dtype, str)
- assert dtype == str(indices.dtype)
-
def test_hash_error(self, indices):
index = indices
with pytest.raises(
| Starting in on the 0.25.0 section in #6581. | https://api.github.com/repos/pandas-dev/pandas/pulls/29900 | 2019-11-27T19:51:06Z | 2019-12-01T18:11:53Z | 2019-12-01T18:11:53Z | 2019-12-01T18:15:03Z |
TYP: some types for pandas/core/arrays/sparse/dtype.py | diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py
index 3b656705f5568..0124304727ab3 100644
--- a/pandas/core/arrays/sparse/dtype.py
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -1,7 +1,7 @@
"""Sparse Dtype"""
import re
-from typing import Any
+from typing import Any, Tuple
import numpy as np
@@ -223,7 +223,7 @@ def construct_from_string(cls, string):
raise TypeError(msg)
@staticmethod
- def _parse_subtype(dtype):
+ def _parse_subtype(dtype: str) -> Tuple[str, bool]:
"""
Parse a string to get the subtype
@@ -249,7 +249,7 @@ def _parse_subtype(dtype):
has_fill_value = False
if m:
subtype = m.groupdict()["subtype"]
- has_fill_value = m.groupdict()["fill_value"] or has_fill_value
+ has_fill_value = bool(m.groupdict()["fill_value"])
elif dtype == "Sparse":
subtype = "float64"
else:
| broken off #28339
| https://api.github.com/repos/pandas-dev/pandas/pulls/29899 | 2019-11-27T19:47:02Z | 2019-11-29T17:01:13Z | 2019-11-29T17:01:13Z | 2019-12-01T13:21:18Z |
TYP: some types for pandas/core/arrays/sparse/array.py | diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 943dea4252499..593ba7a643193 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -260,6 +260,7 @@ class SparseArray(PandasObject, ExtensionArray, ExtensionOpsMixin):
_pandas_ftype = "sparse"
_subtyp = "sparse_array" # register ABCSparseArray
_deprecations = PandasObject._deprecations | frozenset(["get_values"])
+ _sparse_index: SparseIndex
def __init__(
self,
@@ -372,8 +373,8 @@ def __init__(
@classmethod
def _simple_new(
- cls, sparse_array: np.ndarray, sparse_index: SparseIndex, dtype: SparseDtype
- ) -> ABCSparseArray:
+ cls, sparse_array: np.ndarray, sparse_index: SparseIndex, dtype: SparseDtype,
+ ) -> "SparseArray":
new = cls([])
new._sparse_index = sparse_index
new._sparse_values = sparse_array
@@ -1392,8 +1393,8 @@ def __abs__(self):
# ------------------------------------------------------------------------
@classmethod
- def _create_unary_method(cls, op):
- def sparse_unary_method(self):
+ def _create_unary_method(cls, op) -> Callable[["SparseArray"], "SparseArray"]:
+ def sparse_unary_method(self) -> "SparseArray":
fill_value = op(np.array(self.fill_value)).item()
values = op(self.sp_values)
dtype = SparseDtype(values.dtype, fill_value)
| broken off #28339
| https://api.github.com/repos/pandas-dev/pandas/pulls/29898 | 2019-11-27T19:43:12Z | 2019-11-29T16:59:59Z | 2019-11-29T16:59:59Z | 2019-12-01T13:21:58Z |
TYP: some types for pandas/_config/config.py | diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index 814f855cceeac..8f75d0381c1a6 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -51,7 +51,7 @@
from collections import namedtuple
from contextlib import contextmanager
import re
-from typing import Dict, List
+from typing import Any, Dict, Iterable, List
import warnings
DeprecatedOption = namedtuple("DeprecatedOption", "key msg rkey removal_ver")
@@ -64,7 +64,7 @@
_registered_options: Dict[str, RegisteredOption] = {}
# holds the current values for registered options
-_global_config: Dict[str, str] = {}
+_global_config: Dict[str, Any] = {}
# keys which have a special meaning
_reserved_keys: List[str] = ["all"]
@@ -85,7 +85,7 @@ def _get_single_key(pat, silent):
if len(keys) == 0:
if not silent:
_warn_if_deprecated(pat)
- raise OptionError("No such keys(s): {pat!r}".format(pat=pat))
+ raise OptionError(f"No such keys(s): {repr(pat)}")
if len(keys) > 1:
raise OptionError("Pattern matched multiple keys")
key = keys[0]
@@ -116,8 +116,8 @@ def _set_option(*args, **kwargs):
silent = kwargs.pop("silent", False)
if kwargs:
- msg = '_set_option() got an unexpected keyword argument "{kwarg}"'
- raise TypeError(msg.format(list(kwargs.keys())[0]))
+ kwarg = list(kwargs.keys())[0]
+ raise TypeError(f'_set_option() got an unexpected keyword argument "{kwarg}"')
for k, v in zip(args[::2], args[1::2]):
key = _get_single_key(k, silent)
@@ -412,7 +412,7 @@ def __exit__(self, *args):
_set_option(pat, val, silent=True)
-def register_option(key, defval, doc="", validator=None, cb=None):
+def register_option(key: str, defval: object, doc="", validator=None, cb=None):
"""Register an option in the package-wide pandas config object
Parameters
@@ -441,11 +441,9 @@ def register_option(key, defval, doc="", validator=None, cb=None):
key = key.lower()
if key in _registered_options:
- msg = "Option '{key}' has already been registered"
- raise OptionError(msg.format(key=key))
+ raise OptionError(f"Option '{key}' has already been registered")
if key in _reserved_keys:
- msg = "Option '{key}' is a reserved key"
- raise OptionError(msg.format(key=key))
+ raise OptionError(f"Option '{key}' is a reserved key")
# the default value should be legal
if validator:
@@ -455,10 +453,12 @@ def register_option(key, defval, doc="", validator=None, cb=None):
path = key.split(".")
for k in path:
- if not bool(re.match("^" + tokenize.Name + "$", k)):
- raise ValueError("{k} is not a valid identifier".format(k=k))
+ # NOTE: tokenize.Name is not a public constant
+ # error: Module has no attribute "Name" [attr-defined]
+ if not re.match("^" + tokenize.Name + "$", k): # type: ignore
+ raise ValueError(f"{k} is not a valid identifier")
if keyword.iskeyword(k):
- raise ValueError("{k} is a python keyword".format(k=k))
+ raise ValueError(f"{k} is a python keyword")
cursor = _global_config
msg = "Path prefix to option '{option}' is already an option"
@@ -522,8 +522,7 @@ def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
key = key.lower()
if key in _deprecated_options:
- msg = "Option '{key}' has already been defined as deprecated."
- raise OptionError(msg.format(key=key))
+ raise OptionError(f"Option '{key}' has already been defined as deprecated.")
_deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
@@ -621,11 +620,11 @@ def _warn_if_deprecated(key):
print(d.msg)
warnings.warn(d.msg, FutureWarning)
else:
- msg = "'{key}' is deprecated".format(key=key)
+ msg = f"'{key}' is deprecated"
if d.removal_ver:
- msg += " and will be removed in {version}".format(version=d.removal_ver)
+ msg += f" and will be removed in {d.removal_ver}"
if d.rkey:
- msg += ", please use '{rkey}' instead.".format(rkey=d.rkey)
+ msg += f", please use '{d.rkey}' instead."
else:
msg += ", please refrain from using it."
@@ -640,7 +639,7 @@ def _build_option_description(k):
o = _get_registered_option(k)
d = _get_deprecated_option(k)
- s = "{k} ".format(k=k)
+ s = f"{k} "
if o.doc:
s += "\n".join(o.doc.strip().split("\n"))
@@ -648,9 +647,7 @@ def _build_option_description(k):
s += "No description available."
if o:
- s += "\n [default: {default}] [currently: {current}]".format(
- default=o.defval, current=_get_option(k, True)
- )
+ s += f"\n [default: {o.defval}] [currently: {_get_option(k, True)}]"
if d:
s += "\n (Deprecated"
@@ -666,7 +663,7 @@ def pp_options_list(keys, width=80, _print=False):
from textwrap import wrap
from itertools import groupby
- def pp(name, ks):
+ def pp(name: str, ks: Iterable[str]) -> List[str]:
pfx = "- " + name + ".[" if name else ""
ls = wrap(
", ".join(ks),
@@ -679,7 +676,7 @@ def pp(name, ks):
ls[-1] = ls[-1] + "]"
return ls
- ls = []
+ ls: List[str] = []
singles = [x for x in sorted(keys) if x.find(".") < 0]
if singles:
ls += pp("", singles)
@@ -731,7 +728,7 @@ def config_prefix(prefix):
def wrap(func):
def inner(key, *args, **kwds):
- pkey = "{prefix}.{key}".format(prefix=prefix, key=key)
+ pkey = f"{prefix}.{key}"
return func(pkey, *args, **kwds)
return inner
@@ -768,8 +765,7 @@ def is_type_factory(_type):
def inner(x):
if type(x) != _type:
- msg = "Value must have type '{typ!s}'"
- raise ValueError(msg.format(typ=_type))
+ raise ValueError(f"Value must have type '{_type}'")
return inner
@@ -792,12 +788,11 @@ def is_instance_factory(_type):
_type = tuple(_type)
type_repr = "|".join(map(str, _type))
else:
- type_repr = "'{typ}'".format(typ=_type)
+ type_repr = f"'{_type}'"
def inner(x):
if not isinstance(x, _type):
- msg = "Value must be an instance of {type_repr}"
- raise ValueError(msg.format(type_repr=type_repr))
+ raise ValueError(f"Value must be an instance of {type_repr}")
return inner
@@ -813,10 +808,10 @@ def inner(x):
if not any(c(x) for c in callables):
uvals = [str(lval) for lval in legal_values]
pp_values = "|".join(uvals)
- msg = "Value must be one of {pp_values}"
+ msg = f"Value must be one of {pp_values}"
if len(callables):
msg += " or a callable"
- raise ValueError(msg.format(pp_values=pp_values))
+ raise ValueError(msg)
return inner
| broken off #28339
| https://api.github.com/repos/pandas-dev/pandas/pulls/29897 | 2019-11-27T19:35:56Z | 2019-11-29T22:59:11Z | 2019-11-29T22:59:11Z | 2020-12-12T12:51:23Z |
DEPR: ftype, ftypes | diff --git a/doc/redirects.csv b/doc/redirects.csv
index fb922eb79e363..f124fdb840ce0 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -357,10 +357,8 @@ generated/pandas.DataFrame.from_csv,../reference/api/pandas.DataFrame.from_csv
generated/pandas.DataFrame.from_dict,../reference/api/pandas.DataFrame.from_dict
generated/pandas.DataFrame.from_items,../reference/api/pandas.DataFrame.from_items
generated/pandas.DataFrame.from_records,../reference/api/pandas.DataFrame.from_records
-generated/pandas.DataFrame.ftypes,../reference/api/pandas.DataFrame.ftypes
generated/pandas.DataFrame.ge,../reference/api/pandas.DataFrame.ge
generated/pandas.DataFrame.get_dtype_counts,../reference/api/pandas.DataFrame.get_dtype_counts
-generated/pandas.DataFrame.get_ftype_counts,../reference/api/pandas.DataFrame.get_ftype_counts
generated/pandas.DataFrame.get,../reference/api/pandas.DataFrame.get
generated/pandas.DataFrame.get_value,../reference/api/pandas.DataFrame.get_value
generated/pandas.DataFrame.get_values,../reference/api/pandas.DataFrame.get_values
@@ -883,10 +881,8 @@ generated/pandas.Panel.first_valid_index,../reference/api/pandas.Panel.first_val
generated/pandas.Panel.floordiv,../reference/api/pandas.Panel.floordiv
generated/pandas.Panel.from_dict,../reference/api/pandas.Panel.from_dict
generated/pandas.Panel.fromDict,../reference/api/pandas.Panel.fromDict
-generated/pandas.Panel.ftypes,../reference/api/pandas.Panel.ftypes
generated/pandas.Panel.ge,../reference/api/pandas.Panel.ge
generated/pandas.Panel.get_dtype_counts,../reference/api/pandas.Panel.get_dtype_counts
-generated/pandas.Panel.get_ftype_counts,../reference/api/pandas.Panel.get_ftype_counts
generated/pandas.Panel.get,../reference/api/pandas.Panel.get
generated/pandas.Panel.get_value,../reference/api/pandas.Panel.get_value
generated/pandas.Panel.get_values,../reference/api/pandas.Panel.get_values
@@ -1223,11 +1219,8 @@ generated/pandas.Series.flags,../reference/api/pandas.Series.flags
generated/pandas.Series.floordiv,../reference/api/pandas.Series.floordiv
generated/pandas.Series.from_array,../reference/api/pandas.Series.from_array
generated/pandas.Series.from_csv,../reference/api/pandas.Series.from_csv
-generated/pandas.Series.ftype,../reference/api/pandas.Series.ftype
-generated/pandas.Series.ftypes,../reference/api/pandas.Series.ftypes
generated/pandas.Series.ge,../reference/api/pandas.Series.ge
generated/pandas.Series.get_dtype_counts,../reference/api/pandas.Series.get_dtype_counts
-generated/pandas.Series.get_ftype_counts,../reference/api/pandas.Series.get_ftype_counts
generated/pandas.Series.get,../reference/api/pandas.Series.get
generated/pandas.Series.get_value,../reference/api/pandas.Series.get_value
generated/pandas.Series.get_values,../reference/api/pandas.Series.get_values
diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst
index 4540504974f56..5c860f2d4cb03 100644
--- a/doc/source/reference/frame.rst
+++ b/doc/source/reference/frame.rst
@@ -28,7 +28,6 @@ Attributes and underlying data
:toctree: api/
DataFrame.dtypes
- DataFrame.ftypes
DataFrame.get_dtype_counts
DataFrame.select_dtypes
DataFrame.values
diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index c501e8bc91379..528cc8a0c3920 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -29,7 +29,6 @@ Attributes
Series.array
Series.values
Series.dtype
- Series.ftype
Series.shape
Series.nbytes
Series.ndim
@@ -43,7 +42,6 @@ Attributes
Series.flags
Series.empty
Series.dtypes
- Series.ftypes
Series.data
Series.name
Series.put
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index db23bfdc8a5bd..87a152c65524d 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -440,6 +440,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
- Removed the previously deprecated :meth:`Series.valid`; use :meth:`Series.dropna` instead (:issue:`18800`)
- Removed the previously properties :attr:`DataFrame.is_copy`, :attr:`Series.is_copy` (:issue:`18812`)
- Removed the previously deprecated :meth:`DataFrame.get_ftype_counts`, :meth:`Series.get_ftype_counts` (:issue:`18243`)
+- Removed the previously deprecated :meth:`DataFrame.ftypes`, :meth:`Series.ftypes`, :meth:`Series.ftype` (:issue:`26744`)
- Removed the previously deprecated :meth:`Index.get_duplicated`, use ``idx[idx.duplicated()].unique()`` instead (:issue:`20239`)
- Removed the previously deprecated :meth:`Series.clip_upper`, :meth:`Series.clip_lower`, :meth:`DataFrame.clip_upper`, :meth:`DataFrame.clip_lower` (:issue:`24203`)
- Removed the ability to alter :attr:`DatetimeIndex.freq`, :attr:`TimedeltaIndex.freq`, or :attr:`PeriodIndex.freq` (:issue:`20772`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2e2ae4e1dfa0a..1bfde2c4c3479 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -171,9 +171,7 @@ class NDFrame(PandasObject, SelectionMixin):
]
_internal_names_set: Set[str] = set(_internal_names)
_accessors: Set[str] = set()
- _deprecations: FrozenSet[str] = frozenset(
- ["get_dtype_counts", "get_values", "ftypes", "ix"]
- )
+ _deprecations: FrozenSet[str] = frozenset(["get_dtype_counts", "get_values", "ix"])
_metadata: List[str] = []
_is_copy = None
_data: BlockManager
@@ -5582,10 +5580,6 @@ def dtypes(self):
pandas.Series
The data type of each column.
- See Also
- --------
- DataFrame.ftypes : Dtype and sparsity information.
-
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
@@ -5603,55 +5597,6 @@ def dtypes(self):
return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
- @property
- def ftypes(self):
- """
- Return the ftypes (indication of sparse/dense and dtype) in DataFrame.
-
- .. deprecated:: 0.25.0
- Use :func:`dtypes` instead.
-
- This returns a Series with the data type of each column.
- The result's index is the original DataFrame's columns. Columns
- with mixed types are stored with the ``object`` dtype. See
- :ref:`the User Guide <basics.dtypes>` for more.
-
- Returns
- -------
- pandas.Series
- The data type and indication of sparse/dense of each column.
-
- See Also
- --------
- DataFrame.dtypes: Series with just dtype information.
-
- Notes
- -----
- Sparse data should have the same dtypes as its dense representation.
-
- Examples
- --------
- >>> arr = np.random.RandomState(0).randn(100, 4)
- >>> arr[arr < .8] = np.nan
- >>> pd.DataFrame(arr).ftypes
- 0 float64:dense
- 1 float64:dense
- 2 float64:dense
- 3 float64:dense
- dtype: object
- """
- warnings.warn(
- "DataFrame.ftypes is deprecated and will "
- "be removed in a future version. "
- "Use DataFrame.dtypes instead.",
- FutureWarning,
- stacklevel=2,
- )
-
- from pandas import Series
-
- return Series(self._data.get_ftypes(), index=self._info_axis, dtype=np.object_)
-
def _to_dict_of_blocks(self, copy=True):
"""
Return a dict of dtype -> Constructor Types that
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index c37a8ea5e42a4..19901dc510199 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -82,7 +82,6 @@ class BlockManager(PandasObject):
get_dtype_counts
get_dtypes
- get_ftypes
apply(func, axes, block_filter_fn)
@@ -251,10 +250,6 @@ def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_1d(dtypes, self._blknos, allow_fill=False)
- def get_ftypes(self):
- ftypes = np.array([blk.ftype for blk in self.blocks])
- return algos.take_1d(ftypes, self._blknos, allow_fill=False)
-
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
@@ -1546,19 +1541,12 @@ def dtype(self):
def array_dtype(self):
return self._block.array_dtype
- @property
- def ftype(self):
- return self._block.ftype
-
def get_dtype_counts(self):
return {self.dtype.name: 1}
def get_dtypes(self):
return np.array([self._block.dtype])
- def get_ftypes(self):
- return np.array([self._block.ftype])
-
def external_values(self):
return self._block.external_values()
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a9ecf97dad68b..dd1f3b1a9d3f3 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -158,9 +158,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_deprecations = (
base.IndexOpsMixin._deprecations
| generic.NDFrame._deprecations
- | frozenset(
- ["compress", "valid", "ftype", "real", "imag", "put", "ptp", "nonzero"]
- )
+ | frozenset(["compress", "valid", "real", "imag", "put", "ptp", "nonzero"])
)
# Override cache_readonly bc Series is mutable
@@ -418,42 +416,6 @@ def name(self, value: Optional[Hashable]) -> None:
raise TypeError("Series.name must be a hashable type")
self.attrs["name"] = value
- @property
- def ftype(self):
- """
- Return if the data is sparse|dense.
-
- .. deprecated:: 0.25.0
- Use :func:`dtype` instead.
- """
- warnings.warn(
- "Series.ftype is deprecated and will "
- "be removed in a future version. "
- "Use Series.dtype instead.",
- FutureWarning,
- stacklevel=2,
- )
-
- return self._data.ftype
-
- @property
- def ftypes(self):
- """
- Return if the data is sparse|dense.
-
- .. deprecated:: 0.25.0
- Use :func:`dtypes` instead.
- """
- warnings.warn(
- "Series.ftypes is deprecated and will "
- "be removed in a future version. "
- "Use Series.dtype instead.",
- FutureWarning,
- stacklevel=2,
- )
-
- return self._data.ftype
-
@property
def values(self):
"""
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 69d632479e969..a50d2d1a72155 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2641,7 +2641,9 @@ def write(self, **kwargs):
"cannot write on an abstract storer: sublcasses should implement"
)
- def delete(self, where=None, start=None, stop=None, **kwargs):
+ def delete(
+ self, where=None, start: Optional[int] = None, stop: Optional[int] = None
+ ):
"""
support fully deleting the node in its entirety (only) - where
specification must be None
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index cdcd5996324da..af5a765aa2729 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -46,63 +46,33 @@ def test_concat_empty_dataframe_dtypes(self):
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
- def test_empty_frame_dtypes_ftypes(self):
+ def test_empty_frame_dtypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
- # GH 26705 - Assert .ftypes is deprecated
- with tm.assert_produces_warning(FutureWarning):
- tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
-
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
- # GH 26705 - Assert .ftypes is deprecated
- with tm.assert_produces_warning(FutureWarning):
- tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
-
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
- # GH 26705 - Assert .ftypes is deprecated
- with tm.assert_produces_warning(FutureWarning):
- tm.assert_series_equal(
- norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
- )
-
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
- # GH 26705 - Assert .ftypes is deprecated
- with tm.assert_produces_warning(FutureWarning):
- tm.assert_series_equal(
- norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
- )
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
- ex_ftypes = pd.Series(
- odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
- )
tm.assert_series_equal(df.dtypes, ex_dtypes)
- # GH 26705 - Assert .ftypes is deprecated
- with tm.assert_produces_warning(FutureWarning):
- tm.assert_series_equal(df.ftypes, ex_ftypes)
-
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
- # GH 26705 - Assert .ftypes is deprecated
- with tm.assert_produces_warning(FutureWarning):
- tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
-
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
@@ -474,22 +444,6 @@ def test_dtypes_gh8722(self, float_string_frame):
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
- def test_ftypes(self, mixed_float_frame):
- frame = mixed_float_frame
- expected = Series(
- dict(
- A="float32:dense",
- B="float32:dense",
- C="float16:dense",
- D="float64:dense",
- )
- ).sort_values()
-
- # GH 26705 - Assert .ftypes is deprecated
- with tm.assert_produces_warning(FutureWarning):
- result = frame.ftypes.sort_values()
- tm.assert_series_equal(result, expected)
-
def test_astype_float(self, float_frame):
casted = float_frame.astype(int)
expected = DataFrame(
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index b2ecd7c4997f1..9d02c1bdc2d9c 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -290,10 +290,6 @@ def test_concat_empty_series_dtypes(self):
)
assert result.dtype == "Sparse[float64]"
- # GH 26705 - Assert .ftype is deprecated
- with tm.assert_produces_warning(FutureWarning):
- assert result.ftype == "float64:sparse"
-
result = pd.concat(
[Series(dtype="float64").astype("Sparse"), Series(dtype="float64")]
)
@@ -301,10 +297,6 @@ def test_concat_empty_series_dtypes(self):
expected = pd.SparseDtype(np.float64)
assert result.dtype == expected
- # GH 26705 - Assert .ftype is deprecated
- with tm.assert_produces_warning(FutureWarning):
- assert result.ftype == "float64:sparse"
-
result = pd.concat(
[Series(dtype="float64").astype("Sparse"), Series(dtype="object")]
)
@@ -312,10 +304,6 @@ def test_concat_empty_series_dtypes(self):
expected = pd.SparseDtype("object")
assert result.dtype == expected
- # GH 26705 - Assert .ftype is deprecated
- with tm.assert_produces_warning(FutureWarning):
- assert result.ftype == "object:sparse"
-
def test_combine_first_dt64(self):
from pandas.core.tools.datetimes import to_datetime
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index ec0318b2af13a..0dc64651e8d58 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -49,14 +49,6 @@ def test_dtype(self, datetime_series):
assert datetime_series.dtype == np.dtype("float64")
assert datetime_series.dtypes == np.dtype("float64")
- # GH 26705 - Assert .ftype is deprecated
- with tm.assert_produces_warning(FutureWarning):
- assert datetime_series.ftype == "float64:dense"
-
- # GH 26705 - Assert .ftypes is deprecated
- with tm.assert_produces_warning(FutureWarning):
- assert datetime_series.ftypes == "float64:dense"
-
@pytest.mark.parametrize("value", [np.nan, np.inf])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_astype_cast_nan_inf_int(self, dtype, value):
| Side-note, should we remove all of the redirects.csv entries for Panel? | https://api.github.com/repos/pandas-dev/pandas/pulls/29895 | 2019-11-27T19:05:26Z | 2019-11-27T20:49:09Z | 2019-11-27T20:49:09Z | 2019-11-27T21:24:44Z |
STY: x.__class__ to type(x) #batch-3 | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index b52015b738c6e..67412ed5e5b26 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -105,7 +105,7 @@ class _NDFrameIndexer(_NDFrameIndexerBase):
def __call__(self, axis=None):
# we need to return a copy of ourselves
- new_self = self.__class__(self.name, self.obj)
+ new_self = type(self)(self.name, self.obj)
if axis is not None:
axis = self.obj._get_axis_number(axis)
@@ -228,7 +228,9 @@ def _validate_key(self, key, axis: int):
raise AbstractMethodError(self)
def _has_valid_tuple(self, key: Tuple):
- """ check the key for valid keys across my indexer """
+ """
+ Check the key for valid keys across my indexer.
+ """
for i, k in enumerate(key):
if i >= self.ndim:
raise IndexingError("Too many indexers")
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 2d6ffb7277742..e4de1c94da450 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -257,11 +257,11 @@ def make_block_same_class(self, values, placement=None, ndim=None):
placement = self.mgr_locs
if ndim is None:
ndim = self.ndim
- return make_block(values, placement=placement, ndim=ndim, klass=self.__class__)
+ return make_block(values, placement=placement, ndim=ndim, klass=type(self))
def __repr__(self) -> str:
# don't want to print out all of the items here
- name = pprint_thing(self.__class__.__name__)
+ name = type(self).__name__
if self._is_single_block:
result = "{name}: {len} dtype: {dtype}".format(
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index f981c00fdad36..6c4ab2882d67f 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -122,7 +122,7 @@ def __init__(self, block, shape, indexers=None):
def __repr__(self) -> str:
return "{name}({block!r}, {indexers})".format(
- name=self.__class__.__name__, block=self.block, indexers=self.indexers
+ name=type(self).__name__, block=self.block, indexers=self.indexers
)
@cache_readonly
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index c37a8ea5e42a4..f00cf5a786228 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -154,7 +154,7 @@ def make_empty(self, axes=None):
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
- return self.__class__(blocks, axes)
+ return type(self)(blocks, axes)
def __nonzero__(self):
return True
@@ -321,7 +321,7 @@ def __len__(self) -> int:
return len(self.items)
def __repr__(self) -> str:
- output = pprint_thing(self.__class__.__name__)
+ output = type(self).__name__
for i, ax in enumerate(self.axes):
if i == 0:
output += "\nItems: {ax}".format(ax=ax)
@@ -435,7 +435,7 @@ def apply(
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
- bm = self.__class__(
+ bm = type(self)(
result_blocks, axes or self.axes, do_integrity_check=do_integrity_check
)
bm._consolidate_inplace()
@@ -524,7 +524,7 @@ def get_axe(block, qs, axes):
for b in blocks
]
- return self.__class__(blocks, new_axes)
+ return type(self)(blocks, new_axes)
# single block, i.e. ndim == {1}
values = concat_compat([b.values for b in blocks])
@@ -634,7 +634,7 @@ def comp(s, regex=False):
rb = new_rb
result_blocks.extend(rb)
- bm = self.__class__(result_blocks, self.axes)
+ bm = type(self)(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
@@ -729,7 +729,7 @@ def combine(self, blocks, copy=True):
axes = list(self.axes)
axes[0] = self.items.take(indexer)
- return self.__class__(new_blocks, axes, do_integrity_check=False)
+ return type(self)(new_blocks, axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
@@ -746,7 +746,7 @@ def get_slice(self, slobj, axis=0):
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
- bm = self.__class__(new_blocks, new_axes, do_integrity_check=False)
+ bm = type(self)(new_blocks, new_axes, do_integrity_check=False)
bm._consolidate_inplace()
return bm
@@ -922,7 +922,7 @@ def consolidate(self):
if self.is_consolidated():
return self
- bm = self.__class__(self.blocks, self.axes)
+ bm = type(self)(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
@@ -1256,7 +1256,7 @@ def reindex_indexer(
new_axes = list(self.axes)
new_axes[axis] = new_axis
- return self.__class__(new_blocks, new_axes)
+ return type(self)(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
@@ -1526,9 +1526,7 @@ def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
- return self.__class__(
- self._block._slice(slobj), self.index[slobj], fastpath=True
- )
+ return type(self)(self._block._slice(slobj), self.index[slobj], fastpath=True)
@property
def index(self):
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 2433e3f52b4a9..58c4a97d651d8 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -96,7 +96,7 @@ def __str__(self) -> str:
if getattr(self.groupby, k, None) is not None
)
return "{klass} [{attrs}]".format(
- klass=self.__class__.__name__, attrs=", ".join(attrs)
+ klass=type(self).__name__, attrs=", ".join(attrs)
)
def __getattr__(self, attr):
@@ -885,7 +885,7 @@ def count(self):
result = self._downsample("count")
if not len(self.ax):
if self._selected_obj.ndim == 1:
- result = self._selected_obj.__class__(
+ result = type(self._selected_obj)(
[], index=result.index, dtype="int64", name=self._selected_obj.name
)
else:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a9ecf97dad68b..8ce84388b7849 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -256,9 +256,7 @@ def __init__(
elif is_extension_array_dtype(data):
pass
elif isinstance(data, (set, frozenset)):
- raise TypeError(
- "{0!r} type is unordered".format(data.__class__.__name__)
- )
+ raise TypeError(f"{repr(type(data).__name__)} type is unordered")
elif isinstance(data, ABCSparseArray):
# handle sparse passed here (and force conversion)
data = data.to_dense()
@@ -1571,9 +1569,8 @@ def to_string(
# catch contract violations
if not isinstance(result, str):
raise AssertionError(
- "result must be of type unicode, type"
- " of result is {0!r}"
- "".format(result.__class__.__name__)
+ "result must be of type str, type"
+ f" of result is {repr(type(result).__name__)}"
)
if buf is None:
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 7f3404100f71c..d8aa362080093 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -204,7 +204,7 @@ def _get_window(self, other=None, win_type: Optional[str] = None) -> int:
@property
def _window_type(self) -> str:
- return self.__class__.__name__
+ return type(self).__name__
def __repr__(self) -> str:
"""
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 73cc40ae0e0d3..34838af5fd6e4 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -178,6 +178,6 @@ def __str__(self) -> str:
if self.methodtype == "classmethod":
name = self.class_instance.__name__
else:
- name = self.class_instance.__class__.__name__
+ name = type(self.class_instance).__name__
msg = "This {methodtype} must be defined in the concrete class {name}"
return msg.format(methodtype=self.methodtype, name=name)
diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py
index 7d3dbaf6ee021..f808b7e706afb 100644
--- a/pandas/io/clipboard/__init__.py
+++ b/pandas/io/clipboard/__init__.py
@@ -95,8 +95,8 @@ def _stringifyText(text) -> str:
acceptedTypes = (str, int, float, bool)
if not isinstance(text, acceptedTypes):
raise PyperclipException(
- f"only str, int, float, and bool values"
- f"can be copied to the clipboard, not {text.__class__.__name__}"
+ f"only str, int, float, and bool values "
+ f"can be copied to the clipboard, not {type(text).__name__}"
)
return str(text)
| - [x] ref https://github.com/pandas-dev/pandas/pull/29816
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/29894 | 2019-11-27T18:57:33Z | 2019-11-29T16:43:44Z | 2019-11-29T16:43:44Z | 2019-11-30T10:11:50Z |
STY: x.__class__ to type(x) #batch-2 | diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 2c601b01dbae5..fb3097684f0c3 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -420,7 +420,7 @@ def __repr__(self) -> str_type:
if self.categories is None:
data = "None, "
else:
- data = self.categories._format_data(name=self.__class__.__name__)
+ data = self.categories._format_data(name=type(self).__name__)
return tpl.format(data=data, ordered=self._ordered)
@staticmethod
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2e2ae4e1dfa0a..fbaf00d8172ea 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -253,7 +253,7 @@ def _validate_dtype(self, dtype):
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented"
- " in the {0} constructor".format(self.__class__.__name__)
+ " in the {0} constructor".format(type(self).__name__)
)
return dtype
@@ -1536,7 +1536,7 @@ def __nonzero__(self):
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(
- self.__class__.__name__
+ type(self).__name__
)
)
@@ -1561,7 +1561,7 @@ def bool(self):
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
- "{0}".format(self.__class__.__name__)
+ "{0}".format(type(self).__name__)
)
self.__nonzero__()
@@ -1867,7 +1867,7 @@ def _drop_labels_or_levels(self, keys, axis=0):
def __hash__(self):
raise TypeError(
"{0!r} objects are mutable, thus they cannot be"
- " hashed".format(self.__class__.__name__)
+ " hashed".format(type(self).__name__)
)
def __iter__(self):
@@ -2061,7 +2061,7 @@ def __repr__(self) -> str:
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = "[%s]" % ",".join(map(pprint_thing, self))
- return f"{self.__class__.__name__}({prepr})"
+ return f"{type(self).__name__}({prepr})"
def _repr_latex_(self):
"""
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 407cd8342d486..e088400b25f0f 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -41,7 +41,7 @@ def _gotitem(self, key, ndim, subset=None):
except IndexError:
groupby = self._groupby
- self = self.__class__(subset, groupby=groupby, parent=self, **kwargs)
+ self = type(self)(subset, groupby=groupby, parent=self, **kwargs)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 99ef281e842b1..4726cdfb05a70 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -473,7 +473,7 @@ def _transform_general(self, func, *args, **kwargs):
"""
Transform with a non-str `func`.
"""
- klass = self._selected_obj.__class__
+ klass = type(self._selected_obj)
results = []
for name, group in self:
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index dc924455b141d..9b2f43d8dd484 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -211,7 +211,7 @@ def __repr__(self) -> str:
if getattr(self, attr_name) is not None
)
attrs = ", ".join(attrs_list)
- cls_name = self.__class__.__name__
+ cls_name = type(self).__name__
return f"{cls_name}({attrs})"
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 486cc0cd9032d..4a3fa26c3460e 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -815,7 +815,7 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
else:
if allow_fill and fill_value is not None:
msg = "Unable to fill values because {0} cannot contain NA"
- raise ValueError(msg.format(self.__class__.__name__))
+ raise ValueError(msg.format(type(self).__name__))
taken = self.values.take(indices)
return self._shallow_copy(taken)
@@ -948,7 +948,7 @@ def __repr__(self):
"""
Return a string representation for this object.
"""
- klass = self.__class__.__name__
+ klass_name = type(self).__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
@@ -959,7 +959,7 @@ def __repr__(self):
if data is None:
data = ""
- res = f"{klass}({data}{prepr})"
+ res = f"{klass_name}({data}{prepr})"
return res
@@ -1287,7 +1287,7 @@ def _set_names(self, values, level=None):
for name in values:
if not is_hashable(name):
raise TypeError(
- "{}.name must be a hashable type".format(self.__class__.__name__)
+ "{}.name must be a hashable type".format(type(self).__name__)
)
self.name = values[0]
@@ -1794,7 +1794,7 @@ def is_all_dates(self) -> bool:
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
- return _new_Index, (self.__class__, d), None
+ return _new_Index, (type(self), d), None
def __setstate__(self, state):
"""
@@ -2290,7 +2290,7 @@ def __nonzero__(self):
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(
- self.__class__.__name__
+ type(self).__name__
)
)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index ab9f57ff9ac69..0d368845ea4f2 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -423,7 +423,7 @@ def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
- return _new_DatetimeIndex, (self.__class__, d), None
+ return _new_DatetimeIndex, (type(self), d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py
index 13c386187a9e5..ab9852157b9ef 100644
--- a/pandas/core/indexes/frozen.py
+++ b/pandas/core/indexes/frozen.py
@@ -69,13 +69,13 @@ def difference(self, other) -> "FrozenList":
def __getitem__(self, n):
if isinstance(n, slice):
- return self.__class__(super().__getitem__(n))
+ return type(self)(super().__getitem__(n))
return super().__getitem__(n)
def __radd__(self, other):
if isinstance(other, tuple):
other = list(other)
- return self.__class__(other + list(self))
+ return type(self)(other + list(self))
def __eq__(self, other) -> bool:
if isinstance(other, (tuple, FrozenList)):
@@ -85,12 +85,12 @@ def __eq__(self, other) -> bool:
__req__ = __eq__
def __mul__(self, other):
- return self.__class__(super().__mul__(other))
+ return type(self)(super().__mul__(other))
__imul__ = __mul__
def __reduce__(self):
- return self.__class__, (list(self),)
+ return type(self), (list(self),)
def __hash__(self):
return hash(tuple(self))
@@ -99,7 +99,7 @@ def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError(
"'{cls}' does not support mutable operations.".format(
- cls=self.__class__.__name__
+ cls=type(self).__name__
)
)
@@ -107,7 +107,7 @@ def __str__(self) -> str:
return pprint_thing(self, quote_strings=True, escape_chars=("\t", "\r", "\n"))
def __repr__(self) -> str:
- return f"{self.__class__.__name__}({str(self)})"
+ return f"{type(self).__name__}({str(self)})"
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
pop = append = extend = remove = sort = insert = _disabled
@@ -132,7 +132,7 @@ def __new__(cls, data, dtype=None, copy=False):
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError(
- "'{cls}' does not support mutable operations.".format(cls=self.__class__)
+ "'{cls}' does not support mutable operations.".format(cls=type(self))
)
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 35e8405e0f1aa..a9e119f3c5f87 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -497,7 +497,7 @@ def __array_wrap__(self, result, context=None):
def __reduce__(self):
d = dict(left=self.left, right=self.right)
d.update(self._get_attributes_dict())
- return _new_IntervalIndex, (self.__class__, d), None
+ return _new_IntervalIndex, (type(self), d), None
@Appender(_index_shared_docs["copy"])
def copy(self, deep=False, name=None):
@@ -512,7 +512,7 @@ def copy(self, deep=False, name=None):
@Appender(_index_shared_docs["astype"])
def astype(self, dtype, copy=True):
- with rewrite_exception("IntervalArray", self.__class__.__name__):
+ with rewrite_exception("IntervalArray", type(self).__name__):
new_values = self.values.astype(dtype, copy=copy)
if is_interval_dtype(new_values):
return self._shallow_copy(new_values.left, new_values.right)
@@ -1205,7 +1205,7 @@ def _format_attrs(self):
return attrs
def _format_space(self):
- space = " " * (len(self.__class__.__name__) + 1)
+ space = " " * (len(type(self).__name__) + 1)
return "\n{space}".format(space=space)
# --------------------------------------------------------------------
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 048112cbf0836..d151fb7260a58 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1245,9 +1245,7 @@ def _set_names(self, names, level=None, validate=True):
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError(
- "{}.name must be a hashable type".format(
- self.__class__.__name__
- )
+ "{}.name must be a hashable type".format(type(self).__name__)
)
self._names[lev] = name
@@ -1911,7 +1909,7 @@ def __reduce__(self):
sortorder=self.sortorder,
names=list(self.names),
)
- return ibase._new_Index, (self.__class__, d), None
+ return ibase._new_Index, (type(self), d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
@@ -3264,7 +3262,7 @@ def astype(self, dtype, copy=True):
elif not is_object_dtype(dtype):
msg = (
"Setting {cls} dtype to anything other than object is not supported"
- ).format(cls=self.__class__)
+ ).format(cls=type(self))
raise TypeError(msg)
elif copy is True:
return self._shallow_copy()
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index f7bbbee461e8d..f300cde3b5bcc 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -179,7 +179,7 @@ def _get_data_as_items(self):
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
- return ibase._new_Index, (self.__class__, d), None
+ return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
@@ -592,27 +592,27 @@ def _union(self, other, sort):
and (start_s - end_o) <= step_s
and (start_o - end_s) <= step_s
):
- return self.__class__(start_r, end_r + step_s, step_s)
+ return type(self)(start_r, end_r + step_s, step_s)
if (
(step_s % 2 == 0)
and (abs(start_s - start_o) <= step_s / 2)
and (abs(end_s - end_o) <= step_s / 2)
):
- return self.__class__(start_r, end_r + step_s / 2, step_s / 2)
+ return type(self)(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if (
(start_o - start_s) % step_s == 0
and (start_o + step_s >= start_s)
and (end_o - step_s <= end_s)
):
- return self.__class__(start_r, end_r + step_s, step_s)
+ return type(self)(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if (
(start_s - start_o) % step_o == 0
and (start_s + step_o >= start_o)
and (end_s - step_o <= end_o)
):
- return self.__class__(start_r, end_r + step_o, step_o)
+ return type(self)(start_r, end_r + step_o, step_o)
return self._int64index._union(other, sort=sort)
@Appender(_index_shared_docs["join"])
@@ -781,7 +781,7 @@ def _evaluate_numeric_binop(self, other):
rstart = op(left.start, right)
rstop = op(left.stop, right)
- result = self.__class__(rstart, rstop, rstep, **attrs)
+ result = type(self)(rstart, rstop, rstep, **attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
| - [x] ref https://github.com/pandas-dev/pandas/pull/29816
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/29893 | 2019-11-27T18:27:27Z | 2019-11-29T16:41:14Z | 2019-11-29T16:41:14Z | 2019-11-30T10:11:34Z |
String formatting > fstring | diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 34838af5fd6e4..43ce8ad4abb45 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -167,9 +167,7 @@ class AbstractMethodError(NotImplementedError):
def __init__(self, class_instance, methodtype="method"):
types = {"method", "classmethod", "staticmethod", "property"}
if methodtype not in types:
- msg = "methodtype must be one of {}, got {} instead.".format(
- methodtype, types
- )
+ msg = f"methodtype must be one of {methodtype}, got {types} instead."
raise ValueError(msg)
self.methodtype = methodtype
self.class_instance = class_instance
@@ -179,5 +177,5 @@ def __str__(self) -> str:
name = self.class_instance.__name__
else:
name = type(self.class_instance).__name__
- msg = "This {methodtype} must be defined in the concrete class {name}"
+ msg = f"This {self.methodtype} must be defined in the concrete class {name}"
return msg.format(methodtype=self.methodtype, name=name)
diff --git a/pandas/tests/arrays/interval/test_ops.py b/pandas/tests/arrays/interval/test_ops.py
index 43601ea301568..a55c33c2f22e9 100644
--- a/pandas/tests/arrays/interval/test_ops.py
+++ b/pandas/tests/arrays/interval/test_ops.py
@@ -83,8 +83,6 @@ def test_overlaps_na(self, constructor, start_shift):
)
def test_overlaps_invalid_type(self, constructor, other):
interval_container = constructor.from_breaks(range(5))
- msg = "`other` must be Interval-like, got {other}".format(
- other=type(other).__name__
- )
+ msg = f"`other` must be Interval-like, got {type(other).__name__}"
with pytest.raises(TypeError, match=msg):
interval_container.overlaps(other)
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index 755cbfb716fcd..c9f96ed516dc5 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -1007,7 +1007,7 @@ def test_cumsum(self, data, expected, numpy):
np.cumsum(SparseArray(data), out=out)
else:
axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid.
- msg = "axis\\(={axis}\\) out of bounds".format(axis=axis)
+ msg = re.escape(f"axis(={axis}) out of bounds")
with pytest.raises(ValueError, match=msg):
SparseArray(data).cumsum(axis=axis)
diff --git a/pandas/tests/arrays/sparse/test_libsparse.py b/pandas/tests/arrays/sparse/test_libsparse.py
index a6836c58348b3..7a85ccf271e76 100644
--- a/pandas/tests/arrays/sparse/test_libsparse.py
+++ b/pandas/tests/arrays/sparse/test_libsparse.py
@@ -596,6 +596,6 @@ def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
@pytest.mark.parametrize("opname", ["add", "sub", "mul", "truediv", "floordiv"])
def test_op(self, opname):
- sparse_op = getattr(splib, "sparse_{opname}_float64".format(opname=opname))
+ sparse_op = getattr(splib, f"sparse_{opname}_float64")
python_op = getattr(operator, opname)
self._op_tests(sparse_op, python_op)
diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py
index 6a28f76e474cc..a4554aca1325e 100644
--- a/pandas/tests/extension/arrow/arrays.py
+++ b/pandas/tests/extension/arrow/arrays.py
@@ -33,7 +33,7 @@ def construct_from_string(cls, string):
if string == cls.name:
return cls()
else:
- raise TypeError("Cannot construct a '{}' from '{}'".format(cls, string))
+ raise TypeError(f"Cannot construct a '{cls}' from '{string}'")
@classmethod
def construct_array_type(cls):
@@ -56,7 +56,7 @@ def construct_from_string(cls, string):
if string == cls.name:
return cls()
else:
- raise TypeError("Cannot construct a '{}' from '{}'".format(cls, string))
+ raise TypeError(f"Cannot construct a '{cls}' from '{string}'")
@classmethod
def construct_array_type(cls):
@@ -79,7 +79,7 @@ def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls.from_scalars(scalars)
def __repr__(self):
- return "{cls}({data})".format(cls=type(self).__name__, data=repr(self._data))
+ return f"{type(self).__name__}({repr(self._data)})"
def __getitem__(self, item):
if pd.api.types.is_scalar(item):
diff --git a/pandas/tests/extension/base/printing.py b/pandas/tests/extension/base/printing.py
index 5d17a4b0cbee2..ad34a83c7cf71 100644
--- a/pandas/tests/extension/base/printing.py
+++ b/pandas/tests/extension/base/printing.py
@@ -19,7 +19,7 @@ def test_array_repr(self, data, size):
result = repr(data)
assert type(data).__name__ in result
- assert "Length: {}".format(len(data)) in result
+ assert f"Length: {len(data)}" in result
assert str(data.dtype) in result
if size == "big":
assert "..." in result
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index f9ba4b7a8ba16..0c2f1e845909a 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -23,7 +23,7 @@ def __init__(self, context=None):
self.context = context or decimal.getcontext()
def __repr__(self) -> str:
- return "DecimalDtype(context={})".format(self.context)
+ return f"DecimalDtype(context={self.context})"
@classmethod
def construct_array_type(cls):
@@ -40,7 +40,7 @@ def construct_from_string(cls, string):
if string == cls.name:
return cls()
else:
- raise TypeError("Cannot construct a '{}' from '{}'".format(cls, string))
+ raise TypeError(f"Cannot construct a '{cls}' from '{string}'")
@property
def _is_numeric(self):
@@ -178,9 +178,7 @@ def _reduce(self, name, skipna=True, **kwargs):
try:
op = getattr(self.data, name)
except AttributeError:
- raise NotImplementedError(
- "decimal does not support the {} operation".format(name)
- )
+ raise NotImplementedError(f"decimal does not support the {name} operation")
return op(axis=0)
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index a29f910261b58..f9229e8066be4 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -146,9 +146,7 @@ def test_resample_basic_grouper(series):
def test_resample_string_kwargs(series, keyword, value):
# see gh-19303
# Check that wrong keyword argument strings raise an error
- msg = "Unsupported value {value} for `{keyword}`".format(
- value=value, keyword=keyword
- )
+ msg = f"Unsupported value {value} for `{keyword}`"
with pytest.raises(ValueError, match=msg):
series.resample("5min", **({keyword: value}))
diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py
index 574182ae99c5c..4c27d48cff6fd 100644
--- a/pandas/tests/resample/test_time_grouper.py
+++ b/pandas/tests/resample/test_time_grouper.py
@@ -89,7 +89,7 @@ def test_fails_on_no_datetime_index(name, func):
msg = (
"Only valid with DatetimeIndex, TimedeltaIndex "
- "or PeriodIndex, but got an instance of '{}'".format(name)
+ f"or PeriodIndex, but got an instance of '{name}'"
)
with pytest.raises(TypeError, match=msg):
df.groupby(Grouper(freq="D"))
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index 79608f4fb3cde..e709db980b721 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -141,7 +141,7 @@ def test_round_nat(klass, method, freq):
)
def test_nat_methods_raise(method):
# see gh-9513, gh-17329
- msg = "NaTType does not support {method}".format(method=method)
+ msg = f"NaTType does not support {method}"
with pytest.raises(ValueError, match=msg):
getattr(NaT, method)()
diff --git a/pandas/tests/tseries/offsets/common.py b/pandas/tests/tseries/offsets/common.py
index fbf4454109ec0..a097636bbf0b4 100644
--- a/pandas/tests/tseries/offsets/common.py
+++ b/pandas/tests/tseries/offsets/common.py
@@ -13,18 +13,14 @@ def assert_offset_equal(offset, base, expected):
assert actual_apply == expected
except AssertionError:
raise AssertionError(
- "\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"
- "\nAt Date: {base}".format(
- expected=expected, actual=actual, offset=offset, base=base
- )
+ f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"
+ f"\nAt Date: {base}"
)
def assert_onOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected, (
- "\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"
- "\nAt Date: {date}".format(
- expected=expected, actual=actual, offset=offset, date=date
- )
+ f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"
+ f"\nAt Date: {date}"
)
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 898060d011372..9ff5fb41957b9 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -243,7 +243,7 @@ def infer_freq(index, warn=True):
):
raise TypeError(
"cannot infer freq from a non-convertible dtype "
- "on a Series of {dtype}".format(dtype=index.dtype)
+ f"on a Series of {index.dtype}"
)
index = values
@@ -260,8 +260,7 @@ def infer_freq(index, warn=True):
if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
if isinstance(index, (pd.Int64Index, pd.Float64Index)):
raise TypeError(
- "cannot infer freq from a non-convertible index "
- "type {type}".format(type=type(index))
+ f"cannot infer freq from a non-convertible index type {type(index)}"
)
index = index.values
@@ -393,7 +392,7 @@ def _infer_daily_rule(self):
if annual_rule:
nyears = self.ydiffs[0]
month = MONTH_ALIASES[self.rep_stamp.month]
- alias = "{prefix}-{month}".format(prefix=annual_rule, month=month)
+ alias = f"{annual_rule}-{month}"
return _maybe_add_count(alias, nyears)
quarterly_rule = self._get_quarterly_rule()
@@ -401,7 +400,7 @@ def _infer_daily_rule(self):
nquarters = self.mdiffs[0] / 3
mod_dict = {0: 12, 2: 11, 1: 10}
month = MONTH_ALIASES[mod_dict[self.rep_stamp.month % 3]]
- alias = "{prefix}-{month}".format(prefix=quarterly_rule, month=month)
+ alias = f"{quarterly_rule}-{month}"
return _maybe_add_count(alias, nquarters)
monthly_rule = self._get_monthly_rule()
@@ -413,7 +412,7 @@ def _infer_daily_rule(self):
if days % 7 == 0:
# Weekly
day = int_to_weekday[self.rep_stamp.weekday()]
- return _maybe_add_count("W-{day}".format(day=day), days / 7)
+ return _maybe_add_count(f"W-{day}", days / 7)
else:
return _maybe_add_count("D", days)
@@ -485,7 +484,7 @@ def _get_wom_rule(self):
week = week_of_months[0] + 1
wd = int_to_weekday[weekdays[0]]
- return "WOM-{week}{weekday}".format(week=week, weekday=wd)
+ return f"WOM-{week}{wd}"
class _TimedeltaFrequencyInferer(_FrequencyInferer):
@@ -495,7 +494,7 @@ def _infer_daily_rule(self):
if days % 7 == 0:
# Weekly
wd = int_to_weekday[self.rep_stamp.weekday()]
- alias = "W-{weekday}".format(weekday=wd)
+ alias = f"W-{wd}"
return _maybe_add_count(alias, days / 7)
else:
return _maybe_add_count("D", days)
@@ -509,6 +508,6 @@ def _maybe_add_count(base, count):
if count != 1:
assert count == int(count)
count = int(count)
- return "{count}{base}".format(count=count, base=base)
+ return f"{count}{base}"
else:
return base
| xref #29547
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
More fstring changes | https://api.github.com/repos/pandas-dev/pandas/pulls/29892 | 2019-11-27T18:25:00Z | 2019-12-04T14:20:47Z | 2019-12-04T14:20:47Z | 2019-12-04T14:20:50Z |
CI: fix mypy complaint introduced in #29873 | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 69d632479e969..a50d2d1a72155 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2641,7 +2641,9 @@ def write(self, **kwargs):
"cannot write on an abstract storer: sublcasses should implement"
)
- def delete(self, where=None, start=None, stop=None, **kwargs):
+ def delete(
+ self, where=None, start: Optional[int] = None, stop: Optional[int] = None
+ ):
"""
support fully deleting the node in its entirety (only) - where
specification must be None
| @jreback the pertinent `delete` methods are never hit in tests. any chance we can rip them out? | https://api.github.com/repos/pandas-dev/pandas/pulls/29891 | 2019-11-27T16:50:05Z | 2019-11-27T18:14:19Z | 2019-11-27T18:14:19Z | 2019-11-27T19:07:58Z |
CLN: follow-up to 29725 | diff --git a/pandas/conftest.py b/pandas/conftest.py
index 0717f478e2782..f7d30d537b358 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -878,6 +878,6 @@ def index_or_series(request):
List item 0 has incompatible type "Type[Series]"; expected "Type[PandasObject]"
- See GH#?????
+ See GH#29725
"""
return request.param
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 77713deada44a..9733d589ee93b 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -33,7 +33,7 @@ def adjust_negative_zero(zero, expected):
# TODO: remove this kludge once mypy stops giving false positives here
# List comprehension has incompatible type List[PandasObject]; expected List[RangeIndex]
-# See GH#?????
+# See GH#29725
ser_or_index: List[Any] = [pd.Series, pd.Index]
lefts: List[Any] = [pd.RangeIndex(10, 40, 10)]
lefts.extend(
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index c00e792fb210f..3c97b75ecfa0c 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -666,7 +666,7 @@ def test_str_cat_align_mixed_inputs(self, join):
index_or_series2 = [Series, Index] # type: ignore
# List item 0 has incompatible type "Type[Series]"; expected "Type[PandasObject]"
- # See GH#>????
+ # See GH#29725
@pytest.mark.parametrize("other", index_or_series2)
def test_str_cat_all_na(self, index_or_series, other):
| fill in GH references | https://api.github.com/repos/pandas-dev/pandas/pulls/29890 | 2019-11-27T16:35:14Z | 2019-11-27T19:02:19Z | 2019-11-27T19:02:19Z | 2019-11-27T19:06:15Z |
STY: x.__class__ to type(x) #batch-1 | diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index ba108c4524b9c..603caed805e58 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -54,7 +54,7 @@ cdef class BlockPlacement:
else:
v = self._as_array
- return f'{self.__class__.__name__}({v})'
+ return f'{type(self).__name__}({v})'
def __repr__(self) -> str:
return str(self)
diff --git a/pandas/_libs/tslibs/c_timestamp.pyx b/pandas/_libs/tslibs/c_timestamp.pyx
index c6c98e996b745..02e252219453b 100644
--- a/pandas/_libs/tslibs/c_timestamp.pyx
+++ b/pandas/_libs/tslibs/c_timestamp.pyx
@@ -87,7 +87,7 @@ cdef class _Timestamp(datetime):
return PyObject_RichCompareBool(val, other, op)
try:
- ots = self.__class__(other)
+ ots = type(self)(other)
except ValueError:
return self._compare_outside_nanorange(other, op)
else:
@@ -96,7 +96,7 @@ cdef class _Timestamp(datetime):
if ndim != -1:
if ndim == 0:
if is_datetime64_object(other):
- other = self.__class__(other)
+ other = type(self)(other)
elif is_array(other):
# zero-dim array, occurs if try comparison with
# datetime64 scalar on the left hand side
@@ -105,7 +105,7 @@ cdef class _Timestamp(datetime):
# the numpy C api to extract it.
other = cnp.PyArray_ToScalar(cnp.PyArray_DATA(other),
other)
- other = self.__class__(other)
+ other = type(self)(other)
else:
return NotImplemented
elif is_array(other):
@@ -226,8 +226,7 @@ cdef class _Timestamp(datetime):
if is_timedelta64_object(other):
other_int = other.astype('timedelta64[ns]').view('i8')
- return self.__class__(self.value + other_int,
- tz=self.tzinfo, freq=self.freq)
+ return type(self)(self.value + other_int, tz=self.tzinfo, freq=self.freq)
elif is_integer_object(other):
maybe_integer_op_deprecated(self)
@@ -238,8 +237,7 @@ cdef class _Timestamp(datetime):
elif self.freq is None:
raise NullFrequencyError(
"Cannot add integral value to Timestamp without freq.")
- return self.__class__((self.freq * other).apply(self),
- freq=self.freq)
+ return type(self)((self.freq * other).apply(self), freq=self.freq)
elif PyDelta_Check(other) or hasattr(other, 'delta'):
# delta --> offsets.Tick
@@ -253,8 +251,7 @@ cdef class _Timestamp(datetime):
other.seconds * 1000000 +
other.microseconds) * 1000
- result = self.__class__(self.value + nanos,
- tz=self.tzinfo, freq=self.freq)
+ result = type(self)(self.value + nanos, tz=self.tzinfo, freq=self.freq)
return result
elif is_array(other):
@@ -272,7 +269,7 @@ cdef class _Timestamp(datetime):
result = datetime.__add__(self, other)
if PyDateTime_Check(result):
- result = self.__class__(result)
+ result = type(self)(result)
result.nanosecond = self.nanosecond
return result
@@ -304,9 +301,9 @@ cdef class _Timestamp(datetime):
if (PyDateTime_Check(self)
and (PyDateTime_Check(other) or is_datetime64_object(other))):
if isinstance(self, _Timestamp):
- other = self.__class__(other)
+ other = type(self)(other)
else:
- self = other.__class__(self)
+ self = type(other)(self)
# validate tz's
if not tz_compare(self.tzinfo, other.tzinfo):
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 327d1067dd17d..c8985c365741d 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -363,7 +363,7 @@ class _BaseOffset:
attrs = [(k, v) for k, v in all_paras.items()
if (k not in exclude) and (k[0] != '_')]
attrs = sorted(set(attrs))
- params = tuple([str(self.__class__)] + attrs)
+ params = tuple([str(type(self))] + attrs)
return params
@property
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index a444a4e46d0d7..bf50d6e9b50e7 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -923,7 +923,7 @@ def __repr__(self) -> str:
data = format_object_summary(
self, self._formatter(), indent_for_name=False
).rstrip(", \n")
- class_name = "<{}>\n".format(self.__class__.__name__)
+ class_name = "<{}>\n".format(type(self).__name__)
return template.format(
class_name=class_name, data=data, length=len(self), dtype=self.dtype
)
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index cb482665b3534..ab558b8fa75d6 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -870,7 +870,7 @@ def __repr__(self) -> str:
# repr does. So we include a newline in our template, and strip
# any trailing newlines from format_object_summary
data = self._format_data()
- class_name = "<{}>\n".format(self.__class__.__name__)
+ class_name = "<{}>\n".format(type(self).__name__)
return template.format(
class_name=class_name,
data=data,
@@ -880,7 +880,7 @@ def __repr__(self) -> str:
)
def _format_space(self):
- space = " " * (len(self.__class__.__name__) + 1)
+ space = " " * (len(type(self).__name__) + 1)
return "\n{space}".format(space=space)
@property
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 176a92132e20a..83d6ac76cdd98 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -51,7 +51,7 @@ class PandasObject(DirNamesMixin):
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
- return self.__class__
+ return type(self)
def __repr__(self) -> str:
"""
@@ -1185,7 +1185,7 @@ def _reduce(
if func is None:
raise TypeError(
"{klass} cannot perform the operation {op}".format(
- klass=self.__class__.__name__, op=name
+ klass=type(self).__name__, op=name
)
)
return func(skipna=skipna, **kwds)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 41b6ebbd2f196..d62f1557952a8 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -317,7 +317,7 @@ def get_callable_name(obj):
return get_callable_name(obj.func)
# fall back to class name
if hasattr(obj, "__call__"):
- return obj.__class__.__name__
+ return type(obj).__name__
# everything failed (probably because the argument
# wasn't actually callable); we return None
# instead of the empty string in this case to allow
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index 95785af8dc5ea..e608f82b03ade 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -435,7 +435,7 @@ def visit(self, node, **kwargs):
e.msg = "Python keyword not valid identifier in numexpr query"
raise e
- method = "visit_" + node.__class__.__name__
+ method = "visit_" + type(node).__name__
visitor = getattr(self, method)
return visitor(node, **kwargs)
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index 983382dce717a..4852e498537f2 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -145,7 +145,7 @@ def type(self):
def raw(self) -> str:
return pprint_thing(
"{0}(name={1!r}, type={2})"
- "".format(self.__class__.__name__, self.name, self.type)
+ "".format(type(self).__name__, self.name, self.type)
)
@property
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 58bbfd0a1bdee..65e38ff290ce4 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -440,7 +440,7 @@ def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
- ctx = node.ctx.__class__
+ ctx = type(node.ctx)
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
| - [x] ref https://github.com/pandas-dev/pandas/pull/29816
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/29889 | 2019-11-27T16:25:12Z | 2019-11-27T20:50:33Z | 2019-11-27T20:50:33Z | 2019-11-28T05:14:17Z |
BUG: .count() raises if use_inf_as_na is enabled | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 48808a7ef7a46..e6f02222248c0 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -674,6 +674,7 @@ Other
- Bug in :meth:`DataFrame.append` that raised ``IndexError`` when appending with empty list (:issue:`28769`)
- Fix :class:`AbstractHolidayCalendar` to return correct results for
years after 2030 (now goes up to 2200) (:issue:`27790`)
+- Bug in :meth:`Series.count` raises if use_inf_as_na is enabled (:issue:`29478`)
.. _whatsnew_1000.contributors:
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index cb4199272f574..205ca193636c6 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -176,7 +176,7 @@ def _isna_old(obj):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, type):
return False
- elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
+ elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass, ABCExtensionArray)):
return _isna_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=_isna_old))
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index e25c4456147f7..fe9306a06efc7 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -554,6 +554,10 @@ def test_count(self, datetime_series):
ts.iloc[[0, 3, 5]] = np.nan
tm.assert_series_equal(ts.count(level=1), right - 1)
+ # GH29478
+ with pd.option_context("use_inf_as_na", True):
+ assert pd.Series([pd.Timestamp("1990/1/1")]).count() == 1
+
def test_dot(self):
a = Series(np.random.randn(4), index=["p", "q", "r", "s"])
b = DataFrame(
| - [x] closes #29478
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/29888 | 2019-11-27T16:13:13Z | 2019-11-29T17:51:05Z | 2019-11-29T17:51:05Z | 2019-12-19T06:24:15Z |
CLN: Clean up of locale testing | diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml
index cb0b17e3553a4..55e8e839f4fae 100644
--- a/ci/azure/posix.yml
+++ b/ci/azure/posix.yml
@@ -19,18 +19,24 @@ jobs:
ENV_FILE: ci/deps/azure-36-minimum_versions.yaml
CONDA_PY: "36"
PATTERN: "not slow and not network"
+
py36_locale_slow_old_np:
ENV_FILE: ci/deps/azure-36-locale_slow.yaml
CONDA_PY: "36"
PATTERN: "slow"
- LOCALE_OVERRIDE: "zh_CN.UTF-8"
+ # pandas does not use the language (zh_CN), but should support diferent encodings (utf8)
+ # we should test with encodings different than utf8, but doesn't seem like Ubuntu supports any
+ LANG: "zh_CN.utf8"
+ LC_ALL: "zh_CN.utf8"
EXTRA_APT: "language-pack-zh-hans"
py36_locale:
ENV_FILE: ci/deps/azure-36-locale.yaml
CONDA_PY: "36"
PATTERN: "not slow and not network"
- LOCALE_OVERRIDE: "it_IT.UTF-8"
+ LANG: "it_IT.utf8"
+ LC_ALL: "it_IT.utf8"
+ EXTRA_APT: "language-pack-it"
py36_32bit:
ENV_FILE: ci/deps/azure-36-32bit.yaml
@@ -42,7 +48,9 @@ jobs:
ENV_FILE: ci/deps/azure-37-locale.yaml
CONDA_PY: "37"
PATTERN: "not slow and not network"
- LOCALE_OVERRIDE: "zh_CN.UTF-8"
+ LANG: "zh_CN.utf8"
+ LC_ALL: "zh_CN.utf8"
+ EXTRA_APT: "language-pack-zh-hans"
py37_np_dev:
ENV_FILE: ci/deps/azure-37-numpydev.yaml
@@ -54,10 +62,16 @@ jobs:
steps:
- script: |
- if [ "$(uname)" == "Linux" ]; then sudo apt-get install -y libc6-dev-i386 $EXTRA_APT; fi
- echo '##vso[task.prependpath]$(HOME)/miniconda3/bin'
- echo "Creating Environment"
- ci/setup_env.sh
+ if [ "$(uname)" == "Linux" ]; then
+ sudo apt-get update
+ sudo apt-get install -y libc6-dev-i386 $EXTRA_APT
+ fi
+ displayName: 'Install extra packages'
+
+ - script: echo '##vso[task.prependpath]$(HOME)/miniconda3/bin'
+ displayName: 'Set conda path'
+
+ - script: ci/setup_env.sh
displayName: 'Setup environment and build pandas'
- script: |
diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index 0b68164e5767e..8020680d617d7 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -5,17 +5,6 @@
# https://github.com/pytest-dev/pytest/issues/1075
export PYTHONHASHSEED=$(python -c 'import random; print(random.randint(1, 4294967295))')
-if [ -n "$LOCALE_OVERRIDE" ]; then
- export LC_ALL="$LOCALE_OVERRIDE"
- export LANG="$LOCALE_OVERRIDE"
- PANDAS_LOCALE=`python -c 'import pandas; pandas.get_option("display.encoding")'`
- if [[ "$LOCALE_OVERRIDE" != "$PANDAS_LOCALE" ]]; then
- echo "pandas could not detect the locale. System locale: $LOCALE_OVERRIDE, pandas detected: $PANDAS_LOCALE"
- # TODO Not really aborting the tests until https://github.com/pandas-dev/pandas/issues/23923 is fixed
- # exit 1
- fi
-fi
-
if [[ "not network" == *"$PATTERN"* ]]; then
export http_proxy=http://1.2.3.4 https_proxy=http://1.2.3.4;
fi
diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index 2b488295b5cc2..db28eaea8956e 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -1,15 +1,15 @@
#!/bin/bash -e
# edit the locale file if needed
-if [ -n "$LOCALE_OVERRIDE" ]; then
+if [[ "$(uname)" == "Linux" && -n "$LC_ALL" ]]; then
echo "Adding locale to the first line of pandas/__init__.py"
rm -f pandas/__init__.pyc
- SEDC="3iimport locale\nlocale.setlocale(locale.LC_ALL, '$LOCALE_OVERRIDE')\n"
+ SEDC="3iimport locale\nlocale.setlocale(locale.LC_ALL, '$LC_ALL')\n"
sed -i "$SEDC" pandas/__init__.py
+
echo "[head -4 pandas/__init__.py]"
head -4 pandas/__init__.py
echo
- sudo locale-gen "$LOCALE_OVERRIDE"
fi
MINICONDA_DIR="$HOME/miniconda3"
diff --git a/pandas/tests/config/test_localization.py b/pandas/tests/config/test_localization.py
index 20a5be0c8a289..e815a90207a08 100644
--- a/pandas/tests/config/test_localization.py
+++ b/pandas/tests/config/test_localization.py
@@ -8,6 +8,8 @@
from pandas.compat import is_platform_windows
+import pandas as pd
+
_all_locales = get_locales() or []
_current_locale = locale.getlocale()
@@ -56,21 +58,21 @@ def test_get_locales_prefix():
@_skip_if_only_one_locale
-def test_set_locale():
+@pytest.mark.parametrize(
+ "lang,enc",
+ [
+ ("it_CH", "UTF-8"),
+ ("en_US", "ascii"),
+ ("zh_CN", "GB2312"),
+ ("it_IT", "ISO-8859-1"),
+ ],
+)
+def test_set_locale(lang, enc):
if all(x is None for x in _current_locale):
# Not sure why, but on some Travis runs with pytest,
# getlocale() returned (None, None).
pytest.skip("Current locale is not set.")
- locale_override = os.environ.get("LOCALE_OVERRIDE", None)
-
- if locale_override is None:
- lang, enc = "it_CH", "UTF-8"
- elif locale_override == "C":
- lang, enc = "en_US", "ascii"
- else:
- lang, enc = locale_override.split(".")
-
enc = codecs.lookup(enc).name
new_locale = lang, enc
@@ -91,3 +93,13 @@ def test_set_locale():
# Once we exit the "with" statement, locale should be back to what it was.
current_locale = locale.getlocale()
assert current_locale == _current_locale
+
+
+def test_encoding_detected():
+ system_locale = os.environ.get("LC_ALL")
+ system_encoding = system_locale.split(".")[-1] if system_locale else "utf-8"
+
+ assert (
+ codecs.lookup(pd.options.display.encoding).name
+ == codecs.lookup(system_encoding).name
+ )
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index cfcd2c9f2df95..59d7f6f904337 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -146,11 +146,15 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext):
msg3 = "Expected object or value"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
- r"\[Errno 2\] File .+does_not_exist\.{} does not exist:"
- r" '.+does_not_exist\.{}'"
- ).format(fn_ext, fn_ext)
+ fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist:"
+ fr" '.+does_not_exist\.{fn_ext}'"
+ )
+ msg6 = fr"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
+ msg7 = (
+ fr"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
+ )
with pytest.raises(
- error_class, match=r"({}|{}|{}|{}|{})".format(msg1, msg2, msg3, msg4, msg5)
+ error_class, match=fr"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7})"
):
reader(path)
@@ -177,17 +181,21 @@ def test_read_expands_user_home_dir(
path = os.path.join("~", "does_not_exist." + fn_ext)
monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x))
- msg1 = r"File (b')?.+does_not_exist\.{}'? does not exist".format(fn_ext)
+ msg1 = fr"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
msg2 = fr"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
msg3 = "Unexpected character found when decoding 'false'"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
- r"\[Errno 2\] File .+does_not_exist\.{} does not exist:"
- r" '.+does_not_exist\.{}'"
- ).format(fn_ext, fn_ext)
+ fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist:"
+ fr" '.+does_not_exist\.{fn_ext}'"
+ )
+ msg6 = fr"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
+ msg7 = (
+ fr"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
+ )
with pytest.raises(
- error_class, match=r"({}|{}|{}|{}|{})".format(msg1, msg2, msg3, msg4, msg5)
+ error_class, match=fr"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7})"
):
reader(path)
| - [X] xref #23923, https://github.com/pandas-dev/pandas/pull/29852#discussion_r351228283
There are couple of things I don't understand from the locale:
- For what I see, we test with different locales (e.g. `it_IT.UTF-8`) but pandas never uses the language (`it_IT`) only the encoding (`UTF-8`). So, not sure if testing with different locales is being useful.
- The variable `LOCALE_OVERRIDE` seems to add more complexity than value. Unless `LC_ALL`/`LANG` are being set afterwards, but I don't think so.
- There is a test that is using `LOCALE_OVERRIDE` but doesn't seem to depend on the actual locale of the system. It probably makes more sense to parametrize and test all the locales we want, than use the variable value (when testing locally this test will be more useful).
- There is a "test" being skipped in `run_tests.sh` that is probably worth converting to an actual test.
Addressing these things here, but I may be missing something here. Please let me know if that's the case. | https://api.github.com/repos/pandas-dev/pandas/pulls/29883 | 2019-11-27T12:55:58Z | 2020-01-01T16:25:37Z | 2020-01-01T16:25:37Z | 2020-01-01T16:25:42Z |
TYP: some types for pandas/util/_exceptions.py | diff --git a/pandas/util/_exceptions.py b/pandas/util/_exceptions.py
index 4f2cbd4314b8e..b8719154eb791 100644
--- a/pandas/util/_exceptions.py
+++ b/pandas/util/_exceptions.py
@@ -1,15 +1,16 @@
import contextlib
+from typing import Tuple
@contextlib.contextmanager
-def rewrite_exception(old_name, new_name):
+def rewrite_exception(old_name: str, new_name: str):
"""Rewrite the message of an exception."""
try:
yield
except Exception as err:
msg = err.args[0]
msg = msg.replace(old_name, new_name)
- args = (msg,)
+ args: Tuple[str, ...] = (msg,)
if len(err.args) > 1:
args = args + err.args[1:]
err.args = args
| broken off #28339
| https://api.github.com/repos/pandas-dev/pandas/pulls/29881 | 2019-11-27T12:43:24Z | 2019-11-27T14:39:15Z | 2019-11-27T14:39:15Z | 2019-11-27T14:39:17Z |
TYP: some types for util._print_versions | diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index 289a32c51a916..d1c74e8530245 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -6,14 +6,15 @@
import struct
import subprocess
import sys
+from typing import List, Optional, Tuple, Union
from pandas.compat._optional import VERSIONS, _get_version, import_optional_dependency
-def get_sys_info():
- "Returns system information as a dict"
+def get_sys_info() -> List[Tuple[str, Optional[Union[str, int]]]]:
+ "Returns system information as a list"
- blob = []
+ blob: List[Tuple[str, Optional[Union[str, int]]]] = []
# get full commit hash
commit = None
@@ -29,12 +30,7 @@ def get_sys_info():
pass
else:
if pipe.returncode == 0:
- commit = so
- try:
- commit = so.decode("utf-8")
- except ValueError:
- pass
- commit = commit.strip().strip('"')
+ commit = so.decode("utf-8").strip().strip('"')
blob.append(("commit", commit))
@@ -99,6 +95,7 @@ def show_versions(as_json=False):
mod = import_optional_dependency(
modname, raise_on_missing=False, on_version="ignore"
)
+ ver: Optional[str]
if mod:
ver = _get_version(mod)
else:
| broken off #28339 | https://api.github.com/repos/pandas-dev/pandas/pulls/29880 | 2019-11-27T12:37:31Z | 2019-11-27T14:40:38Z | 2019-11-27T14:40:38Z | 2019-11-27T14:40:40Z |
ENH: Allow users to definite their own window bound calculations in rolling | diff --git a/doc/source/reference/window.rst b/doc/source/reference/window.rst
index d09ac0d1fa7f7..3db1aa12a4275 100644
--- a/doc/source/reference/window.rst
+++ b/doc/source/reference/window.rst
@@ -74,3 +74,14 @@ Exponentially-weighted moving window functions
EWM.var
EWM.corr
EWM.cov
+
+Window Indexer
+--------------
+.. currentmodule:: pandas
+
+Base class for defining custom window boundaries.
+
+.. autosummary::
+ :toctree: api/
+
+ api.indexers.BaseIndexer
diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index bc00cd7f13e13..627a83b7359bb 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -466,6 +466,64 @@ default of the index) in a DataFrame.
dft
dft.rolling('2s', on='foo').sum()
+.. _stats.custom_rolling_window:
+
+Custom window rolling
+~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 1.0
+
+In addition to accepting an integer or offset as a ``window`` argument, ``rolling`` also accepts
+a ``BaseIndexer`` subclass that allows a user to define a custom method for calculating window bounds.
+The ``BaseIndexer`` subclass will need to define a ``get_window_bounds`` method that returns
+a tuple of two arrays, the first being the starting indices of the windows and second being the
+ending indices of the windows. Additionally, ``num_values``, ``min_periods``, ``center``, ``closed``
+and will automatically be passed to ``get_window_bounds`` and the defined method must
+always accept these arguments.
+
+For example, if we have the following ``DataFrame``:
+
+.. ipython:: python
+
+ use_expanding = [True, False, True, False, True]
+ use_expanding
+ df = pd.DataFrame({'values': range(5)})
+ df
+
+and we want to use an expanding window where ``use_expanding`` is ``True`` otherwise a window of size
+1, we can create the following ``BaseIndexer``:
+
+.. code-block:: ipython
+
+ In [2]: from pandas.api.indexers import BaseIndexer
+ ...:
+ ...: class CustomIndexer(BaseIndexer):
+ ...:
+ ...: def get_window_bounds(self, num_values, min_periods, center, closed):
+ ...: start = np.empty(num_values, dtype=np.int64)
+ ...: end = np.empty(num_values, dtype=np.int64)
+ ...: for i in range(num_values):
+ ...: if self.use_expanding[i]:
+ ...: start[i] = 0
+ ...: end[i] = i + 1
+ ...: else:
+ ...: start[i] = i
+ ...: end[i] = i + self.window_size
+ ...: return start, end
+ ...:
+
+ In [3]: indexer = CustomIndexer(window_size=1, use_expanding=use_expanding)
+
+ In [4]: df.rolling(indexer).sum()
+ Out[4]:
+ values
+ 0 0.0
+ 1 1.0
+ 2 3.0
+ 3 3.0
+ 4 10.0
+
+
.. _stats.rolling_window.endpoints:
Rolling window endpoints
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 5c9543580be26..1032f2e73531d 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -169,6 +169,16 @@ You can use the alias ``"boolean"`` as well.
s = pd.Series([True, False, None], dtype="boolean")
s
+.. _whatsnew_1000.custom_window:
+
+Defining custom windows for rolling operations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We've added a :func:`pandas.api.indexers.BaseIndexer` class that allows users to define how
+window bounds are created during ``rolling`` operations. Users can define their own ``get_window_bounds``
+method on a :func:`pandas.api.indexers.BaseIndexer` subclass that will generate the start and end
+indices used for each window during the rolling aggregation. For more details and example usage, see
+the :ref:`custom window rolling documentation <stats.custom_rolling_window>`
.. _whatsnew_1000.enhancements.other:
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 303b4f6f24eac..1fdecbca32102 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -183,7 +183,8 @@ cdef inline void remove_sum(float64_t val, int64_t *nobs, float64_t *sum_x) nogi
def roll_sum_variable(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+ ndarray[int64_t] end, int64_t minp,
+ bint is_monotonic_bounds=True):
cdef:
float64_t sum_x = 0
int64_t s, e
@@ -198,11 +199,10 @@ def roll_sum_variable(ndarray[float64_t] values, ndarray[int64_t] start,
s = start[i]
e = end[i]
- if i == 0:
+ if i == 0 or not is_monotonic_bounds:
# setup
- sum_x = 0.0
- nobs = 0
+
for j in range(s, e):
add_sum(values[j], &nobs, &sum_x)
@@ -218,6 +218,10 @@ def roll_sum_variable(ndarray[float64_t] values, ndarray[int64_t] start,
output[i] = calc_sum(minp, nobs, sum_x)
+ if not is_monotonic_bounds:
+ for j in range(s, e):
+ remove_sum(values[j], &nobs, &sum_x)
+
return output
@@ -327,7 +331,8 @@ def roll_mean_fixed(ndarray[float64_t] values, ndarray[int64_t] start,
def roll_mean_variable(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+ ndarray[int64_t] end, int64_t minp,
+ bint is_monotonic_bounds=True):
cdef:
float64_t val, sum_x = 0
int64_t s, e
@@ -342,11 +347,9 @@ def roll_mean_variable(ndarray[float64_t] values, ndarray[int64_t] start,
s = start[i]
e = end[i]
- if i == 0:
+ if i == 0 or not is_monotonic_bounds:
# setup
- sum_x = 0.0
- nobs = 0
for j in range(s, e):
val = values[j]
add_mean(val, &nobs, &sum_x, &neg_ct)
@@ -365,6 +368,10 @@ def roll_mean_variable(ndarray[float64_t] values, ndarray[int64_t] start,
output[i] = calc_mean(minp, nobs, neg_ct, sum_x)
+ if not is_monotonic_bounds:
+ for j in range(s, e):
+ val = values[j]
+ remove_mean(val, &nobs, &sum_x, &neg_ct)
return output
# ----------------------------------------------------------------------
@@ -486,7 +493,8 @@ def roll_var_fixed(ndarray[float64_t] values, ndarray[int64_t] start,
def roll_var_variable(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp, int ddof=1):
+ ndarray[int64_t] end, int64_t minp, int ddof=1,
+ bint is_monotonic_bounds=True):
"""
Numerically stable implementation using Welford's method.
"""
@@ -508,7 +516,7 @@ def roll_var_variable(ndarray[float64_t] values, ndarray[int64_t] start,
# Over the first window, observations can only be added
# never removed
- if i == 0:
+ if i == 0 or not is_monotonic_bounds:
for j in range(s, e):
add_var(values[j], &nobs, &mean_x, &ssqdm_x)
@@ -528,6 +536,10 @@ def roll_var_variable(ndarray[float64_t] values, ndarray[int64_t] start,
output[i] = calc_var(minp, ddof, nobs, ssqdm_x)
+ if not is_monotonic_bounds:
+ for j in range(s, e):
+ remove_var(values[j], &nobs, &mean_x, &ssqdm_x)
+
return output
# ----------------------------------------------------------------------
@@ -629,7 +641,8 @@ def roll_skew_fixed(ndarray[float64_t] values, ndarray[int64_t] start,
def roll_skew_variable(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+ ndarray[int64_t] end, int64_t minp,
+ bint is_monotonic_bounds=True):
cdef:
float64_t val, prev
float64_t x = 0, xx = 0, xxx = 0
@@ -648,7 +661,7 @@ def roll_skew_variable(ndarray[float64_t] values, ndarray[int64_t] start,
# Over the first window, observations can only be added
# never removed
- if i == 0:
+ if i == 0 or not is_monotonic_bounds:
for j in range(s, e):
val = values[j]
@@ -671,6 +684,11 @@ def roll_skew_variable(ndarray[float64_t] values, ndarray[int64_t] start,
output[i] = calc_skew(minp, nobs, x, xx, xxx)
+ if not is_monotonic_bounds:
+ for j in range(s, e):
+ val = values[j]
+ remove_skew(val, &nobs, &x, &xx, &xxx)
+
return output
# ----------------------------------------------------------------------
@@ -776,7 +794,8 @@ def roll_kurt_fixed(ndarray[float64_t] values, ndarray[int64_t] start,
def roll_kurt_variable(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+ ndarray[int64_t] end, int64_t minp,
+ bint is_monotonic_bounds=True):
cdef:
float64_t val, prev
float64_t x = 0, xx = 0, xxx = 0, xxxx = 0
@@ -794,7 +813,7 @@ def roll_kurt_variable(ndarray[float64_t] values, ndarray[int64_t] start,
# Over the first window, observations can only be added
# never removed
- if i == 0:
+ if i == 0 or not is_monotonic_bounds:
for j in range(s, e):
add_kurt(values[j], &nobs, &x, &xx, &xxx, &xxxx)
@@ -814,6 +833,10 @@ def roll_kurt_variable(ndarray[float64_t] values, ndarray[int64_t] start,
output[i] = calc_kurt(minp, nobs, x, xx, xxx, xxxx)
+ if not is_monotonic_bounds:
+ for j in range(s, e):
+ remove_kurt(values[j], &nobs, &x, &xx, &xxx, &xxxx)
+
return output
@@ -1007,7 +1030,8 @@ def roll_min_fixed(ndarray[float64_t] values, ndarray[int64_t] start,
def roll_min_variable(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+ ndarray[int64_t] end, int64_t minp,
+ bint is_monotonic_bounds=True):
"""
Moving max of 1d array of any numeric type along axis=0 ignoring NaNs.
@@ -1400,7 +1424,10 @@ def roll_generic_variable(object obj,
ndarray[int64_t] start, ndarray[int64_t] end,
int64_t minp,
int offset, object func, bint raw,
- object args, object kwargs):
+ object args, object kwargs,
+ bint is_monotonic_bounds=True):
+ # is_monotonic_bounds unused since variable algorithm doesn't calculate
+ # adds/subtracts across windows, but matches other *_variable functions
cdef:
ndarray[float64_t] output, counts, bufarr
ndarray[float64_t, cast=True] arr
diff --git a/pandas/_libs/window/indexers.pyx b/pandas/_libs/window/indexers.pyx
index eab9f0f8aab43..2d01d1964c043 100644
--- a/pandas/_libs/window/indexers.pyx
+++ b/pandas/_libs/window/indexers.pyx
@@ -1,140 +1,105 @@
# cython: boundscheck=False, wraparound=False, cdivision=True
-from typing import Tuple
-
import numpy as np
from numpy cimport ndarray, int64_t
-# ----------------------------------------------------------------------
-# The indexer objects for rolling
-# These define start/end indexers to compute offsets
+# Cython routines for window indexers
-class FixedWindowIndexer:
+def calculate_variable_window_bounds(
+ int64_t num_values,
+ int64_t window_size,
+ object min_periods, # unused but here to match get_window_bounds signature
+ object center, # unused but here to match get_window_bounds signature
+ object closed,
+ const int64_t[:] index
+):
"""
- create a fixed length window indexer object
- that has start & end, that point to offsets in
- the index object; these are defined based on the win
- arguments
+ Calculate window boundaries for rolling windows from a time offset.
Parameters
----------
- values: ndarray
- values data array
- win: int64_t
- window size
- index: object
- index of the values
- closed: string
- closed behavior
- """
- def __init__(self, ndarray values, int64_t win, object closed, object index=None):
- cdef:
- ndarray[int64_t, ndim=1] start_s, start_e, end_s, end_e
- int64_t N = len(values)
+ num_values : int64
+ total number of values
- start_s = np.zeros(win, dtype='int64')
- start_e = np.arange(win, N, dtype='int64') - win + 1
- self.start = np.concatenate([start_s, start_e])[:N]
+ window_size : int64
+ window size calculated from the offset
- end_s = np.arange(win, dtype='int64') + 1
- end_e = start_e + win
- self.end = np.concatenate([end_s, end_e])[:N]
+ min_periods : object
+ ignored, exists for compatibility
- def get_window_bounds(self) -> Tuple[np.ndarray, np.ndarray]:
- return self.start, self.end
+ center : object
+ ignored, exists for compatibility
+ closed : str
+ string of side of the window that should be closed
-class VariableWindowIndexer:
- """
- create a variable length window indexer object
- that has start & end, that point to offsets in
- the index object; these are defined based on the win
- arguments
+ index : ndarray[int64]
+ time series index to roll over
- Parameters
- ----------
- values: ndarray
- values data array
- win: int64_t
- window size
- index: ndarray
- index of the values
- closed: string
- closed behavior
+ Returns
+ -------
+ (ndarray[int64], ndarray[int64])
"""
- def __init__(self, ndarray values, int64_t win, object closed, ndarray index):
- cdef:
- bint left_closed = False
- bint right_closed = False
- int64_t N = len(index)
-
- # if windows is variable, default is 'right', otherwise default is 'both'
- if closed is None:
- closed = 'right' if index is not None else 'both'
-
- if closed in ['right', 'both']:
- right_closed = True
-
- if closed in ['left', 'both']:
- left_closed = True
-
- self.start, self.end = self.build(index, win, left_closed, right_closed, N)
-
- @staticmethod
- def build(const int64_t[:] index, int64_t win, bint left_closed,
- bint right_closed, int64_t N) -> Tuple[np.ndarray, np.ndarray]:
-
- cdef:
- ndarray[int64_t] start, end
- int64_t start_bound, end_bound
- Py_ssize_t i, j
-
- start = np.empty(N, dtype='int64')
- start.fill(-1)
- end = np.empty(N, dtype='int64')
- end.fill(-1)
-
- start[0] = 0
-
- # right endpoint is closed
- if right_closed:
- end[0] = 1
- # right endpoint is open
- else:
- end[0] = 0
-
- with nogil:
-
- # start is start of slice interval (including)
- # end is end of slice interval (not including)
- for i in range(1, N):
- end_bound = index[i]
- start_bound = index[i] - win
-
- # left endpoint is closed
- if left_closed:
- start_bound -= 1
-
- # advance the start bound until we are
- # within the constraint
- start[i] = i
- for j in range(start[i - 1], i):
- if index[j] > start_bound:
- start[i] = j
- break
-
- # end bound is previous end
- # or current index
- if index[end[i - 1]] <= end_bound:
- end[i] = i + 1
- else:
- end[i] = end[i - 1]
-
- # right endpoint is open
- if not right_closed:
- end[i] -= 1
- return start, end
-
- def get_window_bounds(self) -> Tuple[np.ndarray, np.ndarray]:
- return self.start, self.end
+ cdef:
+ bint left_closed = False
+ bint right_closed = False
+ ndarray[int64_t, ndim=1] start, end
+ int64_t start_bound, end_bound
+ Py_ssize_t i, j
+
+ # if windows is variable, default is 'right', otherwise default is 'both'
+ if closed is None:
+ closed = 'right' if index is not None else 'both'
+
+ if closed in ['right', 'both']:
+ right_closed = True
+
+ if closed in ['left', 'both']:
+ left_closed = True
+
+ start = np.empty(num_values, dtype='int64')
+ start.fill(-1)
+ end = np.empty(num_values, dtype='int64')
+ end.fill(-1)
+
+ start[0] = 0
+
+ # right endpoint is closed
+ if right_closed:
+ end[0] = 1
+ # right endpoint is open
+ else:
+ end[0] = 0
+
+ with nogil:
+
+ # start is start of slice interval (including)
+ # end is end of slice interval (not including)
+ for i in range(1, num_values):
+ end_bound = index[i]
+ start_bound = index[i] - window_size
+
+ # left endpoint is closed
+ if left_closed:
+ start_bound -= 1
+
+ # advance the start bound until we are
+ # within the constraint
+ start[i] = i
+ for j in range(start[i - 1], i):
+ if index[j] > start_bound:
+ start[i] = j
+ break
+
+ # end bound is previous end
+ # or current index
+ if index[end[i - 1]] <= end_bound:
+ end[i] = i + 1
+ else:
+ end[i] = end[i - 1]
+
+ # right endpoint is open
+ if not right_closed:
+ end[i] -= 1
+ return start, end
diff --git a/pandas/api/__init__.py b/pandas/api/__init__.py
index 58422811990c4..d0a26864a1102 100644
--- a/pandas/api/__init__.py
+++ b/pandas/api/__init__.py
@@ -1,2 +1,2 @@
""" public toolkit API """
-from . import extensions, types # noqa
+from . import extensions, indexers, types # noqa
diff --git a/pandas/api/indexers/__init__.py b/pandas/api/indexers/__init__.py
new file mode 100644
index 0000000000000..a5d6bc07da3eb
--- /dev/null
+++ b/pandas/api/indexers/__init__.py
@@ -0,0 +1,2 @@
+"""Public API for Rolling Window Indexers"""
+from pandas.core.window.indexers import BaseIndexer # noqa: F401
diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py
new file mode 100644
index 0000000000000..0fa24a0ba1b5a
--- /dev/null
+++ b/pandas/core/window/indexers.py
@@ -0,0 +1,122 @@
+"""Indexer objects for computing start/end window bounds for rolling operations"""
+from typing import Optional, Tuple
+
+import numpy as np
+
+from pandas._libs.window.indexers import calculate_variable_window_bounds
+from pandas.util._decorators import Appender
+
+get_window_bounds_doc = """
+Computes the bounds of a window.
+
+Parameters
+----------
+num_values : int, default 0
+ number of values that will be aggregated over
+window_size : int, default 0
+ the number of rows in a window
+min_periods : int, default None
+ min_periods passed from the top level rolling API
+center : bool, default None
+ center passed from the top level rolling API
+closed : str, default None
+ closed passed from the top level rolling API
+win_type : str, default None
+ win_type passed from the top level rolling API
+
+Returns
+-------
+A tuple of ndarray[int64]s, indicating the boundaries of each
+window
+"""
+
+
+class BaseIndexer:
+ """Base class for window bounds calculations"""
+
+ def __init__(
+ self, index_array: Optional[np.ndarray] = None, window_size: int = 0, **kwargs,
+ ):
+ """
+ Parameters
+ ----------
+ **kwargs :
+ keyword arguments that will be available when get_window_bounds is called
+ """
+ self.index_array = index_array
+ self.window_size = window_size
+ # Set user defined kwargs as attributes that can be used in get_window_bounds
+ for key, value in kwargs.items():
+ setattr(self, key, value)
+
+ @Appender(get_window_bounds_doc)
+ def get_window_bounds(
+ self,
+ num_values: int = 0,
+ min_periods: Optional[int] = None,
+ center: Optional[bool] = None,
+ closed: Optional[str] = None,
+ ) -> Tuple[np.ndarray, np.ndarray]:
+
+ raise NotImplementedError
+
+
+class FixedWindowIndexer(BaseIndexer):
+ """Creates window boundaries that are of fixed length."""
+
+ @Appender(get_window_bounds_doc)
+ def get_window_bounds(
+ self,
+ num_values: int = 0,
+ min_periods: Optional[int] = None,
+ center: Optional[bool] = None,
+ closed: Optional[str] = None,
+ ) -> Tuple[np.ndarray, np.ndarray]:
+
+ start_s = np.zeros(self.window_size, dtype="int64")
+ start_e = (
+ np.arange(self.window_size, num_values, dtype="int64")
+ - self.window_size
+ + 1
+ )
+ start = np.concatenate([start_s, start_e])[:num_values]
+
+ end_s = np.arange(self.window_size, dtype="int64") + 1
+ end_e = start_e + self.window_size
+ end = np.concatenate([end_s, end_e])[:num_values]
+ return start, end
+
+
+class VariableWindowIndexer(BaseIndexer):
+ """Creates window boundaries that are of variable length, namely for time series."""
+
+ @Appender(get_window_bounds_doc)
+ def get_window_bounds(
+ self,
+ num_values: int = 0,
+ min_periods: Optional[int] = None,
+ center: Optional[bool] = None,
+ closed: Optional[str] = None,
+ ) -> Tuple[np.ndarray, np.ndarray]:
+
+ return calculate_variable_window_bounds(
+ num_values, self.window_size, min_periods, center, closed, self.index_array,
+ )
+
+
+class ExpandingIndexer(BaseIndexer):
+ """Calculate expanding window bounds, mimicking df.expanding()"""
+
+ @Appender(get_window_bounds_doc)
+ def get_window_bounds(
+ self,
+ num_values: int = 0,
+ min_periods: Optional[int] = None,
+ center: Optional[bool] = None,
+ closed: Optional[str] = None,
+ ) -> Tuple[np.ndarray, np.ndarray]:
+
+ return (
+ np.zeros(num_values, dtype=np.int64),
+ np.arange(1, num_values + 1, dtype=np.int64),
+ )
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 19ec4e335ee21..9f804584f532a 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -4,13 +4,13 @@
"""
from datetime import timedelta
from functools import partial
+import inspect
from textwrap import dedent
from typing import Callable, Dict, List, Optional, Set, Tuple, Union
import numpy as np
import pandas._libs.window.aggregations as window_aggregations
-import pandas._libs.window.indexers as window_indexers
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
@@ -49,6 +49,11 @@
_zsqrt,
calculate_min_periods,
)
+from pandas.core.window.indexers import (
+ BaseIndexer,
+ FixedWindowIndexer,
+ VariableWindowIndexer,
+)
class _Window(PandasObject, ShallowMixin, SelectionMixin):
@@ -118,6 +123,26 @@ def validate(self):
raise ValueError("closed must be 'right', 'left', 'both' or 'neither'")
if not isinstance(self.obj, (ABCSeries, ABCDataFrame)):
raise TypeError(f"invalid type: {type(self)}")
+ if isinstance(self.window, BaseIndexer):
+ self._validate_get_window_bounds_signature(self.window)
+
+ @staticmethod
+ def _validate_get_window_bounds_signature(window: BaseIndexer) -> None:
+ """
+ Validate that the passed BaseIndexer subclass has
+ a get_window_bounds with the correct signature.
+ """
+ get_window_bounds_signature = inspect.signature(
+ window.get_window_bounds
+ ).parameters.keys()
+ expected_signature = inspect.signature(
+ BaseIndexer().get_window_bounds
+ ).parameters.keys()
+ if get_window_bounds_signature != expected_signature:
+ raise ValueError(
+ f"{type(window).__name__} does not implement the correct signature for "
+ f"get_window_bounds"
+ )
def _create_blocks(self):
"""
@@ -200,6 +225,8 @@ def _get_window(self, other=None, win_type: Optional[str] = None) -> int:
-------
window : int
"""
+ if isinstance(self.window, BaseIndexer):
+ return self.min_periods or 0
return self.window
@property
@@ -391,17 +418,21 @@ def _get_cython_func_type(self, func):
Variable algorithms do not use window while fixed do.
"""
- if self.is_freq_type:
+ if self.is_freq_type or isinstance(self.window, BaseIndexer):
return self._get_roll_func(f"{func}_variable")
return partial(self._get_roll_func(f"{func}_fixed"), win=self._get_window())
- def _get_window_indexer(self):
+ def _get_window_indexer(
+ self, index_as_array: Optional[np.ndarray], window: int
+ ) -> BaseIndexer:
"""
Return an indexer class that will compute the window start and end bounds
"""
+ if isinstance(self.window, BaseIndexer):
+ return self.window
if self.is_freq_type:
- return window_indexers.VariableWindowIndexer
- return window_indexers.FixedWindowIndexer
+ return VariableWindowIndexer(index_array=index_as_array, window_size=window)
+ return FixedWindowIndexer(index_array=index_as_array, window_size=window)
def _apply(
self,
@@ -440,7 +471,7 @@ def _apply(
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
- window_indexer = self._get_window_indexer()
+ window_indexer = self._get_window_indexer(index_as_array, window)
results = []
exclude: List[Scalar] = []
@@ -468,12 +499,31 @@ def _apply(
def calc(x):
x = np.concatenate((x, additional_nans))
- min_periods = calculate_min_periods(
- window, self.min_periods, len(x), require_min_periods, floor
+ if not isinstance(window, BaseIndexer):
+ min_periods = calculate_min_periods(
+ window, self.min_periods, len(x), require_min_periods, floor
+ )
+ else:
+ min_periods = calculate_min_periods(
+ self.min_periods or 1,
+ self.min_periods,
+ len(x),
+ require_min_periods,
+ floor,
+ )
+ start, end = window_indexer.get_window_bounds(
+ num_values=len(x),
+ min_periods=self.min_periods,
+ center=self.center,
+ closed=self.closed,
)
- start, end = window_indexer(
- x, window, self.closed, index_as_array
- ).get_window_bounds()
+ if np.any(np.diff(start) < 0) or np.any(np.diff(end) < 0):
+ # Our "variable" algorithms assume start/end are
+ # monotonically increasing. A custom window indexer
+ # can produce a non monotonic start/end.
+ return func(
+ x, start, end, min_periods, is_monotonic_bounds=False
+ )
return func(x, start, end, min_periods)
else:
@@ -754,13 +804,18 @@ class Window(_Window):
Parameters
----------
- window : int, or offset
+ window : int, offset, or BaseIndexer subclass
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If its an offset then this will be the time period of each window. Each
window will be a variable sized based on the observations included in
the time-period. This is only valid for datetimelike indexes.
+
+ If a BaseIndexer subclass is passed, calculates the window boundaries
+ based on the defined ``get_window_bounds`` method. Additional rolling
+ keyword arguments, namely `min_periods`, `center`, and
+ `closed` will be passed to `get_window_bounds`.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
@@ -901,7 +956,11 @@ def validate(self):
super().validate()
window = self.window
- if isinstance(window, (list, tuple, np.ndarray)):
+ if isinstance(window, BaseIndexer):
+ raise NotImplementedError(
+ "BaseIndexer subclasses not implemented with win_types."
+ )
+ elif isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
@@ -1755,6 +1814,9 @@ def validate(self):
if self.min_periods is None:
self.min_periods = 1
+ elif isinstance(self.window, BaseIndexer):
+ # Passed BaseIndexer subclass should handle all other rolling kwargs
+ return
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 3c0abd7fca830..76141dceae930 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -219,7 +219,7 @@ def test_api(self):
class TestApi(Base):
- allowed = ["types", "extensions"]
+ allowed = ["types", "extensions", "indexers"]
def test_api(self):
diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
new file mode 100644
index 0000000000000..6a3f2c19babdc
--- /dev/null
+++ b/pandas/tests/window/test_base_indexer.py
@@ -0,0 +1,82 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame, Series
+from pandas.api.indexers import BaseIndexer
+from pandas.core.window.indexers import ExpandingIndexer
+import pandas.util.testing as tm
+
+
+def test_bad_get_window_bounds_signature():
+ class BadIndexer(BaseIndexer):
+ def get_window_bounds(self):
+ return None
+
+ indexer = BadIndexer()
+ with pytest.raises(ValueError, match="BadIndexer does not implement"):
+ Series(range(5)).rolling(indexer)
+
+
+def test_expanding_indexer():
+ s = Series(range(10))
+ indexer = ExpandingIndexer()
+ result = s.rolling(indexer).mean()
+ expected = s.expanding().mean()
+ tm.assert_series_equal(result, expected)
+
+
+def test_indexer_constructor_arg():
+ # Example found in computation.rst
+ use_expanding = [True, False, True, False, True]
+ df = DataFrame({"values": range(5)})
+
+ class CustomIndexer(BaseIndexer):
+ def get_window_bounds(self, num_values, min_periods, center, closed):
+ start = np.empty(num_values, dtype=np.int64)
+ end = np.empty(num_values, dtype=np.int64)
+ for i in range(num_values):
+ if self.use_expanding[i]:
+ start[i] = 0
+ end[i] = i + 1
+ else:
+ start[i] = i
+ end[i] = i + self.window_size
+ return start, end
+
+ indexer = CustomIndexer(window_size=1, use_expanding=use_expanding)
+ result = df.rolling(indexer).sum()
+ expected = DataFrame({"values": [0.0, 1.0, 3.0, 3.0, 10.0]})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_indexer_accepts_rolling_args():
+ df = DataFrame({"values": range(5)})
+
+ class CustomIndexer(BaseIndexer):
+ def get_window_bounds(self, num_values, min_periods, center, closed):
+ start = np.empty(num_values, dtype=np.int64)
+ end = np.empty(num_values, dtype=np.int64)
+ for i in range(num_values):
+ if center and min_periods == 1 and closed == "both" and i == 2:
+ start[i] = 0
+ end[i] = num_values
+ else:
+ start[i] = i
+ end[i] = i + self.window_size
+ return start, end
+
+ indexer = CustomIndexer(window_size=1)
+ result = df.rolling(indexer, center=True, min_periods=1, closed="both").sum()
+ expected = DataFrame({"values": [0.0, 1.0, 10.0, 3.0, 4.0]})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_win_type_not_implemented():
+ class CustomIndexer(BaseIndexer):
+ def get_window_bounds(self, num_values, min_periods, center, closed):
+ return np.array([0, 1]), np.array([1, 2])
+
+ df = DataFrame({"values": range(2)})
+ indexer = CustomIndexer()
+ with pytest.raises(NotImplementedError, match="BaseIndexer subclasses not"):
+ df.rolling(indexer, win_type="boxcar")
| Currently `rolling` allows integer and offset as `window` arguments. This PR allows users to define a custom method for calculating window boundaries by passing a subclass of a new `BaseIndexer`
```
from pandas.api.indexers import BaseIndexer
class MyWindowIndexer(BaseIndexer):
def get_window_bounds(self, ...):
....
return (start, end)
indexer = MyWindowIndexer(arg1, arg2, ...)
result = df.rolling(indexer).mean()
```
Could help address more custom rolling window operations as described in #26959, #13969, #25510, #24295
- [x] documented new feature
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/29878 | 2019-11-27T03:53:34Z | 2019-12-05T12:56:12Z | 2019-12-05T12:56:12Z | 2022-07-15T03:12:12Z |
CI: Fix tests broken by np 1.18 sorting change | diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index b41227871ae03..9dcf62d472481 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -284,7 +284,10 @@ def sort_values(self, return_indexer=False, ascending=True):
sorted_index = self.take(_as)
return sorted_index, _as
else:
- sorted_values = np.sort(self._ndarray_values)
+ # NB: using asi8 instead of _ndarray_values matters in numpy 1.18
+ # because the treatment of NaT has been changed to put NaT last
+ # instead of first.
+ sorted_values = np.sort(self.asi8)
attribs = self._get_attributes_dict()
freq = attribs["freq"]
| Longer-term, do we want to use numpy's new behavior?
cc @TomAugspurger
<b>Update</b> A little more background: consider two ndarrays in np<1.18
```
arr1 = np.array([1, 2, np.nan, 3, 4])
arr2 = np.array([1, 2, np.datetime64("NaT"), 3, 4], dtype="datetime64[ns]")
>>> np.sort(arr1)
array([ 1., 2., 3., 4., nan])
>>> np.sort(arr2)
array([ 'NaT', '1970-01-01T00:00:00.000000001',
'1970-01-01T00:00:00.000000002', '1970-01-01T00:00:00.000000003',
'1970-01-01T00:00:00.000000004'], dtype='datetime64[ns]')
```
In numpy 1.18, the behavior of `np.sort(arr2)` is changing to put the NaT at the end instead of at the beginning, to behave more like NaN. This breaks a few tests, which this PR fixes.
Side-note: as of now, 1.18 changes the behavior for datetime64, but not timedelta64. | https://api.github.com/repos/pandas-dev/pandas/pulls/29877 | 2019-11-27T02:46:51Z | 2019-11-27T09:51:15Z | 2019-11-27T09:51:15Z | 2019-11-27T16:23:02Z |
DEPR: infer_dtype default for skipna is now True | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index d01a5fb4f8b43..ca950fbe047d9 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -402,6 +402,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
**Other removals**
- Floordiv of integer-dtyped array by :class:`Timedelta` now raises ``TypeError`` (:issue:`21036`)
+- :func:`pandas.api.types.infer_dtype` argument ``skipna`` defaults to ``True`` instead of ``False`` (:issue:`24050`)
- Removed the previously deprecated :meth:`Index.summary` (:issue:`18217`)
- Removed the previously deprecated "fastpath" keyword from the :class:`Index` constructor (:issue:`23110`)
- Removed the previously deprecated :meth:`Series.get_value`, :meth:`Series.set_value`, :meth:`DataFrame.get_value`, :meth:`DataFrame.set_value` (:issue:`17739`)
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index aaf6456df8f8e..780f93291cee8 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -4,7 +4,6 @@ from fractions import Fraction
from numbers import Number
import sys
-import warnings
import cython
from cython import Py_ssize_t
@@ -615,7 +614,7 @@ def clean_index_list(obj: list):
# don't force numpy coerce with nan's
inferred = infer_dtype(obj, skipna=False)
- if inferred in ['string', 'bytes', 'unicode', 'mixed', 'mixed-integer']:
+ if inferred in ['string', 'bytes', 'mixed', 'mixed-integer']:
return np.asarray(obj, dtype=object), 0
elif inferred in ['integer']:
# TODO: we infer an integer but it *could* be a uint64
@@ -1094,7 +1093,7 @@ cdef _try_infer_map(v):
return None
-def infer_dtype(value: object, skipna: object=None) -> str:
+def infer_dtype(value: object, skipna: bool = True) -> str:
"""
Efficiently infer the type of a passed val, or list-like
array of values. Return a string describing the type.
@@ -1102,7 +1101,7 @@ def infer_dtype(value: object, skipna: object=None) -> str:
Parameters
----------
value : scalar, list, ndarray, or pandas type
- skipna : bool, default False
+ skipna : bool, default True
Ignore NaN values when inferring the type.
.. versionadded:: 0.21.0
@@ -1113,7 +1112,6 @@ def infer_dtype(value: object, skipna: object=None) -> str:
Results can include:
- string
- - unicode
- bytes
- floating
- integer
@@ -1200,12 +1198,6 @@ def infer_dtype(value: object, skipna: object=None) -> str:
bint seen_pdnat = False
bint seen_val = False
- if skipna is None:
- msg = ('A future version of pandas will default to `skipna=True`. To '
- 'silence this warning, pass `skipna=True|False` explicitly.')
- warnings.warn(msg, FutureWarning, stacklevel=2)
- skipna = False
-
if util.is_array(value):
values = value
elif hasattr(value, 'dtype'):
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 743b844917edf..53e979d12a56d 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -628,13 +628,13 @@ def test_integer_na(self, arr, skipna):
expected = "integer" if skipna else "integer-na"
assert result == expected
- def test_deprecation(self):
- # GH 24050
- arr = np.array([1, 2, 3], dtype=object)
+ def test_infer_dtype_skipna_default(self):
+ # infer_dtype `skipna` default deprecated in GH#24050,
+ # changed to True in GH#29876
+ arr = np.array([1, 2, 3, np.nan], dtype=object)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- result = lib.infer_dtype(arr) # default: skipna=None -> warn
- assert result == "integer"
+ result = lib.infer_dtype(arr)
+ assert result == "integer"
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype="O")
| https://api.github.com/repos/pandas-dev/pandas/pulls/29876 | 2019-11-27T02:35:19Z | 2019-11-28T07:36:04Z | 2019-11-28T07:36:04Z | 2019-11-28T16:04:26Z | |
DEPR: dropna multiple axes, fillna int for td64, from_codes with floats, Series.nonzero | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 7d3f61ccf4e9f..42e28e0556d18 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -459,6 +459,10 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more.
- In :func:`concat` the default value for ``sort`` has been changed from ``None`` to ``False`` (:issue:`20613`)
- Removed previously deprecated "raise_conflict" argument from :meth:`DataFrame.update`, use "errors" instead (:issue:`23585`)
- Removed previously deprecated keyword "n" from :meth:`DatetimeIndex.shift`, :meth:`TimedeltaIndex.shift`, :meth:`PeriodIndex.shift`, use "periods" instead (:issue:`22458`)
+- Passing an integer to :meth:`Series.fillna` or :meth:`DataFrame.fillna` with ``timedelta64[ns]`` dtype now raises ``TypeError`` (:issue:`24694`)
+- Passing multiple axes to :meth:`DataFrame.dropna` is no longer supported (:issue:`20995`)
+- Removed previously deprecated :meth:`Series.nonzero`, use `to_numpy().nonzero()` instead (:issue:`24048`)
+- Passing floating dtype ``codes`` to :meth:`Categorical.from_codes` is no longer supported, pass ``codes.astype(np.int64)`` instead (:issue:`21775`)
- Removed the previously deprecated :meth:`Series.to_dense`, :meth:`DataFrame.to_dense` (:issue:`26684`)
- Removed the previously deprecated :meth:`Index.dtype_str`, use ``str(index.dtype)`` instead (:issue:`27106`)
- :meth:`Categorical.ravel` returns a :class:`Categorical` instead of a ``ndarray`` (:issue:`27199`)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 0dc972011833a..46aab31770fde 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -27,7 +27,6 @@
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
- is_float_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
@@ -646,22 +645,7 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
codes = np.asarray(codes) # #21767
if len(codes) and not is_integer_dtype(codes):
- msg = "codes need to be array-like integers"
- if is_float_dtype(codes):
- icodes = codes.astype("i8")
- if (icodes == codes).all():
- msg = None
- codes = icodes
- warn(
- (
- "float codes will be disallowed in the future and "
- "raise a ValueError"
- ),
- FutureWarning,
- stacklevel=2,
- )
- if msg:
- raise ValueError(msg)
+ raise ValueError("codes need to be array-like integers")
if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and len(categories)-1")
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ca943111b7e9f..0b690363a2178 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4475,7 +4475,7 @@ def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing value.
- .. deprecated:: 0.23.0
+ .. versionchanged:: 1.0.0
Pass tuple or list to drop on multiple axes.
Only a single axis is allowed.
@@ -4565,43 +4565,35 @@ def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(axis, (tuple, list)):
# GH20987
- msg = (
- "supplying multiple axes to axis is deprecated and "
- "will be removed in a future version."
- )
- warnings.warn(msg, FutureWarning, stacklevel=2)
+ raise TypeError("supplying multiple axes to axis is no longer supported.")
- result = self
- for ax in axis:
- result = result.dropna(how=how, thresh=thresh, subset=subset, axis=ax)
+ axis = self._get_axis_number(axis)
+ agg_axis = 1 - axis
+
+ agg_obj = self
+ if subset is not None:
+ ax = self._get_axis(agg_axis)
+ indices = ax.get_indexer_for(subset)
+ check = indices == -1
+ if check.any():
+ raise KeyError(list(np.compress(check, subset)))
+ agg_obj = self.take(indices, axis=agg_axis)
+
+ count = agg_obj.count(axis=agg_axis)
+
+ if thresh is not None:
+ mask = count >= thresh
+ elif how == "any":
+ mask = count == len(agg_obj._get_axis(agg_axis))
+ elif how == "all":
+ mask = count > 0
else:
- axis = self._get_axis_number(axis)
- agg_axis = 1 - axis
-
- agg_obj = self
- if subset is not None:
- ax = self._get_axis(agg_axis)
- indices = ax.get_indexer_for(subset)
- check = indices == -1
- if check.any():
- raise KeyError(list(np.compress(check, subset)))
- agg_obj = self.take(indices, axis=agg_axis)
-
- count = agg_obj.count(axis=agg_axis)
-
- if thresh is not None:
- mask = count >= thresh
- elif how == "any":
- mask = count == len(agg_obj._get_axis(agg_axis))
- elif how == "all":
- mask = count > 0
+ if how is not None:
+ raise ValueError("invalid how option: {h}".format(h=how))
else:
- if how is not None:
- raise ValueError("invalid how option: {h}".format(h=how))
- else:
- raise TypeError("must specify how or thresh")
+ raise TypeError("must specify how or thresh")
- result = self.loc(axis=axis)[mask]
+ result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index b0382755f2edb..8dd39473ee1f4 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -2444,15 +2444,11 @@ def fillna(self, value, **kwargs):
# interpreted as nanoseconds
if is_integer(value):
# Deprecation GH#24694, GH#19233
- warnings.warn(
- "Passing integers to fillna is deprecated, will "
- "raise a TypeError in a future version. To retain "
- "the old behavior, pass pd.Timedelta(seconds=n) "
- "instead.",
- FutureWarning,
- stacklevel=6,
+ raise TypeError(
+ "Passing integers to fillna for timedelta64[ns] dtype is no "
+ "longer supporetd. To obtain the old behavior, pass "
+ "`pd.Timedelta(seconds=n)` instead."
)
- value = Timedelta(value, unit="s")
return super().fillna(value, **kwargs)
def should_store(self, value):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 56039605651ac..a8232f137f3ef 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -528,55 +528,6 @@ def compress(self, condition, *args, **kwargs):
nv.validate_compress(args, kwargs)
return self[condition]
- def nonzero(self):
- """
- Return the *integer* indices of the elements that are non-zero.
-
- .. deprecated:: 0.24.0
- Please use .to_numpy().nonzero() as a replacement.
-
- This method is equivalent to calling `numpy.nonzero` on the
- series data. For compatibility with NumPy, the return value is
- the same (a tuple with an array of indices for each dimension),
- but it will always be a one-item tuple because series only have
- one dimension.
-
- Returns
- -------
- numpy.ndarray
- Indices of elements that are non-zero.
-
- See Also
- --------
- numpy.nonzero
-
- Examples
- --------
- >>> s = pd.Series([0, 3, 0, 4])
- >>> s.nonzero()
- (array([1, 3]),)
- >>> s.iloc[s.nonzero()[0]]
- 1 3
- 3 4
- dtype: int64
-
- # same return although index of s is different
- >>> s = pd.Series([0, 3, 0, 4], index=['a', 'b', 'c', 'd'])
- >>> s.nonzero()
- (array([1, 3]),)
- >>> s.iloc[s.nonzero()[0]]
- b 3
- d 4
- dtype: int64
- """
- msg = (
- "Series.nonzero() is deprecated "
- "and will be removed in a future version."
- "Use Series.to_numpy().nonzero() instead"
- )
- warnings.warn(msg, FutureWarning, stacklevel=2)
- return self._values.nonzero()
-
def put(self, *args, **kwargs):
"""
Apply the `put` method to its `values` attribute if it has one.
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index 59017a1442cb4..14bb9b88eee88 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -529,13 +529,11 @@ def test_from_codes_with_float(self):
# empty codes should not raise for floats
Categorical.from_codes([], dtype.categories)
- with tm.assert_produces_warning(FutureWarning):
- cat = Categorical.from_codes(codes, dtype.categories)
- tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype="i1"))
+ with pytest.raises(ValueError, match="codes need to be array-like integers"):
+ Categorical.from_codes(codes, dtype.categories)
- with tm.assert_produces_warning(FutureWarning):
- cat = Categorical.from_codes(codes, dtype=dtype)
- tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype="i1"))
+ with pytest.raises(ValueError, match="codes need to be array-like integers"):
+ Categorical.from_codes(codes, dtype=dtype)
codes = [1.1, 2.0, 0] # non-integer
with pytest.raises(ValueError, match="codes need to be array-like integers"):
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 24510ff9338ca..0b77c0067e5f2 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -165,23 +165,16 @@ def test_dropna_multiple_axes(self):
[7, np.nan, 8, 9],
]
)
- cp = df.copy()
# GH20987
- with tm.assert_produces_warning(FutureWarning):
- result = df.dropna(how="all", axis=[0, 1])
- with tm.assert_produces_warning(FutureWarning):
- result2 = df.dropna(how="all", axis=(0, 1))
- expected = df.dropna(how="all").dropna(how="all", axis=1)
-
- tm.assert_frame_equal(result, expected)
- tm.assert_frame_equal(result2, expected)
- tm.assert_frame_equal(df, cp)
+ with pytest.raises(TypeError, match="supplying multiple axes"):
+ df.dropna(how="all", axis=[0, 1])
+ with pytest.raises(TypeError, match="supplying multiple axes"):
+ df.dropna(how="all", axis=(0, 1))
inp = df.copy()
- with tm.assert_produces_warning(FutureWarning):
+ with pytest.raises(TypeError, match="supplying multiple axes"):
inp.dropna(how="all", axis=(0, 1), inplace=True)
- tm.assert_frame_equal(inp, expected)
def test_dropna_tz_aware_datetime(self):
# GH13407
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 81bf1edbe86df..09f1db25a3e31 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -16,6 +16,7 @@
MultiIndex,
NaT,
Series,
+ Timedelta,
Timestamp,
date_range,
isna,
@@ -60,8 +61,7 @@ def test_timedelta_fillna(self):
td = s.diff()
# reg fillna
- with tm.assert_produces_warning(FutureWarning):
- result = td.fillna(0)
+ result = td.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
@@ -73,8 +73,10 @@ def test_timedelta_fillna(self):
tm.assert_series_equal(result, expected)
# interpreted as seconds, deprecated
- with tm.assert_produces_warning(FutureWarning):
- result = td.fillna(1)
+ with pytest.raises(TypeError, match="Passing integers to fillna"):
+ td.fillna(1)
+
+ result = td.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
@@ -122,16 +124,14 @@ def test_timedelta_fillna(self):
# ffill
td[2] = np.nan
result = td.ffill()
- with tm.assert_produces_warning(FutureWarning):
- expected = td.fillna(0)
+ expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
tm.assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
- with tm.assert_produces_warning(FutureWarning):
- expected = td.fillna(0)
+ expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
tm.assert_series_equal(result, expected)
@@ -1597,12 +1597,6 @@ def test_series_interpolate_intraday(self):
tm.assert_numpy_array_equal(result.values, exp.values)
- def test_nonzero_warning(self):
- # GH 24048
- ser = pd.Series([1, 0, 3, 4])
- with tm.assert_produces_warning(FutureWarning):
- ser.nonzero()
-
@pytest.mark.parametrize(
"ind",
[
| https://api.github.com/repos/pandas-dev/pandas/pulls/29875 | 2019-11-27T02:06:38Z | 2019-12-01T22:23:28Z | 2019-12-01T22:23:28Z | 2019-12-01T22:25:30Z | |
CI: Building docs in GitHub actions | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index a36420556ae24..7d20f172533c6 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -95,3 +95,65 @@ jobs:
name: Benchmarks log
path: asv_bench/benchmarks.log
if: failure()
+
+ web_and_docs:
+ name: Web and docs
+ runs-on: ubuntu-latest
+ steps:
+
+ - name: Setting conda path
+ run: echo "::set-env name=PATH::${HOME}/miniconda3/bin:${PATH}"
+
+ - name: Checkout
+ uses: actions/checkout@v1
+
+ - name: Setup environment and build pandas
+ run: ci/setup_env.sh
+
+ - name: Build website
+ run: |
+ source activate pandas-dev
+ python web/pandas_web.py web/pandas --target-path=web/build
+
+ - name: Build documentation
+ run: |
+ source activate pandas-dev
+ doc/make.py --warnings-are-errors | tee sphinx.log ; exit ${PIPESTATUS[0]}
+
+ # This can be removed when the ipython directive fails when there are errors,
+ # including the `tee sphinx.log` in te previous step (https://github.com/ipython/ipython/issues/11547)
+ - name: Check ipython directive errors
+ run: "! grep -B1 \"^<<<-------------------------------------------------------------------------$\" sphinx.log"
+
+ - name: Merge website and docs
+ run: |
+ mkdir -p pandas_web/docs
+ cp -r web/build/* pandas_web/
+ cp -r doc/build/html/* pandas_web/docs/
+ if: github.event_name == 'push'
+
+ - name: Install Rclone
+ run: sudo apt install rclone -y
+ if: github.event_name == 'push'
+
+ - name: Set up Rclone
+ run: |
+ RCLONE_CONFIG_PATH=$HOME/.config/rclone/rclone.conf
+ mkdir -p `dirname $RCLONE_CONFIG_PATH`
+ echo "[ovh_cloud_pandas_web]" > $RCLONE_CONFIG_PATH
+ echo "type = swift" >> $RCLONE_CONFIG_PATH
+ echo "env_auth = false" >> $RCLONE_CONFIG_PATH
+ echo "auth_version = 3" >> $RCLONE_CONFIG_PATH
+ echo "auth = https://auth.cloud.ovh.net/v3/" >> $RCLONE_CONFIG_PATH
+ echo "endpoint_type = public" >> $RCLONE_CONFIG_PATH
+ echo "tenant_domain = default" >> $RCLONE_CONFIG_PATH
+ echo "tenant = 2977553886518025" >> $RCLONE_CONFIG_PATH
+ echo "domain = default" >> $RCLONE_CONFIG_PATH
+ echo "user = w4KGs3pmDxpd" >> $RCLONE_CONFIG_PATH
+ echo "key = ${{ secrets.ovh_object_store_key }}" >> $RCLONE_CONFIG_PATH
+ echo "region = BHS" >> $RCLONE_CONFIG_PATH
+ if: github.event_name == 'push'
+
+ - name: Sync web
+ run: rclone sync pandas_web ovh_cloud_pandas_web:dev
+ if: github.event_name == 'push'
| Building the documentation in GitHub actions.
In Azure-pipelines we publish the built docs to GitHub pages, served at dev.pandas.io.
After this is merged, I'll work on publishing the Actions docs to the new OVH server. Possibly, publishing the docs of every PR.
| https://api.github.com/repos/pandas-dev/pandas/pulls/29874 | 2019-11-26T23:36:43Z | 2019-12-30T20:40:07Z | 2019-12-30T20:40:07Z | 2020-01-08T15:47:14Z |
CLN: remove never-used kwargs, make kwargs explicit | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 18ae081caf69d..4cc40c55361a3 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -779,7 +779,6 @@ def select_as_coordinates(
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
- **kwargs,
):
"""
return the selection as an Index
@@ -795,7 +794,7 @@ def select_as_coordinates(
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_coordinates with a table")
- return tbl.read_coordinates(where=where, start=start, stop=stop, **kwargs)
+ return tbl.read_coordinates(where=where, start=start, stop=stop)
def select_column(self, key: str, column: str, **kwargs):
"""
@@ -2286,7 +2285,7 @@ def set_atom_data(self, block):
self.typ = self.get_atom_data(block)
self.set_data(block.values.astype(self.typ.type, copy=False))
- def set_atom_categorical(self, block, items, info=None, values=None):
+ def set_atom_categorical(self, block, items, info=None):
# currently only supports a 1-D categorical
# in a 1-D block
@@ -2314,17 +2313,15 @@ def set_atom_categorical(self, block, items, info=None, values=None):
def get_atom_datetime64(self, block):
return _tables().Int64Col(shape=block.shape[0])
- def set_atom_datetime64(self, block, values=None):
+ def set_atom_datetime64(self, block):
self.kind = "datetime64"
self.typ = self.get_atom_datetime64(block)
- if values is None:
- values = block.values.view("i8")
+ values = block.values.view("i8")
self.set_data(values, "datetime64")
- def set_atom_datetime64tz(self, block, info, values=None):
+ def set_atom_datetime64tz(self, block, info):
- if values is None:
- values = block.values
+ values = block.values
# convert this column to i8 in UTC, and save the tz
values = values.asi8.reshape(block.shape)
@@ -2340,11 +2337,10 @@ def set_atom_datetime64tz(self, block, info, values=None):
def get_atom_timedelta64(self, block):
return _tables().Int64Col(shape=block.shape[0])
- def set_atom_timedelta64(self, block, values=None):
+ def set_atom_timedelta64(self, block):
self.kind = "timedelta64"
self.typ = self.get_atom_timedelta64(block)
- if values is None:
- values = block.values.view("i8")
+ values = block.values.view("i8")
self.set_data(values, "timedelta64")
@property
@@ -2532,10 +2528,6 @@ def version(self) -> Tuple[int, int, int]:
def pandas_type(self):
return _ensure_decoded(getattr(self.group._v_attrs, "pandas_type", None))
- @property
- def format_type(self) -> str:
- return "fixed"
-
def __repr__(self) -> str:
""" return a pretty representation of myself """
self.infer_axes()
@@ -2633,7 +2625,13 @@ def infer_axes(self):
self.get_attrs()
return True
- def read(self, **kwargs):
+ def read(
+ self,
+ where=None,
+ columns=None,
+ start: Optional[int] = None,
+ stop: Optional[int] = None,
+ ):
raise NotImplementedError(
"cannot read on an abstract storer: subclasses should implement"
)
@@ -2797,9 +2795,7 @@ def write_index(self, key, index):
self.write_sparse_intindex(key, index)
else:
setattr(self.attrs, f"{key}_variety", "regular")
- converted = _convert_index(
- "index", index, self.encoding, self.errors, self.format_type
- )
+ converted = _convert_index("index", index, self.encoding, self.errors)
self.write_array(key, converted.values)
@@ -2848,9 +2844,7 @@ def write_multi_index(self, key, index):
"Saving a MultiIndex with an extension dtype is not supported."
)
level_key = f"{key}_level{i}"
- conv_level = _convert_index(
- level_key, lev, self.encoding, self.errors, self.format_type
- )
+ conv_level = _convert_index(level_key, lev, self.encoding, self.errors)
self.write_array(level_key, conv_level.values)
node = getattr(self.group, level_key)
node._v_attrs.kind = conv_level.kind
@@ -3190,10 +3184,6 @@ def __init__(self, *args, **kwargs):
def table_type_short(self) -> str:
return self.table_type.split("_")[0]
- @property
- def format_type(self) -> str:
- return "table"
-
def __repr__(self) -> str:
""" return a pretty representation of myself """
self.infer_axes()
@@ -3544,14 +3534,17 @@ def create_index(self, columns=None, optlevel=None, kind=None):
)
v.create_index(**kw)
- def read_axes(self, where, **kwargs) -> bool:
+ def read_axes(
+ self, where, start: Optional[int] = None, stop: Optional[int] = None
+ ) -> bool:
"""
Create the axes sniffed from the table.
Parameters
----------
where : ???
- **kwargs
+ start: int or None, default None
+ stop: int or None, default None
Returns
-------
@@ -3567,21 +3560,19 @@ def read_axes(self, where, **kwargs) -> bool:
return False
# create the selection
- selection = Selection(self, where=where, **kwargs)
+ selection = Selection(self, where=where, start=start, stop=stop)
values = selection.select()
# convert the data
for a in self.axes:
a.set_info(self.info)
- # `kwargs` may contain `start` and `stop` arguments if passed to
- # `store.select()`. If set they determine the index size.
a.convert(
values,
nan_rep=self.nan_rep,
encoding=self.encoding,
errors=self.errors,
- start=kwargs.get("start"),
- stop=kwargs.get("stop"),
+ start=start,
+ stop=stop,
)
return True
@@ -3636,7 +3627,6 @@ def create_axes(
nan_rep=None,
data_columns=None,
min_itemsize=None,
- **kwargs,
):
""" create and return the axes
legacy tables create an indexable column, indexable index,
@@ -3706,9 +3696,7 @@ def create_axes(
if i in axes:
name = obj._AXIS_NAMES[i]
- new_index = _convert_index(
- name, a, self.encoding, self.errors, self.format_type
- )
+ new_index = _convert_index(name, a, self.encoding, self.errors)
new_index.axis = i
index_axes_map[i] = new_index
@@ -3948,11 +3936,7 @@ def create_description(
return d
def read_coordinates(
- self,
- where=None,
- start: Optional[int] = None,
- stop: Optional[int] = None,
- **kwargs,
+ self, where=None, start: Optional[int] = None, stop: Optional[int] = None,
):
"""select coordinates (row numbers) from a table; return the
coordinates object
@@ -4061,7 +4045,9 @@ def write(
chunksize=None,
expectedrows=None,
dropna=False,
- **kwargs,
+ nan_rep=None,
+ data_columns=None,
+ errors="strict", # not used hre, but passed to super
):
if not append and self.is_exists:
@@ -4069,7 +4055,12 @@ def write(
# create the axes
self.create_axes(
- axes=axes, obj=obj, validate=append, min_itemsize=min_itemsize, **kwargs
+ axes=axes,
+ obj=obj,
+ validate=append,
+ min_itemsize=min_itemsize,
+ nan_rep=nan_rep,
+ data_columns=data_columns,
)
for a in self.axes:
@@ -4219,11 +4210,7 @@ def write_data_chunk(self, rows, indexes, mask, values):
self.table.flush()
def delete(
- self,
- where=None,
- start: Optional[int] = None,
- stop: Optional[int] = None,
- **kwargs,
+ self, where=None, start: Optional[int] = None, stop: Optional[int] = None,
):
# delete all rows (and return the nrows)
@@ -4303,9 +4290,15 @@ def get_object(self, obj):
obj = obj.T
return obj
- def read(self, where=None, columns=None, **kwargs):
+ def read(
+ self,
+ where=None,
+ columns=None,
+ start: Optional[int] = None,
+ stop: Optional[int] = None,
+ ):
- if not self.read_axes(where=where, **kwargs):
+ if not self.read_axes(where=where, start=start, stop=stop):
return None
info = (
@@ -4349,7 +4342,7 @@ def read(self, where=None, columns=None, **kwargs):
else:
df = concat(frames, axis=1)
- selection = Selection(self, where=where, **kwargs)
+ selection = Selection(self, where=where, start=start, stop=stop)
# apply the selection filters & axis orderings
df = self.process_axes(df, selection=selection, columns=columns)
@@ -4573,7 +4566,7 @@ def _set_tz(values, tz, preserve_UTC: bool = False, coerce: bool = False):
return values
-def _convert_index(name: str, index, encoding=None, errors="strict", format_type=None):
+def _convert_index(name: str, index, encoding=None, errors="strict"):
assert isinstance(name, str)
index_name = getattr(index, "name", None)
| https://api.github.com/repos/pandas-dev/pandas/pulls/29873 | 2019-11-26T23:34:47Z | 2019-11-27T15:58:50Z | 2019-11-27T15:58:50Z | 2019-11-27T16:45:33Z | |
REF: implement cumulative ops block-wise | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index b403508029f1f..7b94ee20eabb7 100755
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -763,6 +763,7 @@ Numeric
- Bug in :class:`NumericIndex` construction that caused :class:`UInt64Index` to be casted to :class:`Float64Index` when integers in the ``np.uint64`` range were used to index a :class:`DataFrame` (:issue:`28279`)
- Bug in :meth:`Series.interpolate` when using method=`index` with an unsorted index, would previously return incorrect results. (:issue:`21037`)
- Bug in :meth:`DataFrame.round` where a :class:`DataFrame` with a :class:`CategoricalIndex` of :class:`IntervalIndex` columns would incorrectly raise a ``TypeError`` (:issue:`30063`)
+- Bug in :class:`DataFrame` cumulative operations (e.g. cumsum, cummax) incorrect casting to object-dtype (:issue:`19296`)
Conversion
^^^^^^^^^^
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c4461a9530e5c..ea05bb4d9345c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -11086,44 +11086,66 @@ def cum_func(self, axis=None, skipna=True, *args, **kwargs):
else:
axis = self._get_axis_number(axis)
- y = com.values_from_object(self).copy()
- d = self._construct_axes_dict()
- d["copy"] = False
+ if axis == 1:
+ return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T
+
+ def na_accum_func(blk_values):
+ # We will be applying this function to block values
+ if blk_values.dtype.kind in ["m", "M"]:
+ # numpy 1.18 started sorting NaTs at the end instead of beginning,
+ # so we need to work around to maintain backwards-consistency.
+ orig_dtype = blk_values.dtype
+
+ # We need to define mask before masking NaTs
+ mask = isna(blk_values)
+
+ if accum_func == np.minimum.accumulate:
+ # Note: the accum_func comparison fails as an "is" comparison
+ y = blk_values.view("i8")
+ y[mask] = np.iinfo(np.int64).max
+ changed = True
+ else:
+ y = blk_values
+ changed = False
+
+ result = accum_func(y.view("i8"), axis)
+ if skipna:
+ np.putmask(result, mask, iNaT)
+ elif accum_func == np.minimum.accumulate:
+ # Restore NaTs that we masked previously
+ nz = (~np.asarray(mask)).nonzero()[0]
+ if len(nz):
+ # everything up to the first non-na entry stays NaT
+ result[: nz[0]] = iNaT
+
+ if changed:
+ # restore NaT elements
+ y[mask] = iNaT # TODO: could try/finally for this?
+
+ if isinstance(blk_values, np.ndarray):
+ result = result.view(orig_dtype)
+ else:
+ # DatetimeArray
+ result = type(blk_values)._from_sequence(result, dtype=orig_dtype)
+
+ elif skipna and not issubclass(
+ blk_values.dtype.type, (np.integer, np.bool_)
+ ):
+ vals = blk_values.copy().T
+ mask = isna(vals)
+ np.putmask(vals, mask, mask_a)
+ result = accum_func(vals, axis)
+ np.putmask(result, mask, mask_b)
+ else:
+ result = accum_func(blk_values.T, axis)
- if issubclass(y.dtype.type, (np.datetime64, np.timedelta64)):
- # numpy 1.18 started sorting NaTs at the end instead of beginning,
- # so we need to work around to maintain backwards-consistency.
- orig_dtype = y.dtype
- if accum_func == np.minimum.accumulate:
- # Note: the accum_func comparison fails as an "is" comparison
- # Note that "y" is always a copy, so we can safely modify it
- mask = isna(self)
- y = y.view("i8")
- y[mask] = np.iinfo(np.int64).max
-
- result = accum_func(y.view("i8"), axis).view(orig_dtype)
- if skipna:
- mask = isna(self)
- np.putmask(result, mask, iNaT)
- elif accum_func == np.minimum.accumulate:
- # Restore NaTs that we masked previously
- nz = (~np.asarray(mask)).nonzero()[0]
- if len(nz):
- # everything up to the first non-na entry stays NaT
- result[: nz[0]] = iNaT
+ # transpose back for ndarray, not for EA
+ return result.T if hasattr(result, "T") else result
- if self.ndim == 1:
- # restore dt64tz dtype
- d["dtype"] = self.dtype
-
- elif skipna and not issubclass(y.dtype.type, (np.integer, np.bool_)):
- mask = isna(self)
- np.putmask(y, mask, mask_a)
- result = accum_func(y, axis)
- np.putmask(result, mask, mask_b)
- else:
- result = accum_func(y, axis)
+ result = self._data.apply(na_accum_func)
+ d = self._construct_axes_dict()
+ d["copy"] = False
return self._constructor(result, **d).__finalize__(self)
return set_function_name(cum_func, name, cls)
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index eb98bdc49f976..93e165ad3d71e 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -1331,8 +1331,8 @@ def test_agg_cython_table(self, df, func, expected, axis):
_get_cython_table_params(
DataFrame([[np.nan, 1], [1, 2]]),
[
- ("cumprod", DataFrame([[np.nan, 1], [1.0, 2.0]])),
- ("cumsum", DataFrame([[np.nan, 1], [1.0, 3.0]])),
+ ("cumprod", DataFrame([[np.nan, 1], [1, 2]])),
+ ("cumsum", DataFrame([[np.nan, 1], [1, 3]])),
],
),
),
@@ -1341,6 +1341,10 @@ def test_agg_cython_table_transform(self, df, func, expected, axis):
# GH 21224
# test transforming functions in
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
+ if axis == "columns" or axis == 1:
+ # operating blockwise doesn't let us preserve dtypes
+ expected = expected.astype("float64")
+
result = df.agg(func, axis=axis)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_cumulative.py b/pandas/tests/frame/test_cumulative.py
index ad2cbff888b2e..2deeeb95d057d 100644
--- a/pandas/tests/frame/test_cumulative.py
+++ b/pandas/tests/frame/test_cumulative.py
@@ -118,3 +118,18 @@ def test_cummax(self, datetime_frame):
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
+
+ def test_cumulative_ops_preserve_dtypes(self):
+ # GH#19296 dont incorrectly upcast to object
+ df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3.0], "C": [True, False, False]})
+
+ result = df.cumsum()
+
+ expected = DataFrame(
+ {
+ "A": Series([1, 3, 6], dtype=np.int64),
+ "B": Series([1, 3, 6], dtype=np.float64),
+ "C": df["C"].cumsum(),
+ }
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] closes #19296
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
cc @TomAugspurger | https://api.github.com/repos/pandas-dev/pandas/pulls/29872 | 2019-11-26T23:22:57Z | 2019-12-30T13:30:19Z | 2019-12-30T13:30:19Z | 2019-12-30T16:43:46Z |
REF: io.pytables operate on DataFrames instead of Blocks | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 69d632479e969..cd1ea3dd16975 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -48,7 +48,6 @@
import pandas.core.common as com
from pandas.core.computation.pytables import PyTablesExpr, maybe_expression
from pandas.core.index import ensure_index
-from pandas.core.internals import BlockManager, _block_shape, make_block
from pandas.io.common import _stringify_path
from pandas.io.formats.printing import adjoin, pprint_thing
@@ -2301,7 +2300,7 @@ def set_atom_categorical(self, block, items, info=None):
# write the codes; must be in a block shape
self.ordered = values.ordered
self.typ = self.get_atom_data(block, kind=codes.dtype.name)
- self.set_data(_block_shape(codes))
+ self.set_data(codes)
# write the categories
self.meta = "category"
@@ -3100,17 +3099,23 @@ def read(self, start=None, stop=None, **kwargs):
axes.append(ax)
items = axes[0]
- blocks = []
+ dfs = []
+
for i in range(self.nblocks):
blk_items = self.read_index(f"block{i}_items")
values = self.read_array(f"block{i}_values", start=_start, stop=_stop)
- blk = make_block(
- values, placement=items.get_indexer(blk_items), ndim=len(axes)
- )
- blocks.append(blk)
- return self.obj_type(BlockManager(blocks, axes))
+ columns = items[items.get_indexer(blk_items)]
+ df = DataFrame(values.T, columns=columns, index=axes[1])
+ dfs.append(df)
+
+ if len(dfs) > 0:
+ out = concat(dfs, axis=1)
+ out = out.reindex(columns=items, copy=False)
+ return out
+
+ return DataFrame(columns=axes[0], index=axes[1])
def write(self, obj, **kwargs):
super().write(obj, **kwargs)
@@ -4333,9 +4338,15 @@ def read(
if values.ndim == 1 and isinstance(values, np.ndarray):
values = values.reshape((1, values.shape[0]))
- block = make_block(values, placement=np.arange(len(cols_)), ndim=2)
- mgr = BlockManager([block], [cols_, index_])
- frames.append(DataFrame(mgr))
+ if isinstance(values, np.ndarray):
+ df = DataFrame(values.T, columns=cols_, index=index_)
+ elif isinstance(values, Index):
+ df = DataFrame(values, columns=cols_, index=index_)
+ else:
+ # Categorical
+ df = DataFrame([values], columns=cols_, index=index_)
+ assert (df.dtypes == values.dtype).all(), (df.dtypes, values.dtype)
+ frames.append(df)
if len(frames) == 1:
df = frames[0]
| Wouldn't be surprised if there is a perf hit here, will run asvs. | https://api.github.com/repos/pandas-dev/pandas/pulls/29871 | 2019-11-26T21:53:43Z | 2019-12-03T13:49:46Z | 2019-12-03T13:49:46Z | 2019-12-03T16:08:38Z |
STY: F-strings | diff --git a/pandas/io/msgpack/_packer.pyx b/pandas/io/msgpack/_packer.pyx
index 7c7c8f7b61e60..5c0499b489110 100644
--- a/pandas/io/msgpack/_packer.pyx
+++ b/pandas/io/msgpack/_packer.pyx
@@ -234,7 +234,7 @@ cdef class Packer:
default_used = 1
continue
else:
- raise TypeError("can't serialize {thing!r}".format(thing=o))
+ raise TypeError(f"can't serialize {repr(o)}")
break
return ret
diff --git a/pandas/io/msgpack/_unpacker.pyx b/pandas/io/msgpack/_unpacker.pyx
index cf9b2c7c04d42..f1817f29bd42a 100644
--- a/pandas/io/msgpack/_unpacker.pyx
+++ b/pandas/io/msgpack/_unpacker.pyx
@@ -99,7 +99,7 @@ cdef inline init_ctx(unpack_context *ctx,
def default_read_extended_type(typecode, data):
raise NotImplementedError("Cannot decode extended type "
- "with typecode={code}".format(code=typecode))
+ f"with typecode={typecode}")
def unpackb(object packed, object object_hook=None, object list_hook=None,
@@ -159,7 +159,7 @@ def unpackb(object packed, object object_hook=None, object list_hook=None,
return obj
else:
PyBuffer_Release(&view)
- raise UnpackValueError("Unpack failed: error = {ret}".format(ret=ret))
+ raise UnpackValueError(f"Unpack failed: error = {ret}")
def unpack(object stream, object object_hook=None, object list_hook=None,
@@ -430,8 +430,7 @@ cdef class Unpacker:
else:
raise OutOfData("No more data to unpack.")
else:
- raise ValueError("Unpack failed: error = {ret}"
- .format(ret=ret))
+ raise ValueError(f"Unpack failed: error = {ret}")
def read_bytes(self, Py_ssize_t nbytes):
"""Read a specified number of raw bytes from the stream"""
diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx
index 6378198225516..bb5bce96bc64b 100644
--- a/pandas/io/sas/sas.pyx
+++ b/pandas/io/sas/sas.pyx
@@ -105,13 +105,11 @@ cdef const uint8_t[:] rle_decompress(int result_length,
result[rpos] = 0x00
rpos += 1
else:
- raise ValueError("unknown control byte: {byte}"
- .format(byte=control_byte))
+ raise ValueError(f"unknown control byte: {control_byte}")
# In py37 cython/clang sees `len(outbuff)` as size_t and not Py_ssize_t
if <Py_ssize_t>len(result) != <Py_ssize_t>result_length:
- raise ValueError("RLE: {got} != {expect}".format(got=len(result),
- expect=result_length))
+ raise ValueError(f"RLE: {len(result)} != {result_length}")
return np.asarray(result)
@@ -194,8 +192,7 @@ cdef const uint8_t[:] rdc_decompress(int result_length,
# In py37 cython/clang sees `len(outbuff)` as size_t and not Py_ssize_t
if <Py_ssize_t>len(outbuff) != <Py_ssize_t>result_length:
- raise ValueError("RDC: {got} != {expect}\n"
- .format(got=len(outbuff), expect=result_length))
+ raise ValueError(f"RDC: {len(outbuff)} != {result_length}\n")
return np.asarray(outbuff)
@@ -271,8 +268,7 @@ cdef class Parser:
self.column_types[j] = column_type_string
else:
raise ValueError("unknown column type: "
- "{typ}"
- .format(typ=self.parser.columns[j].ctype))
+ f"{self.parser.columns[j].ctype}")
# compression
if parser.compression == const.rle_compression:
@@ -392,8 +388,7 @@ cdef class Parser:
return True
return False
else:
- raise ValueError("unknown page type: {typ}"
- .format(typ=self.current_page_type))
+ raise ValueError(f"unknown page type: {self.current_page_type}")
cdef void process_byte_array_with_data(self, int offset, int length):
| - [x] xref #29547
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/29870 | 2019-11-26T20:23:54Z | 2019-11-27T13:07:34Z | 2019-11-27T13:07:33Z | 2019-11-27T13:15:30Z |
CI: Removing Checks job form Azure pipelines | diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 6fb8241d6d600..57032932b878c 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -16,95 +16,6 @@ jobs:
name: Windows
vmImage: vs2017-win2016
-- job: 'Checks'
- pool:
- vmImage: ubuntu-16.04
- timeoutInMinutes: 90
- steps:
- - script: |
- echo '##vso[task.prependpath]$(HOME)/miniconda3/bin'
- echo '##vso[task.setvariable variable=ENV_FILE]environment.yml'
- echo '##vso[task.setvariable variable=AZURE]true'
- displayName: 'Setting environment variables'
-
- # Do not require a conda environment
- - script: ci/code_checks.sh patterns
- displayName: 'Looking for unwanted patterns'
- condition: true
-
- - script: |
- sudo apt-get update
- sudo apt-get install -y libc6-dev-i386
- ci/setup_env.sh
- displayName: 'Setup environment and build pandas'
- condition: true
-
- # Do not require pandas
- - script: |
- source activate pandas-dev
- ci/code_checks.sh lint
- displayName: 'Linting'
- condition: true
-
- - script: |
- source activate pandas-dev
- ci/code_checks.sh dependencies
- displayName: 'Dependencies consistency'
- condition: true
-
- # Require pandas
- - script: |
- source activate pandas-dev
- ci/code_checks.sh code
- displayName: 'Checks on imported code'
- condition: true
-
- - script: |
- source activate pandas-dev
- ci/code_checks.sh doctests
- displayName: 'Running doctests'
- condition: true
-
- - script: |
- source activate pandas-dev
- ci/code_checks.sh docstrings
- displayName: 'Docstring validation'
- condition: true
-
- - script: |
- source activate pandas-dev
- ci/code_checks.sh typing
- displayName: 'Typing validation'
- condition: true
-
- - script: |
- source activate pandas-dev
- pytest --capture=no --strict scripts
- displayName: 'Testing docstring validation script'
- condition: true
-
- - script: |
- source activate pandas-dev
- cd asv_bench
- asv check -E existing
- git remote add upstream https://github.com/pandas-dev/pandas.git
- git fetch upstream
- if git diff upstream/master --name-only | grep -q "^asv_bench/"; then
- asv machine --yes
- ASV_OUTPUT="$(asv dev)"
- if [[ $(echo "$ASV_OUTPUT" | grep "failed") ]]; then
- echo "##vso[task.logissue type=error]Benchmarks run with errors"
- echo "$ASV_OUTPUT"
- exit 1
- else
- echo "Benchmarks run without errors"
- fi
- else
- echo "Benchmarks did not run, no changes detected"
- fi
- displayName: 'Running benchmarks'
- condition: true
-
- job: 'Web_and_Docs'
pool:
vmImage: ubuntu-16.04
| The `Checks` job was copied to GitHub actions, and looks like it's been working all right. Removing it from pipelines, so it's not diuplicated anymore. | https://api.github.com/repos/pandas-dev/pandas/pulls/29869 | 2019-11-26T17:29:36Z | 2019-11-27T04:30:43Z | 2019-11-27T04:30:43Z | 2019-11-27T04:31:04Z |
CI: Setting path only once in GitHub Actions | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 5aa31e0ed3ab0..b689da8e39ff0 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -8,8 +8,6 @@ on:
env:
ENV_FILE: environment.yml
- # TODO: remove export PATH=... in each step once this works
- # PATH: $HOME/miniconda3/bin:$PATH
jobs:
checks:
@@ -20,68 +18,61 @@ jobs:
- name: Checkout
uses: actions/checkout@v1
+ - name: Setting conda path
+ run: echo "::set-env name=PATH::${HOME}/miniconda3/bin:${PATH}"
+
- name: Looking for unwanted patterns
run: ci/code_checks.sh patterns
if: true
- name: Setup environment and build pandas
- run: |
- export PATH=$HOME/miniconda3/bin:$PATH
- ci/setup_env.sh
+ run: ci/setup_env.sh
if: true
- name: Linting
run: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
ci/code_checks.sh lint
if: true
- name: Dependencies consistency
run: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
ci/code_checks.sh dependencies
if: true
- name: Checks on imported code
run: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
ci/code_checks.sh code
if: true
- name: Running doctests
run: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
ci/code_checks.sh doctests
if: true
- name: Docstring validation
run: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
ci/code_checks.sh docstrings
if: true
- name: Typing validation
run: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
ci/code_checks.sh typing
if: true
- name: Testing docstring validation script
run: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
pytest --capture=no --strict scripts
if: true
- name: Running benchmarks
run: |
- export PATH=$HOME/miniconda3/bin:$PATH
source activate pandas-dev
cd asv_bench
asv check -E existing
| Updating PATH only once for the whole job, so it doesn't need to be set at every step.
| https://api.github.com/repos/pandas-dev/pandas/pulls/29867 | 2019-11-26T17:25:44Z | 2019-11-27T21:08:51Z | 2019-11-27T21:08:51Z | 2019-11-27T21:08:53Z |
TST added test for groupby agg on mulitlevel column (#29772) | diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index ea986058616d7..e4de2147586f5 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -92,6 +92,25 @@ def test_groupby_aggregation_mixed_dtype():
tm.assert_frame_equal(result, expected)
+def test_groupby_aggregation_multi_level_column():
+ # GH 29772
+ lst = [
+ [True, True, True, False],
+ [True, False, np.nan, False],
+ [True, True, np.nan, False],
+ [True, True, np.nan, False],
+ ]
+ df = pd.DataFrame(
+ data=lst,
+ columns=pd.MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 0), ("B", 1)]),
+ )
+
+ result = df.groupby(level=1, axis=1).sum()
+ expected = pd.DataFrame({0: [2.0, 1, 1, 1], 1: [1, 0, 1, 1]})
+
+ tm.assert_frame_equal(result, expected)
+
+
def test_agg_apply_corner(ts, tsframe):
# nothing to group, all NA
grouped = ts.groupby(ts * np.nan)
| - [x] closes #29772
- [x] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/29866 | 2019-11-26T17:06:02Z | 2019-11-27T18:39:49Z | 2019-11-27T18:39:48Z | 2019-11-27T18:39:49Z |
CLN: remove unsupported sparse code from io.pytables | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 18ae081caf69d..1df89b0ce1f71 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -44,7 +44,6 @@
isna,
)
from pandas.core.arrays.categorical import Categorical
-from pandas.core.arrays.sparse import BlockIndex, IntIndex
import pandas.core.common as com
from pandas.core.computation.pytables import PyTablesExpr, maybe_expression
from pandas.core.index import ensure_index
@@ -2770,31 +2769,21 @@ def read_array(
else:
return ret
- def read_index(self, key, **kwargs):
+ def read_index(self, key: str, **kwargs) -> Index:
variety = _ensure_decoded(getattr(self.attrs, f"{key}_variety"))
if variety == "multi":
return self.read_multi_index(key, **kwargs)
- elif variety == "block":
- return self.read_block_index(key, **kwargs)
- elif variety == "sparseint":
- return self.read_sparse_intindex(key, **kwargs)
elif variety == "regular":
_, index = self.read_index_node(getattr(self.group, key), **kwargs)
return index
else: # pragma: no cover
raise TypeError(f"unrecognized index variety: {variety}")
- def write_index(self, key, index):
+ def write_index(self, key: str, index: Index):
if isinstance(index, MultiIndex):
setattr(self.attrs, f"{key}_variety", "multi")
self.write_multi_index(key, index)
- elif isinstance(index, BlockIndex):
- setattr(self.attrs, f"{key}_variety", "block")
- self.write_block_index(key, index)
- elif isinstance(index, IntIndex):
- setattr(self.attrs, f"{key}_variety", "sparseint")
- self.write_sparse_intindex(key, index)
else:
setattr(self.attrs, f"{key}_variety", "regular")
converted = _convert_index(
@@ -2810,32 +2799,12 @@ def write_index(self, key, index):
if isinstance(index, (DatetimeIndex, PeriodIndex)):
node._v_attrs.index_class = self._class_to_alias(type(index))
- if hasattr(index, "freq"):
+ if isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
node._v_attrs.freq = index.freq
- if hasattr(index, "tz") and index.tz is not None:
+ if isinstance(index, DatetimeIndex) and index.tz is not None:
node._v_attrs.tz = _get_tz(index.tz)
- def write_block_index(self, key, index):
- self.write_array(f"{key}_blocs", index.blocs)
- self.write_array(f"{key}_blengths", index.blengths)
- setattr(self.attrs, f"{key}_length", index.length)
-
- def read_block_index(self, key, **kwargs) -> BlockIndex:
- length = getattr(self.attrs, f"{key}_length")
- blocs = self.read_array(f"{key}_blocs", **kwargs)
- blengths = self.read_array(f"{key}_blengths", **kwargs)
- return BlockIndex(length, blocs, blengths)
-
- def write_sparse_intindex(self, key, index):
- self.write_array(f"{key}_indices", index.indices)
- setattr(self.attrs, f"{key}_length", index.length)
-
- def read_sparse_intindex(self, key, **kwargs) -> IntIndex:
- length = getattr(self.attrs, f"{key}_length")
- indices = self.read_array(f"{key}_indices", **kwargs)
- return IntIndex(length, indices)
-
def write_multi_index(self, key, index):
setattr(self.attrs, f"{key}_nlevels", index.nlevels)
| AFAICT this is a legacy of SparseDataFrame | https://api.github.com/repos/pandas-dev/pandas/pulls/29863 | 2019-11-26T16:49:28Z | 2019-11-27T20:54:20Z | 2019-11-27T20:54:20Z | 2019-11-27T21:19:55Z |
CI: Fix version openpyxl | diff --git a/environment.yml b/environment.yml
index 848825c37a160..2b171d097a693 100644
--- a/environment.yml
+++ b/environment.yml
@@ -78,7 +78,7 @@ dependencies:
- fastparquet>=0.3.2 # pandas.read_parquet, DataFrame.to_parquet
- html5lib # pandas.read_html
- lxml # pandas.read_html
- - openpyxl # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile
+ - openpyxl<=3.0.1 # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile
- pyarrow>=0.13.1 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather
- pyqt>=5.9.2 # pandas.read_clipboard
- pytables>=3.4.2 # pandas.read_hdf, DataFrame.to_hdf
diff --git a/pandas/tests/io/excel/test_openpyxl.py b/pandas/tests/io/excel/test_openpyxl.py
index e9b4a5d4e430d..f00329e9c7f81 100644
--- a/pandas/tests/io/excel/test_openpyxl.py
+++ b/pandas/tests/io/excel/test_openpyxl.py
@@ -1,5 +1,9 @@
+import os
+
+import numpy as np
import pytest
+import pandas as pd
from pandas import DataFrame
import pandas.util.testing as tm
@@ -101,3 +105,21 @@ def test_write_append_mode(ext, mode, expected):
for index, cell_value in enumerate(expected):
assert wb2.worksheets[index]["A1"].value == cell_value
+
+
+@pytest.mark.xfail(openpyxl.__version__ > "3.0.1", reason="broken change in openpyxl")
+def test_to_excel_with_openpyxl_engine(ext, tmpdir):
+ # GH 29854
+ # TODO: Fix this once newer version of openpyxl fixes the bug
+ df1 = DataFrame({"A": np.linspace(1, 10, 10)})
+ df2 = DataFrame({"B": np.linspace(1, 20, 10)})
+ df = pd.concat([df1, df2], axis=1)
+ styled = df.style.applymap(
+ lambda val: "color: %s" % "red" if val < 0 else "black"
+ ).highlight_max()
+
+ filename = tmpdir / "styled.xlsx"
+ styled.to_excel(filename, engine="openpyxl")
+
+ assert filename.exists()
+ os.remove(filename)
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 4d0e7ee904294..5f67726a3e476 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -51,7 +51,7 @@ beautifulsoup4>=4.6.0
fastparquet>=0.3.2
html5lib
lxml
-openpyxl
+openpyxl<=3.0.1
pyarrow>=0.13.1
pyqt5>=5.9.2
tables>=3.4.2
| - [ ] closes #29854
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/29862 | 2019-11-26T16:47:04Z | 2019-11-26T22:55:16Z | 2019-11-26T22:55:15Z | 2020-04-29T16:50:45Z |
TYP: io.pytables annotations | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index bf7aa5970519f..8539d0547e5d1 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -53,7 +53,7 @@
from pandas.io.formats.printing import adjoin, pprint_thing
if TYPE_CHECKING:
- from tables import File # noqa:F401
+ from tables import File, Node # noqa:F401
# versioning attribute
@@ -244,7 +244,7 @@ def to_hdf(
key,
value,
mode=None,
- complevel=None,
+ complevel: Optional[int] = None,
complib=None,
append=None,
**kwargs,
@@ -459,12 +459,14 @@ class HDFStore:
"""
_handle: Optional["File"]
+ _complevel: int
+ _fletcher32: bool
def __init__(
self,
path,
mode=None,
- complevel=None,
+ complevel: Optional[int] = None,
complib=None,
fletcher32: bool = False,
**kwargs,
@@ -526,7 +528,7 @@ def __getattr__(self, name: str):
f"'{type(self).__name__}' object has no attribute '{name}'"
)
- def __contains__(self, key: str):
+ def __contains__(self, key: str) -> bool:
""" check for existence of this key
can match the exact pathname or the pathnm w/o the leading '/'
"""
@@ -1267,18 +1269,22 @@ def walk(self, where="/"):
yield (g._v_pathname.rstrip("/"), groups, leaves)
- def get_node(self, key: str):
+ def get_node(self, key: str) -> Optional["Node"]:
""" return the node with the key or None if it does not exist """
self._check_if_open()
if not key.startswith("/"):
key = "/" + key
assert self._handle is not None
+ assert _table_mod is not None # for mypy
try:
- return self._handle.get_node(self.root, key)
- except _table_mod.exceptions.NoSuchNodeError: # type: ignore
+ node = self._handle.get_node(self.root, key)
+ except _table_mod.exceptions.NoSuchNodeError:
return None
+ assert isinstance(node, _table_mod.Node), type(node)
+ return node
+
def get_storer(self, key: str) -> Union["GenericFixed", "Table"]:
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
@@ -1296,7 +1302,7 @@ def copy(
propindexes: bool = True,
keys=None,
complib=None,
- complevel=None,
+ complevel: Optional[int] = None,
fletcher32: bool = False,
overwrite=True,
):
@@ -1387,7 +1393,9 @@ def info(self) -> str:
return output
- # private methods ######
+ # ------------------------------------------------------------------------
+ # private methods
+
def _check_if_open(self):
if not self.is_open:
raise ClosedFileError(f"{self._path} file is not open!")
@@ -1559,7 +1567,7 @@ def _write_to_group(
if isinstance(s, Table) and index:
s.create_index(columns=index)
- def _read_group(self, group, **kwargs):
+ def _read_group(self, group: "Node", **kwargs):
s = self._create_storer(group)
s.infer_axes()
return s.read(**kwargs)
@@ -1786,7 +1794,7 @@ def copy(self):
new_self = copy.copy(self)
return new_self
- def infer(self, handler):
+ def infer(self, handler: "Table"):
"""infer this column from the table: create and return a new object"""
table = handler.table
new_self = self.copy()
@@ -2499,9 +2507,16 @@ class Fixed:
pandas_kind: str
obj_type: Type[Union[DataFrame, Series]]
ndim: int
+ parent: HDFStore
+ group: "Node"
is_table = False
- def __init__(self, parent, group, encoding=None, errors="strict", **kwargs):
+ def __init__(
+ self, parent: HDFStore, group: "Node", encoding=None, errors="strict", **kwargs
+ ):
+ assert isinstance(parent, HDFStore), type(parent)
+ assert _table_mod is not None # needed for mypy
+ assert isinstance(group, _table_mod.Node), type(group)
self.parent = parent
self.group = group
self.encoding = _ensure_encoding(encoding)
@@ -2568,11 +2583,11 @@ def _filters(self):
return self.parent._filters
@property
- def _complevel(self):
+ def _complevel(self) -> int:
return self.parent._complevel
@property
- def _fletcher32(self):
+ def _fletcher32(self) -> bool:
return self.parent._fletcher32
@property
@@ -2637,7 +2652,7 @@ def read(
def write(self, **kwargs):
raise NotImplementedError(
- "cannot write on an abstract storer: sublcasses should implement"
+ "cannot write on an abstract storer: subclasses should implement"
)
def delete(
@@ -2803,7 +2818,7 @@ def write_index(self, key: str, index: Index):
if isinstance(index, DatetimeIndex) and index.tz is not None:
node._v_attrs.tz = _get_tz(index.tz)
- def write_multi_index(self, key, index):
+ def write_multi_index(self, key: str, index: MultiIndex):
setattr(self.attrs, f"{key}_nlevels", index.nlevels)
for i, (lev, level_codes, name) in enumerate(
@@ -2828,7 +2843,7 @@ def write_multi_index(self, key, index):
label_key = f"{key}_label{i}"
self.write_array(label_key, level_codes)
- def read_multi_index(self, key, **kwargs) -> MultiIndex:
+ def read_multi_index(self, key: str, **kwargs) -> MultiIndex:
nlevels = getattr(self.attrs, f"{key}_nlevels")
levels = []
@@ -2849,7 +2864,7 @@ def read_multi_index(self, key, **kwargs) -> MultiIndex:
)
def read_index_node(
- self, node, start: Optional[int] = None, stop: Optional[int] = None
+ self, node: "Node", start: Optional[int] = None, stop: Optional[int] = None
):
data = node[start:stop]
# If the index was an empty array write_array_empty() will
@@ -3310,7 +3325,7 @@ def values_cols(self) -> List[str]:
""" return a list of my values cols """
return [i.cname for i in self.values_axes]
- def _get_metadata_path(self, key) -> str:
+ def _get_metadata_path(self, key: str) -> str:
""" return the metadata pathname for this key """
group = self.group._v_pathname
return f"{group}/meta/{key}/meta"
@@ -3877,10 +3892,10 @@ def process_filter(field, filt):
def create_description(
self,
complib=None,
- complevel=None,
+ complevel: Optional[int] = None,
fletcher32: bool = False,
expectedrows: Optional[int] = None,
- ):
+ ) -> Dict[str, Any]:
""" create the description of the table from the axes & values """
# provided expected rows if its passed
@@ -4537,10 +4552,10 @@ def _set_tz(values, tz, preserve_UTC: bool = False, coerce: bool = False):
return values
-def _convert_index(name: str, index, encoding=None, errors="strict"):
+def _convert_index(name: str, index: Index, encoding=None, errors="strict"):
assert isinstance(name, str)
- index_name = getattr(index, "name", None)
+ index_name = index.name
if isinstance(index, DatetimeIndex):
converted = index.asi8
@@ -4630,8 +4645,9 @@ def _convert_index(name: str, index, encoding=None, errors="strict"):
)
-def _unconvert_index(data, kind, encoding=None, errors="strict"):
- kind = _ensure_decoded(kind)
+def _unconvert_index(data, kind: str, encoding=None, errors="strict"):
+ index: Union[Index, np.ndarray]
+
if kind == "datetime64":
index = DatetimeIndex(data)
elif kind == "timedelta64":
| https://api.github.com/repos/pandas-dev/pandas/pulls/29861 | 2019-11-26T16:37:21Z | 2019-11-29T23:06:23Z | 2019-11-29T23:06:23Z | 2019-11-29T23:27:04Z | |
CI: Fix npdev build | diff --git a/pandas/__init__.py b/pandas/__init__.py
index cd697b757a26a..d6f3458b4d604 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -24,6 +24,7 @@
_np_version_under1p15,
_np_version_under1p16,
_np_version_under1p17,
+ _np_version_under1p18,
)
try:
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 1282aa6edd538..85e38d58a6c57 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -189,6 +189,7 @@ class TestPDApi(Base):
"_np_version_under1p15",
"_np_version_under1p16",
"_np_version_under1p17",
+ "_np_version_under1p18",
"_tslib",
"_typing",
"_version",
diff --git a/pandas/tests/indexes/test_numpy_compat.py b/pandas/tests/indexes/test_numpy_compat.py
index 6626ccf4a29f8..3d24c70afdda2 100644
--- a/pandas/tests/indexes/test_numpy_compat.py
+++ b/pandas/tests/indexes/test_numpy_compat.py
@@ -6,9 +6,11 @@
Float64Index,
Index,
Int64Index,
+ PeriodIndex,
TimedeltaIndex,
UInt64Index,
_np_version_under1p17,
+ _np_version_under1p18,
)
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
@@ -80,18 +82,22 @@ def test_numpy_ufuncs_other(indices, func):
idx = indices
if isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
- # ok under numpy >= 1.17
- if not _np_version_under1p17 and func in [np.isfinite]:
+ if not _np_version_under1p18 and func in [np.isfinite, np.isinf, np.isnan]:
+ # numpy 1.18(dev) changed isinf and isnan to not raise on dt64/tfd64
+ result = func(idx)
+ assert isinstance(result, np.ndarray)
+
+ elif not _np_version_under1p17 and func in [np.isfinite]:
+ # ok under numpy >= 1.17
# Results in bool array
result = func(idx)
assert isinstance(result, np.ndarray)
- assert not isinstance(result, Index)
else:
# raise TypeError or ValueError (PeriodIndex)
with pytest.raises(Exception):
func(idx)
- elif isinstance(idx, DatetimeIndexOpsMixin):
+ elif isinstance(idx, PeriodIndex):
# raise TypeError or ValueError (PeriodIndex)
with pytest.raises(Exception):
func(idx)
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index 667fe689861be..bb8339439d339 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -765,13 +765,15 @@ def test_concat_join_axes_deprecated(self, axis):
)
expected = pd.concat([one, two], axis=1, sort=False).reindex(index=two.index)
- result = pd.concat([one, two], axis=1, sort=False, join_axes=[two.index])
+ with tm.assert_produces_warning(FutureWarning):
+ result = pd.concat([one, two], axis=1, sort=False, join_axes=[two.index])
tm.assert_frame_equal(result, expected)
expected = pd.concat([one, two], axis=0, sort=False).reindex(
columns=two.columns
)
- result = pd.concat([one, two], axis=0, sort=False, join_axes=[two.columns])
+ with tm.assert_produces_warning(FutureWarning):
+ result = pd.concat([one, two], axis=0, sort=False, join_axes=[two.columns])
tm.assert_frame_equal(result, expected)
| https://api.github.com/repos/pandas-dev/pandas/pulls/29860 | 2019-11-26T15:31:01Z | 2019-11-27T00:55:40Z | 2019-11-27T00:55:40Z | 2019-11-27T16:27:14Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.